"""PyTorch ViT model."""

import collections.abc
import math
from collections.abc import Callable

import torch
from torch import nn

from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    ImageClassifierOutput,
    MaskedImageModelingOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_vit import ViTConfig


logger = logging.get_logger(__name__)
	 	 ddej                  dej                  d
z  dedej                  fdZ xZS )ViTEmbeddingszb
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
    configuse_mask_tokenc                 J   t         |           t        j                  t	        j
                  dd|j                              | _        |r4t        j                  t	        j                  dd|j                              nd | _	        t        |      | _        | j                  j                  }t        j                  t	        j
                  d|dz   |j                              | _        t        j                  |j                        | _        |j"                  | _        || _        y )Nr   )super__init__r   	Parametertorchrandnhidden_size	cls_tokenzeros
mask_tokenViTPatchEmbeddingspatch_embeddingsnum_patchesposition_embeddingsDropouthidden_dropout_probdropout
patch_sizer   )selfr   r   r(   	__class__s       n/home/obispo/Crisostomo_bridge/mision_env/lib/python3.12/site-packages/transformers/models/vit/modeling_vit.pyr   zViTEmbeddings.__init__/   s    ekk!Q8J8J&KLQ_",,u{{1a9K9K'LMei 26 :++77#%<<A{QPVPbPb0c#d zz&"<"<= ++    
embeddingsheightwidthreturnc                    |j                   d   dz
  }| j                  j                   d   dz
  }t        j                  j	                         s||k(  r||k(  r| j                  S | j                  ddddf   }| j                  ddddf   }|j                   d   }|| j
                  z  }	|| j
                  z  }
t        |dz        }|j                  d|||      }|j                  dddd      }t        j                  j                  ||	|
fdd	
      }|j                  dddd      j                  dd|      }t        j                  ||fd      S )a   
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        r   N      ?r   r      bicubicF)sizemodealign_cornersdim)shaper)   r    jit
is_tracingr-   r   reshapepermuter   
functionalinterpolateviewcat)r.   r2   r3   r4   r(   num_positionsclass_pos_embedpatch_pos_embedr?   
new_height	new_widthsqrt_num_positionss               r0   interpolate_pos_encodingz&ViTEmbeddings.interpolate_pos_encoding;   s`    !&&q)A-0066q9A= yy##%+*F6UZ?+++221bqb59221ab59r"t.
T__,	&}c'9:)11!5GI[]`a)11!Q1=--33i(	 4 
 *11!Q1=BB1b#Nyy/?;CCr1   Npixel_valuesbool_masked_posrO   c                    |j                   \  }}}}| j                  ||      }|Z|j                   d   }	| j                  j                  ||	d      }
|j	                  d      j                  |
      }|d|z
  z  |
|z  z   }| j                  j                  |dd      }t        j                  ||fd      }|r|| j                  |||      z   }n|| j                  z   }| j                  |      }|S )N)rO   r   r7   g      ?r>   )r@   r'   r%   expand	unsqueezetype_asr#   r    rH   rO   r)   r,   )r.   rP   rQ   rO   
batch_sizenum_channelsr3   r4   r2   
seq_lengthmask_tokensmask
cls_tokenss                r0   forwardzViTEmbeddings.forwardc   s    3?2D2D/
L&%**<Rj*k
&#))!,J//00ZLK",,R088ED#sTz2[45GGJ ^^**:r2>
YY
J7Q?
 $#d&C&CJPVX]&^^J#d&>&>>J\\*-
r1   FNF)__name__
__module____qualname____doc__r   boolr   r    TensorintrO   
BoolTensorr\   __classcell__r/   s   @r0   r   r   *   s    
y 
$ 
&D5<< &D &DUX &D]b]i]i &DV 48).	ll ))D0 #'	
 
r1   r   c                   f     e Zd ZdZdef fdZddej                  dedej                  fdZ	 xZ
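

# A quick shape walk-through of the embedding pipeline above (doctest-style sketch, assuming
# the default ViTConfig: image_size=224, patch_size=16, hidden_size=768). A (1, 3, 224, 224)
# image yields (224 // 16) ** 2 = 196 patch embeddings, and prepending the CLS token gives
# the 197-token sequence that the position embeddings are sized for:
#
#   >>> import torch
#   >>> from transformers import ViTConfig
#   >>> from transformers.models.vit.modeling_vit import ViTEmbeddings
#   >>> embeddings = ViTEmbeddings(ViTConfig())
#   >>> embeddings(torch.randn(1, 3, 224, 224)).shape
#   torch.Size([1, 197, 768])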
S )	r&   z
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    r   c                    t         |           |j                  |j                  }}|j                  |j
                  }}t        |t        j                  j                        r|n||f}t        |t        j                  j                        r|n||f}|d   |d   z  |d   |d   z  z  }|| _        || _        || _        || _
        t        j                  ||||      | _        y )Nr   r   )kernel_sizestride)r   r   
image_sizer-   rW   r"   
isinstancecollectionsabcIterabler(   r   Conv2d
projection)r.   r   rm   r-   rW   r"   r(   r/   s          r0   r   zViTPatchEmbeddings.__init__   s    !'!2!2F4E4EJ
$*$7$79K9Kk#-j+//:R:R#SZZdfpYq
#-j+//:R:R#SZZdfpYq
!!}
15*Q-:VW=:XY$$(&))L+:^hir1   rP   rO   r5   c                    |j                   \  }}}}|| j                  k7  rt        d| j                   d| d      |sV|| j                  d   k7  s|| j                  d   k7  r2t        d| d| d| j                  d    d| j                  d    d		      | j	                  |      j                  d
      j                  dd
      }|S )NzoMake sure that the channel dimension of the pixel values match with the one set in the configuration. Expected z	 but got .r   r   zInput image size (*z) doesn't match model (z).r9   )r@   rW   
ValueErrorrm   rs   flatten	transpose)r.   rP   rO   rV   rW   r3   r4   r2   s           r0   r\   zViTPatchEmbeddings.forward   s    2>2D2D/
L&%4,,,!../yaI  (++u8J/J (% 9+,Adooa.@-AE  __\2::1=GG1M
r1   r]   )r_   r`   ra   rb   r   r   r    rd   rc   r\   rg   rh   s   @r0   r&   r&      s;    jy jELL D ]b]i]i r1   r&   modulequerykeyvalueattention_maskscalingr,   kwargsc                    ||j                  d      dz  }t        j                  ||j                  dd            |z  }|#|d d d d d d d |j                  d   f   }||z   }t
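

# The projection above is patchification-by-convolution: because kernel_size == stride ==
# patch_size, each non-overlapping 16x16 patch is flattened and sent through one shared
# linear map. Sketch of the resulting shapes (default ViTConfig assumed):
#
#   >>> import torch
#   >>> from transformers import ViTConfig
#   >>> from transformers.models.vit.modeling_vit import ViTPatchEmbeddings
#   >>> patchify = ViTPatchEmbeddings(ViTConfig())
#   >>> patchify(torch.randn(2, 3, 224, 224)).shape  # (batch, num_patches, hidden_size)
#   torch.Size([2, 196, 768])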


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float | None = None,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    if scaling is None:
        scaling = query.size(-1) ** -0.5

    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling

    if attention_mask is not None:
        attention_mask = attention_mask[:, :, :, : key.shape[-2]]
        attn_weights = attn_weights + attention_mask

    # Normalize the attention scores to probabilities.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
 #)#=#= #&v'9'9F<V<V'V#W !558P8PP"??//5YYv1143E3EFOO\
99V//1C1C&//ZYYv1143E3EFOO\
r1   hidden_statesr5   c           
         |j                   d   }|d| j                  | j                  f} | j                  |      j                  | j                  dd      } | j                  |      j                  | j                  dd      } | j                  |      j                  | j                  dd      }t        j                  | j                  j                  t              } || |||d | j                  | j                  | j                  sdn| j                         \  }}	|j#                         d d | j$                  fz   }
|j'                  |
      }||	fS )Nr   r7   r   r9           )r   r   r,   r   )r@   r   r   r|   rG   ry   r}   r{   r   get_interfacer   _attn_implementationr   r   r   r   r   r;   r   rC   )r.   r   rV   	new_shape	key_layervalue_layerquery_layerattention_interfacecontext_layerattention_probsnew_context_layer_shapes              r0   r\   zViTSelfAttention.forward   sF   "((+
D$<$<d>V>VV	0DHH]+00)<FFq!L	4djj/44i@JJ1aP4djj/44i@JJ1aP(?(M(MKK,,.E)
 *=nnLL#}}C$2C2C	*
& #0"4"4"6s";t?Q?Q>S"S%--.EFo--r1   )
r_   r`   ra   r   r   r    rd   tupler\   rg   rh   s   @r0   r   r      s:    ]y ](.U\\ .eELL%,,<V6W .r1   r   c                   x     e Zd ZdZdef fdZdej                  dej                  dej                  fdZ xZ	S )ViTSelfOutputz


class ViTSelfOutput(nn.Module):
    """
    The residual connection is defined in ViTLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
  s*    ,y ,
U\\ ell r1   r   c                   \     e Zd Zdef fdZdej                  dej                  fdZ xZS )ViTIntermediater   c                    t         |           t        j                  |j                  |j
                        | _        t        |j                  t              rt        |j                     | _        y |j                  | _        y r   )r   r   r   r   r"   intermediate_sizer   rn   
hidden_actstrr   intermediate_act_fnr   s     r0   r   zViTIntermediate.__init__  s]    YYv1163K3KL
f''-'-f.?.?'@D$'-'8'8D$r1   r   r5   c                 J    | j                  |      }| j                  |      }|S r   )r   r   )r.   r   s     r0   r\   zViTIntermediate.forward  s&    

=100?r1   r   rh   s   @r0   r   r     s*    9y 9U\\ ell r1   r   c                   t     e Zd Zdef fdZdej                  dej                  dej                  fdZ xZS )	ViTOutputr   c                     t         |           t        j                  |j                  |j
                        | _        t        j                  |j                        | _	        y r   )
r   r   r   r   r   r"   r   r*   r+   r,   r   s     r0   r   zViTOutput.__init__&  sB    YYv779K9KL
zz&"<"<=r1   r   r   r5   c                 T    | j                  |      }| j                  |      }||z   }|S r   r   r   s      r0   r\   zViTOutput.forward+  s.    

=1]3%4r1   r   rh   s   @r0   r   r   %  s8    >y >
U\\  RWR^R^ r1   r   c                   `     e Zd ZdZdef fdZdej                  dej                  fdZ xZ	S )ViTLayerz?This corresponds to the Block class in the timm implementation.r   c                 r   t         |           |j                  | _        d| _        t	        |      | _        t        |      | _        t        |      | _	        t        j                  |j                  |j                        | _        t        j                  |j                  |j                        | _        y )Nr   eps)r   r   chunk_size_feed_forwardseq_len_dimr   r   r   intermediater   r   r   	LayerNormr"   layer_norm_epslayernorm_beforelayernorm_afterr   s     r0   r   zViTLayer.__init__5  s    '-'E'E$%f-+F3' "V-?-?VEZEZ [!||F,>,>FDYDYZr1   r   r5   c                     | j                  |      }| j                  |      }||z   }| j                  |      }| j                  |      }| j	                  ||      }|S r   )r   r   r   r   r   )r.   r   hidden_states_normattention_outputlayer_outputs        r0   r\   zViTLayer.forward?  si    !22=A>>*<= )=8 ++M:((6 {{<?r1   r   rh   s   @r0   r   r   2  s/    I[y [U\\ ell r1   r   c                   H     e Zd Zdef fdZdej                  defdZ xZ	S )
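

# ViTLayer is a pre-norm block: the layernorm feeds each sub-layer while the residual path
# skips it, i.e. x = x + Attn(LN(x)) followed by x = x + MLP(LN(x)). A standalone pass
# (illustrative sketch, default ViTConfig assumed) keeps the token shape unchanged:
#
#   >>> import torch
#   >>> from transformers import ViTConfig
#   >>> from transformers.models.vit.modeling_vit import ViTLayer
#   >>> layer = ViTLayer(ViTConfig())
#   >>> layer(torch.randn(1, 197, 768)).shape
#   torch.Size([1, 197, 768])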


class ViTEncoder(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ViTLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor) -> BaseModelOutput:
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
        return BaseModelOutput(last_hidden_state=hidden_states)


@auto_docstring
class ViTPreTrainedModel(PreTrainedModel):
    config: ViTConfig
    base_model_prefix = "vit"
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    supports_gradient_checkpointing = True
    _no_split_modules = ["ViTEmbeddings", "ViTLayer"]
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": ViTLayer,
        "attentions": ViTSelfAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module: nn.Linear | nn.Conv2d | nn.LayerNorm):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            init.zeros_(module.bias)
            init.ones_(module.weight)
        elif isinstance(module, ViTEmbeddings):
            init.trunc_normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range)
            init.trunc_normal_(module.cls_token, mean=0.0, std=self.config.initializer_range)
            if module.mask_token is not None:
                init.zeros_(module.mask_token)


@auto_docstring
class ViTModel(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False):
        r"""
        add_pooling_layer (`bool`, *optional*, defaults to `True`):
            Whether to add a pooling layer
        use_mask_token (`bool`, *optional*, defaults to `False`):
            Whether to use a mask token for masked image modeling.
        """
        super().__init__(config)
        self.config = config

        self.embeddings = ViTEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = ViTEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = ViTPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> ViTPatchEmbeddings:
        return self.embeddings.patch_embeddings

    @check_model_inputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor | None = None,
        bool_masked_pos: torch.BoolTensor | None = None,
        interpolate_pos_encoding: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # cast the input to the expected dtype of the patch projection if needed
        expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
        if pixel_values.dtype != expected_dtype:
            pixel_values = pixel_values.to(expected_dtype)

        embedding_output = self.embeddings(
            pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
        )

        encoder_outputs: BaseModelOutput = self.encoder(embedding_output)
        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)

        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)


class ViTPooler(nn.Module):
    def __init__(self, config: ViTConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.pooler_output_size)
        self.activation = ACT2FN[config.pooler_act]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
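

# Usage sketch for the bare encoder (checkpoint name as used in the docstring example
# below; `attn_implementation` selects one of the registered backends, and
# `interpolate_pos_encoding=True` resizes the 14x14 position grid, so a 384x384 input
# yields (384 // 16) ** 2 + 1 = 577 tokens instead of 197):
#
#   >>> import torch
#   >>> from transformers import ViTModel
#   >>> model = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k", attn_implementation="sdpa")
#   >>> pixel_values = torch.randn(1, 3, 384, 384)
#   >>> model(pixel_values, interpolate_pos_encoding=True).last_hidden_state.shape
#   torch.Size([1, 577, 768])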


@auto_docstring(
    custom_intro="""
    ViT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://huggingface.co/papers/2111.09886).

    <Tip>

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).

    </Tip>
    """
)
class ViTForMaskedImageModeling(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__(config)

        self.vit = ViTModel(config, add_pooling_layer=False, use_mask_token=True)

        self.decoder = nn.Sequential(
            nn.Conv2d(
                in_channels=config.hidden_size,
                out_channels=config.encoder_stride**2 * config.num_channels,
                kernel_size=1,
            ),
            nn.PixelShuffle(config.encoder_stride),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor | None = None,
        bool_masked_pos: torch.BoolTensor | None = None,
        interpolate_pos_encoding: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MaskedImageModelingOutput:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, ViTForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
        >>> model = ViTForMaskedImageModeling.from_pretrained("google/vit-base-patch16-224-in21k")

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 224, 224]
        ```"""
        if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride):
            raise ValueError(
                "When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that "
                "the reconstructed image has the same dimensions as the input. "
                f"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}."
            )

        outputs: BaseModelOutputWithPooling = self.vit(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        sequence_output = outputs.last_hidden_state

        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = sequence_output[:, 1:]
        batch_size, sequence_length, num_channels = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        return MaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
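

# How the SimMIM loss above aligns patch-level masks with pixels (illustrative numbers for
# the default 224/16 geometry): each boolean per-patch entry is expanded to a 16x16 pixel
# block before weighting the per-pixel L1 reconstruction loss:
#
#   >>> import torch
#   >>> bool_masked_pos = torch.randint(0, 2, (1, 196)).bool()
#   >>> size = 224 // 16  # image_size // patch_size
#   >>> mask = bool_masked_pos.reshape(-1, size, size)
#   >>> mask = mask.repeat_interleave(16, 1).repeat_interleave(16, 2).unsqueeze(1)
#   >>> mask.shape  # broadcastable against (batch, num_channels, height, width)
#   torch.Size([1, 1, 224, 224])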


@auto_docstring(
    custom_intro="""
    ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.

    <Tip>

        Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
        setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
        position embeddings to the higher resolution.

    </Tip>
    """
)
class ViTForImageClassification(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vit = ViTModel(config, add_pooling_layer=False)

        # Classifier head
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        interpolate_pos_encoding: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPooling = self.vit(
            pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        sequence_output = outputs.last_hidden_state

        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config, **kwargs)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
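

# Classification usage sketch (a standard fine-tuned checkpoint; the id -> label mapping
# ships with the checkpoint config, and `AutoImageProcessor` would normally produce the
# normalized (1, 3, 224, 224) tensor faked here with random data, so the predicted label
# is arbitrary):
#
#   >>> import torch
#   >>> from transformers import ViTForImageClassification
#   >>> model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   >>> logits = model(torch.randn(1, 3, 224, 224)).logits
#   >>> print(model.config.id2label[logits.argmax(-1).item()])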
   r   r   modeling_utilsr   r   processing_utilsr   utilsr   r   r   r   utils.genericr   r   configuration_vitr   
get_loggerr_   loggerModuler   r&   rd   floatr   r   r   r   r   r   r   r   r   r  r  r!  r9  __all__r   r1   r0   <module>rQ     s
      $   & ! 9  G & K K A ( 
		H	%UBII Up$ $\ !%II%<<% 
% <<	%
 LL4'% T\% % '(%:/.ryy /.dBII "	299 	bii 
		 
) <@ @ / / /B 7j! 7j 7jt		  	d
 2 d
d
N 0
 2 0
0
f gr1   