
    i                     |   d dl Z d dlmZ d dlmZ d dlZd dlmZ ddlm	Z	 ddl
mZ ddlmZmZ ddlmZ dd	lmZ dd
lmZmZmZmZmZ ddlmZ ddlmZ e G d de             Ze ed       G d de                    Ze G d de             Ze ed       G d de                    Z  G d dee      Z!g dZ"y)    N)	dataclass)Any   )Cache)GenerationMixin)BaseModelOutputWithPoolingModelOutput)PreTrainedModel)Unpack)TransformersKwargsauto_docstringcan_return_tupleis_torchdynamo_compilingtorch_compilable_check   )	AutoModel   )Glm46VConfigc                   @    e Zd ZU eed<   dZdZdZdZdZ	dZ
dZdZdZdZy)Glm46VPreTrainedModelconfigmodel)imagevideotextTNpast_key_values)__name__
__module____qualname__r   __annotations__base_model_prefixinput_modalitiessupports_gradient_checkpointing_no_split_modules_skip_keys_device_placement_supports_flash_attn_supports_sdpa_can_compile_fullgraph_supports_attention_backend_can_record_outputs     t/home/obispo/Crisostomo_bridge/mision_env/lib/python3.12/site-packages/transformers/models/glm46v/modeling_glm46v.pyr   r   -   sC    1&*#"3N!"&r,   r   zJ
    Base class for Llava outputs, with hidden states and attentions.
    )custom_introc                       e Zd ZU dZdZej                  dz  ed<   dZe	dz  ed<   dZ
eej                     dz  ed<   dZeej                     dz  ed<   dZej                  dz  ed<   y)Glm46VModelOutputWithPasta[  
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
        The rope index difference between sequence length and multimodal rope.
    Nlast_hidden_stater   hidden_states
attentionsrope_deltas)r   r   r   __doc__r1   torchFloatTensorr    r   r   r2   tupler3   r4   
LongTensorr+   r,   r-   r0   r0   =   sv     37u((4/6$(OUT\(59M5**+d2926Je''(4/6+/K!!D(/r,   r0   c                       e Zd ZU dZi ZdZeed<   dZ fdZ	d Z
d Z	 	 	 	 ddej                  dz  d	ej                  dz  d
ej                  dz  dej                  dz  deej                  ej                  f   f
dZee	 ddej&                  d
ej                  dz  dee   deez  fd              Zee	 ddej&                  d	ej                  dz  dee   deez  fd              Z	 	 ddej                  dej&                  dej&                  dz  dej&                  dz  fdZee	 	 	 	 	 	 	 	 	 	 	 ddej                  dz  dej                  dz  dej                  dz  dedz  dej&                  dz  dej                  dz  dej&                  dz  d	ej                  dz  d
ej                  dz  dej                  dz  dej                  dz  dee   deez  fd              Z xZS ) Glm46VModelr   Fr   Nc                     t         |   |       t        j                  |j                        | _        t        j                  |j                        | _        d | _        | j                          y N)
super__init__r   from_configvision_configvisualtext_configlanguage_modelr4   	post_initselfr   	__class__s     r-   r?   zGlm46VModel.__init__^   sU     ++F,@,@A'33F4F4FG 	r,   c                 6    | j                   j                         S r=   )rD   get_input_embeddingsrG   s    r-   rJ   z Glm46VModel.get_input_embeddingsg   s    ""7799r,   c                 :    | j                   j                  |       y r=   )rD   set_input_embeddingsrG   values     r-   rM   z Glm46VModel.set_input_embeddingsj   s    007r,   	input_idsimage_grid_thwvideo_grid_thwattention_maskreturnc           
      F   | j                   j                  j                  }| j                   j                  }| j                   j                  }| j                   j
                  }g }	|h||c|}
|t        j                  |
      }t        j                  d|j                  d   |j                  d   |j                  |j                        }d\  }}d}|j                  |
j                        }t        |
      D ]  \  }}|||   dk(     }|j                         }g }d}|D ]T  }||k(  rd}n||k(  rd}||k(  r|s|j                  d       +||k(  r|r|j                  d	       D|j                  d
       V g }t!        j"                  t        |      d       D ]7  \  }}t%        |      }|d   d   }|d   d   dz   }|j                  |||f       9 g }d}|D ]:  \  }}}t'        |      dkD  r|d   j)                         dz   nd}|dk(  rQ||   d   ||   d   ||   d   }!} }|j+                         | j+                         |z  |!j+                         |z  }$}#}"t        j,                  |"      j/                  dd      j1                  d|#|$z        j3                         }%t        j,                  |#      j/                  ddd      j1                  |"d|$      j3                         }&t        j,                  |$      j/                  ddd      j1                  |"|#d      j3                         }'|j                  t        j4                  |%|&|'g      |z          |dz  }d}|d	k(  rb|||   d   ||   d   }!} }|| j+                         |z  |!j+                         |z  }$}#}"t7        |"      D ]  }(t        j8                  |(      j/                  dd      j1                  d|#|$z        j3                         }%t        j,                  |#      j/                  ddd      j1                  dd|$      j3                         }&t        j,                  |$      j/                  ddd      j1                  d|#d      j3                         }'|j                  t        j4                  |%|&|'g      |z           |dz  }|||   d   k\  r|dz  }d}|dz  }||z
  })|j                  t        j,                  |)      j/                  dd      j1                  dd      |z          d}= t        j:                  |d      j=                  dd      }*|*j                  |j                        |d|||   dk(  f<   |	j                  |*j)                         dz   t'        |
|         z
          t        j8                  |	|j                        j?                  d      }	||	fS ||jA                         jC                  d      dz
  }|jE                  |dk(  d       |j?                  d      j1                  ddd      j                  |j                        }|j)                  dd      d   j)                  dd      d   }+|+dz   |j                  d   z
  }	||	fS t        j,                  |j                  d   |j                        j/                  ddd      j1                  d|j                  d   d      }t        jF                  |j                  d   dg|j                  |j                        }	||	fS )aU  
        Calculate the 3D rope index based on image and video's temporal, height and width in LLM.

        Explanation:
            Each embedding sequence contains vision embedding and text embedding or just contains text embedding.

            For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
            Examples:
                input_ids: [T T T T T], here T is for text.
                temporal position_ids: [0, 1, 2, 3, 4]
                height position_ids: [0, 1, 2, 3, 4]
                width position_ids: [0, 1, 2, 3, 4]

            For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
            and 1D rotary position embedding for text part.
            Examples:
                Temporal (Time): 3 patches, representing different segments of the video in time.
                Height: 2 patches, dividing each frame vertically.
                Width: 2 patches, dividing each frame horizontally.
                We also have some important parameters:
                fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
                tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
                temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
                interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will be have a difference of 50 in the temporal position IDs.
                input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
                vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
                vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
                vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
                text temporal position_ids: [101, 102, 103, 104, 105]
                text height position_ids: [101, 102, 103, 104, 105]
                text width position_ids: [101, 102, 103, 104, 105]
                Here we calculate the text start position_ids as the max vision position_ids plus 1.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
                it.
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
                The temporal, height and width of feature shape of each image in LLM.
            video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
                The temporal, height and width of feature shape of each video in LLM.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

        Returns:
            position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
            mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
        r   r   r   dtypedevice)r   r   FTr   r   r   c                     | d   S )Nr   r+   )xs    r-   <lambda>z,Glm46VModel.get_rope_index.<locals>.<lambda>   s    [\]^[_ r,   r   dim.rX   )keepdim)rX   rW   )$r   rA   spatial_merge_sizeimage_token_idvideo_start_token_idvideo_end_token_idr6   	ones_likeonesshaperW   rX   to	enumeratetolistappend	itertoolsgroupbylistlenmaxitemarangeviewexpandflattenstackrangetensorcatreshape	unsqueezelongcumsummasked_fill_zeros),rG   rP   rQ   rR   rS   ra   rb   rc   rd   mrope_position_deltastotal_input_idsposition_idsimage_indexvideo_indexvideo_group_indexiinput_tokensinput_token_typevideo_check_flgtokeninput_type_groupkeygroupstart_index	end_indexllm_pos_ids_listvideo_frame_nummodality_type	start_idxend_idxst_idxthw
llm_grid_t
llm_grid_h
llm_grid_wt_indexh_indexw_indext_idxtext_lenllm_positionsmax_position_idss,                                               r-   get_rope_indexzGlm46VModel.get_rope_indexm   sL   v "[[66II33#{{??![[;; " n&@ND^'O%!&!A ::""oo ''L (,$K !+../E/EFN )/ : W`9%nQ&71&<=	(//1#% "') 8E 44*."44*/.(//8.0_(//8(//78 $& "+"3"3I>N4OQ_"` KJC KE"'(1+K %b	!q 0I$++S+y,IJ	K $& "#9I 7,5M9g?BCS?TWX?X-b1557!;^_F$/*;7:*;7:*;7:  1 FFHFFH(::FFH(:: 1;J
 #(,,z":"?"?A"F"M"MbR\_iRi"j"r"r"t"',,z":"?"?2q"I"P"PQ[]_ak"l"t"t"v"',,z":"?"?1b"I"P"PQ[]gik"l"t"t"v(//Wgw<W0X[a0ab#q(*+&'1+*;7:*;7:  1 FFH(::FFH(:: 1;J
 &+:%6 gE&+ll5&9&>&>r1&E&L&LRQ[^hQh&i&q&q&sG&+ll:&>&C&CAr1&M&T&TUVXZ\f&g&o&o&qG&+ll:&>&C&CAq"&M&T&TUVXbdf&g&o&o&qG,33EKK'SZ@[4\_e4efg *Q.),{0KA0NN'1,K01-'1, $+Y#6(//X0F0K0KAr0R0Y0YZ[]_0`ci0ij*+o7,r !&		*: B J J1b Q?L?O?OP\PcPc?dS!^A%6!%;;<%,,]->->-@1-Ds?[\K]G^-^_oW`p %*LL1FyO_O_$`$j$jkl$m!!666)-224;;B?!C)).A*=qA+55a8??2rJMMnNcNcd#/#3#3Au#3#Ea#H#L#LRY]#L#^_`#a (81(<~?S?STV?W(W%  !666 LL!3I<L<LMT!Q^VAyq126 
 ).__Q'+$++#//)%  !666r,   pixel_values_videoskwargsc                 L   |j                  | j                  j                        }g }|D ]j  \  }}}t        j                  d|j                         |j                         g      j                  d      j                  |d      }|j                  |       l t        j                  |d      }	 | j                  |f|	dd|}
|j                  d      | j                  j                  dz  z  j                         }t        j                  |
j                  |      }||
_        |
S )[  
        pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input videos.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        r   r   r]   Tgrid_thwreturn_dictr\   r   )typerB   rW   r6   rx   rq   r{   repeatrk   ry   prodra   rj   splitpooler_output)rG   r   rR   r   temp_frames_hwr   r   r   repeated_rowflattened_video_grid_thwvision_outputssplit_sizesvideo_embedss                r-   get_video_featureszGlm46VModel.get_video_features+  s    266t{{7H7HI% 	0GAq! <<AFFHaffh(?@JJ1MTTUVXYZL!!,/	0 $)99^#C $
*BPT
X^
 &**2.$++2P2PRS2SS[[]{{>#?#?M'3$r,   pixel_valuesc                 <   |j                  | j                  j                        } | j                  |f|dd|}|j                  d      | j                  j                  dz  z  j                         }t        j                  |j                  |      }||_        |S )T  
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        Tr   r\   r   )	r   rB   rW   r   ra   rj   r6   r   r   )rG   r   rQ   r   r   r   image_embedss          r-   get_image_featureszGlm46VModel.get_image_featuresI  s     $(():):;$\gNX\g`fg%**2.$++2P2PRS2SS[[]{{>#?#?M'3$r,   inputs_embedsimage_featuresvideo_featuresc                 T   || | j                         t        j                  | j                  j                  t        j
                  |j                              k(  }|j                  d      }| | j                         t        j                  | j                  j                  t        j
                  |j                              k(  }|j                  d      }n2|| j                  j                  k(  }|| j                  j                  k(  }|j                         }|j                  d      j                  |      j                  |j                        }|At        ||   j                         |j                         k(  d| d|j                  d           |j                         }|j                  d      j                  |      j                  |j                        }|At        ||   j                         |j                         k(  d| d|j                  d           ||fS )z
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.
        rV   r\   z6Image features and image tokens do not match, tokens: z, features: r   z6Video features and video tokens do not match, tokens: )rJ   r6   rx   r   rb   r|   rX   allvideo_token_idsumr{   	expand_asrh   r   numelrg   )	rG   rP   r   r   r   special_image_maskspecial_video_maskn_image_tokensn_video_tokenss	            r-   get_placeholder_maskz Glm46VModel.get_placeholder_mask_  s    !.2M$2K2K2MT[[77uzzR_RfRfg3 " "4!7!7!;!.2M$2K2K2MT[[77uzzR_RfRfg3 " "4!7!7!; "+dkk.H.H!H!*dkk.H.H!H+//1/99"=GGVYYZgZnZno%"01779^=Q=Q=SSHHXXdeseyeyz{e|d}~
 ,//1/99"=GGVYYZgZnZno%"01779^=Q=Q=SSHHXXdeseyeyz{e|d}~ "#555r,   r   r   r4   cache_positionc           
      Z   |du |duz  rt        d      | | j                         |      }|| j                  ||d      j                  }t	        j
                  |d      j                  |j                  |j                        }| j                  |||      \  }}|j                  ||      }|| j                  ||	d      j                  }t	        j
                  |d      j                  |j                  |j                        }| j                  |||      \  }}|j                  ||      }|t        |t              s|n|d	   }||j                  d
k(  rtt	        j                  |dddf   dd      }|j                  j                   r?|t	        j"                  |j                        j$                  z  }d|z
  j'                         }t)               xr2 |duxr |j*                  d   dk7  xs |duxr |j*                  d   dk7  }t)                xr) |duxr |d   dk(  xs |du xs |j-                         dk(  }|s|s| j.                   | j1                  |||	|      \  }}
|
| _        n|j*                  \  }}}|+|d   | j.                  z   j                  |j                        nd}t	        j2                  ||j                        }|j5                  dd      j7                  |d      }|#|j9                  ||j*                  d   z  d      }|j;                  |      }|j=                  d      j7                  ddd      } | j>                  dd|||||d|}tA        |jB                  |jD                  |jF                  |jH                  | j.                        S )a  
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            The rope index difference between sequence length and multimodal rope.
        Nz:You must specify exactly one of input_ids or inputs_embedsT)r   r   r]   )r   )r   full_attention   r   r   )dim1dim2g      ?)rS   r_   r\   r   )rP   r   rS   r   r   r   )r1   r   r2   r3   r4   r+   )%
ValueErrorrJ   r   r   r6   ry   rh   rX   rW   r   masked_scatterr   
isinstancedictndimdiagonalis_floating_pointfinfominintr   rg   get_seq_lengthr4   r   rr   rs   rt   repeat_interleaveaddr{   rD   r0   r1   r   r2   r3   )rG   rP   rS   r   r   r   r   r   rQ   rR   r4   r   r   r   
image_mask_r   
video_maskattention_mask_tensorprefill_compiled_stageprefill_noncompiled_stage
batch_size
seq_lengthdeltaoutputss                            r-   forwardzGlm46VModel.forward  s   2 -t";<YZZ 7D557	BM#22<]a2bppL 99\q9<<]=Q=QS`SfSfgL 55i_k5lMJ)88\RM*223Fdh2iwwL 99\q9<<]=Q=QS`SfSfgL 55i_k5lMAz)88\RM&0&FN[kLl " %05J5O5OST5T(-7LQPQT7RYZab(c%(..@@,AEKKPePkPkDlDpDp,p)-03H-H,M,M,O) &>%? &$&B9??1+=+B O!-M-2E2Ea2HA2M # -E,F(F )t+Fq0AQ0F V#t+T/M/M/OST/T & '*CHXHXH`,0,?,?""#8	 -@ -)k $/  -:,?,?)
J &1 $A&)9)99==m>R>RS 
  %||J}?S?ST+00B7>>z2N!-!33J%++a.4PVW3XE+//6+55a8??2rJ%$%% 
%)+')
 
 )%77#33!//))((
 	
r,   )NNNNr=   )NN)NNNNNNNNNNN)r   r   r   r!   _checkpoint_conversion_mappingaccepts_loss_kwargsr   r    r$   r?   rJ   rM   r6   r9   Tensorr8   r   r   r   r7   r   r   r   r   r   r   r   r0   r   __classcell__rH   s   @r-   r;   r;   U   s   %'":8
 .22626.2|7##d*|7 ((4/|7 ((4/	|7
 t+|7 
u||U\\)	*|7|  37".. ((4/ +,	
 
+	+  8  37'' ((4/ +,	
 
+	+  0 4837(6##(6 (((6 ))D0	(6
 ))D0(6T  .2.204(,26,08<2626/326g
##d*g
 t+g
 &&-	g

 g
 ((4/g
 llT)g
 #..5g
 ((4/g
 ((4/g
 %%,g
 ((4/g
 +,g
 
*	*g
  g
r,   r;   zR
    Base class for Glm46V causal language model (or autoregressive) outputs.
    c                       e Zd ZU dZdZej                  dz  ed<   dZej                  dz  ed<   dZ	e
dz  ed<   dZeej                     dz  ed<   dZeej                     dz  ed<   dZej                  dz  ed<   y)	Glm46VCausalLMOutputWithPasta  
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
        The rope index difference between sequence length and multimodal rope.
    Nlosslogitsr   r2   r3   r4   )r   r   r   r5   r   r6   r7   r    r   r   r   r2   r8   r3   r4   r9   r+   r,   r-   r   r     s     &*D%

d
")'+FE$+$(OUT\(59M5**+d2926Je''(4/6+/K!!D(/r,   r   c                        e Zd Zi ZddiZdZ fdZd Zd Ze		 dde
j                  d	e
j                  dz  d
ee   deez  fd       Ze		 dde
j                  de
j                  dz  d
ee   deez  fd       Zee		 	 	 	 	 	 	 	 	 	 	 	 dde
j                  dz  de
j(                  dz  de
j                  dz  dedz  de
j                  dz  de
j                  dz  de
j(                  dz  de
j                  dz  de
j                  dz  d	e
j                  dz  de
j                  dz  dee
j(                  z  d
ee   deez  fd              Z	 	 	 	 	 	 	 	 	 	 	 d  fd	Z	 dde
j                  dz  de
j(                  dz  dee
j(                  e
j(                  f   fdZ	 	 	 d!dedede
j                  dz  dee
j                  eeef   f   fdZ xZ S )"Glm46VForConditionalGenerationzlm_head.weightz(model.language_model.embed_tokens.weightFc                     t         |   |       t        |      | _        t	        j
                  |j                  j                  |j                  j                  d      | _	        | j                          y )NF)bias)r>   r?   r;   r   nnLinearrC   hidden_size
vocab_sizelm_headrE   rF   s     r-   r?   z'Glm46VForConditionalGeneration.__init__  sS      (
yy!3!3!?!?ASASA^A^ejkr,   c                 6    | j                   j                         S r=   )r   rJ   rK   s    r-   rJ   z3Glm46VForConditionalGeneration.get_input_embeddings  s    zz..00r,   c                 :    | j                   j                  |       y r=   )r   rM   rN   s     r-   rM   z3Glm46VForConditionalGeneration.set_input_embeddings"  s    

''.r,   Nr   rR   r   rT   c                 @     | j                   j                  d||d|S )r   )r   rR   r+   )r   r   )rG   r   rR   r   s       r-   r   z1Glm46VForConditionalGeneration.get_video_features%  s/     -tzz,, 
 3N
V\
 	
r,   r   rQ   c                 @     | j                   j                  d||d|S )r   )r   rQ   r+   )r   r   )rG   r   rQ   r   s       r-   r   z1Glm46VForConditionalGeneration.get_image_features6  s'     -tzz,,p,Wepioppr,   rP   rS   r   r   r   labelsr   logits_to_keepc                     | j                   d||||	|
|||||d
|}|d   }t        |t              rt        | d      n|}| j	                  |dd|ddf         }d}|2| j                  ||| j                  j                  j                        }t        |||j                  |j                  |j                  |j                        S )a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.

        Example:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, Glm46VForConditionalGeneration

        >>> model = Glm46VForConditionalGeneration.from_pretrained("zai-org/GLM-4.1V-9B-Thinking")
        >>> processor = AutoProcessor.from_pretrained("zai-org/GLM-4.1V-9B-Thinking")

        >>> messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        >>> inputs = processor(text=[text], images=[image], vision_infos=[vision_infos])

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..."
        ```)
rP   r   r   rQ   rR   r   rS   r   r   r   r   N)r   r   r   )r   r   r   r2   r3   r4   r+   )r   r   r   slicer   loss_functionr   rC   r   r   r   r2   r3   r4   )rG   rP   rS   r   r   r   r   r   r   rQ   rR   r   r   r   r   r2   slice_indicesr   r   s                      r-   r   z&Glm46VForConditionalGeneration.forwardE  s    x $** 
% 3))%)+')
 
  
 9C>SV8W~ot4]kmA}a,?@A%%VFt{{OfOfOqOq%rD+#33!//))++
 	
r,   c                 f    t        |   |f|||||||	|
|||d|}d |d<   |s|r
d |d<   d |d<   |S )N)r   rS   r   r   r   r   r   rQ   rR   	use_cacheis_first_iterationr   r   r   )r>   prepare_inputs_for_generation)rG   rP   r   rS   r   r   r   r  r   r   rQ   rR   r  r   model_inputsrH   s                  r-   r  z<Glm46VForConditionalGeneration.prepare_inputs_for_generation  ss    $ w<
+)')%% 3))1
 
" (,^$!i+/L(26L./r,   c                    || | j                         t        j                  | j                  j                  t        j
                  |j                              k(  d   }| | j                         t        j                  | j                  j                  t        j
                  |j                              k(  d   }| | j                         t        j                  | j                  j                  t        j
                  |j                              k(  d   }nK|| j                  j                  k(  }|| j                  j                  k(  }|| j                  j                  k(  }t        j                  |j                         |j                         z
  d      }|dkD  }|| z  }|j                  d      }	|j                  d      }
|	|
fS )aa  
        Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
        These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary.

        Returns:
            image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
            video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
        rV   ).r   r   r]   r   )rJ   r6   rx   r   image_start_token_idr|   rX   rc   rd   r}   r   r   )rG   rP   r   is_imageis_video_startis_video_endvideo_levelinside_videostandalone_imagesimage_countsvideo_countss              r-   _get_image_nums_and_video_numsz=Glm46VForConditionalGeneration._get_image_nums_and_video_nums  s   $ $.4,,.LL!A!A\i\p\pq H .4,,.LL!A!A\i\p\pq N .4,,.LL!?!?uzzZgZnZno L !DKK$D$DDH&$++*J*JJN$(F(FFL ll>#5#5#7,:J:J:L#LRST"Q %6 ),,,3%))a)0\))r,   expand_sizeis_encoder_decoderc                      dk(  rfS g d fd}fd} |      j                  d       |      |r*j                  d      t        d       |d         d<   fS )	Nr   )r   rQ   r   rR   second_per_grid_tsc                 4   j                  dd       }j                  dd       }j                  j                  dd             \  }}d }| D ]9  }|dk(  rct        j                  |t	        |            }|D cg c]'  }t        j
                  |d      j                         ) }	} || |   |	
	      | |<   l|dk(  rt	        |      }	 || |   |	
	      | |<   |d
k(  rct        j                  |t	        |            }|D cg c]'  }t        j
                  |d      j                         ) }	} || |   |	
	      | |<   |dk(  rt	        |      }	 || |   |	
	      | |<   |dk(  s  || |   t	        |      
	      | |<   < | S c c}w c c}w )NrQ   rR   r   )r   c                     t        j                  | |      }|gdg| j                         dz
  z  z   }t        j                  |D cg c]  } |j                  |  c}d      }|S c c}w )Nr   r   r]   )r6   r   r^   ry   r   )rZ   lengthsrepeat_timessamplesrepeat_argssampleresults          r-   _repeat_interleave_sampleszGlm46VForConditionalGeneration._expand_inputs_for_generation.<locals>._expand_dict_for_generation_visual.<locals>._repeat_interleave_samples  sa    ++a1+nsaeegk/BBg#VFMFMM;$?#V\]^ $Ws   A&r   r   r]   )r  r  r   r  )getr  r6   r   rn   r   r   )dict_to_expandrQ   rR   
image_nums
video_numsr   r   r  r  r  r  rP   model_kwargsrG   s             r-   "_expand_dict_for_generation_visualzhGlm46VForConditionalGeneration._expand_inputs_for_generation.<locals>._expand_dict_for_generation_visual  s   )--.>EN)--.>EN%)%H%H)9)9/4)P &I &"J
 & .(#kk.$z:JKGMTU6uzz&a8<<>UGU*D&s+W;+N3' ,,":.G*D&s+W;+N3' 11#kk.$z:JKGMTU6uzz&a8<<>UGU*D&s+W;+N3' ,,":.G*D&s+W;+N3' 00*D&s+T*5ET_+N3'7< "!3 V Vs   =,F,Fc                     | D ]J  }|dk7  s	| |   t        | |   t        j                        s-|vs2| |   j                  d      | |<   L | S )Nr   r   r]   )r   r6   r   r   )r"  r   r  visual_keyss     r-   _expand_dict_for_generationzaGlm46VForConditionalGeneration._expand_inputs_for_generation.<locals>._expand_dict_for_generationC  sl    % d++&s+7">##6E;.*8*=*O*OP[ab*O*cN3'd "!r,   r   r]   encoder_outputszMIf `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.)r   r!  r   )rG   r  r  rP   r%  r&  r)  r(  s   `` ``  @r-   _expand_inputs_for_generationz<Glm46VForConditionalGeneration._expand_inputs_for_generation  s     !l**w+	"Z		" :,G !33KQ3GI2<@ 12: !pqq.I,WhJi.jL*+,&&r,   r=   )NNNNNNNNNNNr   )NNNNNTNNNNF)r   FN)!r   r   r   r   _tied_weights_keysr   r?   rJ   rM   r   r6   r7   r9   r   r   r8   r   r   r   r   r   r   r   r   r   r  r  boolr   strr   r+  r   r   s   @r-   r   r     s   %'"*,VW1/  37
"..
 ((4/
 +,	

 
+	+
 
   37q''q ((4/q +,	q
 
+	+q q  .2.204(,26*.,08<262626-.Y
##d*Y
 t+Y
 &&-	Y

 Y
 ((4/Y
   4'Y
 llT)Y
 #..5Y
 ((4/Y
 ((4/Y
 ((4/Y
 ell*Y
 +,Y
 
-	-Y
  Y
|   )\ .26*##d*6* ||d*6* 
u||U\\)	*	6*t #(-1	U'U' !U' ##d*	U' 
uc3h/	0U'r,   r   )r;   r   r   )#rl   dataclassesr   typingr   r6   torch.nnr   cache_utilsr   
generationr   modeling_outputsr   r	   modeling_utilsr
   processing_utilsr   utilsr   r   r   r   r   autor   configuration_glm46vr   r   r0   r;   r   r   __all__r+   r,   r-   <module>r;     s   ,  !      ) G - &   . O   
0 0 0$ \
' \
 \
~ 
0; 0 0.H'%:O H'V
 Ur,   