
"""PyTorch StableLM model."""

from collections.abc import Callable
from typing import Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_layers import (
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    TransformersKwargs,
    auto_docstring,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
)
from .configuration_stablelm import StableLmConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class StableLmRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: StableLmConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]
        rope_init_fn = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: StableLmConfig,
        device: Optional["torch.device"] = None,
        seq_len: Optional[int] = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
        head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        dim = int(head_dim * partial_rotary_factor)

        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class StableLmMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


class StableLmLayerNormPerHead(nn.Module):
    def __init__(self, dim, num_heads, eps=1e-5, bias=False):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.norms = nn.ModuleList([nn.LayerNorm(dim, eps=eps, bias=bias) for _ in range(self.num_heads)])

    def forward(self, hidden_states: torch.Tensor):
        # Split along the num_heads axis, normalize each head separately, then merge the heads back together
        states_per_heads = torch.split(hidden_states, 1, dim=1)
        return torch.cat([norm(hidden_states) for norm, hidden_states in zip(self.norms, states_per_heads)], dim=1)


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
 2?1D1D.Ehz!!Qa"23::5BUW\^bdlmM  (;e(CT8TTr:   modulequerykeyvalueattention_maskscalingdropoutkwargsc                 T   t        || j                        }t        || j                        }	t        j                  ||j	                  dd            |z  }
|#|d d d d d d d |j
                  d   f   }|
|z   }
t        j                  j                  |
dt        j                        j                  |j                        }
t        j                  j                  |
|| j                        }
t        j                  |
|	      }|j	                  dd      j                         }||
fS )NrA   r   rR   )rO   rC   )ptrainingr   )r   num_key_value_groupsrI   matmulr]   rY   r   
functionalsoftmaxfloat32rL   rC   r   r   
contiguous)r   r   r   r   r   r   r   r   
key_statesvalue_statesattn_weightscausal_maskattn_outputs                r9   eager_attention_forwardr      s    3 ; ;<JUF$?$?@L<<z';';Aq'ABWLL!$Q1.D
0@0@0D.D%DE#k1==((2U]](SVVW\WbWbcL==((6??([L,,|\:K''1-88:K$$r:   c                   j    e Zd ZdZddededz  f fdZ	 	 	 	 	 	 	 ddej                  dej                  dz  dej                  dz  d	e
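

# The two helpers above implement standard grouped-query eager attention: `repeat_kv` tiles the
# key/value heads so that `num_key_value_heads` groups serve all query heads, and
# `eager_attention_forward` then computes softmax(Q @ K^T * scaling + mask) @ V.
# A minimal, hedged shape sketch (the tensor sizes here are illustrative assumptions, not values
# taken from any StableLM configuration):
#
#     query = torch.randn(1, 32, 10, 64)   # (batch, num_attention_heads, seq_len, head_dim)
#     key = torch.randn(1, 8, 10, 64)      # (batch, num_key_value_heads, seq_len, head_dim)
#     key = repeat_kv(key, 4)              # -> (1, 32, 10, 64), since 32 // 8 = 4 repeats per KV head
#     scores = query @ key.transpose(2, 3) * 64**-0.5   # -> (1, 32, 10, 10) attention logits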


class StableLmAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: StableLmConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.rotary_ndims = int(self.head_dim * config.rope_parameters["partial_rotary_factor"])
        self.is_causal = True
        self.scaling = self.head_dim**-0.5

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.use_qkv_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.use_qkv_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.use_qkv_bias)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)

        self.qk_layernorm = config.qk_layernorm
        if self.qk_layernorm:
            self.q_layernorm = StableLmLayerNormPerHead(self.head_dim, self.num_heads, eps=config.layer_norm_eps)
            self.k_layernorm = StableLmLayerNormPerHead(
                self.head_dim, self.num_key_value_heads, eps=config.layer_norm_eps
            )
        self.attention_dropout = config.attention_dropout

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if self.qk_layernorm:
            query_states = self.q_layernorm(query_states)
            key_states = self.k_layernorm(key_states)

        cos, sin = position_embeddings

        # Partial rotary embedding: only the first `rotary_ndims` dims of each head are rotated
        query_rot, query_pass = (
            query_states[..., : self.rotary_ndims],
            query_states[..., self.rotary_ndims :],
        )
        key_rot, key_pass = (
            key_states[..., : self.rotary_ndims],
            key_states[..., self.rotary_ndims :],
        )
        query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)

        query_states = torch.cat((query_rot, query_pass), dim=-1)
        key_states = torch.cat((key_rot, key_pass), dim=-1)

        if past_key_values is not None:
            # Specific to RoPE models with partial rotation
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "partial_rotation_size": self.rotary_ndims,
                "cache_position": cache_position,
            }
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1)
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class StableLmDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: StableLmConfig, layer_idx: int):
        super().__init__()
        self.use_parallel_residual = config.use_parallel_residual
        self.hidden_size = config.hidden_size
        self.self_attn = StableLmAttention(config, layer_idx=layer_idx)
        self.mlp = StableLmMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = None
        if not self.use_parallel_residual:
            self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`.

                [What are position IDs?](../glossary#position-ids)
            past_key_values (`Cache`, *optional*):
                cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        self_attn_output, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )

        if self.use_parallel_residual:
            # x = x + attn(ln1(x)) + mlp(ln1(x))
            mlp_output = self.mlp(hidden_states)
            mlp_output = self.dropout(mlp_output)
            hidden_states = residual + self_attn_output + mlp_output
        else:
            # x = x + attn(ln1(x)); x = x + mlp(ln2(x))
            residual = residual + self_attn_output
            mlp_output = self.mlp(self.post_attention_layernorm(residual))
            mlp_output = self.dropout(mlp_output)
            hidden_states = residual + mlp_output

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class StableLmPreTrainedModel(PreTrainedModel):
    config: StableLmConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["StableLmDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = True


@auto_docstring
class StableLmModel(StableLmPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`StableLmDecoderLayer`]

    Args:
        config: StableLmConfig
    """

    def __init__(self, config: StableLmConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [StableLmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self._attn_implementation = config._attn_implementation
        self.gradient_checkpointing = False
        self.rotary_emb = StableLmRotaryEmbedding(config=self.config)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When possible, rely on SDPA's `is_causal` argument instead of materializing a mask
        if (
            self.config._attn_implementation == "sdpa"
            and not using_compilable_cache
            and not output_attentions
            and AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            )
        ):
            return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided 2D `attention_mask` needs to be expanded, build the 4D causal mask here
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows, as required by the SDPA memory-efficient path
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume the mask already comes in inverted 4D form and needs no further processing
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


@auto_docstring
class StableLmForCausalLM(StableLmPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}

    def __init__(self, config):
        super().__init__(config)
        self.model = StableLmModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, StableLmForCausalLM

        >>> model = StableLmForCausalLM.from_pretrained("adept/persimmon-8b-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")

        >>> prompt = "human: Hey, what should I eat for dinner?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n'
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class StableLmForSequenceClassification(GenericForSequenceClassification, StableLmPreTrainedModel):
    pass


class StableLmForTokenClassification(GenericForTokenClassification, StableLmPreTrainedModel):
    pass


__all__ = [
    "StableLmForCausalLM",
    "StableLmModel",
    "StableLmPreTrainedModel",
    "StableLmForSequenceClassification",
    "StableLmForTokenClassification",
]
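

# For reference, a schematic illustration (not produced by running the code) of the mask built by
# `StableLmModel._prepare_4d_causal_attention_mask_with_cache_position`: `0` marks attendable positions
# and `m` stands for the dtype minimum used for masking. With sequence_length=3, target_length=3 and no
# padding, the (1, 1, 3, 3) mask is:
#
#     [[0, m, m],
#      [0, 0, m],
#      [0, 0, 0]]
#
# i.e. token i may attend to tokens 0..i; padded positions from a 2D `attention_mask` are additionally
# set to `m` by the `padding_mask` branch.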