
    Z jk                        S r SSKJr  SSKrSSKJr  SSKJr  SSKJ	r	J
r
  SSKJrJr  SS	KJr  SS
KJr  SSKJrJrJrJr  SSKJrJr  SSKJr  SSKJrJrJrJ r J!r!  SSK"J#r#  SSK$J%r%  SSK&J'r'J(r(J)r)J*r*  SSK+J,r,  SSK-J.r.J/r/J0r0  SSK1J2r2  \!Rf                  " \45      r5\ " S S\5      5       r6 " S S\Rn                  5      r8 " S S\,5      r9 " S S\(5      r: " S S \)5      r; " S! S"\'5      r< " S# S$\Rn                  5      r= " S% S&\5      r> " S' S(\65      r? " S) S*\5      r@ " S+ S,\65      rA\" S-S.9 " S/ S0\65      5       rB\" S1S.9 " S2 S3\6\25      5       rC/ S4QrDg)5zPyTorch Dia model.    )CallableN)nn   )initialization)DynamicCacheEncoderDecoderCache)create_bidirectional_maskcreate_causal_mask)FlashAttentionKwargs)GradientCheckpointingLayer)BaseModelOutput)BaseModelOutputWithPastAndCrossAttentionsSeq2SeqLMOutputSeq2SeqModelOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)TransformersKwargsauto_docstringcan_return_tupleis_torchdynamo_compilinglogging)merge_with_config_defaults)capture_outputs   )LlamaAttentionLlamaRMSNormLlamaRotaryEmbeddingeager_attention_forward)Phi3MLP   )	DiaConfigDiaDecoderConfigDiaEncoderConfig)DiaGenerationMixinc                   X   ^  \ rS rSr% \\S'   SrSrSrSr	Sr
SrSrSS/rU 4S jrS	rU =r$ )
DiaPreTrainedModel3   configmodelT	input_idsDiaEncoderLayerDiaDecoderLayerc                 .  > [         TU ]  U5        [        U[        5      (       ap  [        R
                  " U R                  R                  [        R                  S9U R                  R                  -  n[        R                  " UR                  U5        g g )Ndtype)super_init_weights
isinstanceDiaMultiChannelEmbeddingtorcharanger)   num_channelslong
vocab_sizeinitcopy_offsets)selfmoduler<   	__class__s      t/root/GenerationalWealth/GenerationalWealth/venv/lib/python3.13/site-packages/transformers/models/dia/modular_dia.pyr2    DiaPreTrainedModel._init_weights?   se    f%f677ll4;;#;#;5::NQUQ\Q\QgQggGJJv~~w/ 8     )__name__
__module____qualname____firstlineno__r"   __annotations__base_model_prefixsupports_gradient_checkpointing_supports_flash_attn_supports_sdpa_supports_flex_attn_can_compile_fullgraphmain_input_name_no_split_modulesr2   __static_attributes____classcell__r?   s   @r@   r'   r'   3   sG    &*#N!!O*,=>0 0rB   r'   c                   n   ^  \ rS rSrSrS\4U 4S jjrS\R                  S\R                  4S jr	Sr
U =r$ )	r4   F   a  In order to efficiently compute the audio embedding from the 9 different channels,
    we vectorize the embedding process by using a single embedding layer and an offset.
    Example:
    - num_embeds = 4
    - vocab_size = 8
    - num_channels = 3
    We would have offsets = [0, 8, 16]
    If audio_codes = [0, 1, 2, 3], [1, 3, 4, 7], [5, 6, 7, 8],
    then tokens = audio_codes + offsets
                = [0, 1, 2, 3, 9, 11, 12, 15, 21, 22, 23, 24]
    This allows us to use a single embedding layer for all channels.
    """

    def __init__(self, config: DiaDecoderConfig):
        super().__init__()
        self.embed = nn.Embedding(config.vocab_size * config.num_channels, config.hidden_size)
        self.hidden_size = config.hidden_size
        self.num_channels = config.num_channels

        offsets = torch.arange(config.num_channels, dtype=torch.long) * config.vocab_size
        self.register_buffer("offsets", offsets, persistent=False)

    def forward(self, audio_codes: torch.Tensor) -> torch.Tensor:
        # Shift every channel into its own slice of the shared embedding table
        tokens = (audio_codes + self.offsets.to(audio_codes.device)).squeeze(1)
        embeds = self.embed(tokens).view(audio_codes.shape[0], audio_codes.shape[1], -1, self.hidden_size)
        # Sum the per-channel embeddings into a single hidden state per frame
        return embeds.sum(dim=2)


class DiaMLP(Phi3MLP):
    pass


class DiaRMSNorm(LlamaRMSNorm):
    pass


class DiaRotaryEmbedding(LlamaRotaryEmbedding):
    pass
class DiaSelfAttention(LlamaAttention, nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: DiaEncoderConfig | DiaDecoderConfig, layer_idx: int, is_causal: bool = False):
        nn.Module.__init__(self)
        self.config = config
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.num_heads = self.config.num_attention_heads
        self.num_key_value_heads = self.config.num_key_value_heads or self.num_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.head_dim = getattr(config, "head_dim", config.hidden_size // self.num_heads)
        self.scaling = 1
        self.attention_dropout = 0.0
        self.is_causal = is_causal

        self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, config.hidden_size, bias=False)


class DiaCrossAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: DiaDecoderConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.cross_hidden_size = config.cross_hidden_size
        self.num_heads = self.config.cross_num_attention_heads
        self.num_key_value_heads = self.config.cross_num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.head_dim = config.cross_head_dim
        self.scaling = 1
        self.attention_dropout = 0.0
        self.is_causal = False

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.cross_hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.cross_hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        cross_attention_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        cross_shape = (*cross_attention_states.shape[:-1], -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        is_updated = past_key_values.is_updated.get(self.layer_idx) if past_key_values is not None else False
        if past_key_values is not None and is_updated:
            # Reuse the encoder key/value projections cached on the first decoding step
            key_states = past_key_values.cross_attention_cache.layers[self.layer_idx].keys
            value_states = past_key_values.cross_attention_cache.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(cross_attention_states).view(cross_shape).transpose(1, 2)
            value_states = self.v_proj(cross_attention_states).view(cross_shape).transpose(1, 2)
            if past_key_values is not None:
                key_states, value_states = past_key_values.cross_attention_cache.update(
                    key_states, value_states, self.layer_idx
                )
                past_key_values.is_updated[self.layer_idx] = True

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class DiaEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: DiaEncoderConfig, layer_idx: int):
        super().__init__()
        self.pre_sa_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.self_attention = DiaSelfAttention(config, layer_idx, is_causal=False)
        self.post_sa_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.mlp = DiaMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        normed_states = self.pre_sa_norm(hidden_states)
        self_attn_output, _ = self.self_attention(
            normed_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            **kwargs,
        )
        hidden_states = residual + self_attn_output

        residual = hidden_states
        normed_states = self.post_sa_norm(hidden_states)
        mlp_out = self.mlp(normed_states)
        hidden_states = residual + mlp_out

        return hidden_states


class DiaEncoder(DiaPreTrainedModel):
    _can_record_outputs = {
        "hidden_states": DiaEncoderLayer,
        "attentions": DiaSelfAttention,
    }

    def __init__(self, config: DiaEncoderConfig):
        super().__init__(config)
        self.config = config
        self.embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList(
            [DiaEncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.rotary_emb = DiaRotaryEmbedding(config=config)
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        hidden_states = self.embedding(input_ids)

        position_ids = torch.arange(input_ids.shape[-1], device=input_ids.device)[None, :]
        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
        )
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for encoder_layer in self.layers:
            hidden_states = encoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=attention_mask,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutput(last_hidden_state=hidden_states)


class DiaDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: DiaDecoderConfig, layer_idx: int):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attention = DiaSelfAttention(config, layer_idx, is_causal=True)
        self.cross_attention = DiaCrossAttention(config, layer_idx)
        self.pre_sa_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.pre_ca_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.pre_mlp_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.mlp = DiaMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        self_attn_cache = past_key_values
        if isinstance(self_attn_cache, EncoderDecoderCache):
            self_attn_cache = self_attn_cache.self_attention_cache

        residual = hidden_states
        normed_states = self.pre_sa_norm(hidden_states)
        self_attn_output, _ = self.self_attention(
            normed_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            past_key_values=self_attn_cache,
            **kwargs,
        )
        hidden_states = residual + self_attn_output

        residual = hidden_states
        normed_states = self.pre_ca_norm(hidden_states)
        cross_states, _ = self.cross_attention(
            normed_states,
            encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            **kwargs,
        )
        hidden_states = residual + cross_states

        residual = hidden_states
        normed_states = self.pre_mlp_norm(hidden_states)
        mlp_out = self.mlp(normed_states)
        hidden_states = residual + mlp_out

        return hidden_states


class DiaDecoder(DiaPreTrainedModel):
    """Transformer Decoder Stack using DenseGeneral."""

    _can_record_outputs = {
        "hidden_states": DiaDecoderLayer,
        "attentions": DiaSelfAttention,
        "cross_attentions": DiaCrossAttention,
    }

    def __init__(self, config: DiaDecoderConfig):
        super().__init__(config)
        self.num_channels = config.num_channels
        self.vocab_size = config.vocab_size
        self.embeddings = DiaMultiChannelEmbedding(config)
        self.layers = nn.ModuleList(
            [DiaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.rotary_emb = DiaRotaryEmbedding(config=config)
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        encoder_hidden_states: torch.FloatTensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPastAndCrossAttentions:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`):
            The original `decoder_input_ids` in 3D shape to facilitate more efficient computations.

            [What are input IDs?](../glossary#input-ids)
        """
        batch_size, seq_length = input_ids.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if position_ids is None:
            position_ids = torch.arange(seq_length, device=input_ids.device) + past_key_values_length
            position_ids = position_ids.unsqueeze(0)

        inputs_embeds = self.embeddings(input_ids)

        if attention_mask is None and not is_torchdynamo_compiling():
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)

        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
        )
        # Padding-only (bidirectional) mask over the encoder sequence used by cross-attention
        cross_attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=encoder_attention_mask,
        )
        position_embeddings = self.rotary_emb(inputs_embeds, position_ids)

        hidden_states = inputs_embeds
        for layer in self.layers:
            hidden_states = layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=cross_attention_mask,
                past_key_values=past_key_values,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring(
    custom_intro="""
    The bare Dia model outputting raw hidden-states without any specific head on top.
    """
)
class DiaModel(DiaPreTrainedModel):
    def __init__(self, config: DiaConfig):
        super().__init__(config)
        self.config = config
        self.encoder = DiaEncoder(config.encoder_config)
        self.decoder = DiaDecoder(config.decoder_config)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.Tensor | None = None,
        encoder_outputs: BaseModelOutput | tuple | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        use_cache: bool | None = True,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Seq2SeqModelOutput:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`
            or `(batch_size, target_sequence_length, num_codebooks)`, *optional*):
            1. (batch_size * num_codebooks, target_sequence_length): corresponds to the general use case where
            the audio input codebooks are flattened into the batch dimension. This also aligns with the
            flattened audio logits which are used to calculate the loss.

            2. (batch_size, sequence_length, num_codebooks): corresponds to the internally used shape of
            Dia to calculate embeddings and subsequent steps more efficiently.

            If no `decoder_input_ids` are provided, it will create a tensor of `bos_token_id` with shape
            `(batch_size, 1, num_codebooks)`. Indices can be obtained using the [`DiaProcessor`]. See
            [`DiaProcessor.__call__`] for more details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
            Indices of positions of each input sequence tokens in the position embeddings.
            Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`.

            [What are position IDs?](../glossary#position-ids)
        """
        if input_ids is None and encoder_outputs is None:
            raise ValueError(
                "You should either provide text ids or the cached text encodings. Neither has been found."
            )

        if self.is_gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        if use_cache and past_key_values is None:
            past_key_values = EncoderDecoderCache(
                DynamicCache(config=self.config), DynamicCache(config=self.config)
            )

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                **kwargs,
            )
        elif not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        bsz, seq_len, channels = encoder_outputs[0].shape[0], -1, self.config.decoder_config.num_channels
        if decoder_input_ids is None:
            decoder_input_ids = torch.full(
                size=(bsz, 1, channels),
                fill_value=self.config.decoder_config.bos_token_id,
                device=self.device,
            )
        # Accept the flattened 2D layout and move it into the 3D layout used internally
        if decoder_input_ids.ndim == 2:
            decoder_input_ids = decoder_input_ids.reshape(bsz, channels, seq_len).transpose(1, 2)

        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            position_ids=decoder_position_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            past_key_values=past_key_values,
            **kwargs,
        )

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs[0],
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Dia model consisting of a (byte) text encoder and audio decoder with a prediction head on top.
    """
)
class DiaForConditionalGeneration(DiaPreTrainedModel, DiaGenerationMixin):
    base_model_prefix = "model"
    main_input_name = "input_ids"

    def __init__(self, config: DiaConfig):
        super().__init__(config)
        self.config = config
        self.model = DiaModel(config)

        self.num_channels = config.decoder_config.num_channels
        self.vocab_size = config.decoder_config.vocab_size
        self.logits_dense = nn.Linear(
            config.decoder_config.hidden_size, self.num_channels * self.vocab_size, bias=False
        )
        self.loss_type = "ForMaskedLM"

        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.Tensor | None = None,
        encoder_outputs: BaseModelOutput | tuple | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        use_cache: bool | None = True,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Seq2SeqLMOutput:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`
            or `(batch_size, target_sequence_length, num_codebooks)`, *optional*):
            1. (batch_size * num_codebooks, target_sequence_length): corresponds to the general use case where
            the audio input codebooks are flattened into the batch dimension. This also aligns with the
            flattened audio logits which are used to calculate the loss.

            2. (batch_size, sequence_length, num_codebooks): corresponds to the internally used shape of
            Dia to calculate embeddings and subsequent steps more efficiently.

            If no `decoder_input_ids` are provided, it will create a tensor of `bos_token_id` with shape
            `(batch_size, 1, num_codebooks)`. Indices can be obtained using the [`DiaProcessor`]. See
            [`DiaProcessor.__call__`] for more details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
            Indices of positions of each input sequence tokens in the position embeddings.
            Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`.

            [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size * num_codebooks,)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in
            `[0, ..., config.decoder_config.vocab_size - 1]` or -100. Tokens with indices set to `-100`
            are ignored (masked).
        """
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_position_ids=decoder_position_ids,
            decoder_attention_mask=decoder_attention_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            use_cache=use_cache,
            **kwargs,
        )

        last_hidden_state = outputs[0]
        batch_size, seq_length = last_hidden_state.shape[:2]

        # Project to per-channel logits and flatten the codebooks into the batch dimension
        audio_logits = (
            self.logits_dense(last_hidden_state)
            .view((batch_size, seq_length, self.num_channels, self.vocab_size))
            .transpose(1, 2)
            .contiguous()
            .view((batch_size * self.num_channels, seq_length, self.vocab_size))
        )

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=audio_logits, labels=labels, vocab_size=self.vocab_size, **kwargs)

        return Seq2SeqLMOutput(
            loss=loss,
            logits=audio_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )


__all__ = ["DiaModel", "DiaPreTrainedModel", "DiaForConditionalGeneration"]
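
# Minimal usage sketch. The checkpoint path and speaker-tag format below are assumptions;
# `DiaProcessor.__call__` (referenced in the docstrings above) prepares both the text ids
# and the audio codebooks expected by this model:
#
#     from transformers import AutoProcessor, DiaForConditionalGeneration
#
#     processor = AutoProcessor.from_pretrained("path/to/dia-checkpoint")
#     model = DiaForConditionalGeneration.from_pretrained("path/to/dia-checkpoint")
#
#     inputs = processor(text=["[S1] Hello there."], return_tensors="pt")
#     audio_codes = model.generate(**inputs, max_new_tokens=256)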