
    Z jxg                       S r SSKrSSKrSSKJr  SSKJr  SSKrSSK	r	SSK
Jr  SSK	Jr  SSKJr  SS	KJr  SS
KJr  SSKJr  SSKJr  SSKJr  SSKJr  SSKJr  SSKJ r J!r!J"r"J#r#J$r$J%r%  SSK&J'r'J(r(J)r)  SSK*J+r+  SSK,J-r-J.r.J/r/J0r0J1r1J2r2J3r3  SSK4J5r5  Sr6Sr7\3Rp                  " \95      r:Sr;\/" SS9\ " S S\-5      5       5       r<  SiS\=\>\>4   S\?S\>S \	R                  S-  S!\>S"\R                  4S# jjrBSjS$\=S%\>S&\R                  S-  4S' jjrC " S( S)\5      rD " S* S+\5      rE " S, S-\5      rF " S. S/\R                  5      rH " S0 S1\R                  5      rI " S2 S3\R                  5      rJ " S4 S5\R                  5      rK  SkS6\R                  S7\	R                  S8\	R                  S9\	R                  S \	R                  S-  S:\?S-  S;\?S<\+\.   4S= jjrM " S> S?\R                  5      rN " S@ SA\R                  5      rO " SB SC\5      rP " SD SE\5      rQ " SF SG\R                  5      rR " SH SI\R                  5      rS " SJ SK\R                  5      rT " SL SM\R                  5      rU " SN SO\R                  5      rV " SP SQ\R                  5      rW\/ " SR SS\(5      5       rX\/ " ST SU\X5      5       rY\/" SVS9 " SW SX\X5      5       rZ\/" SYS9 " SZ S[\X5      5       r[\/" S\S9 " S] S^\X5      5       r\\/ " S_ S`\X5      5       r] " Sa Sb\R                  5      r^ " Sc Sd\R                  5      r_\/" SeS9 " Sf Sg\X5      5       r`/ ShQrag)lzPyTorch Wav2Vec2 model.    N)Callable)	dataclass)	load_file)nn)CrossEntropyLoss   )initialization)ACT2FN)is_deepspeed_zero3_enabled)is_fsdp_managed_module)create_bidirectional_mask)FlashAttentionKwargs)GradientCheckpointingLayer)BaseModelOutputCausalLMOutputSequenceClassifierOutputTokenClassifierOutputWav2Vec2BaseModelOutputXVectorOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel*get_torch_context_manager_or_global_device)Unpack)ModelOutputTransformersKwargsauto_docstringcached_filecheck_torch_load_is_safeis_peft_availablelogging   )Wav2Vec2Configzadapter.{}.binzadapter.{}.safetensors   za
    Output type of [`Wav2Vec2ForPreTraining`], with potential hidden states and attentions.
    )custom_introc                   f   \ rS rSr% SrSr\R                  S-  \S'   Sr	\R                  S-  \S'   Sr
\R                  S-  \S'   Sr\R                  S-  \S'   Sr\\R                     S-  \S'   Sr\\R                     S-  \S	'   Sr\R                  S-  \S
'   Sr\R                  S-  \S'   Srg)Wav2Vec2ForPreTrainingOutputB   a  
    loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
        paper](https://huggingface.co/papers/2006.11477).
    projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
        Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
        projected quantized states.
    projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
        Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
        target vectors for contrastive loss.
    codevector_perplexity (`torch.FloatTensor` of shape `(1,)`):
        The perplexity of the codevector distribution, used to measure the diversity of the codebook.
    contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
        The contrastive loss (L_m) as stated in the [official paper](https://huggingface.co/papers/2006.11477).
    diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
        The diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477).
    """

    loss: torch.FloatTensor | None = None
    projected_states: torch.FloatTensor | None = None
    projected_quantized_states: torch.FloatTensor | None = None
    codevector_perplexity: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
    contrastive_loss: torch.FloatTensor | None = None
    diversity_loss: torch.FloatTensor | None = None
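
# Editorial note (not from the original file): per the wav2vec 2.0 paper, the partial
# losses reported above combine into the total `loss` roughly as
#
#     loss = contrastive_loss + diversity_loss_weight * diversity_loss
#
# where `diversity_loss_weight` corresponds to the alpha weight from the paper (a
# configuration value in this implementation). This is only an illustrative summary of
# how the fields of `Wav2Vec2ForPreTrainingOutput` relate to one another.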


def _compute_mask_indices(
    shape: tuple[int, int],
    mask_prob: float,
    mask_length: int,
    attention_mask: torch.LongTensor | None = None,
    min_masks: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
               the first element is the batch size and the second element is the length of the axis to span.
        mask_prob:  The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                    independently generated mask spans of length `mask_length` is computed by
                    `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                    actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                        each batch dimension.
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}`"
        )

    # epsilon is used for probabilistic rounding
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        """Given input length, compute how many spans should be masked"""
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)

        # make sure num masked span <= sequence_length
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length

        # make sure num_masked span is also <= input_length - (mask_length - 1)
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)

        return num_masked_span

    # compute number of masked spans in batch
    input_lengths = (
        attention_mask.detach().sum(-1).tolist()
        if attention_mask is not None
        else [sequence_length for _ in range(batch_size)]
    )

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []

    max_num_masked_span = compute_num_masked_span(sequence_length)

    if max_num_masked_span == 0:
        return spec_aug_mask

    for input_length in input_lengths:
        # compute num of masked spans for this input
        num_masked_span = compute_num_masked_span(input_length)

        # get random indices to mask
        spec_aug_mask_idx = np.random.choice(
            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
        )

        # pick first sampled index that will serve as a dummy index to pad vector
        # to ensure same dimension for all batches due to probabilistic rounding
        # Picking first sample just pads those vectors twice.
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
            # `sequence_length` in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask
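
# Example (illustrative, not from the original file): drawing a SpecAugment time mask
# for a batch of two 100-frame sequences. With `mask_prob=0.5` and `mask_length=10`,
# roughly `0.5 * 100 / 10 = 5` spans of 10 frames are masked per row:
#
#     mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.5, mask_length=10)
#     assert mask.shape == (2, 100) and mask.dtype == bool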


def _sample_negative_indices(
    features_shape: tuple, num_negatives: int, mask_time_indices: np.ndarray | None = None
):
    """
    Sample `num_negatives` vectors from feature vectors.
    """
    batch_size, sequence_length = features_shape

    # generate indices of the positive vectors themselves, repeat them `num_negatives` times
    sequence_length_range = np.arange(sequence_length)

    # get `num_negatives` random vector indices from the same utterance
    sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)

    mask_time_indices = (
        mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)
    )

    for batch_idx in range(batch_size):
        high = mask_time_indices[batch_idx].sum() - 1
        mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]

        feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))
        sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))
        # avoid sampling the same positive vector, but keep the distribution uniform
        sampled_indices[sampled_indices >= feature_indices] += 1

        # remap to actual indices
        sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]

        # correct for batch size
        sampled_negative_indices[batch_idx] += batch_idx * sequence_length

    return sampled_negative_indices


class Wav2Vec2NoLayerNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states


class Wav2Vec2LayerNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)

        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)

        hidden_states = self.activation(hidden_states)
        return hidden_states


class Wav2Vec2GroupNormConvLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states


class Wav2Vec2PositionalConvEmbedding(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
        )

        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = weight_norm(self.conv, name="weight", dim=2)
            if hasattr(self.conv, "parametrizations"):
                weight_g = self.conv.parametrizations.weight.original0
                weight_v = self.conv.parametrizations.weight.original1
            else:
                weight_g = self.conv.weight_g
                weight_v = self.conv.weight_v
            deepspeed.zero.register_external_parameter(self, weight_v)
            deepspeed.zero.register_external_parameter(self, weight_g)
        else:
            self.conv = weight_norm(self.conv, name="weight", dim=2)

        self.padding = Wav2Vec2SamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states


class Wav2Vec2SamePadLayer(nn.Module):
    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0

    def forward(self, hidden_states):
        if self.num_pad_remove > 0:
            hidden_states = hidden_states[:, :, : -self.num_pad_remove]
        return hidden_states


class Wav2Vec2FeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    def __init__(self, config):
        super().__init__()

        if config.feat_extract_norm == "group":
            conv_layers = [Wav2Vec2GroupNormConvLayer(config, layer_id=0)] + [
                Wav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1)
                for i in range(config.num_feat_extract_layers - 1)
            ]
        elif config.feat_extract_norm == "layer":
            conv_layers = [
                Wav2Vec2LayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
            ]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(conv_layers)
        self.gradient_checkpointing = False
        self._requires_grad = True

    def _freeze_parameters(self):
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def forward(self, input_values):
        hidden_states = input_values[:, None]

        # make sure hidden_states require grad for gradient_checkpointing
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True

        for conv_layer in self.conv_layers:
            hidden_states = conv_layer(hidden_states)

        return hidden_states


class Wav2Vec2FeatureProjection(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # non-projected hidden states are needed for quantization
        norm_hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.projection(norm_hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states, norm_hidden_states


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float | None = None,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    if scaling is None:
        scaling = query.size(-1) ** -0.5

    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class Wav2Vec2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Wav2Vec2Config | None = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        is_cross_attention = key_value_states is not None

        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        current_states = key_value_states if is_cross_attention else hidden_states
        kv_shape = (*current_states.shape[:-1], -1, self.head_dim)
        key_states = self.k_proj(current_states).view(kv_shape).transpose(1, 2)
        value_states = self.v_proj(current_states).view(kv_shape).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,
            scaling=self.scaling,
            output_attentions=output_attentions,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights, None


class Wav2Vec2FeedForward(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)

        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)

        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states


class Wav2Vec2EncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config):
        super().__init__()
        self.attention = Wav2Vec2Attention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
            config=config,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = Wav2Vec2FeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        attn_residual = hidden_states
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = attn_residual + hidden_states

        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states + self.feed_forward(hidden_states)
        hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class Wav2Vec2EncoderLayerStableLayerNorm(GradientCheckpointingLayer):
    def __init__(self, config):
        super().__init__()
        self.attention = Wav2Vec2Attention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
            config=config,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = Wav2Vec2FeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        if getattr(config, "adapter_attn_dim", None) is not None:
            self.adapter_layer = Wav2Vec2AttnAdapterLayer(config)
        else:
            self.adapter_layer = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
    ):
        attn_residual = hidden_states
        hidden_states = self.layer_norm(hidden_states)
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = attn_residual + hidden_states
        hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))

        if self.adapter_layer is not None:
            hidden_states = hidden_states + self.adapter_layer(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class Wav2Vec2Encoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([Wav2Vec2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_attention_mask] = 0

        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
        )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = self.training and (dropout_probability < self.config.layerdrop)
            if not skip_the_layer or synced_gpus:
                # under fsdp or deepspeed zero3 all gpus must run in sync
                layer_outputs = layer(
                    hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class Wav2Vec2EncoderStableLayerNorm(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [Wav2Vec2EncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens are not attended to
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_attention_mask] = 0

        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
        )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.dropout(hidden_states)

        synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = self.training and (dropout_probability < self.config.layerdrop)
            if not skip_the_layer or synced_gpus:
                # under fsdp or deepspeed zero3 all gpus must run in sync
                layer_outputs = layer(
                    hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
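
# Note (illustrative, not from the original file): both encoder variants above implement
# LayerDrop (https://huggingface.co/papers/1909.11556). During training each layer is
# skipped with probability `config.layerdrop`, except under DeepSpeed ZeRO-3 or FSDP,
# where all ranks must execute every layer in sync:
#
#     dropout_probability = torch.rand([])
#     skip_the_layer = self.training and (dropout_probability < self.config.layerdrop)
#     if not skip_the_layer or synced_gpus:
#         hidden_states = layer(hidden_states, attention_mask=attention_mask)[0]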


class Wav2Vec2GumbelVectorQuantizer(nn.Module):
    """
    Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH
    GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information.
    """

    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group

        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(
                f"`config.codevector_dim {config.codevector_dim} must be divisible "
                f"by `config.num_codevector_groups` {self.num_groups} for concatenation"
            )

        # storage for codebook variables (codewords)
        self.codevectors = nn.Parameter(
            torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
        )
        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)

        # can be decayed for training
        self.temperature = 2

    @staticmethod
    def _compute_perplexity(probs, mask=None):
        if mask is not None:
            mask_extended = mask.flatten()[:, None, None].expand(probs.shape)
            probs = torch.where(mask_extended, probs, torch.zeros_like(probs))
            marginal_probs = probs.sum(dim=0) / mask.sum()
        else:
            marginal_probs = probs.mean(dim=0)

        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
        return perplexity

    def forward(self, hidden_states, mask_time_indices=None):
        batch_size, sequence_length, hidden_size = hidden_states.shape

        # project to codevector dim
        hidden_states = self.weight_proj(hidden_states)
        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)

        if self.training:
            # sample code vector probs via gumbel in differentiable way
            codevector_probs = nn.functional.gumbel_softmax(
                hidden_states.float(), tau=self.temperature, hard=True
            ).type_as(hidden_states)

            # compute perplexity
            codevector_soft_dist = torch.softmax(
                hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
            )
            perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices)
        else:
            # take argmax in non-differentiable way
            # compute hard codevector distribution (one hot)
            codevector_idx = hidden_states.argmax(dim=-1)
            codevector_probs = hidden_states.new_zeros(hidden_states.shape).scatter_(
                -1, codevector_idx.view(-1, 1), 1.0
            )
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)

            perplexity = self._compute_perplexity(codevector_probs, mask_time_indices)

        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        # use probs to retrieve codevectors
        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)

        return codevectors, perplexity


class Wav2Vec2Adapter(nn.Module):
    def __init__(self, config):
        super().__init__()

        # feature dim might need to be down-projected
        if config.output_hidden_size != config.hidden_size:
            self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
            self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
        else:
            self.proj = self.proj_layer_norm = None

        self.layers = nn.ModuleList(Wav2Vec2AdapterLayer(config) for _ in range(config.num_adapter_layers))
        self.layerdrop = config.layerdrop

    def forward(self, hidden_states):
        # down project hidden_states if necessary
        if self.proj is not None and self.proj_layer_norm is not None:
            hidden_states = self.proj(hidden_states)
            hidden_states = self.proj_layer_norm(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)

        for layer in self.layers:
            layerdrop_prob = np.random.random()
            if not self.training or (layerdrop_prob > self.layerdrop):
                hidden_states = layer(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states


class Wav2Vec2AdapterLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.output_hidden_size,
            2 * config.output_hidden_size,
            config.adapter_kernel_size,
            stride=config.adapter_stride,
            padding=1,
        )

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = nn.functional.glu(hidden_states, dim=1)

        return hidden_states


class Wav2Vec2AttnAdapterLayer(nn.Module):
    def __init__(self, config):
        """
        Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
        up training throughput.
        """
        super().__init__()
        self.input_dim = config.adapter_attn_dim
        self.hidden_dim = config.hidden_size

        self.norm = nn.LayerNorm(self.hidden_dim)
        self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
        self.act_fn = nn.ReLU()
        self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)

    def forward(self, hidden_states: torch.FloatTensor):
        hidden_states = self.norm(hidden_states)

        hidden_states = self.linear_1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.linear_2(hidden_states)

        return hidden_states


@auto_docstring
class Wav2Vec2PreTrainedModel(PreTrainedModel):
    config: Wav2Vec2Config
    base_model_prefix = "wav2vec2"
    main_input_name = "input_values"
    input_modalities = "audio"
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        # Wav2Vec2ForPreTraining last 2 linear layers need standard Linear init.
        if isinstance(module, Wav2Vec2ForPreTraining):
            module.project_hid.reset_parameters()
            module.project_q.reset_parameters()
        # gumbel softmax requires special init
        elif isinstance(module, Wav2Vec2GumbelVectorQuantizer):
            init.normal_(module.weight_proj.weight, mean=0.0, std=1)
            init.zeros_(module.weight_proj.bias)
            init.uniform_(module.codevectors)
        elif isinstance(module, Wav2Vec2PositionalConvEmbedding):
            init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            init.constant_(module.conv.bias, 0)
        elif isinstance(module, Wav2Vec2FeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            init.uniform_(module.projection.weight, a=-k, b=k)
            init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                init.zeros_(module.bias)
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            init.uniform_(module.weight)
            init.ones_(module.weight)
            init.zeros_(module.bias)
        elif isinstance(module, nn.Conv1d):
            init.kaiming_normal_(module.weight)
            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                init.uniform_(module.bias, a=-k, b=k)

    def _get_feat_extract_output_lengths(
        self, input_lengths: torch.LongTensor | int, add_adapter: bool | None = None
    ):
        """
        Computes the output length of the convolutional layers
        """

        add_adapter = self.config.add_adapter if add_adapter is None else add_adapter

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1

        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        if add_adapter:
            for _ in range(self.config.num_adapter_layers):
                input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)

        return input_lengths
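
    # Worked example (illustrative, not from the original file): with the standard
    # wav2vec2 feature extractor (conv kernels (10, 3, 3, 3, 3, 2, 2) and strides
    # (5, 2, 2, 2, 2, 2, 2)), the method above applies floor((length - kernel) / stride) + 1
    # once per conv layer, so 16000 samples (one second of 16 kHz audio) reduce to
    # 49 feature frames, roughly one frame every 20 ms:
    #
    #     model._get_feat_extract_output_lengths(torch.tensor([16000]))  # -> tensor([49])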

    def _get_feature_vector_attention_mask(
        self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None
    ):
        # Effectively attention_mask.sum(-1), but not inplace to be able to run
        # on inference mode.
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]

        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
        output_lengths = output_lengths.to(torch.long)

        batch_size = attention_mask.shape[0]

        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations make sure that all values before the output lengths idxs are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask

    def _get_adapters(self):
        if self.config.adapter_attn_dim is None:
            raise ValueError(f"{self.__class__} has no adapter layers. Make sure to define `config.adapter_attn_dim`.")

        adapter_weights = {}
        for name, module in self.named_modules():
            if isinstance(module, Wav2Vec2AttnAdapterLayer):
                for param_name, param in module.named_parameters():
                    adapter_weights[".".join([name, param_name])] = param

        if isinstance(self, Wav2Vec2ForCTC):
            for name, param in self.lm_head.named_parameters():
                adapter_weights[".".join(["lm_head", name])] = param

        return adapter_weights

    def init_adapter_layers(self):
        """
        (Re-)initialize attention adapter layers and lm head for adapter-only fine-tuning
        """
        # init attention adapters
        for module in self.modules():
            if isinstance(module, Wav2Vec2AttnAdapterLayer):
                self._init_weights(module)

        # init lm head
        if isinstance(self, Wav2Vec2ForCTC):
            self._init_weights(self.lm_head)

    def load_adapter(self, target_lang: str, force_load=True, **kwargs):
        r"""
        Load a language adapter model from a pre-trained adapter model.

        Parameters:
            target_lang (`str`):
                Has to be a language id of an existing adapter weight. Adapter weights are stored in the format
                adapter.<lang>.safetensors or adapter.<lang>.bin
            force_load (`bool`, defaults to `True`):
                Whether the weights shall be loaded even if `target_lang` matches `self.target_lang`.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only(`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `hf auth login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.

                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            mirror (`str`, *optional*):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.

        <Tip>

        Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
        use this method in a firewalled environment.

        </Tip>

        Examples:

        ```python
        >>> from transformers import Wav2Vec2ForCTC, AutoProcessor

        >>> ckpt = "facebook/mms-1b-all"
        >>> processor = AutoProcessor.from_pretrained(ckpt)
        >>> model = Wav2Vec2ForCTC.from_pretrained(ckpt, target_lang="eng")
        >>> # set specific language
        >>> processor.tokenizer.set_target_lang("spa")
        >>> model.load_adapter("spa")
        ```
        """
        if self.config.adapter_attn_dim is None:
            raise ValueError(f"Cannot load_adapter for {target_lang} if `config.adapter_attn_dim` is not defined.")

        if target_lang == self.target_lang and not force_load:
            logger.warning(f"Adapter weights are already set to {target_lang}.")
            return

        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        use_safetensors = kwargs.pop("use_safetensors", None)

        model_path_or_id = self.config._name_or_path
        state_dict = None

        # 1. Let's first try loading a safetensors adapter weight
        if use_safetensors is not False:
            filepath = WAV2VEC2_ADAPTER_SAFE_FILE.format(target_lang)

            try:
                weight_path = cached_file(
                    model_path_or_id,
                    filename=filepath,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    cache_dir=cache_dir,
                )

                state_dict = safe_load_file(weight_path)

            except OSError:
                if use_safetensors:
                    raise
                # Otherwise, maybe there is a pytorch adapter file
            except Exception:
                # For any other exception, we throw a generic error
                if use_safetensors:
                    raise OSError(
                        f"Can't load the model for '{model_path_or_id}'. If you were trying to load it"
                        " from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                        f" same name. Otherwise, make sure '{model_path_or_id}' is the correct path to a"
                        f" directory containing a file named {filepath}."
                    )

        # 2. If this didn't work let's try loading a PyTorch adapter weight
        if state_dict is None:
            filepath = WAV2VEC2_ADAPTER_PT_FILE.format(target_lang)

            try:
                weight_path = cached_file(
                    model_path_or_id,
                    filename=filepath,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    cache_dir=cache_dir,
                )

                check_torch_load_is_safe()
                state_dict = torch.load(weight_path, map_location="cpu", weights_only=True)

            except OSError:
                # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
                # to the original exception.
                raise
            except Exception:
                # For any other exception, we throw a generic error
                raise OSError(
                    f"Can't load the model for '{model_path_or_id}'. If you were trying to load it"
                    " from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                    f" same name. Otherwise, make sure '{model_path_or_id}' is the correct path to a"
                    f" directory containing a file named {filepath}."
                )

        adapter_weights = self._get_adapters()
        unexpected_keys = set(state_dict.keys()) - set(adapter_weights.keys())
        missing_keys = set(adapter_weights.keys()) - set(state_dict.keys())

        if len(unexpected_keys) > 0:
            raise ValueError(f"The adapter weights {weight_path} has unexpected keys: {', '.join(unexpected_keys)}.")
        if len(missing_keys) > 0:
            raise ValueError(f"The adapter weights {weight_path} has missing keys: {', '.join(missing_keys)}.")

        # make sure now vocab size is correct
        target_vocab_size = state_dict["lm_head.weight"].shape[0]
        if target_vocab_size != self.config.vocab_size:
            self.lm_head = nn.Linear(
                self.config.output_hidden_size, target_vocab_size, device=self.device, dtype=self.dtype
            )
            self.config.vocab_size = target_vocab_size

        # make sure that adapter weights are put in exactly the same precision and device placement
        state_dict = {k: v.to(adapter_weights[k]) for k, v in state_dict.items()}
        self.load_state_dict(state_dict, strict=False)

        # set target language correctly
        self.target_lang = target_lang


@auto_docstring
class Wav2Vec2Model(Wav2Vec2PreTrainedModel):
    def __init__(self, config: Wav2Vec2Config):
        super().__init__(config)
        self.config = config
        self.feature_extractor = Wav2Vec2FeatureEncoder(config)
        self.feature_projection = Wav2Vec2FeatureProjection(config)

        # model only needs masking vector if mask prob is > 0.0
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())

        if config.do_stable_layer_norm:
            self.encoder = Wav2Vec2EncoderStableLayerNorm(config)
        else:
            self.encoder = Wav2Vec2Encoder(config)

        self.adapter = Wav2Vec2Adapter(config) if config.add_adapter else None

        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.feature_extractor._freeze_parameters()
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
apply_spec_augmentTNr   )r>   r?   r@   rA   r3  )r>   r?   rA   rN   )r`  r   rv   r`  r  rP   r^  r   rq   mask_time_lengthmask_time_min_masksr6   r  r  r\   r_  mask_feature_lengthmask_feature_min_masksr  )r   r,   rt   r@   rh   rK   r   mask_feature_indicess           r<   _mask_hidden_states!Wav2Vec2Model._mask_hidden_states  s    t{{$8$??   4A3E3E3G0
[(/3/E/E/H/HI\I\/]M,[[''!+ 5-++44 KK88-++99! !&->G[G[chcmcm n/3/E/E/H/HI\I\/]M,;;((1,#8)++77 KK;;++<<	$  $)<<0DMaMainisis#t #74#@#G#GO]_#` 23M/r;   r   r(  rp  rq  rB   c                 >   Ub  UOU R                   R                  nUb  UOU R                   R                  nUb  UOU R                   R                  nU R	                  U5      nUR                  SS5      nUb  U R                  UR                  S   USS9nU R                  U5      u  pU R                  XUS9n	U R                  U	UUUUS9n
U
S   n	U R                  b  U R                  U	5      n	U(       d	  X4U
SS -   $ [        U	UU
R                  U
R                  S	9$ )
a  
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
    Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
    masked extracted features in *config.proj_codevector_dim* space.
Nr!   r#   Fr  )rt   r@   r@   r(  rp  rq  r   )r  extract_featuresr,   r-   )r   r(  rp  rq  r\  r   r  r=   r]  rr  rb  rc  r   r,   r-   )r   r   r@   rt   r(  rp  rq  r  rv  r,   encoder_outputss              r<   r   Wav2Vec2Model.forward-  sY     2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++BYBY11,?+55a;%!DD &&q)>u E N +/*A*ABR*S'00~ 1 
 ,,)/!5# ' 
 (*<<# LL7M!4qr7JJJ&+-)77&11	
 	
r;   )rc  r   rb  r\  r]  r`  ru  NNNNN)r1   r2   r3   r4   r"   r   ri  r6   r7   rW  rr  r   r9  r\   r9   r   r   r:   r   r   s   @r<   rY  rY    s    ~ (4 7;26	,((, !,,t3, ((4/	,\  /36:)-,0#'8
llT)8
 t+8
 !,,t3	8

  $;8
 #Tk8
 D[8
 
(	(8
 8
r;   rY  z?
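
# Minimal feature-extraction sketch for the bare encoder above (not part of the original
# file; the checkpoint name is an assumption and `raw_audio` stands for a 1-D float
# waveform sampled at 16 kHz):
#
#     import torch
#     from transformers import AutoProcessor, Wav2Vec2Model
#
#     processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#     model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
#     inputs = processor(raw_audio, sampling_rate=16000, return_tensors="pt")
#     with torch.no_grad():
#         hidden = model(**inputs).last_hidden_state  # (batch, frames, hidden_size)
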
@auto_docstring(
    custom_intro="""
    Wav2Vec2 Model with a quantizer and `VQ` head on top.
    """
)
class Wav2Vec2ForPreTraining(Wav2Vec2PreTrainedModel):
    def __init__(self, config: Wav2Vec2Config):
        super().__init__(config)
        self.wav2vec2 = Wav2Vec2Model(config)
        self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)

        self.quantizer = Wav2Vec2GumbelVectorQuantizer(config)

        self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim)
        self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)

        # Initialize weights and apply final processing
        self.post_init()

    def set_gumbel_temperature(self, temperature: int):
        """
        Set the Gumbel softmax temperature to a given value. Only necessary for training
        """
        self.quantizer.temperature = temperature

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.wav2vec2.feature_extractor._freeze_parameters()

    @staticmethod
    def compute_contrastive_logits(
        target_features: torch.FloatTensor,
        negative_features: torch.FloatTensor,
        predicted_features: torch.FloatTensor,
        temperature: int = 0.1,
    ):
        """
        Compute logits for contrastive loss based using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
        """
        target_features = torch.cat([target_features, negative_features], dim=0)

        logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1).type_as(
            target_features
        )

        # apply temperature
        logits = logits / temperature
        return logits

    @auto_docstring
    def forward(
        self,
        input_values: torch.Tensor | None,
        attention_mask: torch.Tensor | None = None,
        mask_time_indices: torch.BoolTensor | None = None,
        sampled_negative_indices: torch.BoolTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
    ) -> tuple | Wav2Vec2ForPreTrainingOutput:
        r"""
        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.
        sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):
            Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.
            Required input for pre-training.

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoFeatureExtractor, Wav2Vec2ForPreTraining
        >>> from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices
        >>> from datasets import load_dataset

        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
        >>> model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base")

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values  # Batch size 1

        >>> # compute masked indices
        >>> batch_size, raw_sequence_length = input_values.shape
        >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length).item()
        >>> mask_time_indices = _compute_mask_indices(
        ...     shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=2
        ... )
        >>> sampled_negative_indices = _sample_negative_indices(
        ...     features_shape=(batch_size, sequence_length),
        ...     num_negatives=model.config.num_negatives,
        ...     mask_time_indices=mask_time_indices,
        ... )
        >>> mask_time_indices = torch.tensor(data=mask_time_indices, device=input_values.device, dtype=torch.long)
        >>> sampled_negative_indices = torch.tensor(
        ...     data=sampled_negative_indices, device=input_values.device, dtype=torch.long
        ... )

        >>> with torch.no_grad():
        ...     outputs = model(input_values, mask_time_indices=mask_time_indices)

        >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)
        >>> cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1)

        >>> # show that cosine similarity is much higher than random
        >>> cosine_sim[mask_time_indices.to(torch.bool)].mean() > 0.5
        tensor(True)

        >>> # for contrastive loss training model should be put into train mode
        >>> model = model.train()
        >>> loss = model(
        ...     input_values, mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices
        ... ).loss
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if mask_time_indices is not None:
            mask_time_indices = mask_time_indices.to(torch.bool)

        outputs = self.wav2vec2(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            mask_time_indices=mask_time_indices,
            return_dict=return_dict,
        )

        # 1. project all transformed features (including masked) to final vq dim
        transformer_features = self.project_hid(outputs[0])

        # 2. quantize all (unmasked) extracted features and project to final vq dim
        extract_features = self.dropout_features(outputs[1])

        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(
                extract_features.shape[1], attention_mask, add_adapter=False
            )

        quantized_features, codevector_perplexity = self.quantizer(
            extract_features, mask_time_indices=mask_time_indices
        )

        quantized_features = quantized_features.to(self.project_q.weight.dtype)
        quantized_features = self.project_q(quantized_features)

        loss = contrastive_loss = diversity_loss = None
        if sampled_negative_indices is not None:
            batch_size, sequence_length, hidden_size = quantized_features.shape

            # 3. sample K negatives (distractors) quantized states for contrastive loss
            # if attention_mask is passed, make sure that padded feature vectors cannot be sampled
            # sample negative quantized vectors BTC => (BxT)C
            negative_quantized_features = quantized_features.view(-1, hidden_size)[
                sampled_negative_indices.long().view(-1)
            ]
            negative_quantized_features = negative_quantized_features.view(
                batch_size, sequence_length, -1, hidden_size
            ).permute(2, 0, 1, 3)

            # 4. compute logits, corresponding to `logs = sim(c_t, [q_t, \sim{q}_t]) / \kappa`
            # of equation (3) in https://huggingface.co/papers/2006.11477
            logits = self.compute_contrastive_logits(
                quantized_features[None, :],
                negative_quantized_features,
                transformer_features,
                self.config.contrastive_logits_temperature,
            )

            # 5. if a negative vector is identical to the positive (i.e. when codebook utilization is low),
            # its cosine similarity will be masked
            neg_is_pos = (quantized_features == negative_quantized_features).all(-1)

            if neg_is_pos.any():
                logits[1:][neg_is_pos] = float("-inf")

            # 6. compute contrastive loss \mathbf{L}_m = cross_entropy(logs) =
            # -log(exp(sim(c_t, q_t)/\kappa) / \sum_{\sim{q}}{exp(sim(c_t, \sim{q})/\kappa)})
            logits = logits.transpose(0, 2).reshape(-1, logits.size(0))
            target = ((1 - mask_time_indices.long()) * -100).transpose(0, 1).flatten()

            contrastive_loss = nn.functional.cross_entropy(logits.float(), target, reduction="sum")

            # 7. compute diversity loss: \mathbf{L}_d
            num_codevectors = self.config.num_codevectors_per_group * self.config.num_codevector_groups
            diversity_loss = ((num_codevectors - codevector_perplexity) / num_codevectors) * mask_time_indices.sum()

            # 8. \mathbf{L} = \mathbf{L}_m + \alpha * \mathbf{L}_d
            loss = contrastive_loss + self.config.diversity_loss_weight * diversity_loss

        if not return_dict:
            if loss is not None:
                return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
            return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]

        return Wav2Vec2ForPreTrainingOutput(
            loss=loss,
            projected_states=transformer_features,
            projected_quantized_states=quantized_features,
            codevector_perplexity=codevector_perplexity,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            contrastive_loss=contrastive_loss,
            diversity_loss=diversity_loss,
        )
@auto_docstring(
    custom_intro="""
    Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
    """
)
class Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel):
    def __init__(self, config: Wav2Vec2Config, target_lang: str | None = None):
        r"""
        target_lang (`str`, *optional*):
            Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
            adapter.<lang>.bin. Only relevant when using an instance of [`Wav2Vec2ForCTC`] with adapters. Uses 'eng' by
            default.
        """
        super().__init__(config)

        self.wav2vec2 = Wav2Vec2Model(config)
        self.dropout = nn.Dropout(config.final_dropout)

        self.target_lang = target_lang

        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that does not define the"
                " vocabulary size of the language model head. Please instantiate the model as follows:"
                " `Wav2Vec2ForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your"
                " model's configuration."
            )
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def tie_weights(self):
        """
        This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
        passing `target_lang=...` to `from_pretrained(...)`.

        This method is **not** supposed to be called by the user and is prone to be changed in the future.
        """
        # `tie_weights` usually ties input and output embedding weights. It is re-purposed here to conveniently load
        # adapter layers for Wav2Vec2 without introducing a new API to `PreTrainedModel`. While slightly hacky,
        # Wav2Vec2 never has to tie input and output embeddings, so it is ok to repurpose this function.

        # don't try to load adapter weights while the model still lives on the meta device
        if get_torch_context_manager_or_global_device() == torch.device("meta"):
            return

        target_lang = self.target_lang

        if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
            raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
        elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
            logger.info("By default `target_lang` is set to 'eng'.")
        elif target_lang is not None:
            self.load_adapter(target_lang, force_load=True)

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.wav2vec2.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.wav2vec2.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(
        self,
        input_values: torch.Tensor | None,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        labels: torch.Tensor | None = None,
    ) -> tuple | CausalLMOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None and labels.max() >= self.config.vocab_size:
            raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")

        outputs = self.wav2vec2(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)

        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)

            # assuming that padded tokens are filled with -100 when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)

            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
@auto_docstring(
    custom_intro="""
    Wav2Vec2 Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    """
)
class Wav2Vec2ForSequenceClassification(Wav2Vec2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of Wav2Vec2 adapters (config.add_adapter=True)"
            )
        self.wav2vec2 = Wav2Vec2Model(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.wav2vec2.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.wav2vec2.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(
        self,
        input_values: torch.Tensor | None,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        labels: torch.Tensor | None = None,
    ) -> tuple | SequenceClassifierOutput:
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
            (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
            To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
            into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.wav2vec2(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            pooled_output = hidden_states.mean(dim=1)
        else:
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization.
    """
)
class Wav2Vec2ForAudioFrameClassification(Wav2Vec2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Audio frame classification does not support the use of Wav2Vec2 adapters (config.add_adapter=True)"
            )
        self.wav2vec2 = Wav2Vec2Model(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.num_labels = config.num_labels

        self.init_weights()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.wav2vec2.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.wav2vec2.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(
        self,
        input_values: torch.Tensor | None,
        attention_mask: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
    ) -> tuple | TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the frame classification loss, one per output frame.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.wav2vec2(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class AMSoftmaxLoss(nn.Module):
    def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
        super().__init__()
        self.scale = scale
        self.margin = margin
        self.num_labels = num_labels
        self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
        self.loss = nn.CrossEntropyLoss()

    def forward(self, hidden_states, labels):
        labels = labels.flatten()
        weight = nn.functional.normalize(self.weight, dim=0)
        hidden_states = nn.functional.normalize(hidden_states, dim=1)
        cos_theta = torch.mm(hidden_states, weight)
        psi = cos_theta - self.margin

        onehot = nn.functional.one_hot(labels, self.num_labels)
        logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
        loss = self.loss(logits, labels)

        return loss


class TDNNLayer(nn.Module):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
        self.out_conv_dim = config.tdnn_dim[layer_id]
        self.kernel_size = config.tdnn_kernel[layer_id]
        self.dilation = config.tdnn_dilation[layer_id]

        self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
        self.activation = nn.ReLU()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if is_peft_available():
            from peft.tuners.lora import LoraLayer

            if isinstance(self.kernel, LoraLayer):
                warnings.warn(
                    "Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. "
                    "You should exclude TDNNLayer from LoRA's target modules."
                )

        # for backwards compatibility, we keep nn.Linear but call F.conv1d for speed up
        hidden_states = hidden_states.transpose(1, 2)
        weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
        hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.activation(hidden_states)
        return hidden_states
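
# Classification usage sketch (not part of the original file; the checkpoint name is an
# assumption — any Wav2Vec2 sequence-classification checkpoint works the same way):
#
#     import torch
#     from transformers import AutoFeatureExtractor, Wav2Vec2ForSequenceClassification
#
#     extractor = AutoFeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks")
#     model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks")
#     inputs = extractor(raw_audio, sampling_rate=16000, return_tensors="pt")
#     with torch.no_grad():
#         probs = model(**inputs).logits.softmax(dim=-1)   # one distribution per clip
#     label = model.config.id2label[int(probs.argmax())]
#
# Wav2Vec2ForAudioFrameClassification is called the same way but yields one logit vector
# per ~20 ms frame (e.g. for speaker diarization) instead of one per clip.
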
@auto_docstring(
    custom_intro="""
    Wav2Vec2 Model with an XVector feature extraction head on top for tasks like Speaker Verification.
    """
)
class Wav2Vec2ForXVector(Wav2Vec2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.wav2vec2 = Wav2Vec2Model(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])

        tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
        self.tdnn = nn.ModuleList(tdnn_layers)

        self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
        self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)

        self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)

        self.init_weights()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.wav2vec2.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.wav2vec2.parameters():
            param.requires_grad = False

    def _get_tdnn_output_lengths(self, input_lengths: torch.LongTensor | int):
        """
        Computes the output length of the TDNN layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return (input_length - kernel_size) // stride + 1

        for kernel_size in self.config.tdnn_kernel:
            input_lengths = _conv_out_length(input_lengths, kernel_size, 1)

        return input_lengths

    @auto_docstring
    def forward(
        self,
        input_values: torch.Tensor | None,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        labels: torch.Tensor | None = None,
    ) -> tuple | XVectorOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.wav2vec2(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        hidden_states = self.projector(hidden_states)

        for tdnn_layer in self.tdnn:
            hidden_states = tdnn_layer(hidden_states)

        # Statistic Pooling
        if attention_mask is None:
            mean_features = hidden_states.mean(dim=1)
            std_features = hidden_states.std(dim=1)
        else:
            feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
            tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
            mean_features = []
            std_features = []
            for i, length in enumerate(tdnn_output_lengths):
                mean_features.append(hidden_states[i, :length].mean(dim=0))
                std_features.append(hidden_states[i, :length].std(dim=0))
            mean_features = torch.stack(mean_features)
            std_features = torch.stack(std_features)
        statistic_pooling = torch.cat([mean_features, std_features], dim=-1)

        output_embeddings = self.feature_extractor(statistic_pooling)
        logits = self.classifier(output_embeddings)

        loss = None
        if labels is not None:
            loss = self.objective(logits, labels)

        if not return_dict:
            output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return XVectorOutput(
            loss=loss,
            logits=logits,
            embeddings=output_embeddings,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "Wav2Vec2ForAudioFrameClassification",
    "Wav2Vec2ForCTC",
    "Wav2Vec2ForPreTraining",
    "Wav2Vec2ForSequenceClassification",
    "Wav2Vec2ForXVector",
    "Wav2Vec2Model",
    "Wav2Vec2PreTrainedModel",
]