
    rh3"                    6   d Z ddlZddlmZmZmZ ddlZddlZddl	Zddlm
Z
 ddlmZ ddlmZ ddlmZmZmZ dd	lmZ dd
lmZ ddlmZ ddlmZ ddlmZmZmZmZm Z m!Z! ddl"m#Z#m$Z$ ddl%m&Z& ddl'm(Z(m)Z) ddl*m+Z+ ddl,m-Z-  e)j\                  e/      Z0dZ1dHde2de2de3dejh                  fdZ5dejh                  de2de2fdZ6	 	 dIde7e2e2f   de3de2d eejp                     d!e2dejr                  fd"Z: G d# d$e
jv                        Z<	 	 	 dJd%e
jz                  d&ejh                  d'ejh                  d(ejh                  d eejh                     d)ee3   d*e3d+eejh                     fd,Z> G d- d.e
jz                        Z? G d/ d0e      Z@ G d1 d2e      ZAe( G d3 d4e$             ZB G d5 d6eB      ZC G d7 d8eB      ZDe( G d9 d:eB             ZE e(d;<       G d= d>e-eB             ZF G d? d@eB      ZG e(dA<       G dB dCeBe             ZH e(dD<       G dE dFeB             ZIg dGZJy)KzPyTorch Whisper model.    N)CallableOptionalUnion)nn)CrossEntropyLoss   )ACT2FN)CacheDynamicCacheEncoderDecoderCache)GenerationMixin)create_causal_mask)FlashAttentionKwargs)GradientCheckpointingLayer)BaseModelOutput)BaseModelOutputWithPastAndCrossAttentions!CausalLMOutputWithCrossAttentionsSeq2SeqLMOutputSeq2SeqModelOutputSequenceClassifierOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)auto_docstringlogging   )WhisperConfig)WhisperGenerationMixinlengthchannelsmax_timescalereturnc                    |dz  dk7  rt        d| d      t        j                  |      |dz  dz
  z  }t        j                  | t        j
                  |dz        z        }t        j
                  |       j                  dd      |j                  dd      z  }t        j                  |j                         |j                         gd      S )z*Returns sinusoids for positional embedding   r   zVNumber of channels has to be divisible by 2 for sinusoidal positional embeddings, got z
 channels.r   dim)

ValueErrormathlogtorchexparangeviewcatsincos)r   r    r!   log_timescale_incrementinv_timescalesscaled_times         /var/www/html/ai-insurance-compliance-backend/venv/lib/python3.12/site-packages/transformers/models/whisper/modeling_whisper.py	sinusoidsr6   6   s    !|qdemdnnxy
 	
 #hh}5Q9JKYY 77%,,xST}:UUVN,,v&++B2^5H5HB5OOK99koo'):;CC    	input_idspad_token_iddecoder_start_token_idc                     | j                  | j                        }| ddddf   j                         |ddddf<   ||dddf<   |t        d      |j	                  |dk(  |       |S )z1
    Shift input ids one token to the right.
    Nr%   r   r   z1self.model.config.pad_token_id has to be defined.i)	new_zerosshapecloner(   masked_fill_)r8   r9   r:   shifted_input_idss       r5   shift_tokens_rightrA   C   s}     "++IOO<(CRC0668ae4adLMM""#4#<lKr7   r=   	mask_probmask_lengthattention_mask	min_masksc                    | \  }dk  rt        d      kD  rt        d d d      t        j                  j                  d      j	                         fd}|-|j                         j                  d      j                         nt        |      D cg c]  } c}}t        j                  |ft        	      }	g }
 |      }|d
k(  r|	S |D ]  } ||      }t        j                  j                  t        j                  |dz
  z
        |d      }t        |      d
k(  rdz
  }n|d
   }t        j                  |t        j                  ||z
  t        j                   	      |z  g      }|
j#                  |        t        j$                  |
      }
t        j&                  |
dddddf   ||f      }
|
j)                  ||z        }
t        j                        ddddf   }t        j&                  |||f      j)                  ||z        }|
|z   }
|
j+                         dz
  kD  rdz
  |
|
dz
  kD  <   t        j,                  |	|
dd       |	S c c}w )an  
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
               the first element is the batch size and the second element is the length of the axis to span.
        mask_prob:  The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                    independently generated mask spans of length `mask_length` is computed by
                    `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                    actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                        each batch dimension.
    r   z&`mask_length` has to be bigger than 0.zO`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: z and `sequence_length`: `c                     t        | z  z  z         }t        |      }|z  kD  rz  }| dz
  z
  |k  rt        | dz
  z
  d      }|S )z;Given input length, compute how many spans should be maskedr   r   )intmax)input_lengthnum_masked_spanepsilonrC   rB   rE   sequence_lengths     r5   compute_num_masked_spanz6_compute_mask_indices.<locals>.compute_num_masked_spanz   so    i,6DwNOoy9 [(?:-<O ;?+o=!,+/"BAFOr7   Nr%   )dtyper   F)replace)r(   nprandomranditemdetachsumtolistrangezerosboolchoicer-   lenconcatenateonesint32appendarraybroadcast_toreshaperJ   put_along_axis)r=   rB   rC   rD   rE   
batch_sizerO   _input_lengthsspec_aug_maskspec_aug_mask_idxsmax_num_masked_spanrK   rL   spec_aug_mask_idxdummy_mask_idxoffsetsrM   rN   s    `` `            @@r5   _compute_mask_indicesro   T   s   0 #(JQABB_$]^i]j&&7q:
 	
 iinnQ$$&G $ % 	##B'..0',Z'89!o9  HHj/:$GM1/Ba% 51,? II,,IIlkAo67RW - 
  !Q& -q0N.q1NNN(;o(MUWU]U] ^ao op
 	!!"34/52 "45 1a:&5H+(V ,33J@SVa@ab ii$T4]3Goog
4G'UV^^'+5G ,g5 /A"55GVYZGZ-!0CCD m%7B?w :s   $	I+c                   <     e Zd Zddededee   f fdZddZ xZS )WhisperPositionalEmbeddingnum_positionsembedding_dimpadding_idxc                 &    t         |   ||       y N)super__init__)selfrr   rs   rt   	__class__s       r5   rx   z#WhisperPositionalEmbedding.__init__   s    6r7   c                 b    || j                   |||j                  d   z    S | j                   |   S Nr   )weightr=   )ry   r8   past_key_values_lengthposition_idss       r5   forwardz"WhisperPositionalEmbedding.forward   s<    ;;58NQZQ`Q`abQc8cdd;;|,,r7   rv   )r   N)__name__
__module____qualname__rI   r   rx   r   __classcell__rz   s   @r5   rq   rq      s'    7c 7# 7HUXM 7-r7   rq   modulequerykeyvaluescalingdropout	head_maskc                 $   ||j                  d      dz  }t        j                  ||j                  dd            |z  }	|0|j                  dk(  r!|	|d d d d d d d |j
                  d   f   z   }	t        j                  j                  |	d      }	||	|j                  dddd      z  }	t        j                  j                  |	|| j                  	      }	t        j                  |	|      }
|
j                  dd      j                         }
|
|	fS )
Nr%         r$   r      r&   r   ptraining)sizer+   matmul	transposendimr=   r   
functionalsoftmaxr.   r   r   
contiguous)r   r   r   r   rD   r   r   r   kwargsattn_weightsattn_outputs              r5   eager_attention_forwardr      s     **R.D(<<s}}Q':;gEL!n&9&9Q&>#nQ1o		"o5M&NN==((2(>L#innQAq&AA==((6??([L,,|U3K''1-88:K$$r7   c                   z    e Zd ZdZ	 	 	 	 	 	 ddededededededee   d	ee   f fd
Z		 	 	 	 	 	 dde
j                  dee
j                     dee   dee
j                     dee
j                     dedee
j                     dee   dee
j                  ee
j                     eee
j                        f   fdZ xZS )WhisperAttentionz=Multi-headed attention from 'Attention Is All You Need' paper	embed_dim	num_headsr   
is_decoderbias	is_causal	layer_idxconfigc	                 z   t         	|           || _        || _        || _        ||z  | _        || _        | j
                  |z  | j                  k7  rt        d| j                   d| d      | j
                  dz  | _        || _	        || _
        |/|r-t        j                  d| j                  j                   d       || _        t!        j"                  ||d      | _        t!        j"                  |||      | _        t!        j"                  |||      | _        t!        j"                  |||      | _        y )	Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).r   zInstantiating a decoder z without passing `layer_idx` is not recommended and will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.Fr   )rw   rx   r   r   r   head_dimr   r(   r   r   r   loggerwarning_oncerz   r   r   r   Lineark_projv_projq_projout_proj)
ry   r   r   r   r   r   r   r   r   rz   s
            r5   rx   zWhisperAttention.__init__   s     	""!Y.MMI%$..8MdnnM]$YKr3  }}d*$"*4>>+B+B*C D, ,
 #ii	95Aii	94@ii	94@		)YTBr7   hidden_stateskey_value_statespast_key_valuerD   layer_head_maskoutput_attentionscache_positionr   r"   c                 T   |du}	|j                   dd \  }
}|
|d| j                  f}| j                  |      | j                  z  } |j                  | }|j                  dd      j                         }|it        |t              rY|j                  j                  | j                        }|	r&d|j                  | j                  <   |j                  }n|j                  }||n|}|	rK|rIrG|j                  | j                     j                  }|j                  | j                     j                   }n| j#                  |      j	                  |
d| j$                  | j                        }| j'                  |      j	                  |
d| j$                  | j                        }|j                  dd      j                         }|j                  dd      j                         }|)|	s|nd}|j)                  ||| j                  d|i      \  }}t*        }| j,                  j.                  dk7  rt0        | j,                  j.                     } || ||||f| j2                  sdn| j4                  d	||d
|\  }}|j7                  |
|d      j                         }| j9                  |      }||fS )z#Input shape: Batch x Time x ChannelNr%   r   r$   Tr   eager              ?)r   r   r   r   )r=   r   r   r   r.   r   r   
isinstancer   
is_updatedgetr   cross_attention_cacheself_attention_cachelayerskeysvaluesr   r   r   updater   r   _attn_implementationr   r   r   rd   r   )ry   r   r   r   rD   r   r   r   r   is_cross_attentionbsztgt_lenq_input_shapequery_statesr   current_states
key_statesvalue_statesattention_interfacer   r   s                        r5   r   zWhisperAttention.forward  s   " .T9 %**3B/Wgr4==9 {{=1DLL@(|((-8#--a3>>@ %*^EX*Y'2266t~~FJ!<@))$..9!/!E!E!/!D!D .>-I)}.Z'..t~~>CCJ)00@GGL^499#r4>>SWS`S`aJ;;~6;;CT^^UYUbUbcL#--a3>>@J'11!Q7BBDL)7It+9+@+@dnn?OQ_>`,(
L )@;;++w6"9$++:Z:Z"[$7%
  $}}C$,,/%%
 %
!\ "))#w;FFHmmK0L((r7   )r   FTFNN)NNNNFN)r   r   r   __doc__rI   floatr[   r   r   rx   r+   Tensorr
   r   r   tupler   r   r   s   @r5   r   r      sT   G  #'*.&C&C &C 	&C
 &C &C &C C=&C '&CV 48*.1526"'15P)||P) #5<<0P) !	P)
 !.P) "%,,/P)  P) !.P) -.P) 
u||Xell3XeELL>Q5RR	SP)r7   r   c                        e Zd Zdef fdZ	 d	dej                  dej                  dej                  dedej                  f
dZ xZ	S )
WhisperEncoderLayerr   c                 h   t         |           |j                  | _        t	        | j                  |j
                  |j                  |      | _        t        j                  | j                        | _
        |j                  | _        t        |j                     | _        |j                  | _        t        j                   | j                  |j"                        | _        t        j                   |j"                  | j                        | _        t        j                  | j                        | _        y )N)r   r   r   r   )rw   rx   d_modelr   r   encoder_attention_headsattention_dropout	self_attnr   	LayerNormself_attn_layer_normr   r	   activation_functionactivation_fnactivation_dropoutr   encoder_ffn_dimfc1fc2final_layer_normry   r   rz   s     r5   rx   zWhisperEncoderLayer.__init__t  s    )nn44,,	
 %'LL$@!~~#F$>$>?"(";";99T^^V-C-CD99V33T^^D "T^^ <r7   r   rD   r   r   r"   c                    |}| j                  |      }| j                  ||||      \  }}t        j                  j	                  || j                  | j
                        }||z   }|}| j                  |      }| j                  | j                  |            }t        j                  j	                  || j                  | j
                        }| j                  |      }t        j                  j	                  || j                  | j
                        }||z   }|j                  t        j                  k(  rEt        j                  |j                        j                  dz
  }t        j                   || |      }||fS )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        )r   rD   r   r   r   i  )minrJ   )r   r   r   r   r   r   r   r   r   r   r   rP   r+   float16finforJ   clamp)ry   r   rD   r   r   residualr   clamp_values           r5   r   zWhisperEncoderLayer.forward  sS   $ !11-@&*nn')+/	 '5 '
#| --mt||VZVcVc-d =0 --m<**488M+BC--mt?V?Vaeanan-o/--mt||VZVcVc-d =0%--/++m&9&9:>>EK!KKK<[YMl**r7   )F)
r   r   r   r   rx   r+   r   r[   r   r   r   s   @r5   r   r   s  sY    =} =. #()+||)+ )+ 	)+
  )+ 
)+r7   r   c                   N    e Zd Zddedee   f fdZ	 	 	 	 	 	 	 	 	 ddej                  deej                     deej                     deej                     deej                     d	eej                     d
ee	   dee
   dee
   deej                     dej                  fdZ xZS )WhisperDecoderLayerr   r   c           	         t         |           |j                  | _        t	        | j                  |j
                  |j                  dd||      | _        |j                  | _        t        |j                     | _        |j                  | _        t        j                  | j                        | _        t	        | j                  |j
                  |j                  d||      | _        t        j                  | j                        | _        t        j$                  | j                  |j&                        | _        t        j$                  |j&                  | j                        | _        t        j                  | j                        | _        y )NT)r   r   r   r   r   r   r   )r   r   r   r   )rw   rx   r   r   r   decoder_attention_headsr   r   r   r	   r   r   r   r   r   r   encoder_attnencoder_attn_layer_normr   decoder_ffn_dimr   r   r   ry   r   r   rz   s      r5   rx   zWhisperDecoderLayer.__init__  s    )nn44,,
 ~~#F$>$>?"(";";$&LL$@!,NN**,,
 (*||DNN'C$99T^^V-C-CD99V33T^^D "T^^ <r7   r   rD   encoder_hidden_statesencoder_attention_maskr   cross_attn_layer_head_maskr   r   	use_cacher   r"   c                 0   |}| j                  |      }| j                  ||||||
      \  }}t        j                  j	                  || j                  | j
                        }||z   }d}|h|}| j                  |      }| j                  ||||||      \  }}t        j                  j	                  || j                  | j
                        }||z   }|}| j                  |      }| j                  | j                  |            }t        j                  j	                  || j                  | j
                        }| j                  |      }t        j                  j	                  || j                  | j
                        }||z   }|f}|r|||fz  }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        )r   r   rD   r   r   r   r   N)r   r   rD   r   r   r   )r   r   r   r   r   r   r   r   r   r   r   r   r   )ry   r   rD   r   r   r   r   r   r   r   r   r   self_attn_weightscross_attn_weightsoutputss                  r5   r   zWhisperDecoderLayer.forward  s   > !11-@ ,0>>'))+/) ,: ,
(( --mt||VZVcVc-d =0 " ,$H 88GM040A0A+!65 :-"3 1B 1-M- MM11-4<<Z^ZgZg1hM$}4M !--m<**488M+BC--mt?V?Vaeanan-o/--mt||VZVcVc-d =0 ")+=>>Gr7   rv   )	NNNNNNFTN)r   r   r   r   r   rI   rx   r+   r   r   r[   
LongTensorr   r   r   s   @r5   r   r     s   =} =# =D 268<9=26=A8<,1$(59L||L !.L  (5	L
 !) 6L "%,,/L %-U\\$:L !!45L $D>L D>L !!1!12L 
Lr7   r   c                   b    e Zd ZU eed<   dZdZdZddgZdZ	dZ
dZdZd Zdej                  fd	Zy
)WhisperPreTrainedModelr   modelinput_featuresTr   r   c                    | j                   j                  }t        |t        j                  t        j
                  f      rY|j                  j                  j                  d|       |j                  %|j                  j                  j                          y y t        |t        j                        rf|j                  j                  j                  d|       |j                  2|j                  j                  |j                     j                          y y t        |t        j                        rJ|j                  j                  j                  d       |j                  j                  j                          y t        |t              rJ|j                   j                  j#                  t%        |j                   j                  j&                          y t        |t(              rW| j                   j*                  r@|j,                  j                  j                  d| j                   j.                  dz   z         y y y )Nr   )meanstdr   r   )r   init_stdr   r   r   Conv1dr}   datanormal_r   zero_	Embeddingrt   r   fill_WhisperEncoderembed_positionscopy_r6   r=   WhisperForAudioClassificationuse_weighted_layer_sumlayer_weightsnum_hidden_layers)ry   r   r  s      r5   _init_weightsz$WhisperPreTrainedModel._init_weights.  s   kk""fryy"))45MM&&CS&9{{&  &&( '-MM&&CS&9!!-""6#5#56<<> .-MM$$S)KK""$/""))//	6;Q;Q;X;X;^;^0_` =>{{11$$))//t{{7T7TWX7X0YZ 2 ?r7   rh   c                     |dz
  dz  dz   }|S )zH
        Computes the output length of the convolutional layers
        r   r$    )ry   rh   s     r5    _get_feat_extract_output_lengthsz7WhisperPreTrainedModel._get_feat_extract_output_lengthsA  s     '*q014r7   N)r   r   r   r   __annotations__base_model_prefixmain_input_namesupports_gradient_checkpointing_no_split_modules_supports_flash_attn_supports_sdpa_supports_flex_attn_can_compile_fullgraphr  r+   r   r  r  r7   r5   r   r   !  sT    &O&*#.0EFN![&e>N>N r7   r   c                        e Zd ZdZdef fdZd Zdej                  fdZ	dej                  fdZ
	 	 	 	 	 d
d	Z xZS )r
  z
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`WhisperEncoderLayer`].

    Args:
        config: WhisperConfig
    r   c                 L   t         |   |       |j                  | _        |j                  | _        |j
                  }|j                  | _        |j                  | _        |j                  | _	        |j                  rt        j                  |      nd| _        t        j                  | j                  |dd      | _        t        j                  ||ddd      | _        t        j$                  | j                  |      | _        | j&                  j)                  d       t        j*                  t-        |j.                        D cg c]  }t1        |       c}      | _        t        j4                  |j
                        | _        d| _        | j;                          y c c}w )Nr   r   r   )kernel_sizepaddingr$   )r   strider!  F)rw   rx   r   encoder_layerdrop	layerdropr   num_mel_binsr9   rt   max_source_positionsscale_embeddingr)   sqrtembed_scaler   r  conv1conv2r  r  requires_grad_
ModuleListrY   encoder_layersr   r   r   
layer_normgradient_checkpointing	post_init)ry   r   r   rg   rz   s       r5   rx   zWhisperEncoder.__init__S  s2    ~~11NN	"//!..$*$?$?!393I3I499Y/sYYt00)TUV
YYy)1VWX
!||D,E,EyQ++E2mm%PVPePeJf$gQ%8%@$gh,,v~~6&+# %hs   F!c                 J    | j                         D ]	  }d|_         d| _        y NF)
parametersrequires_grad_requires_grad)ry   params     r5   _freeze_parametersz!WhisperEncoder._freeze_parametersk  s(    __& 	(E"'E	(#r7   r"   c                     | j                   S rv   r*  ry   s    r5   get_input_embeddingsz#WhisperEncoder.get_input_embeddingsp  s    zzr7   r   c                     || _         y rv   r:  ry   r   s     r5   set_input_embeddingsz#WhisperEncoder.set_input_embeddingss  s	    
r7   c           	         | j                   j                  | j                  j                  d   z  | j                  j                  d   z  }|j
                  d   |k7  r"t        d| d|j
                  d    d| d      ||n| j                   j                  }||n| j                   j                  }||n| j                   j                  }t        j                  j                  | j                  |            }t        j                  j                  | j	                  |            }|j                  ddd	      }t        j                  | j                   j"                  |j$                  
      }	|| j!                  |	      z   }
t        j                  j'                  |
| j&                  | j(                        }
|rdnd}|rdnd}|[|j+                         d   t-        | j.                        k(  s2J dt-        | j.                         d|j+                         d    d       t1        | j.                        D ]j  \  }}|r||
fz   }d}| j(                  r&t        j2                  g       }|| j4                  k  rd}|rd}n ||
d|||   nd|      }|d   }
|sb||d	   fz   }l | j7                  |
      }
|r||
fz   }|st9        d |
||fD              S t;        |
||      S )a  
        Args:
            input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`):
                Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
                obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
                `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or
                the soundfile library (`pip install soundfile`). To prepare the array into
                `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
                and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
            attention_mask (`torch.Tensor`)`, *optional*):
                Whisper does not support masking of the `input_features`, this argument is preserved for compatibility,
                but it is not used. By default the silence in the input log mel spectrogram are ignored.
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        r   r%   z7Whisper expects the mel input features to be of length z, but found z-. Make sure to pad the input mel features to .Nr$   r   devicer   r  z&The head_mask should be specified for  layers, but it is for FT)NN)r   r   c              3   &   K   | ]	  }||  y wrv   r  .0vs     r5   	<genexpr>z)WhisperEncoder.forward.<locals>.<genexpr>  s     eqWXWdes   last_hidden_stater   
attentions)r   r&  r*  r"  r+  r=   r(   r   output_hidden_statesuse_return_dictr   r   gelupermuter+   r-   r  num_embeddingsrC  r   r   r   r]   r   	enumeraterT   r$  r/  r   r   )ry   r   rD   r   r   rM  return_dictexpected_seq_lengthinputs_embedsall_positionsr   encoder_statesall_attentionsidxencoder_layerto_dropdropout_probabilitylayer_outputss                     r5   r   zWhisperEncoder.forwardv  s   H #kk>>ARARSTAUUX\XbXbXiXijkXll#'::IJ]I^^jkykk  AC  lD  kE  Er  sF  rG  GH  I  2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B]**4::n+EF**4::m+DE%--aA6T%9%9%H%HQ^QeQef%(<(<](KK--mt||VZVcVc-d30d  >>#A&3t{{+;< 8T[[9I8JJabkbpbpbrstbuavvwx< #,DKK"8 	FC#!/=2B!BG}}&+jjn#&7"G , -!7@7LYs^RV&7	! !.a 0 !/=3C2E!E/	F2 6+}.>>Ne]NN$Seee+>Vd
 	
r7   )NNNNN)r   r   r   r   r   rx   r8  r   Moduler<  r?  r   r   r   s   @r5   r
  r
  J  sP    } 0$
bii "))  !b
r7   r
  c                   N     e Zd ZdZdZdef fdZ	 	 	 	 	 	 	 	 	 	 	 	 	 ddZ xZS )WhisperDecoderz
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`WhisperDecoderLayer`]

    Args:
        config: WhisperConfig
    r8   r   c           	      J   t         |   |       |j                  | _        |j                  | _        |j
                  | _        |j                  | _        |j                  | _        |j                  rt        j                  |j                        nd| _        t        j                  |j                   |j                  | j                        | _        t%        | j                  |j                        | _        t        j(                  t+        |j,                        D cg c]  }t/        ||       c}      | _        |j2                  dk(  | _        |j2                  dk(  | _        t        j8                  |j                        | _        d| _        | j?                          y c c}w )Nr   flash_attention_2sdpaF) rw   rx   r   decoder_layerdropr$  r9   rt   max_target_positionsr&  r'  r)   r(  r   r)  r   r  
vocab_sizeembed_tokensrq   r  r-  rY   decoder_layersr   r   r   _use_flash_attention_2	_use_sdpar   r/  r0  r1  r   s      r5   rx   zWhisperDecoder.__init__  s8    ~~11!..$*$?$?!$*$?$?!8>8N8N499V^^4TWLL):):FNNDL\L\]9$:S:SU[UcUcdmmEJ6K`K`Eab	 3b
 '-&A&AEX&X#44>,,v~~6&+# cs   F c                 n   |
|
n| j                   j                  }
||n| j                   j                  }|	|	n| j                   j                  }	||n| j                   j                  }||t        d      |&|j                         }|j                  d|d         }n!||j                         dd }nt        d      || j                  |      }|	r?|=| j                   j                  rt        t               t                     }n
t               }d}||d   }n||j                         }|(t        j                  |||d   z   |j                        }|$|j!                  d      j#                  |d   d      }|| j%                  |||      }n| j%                  |||      }||j'                  |j                        z   }t(        j*                  j-                  || j,                  | j.                  	      }t1        | j                   |||||
      }| j2                  r%| j.                  r|	rt4        j7                  d       d}	|rdnd}|
rdnd}|
r|dnd}t9        ||gddg      D ]f  \  }}|	|j                         d   t;        | j<                        k(  r3J d| dt;        | j<                         d|j                         d    d        t?        | j<                        D ]~  \  }}|r||fz  }| j.                  r%t        j@                  g       }|| jB                  k  r? |||||||   nd|||   nd|	r|nd|
|	|	      }|d   }|
sj||d   fz  }|v||d   fz  } | jE                  |      }|r||fz  }|	r|nd}|stG        d |||||fD              S tI        |||||      S )ac  
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
                on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`EncoderDecoderCache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
                Pre-computed hidden-states that can be used to speed up auto-regressive (sequential) decoding. There are
                four sets of pre-computed hidden-states: key and values states in the self-attention blocks (2) and
                in the cross-attention blocks (2). The `past_key_values` are returned when `use_cache=True` is passed or
                when `config.use_cache=True`

                Two formats are allowed:
                - An [`~cache_utils.EncoderDecoderCache`] instance;
                - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
                `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of
                shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
                `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
                control over how to convert `input_ids` indices into associated vectors than the model's internal
                embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        NzTYou cannot specify both decoder_input_ids and decoder_inputs_embeds at the same timer%   zEYou have to specify either decoder_input_ids or decoder_inputs_embedsr   r   rB  )r~   r   r   )r   input_embedsrD   r   past_key_valuesr   z^`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`...Fr  r   cross_attn_head_maskzThe `z` should be specified for rD  rA  )rD   r   r   r   r   r   r   r   r$   c              3   $   K   | ]  }|| 
 y wrv   r  rF  s     r5   rI  z)WhisperDecoder.forward.<locals>.<genexpr>  s      = s   )rK  rm  r   rL  cross_attentions)%r   r   rM  r   rN  r(   r   r.   rg  is_encoder_decoderr   r   get_seq_lengthr+   r-   rC  	unsqueezerepeatr  tor   r   r   r   r   r0  r   r   zipr]   r   rR  rT   r$  r/  r   r   )ry   r8   rD   r   r   rn  rm  rU  r   r   r   rM  rS  r   input_shaper~   	positionsr   causal_maskall_hidden_statesall_self_attnsall_cross_attentions	attn_mask	mask_namerY  decoder_layerr\  r]  
next_caches                                r5   r   zWhisperDecoder.forward  s]   b 2C1N-TXT_T_TqTq$8$D $++JjJj 	 "+!6IDKK<Q<Q	%0%<k$++B]B]  ]%>stt"#..*K!r;r?;I&',,.s3Kdee  --i8M0{{--"5lnln"U"..!"%%3A%6"(%4%C%C%E"!"\\&(>Q(OXeXlXlN )33A6==k!naPL  ,,2HWc - I ,,6L[g - I &	]5I5I(JJ--mt||VZVcVc-d(;;&))+%
 &&4==##t "	"6BD0d&7<Q<]rdh %(4H(IKYoKp$q 	 Iy$ ~~'*s4;;/?@ I;&@T[[AQ@R S!(+,A/@	 #,DKK"8 	@C#!m%55!}}&+jjn#&7)*&;3<3H3dI]Ii,@,Eos2;"3#-
M *!,M =#3"55(4(]1-=,??(5	@8 6-!11(1_t
 '5FXlm  
 9+&+%1
 	
r7   NNNNNNNNNNNNN)	r   r   r   r   r  r   rx   r   r   r   s   @r5   r`  r`    sJ     "O} 4 "!!P
r7   r`  c            &       z    e Zd Zdef fdZd Zd Zd Zd Zd Z		 dde
j                  d	ee
j                     fd
Ze	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddee
j                     d	ee
j                     dee
j                     dee
j                     dee
j                      dee
j                      dee
j                      deeee
j                           deee      deee
j                        deee
j                        dee   dee   dee   dee   dee
j                     deee
j                      ef   f"d       Z xZS )WhisperModelr   c                     t         |   |       t        |      | _        t	        |      | _        | j                          y rv   )rw   rx   r
  encoderr`  decoderr1  r   s     r5   rx   zWhisperModel.__init__  s2     %f-%f-r7   c                 .    | j                   j                  S rv   r  rg  r;  s    r5   r<  z!WhisperModel.get_input_embeddings      ||(((r7   c                 &    || j                   _        y rv   r  r>  s     r5   r?  z!WhisperModel.set_input_embeddings      $)!r7   c                     | j                   S rv   )r  r;  s    r5   get_encoderzWhisperModel.get_encoder      ||r7   c                     | j                   S rv   r  r;  s    r5   get_decoderzWhisperModel.get_decoder  r  r7   c                 8    | j                   j                          yz
        Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will
        not be updated during training.
        Nr  r8  r;  s    r5   freeze_encoderzWhisperModel.freeze_encoder      
 	'')r7   r   rD   c                 2   t        | j                  dd      s|S |j                         \  }}}| j                  j                  dkD  r| j                  rt        ||f| j                  j                  | j                  j                  || j                  j                        }t        j                  ||j                  t        j                        }|dddf   j                  d|d      }d||<   | j                  j                  dkD  r| j                  rt        ||f| j                  j                  | j                  j                  | j                  j                        }t        j                  ||j                  t        j                        }d||<   |S )	z
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://huggingface.co/papers/1904.08779).
        apply_spec_augmentTr   )rB   rC   rD   rE   )rC  rP   Nr%   )rB   rC   rE   )getattrr   r   mask_time_probr   ro   mask_time_lengthmask_time_min_masksr+   tensorrC  r[   expandmask_feature_probmask_feature_lengthmask_feature_min_masks)ry   r   rD   rf   hidden_sizerN   mask_time_indicesmask_feature_indicess           r5   _mask_input_featuresz!WhisperModel._mask_input_features  s[    t{{$8$?!! 4B3F3F3H0
K;;%%)dmm 5_-++44 KK88-++99! !&->~G\G\didndn o 1!T' : A A"kSU V01N,-;;((1,#8[)++77 KK;;++<<	$  $)<<0D^MbMbjojtjt#u 34N/0r7   decoder_input_idsdecoder_attention_maskr   decoder_head_maskrn  encoder_outputsrm  decoder_inputs_embedsdecoder_position_idsr   r   rM  rS  r   r"   c                    ||n| j                   j                  }||n| j                   j                  }||n| j                   j                  }||n| j                   j                  }|*| j                  ||      }| j                  |||||      }nI|rGt        |t              s7t        |d   t        |      dkD  r|d   ndt        |      dkD  r|d   nd      }| j                  |||d   |||	|
||||||      }|s||z   S t        |j                  |j                  |j                  |j                  |j                   |j                  |j                  |j                  	      S )
a
  
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Whisper uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            If you want to change padding behavior, you should read
            [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the BART
            paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)

        Example:
         ```python
         >>> import torch
         >>> from transformers import AutoFeatureExtractor, WhisperModel
         >>> from datasets import load_dataset

         >>> model = WhisperModel.from_pretrained("openai/whisper-base")
         >>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base")
         >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
         >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
         >>> input_features = inputs.input_features
         >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
         >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
         >>> list(last_hidden_state.shape)
         [1, 2, 512]
         ```N)rD   r   r   rM  rS  r   r   r$   rJ  )r8   rD   r   r   rn  rm  rU  r   r   r   rM  rS  r   )rK  rm  decoder_hidden_statesdecoder_attentionsrp  encoder_last_hidden_stater   encoder_attentions)r   r   rM  r   rN  r  r  r   r   r]   r  r   rK  rm  r   rL  rp  )ry   r   rD   r  r  r   r  rn  r  rm  r  r  r   r   rM  rS  r   decoder_outputss                     r5   r   zWhisperModel.forward  s   D 2C1N-TXT_T_TqTq$8$D $++JjJj 	 "+!6IDKK<Q<Q	%0%<k$++B]B]"!66~Vd6eN"ll#"3%9' + O O_!M-"1!"4474H14Loa0RV14_1E1I?1-tO ,,'1"1!"4'!5+/-/!5#) ' 
  "_44!-??+;;"1"?"?.99,==&5&G&G"1"?"?.99	
 		
r7   rv   )NNNNNNNNNNNNNNNN)r   r   r   r   rx   r<  r?  r  r  r  r+   FloatTensorr   r   r  r   r   r   r   r
   r[   r   r   r   r   s   @r5   r  r    s   } )** 6:)))) !!1!12)V  7;598<=A,0487;EI26DHBF$(,0/3&*59#w
 !2!23w
 !!1!12w
 $E$4$45	w

 !))9)9 :w
 ELL)w
 $ELL1w
 'u||4w
 "%e.?.?(@"ABw
 "%,/w
  (e.?.?(@Aw
 'uU-=-='>?w
 D>w
 $D>w
 'tnw
  d^!w
" !!1!12#w
$ 
uU\\"$66	7%w
 w
r7   r  zh
    The Whisper Model with a language modeling head. Can be used for automatic speech recognition.
    )custom_introc            (           e Zd ZdZdgZdef fdZd Zd Zd Z	d Z
d	ej                  fd
Zd Ze	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddeej$                     deej&                     deej&                     deej&                     deej(                     deej(                     deej(                     deeeej$                           deee      deeej$                        deeej&                        deej&                     dee   dee   dee   dee   deej&                     d	eeej(                     ef   f$d       Z xZS )WhisperForConditionalGenerationr   proj_out.weightr   c                     t         |   |       t        |      | _        t	        j
                  |j                  |j                  d      | _        |j                  | _	        | j                          y NFr   )rw   rx   r  r   r   r   r   rf  proj_outre  r1  r   s     r5   rx   z(WhisperForConditionalGeneration.__init__  sT     !&)
		&..&2C2C%P$*$?$?! 	r7   c                 6    | j                   j                         S rv   )r   r  r;  s    r5   r  z+WhisperForConditionalGeneration.get_encoder      zz%%''r7   c                 6    | j                   j                         S rv   )r   r  r;  s    r5   r  z+WhisperForConditionalGeneration.get_decoder  r  r7   c                     | j                   S rv   r  r;  s    r5   get_output_embeddingsz5WhisperForConditionalGeneration.get_output_embeddings      }}r7   c                     || _         y rv   r  ry   new_embeddingss     r5   set_output_embeddingsz5WhisperForConditionalGeneration.set_output_embeddings  	    &r7   r"   c                 6    | j                   j                         S rv   r   r<  r;  s    r5   r<  z4WhisperForConditionalGeneration.get_input_embeddings      zz..00r7   c                 L    | j                   j                  j                          yr  )r   r  r8  r;  s    r5   r  z.WhisperForConditionalGeneration.freeze_encoder  s    
 	

--/r7   r   rD   r  r  r   r  rn  r  rm  r  r  labelsr   r   rM  rS  r   c                 H   ||n| j                   j                  }|~|j                  d   | j                  kD  r)t	        d|j                  d    d| j                   d      |7|
5t        || j                   j                  | j                   j                        }| j                  |||||||||	|
||||||      }| j                  |d         }d}|at               }|j                  |j                        } ||j                  d| j                   j                        |j                  d            }|s|f|dd z   }||f|z   S |S t!        |||j"                  |j$                  |j&                  |j(                  |j*                  |j,                  |j.                  		      S )
a  
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Whisper uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            If you want to change padding behavior, you should read
            [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the BART
            paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
            or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
            only computed for the tokens with labels in `[0, ..., config.vocab_size]`. `sequence_length` should be smaller than or equal to `config.max_target_positions`.

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, WhisperForConditionalGeneration
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
        >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

        >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
        >>> input_features = inputs.input_features

        >>> generated_ids = model.generate(inputs=input_features)

        >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> transcription
        ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
        ```Nr   zLabels' sequence length z- cannot exceed the maximum allowed length of z tokens.)rD   r  r  r  r   r  rn  rm  r  r  r   r   rM  rS  r   r   r%   )	losslogitsrm  r  r  rp  r  r   r  )r   rN  r=   re  r(   rA   r9   r:   r   r  r   ru  rC  r.   rf  rd   r   rm  r  r  rp  r  r   r  )ry   r   rD   r  r  r   r  rn  r  rm  r  r  r  r   r   rM  rS  r   r   	lm_logitsr  loss_fctoutputs                          r5   r   z'WhisperForConditionalGeneration.forward  s   X &1%<k$++B]B]||A!:!:: .v||A.??lmq  nG  nG  mH  HP  Q  !(-B-J$6DKK44dkk6X6X%! **)/+#9/!5+"7!5/!5#)!  
$ MM'!*-	')HYYy//0FINN2t{{/E/EFWYHZ[D\GABK/F)-)9TGf$EvE#33")"?"?&99$55&-&G&G")"?"?&99

 
	
r7   )NNNNNNNNNNNNNNNNN)r   r   r   r  _tied_weights_keysr   rx   r  r  r  r  r   r^  r<  r  r   r   r+   r  r   r   r   r   r
   r[   r   r   r   r   s   @r5   r  r    s     +,} (('1bii 10  7;598<=A,0487;EI26DHBF-1$(,0/3&*59%@
 !2!23@
 !!1!12@
 $E$4$45	@

 !))9)9 :@
 ELL)@
 $ELL1@
 'u||4@
 "%e.?.?(@"AB@
 "%,/@
  (e.?.?(@A@
 'uU-=-='>?@
 ))*@
 D>@
 $D>@
  'tn!@
" d^#@
$ !!1!12%@
& 
uU\\"O3	4'@
 @
r7   r  c                   :     e Zd ZdZ fdZd Zd Zd Zd Z xZ	S )WhisperDecoderWrapperz
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    c                 R    t         |   |       d|_        t        |      | _        y r3  )rw   rx   rq  r`  r  r   s     r5   rx   zWhisperDecoderWrapper.__init__E  s$     $)!%f-r7   c                 .    | j                   j                  S rv   r  r;  s    r5   r<  z*WhisperDecoderWrapper.get_input_embeddingsJ  r  r7   c                 &    || j                   _        y rv   r  r>  s     r5   r?  z*WhisperDecoderWrapper.set_input_embeddingsM  r  r7   c                     | j                   S rv   r  r;  s    r5   r  z!WhisperDecoderWrapper.get_decoderP  r  r7   c                 &     | j                   |i |S rv   r  )ry   argsr   s      r5   r   zWhisperDecoderWrapper.forwardS  s    t||T,V,,r7   )
r   r   r   r   rx   r<  r?  r  r   r   r   s   @r5   r  r  ?  s!    
.
)*-r7   r  zx
    Whisper decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).
    c                        e Zd ZdgZdZ fdZd Zd Zdej                  fdZ
d Zd	 Zd
 Ze	 	 	 	 	 	 	 	 	 	 	 	 	 ddeej"                     deej$                     deeej(                        deej$                     deej$                     dee   deej(                     deej"                     dee   dee   dee   dee   deej"                     deeef   fd       Z xZS )WhisperForCausalLMr  r8   c                     t         |   |       d|_        t        |      | _        t        j                  |j                  |j                  d      | _	        | j                          y r  )rw   rx   rq  r  r   r   r   r  rf  r  r1  r   s     r5   rx   zWhisperForCausalLM.__init__`  sP     $)!*62
		&"4"4f6G6GeT 	r7   c                     | j                   S rv   r  r;  s    r5   r  z(WhisperForCausalLM.get_output_embeddingsj  r  r7   c                     || _         y rv   r  r  s     r5   r  z(WhisperForCausalLM.set_output_embeddingsm  r  r7   r"   c                 6    | j                   j                         S rv   r  r;  s    r5   r<  z'WhisperForCausalLM.get_input_embeddingsp  r  r7   c                 :    | j                   j                  |       y rv   )r   r?  r>  s     r5   r?  z'WhisperForCausalLM.set_input_embeddingss  s    

''.r7   c                 &    || j                   _        y rv   r   r  )ry   r  s     r5   set_decoderzWhisperForCausalLM.set_decoderv  s    $

r7   c                 .    | j                   j                  S rv   r  r;  s    r5   r  zWhisperForCausalLM.get_decodery  s    zz!!!r7   rD   r  r   rn  rm  rU  r  r   r   rM  rS  r   c                    |
|
n| j                   j                  }
||n| j                   j                  }||n| j                   j                  }t	        |t
        t        t        f      r|d   }| j                  j                  ||||||||	|
|||      }| j                  |d         }d}|a|j                  |j                        }t               } ||j                  d| j                   j                        |j                  d            }|s|f|dd z   }||f|z   S |S t!        |||j"                  |j$                  |j&                  |j(                        S )a  
        encoder_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
            if the model is configured as a decoder.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import WhisperForCausalLM, WhisperForConditionalGeneration, WhisperProcessor
        >>> import torch
        >>> from datasets import load_dataset

        >>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
        >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")

        >>> assistant_model = WhisperForCausalLM.from_pretrained("distil-whisper/distil-large-v2")

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> sample = ds[0]["audio"]
        >>> input_features = processor(
        ...     sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt"
        ... ).input_features

        >>> predicted_ids = model.generate(input_features, assistant_model=assistant_model)

        >>> # decode token ids to text
        >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
        >>> transcription
        ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.'
        ```Nr   )r8   rD   r   r   rn  rm  rU  r   r   rM  rS  r   r%   r   )r  r  rm  r   rL  rp  )r   r   rM  rN  r   r   r   listr   r  r  ru  rC  r   r.   rf  r   rm  r   rL  rp  )ry   r8   rD   r  r   rn  rm  rU  r  r   r   rM  rS  r   r   r  r  r  r  s                      r5   r   zWhisperForCausalLM.forward|  su   n 2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B] o'EF-a0O **$$)"1!5+'/!5#) % 
 wqz*YYv}}-F')HFKKDKK,B,BCV[[QS_UDY,F'+'7D7V#CVC0#33!//))$55
 	
r7   r  )r   r   r   r  r  rx   r  r  r   r^  r<  r?  r  r  r   r   r+   r   r   r   r  r
   r[   r   r   r   r   r   s   @r5   r  r  W  s    ,,!O'1bii 1/%"  1515>B,07;+/59-1$(,0/3&*59c
E,,-c
 !.c
 "%(9(9":;	c

 ELL)c
 'u||4c
 "%c
   1 12c
 ))*c
 D>c
 $D>c
 'tnc
 d^c
 !!1!12c
 
u77	8c
 c
r7   r  z
    Whisper Encoder Model with a sequence classification head on top (a linear layer over the pooled output) for tasks
    like SUPERB Keyword Spotting.
    c                   P    e Zd Z fdZd Zdej                  fdZdej                  fdZe		 	 	 	 	 	 	 dde
ej                     de
ej                     d	e
eeej                           d
e
ej                     de
e   de
e   de
e   deeej                     ef   fd       Z xZS )r  c                    t         |   |       t        |      | _        |j                  dz   }|j
                  r0t        j                  t        j                  |      |z        | _
        t        j                  |j                  |j                        | _        t        j                  |j                  |j                        | _        | j#                          y r|   )rw   rx   r
  r  r  r  r   	Parameterr+   r_   r  r   r  classifier_proj_size	projector
num_labels
classifierr1  )ry   r   
num_layersrz   s      r5   rx   z&WhisperForAudioClassification.__init__  s     %f---1
((!#ejj.Dz.Q!RD6#5#5v7R7RS))F$?$?ARARS 	r7   c                 8    | j                   j                          y)z
        Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will
        not be updated during training. Only the projection layers and classification head will be updated.
        Nr  r;  s    r5   r  z,WhisperForAudioClassification.freeze_encoder  r  r7   r"   c                 6    | j                   j                         S rv   )r  r<  r;  s    r5   r<  z2WhisperForAudioClassification.get_input_embeddings  s    ||0022r7   r   c                 :    | j                   j                  |       y rv   )r  r?  r>  s     r5   r?  z2WhisperForAudioClassification.set_input_embeddings  s    ))%0r7   r   r   r  r  r   rM  rS  c                    ||n| j                   j                  }||n| j                   j                  }| j                   j                  rd}n|| j                   j                  }||n| j                   j                  }|| j                  |||||      }| j                   j                  rr|t           }t        j                  |d      }t        j                  j                  | j                  d      }	||	j                  ddd      z  j                  d      }n|d   }| j                  |      }|j!                  d      }
| j#                  |
      }d}|at%               }|j'                  |j(                        } ||j                  d| j                   j*                        |j                  d            }|s|f|dd z   }||f|z   S |S t-        |||j.                  |j0                        S )	a  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoFeatureExtractor, WhisperForAudioClassification
        >>> from datasets import load_dataset

        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id")
        >>> model = WhisperForAudioClassification.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id")

        >>> ds = load_dataset("google/fleurs", "all", split="validation", streaming=True)
        >>> sample = next(iter(ds))

        >>> inputs = feature_extractor(
        ...     sample["audio"]["array"], sampling_rate=sample["audio"]["sampling_rate"], return_tensors="pt"
        ... )
        >>> input_features = inputs.input_features

        >>> with torch.no_grad():
        ...     logits = model(input_features).logits

        >>> predicted_class_ids = torch.argmax(logits).item()
        >>> predicted_label = model.config.id2label[predicted_class_ids]
        >>> predicted_label
        'Afrikaans'
        ```NTr  r   r&   r%   r   )r  r  r   rL  )r   r   rM  r  rN  r  _HIDDEN_STATES_START_POSITIONr+   stackr   r   r   r  r.   rW   r  r  r  r   ru  rC  r  r   r   rL  )ry   r   r   r  r  r   rM  rS  r   norm_weightspooled_outputr  r  r  r  s                  r5   r   z%WhisperForAudioClassification.forward  s   X 2C1N-TXT_T_TqTq$8$D $++JjJj 	 ;;--#' !)#';;#C#C %0%<k$++B]B]""ll#"3%9' + O ;;--+,IJM!KK1=M==001C1C0LL*\->->r1a-HHMMRSMTM+A.M}5%**q*1/')HYYv}}-FFKKDKK,B,BCV[[QS_UDY!44F)-)9TGf$EvE')77&11	
 	
r7   )NNNNNNN)r   r   r   rx   r  r   r^  r<  r?  r   r   r+   r   r   r   r  r[   r   r   r   r   r   s   @r5   r  r    s   *3bii 31")) 1  6:,0EI-1,0/3&*]
 !1!12]
 ELL)]
 "%e.?.?(@"AB	]

 ))*]
 $D>]
 'tn]
 d^]
 
uU\\"$<<	=]
 ]
r7   r  )r  r  r  r   r  )i'  )Nr   )Nr   N)Kr   r)   typingr   r   r   numpyrR   r+   torch.utils.checkpointr   torch.nnr   activationsr	   cache_utilsr
   r   r   
generationr   masking_utilsr   modeling_flash_attention_utilsr   modeling_layersr   modeling_outputsr   r   r   r   r   r   modeling_utilsr   r   processing_utilsr   utilsr   r   configuration_whisperr   generation_whisperr   
get_loggerr   r   r  rI   r   r   r6   rA   r   r   ndarrayro   r  rq   r^  r   r   r   r   r   r
  r`  r  r  r  r  r  __all__r  r7   r5   <module>r     s     , ,     % ! C C ) / :  G & , 0 6 
		H	% ! 	Dc 	DS 	D 	D5<< 	D%,, c [^ * 26tc?tt t U--.	t
 t ZZtn- -"  $(,%II%<<% 
% <<	%
 U\\*% e_% % %%<{)ryy {)~<+4 <+~l4 l^ %_ % %PN
+ N
br
+ r
j 
) 
 
D 
d
&<>T d

d
N-2 -0 
D
/ D

D
N y
$: y
y
xr7   