
    Z jr                     l    S SK Jr  SSKJr  SSKJr  SSKJr  \" SS9\ " S S	\5      5       5       rS	/r	g
)    )strict   )PreTrainedConfig)RopeParameters)auto_docstringzLiquidAI/LFM2-1.2B)
checkpointc                     ^  \ rS rSr% SrSrS/rSrSr\	\
S'   Sr\	\
S	'   S
r\	\
S'   Sr\	\
S'   Sr\	\
S'   Sr\	\
S'   Sr\	\
S'   Sr\\
S'   Sr\\
S'   Sr\\
S'   Sr\	S-  \
S'   Sr\	S-  \
S'   Sr\	\\	   -  S-  \
S'   Sr\\
S '   Sr\\-  S-  \
S!'   S"r\\
S#'   S$r\	\
S%'   S&r \	\
S''   S(r!\\	-  \
S)'   Sr"\\
S*'   Sr#\\	   S-  \
S+'   Sr$\\%   S-  \
S,'   U 4S- jr&S.r'U =r($ )/
Lfm2Config   a  
    conv_bias (`bool`, *optional*, defaults to `False`):
        Whether to use bias in the conv layers.
    conv_L_cache (`int`, *optional*, defaults to 3):
        L_cache dim in the conv layers.
    block_multiple_of (`int`, *optional*, defaults to 256):
        Multiple for the `intermediate_size`.
    block_ffn_dim_multiplier (`float`, *optional*, defaults to 1.0):
        Multiplier for the `intermediate_size`.
    block_auto_adjust_ff_dim (`bool`, *optional*, defaults to `True`):
        Whether to adjust the dim of the `intermediate_size`.
    full_attn_idxs (`list[int]`, *optional*):
        Indices of the layers which use full attention.
    layer_types (`list[str]`, *optional*):
        Type of each layer, either `"full_attention"` or `"conv"`. If not given, it is derived from
        `full_attn_idxs` in `__post_init__`.
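
    The `block_*` options are only stored here; the feed-forward width itself is computed in the model
    code. As a rough, illustrative sketch of the adjustment (assuming the LLaMA-style rounding commonly
    used for this kind of sizing, not a verbatim copy of the modeling code; `_adjusted_ff_dim` is a
    hypothetical helper):

    ```python
    >>> def _adjusted_ff_dim(ff_dim, multiple_of=256, multiplier=1.0, auto_adjust=True):
    ...     # Illustrative only: shrink to 2/3, apply the multiplier,
    ...     # then round up to the next multiple of `block_multiple_of`.
    ...     if auto_adjust:
    ...         ff_dim = int(2 * ff_dim / 3)
    ...         ff_dim = int(multiplier * ff_dim)
    ...         ff_dim = multiple_of * ((ff_dim + multiple_of - 1) // multiple_of)
    ...     return ff_dim

    >>> _adjusted_ff_dim(12288)
    8192
    ```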

    ```python
    >>> from transformers import Lfm2Model, Lfm2Config

    >>> # Initializing an LFM2 model
    >>> configuration = Lfm2Config()

    >>> # Initializing a model from the LFM2-1.2B style configuration
    >>> model = Lfm2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
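
    A minimal sketch of how `full_attn_idxs` interacts with `layer_types` (assuming the default
    `num_hidden_layers`; layers listed in `full_attn_idxs` become `"full_attention"`, all remaining
    layers become `"conv"`):

    ```python
    >>> config = Lfm2Config(full_attn_idxs=[0, 2])
    >>> config.layer_types[:4]
    ['full_attention', 'conv', 'full_attention', 'conv']
    ```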
lfm2past_key_valuesg    .Ai   
vocab_sizei 
  hidden_sizei 0  intermediate_size    num_hidden_layersnum_attention_heads   num_key_value_headsi  max_position_embeddingsg{Gz?initializer_rangegh㈵>norm_epsT	use_cacher   Npad_token_id   bos_token_id   eos_token_idtie_word_embeddingsrope_parametersF	conv_biasr   conv_L_cache   block_multiple_ofg      ?block_ffn_dim_multiplierblock_auto_adjust_ff_dimfull_attn_idxslayer_typesc                   > U R                   cx  U R                  b  U R                  O[        [        U R                  5      5      U l        [        U R                  5       Vs/ s H  o"U R                  ;   a  SOSPM     snU l         UR                  SU R                  5      U l        UR                  SU R                  5      U l        [        TU ]$  " S0 UD6  g s  snf )Nfull_attentionconvtie_embeddingblock_ff_dim )
r(   r'   listranger   popr   r   super__post_init__)selfkwargsi	__class__s      |/root/GenerationalWealth/GenerationalWealth/venv/lib/python3.13/site-packages/transformers/models/lfm2/configuration_lfm2.pyr3   Lfm2Config.__post_init__Q   s    #'+':':'F##DQVW[WmWmQnLo  SXX\XnXnRo RoQ)<)<$< &HRo D $*::ot?W?W#X !'ND<R<R!S'' s   "C)r'   r   r(   r   ))__name__
__module____qualname____firstlineno____doc__
model_typekeys_to_ignore_at_inferencedefault_thetar   int__annotations__r   r   r   r   r   r   r   floatr   r   boolr   r   r   r/   r   r    r   dictr!   r"   r$   r%   r&   r'   r(   strr3   __static_attributes____classcell__)r7   s   @r8   r
   r
      sB   8 J#4"5MJK"s"s!!  #*S*#u#HeIt L#*  L#* +,L#S	/D(, $$48O^d*T18ItL# s ,/eck/%)d)'+NDI$+$(KcT!(( (    r
   N)
huggingface_hub.dataclassesr   configuration_utilsr   modeling_rope_utilsr   utilsr   r
   __all__r.   rJ   r8   <module>rP      sJ     / 3 1 # /0C(! C(  1C(L .rJ   