
    Z j?                     d    S r SSKJr  SSKJr  SSKJr  \" SS9\ " S S	\5      5       5       rS	/rg
)zLXMERT model configuration    )strict   )PreTrainedConfig)auto_docstringzunc-nlp/lxmert-base-uncased)
checkpointc                     ^  \ rS rSr% SrSr0 rSr\\	S'   Sr
\\	S'   Sr\\	S	'   S
r\\	S'   Sr\\	S'   Sr\\	S'   Sr\\	S'   Sr\\	S'   Sr\\-  \	S'   Sr\\-  \	S'   Sr\\	S'   Sr\\	S'   Sr\\	S'   Sr\\	S'   Sr\\	S '   Sr\\	S!'   S"r\\	S#'   S$r\\	S%'   S&r\\	S''   S(r\\	S)'   S(r \\	S*'   S(r!\\	S+'   S(r"\\	S,'   S(r#\\	S-'   S(r$\\	S.'   S(r%\\	S/'   S0r&\S0-  \	S1'   S0r'\S0-  \	S2'   S0r(\\)\   -  S0-  \	S3'   S(r*\\	S4'   U 4S5 jr+S6r,U =r-$ )7LxmertConfig   a9  
num_qa_labels (`int`, *optional*, defaults to 9500):
    This represents the total number of different question answering (QA) labels available. If using more than
    one dataset with QA, the user will need to account for the combined number of labels across all of those
    datasets.
num_object_labels (`int`, *optional*, defaults to 1600):
    This represents the total number of semantically unique objects that lxmert will be able to classify a
    pooled-object feature as belonging to.
num_attr_labels (`int`, *optional*, defaults to 400):
    This represents the total number of semantically unique attributes that lxmert will be able to classify a
    pooled-object feature as possessing.
l_layers (`int`, *optional*, defaults to 9):
    Number of hidden layers in the Transformer language encoder.
x_layers (`int`, *optional*, defaults to 5):
    Number of hidden layers in the Transformer cross modality encoder.
r_layers (`int`, *optional*, defaults to 5):
    Number of hidden layers in the Transformer visual encoder.
visual_feat_dim (`int`, *optional*, defaults to 2048):
    This represents the last dimension of the pooled-object features used as input for the model, representing
    the size of each object feature itself.
visual_pos_dim (`int`, *optional*, defaults to 4):
    This represents the number of spatial features that are mixed into the visual features. The default is set
    to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width, height).
visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
    This represents the scaling factor by which each visual loss is multiplied if, during pretraining, one
    decides to train with multiple vision-based loss objectives.
task_matched (`bool`, *optional*, defaults to `True`):
    This task is used for sentence-image matching. If the sentence correctly describes the image, the label will
    be 1. If the sentence does not correctly describe the image, the label will be 0.
task_mask_lm (`bool`, *optional*, defaults to `True`):
    Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
    objective.
task_obj_predict (`bool`, *optional*, defaults to `True`):
    Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.
task_qa (`bool`, *optional*, defaults to `True`):
    Whether or not to add the question-answering loss to the objective.
visual_obj_loss (`bool`, *optional*, defaults to `True`):
    Whether or not to calculate the object-prediction loss objective.
visual_attr_loss (`bool`, *optional*, defaults to `True`):
    Whether or not to calculate the attribute-prediction loss objective.
visual_feat_loss (`bool`, *optional*, defaults to `True`):
    Whether or not to calculate the feature-regression loss objective.
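
Example (a minimal usage sketch; it assumes `LxmertConfig` and `LxmertModel` are both importable
from the top-level `transformers` package, and uses the default depths documented above):

```python
>>> from transformers import LxmertConfig, LxmertModel

>>> # Build a configuration with the unc-nlp/lxmert-base-uncased defaults
>>> configuration = LxmertConfig()

>>> # Depths can be chosen per encoder: language, cross-modality, and visual
>>> configuration = LxmertConfig(l_layers=12, x_layers=5, r_layers=5)

>>> # Instantiate a randomly initialized model from the configuration
>>> model = LxmertModel(configuration)

>>> # After init, the per-encoder depths are grouped into a single dict,
>>> # here {"vision": 5, "cross_encoder": 5, "language": 12}
>>> depths = configuration.num_hidden_layers
```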
    """

    model_type = "lxmert"
    attribute_map = {}

    vocab_size: int = 30522
    hidden_size: int = 768
    num_attention_heads: int = 12
    num_qa_labels: int = 9500
    num_object_labels: int = 1600
    num_attr_labels: int = 400
    intermediate_size: int = 3072
    hidden_act: str = "gelu"
    hidden_dropout_prob: float = 0.1
    attention_probs_dropout_prob: float = 0.1
    max_position_embeddings: int = 512
    type_vocab_size: int = 2
    initializer_range: float = 0.02
    l_layers: int = 9
    x_layers: int = 5
    r_layers: int = 5
    visual_feat_dim: int = 2048
    visual_pos_dim: int = 4
    visual_loss_normalizer: float = 6.67
    task_matched: bool = True
    task_mask_lm: bool = True
    task_obj_predict: bool = True
    task_qa: bool = True
    visual_obj_loss: bool = True
    visual_attr_loss: bool = True
    visual_feat_loss: bool = True
    pad_token_id: int | None = None
    bos_token_id: int | None = None
    eos_token_id: int | list[int] | None = None
    tie_word_embeddings: bool = True

    def __post_init__(self, **kwargs):
        # The three LXMERT encoders (visual, cross-modality, language) each have
        # their own depth; group them into a single dict for downstream consumers.
        self.num_hidden_layers = {"vision": self.r_layers, "cross_encoder": self.x_layers, "language": self.l_layers}
        super().__post_init__(**kwargs)


__all__ = ["LxmertConfig"]