# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Autoformer model configuration"""

from huggingface_hub.dataclasses import strict

from ...configuration_utils import PreTrainedConfig
from ...utils import auto_docstring


@auto_docstring(checkpoint="huggingface/autoformer-tourism-monthly")
@strict
class AutoformerConfig(PreTrainedConfig):
    r"""
    prediction_length (`int`):
        The prediction length for the decoder. In other words, the prediction horizon of the model.
    context_length (`int`, *optional*, defaults to `prediction_length`):
        The context length for the encoder. If unset, the context length will be the same as the
        `prediction_length`.
    distribution_output (`string`, *optional*, defaults to `"student_t"`):
        The distribution emission head for the model. Can be one of `"student_t"`, `"normal"` or
        `"negative_binomial"`.
    loss (`string`, *optional*, defaults to `"nll"`):
        The loss function for the model corresponding to the `distribution_output` head. For parametric
        distributions this is the negative log likelihood (nll), which is currently the only supported value.
    input_size (`int`, *optional*, defaults to 1):
        The size of the target variable, which is 1 for univariate targets and > 1 for multivariate targets.
    lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
        The lags of the input time series used as covariates, often dictated by the frequency of the data.
    scaling (`bool` or `str`, *optional*, defaults to `True`):
        Whether to scale the input targets.
    num_time_features (`int`, *optional*, defaults to 0):
        The number of time features in the input time series.
    num_dynamic_real_features (`int`, *optional*, defaults to 0):
        The number of dynamic real valued features.
    num_static_categorical_features (`int`, *optional*, defaults to 0):
        The number of static categorical features.
    num_static_real_features (`int`, *optional*, defaults to 0):
        The number of static real valued features.
    cardinality (`list[int]`, *optional*):
        The cardinality (number of different values) for each of the static categorical features. Should be a list
        of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
        `num_static_categorical_features` is > 0.
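    embedding_dimension (`list[int]`, *optional*):
        The dimension of the embedding for each of the static categorical features. Should be a list of integers,
        having the same length as `num_static_categorical_features`.
    d_model (`int`, *optional*, defaults to 64):
        Dimensionality of the transformer layers.
    encoder_layers (`int`, *optional*, defaults to 2):
        Number of encoder layers.
    decoder_layers (`int`, *optional*, defaults to 2):
        Number of decoder layers.
    encoder_attention_heads (`int`, *optional*, defaults to 2):
        Number of attention heads for each attention layer in the Transformer encoder.
    decoder_attention_heads (`int`, *optional*, defaults to 2):
        Number of attention heads for each attention layer in the Transformer decoder.
    encoder_ffn_dim (`int`, *optional*, defaults to 32):
        Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
    decoder_ffn_dim (`int`, *optional*, defaults to 32):
        Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
    activation_function (`str`, *optional*, defaults to `"gelu"`):
        The non-linear activation function in the encoder and decoder. `"gelu"` and `"relu"` are supported.
    dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the encoder and decoder.
    encoder_layerdrop (`float`, *optional*, defaults to 0.1):
        The dropout probability for the attention and fully connected layers of each encoder layer.
    decoder_layerdrop (`float`, *optional*, defaults to 0.1):
        The dropout probability for the attention and fully connected layers of each decoder layer.
    attention_dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability for the attention probabilities.
    activation_dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability used between the two layers of the feed-forward networks.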
    num_parallel_samples (`int`, *optional*, defaults to 100):
        The number of samples to generate in parallel for each time step of inference.
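    init_std (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated normal weight initialization distribution.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether to use the past key/value attentions (if applicable to the model) to speed up decoding.
    is_encoder_decoder (`bool`, *optional*, defaults to `True`):
        Whether the model is used as an encoder/decoder or not.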
    label_length (`int`, *optional*, defaults to 10):
        Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
        non-autoregressive generation).
    moving_average (`int`, *optional*, defaults to 25):
        The window size of the moving average. In practice, this is the kernel size of the `AvgPool1d` used in the
        decomposition layer.
    autocorrelation_factor (`int`, *optional*, defaults to 3):
        "Attention" (i.e. AutoCorrelation mechanism) factor used to find the top-k autocorrelation delays. The
        paper recommends setting it to a value between 1 and 5.

    Example:

    ```python
    >>> from transformers import AutoformerConfig, AutoformerModel

    >>> # Initializing a default Autoformer configuration
    >>> configuration = AutoformerConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = AutoformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
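
    >>> # A customized configuration (the values below are purely illustrative)
    >>> custom_configuration = AutoformerConfig(
    ...     prediction_length=24,
    ...     context_length=48,
    ...     lags_sequence=[1, 2, 3, 7],
    ...     num_time_features=2,
    ... )
    >>> custom_configuration.feature_size  # input_size * len(lags_sequence) + additional features
    8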
    ```"""

    model_type = "autoformer"
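    # Map the standard config attribute names onto the corresponding Autoformer hyperparameters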
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    prediction_length: int | None = None
    context_length: int | None = None
    distribution_output: str = "student_t"
    loss: str = "nll"
    input_size: int = 1
    lags_sequence: list[int] | tuple[int, ...] = (1, 2, 3, 4, 5, 6, 7)
    scaling: bool | str = True
    num_time_features: int = 0
    num_dynamic_real_features: int = 0
    num_static_categorical_features: int = 0
    num_static_real_features: int = 0
    cardinality: list[int] | None = None
    embedding_dimension: list[int] | None = None
    d_model: int = 64
    encoder_attention_heads: int = 2
    decoder_attention_heads: int = 2
    encoder_layers: int = 2
    decoder_layers: int = 2
    encoder_ffn_dim: int = 32
    decoder_ffn_dim: int = 32
    activation_function: str = "gelu"
    dropout: float | int = 0.1
    encoder_layerdrop: float | int = 0.1
    decoder_layerdrop: float | int = 0.1
    attention_dropout: float | int = 0.1
    activation_dropout: float | int = 0.1
    num_parallel_samples: int = 100
    init_std: float = 0.02
    use_cache: bool = True
    is_encoder_decoder: bool = True
    label_length: int = 10
    moving_average: int = 25
    autocorrelation_factor: int = 3

    def __post_init__(self, **kwargs):
        # If no explicit context window is given, fall back to the prediction horizon
        self.context_length = self.context_length if self.context_length is not None else self.prediction_length
        self.lags_sequence = list(self.lags_sequence)

        # Without static categorical features, fall back to a single dummy category of cardinality 0
        if not (self.cardinality is not None and self.num_static_categorical_features > 0):
            self.cardinality = [0]

        # Default heuristic for the embedding sizes: min(50, (cardinality + 1) // 2) per categorical feature
        if not (self.embedding_dimension is not None and self.num_static_categorical_features > 0):
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        # Input dimension of the encoder/decoder: lagged target values plus all additional features
        self.feature_size = self.input_size * len(self.lags_sequence) + self._number_of_features
        super().__post_init__(**kwargs)

    @property
    def _number_of_features(self) -> int:
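        """Number of additional input features: embeddings, real/time features, and the loc/scale pair.

        With the defaults this is 0 + 0 + 0 + 0 + 1 * 2 = 2, so `feature_size` = 1 * 7 + 2 = 9.
        """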
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )

    def validate_architecture(self):
        """Part of `@strict`-powered validation. Validates the architecture of the config."""
        if (
            self.cardinality is not None
            and self.num_static_categorical_features > 0
            and len(self.cardinality) != self.num_static_categorical_features
        ):
            raise ValueError(
                "The cardinality should be a list of the same length as `num_static_categorical_features`"
            )

        if (
            self.embedding_dimension is not None
            and self.num_static_categorical_features > 0
            and len(self.embedding_dimension) != self.num_static_categorical_features
        ):
            raise ValueError(
                "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
            )


__all__ = ["AutoformerConfig"]
