from typing import Optional, Union

from torch import Tensor

from .adam import Adam, adam
from .optimizer import (
    _capturable_doc,
    _differentiable_doc,
    _foreach_doc,
    _fused_doc,
    _maximize_doc,
    _params_doc,
    ParamsT,
)

__all__ = ["AdamW", "adamw"]


class AdamW(Adam):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 1e-2,
        amsgrad: bool = False,
        *,
        maximize: bool = False,
        foreach: Optional[bool] = None,
        capturable: bool = False,
        differentiable: bool = False,
        fused: Optional[bool] = None,
    ):
        # AdamW is Adam with decoupled weight decay: delegate everything to the
        # Adam base class and force decoupled_weight_decay=True.
        super().__init__(
            params,
            lr,
            betas,
            eps,
            weight_decay,
            amsgrad,
            foreach=foreach,
            maximize=maximize,
            capturable=capturable,
            differentiable=differentiable,
            fused=fused,
            decoupled_weight_decay=True,
        )

    def __setstate__(self, state):
        super().__setstate__(state)
        # State loaded from older checkpoints may lack the flag; restore it so
        # the loaded optimizer keeps behaving like AdamW.
        for group in self.param_groups:
            group["decoupled_weight_decay"] = True
B3 3 3 3 3 3 3 3 3r)   a  Implements AdamW algorithm, where weight decay does not accumulate in the momentum nor variance.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{(lr)}, \: \beta_1, \beta_2
                \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
                \: \epsilon \text{ (epsilon)}                                                    \\
            &\hspace{13mm}      \lambda \text{(weight decay)},  \: \textit{amsgrad},
                \: \textit{maximize}                                                             \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
                \text{ (second moment)}, \: v_0^{max}\leftarrow 0                       \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}         \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm} v_t^{max} \leftarrow \mathrm{max}(v_{t-1}^{max},v_t)                  \\
            &\hspace{10mm}\widehat{v_t} \leftarrow v_t^{max}/\big(1-\beta_2^t \big)              \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                  \\
            &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.
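    Put differently: with Adam plus L2 regularization the decay term enters the
    gradient (:math:`g_t \leftarrow g_t + \lambda \theta_{t-1}`) and is therefore folded into the
    moment estimates, whereas the decoupled step
    :math:`\theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}` above acts on the
    parameters directly, so :math:`m_t` and :math:`v_t` only ever see :math:`g_t`.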
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    .. Note::
        A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`.
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    """
)


def adamw(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    # keyword-or-positional arguments with defaults
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs AdamW algorithm computation.

    See :class:`~torch.optim.AdamW` for details.
    """
    # AdamW is Adam with decoupled weight decay, so delegate to the shared
    # functional implementation with decoupled_weight_decay=True.
    adam(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        foreach=foreach,
        capturable=capturable,
        differentiable=differentiable,
        fused=fused,
        grad_scale=grad_scale,
        found_inf=found_inf,
        has_complex=has_complex,
        amsgrad=amsgrad,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        decoupled_weight_decay=True,
    )
%3 %3 %3 %3 %3D %3 %3 %3R$J	  
  
   
! " 
# $ 
%  K? \ #  #'"&3 3L3<3 6l3 f	3
 &\3 f3 d^3 3 3 D>3  3 3 3" #3$ %3& '3( 	eVm)3* +3, 
-3. /3 3 3 3 3 3r)   