from functools import partial
from typing import Any, Optional, Union

import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models.resnet import (
    BasicBlock,
    Bottleneck,
    ResNet,
    ResNet18_Weights,
    ResNet50_Weights,
    ResNeXt101_32X8D_Weights,
    ResNeXt101_64X4D_Weights,
)

from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model


__all__ = [
    "QuantizableResNet",
    "ResNet18_QuantizedWeights",
    "ResNet50_QuantizedWeights",
    "ResNeXt101_32X8D_QuantizedWeights",
    "ResNeXt101_64X4D_QuantizedWeights",
    "resnet18",
    "resnet50",
    "resnext101_32x8d",
    "resnext101_64x4d",
]


class QuantizableBasicBlock(BasicBlock):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # FloatFunctional makes the residual add + ReLU observable and quantizable.
        self.add_relu = torch.nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out = self.add_relu.add_relu(out, identity)

        return out

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(self, [["conv1", "bn1", "relu"], ["conv2", "bn2"]], is_qat, inplace=True)
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)


class QuantizableBottleneck(Bottleneck):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.skip_add_relu = nn.quantized.FloatFunctional()
        # Separate ReLU instances so that each activation gets its own observer.
        self.relu1 = nn.ReLU(inplace=False)
        self.relu2 = nn.ReLU(inplace=False)

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu2(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)
        out = self.skip_add_relu.add_relu(out, identity)

        return out

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(
            self, [["conv1", "bn1", "relu1"], ["conv2", "bn2", "relu2"], ["conv3", "bn3"]], is_qat, inplace=True
        )
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)


class QuantizableResNet(ResNet):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        # Quantize the input, run the shared float implementation, then dequantize the output.
        x = self.quant(x)
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in resnet models

        Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        """

        _fuse_modules(self, ["conv1", "bn1", "relu"], is_qat, inplace=True)
        for m in self.modules():
            if type(m) is QuantizableBottleneck or type(m) is QuantizableBasicBlock:
                m.fuse_model(is_qat)


def _resnet(
    block: type[Union[QuantizableBasicBlock, QuantizableBottleneck]],
    layers: list[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    quantize: bool,
    **kwargs: Any,
) -> QuantizableResNet:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableResNet(block, layers, **kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "backend": "fbgemm",
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
    "_docs": """
        These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
        weights listed below.
    """,
}


# The numeric metadata below (accuracy, op count, file size) mirrors the values published
# for these quantized checkpoints in the torchvision documentation.
class ResNet18_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 11689512,
            "unquantized": ResNet18_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 69.494, "acc@5": 88.882}},
            "_ops": 1.814,
            "_file_size": 11.238,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


class ResNet50_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 75.920, "acc@5": 92.814}},
            "_ops": 4.089,
            "_file_size": 24.759,
        },
    )
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V2,
            "_metrics": {"ImageNet-1K": {"acc@1": 80.282, "acc@5": 94.976}},
            "_ops": 4.089,
            "_file_size": 24.953,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2


class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 78.986, "acc@5": 94.480}},
            "_ops": 16.414,
            "_file_size": 86.034,
        },
    )
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2,
            "_metrics": {"ImageNet-1K": {"acc@1": 82.574, "acc@5": 96.132}},
            "_ops": 16.414,
            "_file_size": 86.645,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2


class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 83455272,
            "recipe": "https://github.com/pytorch/vision/pull/5935",
            "unquantized": ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 82.898, "acc@5": 96.326}},
            "_ops": 15.46,
            "_file_size": 81.556,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


@register_model(name="quantized_resnet18")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: (
            ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1
            if kwargs.get("quantize", False)
            else ResNet18_Weights.IMAGENET1K_V1
        ),
    )
)
def resnet18(
    *,
    weights: Optional[Union[ResNet18_QuantizedWeights, ResNet18_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNet-18 model from
    `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` or :class:`~torchvision.models.ResNet18_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNet18_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNet18_Weights
        :members:
        :noindex:
    """
    weights = (ResNet18_QuantizedWeights if quantize else ResNet18_Weights).verify(weights)

    return _resnet(QuantizableBasicBlock, [2, 2, 2, 2], weights, progress, quantize, **kwargs)


@register_model(name="quantized_resnet50")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: (
            ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1
            if kwargs.get("quantize", False)
            else ResNet50_Weights.IMAGENET1K_V1
        ),
    )
)
def resnet50(
    *,
    weights: Optional[Union[ResNet50_QuantizedWeights, ResNet50_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNet-50 model from
    `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` or :class:`~torchvision.models.ResNet50_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNet50_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNet50_Weights
        :members:
        :noindex:
    """
    weights = (ResNet50_QuantizedWeights if quantize else ResNet50_Weights).verify(weights)

    return _resnet(QuantizableBottleneck, [3, 4, 6, 3], weights, progress, quantize, **kwargs)


@register_model(name="quantized_resnext101_32x8d")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: (
            ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
            if kwargs.get("quantize", False)
            else ResNeXt101_32X8D_Weights.IMAGENET1K_V1
        ),
    )
)
def resnext101_32x8d(
    *,
    weights: Optional[Union[ResNeXt101_32X8D_QuantizedWeights, ResNeXt101_32X8D_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNeXt-101 32x8d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
        :members:
        :noindex:
    """
    weights = (ResNeXt101_32X8D_QuantizedWeights if quantize else ResNeXt101_32X8D_Weights).verify(weights)

    _ovewrite_named_param(kwargs, "groups", 32)
    _ovewrite_named_param(kwargs, "width_per_group", 8)
    return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)


@register_model(name="quantized_resnext101_64x4d")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: (
            ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
            if kwargs.get("quantize", False)
            else ResNeXt101_64X4D_Weights.IMAGENET1K_V1
        ),
    )
)
def resnext101_64x4d(
    *,
    weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNeXt-101 64x4d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
        :members:
        :noindex:
    """
    weights = (ResNeXt101_64X4D_QuantizedWeights if quantize else ResNeXt101_64X4D_Weights).verify(weights)

    _ovewrite_named_param(kwargs, "groups", 64)
    _ovewrite_named_param(kwargs, "width_per_group", 4)
    return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)