import warnings
from functools import partial
from typing import Any, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.models import inception as inception_module
from torchvision.models.inception import Inception_V3_Weights, InceptionOutputs

from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model


__all__ = [
    "QuantizableInception3",
    "Inception_V3_QuantizedWeights",
    "inception_v3",
]


class QuantizableBasicConv2d(inception_module.BasicConv2d):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)
class QuantizableInceptionA(inception_module.InceptionA):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)
class QuantizableInceptionB(inception_module.InceptionB):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)
class QuantizableInceptionC(inception_module.InceptionC):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)
class QuantizableInceptionD(inception_module.InceptionD):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)
dZ  Z	S )QuantizableInceptionEr   r   r   Nc                    s>   t  j|dti| tj | _tj | _tj | _d S r7   )	r   r    r   r!   r9   r:   myop1myop2myop3r#   r%   r'   r(   r    W   s   zQuantizableInceptionE.__init__r)   c                 C   s   |  |}| |}| || |g}| j|d}| |}| |}| || 	|g}| j
|d}tj|dddd}| |}||||g}|S )Nr   r
   )Zkernel_sizeZstridepadding)	branch1x1Zbranch3x3_1Zbranch3x3_2aZbranch3x3_2brK   rA   Zbranch3x3dbl_1Zbranch3x3dbl_2Zbranch3x3dbl_3aZbranch3x3dbl_3brL   FZ
avg_pool2dbranch_pool)r$   r)   rO   Z	branch3x3Zbranch3x3dblrQ   rC   r'   r'   r(   r@   ]   s   




zQuantizableInceptionE._forwardc                 C   r=   r>   )r@   rM   rA   rB   r'   r'   r(   r,   r   rD   zQuantizableInceptionE.forward)
r/   r0   r1   r   r    r   listr@   r,   r3   r'   r'   r%   r(   rJ   U   s    rJ   c                       s*   e Zd Zdededdf fddZ  ZS )QuantizableInceptionAuxr   r   r   Nc                    s   t  j|dti| d S r7   )r   r    r   r#   r%   r'   r(   r    y   s   z QuantizableInceptionAux.__init__)r/   r0   r1   r   r    r3   r'   r'   r%   r(   rS   w   s    "rS   c                       sT   e Zd Zdededdf fddZdedefdd	Zdd
ee	 ddfddZ
  ZS )r   r   r   r   Nc              
      sD   t  j|dtttttttgi| t	j
j | _t	j
j | _d S )NZinception_blocks)r   r    r   r5   rG   rH   rI   rJ   rS   torchZaoZquantizationZ	QuantStubquantZDeQuantStubdequantr#   r%   r'   r(   r    ~   s    zQuantizableInception3.__init__r)   c                 C   sf   |  |}| |}| |\}}| |}| jo| j}tj r-|s(t	
d t||S | ||S )NzIScripted QuantizableInception3 always returns QuantizableInception3 Tuple)Z_transform_inputrU   r@   rV   Ztraining
aux_logitsrT   ZjitZis_scriptingwarningswarnr	   Zeager_outputs)r$   r)   ZauxZaux_definedr'   r'   r(   r,      s   





zQuantizableInception3.forwardr-   c                 C   s(   |   D ]}t|tu r|| qdS )a  Fuse conv/bn/relu modules in inception model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        N)modulestyper   r.   )r$   r-   mr'   r'   r(   r.      s
   
z QuantizableInception3.fuse_modelr   )r/   r0   r1   r   r    r   r	   r,   r   r2   r.   r3   r'   r'   r%   r(   r   }   s     r   c                   @   sJ   e Zd Zedeedddddeddejd	d
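# For reference: ``inception_v3(..., quantize=True)`` below relies on ``quantize_model``
# from ``.utils`` to run eager-mode post-training quantization on this class. A minimal
# sketch of that flow, assuming the default "fbgemm" qconfig (the exact observers used
# for the pretrained weights live in ``.utils`` and may differ):
#
#     model = QuantizableInception3().eval()
#     _replace_relu(model)                                 # drop inplace ReLUs
#     model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
#     model.fuse_model(is_qat=False)                       # fold conv+bn+relu blocks
#     torch.ao.quantization.prepare(model, inplace=True)   # insert observers
#     model(torch.rand(1, 3, 299, 299))                    # calibration pass (CPU)
#     torch.ao.quantization.convert(model, inplace=True)   # swap in int8 modules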
ddidddd
dZ	e	Z
dS )r   zUhttps://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-a2837893.pthi+  iV  )Z	crop_sizeZresize_sizeir)K   r]   fbgemmzdhttps://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-modelszImageNet-1Kg%CKS@g-VW@)zacc@1zacc@5g'1@gL7A`%7@z
                These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
                weights listed below.
            )
Z
num_paramsZmin_size
categoriesbackendZrecipeZunquantizedZ_metricsZ_ops
_file_sizeZ_docs)urlZ
transformsmetaN)r/   r0   r1   r   r   r   r   r   IMAGENET1K_V1IMAGENET1K_FBGEMM_V1DEFAULTr'   r'   r'   r(   r      s*    r   Zquantized_inception_v3)nameZ
@register_model(name="quantized_inception_v3")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else Inception_V3_Weights.IMAGENET1K_V1,
    )
)
def inception_v3(
    *,
    weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableInception3:
    """Inception v3 model architecture from
    `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`__.

    .. note::
        **Important**: In contrast to the other models, the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` or :class:`~torchvision.models.Inception_V3_Weights`, optional): The pretrained
            weights for the model. See
            :class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableInception3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/inception.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.Inception_V3_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.Inception_V3_Weights
        :members:
        :noindex:
    """
    weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)

    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableInception3(**kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        if quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None

    return model
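# Usage sketch (an illustration, not part of the torchvision module; run it from user
# code with torchvision installed): load the int8 FBGEMM weights and classify an input
# using the matching preprocessing transforms.
#
#     import torch
#     from torchvision.models.quantization import Inception_V3_QuantizedWeights, inception_v3
#
#     weights = Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
#     model = inception_v3(weights=weights, quantize=True).eval()   # CPU-only inference
#     preprocess = weights.transforms()                             # resize 342 / center-crop 299
#     batch = preprocess(torch.rand(3, 512, 512)).unsqueeze(0)      # stand-in for a real image
#     with torch.inference_mode():
#         probs = model(batch).softmax(dim=1)
#     print(weights.meta["categories"][int(probs.argmax(dim=1))])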