"""Utilities for ImageNet data preprocessing & prediction decoding."""

import json
import warnings

import numpy as np

from tf_keras.src import activations
from tf_keras.src import backend
from tf_keras.src.utils import data_utils

from tensorflow.python.util.tf_export import keras_export

CLASS_INDEX = None
CLASS_INDEX_PATH = (
    "https://storage.googleapis.com/download.tensorflow.org/"
    "data/imagenet_class_index.json"
)


PREPROCESS_INPUT_DOC = """
  Preprocesses a tensor or Numpy array encoding a batch of images.

  Usage example with `applications.MobileNet`:

  ```python
  i = tf.keras.layers.Input([None, None, 3], dtype=tf.uint8)
  x = tf.cast(i, tf.float32)
  x = tf.keras.applications.mobilenet.preprocess_input(x)
  core = tf.keras.applications.MobileNet()
  x = core(x)
  model = tf.keras.Model(inputs=[i], outputs=[x])

  image = tf.image.decode_png(tf.io.read_file('file.png'))
  result = model(image)
  ```

  Args:
    x: A floating point `numpy.array` or a `tf.Tensor`, 3D or 4D with 3 color
      channels, with values in the range [0, 255].
      The preprocessed data are written over the input data
      if the data types are compatible. To avoid this
      behaviour, `numpy.copy(x)` can be used.
    data_format: Optional data format of the image tensor/array. None, means
      the global setting `tf.keras.backend.image_data_format()` is used
      (unless you changed it, it uses "channels_last").{mode}
      Defaults to `None`.

  Returns:
      Preprocessed `numpy.array` or a `tf.Tensor` with type `float32`.
      {ret}

  Raises:
      {error}
  """

PREPROCESS_INPUT_MODE_DOC = """
    mode: One of "caffe", "tf" or "torch".
      - caffe: will convert the images from RGB to BGR,
          then will zero-center each color channel with
          respect to the ImageNet dataset,
          without scaling.
      - tf: will scale pixels between -1 and 1,
          sample-wise.
      - torch: will scale pixels between 0 and 1 and then
          will normalize each channel with respect to the
          ImageNet dataset.
      Defaults to "caffe".
  """

PREPROCESS_INPUT_DEFAULT_ERROR_DOC = """
    ValueError: In case of unknown `mode` or `data_format` argument."""

PREPROCESS_INPUT_ERROR_DOC = """
    ValueError: In case of unknown `data_format` argument."""

PREPROCESS_INPUT_RET_DOC_TF = """
      The inputs pixel values are scaled between -1 and 1, sample-wise."""

PREPROCESS_INPUT_RET_DOC_TORCH = """
      The input pixels values are scaled between 0 and 1 and each channel is
      normalized with respect to the ImageNet dataset."""

PREPROCESS_INPUT_RET_DOC_CAFFE = """
      The images are converted from RGB to BGR, then each color channel is
      zero-centered with respect to the ImageNet dataset, without scaling."""
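
# NOTE (illustrative sketch, not part of the original module): the commented
# example below shows what the three `mode` values documented above do to a
# single RGB pixel, assuming a float batch of shape (1, 1, 1, 3) in
# "channels_last" layout. The constants are the ImageNet means/stds used by
# the helpers further down; printed values are rounded.
#
#     import numpy as np
#     pixel = np.array([[[[255.0, 0.0, 0.0]]]])     # pure red, RGB
#     preprocess_input(pixel.copy(), mode="tf")     # [[[[ 1., -1., -1.]]]]
#     preprocess_input(pixel.copy(), mode="torch")  # approx [[[[ 2.25, -2.04, -1.80]]]]
#     preprocess_input(pixel.copy(), mode="caffe")  # [[[[-103.939, -116.779, 131.32]]]]
#
# The `.copy()` calls matter for NumPy inputs: as the docstring notes, the
# array is preprocessed in place when its dtype is already compatible.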


@keras_export("keras.applications.imagenet_utils.preprocess_input")
def preprocess_input(x, data_format=None, mode="caffe"):
    """Preprocesses a tensor or Numpy array encoding a batch of images."""
    if mode not in {"caffe", "tf", "torch"}:
        raise ValueError(
            "Expected mode to be one of `caffe`, `tf` or `torch`. "
            f"Received: mode={mode}"
        )

    if data_format is None:
        data_format = backend.image_data_format()
    elif data_format not in {"channels_first", "channels_last"}:
        raise ValueError(
            "Expected data_format to be one of `channels_first` or "
            f"`channels_last`. Received: data_format={data_format}"
        )

    if isinstance(x, np.ndarray):
        return _preprocess_numpy_input(x, data_format=data_format, mode=mode)
    else:
        return _preprocess_symbolic_input(
            x, data_format=data_format, mode=mode
        )


preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format(
    mode=PREPROCESS_INPUT_MODE_DOC,
    ret="",
    error=PREPROCESS_INPUT_DEFAULT_ERROR_DOC,
)


@keras_export("keras.applications.imagenet_utils.decode_predictions")
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    Args:
      preds: Numpy array encoding a batch of predictions.
      top: Integer, how many top-guesses to return. Defaults to 5.

    Returns:
      A list of lists of top class prediction tuples
      `(class_name, class_description, score)`.
      One list of tuples per sample in batch input.

    Raises:
      ValueError: In case of invalid shape of the `pred` array
        (must be 2D).
    """
    global CLASS_INDEX

    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError(
            "`decode_predictions` expects "
            "a batch of predictions "
            "(i.e. a 2D array of shape (samples, 1000)). "
            "Found array with shape: " + str(preds.shape)
        )
    if CLASS_INDEX is None:
        fpath = data_utils.get_file(
            "imagenet_class_index.json",
            CLASS_INDEX_PATH,
            cache_subdir="models",
            file_hash="c2c37ea517e94d9795004a39431a14cb",
        )
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []
    for pred in preds:
        # Indices of the `top` largest scores, highest first.
        top_indices = pred.argsort()[-top:][::-1]
        result = [
            tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices
        ]
        result.sort(key=lambda x: x[2], reverse=True)
        results.append(result)
    return results


def _preprocess_numpy_input(x, data_format, mode):
    """Preprocesses a Numpy array encoding a batch of images.

    Args:
      x: Input array, 3D or 4D.
      data_format: Data format of the image array.
      mode: One of "caffe", "tf" or "torch".
        - caffe: will convert the images from RGB to BGR,
            then will zero-center each color channel with
            respect to the ImageNet dataset,
            without scaling.
        - tf: will scale pixels between -1 and 1,
            sample-wise.
        - torch: will scale pixels between 0 and 1 and then
            will normalize each channel with respect to the
            ImageNet dataset.

    Returns:
        Preprocessed Numpy array.
    """
    if not issubclass(x.dtype.type, np.floating):
        x = x.astype(backend.floatx(), copy=False)

    if mode == "tf":
        x /= 127.5
        x -= 1.0
        return x
    elif mode == "torch":
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if x.ndim == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None

    # Zero-center by mean pixel (and scale by std for "torch" mode).
    if data_format == "channels_first":
        if x.ndim == 3:
            x[0, :, :] -= mean[0]
            x[1, :, :] -= mean[1]
            x[2, :, :] -= mean[2]
            if std is not None:
                x[0, :, :] /= std[0]
                x[1, :, :] /= std[1]
                x[2, :, :] /= std[2]
        else:
            x[:, 0, :, :] -= mean[0]
            x[:, 1, :, :] -= mean[1]
            x[:, 2, :, :] -= mean[2]
            if std is not None:
                x[:, 0, :, :] /= std[0]
                x[:, 1, :, :] /= std[1]
                x[:, 2, :, :] /= std[2]
    else:
        x[..., 0] -= mean[0]
        x[..., 1] -= mean[1]
        x[..., 2] -= mean[2]
        if std is not None:
            x[..., 0] /= std[0]
            x[..., 1] /= std[1]
            x[..., 2] /= std[2]
    return x


def _preprocess_symbolic_input(x, data_format, mode):
    """Preprocesses a tensor encoding a batch of images.

    Args:
      x: Input tensor, 3D or 4D.
      data_format: Data format of the image tensor.
      mode: One of "caffe", "tf" or "torch".
        - caffe: will convert the images from RGB to BGR,
            then will zero-center each color channel with
            respect to the ImageNet dataset,
            without scaling.
        - tf: will scale pixels between -1 and 1,
            sample-wise.
        - torch: will scale pixels between 0 and 1 and then
            will normalize each channel with respect to the
            ImageNet dataset.

    Returns:
        Preprocessed tensor.
    """
    if mode == "tf":
        x /= 127.5
        x -= 1.0
        return x
    elif mode == "torch":
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if backend.ndim(x) == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None

    mean_tensor = backend.constant(-np.array(mean))

    # Zero-center by mean pixel.
    if backend.dtype(x) != backend.dtype(mean_tensor):
        x = backend.bias_add(
            x, backend.cast(mean_tensor, backend.dtype(x)), data_format
        )
    else:
        x = backend.bias_add(x, mean_tensor, data_format)
    if std is not None:
        std_tensor = backend.constant(np.array(std), dtype=backend.dtype(x))
        if data_format == "channels_first":
            std_tensor = backend.reshape(std_tensor, (-1, 1, 1))
        x /= std_tensor
    return x


def obtain_input_shape(
    input_shape,
    default_size,
    min_size,
    data_format,
    require_flatten,
    weights=None,
):
    """Internal utility to compute/validate a model's input shape.

    Args:
      input_shape: Either None (will return the default network input shape),
        or a user-provided shape to be validated.
      default_size: Default input width/height for the model.
      min_size: Minimum input width/height accepted by the model.
      data_format: Image data format to use.
      require_flatten: Whether the model is expected to
        be linked to a classifier via a Flatten layer.
      weights: One of `None` (random initialization)
        or 'imagenet' (pre-training on ImageNet).
        If weights='imagenet' input channels must be equal to 3.

    Returns:
      An integer shape tuple (may include None entries).

    Raises:
      ValueError: In case of invalid argument values.
    """
    if weights != "imagenet" and input_shape and len(input_shape) == 3:
        if data_format == "channels_first":
            if input_shape[0] not in {1, 3}:
                warnings.warn(
                    "This model usually expects 1 or 3 input channels. "
                    "However, it was passed an input_shape with "
                    + str(input_shape[0])
                    + " input channels.",
                    stacklevel=2,
                )
            default_shape = (input_shape[0], default_size, default_size)
        else:
            if input_shape[-1] not in {1, 3}:
                warnings.warn(
                    "This model usually expects 1 or 3 input channels. "
                    "However, it was passed an input_shape with "
                    + str(input_shape[-1])
                    + " input channels.",
                    stacklevel=2,
                )
            default_shape = (default_size, default_size, input_shape[-1])
    else:
        if data_format == "channels_first":
            default_shape = (3, default_size, default_size)
        else:
            default_shape = (default_size, default_size, 3)
    if weights == "imagenet" and require_flatten:
        if input_shape is not None:
            if input_shape != default_shape:
                raise ValueError(
                    "When setting `include_top=True` "
                    "and loading `imagenet` weights, "
                    f"`input_shape` should be {default_shape}.  "
                    f"Received: input_shape={input_shape}"
                )
        return default_shape
    if input_shape:
        if data_format == "channels_first":
            if input_shape is not None:
                if len(input_shape) != 3:
                    raise ValueError(
                        "`input_shape` must be a tuple of three integers."
                    )
                if input_shape[0] != 3 and weights == "imagenet":
                    raise ValueError(
                        "The input must have 3 channels; Received "
                        f"`input_shape={input_shape}`"
                    )
                if (
                    input_shape[1] is not None and input_shape[1] < min_size
                ) or (input_shape[2] is not None and input_shape[2] < min_size):
                    raise ValueError(
                        f"Input size must be at least {min_size}"
                        f"x{min_size}; Received: "
                        f"input_shape={input_shape}"
                    )
        else:
            if input_shape is not None:
                if len(input_shape) != 3:
                    raise ValueError(
                        "`input_shape` must be a tuple of three integers."
                    )
                if input_shape[-1] != 3 and weights == "imagenet":
                    raise ValueError(
                        "The input must have 3 channels; Received "
                        f"`input_shape={input_shape}`"
                    )
                if (
                    input_shape[0] is not None and input_shape[0] < min_size
                ) or (input_shape[1] is not None and input_shape[1] < min_size):
                    raise ValueError(
                        "Input size must be at least "
                        f"{min_size}x{min_size}; Received: "
                        f"input_shape={input_shape}"
                    )
    else:
        if require_flatten:
            input_shape = default_shape
        else:
            if data_format == "channels_first":
                input_shape = (3, None, None)
            else:
                input_shape = (None, None, 3)
    if require_flatten:
        if None in input_shape:
            raise ValueError(
                "If `include_top` is True, "
                "you should specify a static `input_shape`. "
                f"Received: input_shape={input_shape}"
            )
    return input_shape


def correct_pad(inputs, kernel_size):
    """Returns a tuple for zero-padding for 2D convolution with downsampling.

    Args:
      inputs: Input tensor.
      kernel_size: An integer or tuple/list of 2 integers.

    Returns:
      A tuple.
    """
    img_dim = 2 if backend.image_data_format() == "channels_first" else 1
    input_size = backend.int_shape(inputs)[img_dim : (img_dim + 2)]
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    return (
        (correct[0] - adjust[0], correct[0]),
        (correct[1] - adjust[1], correct[1]),
    )


def validate_activation(classifier_activation, weights):
    """Validates that the classifier_activation is compatible with the weights.
    Args:
      classifier_activation: str or callable activation function
      weights: The pretrained weights to load.

    Raises:
      ValueError: if an activation other than `None` or `softmax` are used with
        pretrained weights.
    NsoftmaxzOnly `None` and `softmax` activations are allowed for the `classifier_activation` argument when using pretrained weights, with `include_top=True`; Received: classifier_activation=)r   getr   )classifier_activationre   s     r   validate_activationrv     sg     'OO,AB	"%  % &;$;=
 	
	r   )Nr	   )   )N)__doc__r1   r_   numpyr   tf_keras.srcr   r   tf_keras.src.utilsr    tensorflow.python.util.tf_exportr   r-   r/   PREPROCESS_INPUT_DOCPREPROCESS_INPUT_MODE_DOC"PREPROCESS_INPUT_DEFAULT_ERROR_DOCPREPROCESS_INPUT_ERROR_DOCPREPROCESS_INPUT_RET_DOC_TFPREPROCESS_INPUT_RET_DOC_TORCHPREPROCESS_INPUT_RET_DOC_CAFFEr   formatr@   r   r   rg   rq   rv   r&   r   r   <module>r      s    G    $   ) :% " H &H "> K ": "N 
 BCQ DQ, 066	"

, 7    DE( F(VFR9D vr2
r   
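

# NOTE (illustrative sketch, not part of the original module): commented usage
# examples for the helpers above. `tf` stands for an assumed
# `import tensorflow as tf`; shapes and values are only for illustration.
#
#     import numpy as np
#
#     # decode_predictions downloads and caches imagenet_class_index.json
#     # under ~/.keras/models on first use.
#     preds = np.random.rand(2, 1000)
#     preds /= preds.sum(axis=1, keepdims=True)
#     for class_id, name, score in decode_predictions(preds, top=3)[0]:
#         print(class_id, name, float(score))
#
#     # correct_pad gives the asymmetric padding used before strided convs.
#     images = tf.zeros([8, 224, 224, 3])
#     correct_pad(images, 3)  # ((0, 1), (0, 1)) for even spatial dims
#
#     # obtain_input_shape validates a user shape against model constraints.
#     obtain_input_shape(
#         input_shape=(224, 224, 3),
#         default_size=224,
#         min_size=32,
#         data_format="channels_last",
#         require_flatten=True,
#         weights="imagenet",
#     )  # -> (224, 224, 3)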