
"""Accuracy metrics."""

import tensorflow.compat.v2 as tf

from tf_keras.src import backend
from tf_keras.src.dtensor import utils as dtensor_utils
from tf_keras.src.metrics import base_metric
from tf_keras.src.utils import metrics_utils

from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.metrics.Accuracy")
class Accuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions equal labels.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.Accuracy()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
    >>> m.result().numpy()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
    ...                sample_weight=[1, 1, 0, 0])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.Accuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="accuracy", dtype=None):
        super().__init__(accuracy, name, dtype=dtype)


@keras_export("keras.metrics.BinaryAccuracy")
class BinaryAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match binary labels.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `binary accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      threshold: (Optional) Float representing the threshold for deciding
      whether prediction values are 1 or 0.

    Standalone usage:

    >>> m = tf.keras.metrics.BinaryAccuracy()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
    >>> m.result().numpy()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result().numpy()
    0.5
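
    The `threshold` argument shifts the decision boundary. As an illustrative
    variant of the example above, with `threshold=0.7` the `0.6` prediction is
    counted as class 0, which matches its label:

    >>> m = tf.keras.metrics.BinaryAccuracy(threshold=0.7)
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
    >>> m.result().numpy()
    1.0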

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.BinaryAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="binary_accuracy", dtype=None, threshold=0.5):
        super().__init__(
            metrics_utils.binary_matches, name, dtype=dtype, threshold=threshold
        )


@keras_export("keras.metrics.CategoricalAccuracy")
class CategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match one-hot labels.

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `categorical accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    `y_pred` and `y_true` should be passed in as vectors of probabilities,
    rather than as labels. If necessary, use `tf.one_hot` to expand `y_true` as
    a vector.
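
    For example (shown only as an illustration), integer labels can be
    expanded to the expected one-hot form with `tf.one_hot`:

    >>> tf.one_hot([2, 1], depth=3).numpy()
    array([[0., 0., 1.],
           [0., 1., 0.]], dtype=float32)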

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.CategoricalAccuracy()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="categorical_accuracy", dtype=None):
        super().__init__(
            lambda y_true, y_pred: metrics_utils.sparse_categorical_matches(
                tf.math.argmax(y_true, axis=-1), y_pred
            ),
            name,
            dtype=dtype,
        )


@keras_export("keras.metrics.SparseCategoricalAccuracy")
class SparseCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match integer labels.

    ```python
    acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)))
    ```

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `sparse categorical accuracy`: an
    idempotent operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.SparseCategoricalAccuracy()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="sparse_categorical_accuracy", dtype=None):
        super().__init__(
            metrics_utils.sparse_categorical_matches, name, dtype=dtype
        )


_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING = """Accumulates metric statistics.

For sparse categorical metrics, the shapes of `y_true` and `y_pred` are
different.

Args:
  y_true: Ground truth label values. shape = `[batch_size, d0, .. dN-1]` or
    shape = `[batch_size, d0, .. dN-1, 1]`.
  y_pred: The predicted probability values. shape = `[batch_size, d0, .. dN]`.
  sample_weight: Optional `sample_weight` acts as a
    coefficient for the metric. If a scalar is provided, then the metric is
    simply scaled by the given value. If `sample_weight` is a tensor of size
    `[batch_size]`, then the metric for each sample of the batch is rescaled
    by the corresponding element in the `sample_weight` vector. If the shape
    of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
    to this shape), then each metric element of `y_pred` is scaled by the
    corresponding value of `sample_weight`. (Note on `dN-1`: all metric
    functions reduce by 1 dimension, usually the last axis (-1)).

Returns:
  Update op.
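
Example (reuses the values from the `SparseCategoricalAccuracy` standalone
usage above to illustrate `sample_weight`):

>>> m = tf.keras.metrics.SparseCategoricalAccuracy()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
...                sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3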
"""

SparseCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)


@keras_export("keras.metrics.TopKCategoricalAccuracy")
class TopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often targets are in the top `K` predictions.

    Args:
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to `5`.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
        super().__init__(
            lambda yt, yp, k: metrics_utils.sparse_top_k_categorical_matches(
                tf.math.argmax(yt, axis=-1), yp, k
            ),
            name,
            dtype=dtype,
            k=k,
        )


@keras_export("keras.metrics.SparseTopKCategoricalAccuracy")
class SparseTopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often integer targets are in the top `K` predictions.

    Args:
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to `5`.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self, k=5, name="sparse_top_k_categorical_accuracy", dtype=None
    ):
        super().__init__(
            metrics_utils.sparse_top_k_categorical_matches,
            name,
            dtype=dtype,
            k=k,
        )


SparseTopKCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)


def accuracy(y_true, y_pred):
    [
        y_pred,
        y_true,
    ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [y_pred, y_true]
    )
    y_true.shape.assert_is_compatible_with(y_pred.shape)
    if y_true.dtype != y_pred.dtype:
        y_pred = tf.cast(y_pred, y_true.dtype)
    return tf.cast(tf.equal(y_true, y_pred), backend.floatx())


@keras_export("keras.metrics.binary_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def binary_accuracy(y_true, y_pred, threshold=0.5):
    """Calculates how often predictions match binary labels.

    Standalone usage:
    >>> y_true = [[1], [1], [0], [0]]
    >>> y_pred = [[1], [1], [0], [0]]
    >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
    >>> assert m.shape == (4,)
    >>> m.numpy()
    array([1., 1., 1., 1.], dtype=float32)

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
      threshold: (Optional) Float representing the threshold for deciding
        whether prediction values are 1 or 0.

    Returns:
      Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
    """
    return tf.reduce_mean(
        metrics_utils.binary_matches(y_true, y_pred, threshold), axis=-1
    )


@keras_export("keras.metrics.categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match one-hot labels.

    Standalone usage:
    >>> y_true = [[0, 0, 1], [0, 1, 0]]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([0., 1.], dtype=float32)

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.

    Args:
      y_true: One-hot ground truth values.
      y_pred: The prediction values.

    Returns:
      Categorical accuracy values.
    """
    return metrics_utils.sparse_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred
    )


@keras_export("keras.metrics.sparse_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match integer labels.

    Standalone usage:
    >>> y_true = [2, 1]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([0., 1.], dtype=float32)

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.

    Args:
      y_true: Integer ground truth values.
      y_pred: The prediction values.

    Returns:
      Sparse categorical accuracy values.
    """
    matches = metrics_utils.sparse_categorical_matches(y_true, y_pred)

    # If the result has a trailing dimension of size 1, squeeze it out so the
    # returned shape is `[batch_size, d0, .. dN-1]`.
    if matches.shape.ndims > 1 and matches.shape[-1] == 1:
        matches = tf.squeeze(matches, [-1])

    return matches


@keras_export("keras.metrics.top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often targets are in the top `K` predictions.

    Standalone usage:
    >>> y_true = [[0, 0, 1], [0, 1, 0]]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([1., 1.], dtype=float32)

    Args:
      y_true: The ground truth values.
      y_pred: The prediction values.
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to `5`.

    Returns:
      Top K categorical accuracy value.
    """
    return metrics_utils.sparse_top_k_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred, k
    )


@keras_export("keras.metrics.sparse_top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often integer targets are in the top `K` predictions.

    Standalone usage:
    >>> y_true = [2, 1]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(
    ...     y_true, y_pred, k=3)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([1., 1.], dtype=float32)

    Args:
      y_true: tensor of true targets.
      y_pred: tensor of predicted targets.
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to `5`.

    Returns:
      Sparse top K categorical accuracy value.
    """
    return metrics_utils.sparse_top_k_categorical_matches(y_true, y_pred, k)