"""Regression metrics, e.g. MAE/MSE/etc."""

import warnings

import tensorflow.compat.v2 as tf

from tf_keras.src import backend
from tf_keras.src.dtensor import utils as dtensor_utils
from tf_keras.src.losses import logcosh
from tf_keras.src.losses import mean_absolute_error
from tf_keras.src.losses import mean_absolute_percentage_error
from tf_keras.src.losses import mean_squared_error
from tf_keras.src.losses import mean_squared_logarithmic_error
from tf_keras.src.metrics import base_metric
from tf_keras.src.utils import losses_utils
from tf_keras.src.utils import metrics_utils
from tf_keras.src.utils.tf_utils import is_tensor_or_variable

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.metrics.MeanRelativeError")
class MeanRelativeError(base_metric.Mean):
    """Computes the mean relative error by normalizing with the given values.

    This metric creates two local variables, `total` and `count` that are used
    to compute the mean relative error. This is weighted by `sample_weight`, and
    it is ultimately returned as `mean_relative_error`: an idempotent operation
    that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      normalizer: The normalizer values with same shape as predictions.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
    >>> m.update_state([1, 3, 2, 3], [2, 4, 6, 8])

    >>> # metric = mean(|y_pred - y_true| / normalizer)
    >>> #        = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3])
    >>> #        = 5/4 = 1.25
    >>> m.result().numpy()
    1.25

    Usage with `compile()` API:

    ```python
    model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, normalizer, name=None, dtype=None):
        super().__init__(name=name, dtype=dtype)
        normalizer = tf.cast(normalizer, self._dtype)
        self.normalizer = normalizer

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates metric statistics.

        Args:
          y_true: The ground truth values.
          y_pred: The predicted values.
          sample_weight: Optional weighting of each example. Can
            be a `Tensor` whose rank is either 0, or the same rank as `y_true`,
            and must be broadcastable to `y_true`. Defaults to `1`.

        Returns:
          Update op.
        """
        y_true = tf.cast(y_true, self._dtype)
        y_pred = tf.cast(y_pred, self._dtype)
        [
            y_pred,
            y_true,
        ], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [y_pred, y_true], sample_weight
        )
        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
            y_pred, y_true
        )

        y_pred, self.normalizer = losses_utils.remove_squeezable_dimensions(
            y_pred, self.normalizer
        )
        y_pred.shape.assert_is_compatible_with(y_true.shape)
        relative_errors = tf.math.divide_no_nan(
            tf.abs(y_true - y_pred), self.normalizer
        )

        return super().update_state(
            relative_errors, sample_weight=sample_weight
        )

    def get_config(self):
        n = self.normalizer
        config = {
            "normalizer": backend.eval(n) if is_tensor_or_variable(n) else n
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))


@keras_export("keras.metrics.CosineSimilarity")
class CosineSimilarity(base_metric.MeanMetricWrapper):
    """Computes the cosine similarity between the labels and predictions.

    `cosine similarity = (a . b) / ||a|| ||b||`

    See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity).

    This metric keeps the average cosine similarity between `predictions` and
    `labels` over a stream of data.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      axis: (Optional) The dimension along which the cosine
        similarity is computed. Defaults to `-1`.

    Standalone usage:

    >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
    >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
    >>> #        = ((0. + 0.) +  (0.5 + 0.5)) / 2
    >>> m = tf.keras.metrics.CosineSimilarity(axis=1)
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
    >>> m.result().numpy()
    0.49999997

    >>> m.reset_state()
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
    ...                sample_weight=[0.3, 0.7])
    >>> m.result().numpy()
    0.6999999

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="cosine_similarity", dtype=None, axis=-1):
        super().__init__(cosine_similarity, name, dtype=dtype, axis=axis)
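

# Illustrative note (a sketch, not part of the public TF-Keras API): the
# docstring's weighted result 0.6999999 is the weighted mean of the
# per-sample similarities [0., 1.]:
# (0.3 * 0. + 0.7 * 1.) / (0.3 + 0.7) = 0.7, up to float32 rounding.
def _demo_cosine_similarity_weighting():
    m = CosineSimilarity(axis=1)
    m.update_state(
        [[0.0, 1.0], [1.0, 1.0]],
        [[1.0, 0.0], [1.0, 1.0]],
        sample_weight=[0.3, 0.7],
    )
    # Expected: ~0.7
    return m.result().numpy()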
                  d fd	       Z xZS )MeanAbsoluteErrora  Computes the mean absolute error between the labels and predictions.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanAbsoluteError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.25

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.MeanAbsoluteError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean_absolute_error", dtype=None):
        super().__init__(mean_absolute_error, name, dtype=dtype)


@keras_export("keras.metrics.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(base_metric.MeanMetricWrapper):
    """Computes the mean absolute percentage error between `y_true` and
    `y_pred`.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanAbsolutePercentageError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    250000000.0

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    500000000.0

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean_absolute_percentage_error", dtype=None):
        super().__init__(mean_absolute_percentage_error, name, dtype=dtype)
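

# Illustrative note (a sketch, not part of the public TF-Keras API): when
# `y_true` contains zeros, the percentage error divides by
# `max(|y_true|, epsilon)`, with the default backend epsilon of 1e-7, which
# is where the docstring's 250000000.0 comes from:
# 100 * mean([|0 - 1| / 1e-7, 0, 0, 0]) = 2.5e8.
def _demo_mean_absolute_percentage_error_zeros():
    m = MeanAbsolutePercentageError()
    m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    # Expected: ~2.5e8
    return m.result().numpy()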
                  d fd	       Z xZS )MeanSquaredErrora  Computes the mean squared error between `y_true` and `y_pred`.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.25

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.MeanSquaredError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean_squared_error", dtype=None):
        super().__init__(mean_squared_error, name, dtype=dtype)


@keras_export("keras.metrics.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(base_metric.MeanMetricWrapper):
    """Computes the mean squared logarithmic error between `y_true` and
    `y_pred`.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanSquaredLogarithmicError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.12011322

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.24022643

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean_squared_logarithmic_error", dtype=None):
        super().__init__(mean_squared_logarithmic_error, name, dtype=dtype)
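

# Illustrative note (a sketch, not part of the public TF-Keras API): msle is
# mean(square(log(y_true + 1) - log(y_pred + 1))). In the docstring example
# the only nonzero term is (log(1) - log(2))**2 = log(2)**2 ~= 0.48045,
# averaged over 4 values: 0.48045 / 4 ~= 0.12011322.
def _demo_mean_squared_logarithmic_error():
    m = MeanSquaredLogarithmicError()
    m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    # Expected: ~0.12011322
    return m.result().numpy()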
                  d fd	       Zd fd	Zd Z xZ	S )RootMeanSquaredErroraS  Computes root mean squared error metric between `y_true` and `y_pred`.

    Standalone usage:

    >>> m = tf.keras.metrics.RootMeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.70710677

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.RootMeanSquaredError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="root_mean_squared_error", dtype=None):
        super().__init__(name, dtype=dtype)

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates root mean squared error statistics.

        Args:
          y_true: The ground truth values.
          y_pred: The predicted values.
          sample_weight: Optional weighting of each example. Can
            be a `Tensor` whose rank is either 0, or the same rank as `y_true`,
            and must be broadcastable to `y_true`. Defaults to `1`.

        Returns:
          Update op.
        """
        y_true = tf.cast(y_true, self._dtype)
        y_pred = tf.cast(y_pred, self._dtype)
        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
            y_pred, y_true
        )
        error_sq = tf.math.squared_difference(y_pred, y_true)
        return super().update_state(error_sq, sample_weight=sample_weight)

    def result(self):
        return tf.sqrt(tf.math.divide_no_nan(self.total, self.count))
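

# Illustrative sketch (not part of the public TF-Keras API): because `total`
# accumulates squared errors and the square root is only taken in `result()`,
# the streamed value equals the RMSE over all values seen so far, not the
# mean of per-batch RMSEs.
def _demo_root_mean_squared_error_streaming():
    m = RootMeanSquaredError()
    m.update_state([[0, 1]], [[1, 1]])  # squared errors: [1, 0]
    m.update_state([[0, 0]], [[0, 0]])  # squared errors: [0, 0]
    # sqrt((1 + 0 + 0 + 0) / 4) = 0.5
    return m.result().numpy()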
                  d fd	       Z xZS )LogCoshErrora,  Computes the logarithm of the hyperbolic cosine of the prediction error.

    `logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred -
    y_true)

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.LogCoshError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.10844523

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.21689045

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.LogCoshError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="logcosh", dtype=None):
        super().__init__(logcosh, name, dtype=dtype)
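

# Illustrative note (a sketch, not part of the public TF-Keras API):
# log(cosh(x)) behaves like x**2 / 2 for small x and like |x| - log(2) for
# large x, so the metric is roughly half the squared error near zero but
# only grows linearly, like MAE, for large errors.
def _demo_logcosh_asymptotics():
    small, large = tf.constant(0.01), tf.constant(20.0)
    near_half_mse = tf.math.log(tf.math.cosh(small))  # ~= 0.01**2 / 2
    near_mae = tf.math.log(tf.math.cosh(large))  # ~= 20.0 - log(2)
    return float(near_half_mse), float(near_mae)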
                  	 	 	 	 d fd	       Zd Zd	dZd Z	d Z
 fdZ xZS )
R2Scorea.  Computes R2 score.

    This is also called the
    [coefficient of
    determination](https://en.wikipedia.org/wiki/Coefficient_of_determination).

    It indicates how close the fitted regression line
    is to ground-truth data.

    - The highest score possible is 1.0. It indicates that the predictors
        perfectly accounts for variation in the target.
    - A score of 0.0 indicates that the predictors do not
        account for variation in the target.
    - It can also be negative if the model is worse than random.

    This metric can also compute the "Adjusted R2" score.

    Args:
        class_aggregation: Specifies how to aggregate scores corresponding to
            different output classes (or target dimensions),
            i.e. different dimensions on the last axis of the predictions.
            Equivalent to `multioutput` argument in Scikit-Learn.
            Should be one of
            `None` (no aggregation), `"uniform_average"`,
            `"variance_weighted_average"`.
        num_regressors: Number of independent regressors used
            ("Adjusted R2" score). 0 is the standard R2 score.
            Defaults to `0`.
        name: Optional. string name of the metric instance.
        dtype: Optional. data type of the metric result.

    Example:

    >>> y_true = np.array([[1], [4], [3]], dtype=np.float32)
    >>> y_pred = np.array([[2], [4], [4]], dtype=np.float32)
    >>> metric = tf.keras.metrics.R2Score()
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    >>> result.numpy()
    0.57142854
    c                     t         |   ||       d}||vrt        d| d|       |dk  rt        d|       || _        || _        | j                  dd      | _        d	| _        y )
Nr   )Nuniform_averagevariance_weighted_averagez@Invalid value for argument `class_aggregation`. Expected one of z. Received: class_aggregation=r   z]Invalid value for argument `num_regressors`. Expected a value >= 0. Received: num_regressors=num_samplesint32F)r   r   
ValueErrorclass_aggregationnum_regressors
add_weightrl   built)r   ro   rp   r   r   valid_class_aggregation_valuesr   s         r   r   zR2Score.__init__  s     	d%0*
&
 $BB89 ://@.AC 
 A,,:+;= 
 "3,??W?M
r   c                    t        |      dk7  st        |      dk7  rt        d| d| d      |d   |d   t        d| d| d      |d   }| j                  d|gd	      | _        | j                  d
|gd	      | _        | j                  d|gd	      | _        | j                  d|gd	      | _        d| _        y )N   zcR2Score expects 2D inputs with shape (batch_size, output_dim). Received input shapes: y_pred.shape=z and y_true.shape=.rE   zR2Score expects 2D inputs with shape (batch_size, output_dim), with output_dim fully defined (not None). Received input shapes: y_pred.shape=squared_sumzeros)r   r%   initializersumresidualra   T)lenrn   rq   rw   rz   	total_msera   rr   )r   y_true_shapey_pred_shapenum_classess       r   buildzR2Score.build  s    |!S%6!%;((4~ 6  ,~Q0  #|B'7'?( )5~ 6  ,~Q	0  #2&??- + 

 ??- # 

 - ) 

 __- % 


 
r   c                    t        j                  || j                        }t        j                  || j                        }| j                  s&| j	                  |j
                  |j
                         |d}t        j                  || j                        }|j
                  j                  dk(  rt        j                  |d      }t         j                  j                  j                  ||      }||z  }| j                  j                  t        j                  |d             | j                  j                  t        j                  ||z  d             | j                  j                  t        j                  ||z
  dz  |z  d             | j                   j                  t        j                  |d             | j"                  j                  t        j$                  |             y )NrK      rC   )weightsvaluesr   ru   )r   convert_to_tensorr   rr   r   r%   rankexpand_dims__internal__opsbroadcast_weightsrz   
assign_add
reduce_sumrw   r}   ra   rl   size)r   r+   r,   r!   weighted_y_trues        r   r*   zR2Score.update_state  so   %%fDJJ?%%fDJJ?zzJJv||V\\2 M,,]$**M##q(NN=qAM++==!& > 
 !=0BMM/BC##MM&?2;	
 	!!MM6F?q0=@qI	
 	

bmmMBC##BGGFO4r   c                    | j                   | j                  z  }| j                  | j                   |z  z
  }d| j                  |z  z
  }t	        j
                  t        j                  j                  |      d|      }| j                  dk(  rt	        j                  |      }nD| j                  dk(  r3t	        j                  ||z        }t	        j                  |      }||z  }n|}| j                  dk7  rZ| j                  | j                  dz
  kD  rt        j                  dd       |S | j                  | j                  dz
  k(  rt        j                  d	d       |S t	        j                  | j                  t        j                   
      }t	        j                  | j                  t        j                   
      }t	        j"                  t	        j$                  d|      t	        j$                  |d            }	t	        j$                  t	        j$                  ||      d      }
t	        j$                  dt	        j&                  |	|
            }|S )Nr   g        rj   rk   r   zdMore independent predictors than datapoints in adjusted R2 score. Falling back to standard R2 score.ru   )
stacklevelzIDivision by zero in Adjusted R2 score. Falling back to standard R2 score.rK   g      ?)rz   ra   rw   r}   r   wherer'   is_infro   reduce_meanr   rp   rl   warningswarnr   float32multiplysubtractdivide)r   meanr`   
raw_scoresr2_scoreweighted_sumsum_of_weightsr4   pnumdens              r   rb   zR2Score.result1  s   xx$**$  488d?2$..501
XXbggnnZ8#zJ
!!%66~~j1H##'BB==);<L]]51N#n4H!H!#""T%5%5%99O &  $$(8(81(<<9   GGD,,BJJ?GGD//rzzBkkKKX.As0C kk"++a"3S9;;sBIIc3,?@r   c                     | j                   D ]<  }|j                  t        j                  |j                  |j
                               > y rJ   )	variablesassignr   rx   r%   r   )r   vs     r   reset_statezR2Score.reset_stateW  s4     	7AHHRXXaggQWW56	7r   c                 ^    | j                   | j                  d}t        |          }i ||S )N)ro   rp   )ro   rp   r   r0   )r   r5   r6   r   s      r   r0   zR2Score.get_config[  s;    !%!7!7"11
 g(*(+(((r   )rj   r   r   Nr7   )r8   r9   r:   r;   r<   r=   r   r   r*   rb   r   r0   r>   r?   s   @r   rh   rh     sQ    (T  , >%N58$L7) )r   rh   c                     t         j                  j                  | |      } t         j                  j                  ||      }t        j                  | |z  |      S )a;  Computes the cosine similarity between labels and predictions.

    Args:
      y_true: The ground truth values.
      y_pred: The prediction values.
      axis: (Optional) -1 is the dimension along which the cosine
        similarity is computed. Defaults to `-1`.

    Returns:
      Cosine similarity value.
    r   )r   linalgl2_normalizer   )r+   r,   rC   s      r   rD   rD   d  sK     YY##F#6FYY##F#6F==&t44r   )rE   )'r;   r   tensorflow.compat.v2compatv2r   tf_keras.srcr   tf_keras.src.dtensorr   r<   tf_keras.src.lossesr   r   r   r   r	   tf_keras.src.metricsr
   tf_keras.src.utilsr   r   tf_keras.src.utils.tf_utilsr    tensorflow.python.util.tf_exportr   Meanr   MeanMetricWrapperrA   rH   rN   rS   rV   rY   re   Metricrh   rD    r   r   <module>r      s   ,  ! !   7 ' 3 > 2 > , + , = : /0VF(( VF 1VFr ./.J{44 .J 0.Jb /0 A55  A 1 AF 9:!L+"?"? !L ;!LH ./ @{44  @ 0 @F 9:!L+"?"? !L ;!LH 234F;++ 4F 44Fn *+"5;00 "5 ,"5L %&~)k   ~) '~)B5r   