
"""Accuracy metrics."""

import tensorflow.compat.v2 as tf

from tf_keras.src import backend
from tf_keras.src.dtensor import utils as dtensor_utils
from tf_keras.src.metrics import base_metric
from tf_keras.src.utils import metrics_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.metrics.Accuracy")
class Accuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions equal labels.

This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `accuracy`: an idempotent
operation that simply divides `total` by `count`.

If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.

Args:
  name: (Optional) string name of the metric instance.
  dtype: (Optional) data type of the metric result.

Standalone usage:

>>> m = tf.keras.metrics.Accuracy()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
>>> m.result().numpy()
0.75

>>> m.reset_state()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
...                sample_weight=[1, 1, 0, 0])
>>> m.result().numpy()
0.5

Usage with `compile()` API:

```python
model.compile(optimizer='sgd',
              loss='mse',
              metrics=[tf.keras.metrics.Accuracy()])
```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="accuracy", dtype=None):
        super().__init__(accuracy, name, dtype=dtype)


@keras_export("keras.metrics.BinaryAccuracy")
class BinaryAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match binary labels.

This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `binary accuracy`: an idempotent
operation that simply divides `total` by `count`.

If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.

Args:
  name: (Optional) string name of the metric instance.
  dtype: (Optional) data type of the metric result.
  threshold: (Optional) Float representing the threshold for deciding
    whether prediction values are 1 or 0. See the `threshold` example
    under "Standalone usage" below.

Standalone usage:

>>> m = tf.keras.metrics.BinaryAccuracy()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
>>> m.result().numpy()
0.75

>>> m.reset_state()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
...                sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
0.5
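
A stricter `threshold` changes how probabilities are binarized before
comparison; a minimal illustration:

>>> m = tf.keras.metrics.BinaryAccuracy(threshold=0.9)
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
>>> m.result().numpy()
1.0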

Usage with `compile()` API:

```python
model.compile(optimizer='sgd',
              loss='mse',
              metrics=[tf.keras.metrics.BinaryAccuracy()])
```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="binary_accuracy", dtype=None, threshold=0.5):
        super().__init__(
            metrics_utils.binary_matches,
            name,
            dtype=dtype,
            threshold=threshold,
        )


@keras_export("keras.metrics.CategoricalAccuracy")
class CategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match one-hot labels.

You can provide logits of classes as `y_pred`, since the argmax of
logits and probabilities is the same.

This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `categorical accuracy`: an idempotent
operation that simply divides `total` by `count`.

`y_pred` and `y_true` should be passed in as vectors of probabilities,
rather than as labels. If necessary, use `tf.one_hot` to expand `y_true` as
a vector.
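
For example, integer labels can be expanded into the expected one-hot
form with `tf.one_hot` (a minimal illustration):

>>> tf.one_hot([2, 1], depth=3).numpy()
array([[0., 0., 1.],
       [0., 1., 0.]], dtype=float32)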

If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.

Args:
  name: (Optional) string name of the metric instance.
  dtype: (Optional) data type of the metric result.

Standalone usage:

>>> m = tf.keras.metrics.CategoricalAccuracy()
>>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
...                 [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5

>>> m.reset_state()
>>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
...                 [0.05, 0.95, 0]],
...                sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3

Usage with `compile()` API:

```python
model.compile(
  optimizer='sgd',
  loss='mse',
  metrics=[tf.keras.metrics.CategoricalAccuracy()])
```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="categorical_accuracy", dtype=None):
        super().__init__(
            lambda y_true, y_pred: metrics_utils.sparse_categorical_matches(
                tf.math.argmax(y_true, axis=-1), y_pred
            ),
            name,
            dtype=dtype,
        )


@keras_export("keras.metrics.SparseCategoricalAccuracy")
class SparseCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match integer labels.

```python
acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)))
```
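
Evaluating that expression directly shows the weighted match score (a
minimal illustration; assumes `numpy` is imported as `np`):

>>> y_true = np.array([2, 1])
>>> y_pred = np.array([[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
>>> float(np.dot([0.7, 0.3], np.equal(y_true, np.argmax(y_pred, axis=1))))
0.3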

You can provide logits of classes as `y_pred`, since the argmax of
logits and probabilities is the same.

This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `sparse categorical accuracy`: an
idempotent operation that simply divides `total` by `count`.

If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.

Args:
  name: (Optional) string name of the metric instance.
  dtype: (Optional) data type of the metric result.

Standalone usage:

>>> m = tf.keras.metrics.SparseCategoricalAccuracy()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5

>>> m.reset_state()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
...                sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3

Usage with `compile()` API:

```python
model.compile(
    optimizer='sgd',
    loss='mse',
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="sparse_categorical_accuracy", dtype=None):
        super().__init__(
            metrics_utils.sparse_categorical_matches, name, dtype=dtype
        )


_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING = """Accumulates metric statistics.

For sparse categorical metrics, the shapes of `y_true` and `y_pred` are
different.

Args:
  y_true: Ground truth label values. shape = `[batch_size, d0, .. dN-1]` or
    shape = `[batch_size, d0, .. dN-1, 1]`.
  y_pred: The predicted probability values. shape = `[batch_size, d0, .. dN]`.
  sample_weight: Optional `sample_weight` acts as a
    coefficient for the metric. If a scalar is provided, then the metric is
    simply scaled by the given value. If `sample_weight` is a tensor of size
    `[batch_size]`, then the metric for each sample of the batch is rescaled
    by the corresponding element in the `sample_weight` vector. If the shape
    of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
    to this shape), then each metric element of `y_pred` is scaled by the
    corresponding value of `sample_weight`. (Note on `dN-1`: all metric
    functions reduce by 1 dimension, usually the last axis (-1)).

Returns:
  Update op.
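
For example, a `[batch_size]` weight vector rescales each sample's match
before averaging (a minimal illustration):

>>> m = tf.keras.metrics.SparseCategoricalAccuracy()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
...                sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3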
"""

SparseCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)


@keras_export("keras.metrics.TopKCategoricalAccuracy")
class TopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often targets are in the top `K` predictions.

Args:
  k: (Optional) Number of top elements to look at for computing accuracy.
    Defaults to `5`.
  name: (Optional) string name of the metric instance.
  dtype: (Optional) data type of the metric result.

Standalone usage:

>>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5

>>> m.reset_state()
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
...                sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
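
With `k=2`, a target counts as correct whenever it is among the two
highest-scoring classes (a minimal illustration):

>>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=2)
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
1.0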

Usage with `compile()` API:

```python
model.compile(optimizer='sgd',
              loss='mse',
              metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
        super().__init__(
            lambda yt, yp, k: metrics_utils.sparse_top_k_categorical_matches(
                tf.math.argmax(yt, axis=-1), yp, k
            ),
            name,
            dtype=dtype,
            k=k,
        )


@keras_export("keras.metrics.SparseTopKCategoricalAccuracy")
class SparseTopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often integer targets are in the top `K` predictions.

Args:
  k: (Optional) Number of top elements to look at for computing accuracy.
    Defaults to `5`.
  name: (Optional) string name of the metric instance.
  dtype: (Optional) data type of the metric result.

Standalone usage:

>>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5

>>> m.reset_state()
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
...                sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3

Usage with `compile()` API:

```python
model.compile(
  optimizer='sgd',
  loss='mse',
  metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self, k=5, name="sparse_top_k_categorical_accuracy", dtype=None
    ):
        super().__init__(
            metrics_utils.sparse_top_k_categorical_matches,
            name,
            dtype=dtype,
            k=k,
        )


SparseTopKCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)


def accuracy(y_true, y_pred):
    # Flatten ragged inputs, then verify the two tensors are
    # elementwise-comparable before casting the match mask to floatx.
    [
        y_pred,
        y_true,
    ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [y_pred, y_true]
    )
    y_true.shape.assert_is_compatible_with(y_pred.shape)
    if y_true.dtype != y_pred.dtype:
        y_pred = tf.cast(y_pred, y_true.dtype)
    return tf.cast(tf.equal(y_true, y_pred), backend.floatx())


@keras_export("keras.metrics.binary_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def binary_accuracy(y_true, y_pred, threshold=0.5):
    """Calculates how often predictions match binary labels.

Standalone usage:
>>> y_true = [[1], [1], [0], [0]]
>>> y_pred = [[1], [1], [0], [0]]
>>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
>>> assert m.shape == (4,)
>>> m.numpy()
array([1., 1., 1., 1.], dtype=float32)
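
With a custom `threshold`, predictions are binarized before comparison
(a minimal illustration):

>>> m = tf.keras.metrics.binary_accuracy([[1.], [0.]], [[0.6], [0.4]],
...                                      threshold=0.5)
>>> m.numpy()
array([1., 1.], dtype=float32)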

Args:
  y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
  y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
  threshold: (Optional) Float representing the threshold for deciding
    whether prediction values are 1 or 0.

Returns:
  Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
    """
    # Reduce along the last axis so the public function returns one
    # accuracy value per sample.
    return tf.reduce_mean(
        metrics_utils.binary_matches(y_true, y_pred, threshold), axis=-1
    )


@keras_export("keras.metrics.categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match one-hot labels.

Standalone usage:
>>> y_true = [[0, 0, 1], [0, 1, 0]]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)
>>> assert m.shape == (2,)
>>> m.numpy()
array([0., 1.], dtype=float32)

You can provide logits of classes as `y_pred`, since the argmax of
logits and probabilities is the same.
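
Passing logits gives the same result as probabilities, since only the
argmax is compared (a minimal illustration):

>>> m = tf.keras.metrics.categorical_accuracy([[0, 0, 1], [0, 1, 0]],
...                                           [[1., 9., 8.], [1., 9., 0.]])
>>> m.numpy()
array([0., 1.], dtype=float32)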

Args:
  y_true: One-hot ground truth values.
  y_pred: The prediction values.

Returns:
  Categorical accuracy values.
    """
    return metrics_utils.sparse_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred
    )


@keras_export("keras.metrics.sparse_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match integer labels.

Standalone usage:
>>> y_true = [2, 1]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
>>> assert m.shape == (2,)
>>> m.numpy()
array([0., 1.], dtype=float32)

You can provide logits of classes as `y_pred`, since the argmax of
logits and probabilities is the same.
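
Logits therefore produce the same matches as probabilities (a minimal
illustration):

>>> m = tf.keras.metrics.sparse_categorical_accuracy(
...     [2, 1], [[1., 9., 8.], [1., 9., 0.]])
>>> m.numpy()
array([0., 1.], dtype=float32)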

Args:
  y_true: Integer ground truth values.
  y_pred: The prediction values.

Returns:
  Sparse categorical accuracy values.
    """
    matches = metrics_utils.sparse_categorical_matches(y_true, y_pred)

    # If the result has shape (num_samples, 1), squeeze to (num_samples,).
    if matches.shape.ndims > 1 and matches.shape[-1] == 1:
        matches = tf.squeeze(matches, [-1])

    return matches


@keras_export("keras.metrics.top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often targets are in the top `K` predictions.

Standalone usage:
>>> y_true = [[0, 0, 1], [0, 1, 0]]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
>>> assert m.shape == (2,)
>>> m.numpy()
array([1., 1.], dtype=float32)

Args:
  y_true: The ground truth values.
  y_pred: The prediction values.
  k: (Optional) Number of top elements to look at for computing accuracy.
    Defaults to `5`.

Returns:
  Top K categorical accuracy value.
    """
    return metrics_utils.sparse_top_k_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred, k
    )


@keras_export("keras.metrics.sparse_top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often integer targets are in the top `K` predictions.

Standalone usage:
>>> y_true = [2, 1]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(
...     y_true, y_pred, k=3)
>>> assert m.shape == (2,)
>>> m.numpy()
array([1., 1.], dtype=float32)

Args:
  y_true: tensor of true targets.
  y_pred: tensor of predicted targets.
  k: (Optional) Number of top elements to look at for computing accuracy.
    Defaults to `5`.

Returns:
  Sparse top K categorical accuracy value.
    """
    return metrics_utils.sparse_top_k_categorical_matches(y_true, y_pred, k)
[22 +
 .+
\ 127
+77 7
 37
t 781
 = = 1
 91
h. *0 /  & & .
 56*
k;; *
 7*
Z ;<)
K$A$A )
 =)
Z /  * * 2

? -... / /< 23.. / 4< 9:..  / ; F 89.. / :: ?@..M / AMr   