
"""Built-in activation functions."""

import sys
import types

import tensorflow.compat.v2 as tf

import tf_keras.src.layers.activation as activation_layers
from tf_keras.src import backend
from tf_keras.src.saving import object_registration
from tf_keras.src.saving import serialization_lib
from tf_keras.src.saving.legacy import serialization as legacy_serialization
from tf_keras.src.saving.legacy.saved_model import utils as saved_model_utils
from tf_keras.src.utils import generic_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export

# In TF 2.x, `tf.nn.softmax` serializes under its internal name `softmax_v2`;
# map such names back to their canonical TF-Keras names so that serialized
# models can be loaded again.
_TF_ACTIVATIONS_V2 = {
    "softmax_v2": "softmax",
}


@keras_export("keras.activations.softmax")
@tf.__internal__.dispatch.add_dispatch_support
def softmax(x, axis=-1):
    """Softmax converts a vector of values to a probability distribution.

The elements of the output vector are in range (0, 1) and sum to 1.

Each vector is handled independently. The `axis` argument sets which axis
of the input the function is applied along.

Softmax is often used as the activation for the last
layer of a classification network because the result could be interpreted as
a probability distribution.

The softmax of each vector `x` is computed as
`exp(x) / tf.reduce_sum(exp(x))`.

The input values are the log-odds of the resulting probability.

Args:
    x: Input tensor.
    axis: Integer, axis along which the softmax normalization is applied.

Returns:
    Tensor, output of softmax transformation (all values are non-negative
        and sum to 1).

Examples:

**Example 1: standalone usage**

>>> inputs = tf.random.normal(shape=(32, 10))
>>> outputs = tf.keras.activations.softmax(inputs)
>>> tf.reduce_sum(outputs[0, :])  # Each sample in the batch now sums to 1
<tf.Tensor: shape=(), dtype=float32, numpy=1.0000001>

**Example 2: usage in a `Dense` layer**

>>> layer = tf.keras.layers.Dense(32,
...                               activation=tf.keras.activations.softmax)
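
**Example 3: normalization along a different axis**

>>> outputs = tf.keras.activations.softmax(inputs, axis=0)
>>> # Each column of `inputs` (rather than each row) now sums to 1.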
    """
    return backend.softmax(x, axis)


@keras_export("keras.activations.elu")
@tf.__internal__.dispatch.add_dispatch_support
def elu(x, alpha=1.0):
    """Exponential Linear Unit.

The exponential linear unit (ELU) with `alpha > 0` is:
`x` if `x > 0` and
`alpha * (exp(x) - 1)` if `x < 0`
The ELU hyperparameter `alpha` controls the value to which an
ELU saturates for negative net inputs. ELUs diminish the
vanishing gradient effect.

ELUs have negative values which pushes the mean of the activations
closer to zero.
Mean activations that are closer to zero enable faster learning as they
bring the gradient closer to the natural gradient.
ELUs saturate to a negative value when the argument gets smaller.
Saturation means a small derivative which decreases the variation
and the information that is propagated to the next layer.

Example Usage:

>>> import tensorflow as tf
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu',
...          input_shape=(28, 28, 1)))
>>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
>>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))
>>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
>>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))

Args:
    x: Input tensor.
    alpha: A scalar, slope of negative section. `alpha` controls the value
        to which an ELU saturates for negative net inputs.

Returns:
    The exponential linear unit (ELU) activation function: `x` if `x > 0`
        and `alpha * (exp(x) - 1)` if `x < 0`.

Reference:
    - [Fast and Accurate Deep Network Learning by Exponential Linear Units
    (ELUs) (Clevert et al., 2016)](https://arxiv.org/abs/1511.07289)
    """
    return backend.elu(x, alpha)


@keras_export("keras.activations.selu")
@tf.__internal__.dispatch.add_dispatch_support
def selu(x):
    """Scaled Exponential Linear Unit (SELU).

The Scaled Exponential Linear Unit (SELU) activation function is defined as:

- `if x > 0: return scale * x`
- `if x < 0: return scale * alpha * (exp(x) - 1)`

where `alpha` and `scale` are pre-defined constants
(`alpha=1.67326324` and `scale=1.05070098`).

Basically, the SELU activation function multiplies `scale` (> 1) with the
output of the `tf.keras.activations.elu` function to ensure a slope larger
than one for positive inputs.

The values of `alpha` and `scale` are
chosen so that the mean and variance of the inputs are preserved
between two consecutive layers as long as the weights are initialized
correctly (see `tf.keras.initializers.LecunNormal` initializer)
and the number of input units is "large enough"
(see reference paper for more information).

Example Usage:

>>> num_classes = 10  # 10-class problem
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal',
...                                 activation='selu'))
>>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal',
...                                 activation='selu'))
>>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal',
...                                 activation='selu'))
>>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))

Args:
    x: A tensor or variable to compute the activation function for.

Returns:
    The scaled exponential linear unit activation: `scale * elu(x, alpha)`.

Notes:
    - To be used together with the
        `tf.keras.initializers.LecunNormal` initializer.
    - To be used together with the dropout variant
        `tf.keras.layers.AlphaDropout` (not regular dropout).
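    - For large negative inputs, SELU saturates at `-scale * alpha`
        (approximately `-1.7581`).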

References:
    - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
    """
    return tf.nn.selu(x)


@keras_export("keras.activations.softplus")
@tf.__internal__.dispatch.add_dispatch_support
def softplus(x):
    """Softplus activation function, `softplus(x) = log(exp(x) + 1)`.

Example Usage:

>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.softplus(a)
>>> b.numpy()
array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,
         2.0000000e+01], dtype=float32)

Args:
    x: Input tensor.

Returns:
    The softplus activation: `log(exp(x) + 1)`.
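
Softplus is a smooth approximation of `relu`; its derivative is the
logistic `sigmoid` function.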
    """
    return tf.math.softplus(x)


@keras_export("keras.activations.softsign")
@tf.__internal__.dispatch.add_dispatch_support
def softsign(x):
    """Softsign activation function, `softsign(x) = x / (abs(x) + 1)`.

Example Usage:

>>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)
>>> b = tf.keras.activations.softsign(a)
>>> b.numpy()
array([-0.5,  0. ,  0.5], dtype=float32)

Args:
    x: Input tensor.

Returns:
    The softsign activation: `x / (abs(x) + 1)`.
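
Unlike `tanh`, which saturates exponentially, softsign approaches its
asymptotes at -1 and 1 only polynomially, so it saturates more gently.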
    """
    return tf.math.softsign(x)


@keras_export("keras.activations.swish")
@tf.__internal__.dispatch.add_dispatch_support
def swish(x):
    """Swish activation function, `swish(x) = x * sigmoid(x)`.

Swish activation function which returns `x*sigmoid(x)`.
It is a smooth, non-monotonic function that consistently matches
or outperforms ReLU on deep networks. It is unbounded above and
bounded below.

Example Usage:

>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.swish(a)
>>> b.numpy()
array([-4.1223075e-08, -2.6894143e-01,  0.0000000e+00,  7.3105860e-01,
          2.0000000e+01], dtype=float32)

Args:
    x: Input tensor.

Returns:
    The swish activation applied to `x` (see reference paper for details).

Reference:
    - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
    """
    return tf.nn.silu(x)


@keras_export("keras.activations.relu")
@tf.__internal__.dispatch.add_dispatch_support
def relu(x, alpha=0.0, max_value=None, threshold=0.0):
    """Applies the rectified linear unit activation function.

With default values, this returns the standard ReLU activation:
`max(x, 0)`, the element-wise maximum of 0 and the input tensor.

Modifying default parameters allows you to use non-zero thresholds,
change the max value of the activation,
and to use a non-zero multiple of the input for values below the threshold.

Example:

>>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32)
>>> tf.keras.activations.relu(foo).numpy()
array([ 0.,  0.,  0.,  5., 10.], dtype=float32)
>>> tf.keras.activations.relu(foo, alpha=0.5).numpy()
array([-5. , -2.5,  0. ,  5. , 10. ], dtype=float32)
>>> tf.keras.activations.relu(foo, max_value=5.).numpy()
array([0., 0., 0., 5., 5.], dtype=float32)
>>> tf.keras.activations.relu(foo, threshold=5.).numpy()
array([-0., -0.,  0.,  0., 10.], dtype=float32)
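
The parameters compose: for example, with `alpha=0.5` and `threshold=5.`,
values below the threshold become `alpha * (x - threshold)`.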

Args:
    x: Input `tensor` or `variable`.
    alpha: A `float` that governs the slope for values lower than the
        threshold.
    max_value: A `float` that sets the saturation threshold (the largest
        value the function will return).
    threshold: A `float` giving the threshold value of the activation
        function below which values will be damped or set to zero.

Returns:
    A `Tensor` representing the input tensor, transformed by the relu
    activation function. Tensor will be of the same shape and dtype as
    input `x`.
    """
    return backend.relu(
        x, alpha=alpha, max_value=max_value, threshold=threshold
    )


@keras_export("keras.activations.gelu", v1=[])
@tf.__internal__.dispatch.add_dispatch_support
def gelu(x, approximate=False):
    """Applies the Gaussian error linear unit (GELU) activation function.

Gaussian error linear unit (GELU) computes
`x * P(X <= x)`, where `P(X) ~ N(0, 1)`.
The (GELU) nonlinearity weights inputs by their value, rather than gates
inputs by their sign as in ReLU.

Example:

>>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
>>> y = tf.keras.activations.gelu(x)
>>> y.numpy()
array([-0.00404951, -0.15865529,  0.        ,  0.8413447 ,  2.9959507 ],
    dtype=float32)
>>> y = tf.keras.activations.gelu(x, approximate=True)
>>> y.numpy()
array([-0.00363752, -0.15880796,  0.        ,  0.841192  ,  2.9963627 ],
    dtype=float32)

Args:
    x: Input tensor.
    approximate: A `bool`, whether to enable approximation.

Returns:
    The Gaussian error linear activation:
    `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`
    if `approximate` is `True` or
    `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,
    where `P(X) ~ N(0, 1)`,
    if `approximate` is `False`.

Reference:
    - [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
    """
    return tf.nn.gelu(x, approximate)


@keras_export("keras.activations.tanh")
@tf.__internal__.dispatch.add_dispatch_support
def tanh(x):
    """Hyperbolic tangent activation function.

Example:

>>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype = tf.float32)
>>> b = tf.keras.activations.tanh(a)
>>> b.numpy()
array([-0.9950547, -0.7615942,  0.,  0.7615942,  0.9950547], dtype=float32)

Args:
    x: Input tensor.

Returns:
    Tensor of same shape and dtype of input `x`, with tanh activation:
    `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.
    """
    return tf.tanh(x)


@keras_export("keras.activations.sigmoid")
@tf.__internal__.dispatch.add_dispatch_support
def sigmoid(x):
    """Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`.

Applies the sigmoid activation function. For small values (<-5),
`sigmoid` returns a value close to zero, and for large values (>5)
the result of the function gets close to 1.

Sigmoid is equivalent to a 2-element Softmax, where the second element is
assumed to be zero. The sigmoid function always returns a value between
0 and 1.

Example:

>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.sigmoid(a)
>>> b.numpy()
array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01,
         1.0000000e+00], dtype=float32)

Args:
    x: Input tensor.

Returns:
    Tensor with the sigmoid activation: `1 / (1 + exp(-x))`.
    """
    return tf.sigmoid(x)


@keras_export("keras.activations.exponential")
@tf.__internal__.dispatch.add_dispatch_support
def exponential(x):
    """Exponential activation function.

Example:

>>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype = tf.float32)
>>> b = tf.keras.activations.exponential(a)
>>> b.numpy()
array([0.04978707,  0.36787945,  1.,  2.7182817 , 20.085537], dtype=float32)

Args:
    x: Input tensor.

Returns:
    Tensor with exponential activation: `exp(x)`.
    """
    return tf.exp(x)


@keras_export("keras.activations.hard_sigmoid")
@tf.__internal__.dispatch.add_dispatch_support
def hard_sigmoid(x):
    """Hard sigmoid activation function.

A faster, piecewise linear approximation of the sigmoid activation.
Ref: https://en.wikipedia.org/wiki/Hard_sigmoid

Example:

>>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype = tf.float32)
>>> b = tf.keras.activations.hard_sigmoid(a)
>>> b.numpy()
array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32)

Args:
    x: Input tensor.

Returns:
    The hard sigmoid activation, defined as:

        - `if x < -2.5: return 0`
        - `if x > 2.5: return 1`
        - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`
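
Equivalently: `hard_sigmoid(x) = max(0, min(1, 0.2 * x + 0.5))`.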
    """
    return backend.hard_sigmoid(x)


@keras_export("keras.activations.linear")
@tf.__internal__.dispatch.add_dispatch_support
def linear(x):
    """Linear activation function (pass-through).

Example:

>>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype = tf.float32)
>>> b = tf.keras.activations.linear(a)
>>> b.numpy()
array([-3., -1.,  0.,  1.,  3.], dtype=float32)

Args:
    x: Input tensor.

Returns:
    The input, unmodified.
    """
    return x


@keras_export("keras.activations.mish")
@tf.__internal__.dispatch.add_dispatch_support
def mish(x):
    """Mish activation function.

It is defined as:

```python
def mish(x):
    return x * tanh(softplus(x))
```

where `softplus` is defined as:

```python
def softplus(x):
    return log(exp(x) + 1)
```
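
so that `mish(x) = x * tanh(log(exp(x) + 1))`.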

Example:

>>> a = tf.constant([-3.0, -1.0, 0.0, 1.0], dtype = tf.float32)
>>> b = tf.keras.activations.mish(a)
>>> b.numpy()
array([-0.14564745, -0.30340144,  0.,  0.86509836], dtype=float32)

Args:
    x: Input tensor.

Returns:
    The mish activation.

Reference:
    - [Mish: A Self Regularized Non-Monotonic
    Activation Function](https://arxiv.org/abs/1908.08681)
    """
    return x * tf.math.tanh(tf.math.softplus(x))


@keras_export("keras.activations.serialize")
@tf.__internal__.dispatch.add_dispatch_support
def serialize(activation, use_legacy_format=False):
    """Returns the string identifier of an activation function.

    Args:
        activation: Function object.
        use_legacy_format: Boolean, whether to use the legacy format for
            serialization. Defaults to False.

    Returns:
        String denoting the name attribute of the input function.

    Example:

    >>> tf.keras.activations.serialize(tf.keras.activations.tanh)
    'tanh'
    >>> tf.keras.activations.serialize(tf.keras.activations.sigmoid)
    'sigmoid'
    >>> tf.keras.activations.serialize('abcd')
    Traceback (most recent call last):
    ...
    ValueError: Unknown activation function 'abcd' cannot be serialized.

    Raises:
        ValueError: The input function is not a valid one.
    """
    if (
        hasattr(activation, "__name__")
        and activation.__name__ in _TF_ACTIVATIONS_V2
    ):
        # TF 2.x activations such as `softmax_v2` serialize under their
        # canonical TF-Keras names.
        return _TF_ACTIVATIONS_V2[activation.__name__]

    if use_legacy_format:
        return legacy_serialization.serialize_keras_object(activation)

    fn_config = serialization_lib.serialize_keras_object(activation)
    if (
        not tf.__internal__.tf2.enabled()
        or saved_model_utils.in_tf_saved_model_scope()
    ):
        return fn_config
    if "config" not in fn_config:
        raise ValueError(
            f"Unknown activation function '{activation}' cannot be "
            "serialized due to invalid function name. Make sure to use "
            "an activation name that matches the references defined in "
            "activations.py or use "
            "`@keras.saving.register_keras_serializable()` "
            "to register any custom activations. "
            f"config={fn_config}"
        )
    if not isinstance(activation, types.FunctionType):
        # Case for custom activations represented by objects rather than
        # plain functions: return the full config.
        return fn_config
    if (
        isinstance(fn_config["config"], str)
        and fn_config["config"] not in globals()
    ):
        # Case for custom activation functions defined outside this module:
        # return their registered name.
        fn_config["config"] = object_registration.get_registered_name(
            activation
        )
        return fn_config
    # Case for built-in activations: simply return the name.
    return fn_config["config"]


# Add additional globals so that `deserialize()` can find these common
# `tf.nn` activations by name.
leaky_relu = tf.nn.leaky_relu
log_softmax = tf.nn.log_softmax
relu6 = tf.nn.relu6
silu = tf.nn.silu


@keras_export("keras.activations.deserialize")
@tf.__internal__.dispatch.add_dispatch_support
def deserialize(name, custom_objects=None, use_legacy_format=False):
    """Returns activation function given a string identifier.

    Args:
        name: The name of the activation function.
        custom_objects: Optional `{function_name: function_obj}`
            dictionary listing user-provided activation functions.
        use_legacy_format: Boolean, whether to use the legacy format for
            deserialization. Defaults to False.

    Returns:
        Corresponding activation function.

    Example:

    >>> tf.keras.activations.deserialize('linear')
     <function linear at 0x1239596a8>
    >>> tf.keras.activations.deserialize('sigmoid')
     <function sigmoid at 0x123959510>
    >>> tf.keras.activations.deserialize('abcd')
    Traceback (most recent call last):
    ...
    ValueError: Unknown activation function 'abcd' cannot be deserialized.

    Raises:
        ValueError: `Unknown activation function` if the input string does
            not denote any defined TensorFlow activation function.
    """
    activation_functions = {}
    current_module = sys.modules[__name__]

    # Collect every callable defined in this module (and in the activation
    # layers) so that string names can be resolved to functions.
    generic_utils.populate_dict_with_module_objects(
        activation_functions,
        (current_module, activation_layers),
        obj_filter=callable,
    )

    if use_legacy_format:
        return legacy_serialization.deserialize_keras_object(
            name,
            module_objects=activation_functions,
            custom_objects=custom_objects,
            printable_module_name="activation function",
        )

    returned_fn = serialization_lib.deserialize_keras_object(
        name,
        module_objects=activation_functions,
        custom_objects=custom_objects,
        printable_module_name="activation function",
    )

    if isinstance(returned_fn, str):
        raise ValueError(
            f"Unknown activation function '{name}' cannot be deserialized."
        )

    return returned_fn


@keras_export("keras.activations.get")
@tf.__internal__.dispatch.add_dispatch_support
def get(identifier):
    """Returns function.

    Args:
        identifier: Function or string.

    Returns:
        Function corresponding to the input string or input function.

    Example:

    >>> tf.keras.activations.get('softmax')
     <function softmax at 0x1222a3d90>
    >>> tf.keras.activations.get(tf.keras.activations.softmax)
     <function softmax at 0x1222a3d90>
    >>> tf.keras.activations.get(None)
     <function linear at 0x1239596a8>
    >>> tf.keras.activations.get(abs)
     <built-in function abs>
    >>> tf.keras.activations.get('abcd')
    Traceback (most recent call last):
    ...
    ValueError: Unknown activation function:abcd

    Raises:
        ValueError: Input is an unknown function or string, i.e., the input
            does not denote any defined function.
    """
    if identifier is None:
        return linear
    if isinstance(identifier, (str, dict)):
        use_legacy_format = (
            "module" not in identifier
            if isinstance(identifier, dict)
            else False
        )
        return deserialize(identifier, use_legacy_format=use_legacy_format)
    elif callable(identifier):
        return identifier
    raise TypeError(
        f"Could not interpret activation function identifier: {identifier}"
    )