"""Activation modules."""

import torch
import torch.nn as nn


class AGLU(nn.Module):
    """
    Unified activation function module from AGLU.

    This class implements a parameterized activation function with learnable parameters lambda and kappa, based on
    the AGLU (Adaptive Gated Linear Unit) approach.

    Attributes:
        act (nn.Softplus): Softplus activation function with negative beta.
        lambd (nn.Parameter): Learnable lambda parameter initialized with uniform distribution.
        kappa (nn.Parameter): Learnable kappa parameter initialized with uniform distribution.

    Methods:
        forward: Compute the forward pass of the Unified activation function.

    Examples:
        >>> import torch
        >>> m = AGLU()
        >>> input = torch.randn(2)
        >>> output = m(input)
        >>> print(output.shape)
        torch.Size([2])

    References:
        https://github.com/kostas1515/AGLU
    """
returnc           
        > [         TU ]  5         [        R                  " SS9U l        [        R
                  " [        R                  R                  [        R                  " SXS95      5      U l
        [        R
                  " [        R                  R                  [        R                  " SXS95      5      U l        g)zEInitialize the Unified activation function with learnable parameters.g      )beta   )devicedtypeN)super__init__nnSoftplusact	Parameterinituniform_torchemptylambdkappa)selfr
   r   	__class__s      [/home/james-whalen/.local/lib/python3.13/site-packages/ultralytics/nn/modules/activation.pyr   AGLU.__init__#   sr    ;;D)\\"''"2"25;;q3]"^_
\\"''"2"25;;q3]"^_

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Apply the Adaptive Gated Linear Unit (AGLU) activation function.

        This forward method implements the AGLU activation function with learnable parameters lambda and kappa.
        The function applies a transformation that adaptively combines linear and non-linear components.

        Args:
            x (torch.Tensor): Input tensor to apply the activation function to.

        Returns:
            (torch.Tensor): Output tensor after applying the AGLU activation function, with the same shape as the
                input.
        """
        lam = torch.clamp(self.lambd, min=0.0001)  # clamp lambda away from zero so 1 / lam and log(lam) stay finite
        return torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
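
# A minimal usage sketch (illustration only, not part of the original module).
# It assumes nothing beyond the AGLU class above and standard PyTorch; it checks
# that the activation is elementwise and that both parameters receive gradients.
if __name__ == "__main__":
    m = AGLU()
    x = torch.randn(2, 3, requires_grad=True)
    y = m(x)
    print(y.shape)  # torch.Size([2, 3]) -- same shape as the input
    y.sum().backward()
    print(m.lambd.grad is not None, m.kappa.grad is not None)  # True True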