
    6biqD                       S r SSKJr  SSKJr  SSKJr  SSKrSSKJs  Jr	  SSK
Jr  SSKJr  SSKJr  \" S	/S
9 " S S\R"                  \R$                  5      5       r\" S/S
9SSSSSSS\	R                  R&                  R)                  5       SSSSSSSS4S j5       r\" S/S
9 " S S\R,                  \R$                  5      5       r\" S/S
9SSSSSSS\	R                  R&                  R)                  5       SSSSSSSS4S j5       r\" S/S
9 " S S\R0                  \R$                  5      5       r\" S/S
9SSSSSSS\	R                  R&                  R)                  5       SSSSSSSS4S j5       r\" S/S
9 " S  S!\R4                  \R$                  5      5       r\" S"/S
9 " S# S$\R6                  \R$                  5      5       r\" S%/S
9SSSSSSSSS\	R                  R&                  R)                  5       SSSSSSSSSS4S& j5       r\" S'/S
9SSSSSSSSS\	R                  R&                  R)                  5       SSSSSSSSSS4S( j5       r\" S)/S
9 " S* S+\R<                  \R$                  5      5       r\" S,/S
9SSSSSS\	R                  R&                  R)                  5       SSSSSSSS4S- j5       r\" S./S
9 " S/ S0\R@                  \R$                  5      5       r \" S1/S
9SSSSSS\	R                  R&                  R)                  5       SSSSSSSS4S2 j5       r!\r"\r#\r$\r%\=r&=r'r(\ =r)=r*r+\r,\r-\r.\r/\=r0=r1r2\!=r3=r4r5g)3zFContains the convolutional layer classes and their functional aliases.    )absolute_import)division)print_functionN)layers)base)keras_exportz'keras.__internal__.legacy.layers.Conv1D)v1c                      ^  \ rS rSrSrSSSSSSS\R                  R                  R                  5       SSSSSSS4U 4S jjr	S	r
U =r$ )
Conv1D    a  1D convolution layer (e.g. temporal convolution).

This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.

Args:
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: An integer or tuple/list of a single integer, specifying the
    length of the 1D convolution window.
  strides: An integer or tuple/list of a single integer,
    specifying the stride length of the convolution.
    Specifying any stride value != 1 is incompatible with specifying
    any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, length, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, length)`.
  dilation_rate: An integer or tuple/list of a single integer, specifying
    the dilation rate to use for dilated convolution.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any `strides` value != 1.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.

@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv1D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 conv = tf.compat.v1.layers.Conv1D(filters=3, kernel_size=3)
```

After:

```python
 conv = tf.keras.layers.Conv1D(filters=3, kernels_size=3)
```
@end_compatibility
   validchannels_lastNTc                    > [         TU ]  " S0 SU_SU_SU_SU_SU_SU_SU_SU_S	U	_S
U
_SU_SU_SU_SU_SU_SU_SU_UD6  g Nfilterskernel_sizestridespaddingdata_formatdilation_rate
activationuse_biaskernel_initializerbias_initializerkernel_regularizerbias_regularizeractivity_regularizerkernel_constraintbias_constraint	trainablename super__init__selfr   r   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   kwargs	__class__s                      e/home/james-whalen/.local/lib/python3.13/site-packages/tf_keras/src/legacy_tf_layers/convolutional.pyr&   Conv1D.__init__s       * 	 	
	
#	
 	
 		

 $	
 (	
 "	
 	
  2	
 .	
  2	
 .	
 "6	
 0	
 ,	
   !	
" %	
    r#   __name__
__module____qualname____firstlineno____doc__tfcompatr	   zeros_initializerr&   __static_attributes____classcell__r*   s   @r+   r   r       sT    Oj #779!%(
 (
r.   r   z'keras.__internal__.legacy.layers.conv1dr   r   r   Tc                     [         R                  " SSS9  [        S0 SU_SU_SU_SU_SU_S	U_S
U_SU_SU	_SU
_SU_SU_SU_SU_SU_SU_SU_SU_SU_6nU" U 5      $ )a  Functional interface for 1D convolution (e.g. temporal convolution).

This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.

Args:
  inputs: Tensor input.
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: An integer or tuple/list of a single integer, specifying the
    length of the 1D convolution window.
  strides: An integer or tuple/list of a single integer,
    specifying the stride length of the convolution.
    Specifying any stride value != 1 is incompatible with specifying
    any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, length, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, length)`.
  dilation_rate: An integer or tuple/list of a single integer, specifying
    the dilation rate to use for dilated convolution.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any `strides` value != 1.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.
  reuse: Boolean, whether to reuse the weights of a previous layer
    by the same name.

Returns:
  Output tensor.

Raises:
  ValueError: if eager execution is enabled.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv1D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 y = tf.compat.v1.layers.conv1d(x, filters=3, kernel_size=3)
```

After:

To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/tf_keras/functional):

```python
 x = tf.keras.Input((28, 28, 1))
 y = tf.keras.layers.Conv1D(filters=3, kernels_size=3)(x)
 model = tf.keras.Model(x, y)
```
@end_compatibility
zv`tf.layers.conv1d` is deprecated and will be removed in a future version. Please Use `tf.keras.layers.Conv1D` instead.   
stacklevelr   r   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   _reuse_scoper#   )warningswarnr   inputsr   r   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   reuselayers                       r+   conv1drG      s    j MM	7 	    	
   $   . * . * 2 , (  !" #$ %& 'E* =r.   z'keras.__internal__.legacy.layers.Conv2Dc                      ^  \ rS rSrSrSSSSSSS\R                  R                  R                  5       SSSSSSS4U 4S jjr	S	r
U =r$ )
Conv2Di1  a3  2D convolution layer (e.g. spatial convolution over images).

This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.

Args:
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: An integer or tuple/list of 2 integers, specifying the
    height and width of the 2D convolution window.
    Can be a single integer to specify the same value for
    all spatial dimensions.
  strides: An integer or tuple/list of 2 integers,
    specifying the strides of the convolution along the height and width.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Specifying any stride value != 1 is incompatible with specifying
    any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, height, width, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, height, width)`.

  dilation_rate: An integer or tuple/list of 2 integers, specifying
    the dilation rate to use for dilated convolution.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any stride value != 1.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv2D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 conv = tf.compat.v1.layers.Conv2D(filters=3, kernel_size=3)
```

After:

```python
 conv = tf.keras.layers.Conv2D(filters=3, kernels_size=3)
```
@end_compatibility
r   r   r   r   NTc                    > [         TU ]  " S0 SU_SU_SU_SU_SU_SU_SU_SU_S	U	_S
U
_SU_SU_SU_SU_SU_SU_SU_UD6  g r   r$   r'   s                      r+   r&   Conv2D.__init__  r-   r.   r#   r/   r:   s   @r+   rI   rI   1  sT    Wz #779!%(
 (
r.   rI   z'keras.__internal__.legacy.layers.conv2drJ   c                     [         R                  " SSS9  [        S0 SU_SU_SU_SU_SU_S	U_S
U_SU_SU	_SU
_SU_SU_SU_SU_SU_SU_SU_SU_SU_6nU" U 5      $ )a  Functional interface for the 2D convolution layer.

This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.

Args:
  inputs: Tensor input.
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: An integer or tuple/list of 2 integers, specifying the
    height and width of the 2D convolution window.
    Can be a single integer to specify the same value for
    all spatial dimensions.
  strides: An integer or tuple/list of 2 integers,
    specifying the strides of the convolution along the height and width.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Specifying any stride value != 1 is incompatible with specifying
    any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, height, width, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, height, width)`.

  dilation_rate: An integer or tuple/list of 2 integers, specifying
    the dilation rate to use for dilated convolution.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any stride value != 1.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.
  reuse: Boolean, whether to reuse the weights of a previous layer
    by the same name.

Returns:
  Output tensor.

Raises:
  ValueError: if eager execution is enabled.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv2D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 y = tf.compat.v1.layers.conv2d(x, filters=3, kernel_size=3)
```

After:

To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/tf_keras/functional):

```python
 x = tf.keras.Input((28, 28, 1))
 y = tf.keras.layers.Conv2D(filters=3, kernels_size=3)(x)
 model = tf.keras.Model(x, y)
```
@end_compatibility
zv`tf.layers.conv2d` is deprecated and will be removed in a future version. Please Use `tf.keras.layers.Conv2D` instead.r<   r=   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   r?   r@   r#   )rA   rB   rI   rC   s                       r+   conv2drN     s    x MM	7 	    	
   $   . * . * 2 , (  !" #$ %& 'E* =r.   z'keras.__internal__.legacy.layers.Conv3Dc                      ^  \ rS rSrSrSSSSSSS\R                  R                  R                  5       SSSSSSS4U 4S jjr	S	r
U =r$ )
Conv3DiQ  aW  3D convolution layer (e.g. spatial convolution over volumes).

This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.

Args:
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: An integer or tuple/list of 3 integers, specifying the
    depth, height and width of the 3D convolution window.
    Can be a single integer to specify the same value for
    all spatial dimensions.
  strides: An integer or tuple/list of 3 integers,
    specifying the strides of the convolution along the depth,
    height and width.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Specifying any stride value != 1 is incompatible with specifying
    any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, depth, height, width, channels)` while `channels_first`
    corresponds to inputs with shape
    `(batch, channels, depth, height, width)`.
  dilation_rate: An integer or tuple/list of 3 integers, specifying
    the dilation rate to use for dilated convolution.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any stride value != 1.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv3D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 conv = tf.compat.v1.layers.Conv3D(filters=3, kernel_size=3)
```

After:

```python
 conv = tf.keras.layers.Conv3D(filters=3, kernels_size=3)
```
@end_compatibility
r   r   r   r   r   NTc                    > [         TU ]  " S0 SU_SU_SU_SU_SU_SU_SU_SU_S	U	_S
U
_SU_SU_SU_SU_SU_SU_SU_UD6  g r   r$   r'   s                      r+   r&   Conv3D.__init__  r-   r.   r#   r/   r:   s   @r+   rP   rP   Q  sT    X| #779!%(
 (
r.   rP   z'keras.__internal__.legacy.layers.conv3drQ   c                     [         R                  " SSS9  [        S0 SU_SU_SU_SU_SU_S	U_S
U_SU_SU	_SU
_SU_SU_SU_SU_SU_SU_SU_SU_SU_6nU" U 5      $ )a  Functional interface for the 3D convolution layer.

This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.

Args:
  inputs: Tensor input.
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: An integer or tuple/list of 3 integers, specifying the
    depth, height and width of the 3D convolution window.
    Can be a single integer to specify the same value for
    all spatial dimensions.
  strides: An integer or tuple/list of 3 integers,
    specifying the strides of the convolution along the depth,
    height and width.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Specifying any stride value != 1 is incompatible with specifying
    any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, depth, height, width, channels)` while `channels_first`
    corresponds to inputs with shape
    `(batch, channels, depth, height, width)`.
  dilation_rate: An integer or tuple/list of 3 integers, specifying
    the dilation rate to use for dilated convolution.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any stride value != 1.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.
  reuse: Boolean, whether to reuse the weights of a previous layer
    by the same name.

Returns:
  Output tensor.

Raises:
  ValueError: if eager execution is enabled.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv3D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 y = tf.compat.v1.layers.conv3d(x, filters=3, kernel_size=3)
```

After:

To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/tf_keras/functional):

```python
 x = tf.keras.Input((28, 28, 1))
 y = tf.keras.layers.Conv3D(filters=3, kernels_size=3)(x)
 model = tf.keras.Model(x, y)
```
@end_compatibility
zv`tf.layers.conv3d` is deprecated and will be removed in a future version. Please Use `tf.keras.layers.Conv3D` instead.r<   r=   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   r?   r@   r#   )rA   rB   rP   rC   s                       r+   conv3drU     s    z MM	7 	    	
   $   . * . * 2 , (  !" #$ %& 'E* =r.   z0keras.__internal__.legacy.layers.SeparableConv1Dc                      ^  \ rS rSrSrSSSSSSSSS\R                  R                  R                  5       SSSSSSSSS4U 4S jjr	S	r
U =r$ )
SeparableConv1Dis  a  Depthwise separable 1D convolution.

This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final
output.

Args:
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: A single integer specifying the spatial
    dimensions of the filters.
  strides: A single integer specifying the strides
    of the convolution.
    Specifying any `stride` value != 1 is incompatible with specifying
    any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, length, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, length)`.
  dilation_rate: A single integer, specifying
    the dilation rate to use for dilated convolution.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any stride value != 1.
  depth_multiplier: The number of depthwise convolution output channels for
    each input channel. The total number of depthwise convolution output
    channels will be equal to `num_filters_in * depth_multiplier`.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  depthwise_initializer: An initializer for the depthwise convolution
    kernel.
  pointwise_initializer: An initializer for the pointwise convolution
    kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  depthwise_regularizer: Optional regularizer for the depthwise
    convolution kernel.
  pointwise_regularizer: Optional regularizer for the pointwise
    convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is
`tf.keras.layers.SeparableConv1D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 conv = tf.compat.v1.layers.SeparableConv1D(filters=3, kernel_size=3)
```

After:

```python
 conv = tf.keras.layers.SeparableConv1D(filters=3, kernel_size=3)
```
@end_compatibility
r   r   r   NTc                    > [         TU ]  " S0 SU_SU_SU_SU_SU_SU_SU_SU_S	U	_S
U
_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_UD6  g Nr   r   r   r   r   r   depth_multiplierr   r   depthwise_initializerpointwise_initializerr   depthwise_regularizerpointwise_regularizerr   r   depthwise_constraintpointwise_constraintr    r!   r"   r#   r$   r(   r   r   r   r   r   r   rZ   r   r   r[   r\   r   r]   r^   r   r   r_   r`   r    r!   r"   r)   r*   s                          r+   r&   SeparableConv1D.__init__      2 	 	
	
#	
 	
 		

 $	
 (	
 .	
 "	
 	
 #8	
 #8	
 .	
 #8	
 #8	
 .	
  "6!	
" "6#	
$ "6%	
& ,'	
(  )	
* -	
r.   r#   r/   r:   s   @r+   rW   rW   s  s`    ]F #""779""!!!-0
 0
r.   rW   z0keras.__internal__.legacy.layers.SeparableConv2Dc                      ^  \ rS rSrSrSSSSSSSSS\R                  R                  R                  5       SSSSSSSSS4U 4S	 jjr	S
r
U =r$ )SeparableConv2Di  a  Depthwise separable 2D convolution.

This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output. It then optionally applies an
activation function to produce the final output.

Args:
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: A tuple or list of 2 integers specifying the spatial
    dimensions of the filters. Can be a single integer to specify the same
    value for all spatial dimensions.
  strides: A tuple or list of 2 positive integers specifying the strides
    of the convolution. Can be a single integer to specify the same value
    for all spatial dimensions.
    Specifying any `stride` value != 1 is incompatible with specifying
    any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, height, width, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, height, width)`.

  dilation_rate: An integer or tuple/list of 2 integers, specifying
    the dilation rate to use for dilated convolution.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any stride value != 1.
  depth_multiplier: The number of depthwise convolution output channels for
    each input channel. The total number of depthwise convolution output
    channels will be equal to `num_filters_in * depth_multiplier`.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  depthwise_initializer: An initializer for the depthwise convolution
    kernel.
  pointwise_initializer: An initializer for the pointwise convolution
    kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  depthwise_regularizer: Optional regularizer for the depthwise
    convolution kernel.
  pointwise_regularizer: Optional regularizer for the pointwise
    convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is
`tf.keras.layers.SeparableConv2D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 conv = tf.compat.v1.layers.SeparableConv2D(filters=3, kernel_size=3)
```

After:

```python
 conv = tf.keras.layers.SeparableConv2D(filters=3, kernel_size=3)
```
@end_compatibility
rJ   r   r   r   NTc                    > [         TU ]  " S0 SU_SU_SU_SU_SU_SU_SU_SU_S	U	_S
U
_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_UD6  g rY   r$   ra   s                          r+   r&   SeparableConv2D.__init__l  rc   r.   r#   r/   r:   s   @r+   re   re     s`    aN #""779""!!!-0
 0
r.   re   z1keras.__internal__.legacy.layers.separable_conv1dc                     [         R                  " SSS9  [        S0 SU_SU_SU_SU_SU_S	U_S
U_SU_SU	_SU
_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_6nU" U 5      $ )a  Functional interface for the depthwise separable 1D convolution layer.

This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output. It then optionally applies an
activation function to produce the final output.

Args:
  inputs: Input tensor.
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: A single integer specifying the spatial
    dimensions of the filters.
  strides: A single integer specifying the strides
    of the convolution.
    Specifying any `stride` value != 1 is incompatible with specifying
    any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, length, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, length)`.
  dilation_rate: A single integer, specifying
    the dilation rate to use for dilated convolution.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any stride value != 1.
  depth_multiplier: The number of depthwise convolution output channels for
    each input channel. The total number of depthwise convolution output
    channels will be equal to `num_filters_in * depth_multiplier`.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  depthwise_initializer: An initializer for the depthwise convolution
    kernel.
  pointwise_initializer: An initializer for the pointwise convolution
    kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  depthwise_regularizer: Optional regularizer for the depthwise
    convolution kernel.
  pointwise_regularizer: Optional regularizer for the pointwise
    convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.
  reuse: Boolean, whether to reuse the weights of a previous layer
    by the same name.

Returns:
  Output tensor.

Raises:
  ValueError: if eager execution is enabled.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is
`tf.keras.layers.SeparableConv1D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 y = tf.compat.v1.layers.separable_conv1d(x, filters=3, kernel_size=3)
```

After:

To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/tf_keras/functional):

```python
 x = tf.keras.Input((28, 28, 1))
 y = tf.keras.layers.SeparableConv1D(filters=3, kernel_size=3)(x)
 model = tf.keras.Model(x, y)
```
@end_compatibility
z`tf.layers.separable_conv1d` is deprecated and will be removed in a future version. Please Use `tf.keras.layers.SeparableConv1D` instead.r<   r=   r   r   r   r   r   r   rZ   r   r   r[   r\   r   r]   r^   r   r   r_   r`   r    r!   r"   r?   r@   r#   )rA   rB   rW   rD   r   r   r   r   r   r   rZ   r   r   r[   r\   r   r]   r^   r   r   r_   r`   r    r!   r"   rE   rF   s                           r+   separable_conv1drj     s   J MM	@ 	    	
   $ *   4 4 * 4 4 *  2!" 2#$ 2%& ('( )* +, -. /E2 =r.   z1keras.__internal__.legacy.layers.separable_conv2dc                     [         R                  " SSS9  [        S0 SU_SU_SU_SU_SU_S	U_S
U_SU_SU	_SU
_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_SU_6nU" U 5      $ )a  Functional interface for the depthwise separable 2D convolution layer.

This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output. It then optionally applies an
activation function to produce the final output.

Args:
  inputs: Input tensor.
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: A tuple or list of 2 integers specifying the spatial
    dimensions of the filters. Can be a single integer to specify the same
    value for all spatial dimensions.
  strides: A tuple or list of 2 positive integers specifying the strides
    of the convolution. Can be a single integer to specify the same value
    for all spatial dimensions. Specifying any `stride` value != 1 is
    incompatible with specifying any `dilation_rate` value != 1.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, height, width, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, height, width)`.

  dilation_rate: An integer or tuple/list of 2 integers, specifying
    the dilation rate to use for dilated convolution.
    Can be a single integer to specify the same value for
    all spatial dimensions.
    Currently, specifying any `dilation_rate` value != 1 is
    incompatible with specifying any stride value != 1.
  depth_multiplier: The number of depthwise convolution output channels for
    each input channel. The total number of depthwise convolution output
    channels will be equal to `num_filters_in * depth_multiplier`.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  depthwise_initializer: An initializer for the depthwise convolution
    kernel.
  pointwise_initializer: An initializer for the pointwise convolution
    kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  depthwise_regularizer: Optional regularizer for the depthwise
    convolution kernel.
  pointwise_regularizer: Optional regularizer for the pointwise
    convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.
  reuse: Boolean, whether to reuse the weights of a previous layer
    by the same name.

Returns:
  Output tensor.

Raises:
  ValueError: if eager execution is enabled.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is
`tf.keras.layers.SeparableConv2D`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 y = tf.compat.v1.layers.separable_conv2d(x, filters=3, kernel_size=3)
```

After:

To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/tf_keras/functional):

```python
 x = tf.keras.Input((28, 28, 1))
 y = tf.keras.layers.SeparableConv2D(filters=3, kernels_size=3)(x)
 model = tf.keras.Model(x, y)
```
@end_compatibility
z`tf.layers.separable_conv2d` is deprecated and will be removed in a future version. Please Use `tf.keras.layers.SeparableConv2D` instead.r<   r=   r   r   r   r   r   r   rZ   r   r   r[   r\   r   r]   r^   r   r   r_   r`   r    r!   r"   r?   r@   r#   )rA   rB   re   ri   s                           r+   separable_conv2drl   F  s   R MM	@ 	    	
   $ *   4 4 * 4 4 *  2!" 2#$ 2%& ('( )* +, -. /E2 =r.   z0keras.__internal__.legacy.layers.Conv2DTransposec                      ^  \ rS rSrSrSSSSSS\R                  R                  R                  5       SSSSSSS4U 4S jjr	S	r
U =r$ )
Conv2DTransposei  a  Transposed 2D convolution layer (sometimes called 2D Deconvolution).

The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.

Args:
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: A tuple or list of 2 positive integers specifying the spatial
    dimensions of the filters. Can be a single integer to specify the same
    value for all spatial dimensions.
  strides: A tuple or list of 2 positive integers specifying the strides
    of the convolution. Can be a single integer to specify the same value
    for all spatial dimensions.
  padding: one of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, height, width, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, height, width)`.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is
`tf.keras.layers.Conv2DTranspose`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 conv = tf.compat.v1.layers.Conv2DTranspose(filters=3, kernel_size=3)
```

After:

```python
 conv = tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3)
```
@end_compatibility
rJ   r   r   NTc                    > [         TU ]  " S0 SU_SU_SU_SU_SU_SU_SU_SU_S	U	_S
U
_SU_SU_SU_SU_SU_SU_UD6  g Nr   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   r#   r$   r(   r   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   r)   r*   s                     r+   r&   Conv2DTranspose.__init__C      ( 	 	
	
#	
 	
 		

 $	
 "	
 	
  2	
 .	
  2	
 .	
 "6	
 0	
 ,	
  	
  #	
r.   r#   r/   r:   s   @r+   rn   rn     sQ    Nh #779!#&
 &
r.   rn   z1keras.__internal__.legacy.layers.conv2d_transposec                     [         R                  " SSS9  [        S0 SU_SU_SU_SU_SU_S	U_S
U_SU_SU	_SU
_SU_SU_SU_SU_SU_SU_SU_SU_6nU" U 5      $ )a  Functional interface for transposed 2D convolution layer.

The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.

Args:
  inputs: Input tensor.
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: A tuple or list of 2 positive integers specifying the spatial
    dimensions of the filters. Can be a single integer to specify the same
    value for all spatial dimensions.
  strides: A tuple or list of 2 positive integers specifying the strides
    of the convolution. Can be a single integer to specify the same value
    for all spatial dimensions.
  padding: one of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, height, width, channels)` while `channels_first` corresponds to
    inputs with shape `(batch, channels, height, width)`.
  activation: Activation function. Set it to `None` to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If `None`, the
    default initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.
  reuse: Boolean, whether to reuse the weights of a previous layer
    by the same name.

Returns:
  Output tensor.

Raises:
  ValueError: if eager execution is enabled.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is
`tf.keras.layers.Conv2DTranspose`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 y = tf.compat.v1.layers.conv2d_transpose(x, filters=3, kernel_size=3)
```

After:

To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/tf_keras/functional):

```python
 x = tf.keras.Input((28, 28, 1))
 y = tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3)(x)
 model = tf.keras.Model(x, y)
```
@end_compatibility
z`tf.layers.conv2d_transpose` is deprecated and will be removed in a future version. Please Use `tf.keras.layers.Conv2DTranspose` instead.r<   r=   r   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   r?   r@   r#   )rA   rB   rn   rD   r   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   rE   rF   s                      r+   conv2d_transposerv   l  s    d MM	@ 	    	
     . * . * 2 , (   !" #$ %E( =r.   z0keras.__internal__.legacy.layers.Conv3DTransposec                      ^  \ rS rSrSrSSSSSS\R                  R                  R                  5       SSSSSSS4U 4S jjr	S	r
U =r$ )
Conv3DTransposei  a  Transposed 3D convolution layer (sometimes called 3D Deconvolution).

Args:
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: An integer or tuple/list of 3 integers, specifying the
    depth, height and width of the 3D convolution window.
    Can be a single integer to specify the same value for all spatial
    dimensions.
  strides: An integer or tuple/list of 3 integers, specifying the strides
    of the convolution along the depth, height and width.
    Can be a single integer to specify the same value for all spatial
    dimensions.
  padding: One of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, depth, height, width, channels)` while `channels_first`
    corresponds to inputs with shape
    `(batch, channels, depth, height, width)`.
  activation: Activation function. Set it to `None` to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If `None`, the
    default initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is
`tf.keras.layers.Conv3DTranspose`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 conv = tf.compat.v1.layers.Conv3DTranspose(filters=3, kernel_size=3)
```

After:

```python
 conv = tf.keras.layers.Conv3DTranspose(filters=3, kernels_size=3)
```
@end_compatibility
rQ   r   r   NTc                    > [         TU ]  " S0 SU_SU_SU_SU_SU_SU_SU_SU_S	U	_S
U
_SU_SU_SU_SU_SU_SU_UD6  g rp   r$   rq   s                     r+   r&   Conv3DTranspose.__init__I  rs   r.   r#   r/   r:   s   @r+   rx   rx     sQ    J` #779!#&
 &
r.   rx   z1keras.__internal__.legacy.layers.conv3d_transposec                     [         R                  " SSS9  [        S0 SU_SU_SU_SU_SU_S	U_S
U_SU_SU	_SU
_SU_SU_SU_SU_SU_SU_SU_SU_6nU" U 5      $ )a  Functional interface for transposed 3D convolution layer.

Args:
  inputs: Input tensor.
  filters: Integer, the dimensionality of the output space (i.e. the number
    of filters in the convolution).
  kernel_size: A tuple or list of 3 positive integers specifying the spatial
    dimensions of the filters. Can be a single integer to specify the same
    value for all spatial dimensions.
  strides: A tuple or list of 3 positive integers specifying the strides
    of the convolution. Can be a single integer to specify the same value
    for all spatial dimensions.
  padding: one of `"valid"` or `"same"` (case-insensitive).
    `"valid"` means no padding. `"same"` results in padding evenly to
    the left/right or up/down of the input such that output has the same
    height/width dimension as the input.
  data_format: A string, one of `channels_last` (default) or
    `channels_first`. The ordering of the dimensions in the inputs.
    `channels_last` corresponds to inputs with shape
    `(batch, depth, height, width, channels)` while `channels_first`
    corresponds to inputs with shape
    `(batch, channels, depth, height, width)`.
  activation: Activation function. Set it to None to maintain a
    linear activation.
  use_bias: Boolean, whether the layer uses a bias.
  kernel_initializer: An initializer for the convolution kernel.
  bias_initializer: An initializer for the bias vector. If None, the default
    initializer will be used.
  kernel_regularizer: Optional regularizer for the convolution kernel.
  bias_regularizer: Optional regularizer for the bias vector.
  activity_regularizer: Optional regularizer function for the output.
  kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
  bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
  trainable: Boolean, if `True` also add variables to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
  name: A string, the name of the layer.
  reuse: Boolean, whether to reuse the weights of a previous layer
    by the same name.

Returns:
  Output tensor.

Raises:
  ValueError: if eager execution is enabled.


@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`

Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.

The corresponding TensorFlow v2 layer is
`tf.keras.layers.Conv3DTranspose`.


#### Structural Mapping to Native TF2

None of the supported arguments have changed name.

Before:

```python
 y = tf.compat.v1.layers.conv3d_transpose(x, filters=3, kernel_size=3)
```

After:

To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/tf_keras/functional):

```python
 x = tf.keras.Input((28, 28, 1))
 y = tf.keras.layers.Conv3DTranspose(filters=3, kernels_size=3)(x)
 model = tf.keras.Model(x, y)
```
@end_compatibility
z`tf.layers.conv3d_transpose` is deprecated and will be removed in a future version. Please Use `tf.keras.layers.Conv3DTranspose` instead.r<   r=   r   r   r   r   r   r   r   r   r   r   r   r   r   r    r!   r"   r?   r@   r#   )rA   rB   rx   ru   s                      r+   conv3d_transposer|   r  s    X MM	@ 	    	
     . * . * 2 , (   !" #$ %E( =r.   )6r4   
__future__r   r   r   rA   tensorflow.compat.v2r6   v2r5   tf_keras.srcr   keras_layerstf_keras.src.legacy_tf_layersr    tensorflow.python.util.tf_exportr   r   Layerr	   r7   rG   rI   rN   rP   rU   rW   re   rj   rl   rn   rv   rx   r|   Convolution1DConvolution2DConvolution3DSeparableConvolution2DConvolution2DTransposeDeconvolution2DDeconv2DConvolution3DTransposeDeconvolution3DDeconv3Dconvolution1dconvolution2dconvolution3dseparable_convolution2dconvolution2d_transposedeconvolution2ddeconv2dconvolution3d_transposedeconvolution3ddeconv3dr#   r.   r+   <module>r      s    M &  %  ! ! / . : ;<=z
\  $** z
 >z
z ;<=
 YY\\335	
'O >Od ;<=B
\  $** B
 >B
J ;<=
 YY\\335	
'V >Vr ;<=C
\  $** C
 >C
L ;<=
 YY\\335	
'W >Wt DEFP
l22DJJ P
 GP
f DEFT
l22DJJ T
 GT
n EFG
 YY\\335	
/c HcL EFG
 YY\\335	
/g HgT DEFw
l22DJJ w
 Gw
t EFG
 YY\\335	
%K HK\ DEFs
l22DJJ s
 Gs
l EFG
 YY\\335	
%E HET ( 6E E  E86E E  E8* 7G G  G/H7G G  G/Hr.   