layers

Layer module.

This submodule contains layers used in the CAREamics models.

Conv_Block #

Bases: Module

Convolution block used in UNets.

The convolution block consists of two convolution layers with optional batch norm, dropout, and a final activation function.

The parameters are directly mapped to PyTorch Conv2d and Conv3d parameters; see PyTorch torch.nn.Conv2d and torch.nn.Conv3d for more information.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `conv_dim` | `int` | Number of dimensions of the convolutions, 2 or 3. | *required* |
| `in_channels` | `int` | Number of input channels. | *required* |
| `out_channels` | `int` | Number of output channels. | *required* |
| `intermediate_channel_multiplier` | `int` | Multiplier for the number of output channels, by default 1. | `1` |
| `stride` | `int` | Stride of the convolutions, by default 1. | `1` |
| `padding` | `int` | Padding of the convolutions, by default 1. | `1` |
| `bias` | `bool` | Bias of the convolutions, by default True. | `True` |
| `groups` | `int` | Controls the connections between inputs and outputs, by default 1. | `1` |
| `activation` | `str` | Activation function, by default "ReLU". | `'ReLU'` |
| `dropout_perc` | `float` | Dropout percentage, by default 0. | `0` |
| `use_batch_norm` | `bool` | Use batch norm, by default False. | `False` |
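For orientation, here is a minimal usage sketch (an illustration, not part of the CAREamics docs; it only assumes the class is importable from `careamics.models.layers`, the source file stated below):

```python
import torch
from careamics.models.layers import Conv_Block

# 2D block: 1 input channel -> 32 output channels, with batch norm
# and a LeakyReLU activation resolved by name from torch.nn.
block = Conv_Block(
    conv_dim=2,
    in_channels=1,
    out_channels=32,
    use_batch_norm=True,
    activation="LeakyReLU",
)

x = torch.randn(8, 1, 64, 64)  # (batch, channels, height, width)
y = block(x)
print(y.shape)  # torch.Size([8, 32, 64, 64]); kernel_size=3 with padding=1 preserves H and W
```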
Source code in src/careamics/models/layers.py
class Conv_Block(nn.Module):
    """
    Convolution block used in UNets.

    The convolution block consists of two convolution layers with optional batch norm,
    dropout, and a final activation function.

    The parameters are directly mapped to PyTorch Conv2d and Conv3d parameters; see
    PyTorch torch.nn.Conv2d and torch.nn.Conv3d for more information.

    Parameters
    ----------
    conv_dim : int
        Number of dimensions of the convolutions, 2 or 3.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    intermediate_channel_multiplier : int, optional
        Multiplier for the number of output channels, by default 1.
    stride : int, optional
        Stride of the convolutions, by default 1.
    padding : int, optional
        Padding of the convolutions, by default 1.
    bias : bool, optional
        Bias of the convolutions, by default True.
    groups : int, optional
        Controls the connections between inputs and outputs, by default 1.
    activation : str, optional
        Activation function, by default "ReLU".
    dropout_perc : float, optional
        Dropout percentage, by default 0.
    use_batch_norm : bool, optional
        Use batch norm, by default False.
    """

    def __init__(
        self,
        conv_dim: int,
        in_channels: int,
        out_channels: int,
        intermediate_channel_multiplier: int = 1,
        stride: int = 1,
        padding: int = 1,
        bias: bool = True,
        groups: int = 1,
        activation: str = "ReLU",
        dropout_perc: float = 0,
        use_batch_norm: bool = False,
    ) -> None:
        """
        Constructor.

        Parameters
        ----------
        conv_dim : int
            Number of dimensions of the convolutions, 2 or 3.
        in_channels : int
            Number of input channels.
        out_channels : int
            Number of output channels.
        intermediate_channel_multiplier : int, optional
            Multiplier for the number of output channels, by default 1.
        stride : int, optional
            Stride of the convolutions, by default 1.
        padding : int, optional
            Padding of the convolutions, by default 1.
        bias : bool, optional
            Bias of the convolutions, by default True.
        groups : int, optional
            Controls the connections between inputs and outputs, by default 1.
        activation : str, optional
            Activation function, by default "ReLU".
        dropout_perc : float, optional
            Dropout percentage, by default 0.
        use_batch_norm : bool, optional
            Use batch norm, by default False.
        """
        super().__init__()
        self.use_batch_norm = use_batch_norm
        self.conv1 = getattr(nn, f"Conv{conv_dim}d")(
            in_channels,
            out_channels * intermediate_channel_multiplier,
            kernel_size=3,
            stride=stride,
            padding=padding,
            bias=bias,
            groups=groups,
        )

        self.conv2 = getattr(nn, f"Conv{conv_dim}d")(
            out_channels * intermediate_channel_multiplier,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=padding,
            bias=bias,
            groups=groups,
        )

        self.batch_norm1 = getattr(nn, f"BatchNorm{conv_dim}d")(
            out_channels * intermediate_channel_multiplier
        )
        self.batch_norm2 = getattr(nn, f"BatchNorm{conv_dim}d")(out_channels)

        self.dropout = (
            getattr(nn, f"Dropout{conv_dim}d")(dropout_perc)
            if dropout_perc > 0
            else None
        )
        self.activation = (
            getattr(nn, f"{activation}")() if activation is not None else nn.Identity()
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Parameters
        ----------
        x : torch.Tensor
            Input tensor.

        Returns
        -------
        torch.Tensor
            Output tensor.
        """
        if self.use_batch_norm:
            x = self.conv1(x)
            x = self.batch_norm1(x)
            x = self.activation(x)
            x = self.conv2(x)
            x = self.batch_norm2(x)
            x = self.activation(x)
        else:
            x = self.conv1(x)
            x = self.activation(x)
            x = self.conv2(x)
            x = self.activation(x)
        if self.dropout is not None:
            x = self.dropout(x)
        return x
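Note that the activation is resolved by name with `getattr(nn, activation)`, so any `torch.nn` activation class that can be constructed without arguments works as a string value. A small sketch of that lookup:

```python
import torch.nn as nn

# The same lookup Conv_Block performs internally: fetch a class by
# name from torch.nn and instantiate it with no arguments.
for name in ("ReLU", "GELU", "SiLU", "Tanh"):
    print(name, "->", getattr(nn, name)())
```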

__init__(conv_dim, in_channels, out_channels, intermediate_channel_multiplier=1, stride=1, padding=1, bias=True, groups=1, activation='ReLU', dropout_perc=0, use_batch_norm=False) #

Constructor.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `conv_dim` | `int` | Number of dimensions of the convolutions, 2 or 3. | *required* |
| `in_channels` | `int` | Number of input channels. | *required* |
| `out_channels` | `int` | Number of output channels. | *required* |
| `intermediate_channel_multiplier` | `int` | Multiplier for the number of output channels, by default 1. | `1` |
| `stride` | `int` | Stride of the convolutions, by default 1. | `1` |
| `padding` | `int` | Padding of the convolutions, by default 1. | `1` |
| `bias` | `bool` | Bias of the convolutions, by default True. | `True` |
| `groups` | `int` | Controls the connections between inputs and outputs, by default 1. | `1` |
| `activation` | `str` | Activation function, by default "ReLU". | `'ReLU'` |
| `dropout_perc` | `float` | Dropout percentage, by default 0. | `0` |
| `use_batch_norm` | `bool` | Use batch norm, by default False. | `False` |
Source code in src/careamics/models/layers.py
def __init__(
    self,
    conv_dim: int,
    in_channels: int,
    out_channels: int,
    intermediate_channel_multiplier: int = 1,
    stride: int = 1,
    padding: int = 1,
    bias: bool = True,
    groups: int = 1,
    activation: str = "ReLU",
    dropout_perc: float = 0,
    use_batch_norm: bool = False,
) -> None:
    """
    Constructor.

    Parameters
    ----------
    conv_dim : int
        Number of dimensions of the convolutions, 2 or 3.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    intermediate_channel_multiplier : int, optional
        Multiplier for the number of output channels, by default 1.
    stride : int, optional
        Stride of the convolutions, by default 1.
    padding : int, optional
        Padding of the convolutions, by default 1.
    bias : bool, optional
        Bias of the convolutions, by default True.
    groups : int, optional
        Controls the connections between inputs and outputs, by default 1.
    activation : str, optional
        Activation function, by default "ReLU".
    dropout_perc : float, optional
        Dropout percentage, by default 0.
    use_batch_norm : bool, optional
        Use batch norm, by default False.
    """
    super().__init__()
    self.use_batch_norm = use_batch_norm
    self.conv1 = getattr(nn, f"Conv{conv_dim}d")(
        in_channels,
        out_channels * intermediate_channel_multiplier,
        kernel_size=3,
        stride=stride,
        padding=padding,
        bias=bias,
        groups=groups,
    )

    self.conv2 = getattr(nn, f"Conv{conv_dim}d")(
        out_channels * intermediate_channel_multiplier,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        bias=bias,
        groups=groups,
    )

    self.batch_norm1 = getattr(nn, f"BatchNorm{conv_dim}d")(
        out_channels * intermediate_channel_multiplier
    )
    self.batch_norm2 = getattr(nn, f"BatchNorm{conv_dim}d")(out_channels)

    self.dropout = (
        getattr(nn, f"Dropout{conv_dim}d")(dropout_perc)
        if dropout_perc > 0
        else None
    )
    self.activation = (
        getattr(nn, f"{activation}")() if activation is not None else nn.Identity()
    )

forward(x) #

Forward pass.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `Tensor` | Input tensor. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `Tensor` | Output tensor. |

Source code in src/careamics/models/layers.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass.

    Parameters
    ----------
    x : torch.Tensor
        Input tensor.

    Returns
    -------
    torch.Tensor
        Output tensor.
    """
    if self.use_batch_norm:
        x = self.conv1(x)
        x = self.batch_norm1(x)
        x = self.activation(x)
        x = self.conv2(x)
        x = self.batch_norm2(x)
        x = self.activation(x)
    else:
        x = self.conv1(x)
        x = self.activation(x)
        x = self.conv2(x)
        x = self.activation(x)
    if self.dropout is not None:
        x = self.dropout(x)
    return x

MaxBlurPool #

Bases: Module

Apply max pooling and blurring to downsample a given feature map.

Inspired by the Kornia MaxBlurPool implementation. Equivalent to `nn.Sequential(nn.MaxPool2d(...), BlurPool2D(...))`.

Parameters#

| Name | Type | Description |
| --- | --- | --- |
| `dim` | `int` | Toggles between 2D and 3D. |
| `kernel_size` | `Union[Tuple[int, int], int]` | Kernel size for max pooling. |
| `stride` | `int` | Stride for pooling. |
| `max_pool_size` | `int` | Max kernel size for max pooling. |
| `ceil_mode` | `bool` | Ceil mode, by default False. Set to True to match output size of conv2d. |
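A minimal shape check (an illustrative sketch, assuming the layer is importable from `careamics.models.layers` as for the other members of this module):

```python
import torch
from careamics.models.layers import MaxBlurPool

# 2D max-blur-pool with a 3x3 blur kernel; the default stride of 2
# halves each spatial dimension, like a plain nn.MaxPool2d(2) would.
pool = MaxBlurPool(dim=2, kernel_size=3)

x = torch.randn(4, 16, 32, 32)
y = pool(x)
print(y.shape)  # torch.Size([4, 16, 16, 16])
```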

Source code in src/careamics/models/layers.py
class MaxBlurPool(nn.Module):
    """Compute pools and blurs and downsample a given feature map.

    Inspired by Kornia MaxBlurPool implementation. Equivalent to
    ```nn.Sequential(nn.MaxPool2d(...), BlurPool2D(...))```

    Parameters
    ----------
    dim : int
        Toggles between 2D and 3D.
    kernel_size : Union[Tuple[int, int], int]
        Kernel size for max pooling.
    stride : int
        Stride for pooling.
    max_pool_size : int
        Max kernel size for max pooling.
    ceil_mode : bool
        Ceil mode, by default False. Set to True to match output size of conv2d.
    """

    def __init__(
        self,
        dim: int,
        kernel_size: Union[Tuple[int, int], int],
        stride: int = 2,
        max_pool_size: int = 2,
        ceil_mode: bool = False,
    ) -> None:
        """Constructor.

        Parameters
        ----------
        dim : int
            Dimension of the convolution.
        kernel_size : Union[Tuple[int, int], int]
            Kernel size for max pooling.
        stride : int, optional
            Stride, by default 2.
        max_pool_size : int, optional
            Maximum pool size, by default 2.
        ceil_mode : bool, optional
            Ceil mode, by default False. Set to True to match output size of conv2d.
        """
        super().__init__()
        self.dim = dim
        self.kernel_size = kernel_size
        self.stride = stride
        self.max_pool_size = max_pool_size
        self.ceil_mode = ceil_mode
        self.kernel = _get_pascal_kernel_nd(kernel_size, norm=True, dim=self.dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass of the function.

        Parameters
        ----------
        x : torch.Tensor
            Input tensor.

        Returns
        -------
        torch.Tensor
            Output tensor.
        """
        self.kernel = torch.as_tensor(self.kernel, device=x.device, dtype=x.dtype)
        if self.dim == 2:
            return _max_blur_pool_by_kernel2d(
                x,
                self.kernel.repeat((x.size(1), 1, 1, 1)),
                self.stride,
                self.max_pool_size,
                self.ceil_mode,
            )
        else:
            return _max_blur_pool_by_kernel3d(
                x,
                self.kernel.repeat((x.size(1), 1, 1, 1, 1)),
                self.stride,
                self.max_pool_size,
                self.ceil_mode,
            )

__init__(dim, kernel_size, stride=2, max_pool_size=2, ceil_mode=False) #

Constructor.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `dim` | `int` | Dimension of the convolution. | *required* |
| `kernel_size` | `Union[Tuple[int, int], int]` | Kernel size for max pooling. | *required* |
| `stride` | `int` | Stride, by default 2. | `2` |
| `max_pool_size` | `int` | Maximum pool size, by default 2. | `2` |
| `ceil_mode` | `bool` | Ceil mode, by default False. Set to True to match output size of conv2d. | `False` |
Source code in src/careamics/models/layers.py
def __init__(
    self,
    dim: int,
    kernel_size: Union[Tuple[int, int], int],
    stride: int = 2,
    max_pool_size: int = 2,
    ceil_mode: bool = False,
) -> None:
    """Constructor.

    Parameters
    ----------
    dim : int
        Dimension of the convolution.
    kernel_size : Union[Tuple[int, int], int]
        Kernel size for max pooling.
    stride : int, optional
        Stride, by default 2.
    max_pool_size : int, optional
        Maximum pool size, by default 2.
    ceil_mode : bool, optional
        Ceil mode, by default False. Set to True to match output size of conv2d.
    """
    super().__init__()
    self.dim = dim
    self.kernel_size = kernel_size
    self.stride = stride
    self.max_pool_size = max_pool_size
    self.ceil_mode = ceil_mode
    self.kernel = _get_pascal_kernel_nd(kernel_size, norm=True, dim=self.dim)

forward(x) #

Forward pass.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `Tensor` | Input tensor. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `Tensor` | Output tensor. |

Source code in src/careamics/models/layers.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Forward pass of the function.

    Parameters
    ----------
    x : torch.Tensor
        Input tensor.

    Returns
    -------
    torch.Tensor
        Output tensor.
    """
    self.kernel = torch.as_tensor(self.kernel, device=x.device, dtype=x.dtype)
    if self.dim == 2:
        return _max_blur_pool_by_kernel2d(
            x,
            self.kernel.repeat((x.size(1), 1, 1, 1)),
            self.stride,
            self.max_pool_size,
            self.ceil_mode,
        )
    else:
        return _max_blur_pool_by_kernel3d(
            x,
            self.kernel.repeat((x.size(1), 1, 1, 1, 1)),
            self.stride,
            self.max_pool_size,
            self.ceil_mode,
        )

get_pascal_kernel_1d(kernel_size, norm=False, *, device=None, dtype=None) #

Generate a row of Yang Hui's triangle (Pascal's triangle) for a given kernel size.

Inspired by the Kornia implementation. TODO link

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `kernel_size` | `int` | Kernel size. | *required* |
| `norm` | `bool` | Normalize the kernel, by default False. | `False` |
| `device` | `Optional[device]` | Device of the tensor, by default None. | `None` |
| `dtype` | `Optional[dtype]` | Data type of the tensor, by default None. | `None` |

Returns:

| Type | Description |
| --- | --- |
| `Tensor` | Pascal kernel. |

Examples:

>>> get_pascal_kernel_1d(1)
tensor([1.])
>>> get_pascal_kernel_1d(2)
tensor([1., 1.])
>>> get_pascal_kernel_1d(3)
tensor([1., 2., 1.])
>>> get_pascal_kernel_1d(4)
tensor([1., 3., 3., 1.])
>>> get_pascal_kernel_1d(5)
tensor([1., 4., 6., 4., 1.])
>>> get_pascal_kernel_1d(6)
tensor([ 1.,  5., 10., 10.,  5.,  1.])
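With `norm=True` the row is divided by its sum, which yields the binomial low-pass filter used for blurring. A quick check (assuming the function is importable from `careamics.models.layers`):

```python
from careamics.models.layers import get_pascal_kernel_1d

k = get_pascal_kernel_1d(5, norm=True)
print(k)  # tensor([0.0625, 0.2500, 0.3750, 0.2500, 0.0625]) == [1, 4, 6, 4, 1] / 16
```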
Source code in src/careamics/models/layers.py
def get_pascal_kernel_1d(
    kernel_size: int,
    norm: bool = False,
    *,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
    """Generate Yang Hui triangle (Pascal's triangle) for a given number.

    Inspired by Kornia implementation. TODO link

    Parameters
    ----------
    kernel_size : int
        Kernel size.
    norm : bool
        Normalize the kernel, by default False.
    device : Optional[torch.device]
        Device of the tensor, by default None.
    dtype : Optional[torch.dtype]
        Data type of the tensor, by default None.

    Returns
    -------
    torch.Tensor
        Pascal kernel.

    Examples
    --------
    >>> get_pascal_kernel_1d(1)
    tensor([1.])
    >>> get_pascal_kernel_1d(2)
    tensor([1., 1.])
    >>> get_pascal_kernel_1d(3)
    tensor([1., 2., 1.])
    >>> get_pascal_kernel_1d(4)
    tensor([1., 3., 3., 1.])
    >>> get_pascal_kernel_1d(5)
    tensor([1., 4., 6., 4., 1.])
    >>> get_pascal_kernel_1d(6)
    tensor([ 1.,  5., 10., 10.,  5.,  1.])
    """
    pre: List[float] = []
    cur: List[float] = []
    for i in range(kernel_size):
        cur = [1.0] * (i + 1)

        for j in range(1, i // 2 + 1):
            value = pre[j - 1] + pre[j]
            cur[j] = value
            if i != 2 * j:
                cur[-j - 1] = value
        pre = cur

    out = torch.tensor(cur, device=device, dtype=dtype)

    if norm:
        out = out / out.sum()

    return out