6676 port generative networks spade #7320

Merged

14 changes: 14 additions & 0 deletions docs/source/networks.rst
@@ -258,6 +258,10 @@ N-Dim Fourier Transform
.. autofunction:: monai.networks.blocks.fft_utils_t.fftshift
.. autofunction:: monai.networks.blocks.fft_utils_t.ifftshift

`SPADE`
~~~~~~~
.. autoclass:: monai.networks.blocks.spade_norm.SPADE
:members:

Layers
------
@@ -588,6 +592,11 @@ Nets
.. autoclass:: DiffusionModelUNet
:members:

`SPADEDiffusionModelUNet`
~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: SPADEDiffusionModelUNet
:members:

`ControlNet`
~~~~~~~~~~~~
.. autoclass:: ControlNet
@@ -618,6 +627,11 @@ Nets
.. autoclass:: AutoencoderKL
:members:

`SPADEAutoencoderKL`
~~~~~~~~~~~~~~~~~~~~
.. autoclass:: SPADEAutoencoderKL
:members:

`VarAutoEncoder`
~~~~~~~~~~~~~~~~
.. autoclass:: VarAutoEncoder
1 change: 1 addition & 0 deletions monai/networks/blocks/__init__.py
@@ -30,6 +30,7 @@
from .regunet_block import RegistrationDownSampleBlock, RegistrationExtractionBlock, RegistrationResidualConvBlock
from .segresnet_block import ResBlock
from .selfattention import SABlock
from .spade_norm import SPADE
from .squeeze_and_excitation import (
ChannelSELayer,
ResidualSELayer,
96 changes: 96 additions & 0 deletions monai/networks/blocks/spade_norm.py
@@ -0,0 +1,96 @@
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import torch
import torch.nn as nn
import torch.nn.functional as F

from monai.networks.blocks import ADN, Convolution


class SPADE(nn.Module):
"""
Spatially Adaptive Normalization (SPADE) block, allowing for normalization of activations conditioned on a
semantic map. This block is used in SPADE-based image-to-image translation models, as described in
Semantic Image Synthesis with Spatially-Adaptive Normalization (https://arxiv.org/abs/1903.07291).

Args:
label_nc: number of semantic labels
        norm_nc: number of channels in the input to be normalised (and in the output)
        kernel_size: kernel size of the convolutions that compute the modulation parameters
spatial_dims: number of spatial dimensions
hidden_channels: number of channels in the intermediate gamma and beta layers
norm: type of base normalisation used before applying the SPADE normalisation
norm_params: parameters for the base normalisation
"""

def __init__(
self,
label_nc: int,
norm_nc: int,
kernel_size: int = 3,
spatial_dims: int = 2,
hidden_channels: int = 64,
norm: str | tuple = "INSTANCE",
norm_params: dict | None = None,
) -> None:
super().__init__()

if norm_params is None:
norm_params = {}
if len(norm_params) != 0:
norm = (norm, norm_params)
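        # base normalisation of the input, applied before the learned modulation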
self.param_free_norm = ADN(
act=None, dropout=0.0, norm=norm, norm_dim=spatial_dims, ordering="N", in_channels=norm_nc
)
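        # shared convolutional encoder for the (resized) segmentation map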
self.mlp_shared = Convolution(
spatial_dims=spatial_dims,
in_channels=label_nc,
out_channels=hidden_channels,
kernel_size=kernel_size,
norm=None,
act="LEAKYRELU",
)
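        # convolutions predicting the per-channel scale (gamma) and shift (beta) from the encoded map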
self.mlp_gamma = Convolution(
spatial_dims=spatial_dims,
in_channels=hidden_channels,
out_channels=norm_nc,
kernel_size=kernel_size,
act=None,
)
self.mlp_beta = Convolution(
spatial_dims=spatial_dims,
in_channels=hidden_channels,
out_channels=norm_nc,
kernel_size=kernel_size,
act=None,
)

def forward(self, x: torch.Tensor, segmap: torch.Tensor) -> torch.Tensor:
"""
Args:
            x: input tensor with shape (B, C, [spatial-dimensions]) where C is the number of feature channels (norm_nc).
segmap: input segmentation map (B, C, [spatial-dimensions]) where C is the number of semantic channels.
The map will be interpolated to the dimension of x internally.
"""

# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)

# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode="nearest")
actv = self.mlp_shared(segmap)
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
out: torch.Tensor = normalized * (1 + gamma) + beta
return out
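
As a rough usage sketch (not part of the diff; the shapes and argument values below are illustrative assumptions), the SPADE block added above can be exercised like this:

```python
import torch

from monai.networks.blocks import SPADE

# Illustrative values: a 64-channel 2D feature map modulated by a 3-class one-hot map.
spade = SPADE(label_nc=3, norm_nc=64, spatial_dims=2, hidden_channels=64)

x = torch.randn(2, 64, 32, 32)        # activations to be normalised
segmap = torch.zeros(2, 3, 128, 128)  # one-hot semantic map at a different resolution
segmap[:, 0] = 1.0                    # label everything as class 0 for the demo

out = spade(x, segmap)                # segmap is resized to x's spatial size internally
print(out.shape)                      # torch.Size([2, 64, 32, 32])
```

Because the output is computed as `normalized * (1 + gamma) + beta`, the block reduces to the plain base normalisation wherever the predicted gamma and beta are zero.
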
2 changes: 2 additions & 0 deletions monai/networks/nets/__init__.py
@@ -106,6 +106,8 @@
seresnext50,
seresnext101,
)
from .spade_autoencoderkl import SPADEAutoencoderKL
from .spade_diffusion_model_unet import SPADEDiffusionModelUNet
from .swin_unetr import PatchMerging, PatchMergingV2, SwinUNETR
from .torchvision_fc import TorchVisionFCModel
from .transchex import BertAttention, BertMixedLayer, BertOutput, BertPreTrainedModel, MultiModal, Pooler, Transchex
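
Finally, a quick sanity check (again not part of the diff, and assuming a MONAI build that includes this PR) that the re-exports added in the two `__init__.py` files make the new classes importable:

```python
from monai.networks.blocks import SPADE
from monai.networks.nets import SPADEAutoencoderKL, SPADEDiffusionModelUNet

print(SPADE.__module__, SPADEAutoencoderKL.__module__, SPADEDiffusionModelUNet.__module__)
```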