# type: ignore
"""
SparseUNet Driven by SpConv.
Adapted from: https://github.com/Pointcept/Pointcept
This module requires the installation of the following packages:
- spconv: https://github.com/traveller59/spconv?tab=readme-ov-file#spconv-spatially-sparse-convolution-library
Original Author: Xiaoyang Wu ([email protected])
Please cite their work if you use the following code in your research paper.
"""
import math
import warnings
from collections import OrderedDict
from functools import partial
import spconv.pytorch as spconv
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from torch_geometric.utils import scatter
# Adapted from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/weight_init.py
def _trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std) # noqa: E741
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
    NOTE: this impl is similar to PyTorch's nn.init.trunc_normal_; the bounds
    [a, b] are applied to the distribution *after* the mean/std scaling, so a
    and b should be given in the same units as mean and std.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
"""
with torch.no_grad():
return _trunc_normal_(tensor, mean, std, a, b)
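# A minimal usage sketch (illustration only): with the semantics above, the
# default bounds clamp the *scaled* samples to [-2.0, 2.0], so for std=0.02 the
# truncation is almost never active; an impl that bounded the standard normal
# before scaling would instead clamp to [-0.04, 0.04].
#
#     w = torch.empty(768, 768)
#     trunc_normal_(w, std=0.02)  # in-place; also returns the tensor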
# Adapted from: https://github.com/Pointcept/Pointcept/blob/main/pointcept/models/utils/misc.py
@torch.inference_mode()
def offset2bincount(offset):
return torch.diff(offset, prepend=torch.tensor([0], device=offset.device, dtype=torch.long))
@torch.inference_mode()
def offset2batch(offset):
bincount = offset2bincount(offset)
return torch.arange(len(bincount), device=offset.device, dtype=torch.long).repeat_interleave(bincount)
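# Offset semantics (a small worked example): an "offset" is the cumulative point
# count per batch element, so offset = [2, 5] describes two clouds with 2 and 3
# points respectively.
#
#     offset2bincount(torch.tensor([2, 5]))  # -> tensor([2, 3])
#     offset2batch(torch.tensor([2, 5]))     # -> tensor([0, 0, 1, 1, 1])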
# Adapted from: https://github.com/Pointcept/Pointcept/blob/main/pointcept/models/sparse_unet/spconv_unet_v1m1_base.py
class BasicBlock(spconv.SparseModule):
expansion = 1
def __init__(
self,
in_channels,
embed_channels,
stride=1,
norm_fn=None,
indice_key=None,
bias=False,
):
super().__init__()
assert norm_fn is not None
if in_channels == embed_channels:
self.proj = spconv.SparseSequential(nn.Identity())
else:
self.proj = spconv.SparseSequential(
spconv.SubMConv3d(in_channels, embed_channels, kernel_size=1, bias=False),
norm_fn(embed_channels),
)
self.conv1 = spconv.SubMConv3d(
in_channels,
embed_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn1 = norm_fn(embed_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = spconv.SubMConv3d(
embed_channels,
embed_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn2 = norm_fn(embed_channels)
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = out.replace_feature(self.bn1(out.features))
out = out.replace_feature(self.relu(out.features))
out = self.conv2(out)
out = out.replace_feature(self.bn2(out.features))
out = out.replace_feature(out.features + self.proj(residual).features)
out = out.replace_feature(self.relu(out.features))
return out
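# A construction sketch (hypothetical values, mirroring how SpUNetBase uses the
# block): two 3x3x3 submanifold convs plus a residual; the identity path gets a
# 1x1x1 projection only when the channel counts differ.
#
#     norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
#     blk = BasicBlock(32, 64, norm_fn=norm_fn, indice_key="subm1")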
class SpUNetBase(nn.Module):
def __init__(
self,
in_channels: int,
num_classes: int,
base_channels=32,
channels=(32, 64, 128, 256, 256, 128, 96, 96),
layers=(2, 3, 4, 6, 2, 2, 2, 2),
cls_mode=False,
):
super().__init__()
assert len(layers) % 2 == 0
assert len(layers) == len(channels)
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.base_channels = base_channels
        self.channels = channels
        self.num_stages = len(layers) // 2
        self.cls_mode = cls_mode
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        block = BasicBlock
        # Stem: lift raw point features to base_channels with a submanifold conv
        # (restored from the Pointcept reference; forward() relies on it).
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(in_channels, base_channels, kernel_size=5, padding=1, bias=False, indice_key="stem"),
            norm_fn(base_channels),
            nn.ReLU(),
        )
        enc_channels = base_channels
        dec_channels = channels[-1]
        self.down = nn.ModuleList()
        self.up = nn.ModuleList()
        self.enc = nn.ModuleList()
        self.dec = nn.ModuleList() if not self.cls_mode else None
for s in range(self.num_stages):
# encode num_stages
self.down.append(
spconv.SparseSequential(
spconv.SparseConv3d(
enc_channels,
channels[s],
kernel_size=2,
stride=2,
bias=False,
indice_key=f"spconv{s + 1}",
),
norm_fn(channels[s]),
nn.ReLU(inplace=True),
)
)
self.enc.append(
spconv.SparseSequential(
OrderedDict(
[
# (f"block{i}", block(enc_channels, channels[s], norm_fn=norm_fn, indice_key=f"subm{s + 1}"))
# if i == 0 else
(
f"block{i}",
block(
channels[s],
channels[s],
norm_fn=norm_fn,
indice_key=f"subm{s + 1}",
),
)
for i in range(layers[s])
]
)
)
)
if not self.cls_mode:
# decode num_stages
self.up.append(
spconv.SparseSequential(
spconv.SparseInverseConv3d(
channels[len(channels) - s - 2],
dec_channels,
kernel_size=2,
bias=False,
indice_key=f"spconv{s + 1}",
),
norm_fn(dec_channels),
nn.ReLU(inplace=True),
)
)
self.dec.append(
spconv.SparseSequential(
OrderedDict(
[
(
(
f"block{i}",
block(
dec_channels + enc_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
if i == 0
else (
f"block{i}",
block(
dec_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
)
for i in range(layers[len(channels) - s - 1])
]
)
)
)
enc_channels = channels[s]
dec_channels = channels[len(channels) - s - 2]
final_in_channels = channels[-1] if not self.cls_mode else channels[self.num_stages - 1]
self.final = (
spconv.SubMConv3d(final_in_channels, num_classes, kernel_size=1, padding=1, bias=True)
if num_classes > 0
else spconv.Identity()
)
self.apply(self._init_weights)
@staticmethod
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, spconv.SubMConv3d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, points: torch.Tensor):
B, N, D = points.shape
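        # Voxelize: shift coordinates to be non-negative per cloud, then quantize
        # with a hard-coded 0.01 voxel size (inherited here; tune to your data's scale).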
# TODO: Check if this is the correct way to normalize the points
grid_coord = torch.div(
points[..., :3] - points[..., :3].min(-2, keepdim=True).values, 0.01, rounding_mode="trunc"
).int()
feat = rearrange(points, "B N D -> (B N) D")
grid_coord = rearrange(grid_coord, "B N D -> (B N) D")
batch = torch.arange(B, device=points.device, dtype=torch.int).repeat_interleave(N)
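        # Pad the spatial extent (+96, as in the Pointcept reference), presumably
        # so repeated stride-2 downsampling stays within the grid.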
sparse_shape = torch.add(torch.max(grid_coord, dim=0).values, 96).tolist()
x = spconv.SparseConvTensor(
features=feat,
indices=torch.cat([batch.unsqueeze(-1).int(), grid_coord.int()], dim=1).contiguous(),
spatial_shape=sparse_shape,
            batch_size=B,
)
x = self.conv_input(x)
skips = [x]
# enc forward
for s in range(self.num_stages):
x = self.down[s](x)
x = self.enc[s](x)
skips.append(x)
x = skips.pop(-1)
if not self.cls_mode:
# dec forward
for s in reversed(range(self.num_stages)):
x = self.up[s](x)
skip = skips.pop(-1)
x = x.replace_feature(torch.cat((x.features, skip.features), dim=1))
x = self.dec[s](x)
x = self.final(x)
if self.cls_mode:
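            # Global mean-pool per batch element: indices[:, 0] holds the batch
            # index of each active voxel.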
x = x.replace_feature(scatter(x.features, x.indices[:, 0].long(), reduce="mean", dim=0))
return x.features
class SpUNetCls(nn.Module):
def __init__(
self,
in_channels: int,
num_classes: int,
base_channels=32,
channels=(32, 64, 128, 256, 256, 128, 96, 96),
layers=(2, 3, 4, 6, 2, 2, 2, 2),
):
super().__init__()
self.model = SpUNetBase(in_channels, num_classes, base_channels, channels, layers, cls_mode=True)
def forward(self, points: torch.Tensor):
x = self.model(points)
return F.log_softmax(x, dim=1)
class SpUNetSeg(nn.Module):
def __init__(
self,
in_channels: int,
num_classes: int,
base_channels=32,
channels=(32, 64, 128, 256, 256, 128, 96, 96),
layers=(2, 3, 4, 6, 2, 2, 2, 2),
):
super().__init__()
self.model = SpUNetBase(in_channels, num_classes, base_channels, channels, layers, cls_mode=False)
def forward(self, points: torch.Tensor):
x = self.model(points)
x = x.view(points.shape[:-1] + (x.shape[-1],))
return F.log_softmax(x, dim=-1)
__all__ = ["SpUNetBase", "SpUNetCls", "SpUNetSeg"]
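if __name__ == "__main__":
    # Minimal smoke-test sketch (assumptions: spconv built with CUDA support and
    # a CUDA GPU available; spconv's sparse 3D kernels generally require it).
    device = "cuda"
    points = torch.rand(2, 4096, 6, device=device)  # (B, N, D); dims 0-2 are xyz
    seg = SpUNetSeg(in_channels=6, num_classes=13).to(device)
    print(seg(points).shape)  # expected: torch.Size([2, 4096, 13])
    cls = SpUNetCls(in_channels=6, num_classes=10).to(device)
    print(cls(points).shape)  # expected: torch.Size([2, 10])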