diff --git a/segmentation_models_pytorch/base/heads.py b/segmentation_models_pytorch/base/heads.py
index fbc939ca..992f77d3 100644
--- a/segmentation_models_pytorch/base/heads.py
+++ b/segmentation_models_pytorch/base/heads.py
@@ -1,4 +1,5 @@
 import torch.nn as nn
+
 from .modules import Activation
 
 
@@ -10,7 +11,7 @@ def __init__(
             in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2
         )
         upsampling = (
-            nn.UpsamplingBilinear2d(scale_factor=upsampling)
+            nn.Upsample(mode="bilinear", scale_factor=upsampling, align_corners=True)
             if upsampling > 1
             else nn.Identity()
         )
diff --git a/segmentation_models_pytorch/decoders/deeplabv3/decoder.py b/segmentation_models_pytorch/decoders/deeplabv3/decoder.py
index 6a801a70..179985fd 100644
--- a/segmentation_models_pytorch/decoders/deeplabv3/decoder.py
+++ b/segmentation_models_pytorch/decoders/deeplabv3/decoder.py
@@ -31,7 +31,7 @@
 """
 
 from collections.abc import Iterable, Sequence
-from typing import Literal, List
+from typing import List, Literal
 
 import torch
 from torch import nn
@@ -105,7 +105,9 @@ def __init__(
         )
 
         scale_factor = 4 if output_stride == 16 and encoder_depth > 3 else 2
-        self.up = nn.UpsamplingBilinear2d(scale_factor=scale_factor)
+        self.up = nn.Upsample(
+            mode="bilinear", scale_factor=scale_factor, align_corners=True
+        )
 
         highres_in_channels = encoder_channels[2]
         highres_out_channels = 48  # proposed by authors of paper
diff --git a/segmentation_models_pytorch/losses/_functional.py b/segmentation_models_pytorch/losses/_functional.py
index 07efd7f4..74a63a8d 100644
--- a/segmentation_models_pytorch/losses/_functional.py
+++ b/segmentation_models_pytorch/losses/_functional.py
@@ -226,7 +226,7 @@ def wing_loss(
     idx_smaller = diff_abs < width
     idx_bigger = diff_abs >= width
 
-    loss[idx_smaller] = width * torch.log(1 + diff_abs[idx_smaller] / curvature)
+    loss[idx_smaller] = width * torch.log1p(diff_abs[idx_smaller] / curvature)
 
     C = width - width * math.log(1 + width / curvature)
     loss[idx_bigger] = loss[idx_bigger] - C
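
Both substitutions are intended to be behavior-preserving: nn.UpsamplingBilinear2d is a deprecated wrapper around bilinear nn.Upsample with align_corners=True, and torch.log1p(x) evaluates log(1 + x) with better numerical accuracy for small x. Below is a minimal sketch checking both equivalences; the tensor shapes, scale factor, and magnitudes are illustrative assumptions, not taken from the patch.

import torch
import torch.nn as nn

x = torch.randn(1, 3, 8, 8)  # illustrative feature map

# nn.UpsamplingBilinear2d always uses align_corners=True internally,
# so the explicit nn.Upsample form should produce the same output.
old_up = nn.UpsamplingBilinear2d(scale_factor=2)
new_up = nn.Upsample(mode="bilinear", scale_factor=2, align_corners=True)
assert torch.allclose(old_up(x), new_up(x))

# torch.log1p(v) computes log(1 + v) directly, avoiding the precision loss
# that comes from forming 1 + v first when v is very small.
v = torch.rand(16) * 1e-6
assert torch.allclose(torch.log1p(v), torch.log(1 + v))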