Skip to content
This repository was archived by the owner on Sep 25, 2023. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions python/cusignal/diff/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    from cusignal.diff.resample import ResamplePoly
except ImportError:
    # resample.py imports torch at module level, so a missing PyTorch
    # surfaces here as ImportError.  Catch only ImportError (was a bare
    # `except:`, which also hid unrelated bugs, SystemExit, etc.) and
    # degrade gracefully so the rest of cusignal stays importable.
    msg = """
    Warning - Could not find PyTorch. Please install to use
    differentiable functions in cuSignal.
    """
    print(msg)
127 changes: 127 additions & 0 deletions python/cusignal/diff/resample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
import torch
import cupy as cp
from torch.autograd import Function
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
from math import gcd
from cusignal import resample_poly
from cusignal import choose_conv_method
from cusignal import correlate


class FuncResamplePoly(Function):
    """Autograd-aware polyphase resampling.

    ``forward`` delegates to ``cusignal.resample_poly``; ``backward``
    produces gradients for both the input signal and the filter
    coefficients by correlating an upsampled copy of the incoming
    gradient.
    """

    @staticmethod
    def get_start_index(length):
        # Offset used to align the gradient inside the padded buffer in
        # backward(); degenerates to 0 for filters of length <= 2.
        if length <= 2:
            return 0
        return (length - 1) // 2

    @staticmethod
    def best_corr(sig1, sig2, mode):
        # Let cusignal pick the faster correlation method (direct vs FFT)
        # for these particular sizes, then run it.
        method = choose_conv_method(sig1, sig2, mode=mode)
        out = correlate(sig1, sig2, mode=mode, method=method)
        return out

    @staticmethod
    def forward(ctx, x, filter_coeffs, up, down):
        # NOTE: up/down arrive as 1-element tensors (see ResamplePoly)
        # because Function.apply only forwards tensors.
        device = x.device.type
        x = x.detach()
        filter_coeffs = filter_coeffs.detach()
        up = up.detach()
        down = down.detach()

        # assumes x and filter_coeffs are 1-D — TODO confirm with callers
        x_size = x.shape[0]
        filt_size = filter_coeffs.shape[0]
        up = int(up[0])
        down = int(down[0])
        # Reduce the ratio so e.g. up=10, down=10 hits the passthrough case.
        ud_gcd = gcd(up, down)
        up = up // ud_gcd
        down = down // ud_gcd

        # gpupath switches cusignal between cupy (GPU) and numpy (CPU).
        if 'cuda' in device:
            gpupath = True
        else:
            gpupath = False

        if (up == 1 and down == 1):
            # Identity resampling: skip the filter entirely.
            out_x = x
            inverse_size = x.shape[0]
            x_up = None
        else:
            # resample_poly expects the window as a cupy/numpy array
            # matching gpupath, not a torch tensor.
            if gpupath:
                window = cp.array(filter_coeffs)
            else:
                window = filter_coeffs.numpy()
            out_x = resample_poly(x, up, down, window=window,
                                  gpupath=gpupath)
            # Length of the padded buffer used by backward()'s correlations.
            inverse_size = up * x_size + filt_size - 1
            # Zero-stuffed, gain-compensated upsampled input, saved for the
            # filter-gradient correlation in backward().
            x_up = torch.zeros(up * x_size, device=device, dtype=x.dtype)
            x_up[::up] = up * x

        # Scalars are smuggled through save_for_backward as 1-element
        # tensors; x_up may be None in the passthrough case.
        ctx.save_for_backward(torch.Tensor([x_size]), filter_coeffs,
                              torch.Tensor([up]),
                              torch.Tensor([down]),
                              torch.Tensor([inverse_size]),
                              torch.Tensor([len(out_x)]), x_up)
        # out_x may be a cupy array; round-trip through numpy to build the
        # torch output on the caller's device.
        out = torch.as_tensor(cp.asnumpy(out_x), device=device)
        return(out)

    @staticmethod
    def backward(ctx, gradient):
        gradient = gradient.detach()
        x_size, filter_coeffs, up, down, inverse_size, out_len, x_up \
            = ctx.saved_tensors

        # Unpack the scalar metadata saved as 1-element tensors in forward().
        device = gradient.device.type
        x_size = int(x_size[0])
        filt_size = filter_coeffs.shape[0]
        up = int(up[0])
        down = int(down[0])
        start = FuncResamplePoly.get_start_index(filt_size)
        inverse_size = int(inverse_size)
        filter_coeffs = filter_coeffs.type(gradient.dtype)

        if (up == 1 and down == 1):
            # Passthrough: the output equals the input, so the input
            # gradient is the incoming gradient and the filter (unused in
            # forward) receives a zero gradient.
            # J_x up \times J_x conv
            out_x = gradient
            # J_f conv
            out_f = torch.zeros(filter_coeffs.shape[0],
                                device=device,
                                dtype=filter_coeffs.dtype)
        else:
            # Spread the incoming gradient back onto the pre-decimation
            # grid: every down-th slot, offset by the filter alignment.
            gradient_up = torch.zeros(inverse_size,
                                      device=device,
                                      dtype=gradient.dtype)
            gradient_up[start: start + down * gradient.shape[0]: down] =\
                gradient

            # Input gradient: correlate with the filter, then undo the
            # zero-stuffing (keep every up-th sample, scaled by up).
            out_x = FuncResamplePoly.best_corr(gradient_up,
                                               filter_coeffs,
                                               mode='valid')
            out_x = up * out_x[::up]
            # Filter gradient: correlate with the saved upsampled input.
            out_f = FuncResamplePoly.best_corr(gradient_up, x_up,
                                               mode='valid')

        # Trim to the original sizes and hand back torch tensors.
        out_x = torch.as_tensor(out_x[:x_size],
                                device=device)
        out_f = torch.as_tensor(out_f[:filter_coeffs.shape[0]],
                                device=device)
        # None gradients for the non-learnable up/down factor tensors.
        return(out_x, out_f, None, None)


class ResamplePoly(Module):
    """torch.nn wrapper around cusignal's polyphase resampling.

    Parameters
    ----------
    up, down : int
        Up- and down-sampling factors.
    filter_coeffs : torch.Tensor
        FIR window coefficients; registered as a learnable ``Parameter``.
    """

    def __init__(self, up, down, filter_coeffs):
        super().__init__()
        # Factors are stored as 1-element tensors because
        # FuncResamplePoly.apply only forwards tensor arguments.
        self.up = torch.Tensor([up])
        self.down = torch.Tensor([down])
        self.filter_coeffs = Parameter(filter_coeffs)

    def forward(self, x):
        """Resample ``x`` by up/down using the learnable filter."""
        return FuncResamplePoly.apply(x, self.filter_coeffs, self.up,
                                      self.down)

    @classmethod
    def output_size(cls, input_size, up, down):
        """Return the resampled length: ceil(input_size * up / down).

        Fix: the first parameter of this ``@classmethod`` was misnamed
        ``self``; it receives the class and is now named ``cls``.
        """
        out_size = input_size * up
        out_size = out_size // down + bool(out_size % down)
        return out_size
87 changes: 87 additions & 0 deletions python/cusignal/test/test_diff.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import pytest
import cupy as cp
from numpy import sqrt
from numpy import allclose
from cusignal import resample_poly


# The cusignal.diff subpackage requires PyTorch; when it (or torch itself)
# is not installed, skip every test in this module instead of erroring.
try:
    import torch
    from cusignal.diff import ResamplePoly
    from torch.autograd.gradcheck import gradcheck
except ImportError:
    pytest.skip(f"skipping pytorch dependant tests in {__file__}",
                allow_module_level=True)


@pytest.mark.parametrize("device", ['cpu', 'cuda'])
@pytest.mark.parametrize("up", [1, 3, 10])
@pytest.mark.parametrize("down", [1, 7, 10])
@pytest.mark.parametrize("filter_size", [1, 10])
def test_gradcheck(device, up, down, filter_size,
                   eps=1e-3, atol=1e-1, rtol=-1):
    '''
    Numerically checks ResamplePoly's backward pass with
    torch.autograd.gradcheck.
    '''
    taps = torch.randn(filter_size, requires_grad=True,
                       dtype=torch.double, device=device)
    signal = torch.randn(100, dtype=torch.double, requires_grad=True,
                         device=device)
    resampler = ResamplePoly(up, down, taps)
    # A positive rtol selects relative tolerance; otherwise fall back to
    # the absolute tolerance.
    tolerance = {"rtol": rtol} if rtol > 0 else {"atol": atol}
    gradcheck(resampler, signal, eps=eps, raise_exception=True, **tolerance)


@pytest.mark.parametrize("device", ['cpu', 'cuda'])
@pytest.mark.parametrize("x_size", [30, 100])
@pytest.mark.parametrize("filter_size", [5, 20])
@pytest.mark.parametrize("up", [1, 10])
@pytest.mark.parametrize("down", [1, 7, 10])
def test_forward(device, x_size, up, down, filter_size):
    '''
    Verifies that the module's forward pass agrees with cusignal's
    resample_poly on randomly generated examples.

    gpupath = True accepts cupy typed windows.
    gpupath = False accepts numpy typed windows.
    '''
    gpupath = 'cuda' in device
    x = torch.randn(x_size, device=device)
    window = torch.randn(filter_size, device=device)
    # The module requires a torch tensor window
    module = ResamplePoly(up, down, window)
    # resample_poly requires a cupy or numpy array window
    window = window.cpu().numpy()
    if gpupath:
        window = cp.array(window)
    bench_resample = resample_poly(x, up, down, window=window,
                                   gpupath=gpupath)
    our_resample = module.forward(x)
    # Fix: use a pytest-idiomatic assert instead of raising a bare
    # Exception, so failures report as assertion errors with context.
    assert allclose(bench_resample, our_resample.detach().cpu(),
                    atol=1e-4), "Module does not agree with resample_poly"


def test_backprop(device='cuda', iters=100, filter_size=10, up=7,
                  down=3, x_size=1000):
    '''
    Demonstration of how to use ResamplePoly with back prop.

    NOTE(review): ``iters`` is currently unused; kept for signature
    compatibility.
    '''
    # Fix: honor the ``device`` argument — the input was hard-coded to
    # 'cuda' regardless of the parameter.
    x = torch.linspace(-sqrt(10), sqrt(10), x_size, device=device)
    y = torch.sin(x)
    window = torch.randn(filter_size, device=device)
    model = torch.nn.Sequential(
        ResamplePoly(up, down, window),
        torch.nn.Linear(in_features=ResamplePoly.output_size(x_size, up, down),
                        out_features=1000).to(device)
    )
    loss_fn = torch.nn.MSELoss(reduction='sum')
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    loss.backward()
    # Fix: actually assert something — the gradient must have reached the
    # learnable filter for the demo to prove back prop works.
    assert model[0].filter_coeffs.grad is not None
1 change: 0 additions & 1 deletion python/cusignal/test/test_radartools.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import cupy as cp
import pytest
from numpy import vectorize

from cusignal.radartools import ca_cfar, cfar_alpha
from cusignal.testing.utils import array_equal

Expand Down