Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 11 additions & 6 deletions examples/fluid_sim.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
from torch.autograd import Variable
import torch.nn as nn

import pdb

NSUBSTEPS = 1
DT = 1.0/60
STIFFNESS = 2.99e-11
Expand Down Expand Up @@ -264,6 +266,7 @@ def _interp_poses(self, last_poses, poses, t):

def _fix_static_collisions(self, locs, idxs, poses, scales, collisionDistance):
ret = locs

mtd = self.convsdfcol(ret, idxs, poses, scales) + collisionDistance
intersect = self.relu(
mtd) + self.relu(-self.relu(-(mtd - 0.5)) + 0.5)*0.0
Expand All @@ -281,14 +284,13 @@ def fixStaticCollisions(self, locs, new_locs, idxs, poses, scales):
#globals()[p] = getattr(self, "_"+p)*scale
val = getattr(self, "_"+p)*scale
exec("%s = val" % p)

delta = (new_locs - locs)/numStaticIterations
for _ in range(numStaticIterations):
locs = locs + delta
locs = self._fix_static_collisions(locs, idxs, poses,
scales, collisionDistance)
return locs

def forward(self, locs, vel, idxs, poses, last_poses, scales, extra_constraints=None):
"""
Compute one forward timestep of the fluid simulation. It takes as input the current
Expand Down Expand Up @@ -332,12 +334,15 @@ def forward(self, locs, vel, idxs, poses, last_poses, scales, extra_constraints=
globals()[p] = val
dt = self._dt
dt /= nSubsteps

if self.ones is None or self.ones.size()[:-1] != locs.size()[:-1]:
self.ones.data.resize_(locs.size()[:-1] + (1,)).fill_(1)
with torch.no_grad():
self.ones.resize_(locs.size()[:-1] + (1,)).fill_(1)


_poses = last_poses
for substep in range(nSubsteps):
# pdb.set_trace()
_last_poses = _poses
_poses = self._interp_poses(
last_poses, poses, 1.0*(1 + substep)/nSubsteps)
Expand All @@ -363,7 +368,6 @@ def forward(self, locs, vel, idxs, poses, last_poses, scales, extra_constraints=
new_locs, vel, pidxs, neighbors = self.coll(new_locs, vel)

for iteration in range(numIterations):

density = self.spiky1(new_locs, self.ones, neighbors)
nj = self.dspikyDnormd(new_locs, new_locs, neighbors)
ni = new_locs*self.dspiky1normd(new_locs, self.ones, neighbors)
Expand Down Expand Up @@ -471,7 +475,7 @@ def main():
spnet = FluidSim([torch.from_numpy(bounds_sdf["sdf"])], [
bounds_sdf["sdf_size"]], radius=0.1, ndim=3)
spnet = spnet.cuda()

locs = torch.rand(1, 1000, 3).cuda()
locs[:, :, 1] += 4.0
vel = torch.zeros(1, 1000, 3).cuda()
Expand All @@ -480,6 +484,7 @@ def main():
obj_poses = torch.zeros(1, 1, 7).cuda()
obj_poses[:, :, -1] = 1.0
while True:
# pdb.set_trace()
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Leftover debugging code?

locs, vel = spnet(locs, vel, idxs, obj_poses, obj_poses, scales)
print(locs[0, ...].mean(0))

Expand Down
Binary file not shown.
Binary file added python/SmoothParticleNets/.convsdf.py.swp
Binary file not shown.
Binary file added python/SmoothParticleNets/.convsp.py.swp
Binary file not shown.
8 changes: 5 additions & 3 deletions python/SmoothParticleNets/ImageProjection.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,11 +166,12 @@ def forward(self, locs, image, camera_pose, camera_rot, depth_mask=None):


class _ImageProjectionFunction(torch.autograd.Function):

def __init__(self, camera_fl):
super(_ImageProjectionFunction, self).__init__()
self.camera_fl = camera_fl


# @staticmethod
def forward(self, locs, image, depth_mask):
self.save_for_backward(locs, image, depth_mask)
batch_size = locs.size()[0]
Expand All @@ -187,7 +188,8 @@ def forward(self, locs, image, depth_mask):
self.camera_fl, depth_mask, ret)

return ret


# @staticmethod
def backward(self, grad_output):
locs, image, depth_mask = self.saved_tensors
ret_locs = grad_output.new(locs.size())
Expand Down
72 changes: 39 additions & 33 deletions python/SmoothParticleNets/ParticleCollision.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,7 @@ def forward(self, idxs, locs, data=None):
idxs = idxs.contiguous()

# Do the computation.
coll = _ReorderDataFunction(self.reverse)
locs, data = coll(idxs, locs, data)
locs, data = _ReorderDataFunction.apply(idxs, locs, data, self.reverse)
if no_data:
return locs
else:
Expand Down Expand Up @@ -181,9 +180,11 @@ def forward(self, locs, data=None, qlocs=None):
grid_dims = grid_dims.contiguous()

# Get the new hashgrid order.
hashorder = _HashgridOrderFunction(self.radius, self.max_grid_dim, self.cellIDs,
self.cuda_buffer)
idxs = hashorder(locs, lower_bounds, grid_dims)
# hashorder = _HashgridOrderFunction(self.radius, self.max_grid_dim, self.cellIDs,
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can probably just remove the commented out code if it's not needed anymore. I'm assuming this is updated to the new style of pytorch?

# self.cuda_buffer)
# idxs = hashorder(locs, lower_bounds, grid_dims)
idxs = _HashgridOrderFunction.apply(locs, lower_bounds, grid_dims, self.radius, self.max_grid_dim,
self.cellIDs, self.cuda_buffer)

# Reorder the locs and data.
if has_data:
Expand All @@ -192,10 +193,11 @@ def forward(self, locs, data=None, qlocs=None):
locs = self.reorder(idxs, locs)

# Do the collision computation.
coll = _ParticleCollisionFunction(self.radius, self.max_collisions, self.cellIDs,
self.cellStarts, self.cellEnds, self.include_self)
neighbors = coll(qlocs if qlocs is not None else locs,
locs, lower_bounds, grid_dims)
# coll = _ParticleCollisionFunction(self.radius, self.max_collisions, self.cellIDs,
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same here.

# self.cellStarts, self.cellEnds, self.include_self)
neighbors = _ParticleCollisionFunction.apply(qlocs if qlocs is not None else locs,
locs, lower_bounds, grid_dims, self.radius, self.max_collisions,
self.cellIDs, self.cellStarts, self.cellEnds, self.include_self)

if has_data:
return locs, data, idxs, neighbors
Expand All @@ -211,30 +213,30 @@ def forward(self, locs, data=None, qlocs=None):


class _HashgridOrderFunction(torch.autograd.Function):

def __init__(self, radius, max_grid_dim, cellIDs, cuda_buffer):
super(_HashgridOrderFunction, self).__init__()
self.radius = radius
self.max_grid_dim = max_grid_dim
self.cellIDs = cellIDs
self.cuda_buffer = cuda_buffer

def forward(self, locs, lower_bounds, grid_dims):
@staticmethod
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What's the thinking here on making this a static method? I'll admit, I haven't looked at this code in a while, but a static method wouldn't have a self argument. And I see self being used for save_for_backward. Although it looks like you removed the rest of the self uses.

def forward(self, locs, lower_bounds, grid_dims, radius, max_grid_dim, cellIDs, cuda_buffer):
self.save_for_backward(locs, lower_bounds, grid_dims)
batch_size = locs.size()[0]
N = locs.size()[1]
idxs = locs.new(batch_size, N)
self.cellIDs.fill_(0)
cellIDs.fill_(0)
if locs.is_cuda:
if not _extc.spnc_hashgrid_order(locs, lower_bounds, grid_dims,
self.cellIDs, idxs, self.cuda_buffer, self.radius):
cellIDs, idxs, cuda_buffer, radius):
raise Exception("Cuda error")
else:
_ext.spn_hashgrid_order(locs, lower_bounds, grid_dims,
self.cellIDs, idxs, self.radius)
_ext.spn_hashgrid_order(locs, lower_bounds, grid_dims, cellIDs, idxs, radius)

return idxs

@staticmethod
def backward(self, grad_idxs):
locs, lower_bounds, grid_dims = self.saved_tensors
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It looks like the saved tensors from forward are grabbed here for the backward pass. Without self I'm not sure where those would come from. Did pytorch change how this works?

return (
Expand All @@ -255,25 +257,28 @@ def __init__(self, radius, max_collisions, cellIDs, cellStarts, cellEnds,
self.cellEnds = cellEnds
self.include_self = include_self

def forward(self, qlocs, locs, lower_bounds, grid_dims):
self.save_for_backward(qlocs, locs, lower_bounds, grid_dims)
@staticmethod
def forward(self, qlocs, locs, lower_bounds, grid_dims, radius, max_collisions, cellIDs, cellStarts, cellEnds, include_self):
self.save_for_backward(qlocs, locs, lower_bounds, grid_dims, radius, max_collisions, cellIDs, cellStarts,
cellEnds, include_self)
batch_size = locs.size()[0]
M = qlocs.size()[1]
neighbors = locs.new(batch_size, M, self.max_collisions)
neighbors = locs.new(batch_size, M, max_collisions)
neighbors.fill_(-1)
self.cellStarts.fill_(0)
self.cellEnds.fill_(0)
cellStarts.fill_(0)
cellEnds.fill_(0)
if locs.is_cuda:
if not _extc.spnc_compute_collisions(qlocs, locs, lower_bounds, grid_dims, self.cellIDs,
self.cellStarts, self.cellEnds, neighbors, self.radius, self.radius,
self.include_self):
if not _extc.spnc_compute_collisions(qlocs, locs, lower_bounds, grid_dims, cellIDs,
cellStarts, cellEnds, neighbors, radius, radius,
include_self):
raise Exception("Cuda error")
else:
_ext.spn_compute_collisions(qlocs, locs, lower_bounds, grid_dims, self.cellIDs,
self.cellStarts, self.cellEnds, neighbors, self.radius, self.radius, self.include_self)

return neighbors


@staticmethod
def backward(self, grad_neighbors):
qlocs, locs, lower_bounds, grid_dims = self.saved_tensors
return (
Expand All @@ -288,27 +293,28 @@ class _ReorderDataFunction(torch.autograd.Function):
def __init__(self, reverse):
super(_ReorderDataFunction, self).__init__()
self.reverse = reverse

def forward(self, idxs, locs, data):
self.save_for_backward(idxs,)

@staticmethod
def forward(self, idxs, locs, data, reverse):
self.save_for_backward(idxs,reverse)
nlocs = locs.new(*locs.size())
ndata = locs.new(*data.size())
if locs.is_cuda:
if not _extc.spnc_reorder_data(locs, data, idxs, nlocs, ndata, self.reverse):
if not _extc.spnc_reorder_data(locs, data, idxs, nlocs, ndata, reverse):
raise Exception("Cuda error")
else:
_ext.spn_reorder_data(locs, data, idxs, nlocs, ndata, self.reverse)
_ext.spn_reorder_data(locs, data, idxs, nlocs, ndata, reverse)
return nlocs, ndata

@staticmethod
def backward(self, grad_locs, grad_data):
idxs, = self.saved_tensors
idxs,reverse = self.saved_tensors
nlocs = grad_locs.new(*grad_locs.size())
ndata = grad_data.new(*grad_data.size())
if grad_locs.is_cuda:
if not _extc.spnc_reorder_data(grad_locs, grad_data, idxs, nlocs,
ndata, 1 - self.reverse):
ndata, 1 - reverse):
raise Exception("Cuda error")
else:
_ext.spn_reorder_data(grad_locs, grad_data, idxs, nlocs, ndata,
1 - self.reverse)
1 - reverse)
return idxs.new(idxs.size()).fill_(0), nlocs, ndata
7 changes: 4 additions & 3 deletions python/SmoothParticleNets/ParticleProjection.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,14 +163,15 @@ def forward(self, locs, camera_pose, camera_rot, depth_mask=None):


class _ParticleProjectionFunction(torch.autograd.Function):

def __init__(self, camera_fl, camera_size, filter_std, filter_scale):
super(_ParticleProjectionFunction, self).__init__()
self.camera_fl = camera_fl
self.camera_size = camera_size
self.filter_std = filter_std
self.filter_scale = filter_scale


# @staticmethod
def forward(self, locs, depth_mask):
self.save_for_backward(locs, depth_mask)
batch_size = locs.size()[0]
Expand All @@ -185,7 +186,7 @@ def forward(self, locs, depth_mask):
self.filter_std, self.filter_scale, depth_mask, ret)

return ret

# @staticmethod
def backward(self, grad_output):
locs, depth_mask = self.saved_tensors
ret_locs = grad_output.new(locs.size())
Expand Down
1 change: 1 addition & 0 deletions python/SmoothParticleNets/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
from os.path import dirname, basename, isfile
import glob
import sys
import torch
sys.path.append(dirname(__file__))
modules = glob.glob(dirname(__file__)+"/*.py")
__all__ = [basename(f)[:-3] for f in modules if isfile(f)]
Expand Down
Loading