diff --git a/aimnet2calc/calculator.py b/aimnet2calc/calculator.py index 011a4b2..9f9dcfc 100644 --- a/aimnet2calc/calculator.py +++ b/aimnet2calc/calculator.py @@ -1,8 +1,9 @@ import torch from torch import nn, Tensor from typing import Union, Dict, Any -from aimnet2calc.nblist import nblist_torch_cluster, nblists_torch_pbc +from aimnet2calc.nblist import calc_nbmat_dual, calc_nbmat_pbc from aimnet2calc.models import get_model_path +import warnings class AIMNet2Calculator: @@ -42,6 +43,7 @@ def __init__(self, model: Union[str, torch.nn.Module] = 'aimnet2'): self.cutoff = self.model.cutoff self.lr = hasattr(self.model, 'cutoff_lr') self.cutoff_lr = getattr(self.model, 'cutoff_lr', float('inf')) + self.max_density = 0.2 # indicator if input was flattened self._batch = None @@ -54,7 +56,7 @@ def __init__(self, model: Union[str, torch.nn.Module] = 'aimnet2'): self._coulomb_method = coul_methods.pop() else: self._coulomb_method = None - + def __call__(self, *args, **kwargs): return self.eval(*args, **kwargs) @@ -93,7 +95,7 @@ def prepare_input(self, data: Dict[str, Any]) -> Dict[str, Tensor]: if data['mol_idx'][-1] > 0: raise NotImplementedError('PBC with multiple molecules is not implemented yet.') if self._coulomb_method == 'simple': - print('Switching to DSF Coulomb for PBC') + warnings.warn('Switching to DSF Coulomb for PBC') self.set_lrcoulomb_method('dsf') data = self.make_nbmat(data) data = self.pad_input(data) @@ -133,7 +135,7 @@ def mol_flatten(self, data: Dict[str, Tensor]) -> Dict[str, Tensor]: else: self._batch = None if 'mol_idx' not in data: - data['mol_idx'] = torch.zeros(data['coord'].shape[0], device=self.device) + data['mol_idx'] = torch.zeros(data['coord'].shape[0], dtype=torch.long, device=self.device) return data def mol_unflatten(self, data: Dict[str, Tensor], batch=None) -> Dict[str, Tensor]: @@ -147,22 +149,54 @@ def mol_unflatten(self, data: Dict[str, Tensor], batch=None) -> Dict[str, Tensor def make_nbmat(self, data: Dict[str, Tensor]) -> 
Dict[str, Tensor]: if 'cell' in data and data['cell'] is not None: assert data['cell'].ndim == 2, 'Expected 2D tensor for cell' - if 'nbmat' not in data: - data['coord'] = move_coord_to_cell(data['coord'], data['cell']) - mat_idxj, mat_pad, mat_S = nblists_torch_pbc(data['coord'], data['cell'], self.cutoff) - data['nbmat'], data['nb_pad_mask'], data['shifts'] = mat_idxj, mat_pad, mat_S - if self.lr: - if 'nbmat_lr' not in data: - assert self.cutoff_lr < torch.inf, 'Long-range cutoff must be finite for PBC' - data['nbmat_lr'], data['nb_pad_mask_lr'], data['shifts_lr'] = nblists_torch_pbc(data['coord'], data['cell'], self.cutoff_lr) - data['cutoff_lr'] = torch.tensor(self.cutoff_lr, device=self.device) - else: - if 'nbmat' not in data: - data['nbmat'] = nblist_torch_cluster(data['coord'], self.cutoff, data['mol_idx'], max_nb=128) - if self.lr: - if 'nbmat_lr' not in data: - data['nbmat_lr'] = nblist_torch_cluster(data['coord'], self.cutoff_lr, data['mol_idx'], max_nb=1024) + data['coord'] = move_coord_to_cell(data['coord'], data['cell']) + while True: + try: + maxnb = int(self.max_density * 4/3 * 3.14159 * self.cutoff ** 3) + mat_idxj, mat_S = calc_nbmat_pbc(data['coord'], data['cell'], self.cutoff, maxnb) + mat_pad = mat_idxj == data['coord'].shape[0] + data['nbmat'], data['nb_pad_mask'], data['shifts'] = mat_idxj, mat_pad, mat_S + break + except ValueError: + self.max_density *= 1.5 + if self.lr: + assert self.cutoff_lr < torch.inf, 'Long-range cutoff must be finite for PBC' + while True: + try: + maxnb = int(self.max_density * 4/3 * 3.14159 * self.cutoff_lr ** 3) + mat_idxj, mat_S = calc_nbmat_pbc(data['coord'], data['cell'], self.cutoff_lr, maxnb) + mat_pad = mat_idxj == data['coord'].shape[0] + data['nbmat_lr'], data['nb_pad_mask_lr'], data['shifts_lr'] = mat_idxj, mat_pad, mat_S data['cutoff_lr'] = torch.tensor(self.cutoff_lr, device=self.device) + break + except ValueError: + self.max_density *= 1.5 + else: + while True: + try: + cutoff1 = self.cutoff + maxnb1 
= int(self.max_density * 4/3 * 3.14159 * cutoff1 ** 3) + if self.lr: + cutoff2 = self.cutoff_lr + if cutoff2 == float('inf'): + maxnb2 = data['coord'].shape[0] - 1 + else: + maxnb2 = int(self.max_density * 4/3 * 3.14159 * cutoff2 ** 3) + else: + cutoff2 = None + maxnb2 = None + maxnb1 = min(maxnb1, data['coord'].shape[0] - 1) + maxnb2 = min(maxnb2, data['coord'].shape[0] - 1) if maxnb2 is not None else None + nbmat1, nbmat2 = calc_nbmat_dual(data['coord'].contiguous(), (cutoff1, cutoff2), (maxnb1, maxnb2), data.get('mol_idx').contiguous()) + data['nbmat'] = nbmat1 + if self.lr: + data['nbmat_lr'] = nbmat2 + data['cutoff_lr'] = torch.tensor(cutoff2, device=self.device) + break + + except ValueError: + self.max_density *= 1.5 + return data def pad_input(self, data: Dict[str, Tensor]) -> Dict[str, Tensor]: diff --git a/aimnet2calc/nblist.py b/aimnet2calc/nblist.py index 623ff6e..9d96c99 100644 --- a/aimnet2calc/nblist.py +++ b/aimnet2calc/nblist.py @@ -1,155 +1,337 @@ +from typing import Optional, Tuple import torch from torch import Tensor -from typing import Optional, Tuple -from torch_cluster import radius_graph import numba -try: - # optionaly use numba cuda +import warnings + +if torch.cuda.is_available(): import numba.cuda + assert numba.cuda.is_available(), "PyTorch CUDA is available, but Numba CUDA is not available."
_numba_cuda_available = True -except ImportError: +else: _numba_cuda_available = False -import numpy as np -@numba.njit(cache=True) -def sparse_nb_to_dense_half(idx, natom, max_nb): - dense_nb = np.full((natom+1, max_nb), natom, dtype=np.int32) - last_idx = np.zeros((natom,), dtype=np.int32) - for k in range(idx.shape[0]): - i, j = idx[k] - il, jl = last_idx[i], last_idx[j] - dense_nb[i, il] = j - dense_nb[j, jl] = i - last_idx[i] += 1 - last_idx[j] += 1 - return dense_nb +@numba.njit(cache=True, parallel=True) +def _nbmat_cpu(coord, cutoff_squared, maxnb, mol_idx, mol_end_idx, nbmat, nnb): + # number of atoms + N = coord.shape[0] + # parallel loop over atoms + for i in numba.prange(N): + # coordinates of atom i + c_i = coord[i] + # mol index of atom i + _mol_idx = mol_idx[i] + # get indices of other atoms within same mol with j>i + _j_start = i + 1 + _j_end = mol_end_idx[_mol_idx] + # loop over other atoms in mol + for j in range(_j_start, _j_end): + c_j = coord[j] + diff = c_i - c_j + dist2 = (diff * diff).sum(-1) + if dist2 < cutoff_squared: + pos = nnb[i] + nnb[i] += 1 + if pos < maxnb: + nbmat[i, pos] = j + # add pairs with j sparse_nb[1]] - dense_nb = sparse_nb_to_dense_half(sparse_nb_half.mT.cpu().numpy(), coord.shape[0], max_num_neighbors) - dense_nb = torch.as_tensor(dense_nb, device=device) - return dense_nb - - -### dense neighbor matrix kernels +@numba.njit(cache=True, parallel=True) +def _nbmat_dual_cpu(coord, cutoff1_squared, cutoff2_squared, maxnb1, maxnb2, mol_idx, mol_end_idx, nbmat1, nbmat2, nnb1, nnb2): + # dual cutoff version of _nbmat_cpu + N = coord.shape[0] + for i in numba.prange(N): + c_i = coord[i] + _mol_idx = mol_idx[i] + _j_start = i + 1 + _j_end = mol_end_idx[_mol_idx] + for j in range(_j_start, _j_end): + c_j = coord[j] + diff = c_i - c_j + dist2 = (diff * diff).sum(-1) + if dist2 < cutoff1_squared: + pos = nnb1[i] + nnb1[i] += 1 + if pos < maxnb1: + nbmat1[i, pos] = j + if dist2 < cutoff2_squared: + pos = nnb2[i] + nnb2[i] += 1 + if 
pos < maxnb2: + nbmat2[i, pos] = j + nnb1_half = nnb1.copy() + nnb2_half = nnb2.copy() + for i in range(N): + for m in range(nnb1_half[i]): + j = nbmat1[i, m] + pos = nnb1[j] + nnb1[j] += 1 + if pos < maxnb1: + nbmat1[j, pos] = i + for m in range(nnb2_half[i]): + j = nbmat2[i, m] + pos = nnb2[j] + nnb2[j] += 1 + if pos < maxnb2: + nbmat2[j, pos] = i + + +def _nbmat_cuda(coord, cutoff_squared, maxnb, mol_idx, mol_end_idx, nbmat, nnb): + N = coord.shape[0] + i = numba.cuda.grid(1) + + if (i >= N): + return + + c0 = coord[i, 0] + c1 = coord[i, 1] + c2 = coord[i, 2] + + _mol_idx = mol_idx[i] + _j_start = i + 1 + _j_end = mol_end_idx[_mol_idx] + + for j in range(_j_start, _j_end): + d0 = c0 - coord[j, 0] + d1 = c1 - coord[j, 1] + d2 = c2 - coord[j, 2] + dist_squared = d0 * d0 + d1 * d1 + d2 * d2 + if dist_squared > cutoff_squared: + continue + + pos = numba.cuda.atomic.add(nnb, i, 1) + if pos < maxnb: + nbmat[i, pos] = j + pos = numba.cuda.atomic.add(nnb, j, 1) + if pos < maxnb: + nbmat[j, pos] = i + + +def _nbmat_dual_cuda(coord, cutoff1_squared, cutoff2_squared, maxnb1, maxnb2, mol_idx, mol_end_idx, nbmat1, nbmat2, nnb1, nnb2): + N = coord.shape[0] + i = numba.cuda.grid(1) + + if (i >= N): + return + + c0 = coord[i, 0] + c1 = coord[i, 1] + c2 = coord[i, 2] + + _mol_idx = mol_idx[i] + _j_start = i + 1 + _j_end = mol_end_idx[_mol_idx] + + for j in range(_j_start, _j_end): + d0 = c0 - coord[j, 0] + d1 = c1 - coord[j, 1] + d2 = c2 - coord[j, 2] + dist_squared = d0 * d0 + d1 * d1 + d2 * d2 + if dist_squared < cutoff1_squared: + pos = numba.cuda.atomic.add(nnb1, i, 1) + if pos < maxnb1: + nbmat1[i, pos] = j + pos = numba.cuda.atomic.add(nnb1, j, 1) + if pos < maxnb1: + nbmat1[j, pos] = i + if dist_squared < cutoff2_squared: + pos = numba.cuda.atomic.add(nnb2, i, 1) + if pos < maxnb2: + nbmat2[i, pos] = j + pos = numba.cuda.atomic.add(nnb2, j, 1) + if pos < maxnb2: + nbmat2[j, pos] = i + @numba.njit(cache=True, parallel=True) -def _cpu_dense_nb_mat_sft(conn_matrix): - N, S = 
conn_matrix.shape[:2] - # figure out max number of neighbors - _s_flat_conn_matrix = conn_matrix.reshape(N, -1) - maxnb = np.max(np.sum(_s_flat_conn_matrix, axis=-1)) - M = maxnb - # atom idx matrix - mat_idxj = np.full((N + 1, M), N, dtype=np.int_) - # padding matrix - mat_pad = np.ones((N + 1, M), dtype=np.bool_) - # shitfs matrix - mat_S_idx = np.zeros((N + 1, M), dtype=np.int_) - for _n in numba.prange(N): - _i = 0 - for _s in range(S): - for _m in range(N): - if conn_matrix[_n, _s, _m] == True: - mat_idxj[_n, _i] = _m - mat_pad[_n, _i] = False - mat_S_idx[_n, _i] = _s - _i += 1 - return mat_idxj, mat_pad, mat_S_idx +def _nbmat_pbc_cpu(coord, coord_shifted, cutoff_squared, maxnb, nbmat, nnb, nbmat_shifts): + N = coord.shape[0] + M = coord_shifted.shape[0] + for i in numba.prange(N): + c_i = coord[i] + for j in range(M): + c_j = coord_shifted[j] + diff = c_i - c_j + dist2 = (diff * diff).sum(-1) + if dist2 > 0.01 and dist2 < cutoff_squared: + pos = nnb[i] + nnb[i] += 1 + if pos < maxnb: + nbmat[i, pos] = j % N + nbmat_shifts[i, pos] = j // N -if _numba_cuda_available: - @numba.cuda.jit(cache=True) - def _cuda_dense_nb_mat_sft(conn_matrix, mat_idxj, mat_pad, mat_S_idx): - i = numba.cuda.grid(1) - if i < conn_matrix.shape[0]: - k = 0 - for s in range(conn_matrix.shape[1]): - for j in range(conn_matrix.shape[2]): - if conn_matrix[i, s, j] > 0: - mat_idxj[i, k] = j - mat_pad[i, k] = 0 - mat_S_idx[i, k] = s - k += 1 - - -def nblists_torch_pbc(coord: Tensor, cell: Tensor, cutoff: float) -> Tuple[Tensor, Tensor, Tensor]: - """ Compute dense neighbor lists for periodic boundary conditions case. - Coordinates must be in cartesian coordinates and be within the unit cell. - Single crystal only, no support for batched coord or multiple unit cells. 
- """ - assert coord.ndim == 2, 'Expected 2D tensor for coord, got {coord.ndim}D' - # non-PBC version - device = coord.device - reciprocal_cell = cell.inverse().t() - inv_distances = reciprocal_cell.norm(2, -1) - shifts = _calc_shifts(inv_distances, cutoff) - d = torch.cdist(coord.unsqueeze(0), coord.unsqueeze(0) + (shifts @ cell).unsqueeze(1)) - conn_mat = ((d < cutoff) & (d > 0.1)).transpose(0, 1).contiguous() - if device.type == 'cuda' and _numba_cuda_available: - _fn = _nblist_pbc_cuda - else: - _fn = _nblist_pbc_cpu - mat_idxj, mat_pad, mat_S = _fn(conn_mat, shifts) - return mat_idxj, mat_pad, mat_S +def _nbmat_pbc_cuda(coord, coord_shifted, cutoff_squared, maxnb, nbmat, nnb, nbmat_shifts): + N = coord_shifted.shape[0] + M = coord.shape[0] + + i = numba.cuda.grid(1) + if i >= N: + return + + c0 = coord_shifted[i, 0] + c1 = coord_shifted[i, 1] + c2 = coord_shifted[i, 2] + k = i % M + l = i // M -def _calc_shifts(inv_distances, cutoff): - num_repeats = torch.ceil(cutoff * inv_distances).to(torch.long) - dc = [torch.arange(-num_repeats[i], num_repeats[i] + 1, device=inv_distances.device) for i in range(len(num_repeats))] - shifts = torch.cartesian_prod(*dc).to(torch.float) - return shifts + for j in range(M): + d0 = c0 - coord[j, 0] + d1 = c1 - coord[j, 1] + d2 = c2 - coord[j, 2] + dist_squared = d0 * d0 + d1 * d1 + d2 * d2 + if dist_squared > 0.01 and dist_squared < cutoff_squared: + pos = numba.cuda.atomic.add(nnb, j, 1) + if pos < maxnb: + nbmat[j, pos] = k + nbmat_shifts[j, pos] = l -def _nblist_pbc_cuda(conn_mat, shifts): - N = conn_mat.shape[0] - M = conn_mat.view(N, -1).sum(-1).max() +if _numba_cuda_available: + numba_cuda_jit_kwargs = {'fastmath': True, 'cache': True} + _nbmat_cuda = numba.cuda.jit(_nbmat_cuda, **numba_cuda_jit_kwargs) + _nbmat_dual_cuda = numba.cuda.jit(_nbmat_dual_cuda, **numba_cuda_jit_kwargs) + _nbmat_pbc_cuda = numba.cuda.jit(_nbmat_pbc_cuda, **numba_cuda_jit_kwargs) + + +def calc_nbmat_dual(coord: Tensor, + cutoffs: Tuple[float, 
Optional[float]], + maxnb: Tuple[int, Optional[int]], + mol_idx: Optional[Tensor] = None, + ): + device = coord.device + N = coord.shape[0] + threadsperblock = 32 blockspergrid = (N + (threadsperblock - 1)) // threadsperblock - idx_j = torch.full((N + 1, M), N, dtype=torch.int64, device=conn_mat.device) - mat_pad = torch.ones((N + 1, M), dtype=torch.int8, device=conn_mat.device) - S_idx = torch.zeros((N + 1, M), dtype=torch.int64, device=conn_mat.device) - conn_mat = conn_mat.to(torch.int8) - _conn_mat = numba.cuda.as_cuda_array(conn_mat) - _idx_j = numba.cuda.as_cuda_array(idx_j) - _mat_pad = numba.cuda.as_cuda_array(mat_pad) - _S_idx = numba.cuda.as_cuda_array(S_idx) - _cuda_dense_nb_mat_sft[blockspergrid, threadsperblock](_conn_mat, _idx_j, _mat_pad, _S_idx) - mat_pad = mat_pad.to(torch.bool) - return idx_j, mat_pad, shifts[S_idx] - - -def _nblist_pbc_cpu(conn_mat, shifts, device): - conn_mat = conn_mat.cpu().numpy() - mat_idxj, mat_pad, mat_S_idx = _cpu_dense_nb_mat_sft(conn_mat) - mat_idxj = torch.from_numpy(mat_idxj).to(device) - mat_pad = torch.from_numpy(mat_pad).to(device) - mat_S_idx = torch.from_numpy(mat_S_idx).to(device) - mat_S = shifts[mat_S_idx] - return mat_idxj, mat_pad, mat_S - + cutoff_sr, cutoff_lr = cutoffs + maxnb_sr, maxnb_lr = maxnb + + _, mol_size = torch.unique(mol_idx, return_counts=True) + mol_end_idx = torch.cumsum(mol_size, 0) + if cutoff_lr is None: + nnb = torch.zeros(N, dtype=torch.long, device=device) + nbmat = torch.full((N+1, maxnb_sr), N, dtype=torch.long, device=device) + cutoff_squared = cutoff_sr * cutoff_sr + if device.type == 'cuda': + fn = _nbmat_cuda[blockspergrid, threadsperblock] + _coord = numba.cuda.as_cuda_array(coord) + _nbmat1 = numba.cuda.as_cuda_array(nbmat) + _nnb1 = numba.cuda.as_cuda_array(nnb) + _mol_idx = numba.cuda.as_cuda_array(mol_idx) + _mol_end_idx = numba.cuda.as_cuda_array(mol_end_idx) + else: + fn = _nbmat_cpu + _coord = coord.numpy() + _nbmat1 = nbmat.numpy() + _nnb1 = nnb.numpy() + _mol_idx = 
mol_idx.numpy() + _mol_end_idx = mol_end_idx.numpy() + fn(_coord, cutoff_squared, maxnb_sr, _mol_idx, _mol_end_idx, _nbmat1, _nnb1) + nnb1_max = nnb.max() + if nnb1_max > maxnb_sr: + raise ValueError(f"Max number of neighbors exceeded, increase maxnb_sr.") + nbmat1 = torch.as_tensor(nbmat[:, :nnb1_max], device=device) + nbmat2 = None + + else: + nnb1 = torch.zeros(N, dtype=torch.long, device=device) + nnb2 = torch.zeros(N, dtype=torch.long, device=device) + nbmat1 = torch.full((N+1, maxnb_sr), N, dtype=torch.long, device=device) + nbmat2 = torch.full((N+1, maxnb_lr), N, dtype=torch.long, device=device) + cutoff1_squared = cutoff_sr * cutoff_sr + cutoff2_squared = cutoff_lr * cutoff_lr + if device.type == 'cuda' and _numba_cuda_available: + fn = _nbmat_dual_cuda[blockspergrid, threadsperblock] + _coord = numba.cuda.as_cuda_array(coord) + _nbmat1 = numba.cuda.as_cuda_array(nbmat1) + _nbmat2 = numba.cuda.as_cuda_array(nbmat2) + _nnb1 = numba.cuda.as_cuda_array(nnb1) + _nnb2 = numba.cuda.as_cuda_array(nnb2) + _mol_idx = numba.cuda.as_cuda_array(mol_idx) + _mol_end_idx = numba.cuda.as_cuda_array(mol_end_idx) + else: + fn = _nbmat_dual_cpu + _coord = coord.numpy() + _nbmat1 = nbmat1.numpy() + _nbmat2 = nbmat2.numpy() + _nnb1 = nnb1.numpy() + _nnb2 = nnb2.numpy() + _mol_idx = mol_idx.numpy() + _mol_end_idx = mol_end_idx.numpy() + fn(_coord, cutoff1_squared, cutoff2_squared, maxnb_sr, maxnb_lr, _mol_idx, _mol_end_idx, _nbmat1, _nbmat2, _nnb1, _nnb2) + nnb1_max = nnb1.max() + nnb2_max = nnb2.max() + if nnb1_max > maxnb_sr: + raise ValueError(f"Max number of neighbors exceeded, increase maxnb_sr.") + if nnb2_max > maxnb_lr: + raise ValueError(f"Max number of neighbors exceeded, increase maxnb_lr.") + nbmat1 = nbmat1[:, :nnb1_max] + nbmat2 = nbmat2[:, :nnb2_max] + return nbmat1, nbmat2 + +def calc_nbmat_pbc(coord: Tensor, + cell: Tensor, + cutoff: float, + maxnb: int + ): + device = coord.device + inv_distances = cell.detach().inverse().cpu().norm(2, -1) + nshifts =
torch.ceil(cutoff * inv_distances).to(torch.long) + dc = [torch.arange(-nshifts[i], nshifts[i] + 1) for i in range(len(nshifts))] + shifts = torch.cartesian_prod(*dc).to(torch.float).to(device) + coord_shifted = coord.unsqueeze(0) + (shifts @ cell).unsqueeze(1) + ncells = shifts.shape[0] + shifts = torch.nn.functional.pad(shifts, [0, 0, 0, 1], mode='constant', value=0.0) + coord_shifted = coord_shifted.view(-1, 3).contiguous() + N = coord_shifted.shape[0] + threadsperblock = 32 + blockspergrid = (N + (threadsperblock - 1)) // threadsperblock + M = coord.shape[0] + nnb = torch.zeros(M, dtype=torch.long, device=device) + nbmat = torch.full((M+1, maxnb), M, dtype=torch.long, device=device) + nbmat_shifts = torch.full((M+1, maxnb), ncells, dtype=torch.long, device=device) + cutoff_squared = cutoff * cutoff + if device.type == 'cuda' and _numba_cuda_available: + fn = _nbmat_pbc_cuda[blockspergrid, threadsperblock] + _coord = numba.cuda.as_cuda_array(coord) + _coord_shifted = numba.cuda.as_cuda_array(coord_shifted) + _nbmat = numba.cuda.as_cuda_array(nbmat) + _nnb = numba.cuda.as_cuda_array(nnb) + _nbmat_shifts = numba.cuda.as_cuda_array(nbmat_shifts) + else: + fn = _nbmat_pbc_cpu + _coord = coord.numpy() + _coord_shifted = coord_shifted.numpy() + _nbmat = nbmat.numpy() + _nnb = nnb.numpy() + _nbmat_shifts = nbmat_shifts.numpy() + fn(_coord, _coord_shifted, cutoff_squared, maxnb, _nbmat, _nnb, _nbmat_shifts) + nnb_max = nnb.max() + if nnb_max > maxnb: + raise ValueError(f"Max number of neighbors exceeded, increase maxnb.") + nbmat = nbmat[:, :nnb_max] + nbmat_shifts = nbmat_shifts[:, :nnb_max] + shifts = shifts[nbmat_shifts] + return nbmat, shifts diff --git a/setup.py b/setup.py index f311e43..031b33e 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ packages=find_packages(), install_requires=[ 'torch>2.0,<3', - 'torch-cluster', + # 'torch-cluster', 'numpy', 'numba', 'ase',