Skip to content

Commit 12b6fa4

Browse files
committed
Add LRU cache to trig_tables function for improved performance
1 parent de14f6c commit 12b6fa4

1 file changed

Lines changed: 2 additions & 0 deletions

File tree

diffct/differentiable.py

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -2,6 +2,7 @@
 import numpy as np
 import torch
 from numba import cuda
+from functools import lru_cache


 # ---------------------------------------------------------------------------
 # Global settings & helpers
@@ -110,6 +111,7 @@ def tensor_to_cuda_array(tensor):


 # === GPU-aware Trigonometric Table Generation ===
+@lru_cache(maxsize=2048)
 def _trig_tables(angles, dtype=_DTYPE, device=None):
     """Compute cosine and sine tables for input angles.

0 commit comments

Comments (0)