@@ -161,6 +161,7 @@ def __init__(
161161 self .correlation_id = correlation_id
162162 self ._schema_differ_overrides = schema_differ_overrides
163163 self ._query_execution_tracker = query_execution_tracker
164+ self ._data_object_cache : t .Dict [str , t .Optional [DataObject ]] = {}
164165
165166 def with_settings (self , ** kwargs : t .Any ) -> EngineAdapter :
166167 extra_kwargs = {
@@ -983,6 +984,13 @@ def _create_table(
983984 ),
984985 track_rows_processed = track_rows_processed ,
985986 )
987+ # Extract table name to clear cache
988+ table_name = (
989+ table_name_or_schema .this
990+ if isinstance (table_name_or_schema , exp .Schema )
991+ else table_name_or_schema
992+ )
993+ self ._clear_data_object_cache (table_name )
986994
987995 def _build_create_table_exp (
988996 self ,
@@ -1074,6 +1082,7 @@ def clone_table(
10741082 ** kwargs ,
10751083 )
10761084 )
1085+ self ._clear_data_object_cache (target_table_name )
10771086
10781087 def drop_data_object (self , data_object : DataObject , ignore_if_not_exists : bool = True ) -> None :
10791088 """Drops a data object of arbitrary type.
@@ -1139,6 +1148,7 @@ def _drop_object(
11391148 drop_args ["cascade" ] = cascade
11401149
11411150 self .execute (exp .Drop (this = exp .to_table (name ), kind = kind , exists = exists , ** drop_args ))
1151+ self ._clear_data_object_cache (name )
11421152
11431153 def get_alter_operations (
11441154 self ,
@@ -1329,6 +1339,8 @@ def create_view(
13291339 quote_identifiers = self .QUOTE_IDENTIFIERS_IN_VIEWS ,
13301340 )
13311341
1342+ self ._clear_data_object_cache (view_name )
1343+
13321344 # Register table comment with commands if the engine doesn't support doing it in CREATE
13331345 if (
13341346 table_description
@@ -2278,14 +2290,52 @@ def get_data_objects(
22782290 if object_names is not None :
22792291 if not object_names :
22802292 return []
2281- object_names_list = list (object_names )
2282- batches = [
2283- object_names_list [i : i + self .DATA_OBJECT_FILTER_BATCH_SIZE ]
2284- for i in range (0 , len (object_names_list ), self .DATA_OBJECT_FILTER_BATCH_SIZE )
2285- ]
2286- return [
2287- obj for batch in batches for obj in self ._get_data_objects (schema_name , set (batch ))
2288- ]
2293+
2294+ # Check cache for each object name
2295+ target_schema = to_schema (schema_name )
2296+ cached_objects = []
2297+ missing_names = set ()
2298+
2299+ for name in object_names :
2300+ cache_key = _get_data_object_cache_key (
2301+ target_schema .catalog , target_schema .db , name
2302+ )
2303+ if cache_key in self ._data_object_cache :
2304+ data_object = self ._data_object_cache [cache_key ]
2305+ # If the object is none, then the table was previously looked for but not found
2306+ if data_object :
2307+ cached_objects .append (data_object )
2308+ else :
2309+ missing_names .add (name )
2310+
2311+ # Fetch missing objects from database
2312+ if missing_names :
2313+ object_names_list = list (missing_names )
2314+ batches = [
2315+ object_names_list [i : i + self .DATA_OBJECT_FILTER_BATCH_SIZE ]
2316+ for i in range (0 , len (object_names_list ), self .DATA_OBJECT_FILTER_BATCH_SIZE )
2317+ ]
2318+ fetched_objects = [
2319+ obj
2320+ for batch in batches
2321+ for obj in self ._get_data_objects (schema_name , set (batch ))
2322+ ]
2323+
2324+ # Cache the fetched objects
2325+ for obj in fetched_objects :
2326+ cache_key = _get_data_object_cache_key (obj .catalog , obj .schema_name , obj .name )
2327+ self ._data_object_cache [cache_key ] = obj
2328+
2329+ fetched_object_names = {obj .name for obj in fetched_objects }
2330+ for missing_name in missing_names - fetched_object_names :
2331+ cache_key = _get_data_object_cache_key (
2332+ target_schema .catalog , target_schema .db , missing_name
2333+ )
2334+ self ._data_object_cache [cache_key ] = None
2335+
2336+ return cached_objects + fetched_objects
2337+
2338+ return cached_objects
22892339 return self ._get_data_objects (schema_name )
22902340
22912341 def fetchone (
@@ -2693,6 +2743,15 @@ def _to_sql(self, expression: exp.Expression, quote: bool = True, **kwargs: t.An
26932743
26942744 return expression .sql (** sql_gen_kwargs , copy = False ) # type: ignore
26952745
def _clear_data_object_cache(self, table_name: t.Optional[TableName] = None) -> None:
    """Invalidates cached data-object lookups.

    If `table_name` is given, only that table's cache entry is removed
    (silently ignoring a missing entry); otherwise the whole cache is flushed.
    """
    if table_name is None:
        # No specific table requested: drop every cached entry.
        self._data_object_cache.clear()
        return
    # Normalize to a table expression so catalog/schema/name parts are available.
    table = exp.to_table(table_name)
    key = _get_data_object_cache_key(table.catalog, table.db, table.name)
    self._data_object_cache.pop(key, None)
2754+
26962755 def _get_data_objects (
26972756 self , schema_name : SchemaName , object_names : t .Optional [t .Set [str ]] = None
26982757 ) -> t .List [DataObject ]:
@@ -2940,3 +2999,11 @@ def _decoded_str(value: t.Union[str, bytes]) -> str:
29402999 if isinstance (value , bytes ):
29413000 return value .decode ("utf-8" )
29423001 return value
3002+
3003+
3004+ def _get_data_object_cache_key (catalog : t .Optional [str ], schema_name : str , object_name : str ) -> str :
3005+ """Returns a cache key for a data object based on its fully qualified name."""
3006+ catalog_part = catalog .lower () if catalog else ""
3007+ schema_part = schema_name .lower ()
3008+ object_part = object_name .lower ()
3009+ return f"{ catalog_part } .{ schema_part } .{ object_part } "
0 commit comments