@@ -47,7 +47,7 @@ def getWarehouses(mcdId,mcdToken):
def get_dataset_query(dwId, first: Optional[int] = 1000, after: Optional[str] = None) -> Query:
    """Build the GraphQL query that pages through datasets of one warehouse.

    Selects, for every dataset edge, the node fields ``dataset``, ``project``,
    ``uuid`` and ``is_muted``, plus the page-info fields needed for cursor
    pagination (``end_cursor`` / ``has_next_page``).

    Args:
        dwId: Warehouse identifier passed through as ``dw_id``.
        first: Page size for the ``get_datasets`` connection (default 1000).
        after: Pagination cursor from the previous page's ``end_cursor``;
            omitted from the call entirely when falsy so the server starts
            at the first page.

    Returns:
        Query: the prepared (not yet executed) query object.
        NOTE(review): ``Query`` presumably comes from the sgqlc-generated
        schema module imported elsewhere in this file — confirm.
    """
    query = Query()
    # Only forward `after` when a cursor exists; passing after=None would
    # serialize an explicit null argument.
    get_datasets = query.get_datasets(first=first, dw_id=dwId, **(dict(after=after) if after else {}))
    # BUG FIX: the field name was " uuid" (leading space inside the string),
    # which is not a valid GraphQL field; downstream code reads node.uuid.
    get_datasets.edges.node.__fields__("dataset", "project", "uuid", "is_muted")
    get_datasets.page_info.__fields__(end_cursor=True)
    get_datasets.page_info.__fields__("has_next_page")
    return query
@@ -59,8 +59,8 @@ def getDatasetUuidDict(mcdId,mcdToken,dwId):
5959 while True :
6060 response = client (get_dataset_query (dwId = dwId ,after = next_token )).get_datasets
6161 for dataset in response .edges :
62- if dataset .node .is_muted :
63- dataset_uuid_dict [dataset .node .dataset .lower ()] = dataset .node .uuid
62+ if dataset .node .is_muted == False :
63+ dataset_uuid_dict [dataset .node .uuid ] = f" { dataset .node . project . lower ()} : { dataset .node .dataset . lower () } "
6464 if response .page_info .has_next_page :
6565 next_token = response .page_info .end_cursor
6666 else :
@@ -80,7 +80,7 @@ def userReview(uuid_dict, dw_id):
8080 with open (fname , 'w' ) as csvfile :
8181 writer = csv .writer (csvfile )
8282 writer .writerow (header )
83- for dataset , uuid in uuid_dict .items ():
83+ for uuid , dataset in uuid_dict .items ():
8484 writer .writerow ([dataset , uuid ])
8585 userReview = input (f'Datasets to unmute written to file { fname } for your review. OK to proceed? (y/n) ' ).lower ()
8686
@@ -115,7 +115,7 @@ def unmute_datasets(mcdId,dwId,mcdToken,uuidDict):
115115
116116 unmuted_dataset_counter = 0
117117 incremental_datasets = 0
118- for uuid in uuidDict .values ():
118+ for uuid in uuidDict .keys ():
119119 temp_obj = dict (dsId = uuid , dwId = dwId )
120120 print (temp_obj )
121121 uuid_list .append (temp_obj )
0 commit comments