benchmark.py
# imports
import argparse
import os
import pickle
from collections import defaultdict

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

import constants
from Trial_Setup_Utils import maybe_mkdir


# generate the benchmark dataset suite (BDS)
def load_merge_dfs(dataset_name: str, trial_nums: list[int]) -> pd.DataFrame:
    """Load the per-trial result pickles for one dataset and merge them into a single DataFrame."""
    main_df = pd.DataFrame()
    # accumulate the per-trial dfs
    for trial_num in trial_nums:
        path = os.path.join(constants.RESULTS_FOLDER,
                            "analysis", f"trial{trial_num}")
        df = pd.read_pickle(os.path.join(path, f"{dataset_name}.pkl"))
        # add parameter column, parsed from the trailing "_<int>" in the model name
        df["parameter"] = df["model_name"].apply(
            lambda string: int(string.split("_")[-1]))
        main_df = pd.concat([main_df, df], ignore_index=True)
    # get rid of parameter duplicates, keeping the first occurrence
    main_df = main_df.drop_duplicates(subset=['parameter'], keep="first")
    return main_df
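
# Minimal sketch (an illustrative helper, not called anywhere): demonstrates the
# "model_name" -> "parameter" convention assumed above, i.e. names ending in
# "_<int>". The model names below are hypothetical placeholders.
def _example_parameter_parsing() -> pd.DataFrame:
    demo = pd.DataFrame({"model_name": ["some_model_5", "some_model_10"]})
    # same parsing rule as load_merge_dfs: take the trailing integer
    demo["parameter"] = demo["model_name"].apply(
        lambda string: int(string.split("_")[-1]))
    return demo  # the "parameter" column holds 5 and 10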
def generate_benchmark_dataframe(paths: dict) -> pd.DataFrame:
    """Build the BDS DataFrame: one row per parameter value, one column per dataset,
    each cell a dict of metric means."""
    bds = None
    for dataset_name in paths:
        df = load_merge_dfs(dataset_name, paths[dataset_name])
        # assign means for capacity and expressivity
        for metric_type in ["algorithmic_capacity", "entropic_expressivity"]:
            metric_mean = metric_type + "_mean"  # column name
            df[metric_mean] = df[metric_type].apply(np.mean)
        # assign means for bias for each target set size
        algorithmic_bias_types = [
            title for title in df.columns if "algorithmic_bias" in title]
        for metric_type in algorithmic_bias_types:
            metric_mean = metric_type + "_mean"  # column name
            df[metric_mean] = df[metric_type].apply(np.mean)
        # collect the per-row metric means into a single dict column stored in bds
        final_value_column = []
        for i, _ in df.iterrows():
            # expressivity and capacity
            values = {"algorithmic_capacity_mean": df["algorithmic_capacity_mean"][i],
                      "entropic_expressivity_mean": df["entropic_expressivity_mean"][i]}
            # bias values (one per target set size)
            for metric_type in algorithmic_bias_types:
                metric_mean = metric_type + "_mean"  # column name
                values[metric_mean] = df[metric_mean][i]
            final_value_column.append(values)
        # prefix dataset columns with "&" so they are easy to identify before the rename below
        df[f"&{dataset_name}"] = final_value_column
        temp_df = df[[f"&{dataset_name}", "parameter"]]
        temp_df = temp_df.set_index("parameter")
        if bds is not None:
            bds = pd.merge(bds, temp_df, left_index=True, right_index=True)
        else:
            bds = temp_df
    # get rid of the extra "&" in the column names
    bds = bds.rename(
        columns={column: column[1:] for column in bds.columns})
    # (sorting by parameter happens later, in split_dataframe_dictionaries)
    return bds
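
# Hypothetical usage sketch: `paths` maps each dataset name to the trial numbers
# whose pickles should be merged. The names and numbers below are placeholders,
# not values from this repo:
#   paths = {"dataset_a": [3, 4], "dataset_b": [7]}
#   bds = generate_benchmark_dataframe(paths)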
def split_dataframe_dictionaries(df: pd.DataFrame) -> dict:
    """Split a DataFrame whose cells are metric dicts into one DataFrame per metric."""
    # This will store our resulting DataFrames, keyed by metric name
    result_dfs = {}
    # Iterate over the DataFrame's cells
    for column in df:
        for index, value in df[column].items():
            # Proceed only if the value is a dictionary
            if isinstance(value, dict):
                # For each key in the dictionary, we create or update a DataFrame
                for key, val in value.items():
                    # drop the trailing "_mean" suffix to get the metric name
                    metric_name = "_".join(key.split("_")[:-1])
                    # If the DataFrame for this metric does not exist, create it
                    if metric_name not in result_dfs:
                        result_dfs[metric_name] = pd.DataFrame(
                            index=df.index, columns=df.columns)
                    result_dfs[metric_name].at[index, column] = val
    # loop over dfs and sort each one by parameter value (descending)
    for metric_name in result_dfs.keys():
        result_dfs[metric_name] = result_dfs[metric_name].sort_index(ascending=False)
    return result_dfs
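
# Minimal sketch (an illustrative helper, not called anywhere): shows the shape
# split_dataframe_dictionaries expects -- each cell a dict of "<metric>_mean"
# values -- and the per-metric DataFrames it returns. All values are made up.
def _example_split_dataframe_dictionaries() -> dict:
    cell = {"algorithmic_capacity_mean": 0.5, "entropic_expressivity_mean": 0.7}
    demo = pd.DataFrame({"dataset_a": [cell], "dataset_b": [cell]}, index=[5])
    # returns {"algorithmic_capacity": ..., "entropic_expressivity": ...},
    # each a 1x2 DataFrame indexed by the parameter value 5
    return split_dataframe_dictionaries(demo)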
class BDS_Vector:
    def __init__(self):
        self.algorithmic_capacity = []
        self.entropic_expressivity = []
        self.algorithmic_bias = []
        self.dataset_keys = {}
def bds_dataframe(bds):
    """Convert the BDS DataFrame into a dict mapping each parameter value to a dict of
    metric vectors (one entry per dataset, in column order)."""
    # change it to a dictionary keyed by the index (parameter value)
    bds_index = bds.to_dict('index')
    bds_matrix = {}
    metrics = bds.iat[0, 0].keys()
    # for each parameter value, create one vector per metric across datasets
    for index in bds_index.keys():
        bds_row = bds_index[index]
        bds_vector = {}
        for metric in metrics:
            bds_vector[metric] = [bds_row[dataset][metric]
                                  for dataset in bds_row.keys()]
        bds_matrix[index] = bds_vector
    return bds_matrix
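
# Minimal sketch (an illustrative helper, not called anywhere): bds_dataframe
# turns the per-cell metric dicts into one vector per metric for each parameter
# value. Dataset names and values below are placeholders.
def _example_bds_dataframe() -> dict:
    cell = {"algorithmic_capacity_mean": 0.5, "entropic_expressivity_mean": 0.7}
    demo = pd.DataFrame({"dataset_a": [cell], "dataset_b": [cell]}, index=[5])
    # returns {5: {"algorithmic_capacity_mean": [0.5, 0.5],
    #              "entropic_expressivity_mean": [0.7, 0.7]}}
    return bds_dataframe(demo)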
def visualize_bds(bds_dfs: dict[str, pd.DataFrame], saving_directory: str, metric: str, model_name: str = None):
    """Draw and save a heatmap of one metric's BDS DataFrame (parameter values x datasets)."""
    df = bds_dfs[metric].astype(float)  # cast to floats for plotting
    sns.heatmap(df, annot=True, annot_kws={"size": 7})
    plt.xticks(rotation=90, fontsize=5)
    plt.yticks(fontsize=7)
    plt.tight_layout()  # readjusts the plot layout, so that everything fits
    plt.savefig(f"{saving_directory}/{model_name}_{metric}.png", dpi=400)
    plt.close()
    return
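
# Hypothetical usage sketch (the directory and names are placeholders): after
# splitting the BDS into per-metric DataFrames, one heatmap is saved per metric:
#   bds_dfs = split_dataframe_dictionaries(bds_df)
#   visualize_bds(bds_dfs, "heatmaps/some_model", "algorithmic_capacity", "some_model")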
# benchmark utils
def load_bds_dfs_from_pickle(saving_path: str) -> dict:
    """Load a previously pickled dict of per-metric BDS DataFrames."""
    with open(saving_path, "rb") as input_file:
        bds_dfs = pickle.load(input_file)
    return bds_dfs
def analyze_bds(bds: pd.DataFrame, aggregate_fn, sort: bool) -> pd.Series:
    """
    Takes a bds, which looks like a matrix, and creates a vector; each value is the
    aggregate result of a row in the DataFrame, across the various datasets.
    Inputs:
        bds: a pandas DataFrame. The columns are dataset names and the rows are parameter values.
        aggregate_fn: a higher-order function (such as max) applied to each row.
        sort: if True, sorts in ascending order; the default is no sorting.
    Outputs:
        aggregate_vals: a pandas Series containing one value per parameter of the model.
    """
    aggregate_vals = bds.apply(aggregate_fn, axis=1)
    if sort:
        aggregate_vals = aggregate_vals.sort_values()
    return aggregate_vals
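
# Minimal sketch (an illustrative helper, not called anywhere): aggregates a
# tiny hypothetical bds with np.mean across datasets; all values are made up.
def _example_analyze_bds() -> pd.Series:
    demo = pd.DataFrame({"dataset_a": [0.1, 0.4], "dataset_b": [0.3, 0.2]},
                        index=[5, 10])
    # one mean per parameter value: roughly 0.2 for 5 and 0.3 for 10, sorted ascending
    return analyze_bds(demo, np.mean, sort=True)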
def rank_models_by_dataset(bds_folders, aggregate_fn, saving_path, aggregate_name):
    # loads the BDS DataFrames of every model, calls analyze_bds on all of them,
    # and then performs the sorting
    # dictionary: key is the metric type, value is the accumulated Series
    series_dict = defaultdict(pd.Series)  # str: pd.Series
    # loop through the per-model folders in bds_folders, e.g.
    # bds_folders = '/data/big/erchen/inductive_orientation/heatmaps'
    for model_name in os.listdir(bds_folders):
        bds_dfs_saving_path = os.path.join(
            bds_folders, model_name, f'{model_name}_BDF_dfs.pkl')
        # calls load_bds_dfs_from_pickle
        bds_dfs = load_bds_dfs_from_pickle(bds_dfs_saving_path)
        # loop through the dfs, i.e. the different metrics
        for metric in bds_dfs.keys():
            bds = bds_dfs[metric]
            # calls analyze_bds
            aggregate_vals = analyze_bds(bds, aggregate_fn, sort=False)
            # prepend the model name to the series index
            new_index = [
                f'{model_name}-{parameter}' for parameter in aggregate_vals.index]
            aggregate_vals.index = new_index
            # append the above series to the accumulated series for the right metric type
            if metric in series_dict.keys():
                series_dict[metric] = pd.concat(
                    [series_dict[metric], aggregate_vals])
            else:
                series_dict[metric] = aggregate_vals
    # loop through the metrics, i.e. the dictionary keys
    for metric in series_dict.keys():
        # order the current series
        current_series = series_dict[metric].sort_values()
        # write it out as a csv that we can read
        current_series.to_csv(os.path.join(
            saving_path, f'{metric}_{aggregate_name}.csv'), index=True)
def rank_models(bds_folders, aggregate_fn, saving_path, aggregate_name):
    # load each model's BDS DataFrames, analyze them, aggregate across the datasets,
    # and do a ranking
    # dictionary: key is the metric type, value is the accumulated Series
    series_dict = defaultdict(pd.Series)  # str: pd.Series
    # loop through the per-model folders in bds_folders, e.g.
    # bds_folders = '/data/big/erchen/inductive_orientation/heatmaps'
    for model_name in os.listdir(bds_folders):
        bds_dfs_saving_path = os.path.join(
            bds_folders, model_name, f'{model_name}_BDF_dfs.pkl')
        # calls load_bds_dfs_from_pickle
        bds_dfs = load_bds_dfs_from_pickle(bds_dfs_saving_path)
        # loop through the dfs, i.e. the different metrics
        for metric in bds_dfs.keys():
            bds = bds_dfs[metric]
            # calls analyze_bds
            aggregate_vals = analyze_bds(bds, aggregate_fn, sort=False)
            # prepend the model name to the series index
            new_index = [
                f'{model_name}-{parameter}' for parameter in aggregate_vals.index]
            aggregate_vals.index = new_index
            # append the above series to the accumulated series for the right metric type
            if metric in series_dict.keys():
                series_dict[metric] = pd.concat(
                    [series_dict[metric], aggregate_vals])
            else:
                series_dict[metric] = aggregate_vals
    # loop through the metrics, i.e. the dictionary keys
    for metric in series_dict.keys():
        # order the current series
        current_series = series_dict[metric].sort_values()
        # write it out as a csv that we can read
        current_series.to_csv(os.path.join(
            saving_path, f'{metric}_{aggregate_name}.csv'), index=True)
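
# Hypothetical usage sketch (the aggregate and output directory are placeholders):
#   rank_models(os.path.join(constants.RESULTS_FOLDER, "heatmaps"),
#               np.max, saving_path="rankings", aggregate_name="max")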
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Runs the Benchmark Dataset Suite for a model for all three methods")
    parser.add_argument('--model_name', type=str, required=True,
                        help='provide both the name of the model and the parameter to vary, check constants.py')
    # parse inputs
    args = parser.parse_args()
    MODEL_NAME = args.model_name
    # dict mapping each dataset name to its trial numbers
    paths = constants.MODEL_TO_TRIAL_NUMS[MODEL_NAME]
    saving_dir = maybe_mkdir(constants.RESULTS_FOLDER,
                             f"heatmaps/{MODEL_NAME}")
    # generate the BDS and split it into one DataFrame per metric
    bds_df = generate_benchmark_dataframe(paths)
    bds_dfs = split_dataframe_dictionaries(bds_df)
    # pickle the bds dataframes
    with open(f'{saving_dir}/{MODEL_NAME}_BDF_dfs.pkl', 'wb') as handle:
        pickle.dump(bds_dfs, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # generate and save a heatmap for each metric
    for metric in bds_dfs:
        visualize_bds(bds_dfs, saving_dir, metric, MODEL_NAME)
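
# Hypothetical invocation (the model name is a placeholder; valid names are the
# keys of constants.MODEL_TO_TRIAL_NUMS):
#   python benchmark.py --model_name some_model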