|
#!/usr/bin/env python
import argparse
import logging
import os

import numpy as np
import pandas as pd
import psutil

from pyace import BBasisConfiguration, ACEBBasisSet, aseatoms_to_atomicenvironment
from pyace.activelearning import compute_B_projections, compute_active_set, compute_active_set_by_batches, \
    compute_A_active_inverse, compute_extrapolation_grade, compute_number_of_functions, \
    count_number_total_atoms_per_species_type, save_active_inverse_set
from pyace.preparedata import sizeof_fmt
| 15 | + |
log = logging.getLogger()

# Command-line interface: compute the active set (ASI file) for a PACE (.yaml) potential.
parser = argparse.ArgumentParser(prog="pace_activeset",
                                 description="Utility to compute active set for PACE (.yaml) potential")

parser.add_argument("potential_file", help="B-basis file name (.yaml)", type=str)
parser.add_argument("-d", "--dataset", help="Dataset file name, ex.: filename.pckl.gzip", type=str)
parser.add_argument("-f", "--full", help="Compute active set on full (linearized) design matrix",
                    action='store_true')
parser.add_argument("-b", "--batch_size", help="Batch size (number of structures) considered simultaneously. "
                                               "If not provided - all dataset at once is considered",
                    default="auto", type=str)
parser.add_argument("-g", "--gamma_tolerance", help="Gamma tolerance",
                    default=1.01, type=float)
parser.add_argument("-i", "--maxvol_iters", help="Number of maximum iteration in MaxVol algorithm",
                    default=300, type=int)
parser.add_argument("-r", "--maxvol_refinement", help="Number of refinements (epochs)",
                    default=5, type=int)
parser.add_argument("-m", "--memory-limit", help="Memory limit (i.e. 1GB, 500MB or 'auto')", default="auto", type=str)

args_parse = parser.parse_args()
potential_file = args_parse.potential_file
dataset_filename = args_parse.dataset
batch_size = args_parse.batch_size
gamma_tolerance = args_parse.gamma_tolerance
maxvol_iters = args_parse.maxvol_iters
maxvol_refinement = args_parse.maxvol_refinement
mem_lim = args_parse.memory_limit
is_full = args_parse.full

if mem_lim == "auto":
    # determine 80% of currently available memory
    mem_lim = int(0.8 * psutil.virtual_memory().available)
else:
    # SECURITY: parse "1GB" / "500MB" / plain byte counts explicitly instead of
    # building and eval()-ing a Python expression from a user-supplied string.
    mem_str = mem_lim.strip().upper()
    if mem_str.endswith("GB"):
        mem_lim = int(float(mem_str[:-2]) * 2 ** 30)
    elif mem_str.endswith("MB"):
        mem_lim = int(float(mem_str[:-2]) * 2 ** 20)
    else:
        mem_lim = int(mem_str)
| 54 | + |
# Resolve the dataset file: first as given, then relative to $PACEMAKERDATAPATH.
data_path = os.environ.get("PACEMAKERDATAPATH", "")
if data_path:
    log.info("Data path set to $PACEMAKERDATAPATH = {}".format(data_path))

if dataset_filename is None:
    # explicit error instead of an opaque TypeError from os.path.isfile(None)
    raise RuntimeError("Dataset file name is not provided, use -d/--dataset option")
if os.path.isfile(dataset_filename):
    pass  # use the path as given
elif os.path.isfile(os.path.join(data_path, dataset_filename)):
    dataset_filename = os.path.join(data_path, dataset_filename)
else:
    raise RuntimeError("File {} not found".format(dataset_filename))

df = pd.read_pickle(dataset_filename, compression="gzip")
df.reset_index(drop=True, inplace=True)
log.info("Number of structures: {}".format(len(df)))
# BUGFIX: original format string had no "{}" placeholder, so the potential
# file name was never actually logged
log.info("Potential file: {}".format(potential_file))
| 70 | + |
# Load the B-basis potential and derive per-species-type projection sizes.
bconf = BBasisConfiguration(potential_file)
bbasis = ACEBBasisSet(bconf)
nfuncs = compute_number_of_functions(bbasis)
if is_full:
    # full (non-linear) case: each function contributes one projection per density
    n_projections = [
        nf * bbasis.map_embedding_specifications[species_type].ndensity
        for species_type, nf in enumerate(nfuncs)
    ]
else:
    # linear case: one projection per B-function
    n_projections = nfuncs

elements_to_index_map = bbasis.elements_to_index_map
elements_name = bbasis.elements_name
cutoffmax = bbasis.cutoffmax

# name of the dataframe column holding precomputed atomic environments
ATOMIC_ENV_COLUMN = "atomic_env"
| 84 | + |
# Decide whether precomputed neighbourlists (atomic environments) can be reused:
# rebuild when the column is absent or was built with a smaller cutoff than needed.
rebuild_atomic_env = False
if ATOMIC_ENV_COLUMN not in df.columns:
    rebuild_atomic_env = True
else:
    # check if the stored cutoff is not smaller than requested now
    try:
        metadata_kwargs = df.metadata_dict[ATOMIC_ENV_COLUMN + "_kwargs"]
        metadata_cutoff = metadata_kwargs["cutoff"]
        if metadata_cutoff < cutoffmax:
            # BUGFIX(typo): "that necessary" -> "than necessary"
            log.warning("WARNING! Column {} was constructed with smaller cutoff ({}A) "
                        "than necessary now ({}A). "
                        "Neighbourlists will be re-built".format(ATOMIC_ENV_COLUMN, metadata_cutoff,
                                                                 cutoffmax))
            rebuild_atomic_env = True
        else:
            log.info("Column '{}': existing cutoff ({}A) >= "
                     "requested cutoff ({}A), skipping...".format(ATOMIC_ENV_COLUMN, metadata_cutoff,
                                                                  cutoffmax))
            rebuild_atomic_env = False
    except KeyboardInterrupt as e:
        # never swallow a user interrupt
        raise e
    except Exception as e:
        # metadata is best-effort: keep the existing neighbourlists but warn the user
        log.info("Could not extract cutoff metadata "
                 "for column '{}' (error: {}). Please ensure the valid cutoff for "
                 "precomputed neighbourlists".format(ATOMIC_ENV_COLUMN, e))
        rebuild_atomic_env = False

if rebuild_atomic_env:
    log.info("Constructing {} column, cutoffmax={}, elements_to_index_map={}".format(ATOMIC_ENV_COLUMN, cutoffmax,
                                                                                     elements_to_index_map))
    df[ATOMIC_ENV_COLUMN] = df["ase_atoms"].apply(aseatoms_to_atomicenvironment,
                                                  cutoff=cutoffmax,
                                                  elements_mapper_dict=elements_to_index_map)
| 118 | + |
# Estimate memory needed for the full projection matrix and for the active set.
atomic_env_list = df[ATOMIC_ENV_COLUMN]
structure_ind_list = df.index
total_number_of_atoms_per_species_type = count_number_total_atoms_per_species_type(atomic_env_list)

number_of_projection_entries = 0
required_active_set_memory = 0
for st, n_atoms in total_number_of_atoms_per_species_type.items():
    log.info("\tElement: {}, # atoms: {}, # B-func: {}, # projections: {}".format(
        elements_name[st], n_atoms, nfuncs[st], n_projections[st]))
    # projections: one row per atom, n_projections[st] columns
    number_of_projection_entries += n_atoms * n_projections[st]
    # active set: square matrix of size n_projections[st]
    required_active_set_memory += n_projections[st] ** 2

required_projections_memory = number_of_projection_entries * 8  # float64
required_active_set_memory *= 8  # in bytes, float64
log.info("Required memory to store complete dataset projections: {}".format(sizeof_fmt(required_projections_memory)))
log.info("Required memory to store active set: {}".format(sizeof_fmt(required_active_set_memory)))
| 138 | + |
# Resolve batch_size: "auto" -> derive from memory limit; "none"/"None" -> single shot.
if batch_size == "auto":
    log.info("Automatic batch_size determination")
    log.info("Memory limit: {}".format(sizeof_fmt(mem_lim)))
    if 2 * required_projections_memory + required_active_set_memory < mem_lim:
        # everything fits into the memory limit: process the whole dataset at once
        batch_size = None
    else:
        available_for_projections = mem_lim - required_active_set_memory
        if available_for_projections <= 0:
            # guard: original code divided by zero / a negative number here
            raise RuntimeError("Memory limit ({}) is too small to hold the active set ({}). "
                               "Increase -m/--memory-limit".format(sizeof_fmt(mem_lim),
                                                                   sizeof_fmt(required_active_set_memory)))
        # BUGFIX: use true division inside ceil; the original used '//', which
        # pre-floored the ratio and made np.ceil a no-op, underestimating the
        # number of splits and overshooting the memory limit
        nsplits = int(np.ceil(2 * required_projections_memory / available_for_projections))
        batch_size = max(1, int(np.round(len(atomic_env_list) / nsplits)))
elif batch_size in ("None", "none"):
    batch_size = None
else:
    batch_size = int(batch_size)
| 151 | + |
# Derive the output ASI file name from the potential file name.
asi_suffix = ".asi.nonlinear" if is_full else ".asi"
active_set_inv_filename = potential_file.replace(".yaml", asi_suffix)
if is_full:
    log.info("FULL (non-linear) matrix will be used for active set calculation")
else:
    log.info("LINEAR matrix will be used for active set calculation")
| 158 | + |
if batch_size is None:
    # single-shot MaxVol over the whole dataset at once
    log.info("Single-run (no batch_size is provided)")
    log.info("Compute B-projections")
    A0_proj_dict = compute_B_projections(bbasis, atomic_env_list, is_full=is_full)
    log.info("B-projections computed:")
    for st, A0_proj in A0_proj_dict.items():
        log.info("\tElement: {}, B-projections shape: {}".format(elements_name[st], A0_proj.shape))

    log.info("Compute active set (using MaxVol algorithm)")
    A_active_set_dict = compute_active_set(A0_proj_dict, tol=gamma_tolerance, max_iters=maxvol_iters, verbose=True)
    log.info("Compute pseudoinversion of active set")
    A_active_inverse_set = compute_A_active_inverse(A_active_set_dict)
    log.info("Done")
    # extrapolation grades of the training data w.r.t. the computed active set
    gamma_dict = compute_extrapolation_grade(A0_proj_dict, A_active_inverse_set)
    gamma_max = {k: gg.max() for k, gg in gamma_dict.items()}

    for st, AS_inv in A_active_inverse_set.items():
        log.info("\tElement: {}, Active set inv. shape: {}, gamma_max: {:.3f}".format(elements_name[st], AS_inv.shape,
                                                                                      gamma_max[st]))
    log.info("Saving Active Set Inversion (ASI) to {}".format(active_set_inv_filename))
    with open(active_set_inv_filename, "wb") as f:
        np.savez(f, **{elements_name[st]: v for st, v in A_active_inverse_set.items()})
    # BUGFIX: report the size of the written file; the original passed the
    # filename *string* to sizeof_fmt, which expects a byte count
    log.info("Saving done to {} ({})".format(active_set_inv_filename,
                                             sizeof_fmt(os.path.getsize(active_set_inv_filename))))
else:
    # approximate MaxVol over batches with iterative refinement
    log.info("Approximated MaxVol by batches")
    log.info("Batch size: {}".format(batch_size))
    # guard: batch_size > dataset size would give nsplits == 0 and crash array_split
    nsplits = max(1, len(atomic_env_list) // batch_size)
    atomic_env_batches = [b.values for b in np.array_split(atomic_env_list, nsplits)]
    structure_env_batches = [b.values for b in np.array_split(structure_ind_list, nsplits)]
    log.info("Number of batches: {}".format(len(atomic_env_batches)))

    log.info("Compute approximate active set (using batched MaxVol algorithm)")
    (best_gamma, best_active_sets_dict, _) = \
        compute_active_set_by_batches(
            bbasis,
            atomic_env_batches=atomic_env_batches,
            structure_ind_batches=structure_env_batches,
            gamma_tolerance=gamma_tolerance,
            maxvol_iters=maxvol_iters,
            n_refinement_iter=maxvol_refinement,
            save_interim_active_set=True,
            is_full=is_full
        )
    log.info("Compute pseudoinversion of active set")
    A_active_inverse_set = compute_A_active_inverse(best_active_sets_dict)
    for st, AS_inv in A_active_inverse_set.items():
        log.info("\tElement: {}, Active set inv. shape: {}, gamma_max: {:.3f}".format(elements_name[st], AS_inv.shape,
                                                                                      best_gamma[st]))
    log.info("Saving Active Set Inversion (ASI) to {}".format(active_set_inv_filename))
    save_active_inverse_set(active_set_inv_filename, A_active_inverse_set, elements_name=elements_name)
    # BUGFIX: same filename-vs-size fix as the single-shot branch
    log.info("Saving done to {} ({})".format(active_set_inv_filename,
                                             sizeof_fmt(os.path.getsize(active_set_inv_filename))))