diff --git a/aodntools/timeseries_products/aggregated_timeseries.py b/aodntools/timeseries_products/aggregated_timeseries.py
old mode 100644
new mode 100755
index 28086fe..12d0a68
--- a/aodntools/timeseries_products/aggregated_timeseries.py
+++ b/aodntools/timeseries_products/aggregated_timeseries.py
@@ -414,29 +414,28 @@ def main_aggregator(files_to_agg, var_to_agg, site_code, input_dir='', output_di
                              (var_to_agg + "-" + product_type),
                              ('END-'+ time_end_filename),
                              'C-' + datetime.utcnow().strftime(file_timeformat)]) + '.nc'
     ncout_path = os.path.join(output_dir, output_name)
-    shutil.move(temp_outfile, os.path.join(output_dir, ncout_path))
+    shutil.move(temp_outfile, ncout_path)
 
     return ncout_path, bad_files
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Aggregate ONE variable from ALL instruments from ALL deployments from ONE site")
-    parser.add_argument('-site', dest='site_code', help='site code, like NRMMAI', required=True)
-    parser.add_argument('-var', dest='varname', help='variable to aggregate, like TEMP', required=True)
-    parser.add_argument('-files', dest='filenames',
-                        help='name of the file that contains the source URLs (relative to inpath, if given)',
-                        required=True)
-    parser.add_argument('-indir', dest='input_dir', help='base path of input files', default='', required=False)
-    parser.add_argument('-outdir', dest='output_dir', help='path where the result file will be written. Default ./',
-                        default='./', required=False)
-    parser.add_argument('-download_url', dest='download_url', help='path to the download_url_prefix',
-                        default='', required=False)
-    parser.add_argument('-opendap_url', dest='opendap_url', help='path to the opendap_url_prefix',
-                        default='', required=False)
+    parser = argparse.ArgumentParser(
+        description="Aggregate ONE variable from ALL instruments from ALL deployments from ONE site"
+    )
+    parser.add_argument('site_code', help='site code, like NRSMAI')
+    parser.add_argument('varname', help='variable to aggregate, like TEMP')
+    parser.add_argument('filenames',
+                        help='path of file listing the source URLs (relative to input_dir, if given)')
+    parser.add_argument('-i', '--input_dir', help='base path of input files', default='')
+    parser.add_argument('-o', '--output_dir', help='path where the result file will be written. Default ./',
+                        default='./')
+    parser.add_argument('--download_url', help='path to the download_url_prefix', default='')
+    parser.add_argument('--opendap_url', help='path to the opendap_url_prefix', default='')
     args = parser.parse_args()
 
-    with open(os.path.join(args.input_dir,args.filenames)) as ff:
+    with open(args.filenames) as ff:
         files_to_agg = [line.rstrip() for line in ff]
 
     print(main_aggregator(files_to_agg=files_to_agg, var_to_agg=args.varname, site_code=args.site_code,
diff --git a/aodntools/timeseries_products/gridded_timeseries.py b/aodntools/timeseries_products/gridded_timeseries.py
old mode 100644
new mode 100755
index 12a681a..138a2cb
--- a/aodntools/timeseries_products/gridded_timeseries.py
+++ b/aodntools/timeseries_products/gridded_timeseries.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import numpy as np
 import bisect
 import argparse
@@ -318,17 +320,24 @@ def grid_variable(input_file, VoI, depth_bins=None, max_separation=50, depth_bin
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Gridded time series: interpolate ONE variable from ALL instruments from ALL deployments from ONE site into 1hr timestamps and fixed depth bins")
-    parser.add_argument('-var', dest='var', help='name of the variable to concatenate. Like TEMP, PSAL', default='TEMP', required=False)
-    parser.add_argument('-file', dest='filename', help='name of the Hourly Time Series Product file that contains the data', default=None, required=False)
-    parser.add_argument('-depth_bins', dest='depth_bins', help='list of depth where the VoI will be interpolated', default=None, nargs='+', required=False)
-    parser.add_argument('-max_separation', dest='max_separation', help='maximum difference between instruments to allow interpolation', default=50, required=False)
-    parser.add_argument('-depth_bins_increment', dest='depth_bins_increment', help='increment in meters for the automatic generated depth bins', default=10, required=False)
-    parser.add_argument('-indir', dest='input_dir', help='base path of input file. Default .', default='.',
-                        required=False)
-    parser.add_argument('-outdir', dest='output_dir', help='path where the result file will be written. Default .',
-                        default='.', required=False)
-    parser.add_argument('-config', dest='config_file', help='JSON configuration file', default=None, required=False)
+    parser = argparse.ArgumentParser(
+        description="Gridded time series: interpolate ONE variable from ALL instruments from ALL deployments"
+                    " from ONE site into 1hr timestamps and fixed depth bins"
+    )
+    parser.add_argument('input_file',
+                        help='name of the Hourly Time Series Product file that contains the data')
+    parser.add_argument('-v', '--var', default='TEMP',
+                        help='name of the variable to grid (default TEMP)')
+    parser.add_argument('--depth_bins', help='list of depth where the variable will be interpolated',
+                        nargs='+', type=float)
+    parser.add_argument('--max_separation', default=50,
+                        help='maximum difference between instruments to allow interpolation (default 50m)')
+    parser.add_argument('--depth_bins_increment', default=10,
+                        help='increment in meters for the automatic generated depth bins (default 10)')
+    parser.add_argument('-i', '--input_dir', help='base path of input files', default='')
+    parser.add_argument('-o', '--output_dir', help='path where the result file will be written. Default ./',
+                        default='./')
+    parser.add_argument('--config_file', help='JSON configuration file', default=None)
     args = parser.parse_args()
 
     if args.config_file:
@@ -348,8 +357,6 @@ def grid_variable(input_file, VoI, depth_bins=None, max_separation=50, depth_bin
     input_dir = args.input_dir
     output_dir = args.output_dir
 
-    file_name = args.filename
-
-    print(grid_variable(input_file=file_name, VoI=VoI, depth_bins=depth_bins,
+    print(grid_variable(input_file=args.input_file, VoI=VoI, depth_bins=depth_bins,
                         max_separation=int(max_separation), depth_bins_increment=int(depth_bins_increment),
                         input_dir=input_dir, output_dir=output_dir))
diff --git a/aodntools/timeseries_products/hourly_timeseries.py b/aodntools/timeseries_products/hourly_timeseries.py
old mode 100644
new mode 100755
index 7e2e4a5..d40e3d0
--- a/aodntools/timeseries_products/hourly_timeseries.py
+++ b/aodntools/timeseries_products/hourly_timeseries.py
@@ -641,18 +641,25 @@ def hourly_aggregator(files_to_aggregate, site_code, qcflags, input_dir='', outp
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Concatenate ALL variables from ALL instruments from ALL deployments from ONE site at 1hr time bin")
-    parser.add_argument('-site', dest='site_code', help='site code, like NRMMAI', required=True)
-    parser.add_argument('-files', dest='filenames', help='name of the file that contains the source URLs', required=True)
-    parser.add_argument('-qc', dest='qcflags', help='list of QC flags to select variable values to keep', nargs='+', required=True)
-    parser.add_argument('-indir', dest='input_dir', help='base path of input files', default='', required=False)
-    parser.add_argument('-outdir', dest='output_dir', help='path where the result file will be written. Default ./',
-                        default='./', required=False)
+    parser = argparse.ArgumentParser(
+        description="Concatenate ALL variables from ALL instruments from ALL deployments from ONE site at 1hr time bin"
+    )
+    parser.add_argument('site_code', help='site code, like NRSMAI')
+    parser.add_argument('filenames',
+                        help='path of file listing the source URLs (relative to input_dir, if given)')
+    parser.add_argument('--qcflags', default='1,2',
+                        help='QC flags to select variable values to keep (comma-separated, no spaces; default=1,2)')
+    parser.add_argument('-i', '--input_dir', help='base path of input files', default='')
+    parser.add_argument('-o', '--output_dir', help='path where the result file will be written. Default ./',
+                        default='./')
+    parser.add_argument('--download_url', help='path to the download_url_prefix', default='')
+    parser.add_argument('--opendap_url', help='path to the opendap_url_prefix', default='')
     args = parser.parse_args()
 
     with open(args.filenames, 'r') as file:
         files_to_aggregate = [i.strip() for i in file.readlines()]
-    qcflags = [int(i) for i in args.qcflags]
+    qcflags = [int(i) for i in args.qcflags.split(',')]
 
     hourly_aggregator(files_to_aggregate=files_to_aggregate, site_code=args.site_code, qcflags=qcflags,
-                     input_dir=args.input_dir, output_dir=args.output_path)
+                     input_dir=args.input_dir, output_dir=args.output_dir,
+                     download_url_prefix=args.download_url, opendap_url_prefix=args.opendap_url)
diff --git a/aodntools/timeseries_products/velocity_aggregated_timeseries.py b/aodntools/timeseries_products/velocity_aggregated_timeseries.py
old mode 100644
new mode 100755
index 4b97d9f..bdcba70
--- a/aodntools/timeseries_products/velocity_aggregated_timeseries.py
+++ b/aodntools/timeseries_products/velocity_aggregated_timeseries.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import os
 import sys
 import tempfile
@@ -306,15 +308,14 @@ def velocity_aggregated(files_to_agg, site_code, input_dir='', output_dir='./',
 
 if __name__ == "__main__":
 
     parser = argparse.ArgumentParser(description="Concatenate X,Y,Z velocity variables from ALL instruments from ALL deployments from ONE site")
-    parser.add_argument('-site', dest='site_code', help='site code, like NRMMAI', required=True)
-    parser.add_argument('-files', dest='filenames', help='name of the file that contains the source URLs', required=True)
-    parser.add_argument('-indir', dest='input_dir', help='base path of input files', default='', required=False)
-    parser.add_argument('-outdir', dest='output_dir', help='path where the result file will be written. Default ./',
-                        default='./', required=False)
-    parser.add_argument('-download_url', dest='download_url', help='path to the download_url_prefix',
-                        default='', required=False)
-    parser.add_argument('-opendap_url', dest='opendap_url', help='path to the opendap_url_prefix',
-                        default='', required=False)
+    parser.add_argument('site_code', help='site code, like NRSMAI')
+    parser.add_argument('filenames',
+                        help='path of file listing the source URLs (relative to input_dir, if given)')
+    parser.add_argument('-i', '--input_dir', help='base path of input files', default='')
+    parser.add_argument('-o', '--output_dir', help='path where the result file will be written. Default ./',
+                        default='./')
+    parser.add_argument('--download_url', help='path to the download_url_prefix', default='')
+    parser.add_argument('--opendap_url', help='path to the opendap_url_prefix', default='')
 
     args = parser.parse_args()
diff --git a/aodntools/timeseries_products/velocity_hourly_timeseries.py b/aodntools/timeseries_products/velocity_hourly_timeseries.py
old mode 100644
new mode 100755
index d3cd104..d0f56d8
--- a/aodntools/timeseries_products/velocity_hourly_timeseries.py
+++ b/aodntools/timeseries_products/velocity_hourly_timeseries.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import argparse
 import json
 import os
@@ -319,15 +321,14 @@ def velocity_hourly_aggregated(files_to_agg, site_code, input_dir='', output_dir
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Concatenate X,Y,Z velocity variables from ALL instruments from ALL deployments from ONE site")
-    parser.add_argument('-site', dest='site_code', help='site code, like NRMMAI', required=True)
-    parser.add_argument('-files', dest='filenames', help='name of the file that contains the source URLs', required=True)
-    parser.add_argument('-indir', dest='input_dir', help='base path of input files', default='', required=False)
-    parser.add_argument('-outdir', dest='output_dir', help='path where the result file will be written. Default ./',
-                        default='./', required=False)
-    parser.add_argument('-download_url', dest='download_url', help='path to the download_url_prefix',
-                        default='', required=False)
-    parser.add_argument('-opendap_url', dest='opendap_url', help='path to the opendap_url_prefix',
-                        default='', required=False)
+    parser.add_argument('site_code', help='site code, like NRSMAI')
+    parser.add_argument('filenames',
+                        help='path of file listing the source URLs (relative to input_dir, if given)')
+    parser.add_argument('-i', '--input_dir', help='base path of input files', default='')
+    parser.add_argument('-o', '--output_dir', help='path where the result file will be written. Default ./',
+                        default='./')
+    parser.add_argument('--download_url', help='path to the download_url_prefix', default='')
+    parser.add_argument('--opendap_url', help='path to the opendap_url_prefix', default='')
 
     args = parser.parse_args()