diff --git a/pyxdf/pyxdf.py b/pyxdf/pyxdf.py
index 50211b9..00d2a71 100644
--- a/pyxdf/pyxdf.py
+++ b/pyxdf/pyxdf.py
@@ -46,6 +46,8 @@ def __init__(self, xml):
         self.srate = float(xml["info"]["nominal_srate"][0])
         # format string (int8, int16, int32, float32, double64, string)
         self.fmt = xml["info"]["channel_format"][0]
+        # Whether or not to skip processing this chunk
+        self.skip = False
         # list of time-stamp chunks (each an ndarray, in seconds)
         self.time_stamps = []
         # list of time-series chunks (each an ndarray or list of lists)
@@ -81,6 +83,7 @@ def load_xdf(
     clock_reset_threshold_offset_seconds=1,
     clock_reset_threshold_offset_stds=10,
     winsor_threshold=0.0001,
+    stream_headers_only=False,
     verbose=None
 ):
     """Import an XDF file.
@@ -113,6 +116,9 @@
           matching either the type *or* the name will be loaded.
         - None: load all streams (default).
 
+    stream_headers_only: Passing True will cause all non-StreamHeader chunks to be skipped.
+        Keyword arguments other than select_streams and verbose will be ignored.
+
     verbose : Passing True will set logging level to DEBUG, False will set it
         to WARNING, and None will use root logger level. (default: None)
 
@@ -201,148 +207,40 @@
     logger.info("Importing XDF file %s..." % filename)
-
-    # if select_streams is an int or a list of int, load only streams
-    # associated with the corresponding stream IDs
-    # if select_streams is a list of dicts, use this to query and load streams
-    # associated with these properties
-    if select_streams is None:
-        pass
-    elif isinstance(select_streams, int):
-        select_streams = [select_streams]
-    elif all([isinstance(elem, dict) for elem in select_streams]):
-        select_streams = match_streaminfos(
-            resolve_streams(filename), select_streams
-        )
-        if not select_streams:  # no streams found
-            raise ValueError("No matching streams found.")
-    elif not all([isinstance(elem, int) for elem in select_streams]):
-        raise ValueError(
-            "Argument 'select_streams' must be an int, a list of ints or a "
-            "list of dicts."
-        )
-
     # dict of returned streams, in order of appearance, indexed by stream id
     streams = OrderedDict()
     # dict of per-stream temporary data (StreamData), indexed by stream id
-    temp = {}
+    temp_stream_data = {}
     # XML content of the file header chunk
     fileheader = None
     with open_xdf(filename) as f:
-        # for each chunk
-        while True:
-            # noinspection PyBroadException
-            try:
-                # read [NumLengthBytes], [Length]
-                chunklen = _read_varlen_int(f)
-            except EOFError:
-                break
-            except Exception:
-                logger.exception("Error reading chunk length")
-                # If there's more data available (i.e. a read() succeeds),
-                # find the next boundary chunk
-                if f.read(1):
-                    logger.warning(
-                        "got zero-length chunk, scanning forward to next "
-                        "boundary chunk."
-                    )
-                    # move the stream position one byte back
-                    f.seek(-1, 1)
-                    if _scan_forward(f):
-                        continue
-                logger.info(" reached end of file.")
-                break
-
-            # read [Tag]
-            tag = struct.unpack("