diff --git a/makeitwright/core/helpers.py b/makeitwright/core/helpers.py index c2e5d26..42c867b 100644 --- a/makeitwright/core/helpers.py +++ b/makeitwright/core/helpers.py @@ -137,11 +137,14 @@ def find_peaks(*data, channel=-1, axis=0, peak_width="medium", noisy=False, **kw return peaks -def norm(arr, tmin, tmax): - diff = tmax-tmin - arr_range = np.max(arr)-np.min(arr) - norm_arr = np.nan_to_num((((arr-np.min(arr))*diff)/arr_range) + tmin) - return norm_arr +def norm(arr, tmin=0, tmax=1): + """ + scale (and offset) an array's range of values to fit between `tmin` and `tmax`. + for some reason, we also set nans as zero and infinities to large real numbers... + """ + new_range = tmax-tmin + old_range = np.max(arr)-np.min(arr) + return np.nan_to_num((((arr-np.min(arr))*new_range)/old_range) + tmin) def split_n(arr, *axes): """ diff --git a/makeitwright/core/parsers/andor.py b/makeitwright/core/parsers/andor.py index 53d7140..ea3abf9 100644 --- a/makeitwright/core/parsers/andor.py +++ b/makeitwright/core/parsers/andor.py @@ -1,10 +1,20 @@ import WrightTools as wt -import numpy as np -import pathlib -from os import fspath -def fromAndorNeo(fpath, name=None, objective_lens='prompt', cps=False): +# builtin conversion from pixels to um (from somebody's records) +# for different objectives +# roughly, pixel size in microns / magnification +px_per_um = { + '5x-Jin' : 0.893, + '20x-Jin' : 3.52, + '100x-Wright' : 18.2, + '5' : 0.893, + '20' : 3.52, + '100' : 18.2, +} + + +def fromAndorNeo(fpath, name=None, px_per_um=None): """Create a data object from Andor Solis software (ascii exports). Parameters @@ -16,169 +26,34 @@ def fromAndorNeo(fpath, name=None, objective_lens='prompt', cps=False): name : string (optional) Name to give to the created data object. If None, filename is used. Default is None. + px_per_um : float-like (optional) + if present, camera spatial dimensions will be mapped in micron units. 
+        if not present, spatial variables of camera will be a unitless index
 
     Returns
     -------
     data
         New data object.
     """
-
-    objective_lenses = {
-        '5x-Jin' : 0.893,
-        '20x-Jin' : 3.52,
-        '100x-Wright' : 18.2,
-        '5' : 0.893,
-        '20' : 3.52,
-        '100' : 18.2,
-        5 : 0.893,
-        20 : 3.52,
-        100 : 18.2
-    }
-    # parse filepath
-    filepath = pathlib.Path(fpath)
-
-    if not ".asc" in filepath.suffixes:
-        wt.exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
-    # parse name
-    if name is None:
-        name = filepath.name.split("/")[-1]
-
-    if objective_lens=='prompt':
-        objective_lens = input(f'enter magnification for data at {name}: ')
-        if not objective_lens:
-            objective_lens = 0
-
-    # create data
-    ds = np.DataSource(None)
-    f = ds.open(fspath(fpath), "rt")
-    axis0 = []
-    arr = []
-    attrs = {}
-
-    line0 = f.readline().strip()[:-1]
-    line0 = [float(x) for x in line0.split(",")] # TODO: robust to space, tab, comma
-    axis0.append(line0.pop(0))
-    arr.append(line0)
-
-    def get_frames(f, arr, axis0):
-        axis0_written = False
-        while True:
-            line = f.readline().strip()[:-1]
-            if len(line) == 0:
-                break
-            else:
-                line = [float(x) for x in line.split(",")]
-                # signature of new frames is restart of axis0
-                if not axis0_written and (line[0] == axis0[0]):
-                    axis0_written = True
-                if axis0_written:
-                    line.pop(0)
-                else:
-                    axis0.append(line.pop(0))
-                arr.append(line)
-        return arr, axis0
+    data:wt.Data = wt.data.from_Solis(fpath, name=name, verbose=True)
+    data.rename_variables(xindex="x", yindex="y", wm="wl")
 
-    arr, axis0 = get_frames(f, arr, axis0)
-    nframes = len(arr) // len(axis0)
+    for var in [v for v in ("x", "y") if v in data.variable_names]:
+        if px_per_um:
+            data[var] /= px_per_um
+            data[var].units = 'µm'
 
-    i = 0
-    while i < 3:
-        line = f.readline().strip()
-        if len(line) == 0:
-            i += 1
-        else:
-            try:
-                key, val = line.split(":", 1)
-            except ValueError:
-                pass
-            else:
-                attrs[key.strip()] = val.strip()
+    dtype = "image" if "x" in data.variable_names else "spectralprofile"
+    data.attrs.update(dtype=dtype)
 
-    
f.close() + if "wl" in data.variable_names: + data["wl"].attrs['label'] = "wavelength (nm)" if data["wl"].units == "nm" else "wavenumber (cm-1)" - #create data object - arr = np.array(arr) - axis0 = np.array(axis0) - data = wt.Data(name=name) - if float(attrs["Grating Groove Density (l/mm)"]) == 0: - xname = 'x' - dtype = 'image' - try: - axis0 = axis0/objective_lenses[objective_lens] - xunits = 'µm' - except KeyError: - xunits = 'px' + if data.signal.units == "Hz": + data.signal.label = "intensity (cps)" else: - xname = 'wl' - xunits = 'nm' - dtype = 'spectralprofile' - - axis1 = np.arange(arr.shape[-1]) - yname='y' - try: - axis1 = axis1/objective_lenses[objective_lens] - yunits = 'µm' - except KeyError: - yunits = 'px' - - axes = [xname, yname] - - if nframes == 1: - arr = np.array(arr) - data.create_variable(name=xname, values=axis0[:, None], units=xunits) - data.create_variable(name=yname, values=axis1[None, :], units=yunits) - else: - frames = np.arange(nframes) - try: - ct = float(attrs["Kinetic Cycle Time (secs)"]) - frames = frames*ct - tname = 't' - tunits = 's' - except KeyError: - tname = 'frame' - tunits = None - arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0])) - data.create_variable(name=tname, values=frames[:, None, None], units=tunits) - data.create_variable(name=xname, values=axis0[None, :, None], units=xunits) - data.create_variable(name=yname, values=axis1[None, None, :], units=yunits) - axes = [tname] + axes - - if xname=='wl': - if xunits=='nm': - data[xname].attrs['label'] = "wavelength (nm)" - if xunits=='wn': - data[xname].attrs['label'] = "wavenumber (cm-1)" - if xname=='x': - data[xname].attrs['label'] = "x (µm)" - if yname=='y': - data[yname].attrs['label'] = "y (µm)" - - data.transform(*axes) - if cps: - try: - arr = arr/float(attrs["Exposure Time (secs)"]) - except KeyError: - pass - try: - arr = arr/int(attrs["Number of Accumulations"]) - except KeyError: - pass - - data.create_channel(name='sig', values=arr, signed=False) 
- if cps: - data['sig'].attrs['label'] = "intensity (cps)" - else: - data['sig'].attrs['label'] = "counts" - - for key, val in attrs.items(): - data.attrs[key] = val - - # finish - print("data created at {0}".format(data.fullpath)) - print(" axes: {0}".format(data.axis_names)) - print(" shape: {0}".format(data.shape)) - data.attrs['dtype']=dtype + data.signal.label = "counts" return data diff --git a/makeitwright/core/parsers/horiba.py b/makeitwright/core/parsers/horiba.py index b3a2f7b..eb6df90 100644 --- a/makeitwright/core/parsers/horiba.py +++ b/makeitwright/core/parsers/horiba.py @@ -1,9 +1,11 @@ import numpy as np import WrightTools as wt -from ..helpers import norm def horiba_typeID(filepath): + """surmise the type of horiba scan (spectrum, linescan, map) without creating data object + NOTE: dtypes disagree with data attrs + """ with open(filepath) as f: txt = f.readlines() header_size = 0 @@ -37,161 +39,25 @@ def horiba_typeID(filepath): return dtype -def fromLabramHR(filepath, name=None, cps=False): - if name is None: - name = filepath.split('/')[-1].split('.')[0] - with open(filepath) as f: - txt = f.readlines() - header_size = 0 - acq = 1 - accum = 1 - spectral_units = 'nm' - - for line in txt: - if "#" in line: - header_size += 1 - if "Acq. 
time" in line: - acq = float(line.split('=\t')[1]) - if "Accumulations" in line: - accum= int(line.split('=\t')[1]) - if "Range (" in line: - if 'eV' in line: - spectral_units='eV' - elif 'cm' in line: - spectral_units='wn' - else: - spectral_units='nm' - if 'Spectro' in line: - if 'eV' in line: - spectral_units='eV' - elif 'cm' in line: - spectral_units='wn' - else: - spectral_units='nm' - total_acq_time = acq*accum - acq_type = {'wn':"Raman", 'nm':"PL", 'eV':"PL"} - siglabels = {'wn':"scattering intensity", 'nm':"PL intensity", 'eV':"PL intensity"} - for key, value in siglabels.items(): - if cps: - siglabels[key] = siglabels[key] + " (cps)" - else: - siglabels[key] = siglabels[key] + " (counts)" - - spectlabels = {'wn':"Raman shift (cm\u207b\u2071)", 'nm':"wavelength (nm)", 'eV':"energy (eV)"} +def fromLabramHR(filepath, **kwargs): + data = wt.data.from_LabRAM(filepath, **kwargs) + # add attrs + if data.ndim == 1: + data.attrs["dtype"] = "spectrum" + elif data.ndim == 2: + data.attrs["dtype"] = "spectrum" if "index" in data.variable_names else "spectralprofile" + elif data.ndim == 3: + data.attrs["dtype"] = "hyperspectral" - wl_arr = np.genfromtxt(filepath, skip_header=header_size, max_rows=1) - ch_arr = np.genfromtxt(filepath, skip_header=header_size+1) - xy_cols = ch_arr.shape[1]-wl_arr.shape[0] - - if xy_cols==0: - sig = ch_arr[:,1] - if cps: - sig = sig/total_acq_time - wl = ch_arr[:,0] - d = wt.Data(name=name) - d.create_variable('wl', values=wl, units=spectral_units) - d['wl'].label = spectlabels[spectral_units] - d.create_channel('sig', values=sig) - d['sig'].label = siglabels[spectral_units] - d.create_channel('norm', values=norm(sig, 0, 1)) - d['norm'].label = 'norm. 
' + siglabels[spectral_units].split(' (')[0] - d.transform('wl') - d.attrs['dtype'] = 'spectrum' - d.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] - d.attrs['exposure time (s)'] = acq - d.attrs['number of accumulations'] = accum - print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} spectrum") - - if xy_cols==1: - sig = ch_arr[:,1:].transpose() - wl = wl_arr[:,None] - y = ch_arr[:,0][None,:] - - is_survey = True - for i in range(1, y.size): - if y.flatten()[i]-y.flatten()[i-1] != 1: - is_survey = False - - if is_survey: - d = [] - for i in range(y.size): - sig_i = sig[:,i].flatten() - if cps: - sig_i = sig_i/total_acq_time - spect = wt.Data(name=f"{name}_spect{i}") - spect.create_variable('wl', values=wl.flatten(), units=spectral_units) - spect['wl'].label = spectlabels[spectral_units] - spect.create_channel(name='sig', values=sig_i) - spect['sig'].label = siglabels[spectral_units] - spect.create_channel(name='norm', values=norm(sig_i, 0, 1)) - spect['norm'].label = 'norm. 
' + siglabels[spectral_units].split(' (')[0] - spect.transform('wl') - spect.attrs['dtype'] = 'spectrum' - spect.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] - spect.attrs['exposure time (s)'] = acq - spect.attrs['number of accumulations'] = accum - d.append(spect) - print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} survey") - else: - if cps: - sig = sig/total_acq_time - d = wt.Data(name=name) - d.create_variable('wl', values=wl, units=spectral_units) - d['wl'].label = spectlabels[spectral_units] - d.create_channel('sig', values=sig) - d['sig'].label = siglabels[spectral_units] - d.create_variable('y', values=y, units='um') - d['y'].label = "y (µm)" - d.transform('wl', 'y') - d.attrs['dtype'] = 'spectralprofile' - d.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] - d.attrs['exposure time (s)'] = acq - d.attrs['number of accumulations'] = accum - print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} linescan") - - if xy_cols==2: - xidx = ch_arr[:,0] - xdim = 1 - for i in range(1,ch_arr.shape[0]): - if xidx[i] != xidx[i-1]: - xdim = xdim+1 - ydim = int(ch_arr.shape[0]/xdim) - - x = np.zeros((xdim,1,1)) - y = np.zeros((1,ydim,1)) - wl = wl_arr.reshape([1,1,wl_arr.size]) - sig = np.zeros((xdim,ydim,wl_arr.size)) - - for i in range(0, ch_arr.shape[0], ydim): x[int(i/ydim),0,0] = ch_arr[i,0] - y[0,:,0] = ch_arr[:ydim,1] - for i in range(xdim): - for j in range(ydim): - sig[i,j,:] = ch_arr[i*ydim+j,2:].reshape([1,1,wl_arr.size]) + acq_type = "Raman" if data.wl.units == "wn" else "PL" + data.attrs['acquisition'] = f'Horiba_{acq_type}' - if cps: - sig = sig/total_acq_time - d = wt.Data(name=name) - d.create_channel('sig', values=sig) - d['sig'].label = siglabels[spectral_units] - d.create_variable('x', values=x, units='um') - d['x'].label = "x (µm)" - d.create_variable('y', values=y, units='um') - d['y'].label = "y (µm)" - d.create_variable('wl', values=wl, units=spectral_units) - d['wl'].label 
= spectlabels[spectral_units] - d.transform('x','y','wl') - d.attrs['dtype'] = 'hyperspectral' - d.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] - d.attrs['exposure time (s)'] = acq - d.attrs['number of accumulations'] = accum - print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} map") + return data - return d def fromLabramHRTimedSeries(filedir): raise NotImplementedError + def fromAramis(filepath): raise NotImplementedError - - diff --git a/makeitwright/core/parsers/sp130.py b/makeitwright/core/parsers/sp130.py index 3912ec4..e8e0a49 100644 --- a/makeitwright/core/parsers/sp130.py +++ b/makeitwright/core/parsers/sp130.py @@ -1,33 +1,10 @@ -import numpy as np import WrightTools as wt -from ..helpers import norm -def fromSP130(fpath, name=None): - if fpath.split('.')[-1] != 'asc': - print(f"filetype .{fpath.split('.')[-1]} not supported") - else: - with open(fpath) as f: - txt = f.readlines() - header_size = 0 - for i, line in enumerate(txt): - if 'Title' in line.split() and name is None: - name = line.split()[-1] - if '*BLOCK' in line: - header_size = i+1 - arr = np.genfromtxt(fpath, delimiter=',', skip_header=header_size, skip_footer=1) - t = arr[:,0] - sig = arr[:,1] - t = t-t[np.argmax(sig)] - - out = wt.Data(name=name) - out.create_variable('t', values=t, units='ns') - out['t'].attrs['label'] = "time (ns)" - out.create_channel('sig', values=sig) - out['sig'].attrs['label'] = "PL counts" - out.transform('t') - out.create_channel('norm', values=norm(out['sig'][:], 0.01, 1)) - out['norm'].attrs['label'] = "norm. PL counts" +def fromSP130(fpath, **kwargs): + data = wt.data.from_spcm(fpath, **kwargs) + data.rename_variables(time="t") + data.rename_channels(counts="sig") + data.create_variable("t_shifted", values=data.t[:] - data.t[data.sig[:].argmax()], units=data.t.units) - return out - + return data