From c6a81395b679e3d4a6e782964c8b4ed089ee6e35 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 13:02:36 -0500 Subject: [PATCH 01/20] refactor parsers all in one spot --- README.md | 28 +- {scripts => examples}/AbsProcessing.py | 2 +- {scripts => examples}/PLProcessing.py | 0 {scripts => examples}/main.py | 0 {scripts => examples}/main2.py | 0 {scripts => examples}/workup.py | 0 makeitwright/VERSION | 1 + makeitwright/__init__.py | 2 + makeitwright/__version__.py | 20 + makeitwright/{process => }/andor.py | 188 +----- makeitwright/artists.py | 440 ------------- makeitwright/{process => }/beckerhickl.py | 12 +- makeitwright/horiba.py | 62 ++ makeitwright/{process => }/iontof.py | 64 +- makeitwright/parsers.py | 153 ----- makeitwright/process/__init__.py | 0 makeitwright/process/afm.py | 119 ---- makeitwright/process/helpers.py | 592 ------------------ makeitwright/process/horiba.py | 256 -------- makeitwright/process/hyperspectral.py | 329 ---------- makeitwright/process/image.py | 112 ---- makeitwright/process/spectralprofile.py | 312 --------- makeitwright/spectra.py | 179 ------ makeitwright/styles.py | 215 ------- .../transmittance_references/Ag-P01.csv | 0 .../transmittance_references/Ag-P02.csv | 0 .../transmittance_references/BK7.csv | 0 .../transmittance_references/CaF2.csv | 0 .../transmittance_references/MgF2.csv | 0 .../transmittance_references/UVAl.csv | 0 .../transmittance_references/UVFS.csv | 0 .../transmittance_references/sapphire.csv | 0 makeitwright/{process => }/xrd.py | 47 +- 33 files changed, 110 insertions(+), 3023 deletions(-) rename {scripts => examples}/AbsProcessing.py (99%) rename {scripts => examples}/PLProcessing.py (100%) rename {scripts => examples}/main.py (100%) rename {scripts => examples}/main2.py (100%) rename {scripts => examples}/workup.py (100%) create mode 100644 makeitwright/VERSION create mode 100644 makeitwright/__version__.py rename makeitwright/{process => 
}/andor.py (78%) delete mode 100644 makeitwright/artists.py rename makeitwright/{process => }/beckerhickl.py (96%) create mode 100644 makeitwright/horiba.py rename makeitwright/{process => }/iontof.py (50%) delete mode 100644 makeitwright/parsers.py delete mode 100644 makeitwright/process/__init__.py delete mode 100644 makeitwright/process/afm.py delete mode 100644 makeitwright/process/helpers.py delete mode 100644 makeitwright/process/horiba.py delete mode 100644 makeitwright/process/hyperspectral.py delete mode 100644 makeitwright/process/image.py delete mode 100644 makeitwright/process/spectralprofile.py delete mode 100644 makeitwright/spectra.py delete mode 100644 makeitwright/styles.py rename makeitwright/{process => }/transmittance_references/Ag-P01.csv (100%) rename makeitwright/{process => }/transmittance_references/Ag-P02.csv (100%) rename makeitwright/{process => }/transmittance_references/BK7.csv (100%) rename makeitwright/{process => }/transmittance_references/CaF2.csv (100%) rename makeitwright/{process => }/transmittance_references/MgF2.csv (100%) rename makeitwright/{process => }/transmittance_references/UVAl.csv (100%) rename makeitwright/{process => }/transmittance_references/UVFS.csv (100%) rename makeitwright/{process => }/transmittance_references/sapphire.csv (100%) rename makeitwright/{process => }/xrd.py (60%) diff --git a/README.md b/README.md index 4791eb5..538cad7 100644 --- a/README.md +++ b/README.md @@ -2,32 +2,32 @@ Jin Group Tools for handling a variety of common data. Builds upon the WrightTools Data object. 
- -## Installation - -Basic: - -`pip install git+https://github.com/wright-group/makeitwright.git` - -iontof support is considered optional; if you need to use iontof data, use - -`pip install git+https://github.com/wright-group/makeitwright.git[iontof]` - - ## Features - a module for each instrument featured - - AFM + - AFM (Gwiddion) - Andor Neo Camera (Solis) - Becker and Hickl SPCM - Horiba LabRAM - Generic Images - ion TOF - XRD (Bruker) -- various data importers to create the WrightTools Data objects. - preset styles and routines for making quick figures +## Installation + +### Basic + +`pip install git+https://github.com/wright-group/makeitwright.git` + +### IonTOF + +support for iontof data is optional; if you need to use iontof data, specify additional imports using: + +`pip install git+https://github.com/wright-group/makeitwright.git[iontof]` + + ## Examples TODO diff --git a/scripts/AbsProcessing.py b/examples/AbsProcessing.py similarity index 99% rename from scripts/AbsProcessing.py rename to examples/AbsProcessing.py index 102a95f..a512722 100644 --- a/scripts/AbsProcessing.py +++ b/examples/AbsProcessing.py @@ -1,5 +1,5 @@ -# Process Reflectance/Transmittance/Absorbance Data from Wright group +# Process Reflectance/Transmittance/Absorbance Data from Wright group Microscope import pathlib import makeitwright.process.andor as andor diff --git a/scripts/PLProcessing.py b/examples/PLProcessing.py similarity index 100% rename from scripts/PLProcessing.py rename to examples/PLProcessing.py diff --git a/scripts/main.py b/examples/main.py similarity index 100% rename from scripts/main.py rename to examples/main.py diff --git a/scripts/main2.py b/examples/main2.py similarity index 100% rename from scripts/main2.py rename to examples/main2.py diff --git a/scripts/workup.py b/examples/workup.py similarity index 100% rename from scripts/workup.py rename to examples/workup.py diff --git a/makeitwright/VERSION b/makeitwright/VERSION new file mode 100644 index 
0000000..8a9ecc2 --- /dev/null +++ b/makeitwright/VERSION @@ -0,0 +1 @@ +0.0.1 \ No newline at end of file diff --git a/makeitwright/__init__.py b/makeitwright/__init__.py index e69de29..0c17c79 100644 --- a/makeitwright/__init__.py +++ b/makeitwright/__init__.py @@ -0,0 +1,2 @@ +from .__version__ import * +from .lib import * diff --git a/makeitwright/__version__.py b/makeitwright/__version__.py new file mode 100644 index 0000000..3b03f91 --- /dev/null +++ b/makeitwright/__version__.py @@ -0,0 +1,20 @@ +import pathlib + + +here = pathlib.Path(__file__).resolve().parent + + +__all__ = ["__version__", "__branch__"] +__version__ = pathlib.Path(here / "VERSION").read_text().strip() + +p = here.parent / ".git" +if p.is_file(): + with open(str(p)) as f: + p = p.parent / f.readline()[8:].strip() # Strip "gitdir: " +p = p / "HEAD" +if p.exists(): + with open(str(p)) as f: + __branch__ = f.readline().rstrip().split(r"/")[-1] + __version__ += "+" + __branch__ +else: + __branch__ = None diff --git a/makeitwright/process/andor.py b/makeitwright/andor.py similarity index 78% rename from makeitwright/process/andor.py rename to makeitwright/andor.py index 01d6ce3..c2d74bb 100644 --- a/makeitwright/process/andor.py +++ b/makeitwright/andor.py @@ -1,13 +1,9 @@ -import pathlib -import os import warnings import numpy as np -import WrightTools as wt -from . import image -from . import spectralprofile -from .helpers import roi, set_label, get_channels -import makeitwright.styles as styles +from .lib import image, spectralprofile +from .lib.helpers import roi, set_label, get_channels +import makeitwright.lib.styles as styles APD_PIXEL = (1325, 1080) SLIT_PIXEL_COLUMN = 1325 @@ -52,184 +48,6 @@ def plot_decomposition(data, channel=0, mode='R', **kwargs): spectralprofile.plot_decomposition(data, 'wl', 'y', channel, **params) -def fromAndorNeo(fpath, name=None, objective_lens='prompt', cps=False): - """Create a data object from Andor Solis software (ascii exports). 
- - Parameters - ---------- - fpath : path-like - Path to file (should be .asc format). - Can be either a local or remote file (http/ftp). - Can be compressed with gz/bz2, decompression based on file name. - name : string (optional) - Name to give to the created data object. If None, filename is used. - Default is None. - - Returns - ------- - data - New data object. - """ - - objective_lenses = { - '5x-Jin' : 0.893, - '20x-Jin' : 3.52, - '100x-Wright' : 18.2, - '5' : 0.893, - '20' : 3.52, - '100' : 18.2, - 5 : 0.893, - 20 : 3.52, - 100 : 18.2 - } - - # parse filepath - filestr = os.fspath(fpath) - filepath = pathlib.Path(fpath) - - if not ".asc" in filepath.suffixes: - wt.exceptions.WrongFileTypeWarning.warn(filepath, ".asc") - # parse name - if name is None: - name = filepath.name.split("/")[-1] - - if objective_lens=='prompt': - objective_lens = input(f'enter magnification for data at {name}: ') - if not objective_lens: - objective_lens = 0 - - # create data - ds = np.DataSource(None) - f = ds.open(filestr, "rt") - axis0 = [] - arr = [] - attrs = {} - - line0 = f.readline().strip()[:-1] - line0 = [float(x) for x in line0.split(",")] # TODO: robust to space, tab, comma - axis0.append(line0.pop(0)) - arr.append(line0) - - def get_frames(f, arr, axis0): - axis0_written = False - while True: - line = f.readline().strip()[:-1] - if len(line) == 0: - break - else: - line = [float(x) for x in line.split(",")] - # signature of new frames is restart of axis0 - if not axis0_written and (line[0] == axis0[0]): - axis0_written = True - if axis0_written: - line.pop(0) - else: - axis0.append(line.pop(0)) - arr.append(line) - return arr, axis0 - - arr, axis0 = get_frames(f, arr, axis0) - nframes = len(arr) // len(axis0) - - i = 0 - while i < 3: - line = f.readline().strip() - if len(line) == 0: - i += 1 - else: - try: - key, val = line.split(":", 1) - except ValueError: - pass - else: - attrs[key.strip()] = val.strip() - - f.close() - - #create data object - arr = np.array(arr) 
- axis0 = np.array(axis0) - data = wt.Data(name=name) - if float(attrs["Grating Groove Density (l/mm)"]) == 0: - xname = 'x' - dtype = 'image' - try: - axis0 = axis0/objective_lenses[objective_lens] - xunits = 'µm' - except KeyError: - xunits = 'px' - else: - xname = 'wl' - xunits = 'nm' - dtype = 'spectralprofile' - - axis1 = np.arange(arr.shape[-1]) - yname='y' - try: - axis1 = axis1/objective_lenses[objective_lens] - yunits = 'µm' - except KeyError: - yunits = 'px' - - axes = [xname, yname] - - if nframes == 1: - arr = np.array(arr) - data.create_variable(name=xname, values=axis0[:, None], units=xunits) - data.create_variable(name=yname, values=axis1[None, :], units=yunits) - else: - frames = np.arange(nframes) - try: - ct = float(attrs["Kinetic Cycle Time (secs)"]) - frames = frames*ct - tname = 't' - tunits = 's' - except KeyError: - tname = 'frame' - tunits = None - arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0])) - data.create_variable(name=tname, values=frames[:, None, None], units=tunits) - data.create_variable(name=xname, values=axis0[None, :, None], units=xunits) - data.create_variable(name=yname, values=axis1[None, None, :], units=yunits) - axes = [tname] + axes - - if xname=='wl': - if xunits=='nm': - data[xname].attrs['label'] = "wavelength (nm)" - if xunits=='wn': - data[xname].attrs['label'] = "wavenumber (cm-1)" - if xname=='x': - data[xname].attrs['label'] = "x (µm)" - if yname=='y': - data[yname].attrs['label'] = "y (µm)" - - data.transform(*axes) - if cps: - try: - arr = arr/float(attrs["Exposure Time (secs)"]) - except KeyError: - pass - try: - arr = arr/int(attrs["Number of Accumulations"]) - except KeyError: - pass - - data.create_channel(name='sig', values=arr, signed=False) - if cps: - data['sig'].attrs['label'] = "intensity (cps)" - else: - data['sig'].attrs['label'] = "counts" - - for key, val in attrs.items(): - data.attrs[key] = val - - # finish - print("data created at {0}".format(data.fullpath)) - print(" axes: 
{0}".format(data.axis_names)) - print(" shape: {0}".format(data.shape)) - data.attrs['dtype']=dtype - - return data def _get_reference_material_array(data, channel, material): """ diff --git a/makeitwright/artists.py b/makeitwright/artists.py deleted file mode 100644 index 4a52103..0000000 --- a/makeitwright/artists.py +++ /dev/null @@ -1,440 +0,0 @@ -import numpy as np -import WrightTools as wt -import matplotlib.cm as cm -from matplotlib import pyplot as plt -from .process.helpers import roi, parse_args -from . import styles - -def plot(data, **kwargs): - if type(data) is wt.Collection: - data = [data[key] for key in data] - if type(data) is not list: - data = [data] - - - #set parameters for plotting from kwargs - params = { - "plot_type" : "line", - "xscale" : "linear", - "xticks" : True, - "yscale" : "linear", - "yticks" : True, - "axis" : 0, - "channel" : -1, - "ROI" : None, - "xrange" : None, - "vrange" : None, - "offset" : 0, - "reference_lines" : None, - "title" : None - } - params.update(styles.spectra) - params.update(**kwargs) - - signed=False - - colors = __parse_colors(data, params['colors']) - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - for i in range(len(data)): - #convert axis/channel indices to natural names - axis, = parse_args(data[i], params["axis"]) - - if params['channel']=='prompt': #kill this if else if all your code suddenly stops working - channel, = parse_args(data[i], input(f'select channel from {data[i].natural_name}: {[ch.natural_name for ch in data[i].channels]} '), dtype='Channel') - else: - channel, = parse_args(data[i], params["channel"], dtype='Channel') - if data[i][channel].signed: - signed=True - - #extract ROI - if params["ROI"] is not None: - out = roi(data[i], params["ROI"]) - else: - out = data[i] - - #plot data - if params["plot_type"] == "line": - ax.plot(out[axis][:],out[channel][:]+i*params["offset"], - linewidth=params["linewidth"], alpha=params["alpha"], 
color=colors[i]) - if params["plot_type"] == "scatter": - ax.scatter(out[axis][:],out[channel][:]+i*params["offset"], - marker=params["marker"], alpha=params["alpha"], color=colors[i], s=params["marker_size"]) - - if signed: - ax.axhline(y=0, color='black', linewidth=1) - - if params["reference_lines"] is not None: - if type(params["reference_lines"]) is not list: - params["reference_lines"] = [params["reference_lines"]] - for reference_line in params["reference_lines"]: - ax.axvline(x=reference_line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5) - - #adjust plot frame - if params["xrange"] is not None: - xrange = params["xrange"] - else: - xrange = __get_range(*data, reference_key=params["axis"], dtype='Axis') - if params["xscale"] == 'log' and xrange[0]<=0: - xrange[0] = 0.001 - ax.set_xlim(*xrange) - ax.set_xscale(params["xscale"]) - if params["xlabel"] is None: - try: - params["xlabel"] = out[axis].attrs['label'] - except KeyError: - params["xlabel"] = 'x' - ax.set_xlabel(params["xlabel"]) - if not params["xticks"]: - ax.set_xticks([]) - - if params["vrange"] is not None: - vrange = params["vrange"] - else: - vrange = __get_range(*data, reference_key=params["channel"], offset=params["offset"]) - if params["yscale"] == 'log' and vrange[0]<=0: - vrange[0] = 0.001 - ax.set_ylim(*vrange) - ax.set_yscale(params["yscale"]) - if params["ylabel"] is None: - try: - params["ylabel"] = out[channel].attrs['label'] - except KeyError: - params["ylabel"] = 'y' - ax.set_ylabel(params["ylabel"]) - if not params["yticks"]: - ax.set_yticks([]) - - if params["title"] is not None: - ax.set_title(params["title"]) - - plt.show() - -def plot2D(data, channel, **kwargs): - - #convert axis/channel indices to natural names - channel, = parse_args(data, channel, dtype='Channel') - - #set parameters for plotting from kwargs - params = { - "ROI" : None, - 'ticks' : 'auto', - "vrange" : None, - "reference_lines" : None, - "title" : None - } - params.update(styles.profile) - 
- if data[channel].signed: - params["cmap"] = cm.RdBu_r - params.update(**kwargs) - - if params["ROI"] is not None: - out = roi(data, params["ROI"]) - else: - out = data - - #determine range to be plotted - if params["vrange"] is None: - vrange = __get_range(out, reference_key=channel) - else: - vrange = params["vrange"] - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) - #array needs to be transposed before passing to pcolormesh because apparently no matplotlib devs thought about what arrays look like - try: - mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - except TypeError: - mesh = ax.pcolormesh(xgrid, ygrid, out[channel][:], cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - if params["title"] is not None: - ax.set_title(params["title"]) - - if params["reference_lines"] is not None: - if type(params["reference_lines"]) is not list: - params["reference_lines"] = [params["reference_lines"]] - for reference_line in params["reference_lines"]: - ax.axvline(x=reference_line, linewidth=1, color='grey', linestyle='--', alpha=0.25) - - #set ticks - if params['ticks'] == 'auto': - ticks = np.linspace(vrange[0], vrange[1], num=11) - elif params['ticks'] is None: - ticks = [] - else: - ticks = params['ticks'] - - # plot colorbar - cbar = plt.colorbar(mesh) - cbar.set_ticks(ticks) - cbar.set_label(params["cbar_label"]) - -def plot3D(data, profile_axis, channel, **kwargs): - - #convert axis/channel indices to natural names - profile_axis, = parse_args(data, profile_axis) - spectral_axis = data.axes[-1].natural_name - channel, = parse_args(data, channel, dtype='Channel') - non_profile_axis = [axis.natural_name for axis in data.axes[:-1] if axis.natural_name != profile_axis][0] - - #set parameters for plotting 
from kwargs - params = { - "ROI" : None, - 'ticks' : 'auto', - "vrange" : None, - "reference_lines" : None, - "title" : None - } - params.update(styles.profile) - if data[channel].signed: - params["cmap"] = cm.RdBu_r - params.update(**kwargs) - - #extract ROI - if params["ROI"] is not None: - out = roi(data, params["ROI"]) - if len(out.axes) != 2: - out = roi(out, {non_profile_axis : 'sum'}) - else: - out = roi(data, {non_profile_axis : 'sum'}) - out.transform(spectral_axis, profile_axis) - - #determine range to be plotted - if params["vrange"] is None: - vrange = __get_range(out, reference_key=channel) - else: - vrange = params["vrange"] - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - xgrid, ygrid = np.meshgrid(out.axes[1][:], out.axes[0][:]) - try: - mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - except TypeError: - mesh = ax.pcolormesh(xgrid, ygrid, out[channel][:], cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - - if params["reference_lines"] is not None: - if type(params["reference_lines"]) is not list: - params["reference_lines"] = [params["reference_lines"]] - for reference_line in params["reference_lines"]: - ax.axvline(x=reference_line, linewidth=1, color='grey', linestyle='--', alpha=0.25) - - #set ticks - if params['ticks'] == 'auto': - ticks = np.linspace(vrange[0], vrange[1], num=11) - elif params['ticks'] is None: - ticks = [] - else: - ticks = params['ticks'] - - # plot colorbar - cbar = plt.colorbar(mesh) - cbar.set_ticks(ticks) - cbar.set_label(params["cbar_label"]) - - if params["title"] is not None: - ax.set_title(params["title"]) - -def image(data, channel, **kwargs): - #convert axis/channel indices to natural names - channel, = parse_args(data, channel, dtype='Channel') - - #set parameters for plotting from kwargs - params = { - 
"ROI" : None, - "vrange" : None, - "reference_lines" : None, - "title" : None - } - params.update(styles.image) - if data[channel].signed: - params["cmap"] = cm.RdBu_r - params.update(**kwargs) - - #extract ROI - if params["ROI"] is not None: - out = roi(data, params["ROI"]) - else: - out = data - - #determine range to be plotted - if params["vrange"] is None: - vrange = __get_range(out, reference_key=channel) - else: - vrange = params["vrange"] - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) - ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - - if params["reference_lines"] is not None: - if type(params["reference_lines"]) is not list: - params["reference_lines"] = [params["reference_lines"]] - for reference_line in params["reference_lines"]: - ax.axvline(x=reference_line, linewidth=1, color='grey', linestyle='--', alpha=0.25) - - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - ax.set_aspect("equal") - if params["title"] is not None: - ax.set_title(params["title"]) - -def image3D(data, channel, **kwargs): - - #convert axis/channel indices to natural names - channel, = parse_args(data, channel, dtype='Channel') - non_spatial_axis = data.axes[-1].natural_name - - #set parameters for plotting from kwargs - params = { - "ROI" : None, - 'ticks' : 'auto', - "vrange" : None, - "title" : None - } - params.update(styles.image) - if data[channel].signed: - params["cmap"] = cm.RdBu_r - params.update(**kwargs) - - #extract ROI - if params["ROI"] is not None: - out = roi(data, params["ROI"]) - if len(out.axes) != 2: - out = roi(out, {non_spatial_axis : 'sum'}) - else: - out = roi(data, {non_spatial_axis : 'sum'}) - - #determine range to be plotted - if params["vrange"] is None: - vrange = __get_range(out, reference_key=channel) - else: - vrange = params["vrange"] - - 
#setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) - mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - ax.set_aspect("equal") - if params["title"] is not None: - ax.set_title(params["title"]) - - #set ticks - if params['ticks'] == 'auto': - ticks = np.linspace(vrange[0], vrange[1], num=11) - elif params['ticks'] is None: - ticks = [] - else: - ticks = params['ticks'] - - # plot colorbar - cbar = plt.colorbar(mesh) - cbar.set_ticks(ticks) - cbar.set_label(params["cbar_label"]) - -def plot_tandem(d1,d2, figsize=(2.6,1), axis=0, channels=(-1,-1), - xticks=True, yticks=[True,True], xlabel="wavelength (nm)", ylabels=["reflectance","absorbance"], - xrange=[400,650], vranges=[(0,1),(0,1)], colors=['coral','royalblue'], - linewidth=1, reference_lines=None): - #setup plot frame - fig, ax1 = plt.subplots(figsize=figsize) - ax2 = ax1.twinx() - - #convert axis/channel indices to natural names - axis1, = parse_args(d1, axis) - axis2, = parse_args(d2, axis) - channel1, = parse_args(d1, channels[0], dtype='Channel') - channel2, = parse_args(d2, channels[1], dtype='Channel') - - #plot data - ax1.plot(d1[axis1][:],d1[channel1][:], linewidth=linewidth, color=colors[0]) - ax2.plot(d2[axis1][:],d2[channel2][:], linewidth=linewidth, color=colors[1]) - - if reference_lines is not None: - if type(reference_lines) is not list: - reference_lines = [reference_lines] - for line in reference_lines: - ax1.axvline(x=line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5) - - #adjust plot frame - if xrange is None: - xrange = __get_range(*[d1,d2], reference_key=axis, dtype='Axis') - ax1.set_xlim(*xrange) - ax1.set_xlabel(xlabel) - if not xticks: - ax1.set_xticks([]) - - for i, v in enumerate(vranges): - if v is None: - if 
i==0: - vranges[i] = __get_range(d1, reference_key=channel1, offset=0) - if i==1: - vranges[i] = __get_range(d2, reference_key=channel2, offset=0) - ax1.set_ylim(*vranges[0]) - ax2.set_ylim(*vranges[1]) - ax1.set_ylabel(ylabels[0]) - ax2.set_ylabel(ylabels[1]) - if not yticks[0]: - ax1.set_yticks([]) - if not yticks[1]: - ax2.set_yticks([]) - -def __parse_colors(data, colors): - if type(colors) is list: - if len(colors) < len(data): - q, r = divmod(len(data), len(colors)) - colors = q*colors+colors[:r] - else: - try: - colors = colors(np.linspace(0,1,len(data))) - except: - colors = [colors for i in range(len(data))] - return colors - -def __get_range(*data, reference_key=0, dtype='Channel', window='default', offset=0): - ranges = [] - signed=False - default_windows = { - 'Axis' : 1, - 'Channel' : 1.1 - } - if window=='default': - window = default_windows[dtype] - - for d in data: - key, = parse_args(d, reference_key, dtype=dtype) - ranges.append([np.min(d[key][:]), np.max(d[key][:])]) - if dtype=='Channel': - if d[key].signed: - signed=True - - ranges_min, ranges_max = min([r[0] for r in ranges]), max([r[1] for r in ranges]) - if offset != 0: - ranges_max = sum([r[0] for r in ranges]) + offset*(len(data)-1) + [r[1] for r in ranges][-1] - - rng = [(ranges_min+(ranges_max-ranges_min)/2)-(window*(ranges_max-ranges_min)/2), (ranges_min+(ranges_max-ranges_min)/2)+(window*(ranges_max-ranges_min)/2)] - if signed and ranges_min*ranges_max < 0 and not offset: #make window symmetric about zero if min and max have opposite sign - return [-window*max(rng),window*max(rng)] - else: - return rng - -def __contrast(d, ch, contrast=[99,1]): - return [np.percentile(d[ch][:],min(contrast)),np.percentile(d[ch][:],max(contrast))] \ No newline at end of file diff --git a/makeitwright/process/beckerhickl.py b/makeitwright/beckerhickl.py similarity index 96% rename from makeitwright/process/beckerhickl.py rename to makeitwright/beckerhickl.py index 231e2e4..ff05378 100644 --- 
a/makeitwright/process/beckerhickl.py +++ b/makeitwright/beckerhickl.py @@ -7,15 +7,9 @@ from scipy.optimize import curve_fit from scipy.stats import pearsonr -from . import helpers -import makeitwright.spectra as spectra -import makeitwright.styles as styles - - -get_axes = helpers.get_axes -get_channels = helpers.get_channels -set_label = helpers.set_label -roi = helpers.roi +from .lib.helpers import get_axes, get_channels, set_label, roi +import makeitwright.lib.spectra as spectra +import makeitwright.lib.styles as styles def fromSP130(fpath, name=None): diff --git a/makeitwright/horiba.py b/makeitwright/horiba.py new file mode 100644 index 0000000..d890ee5 --- /dev/null +++ b/makeitwright/horiba.py @@ -0,0 +1,62 @@ +import numpy as np +import WrightTools as wt +import makeitwright.lib.styles as styles + +from .lib import spectralprofile, hyperspectral + +def central_wavelength(data): + pass + +def plot_image(data, channel, **kwargs): + params = {} + try: + unit = data['wl'].units + except KeyError: + unit = data.constants[0].units + if unit == 'wn': + params.update(styles.image_horiba_Raman) + else: + params.update(styles.image_horiba_PL) + params.update(**kwargs) + + if len(data.axes) == 3: + hyperspectral.plot_image(data, channel, **params) + else: + spectralprofile.plot_image(data, channel, **params) + +def plot_profile(data, channel, profile_axis='y', **kwargs): + params = {} + try: + unit = data['wl'].units + except KeyError: + unit = data.constants[0].units + + if data.axes[1].natural_name == 't': + params.update(styles.profile_horiba_timed_series) + elif unit == 'wn': + params.update(styles.profile_horiba_Raman) + else: + params.update(styles.profile_horiba_PL) + params.update(**kwargs) + + if len(data.axes) == 3: + hyperspectral.plot_profile(data, profile_axis, channel, **params) + else: + spectralprofile.plot_profile(data, channel, **params) + +def plot_decomposition(data, channel, **kwargs): + params = {} + try: + unit = data['wl'].units + except 
KeyError: + unit = data.constants[0].units + if unit == 'wn': + params.update(styles.decomposition_horiba_Raman) + else: + params.update(styles.decomposition_horiba_PL) + params.update(**kwargs) + + if len(data.axes) == 3: + hyperspectral.plot_decomposition(data, 0, 1, 2, channel, **params) + else: + spectralprofile.plot_decomposition(data, 0, 1, channel, **params) diff --git a/makeitwright/process/iontof.py b/makeitwright/iontof.py similarity index 50% rename from makeitwright/process/iontof.py rename to makeitwright/iontof.py index e997070..a678b45 100644 --- a/makeitwright/process/iontof.py +++ b/makeitwright/iontof.py @@ -1,9 +1,7 @@ import numpy as np import cmocean -import pySPM -import WrightTools as wt -from . import hyperspectral, styles -from . import helpers +from .lib import hyperspectral, styles, helpers + def relative_proportion(data, channel0, channel1): """ @@ -75,61 +73,3 @@ def plot_depth_trace(data, channel, **kwargs): hyperspectral.plot_decomposition(data, 'x', 'y', 'scan', channel, **kwargs) -def ITApeaks(fpath): - ita=pySPM.ITA(fpath) - summ = ita.get_summary() - return summ['peaks'] - -def fromITA(fpath, name=None, select_channels=None): - - ita = pySPM.ITA(fpath) - ita.show_summary() - summ = ita.get_summary() - - xarr = np.linspace(0, summ['fov']*1e6, num=summ['pixels']['x']) - yarr = np.linspace(0, summ['fov']*1e6, num=summ['pixels']['y']) - scarr = np.linspace(1, int(summ['Scans']), num=int(summ['Scans'])) - charrs = {} - if select_channels is not None: - idxs = [] - for peak in summ['peaks']: - if peak['id'] in select_channels or peak['assign'] in select_channels: - idxs = idxs + [peak['id']] - for idx in idxs: - if summ['peaks'][idx]['assign']: - chname = summ['peaks'][idx]['assign'] - elif summ['peaks'][idx]['desc']: - chname = summ['peaks'][idx]['desc'] - else: - chname = str(int(summ['peaks'][idx]['cmass'])) + 'mz' - charr = ita.getImage(idx,0) - for i in range(1,len(scarr)): - j = ita.getImage(idx,i) - charr = np.dstack((charr,j)) - 
charrs[chname] = charr - print("channel <" + chname + "> found") - else: - for peak in summ['peaks']: - if peak['assign']: - chname = peak['assign'] - elif peak['desc']: - chname = peak['desc'] - else: - chname = str(int(peak['cmass'])) + 'mz' - idx = peak['id'] - charr = ita.getImage(idx,0) - for i in range(1,len(scarr)): - j = ita.getImage(idx,i) - charr = np.dstack((charr,j)) - charrs[chname] = charr - print("channel <" + chname + "> found") - - d = wt.Data() - d.create_variable(name='x', values=xarr[:,None,None], units='um') - d.create_variable(name='y', values=yarr[None,:,None], units='um') - d.create_variable(name='scan', values=scarr[None,None,:], units='s') - for chname, charr in charrs.items(): - d.create_channel(name=chname, values=charr) - d.transform('x','y','scan') - - return d \ No newline at end of file diff --git a/makeitwright/parsers.py b/makeitwright/parsers.py deleted file mode 100644 index 5ffaf6a..0000000 --- a/makeitwright/parsers.py +++ /dev/null @@ -1,153 +0,0 @@ -from psutil import virtual_memory -from os import listdir -from os.path import isfile, isdir, getsize - -from .process import afm, andor, beckerhickl, horiba, xrd -try: # iontof is optional - from .process import iontof -except ImportError: - pass -import WrightTools as wt -import numpy as np - - -def typeID(*fpaths): - """ - Infer what kind of data the file contains. - The kind will inform on how to correctly import the data. 
- """ - types = {} - for fpath in fpaths: - if '.ita' in fpath: - types[fpath] = 'iontof_SIMS' - print(f"file {fpath} is IonToF SIMS data") - - if '.txt' in fpath: - with open(fpath) as f: - txt = f.read() - if "LabRAM HR" in txt: - if horiba.typeID(fpath) is not None: - types[fpath] = horiba.typeID(fpath) - if "Goniometer" in txt: - types[fpath] = 'Bruker_XRD' - if "[m]" in txt: - types[fpath] = 'Gwyddion_traces' - - if '.asc' in fpath: - with open(fpath) as f: - txt = f.read() - if "*BLOCK" in txt: - types[fpath] = 'TRPL' - else: - types[fpath] = 'ASCII' - - if '.wt5' in fpath: - types[fpath] = 'wt5' - - print(f"{len(types)} of {len(fpaths)} files identified as valid data types") - return types - - -def listfiles(fdir, flist=[]): - if len(flist) < 1000: - dirlist = [f'{fdir}/{d}' for d in listdir(fdir) if isdir(f'{fdir}/{d}')] - fpaths = flist+[f'{fdir}/{f}' for f in listdir(fdir) if isfile(f'{fdir}/{f}')] - - if dirlist: - for d in dirlist: - fpaths = listfiles(d, flist=fpaths) - - return fpaths - else: - print("Too many files in directory. 
Process terminated to prevent overflow.") - - -def parse(fdir, objective, select_types=None, keywords:list|str=[], exclude=[]): - files = listfiles(fdir) - - include = [1 for i in range(len(files))] - if keywords: - if type(keywords) is not list: - keywords = [keywords] - for kw in keywords: - for i, f in enumerate(files): - if kw not in f: - include[i]=0 - if exclude: - if type(exclude) is not list: - exclude = [exclude] - for x in exclude: - for i, f in enumerate(files): - if x in f: - include[i]=0 - - files = [file for i, file in enumerate(files) if include[i]] - print(f'found {sum(include)} files matching keyword specifications') - - ftypes = typeID(*files) - if select_types: - to_delete=[] - num_removed=0 - for key, value in ftypes.items(): - if value not in select_types: - to_delete.append(key) - num_removed+=1 - if to_delete: - for key in to_delete: - del(ftypes[key]) - print(f'excluded {num_removed} files that did not match specified data type(s)') - - if 'ASCII' in ftypes.values(): - if not objective: - objective = input(f'Enter objective lens magnification if all data in this directory used the same lens. 
Otherwise, press enter: ') - if not objective: - objective = 'prompt' - - #make sure call doesn't generate too much data, roughly 1 GB - too_much_data = False - if len([dtype for dtype in ftypes.values() if dtype=='iontof_SIMS']) > 1: - too_much_data = True - if len([dtype for dtype in ftypes.values() if dtype=='ASCII']) > 100: - too_much_data = True - if len(ftypes) > 200: - too_much_data = True - if sum([getsize(f) for f in files]) > virtual_memory().available: - too_much_data = True - - if not too_much_data: - d = [] - for fpath, dtype in ftypes.items(): - basename = fpath.split('/')[-1].split('.')[0] - - if dtype.startswith('LabramHR'): - d.append(horiba.fromLabramHR(fpath, name=basename)) - - elif dtype=='Bruker_XRD': - l0 = len(d) - d = d + xrd.fromBruker(fpath) - - elif dtype=='Gwyddion_traces': - d.append(afm.fromGwyddion_traces(fpath, name=None, ID_steps=True)) - - elif dtype=='iontof_SIMS': - d.append((fpath, iontof.ITApeaks(fpath))) - - elif dtype=='TRPL': - l0 = len(d) - d.append(beckerhickl.fromSP130(fpath, name=basename)) - print(basename) - - elif dtype=='ASCII': - try: - d.append(andor.fromAndorNeo(fpath, name=basename, objective_lens=objective)) - except: - print(f'attempted to extract ASCII data from path <{fpath}> but it was not recognized by the andor module') - print(basename) - - elif dtype=='wt5': - d.append(wt.open(fpath)) - if len(d)==1: - d=d[0] - return d - else: - print("too much data in directory, parsing cancelled to prevent storage overflow") \ No newline at end of file diff --git a/makeitwright/process/__init__.py b/makeitwright/process/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/makeitwright/process/afm.py b/makeitwright/process/afm.py deleted file mode 100644 index 7a8061b..0000000 --- a/makeitwright/process/afm.py +++ /dev/null @@ -1,119 +0,0 @@ -import numpy as np -import WrightTools as wt - - -def fromPicoView(filepath, name=None, convert_units=True, flatten_order=0): - """ - under development - """ 
- raise NotImplementedError - - -def fromGwyddion_traces(filepath, name=None, convert_units=True, ID_steps=False, flatten=False): - """ - Generate individual Data objects for a series of traces as exported from Gwyddion workup. - - Arguments - --------- - filepath - str - The path to where the data is located. - - Keyword Arguments - ----------------- - name - str - - The base name for the data - convert_units - bool - - When True, converts the units of x and y into what is anticipated for typical AFM topography (um, nm) - ID_steps - bool - - When True, identifies the most significant topography change in the trace as a "step" and sets that position as 0 in the x array - flatten - bool - - When True, subtracts the median slope from the y trace - - Returns - ------- - data - WrightTools Data object or list of WrightTools Data objects - the data generated from the file's arrays - """ - if name is None: - basename = filepath.split('/')[-1].split('.')[0] - else: - basename = name - - header = 0 - delimiter = None - dims = None - units = None - with open(filepath) as f: - txt = f.readlines() - for i, line in enumerate(txt): - if not '.' 
in line: #assumes each row of data has a decimal somewhere - header+=1 - if 'x' in line: - spl = line.split() - dims = [(spl[i], spl[i+1]) for i in range(0,len(spl),2)] - if '[' in line: - units = [l.strip('[]') for l in line.split()] - units = [(units[i], units[i+1]) for i in range(0, len(units), 2)] - if ',' in line and i>2: - delimiter = ',' - if i>10: - break - arr = np.genfromtxt(filepath, skip_header=header, delimiter=delimiter) - if arr.shape[1]>2: - profiles = np.split(arr, arr.shape[1]/2, axis=1) - else: - profiles = [arr] - profiles = [p[~np.isnan(p).any(axis=1)] for p in profiles] - - if dims is None: - dims = [('x','y') for i in range(len(profiles))] - if units is None: - units = [None for i in range(len(profiles))] - #return profiles, dims, units - - data = [] - for i, (profile, dim, unit) in enumerate(zip(profiles, dims, units)): - x, y = profile[:,0], profile[:,1] - - if convert_units: - if unit is None: - x = wt.units.convert(x,'m','um') - xunit, yunit = 'm', 'm' - print(f'no units for x or y identified - assumed each to be meters') - else: - if wt.units.is_valid_conversion(unit[0], 'um'): - x = wt.units.convert(x, unit[0], 'um') - xunit = 'um' - else: - print(f'unrecognized unit {unit[0]} for x dimension of profile {i} - conversion did not proceed') - xunit = unit[0] - if wt.units.is_valid_conversion(unit[1], 'nm'): - y = wt.units.convert(y, unit[1], 'nm') - yunit = 'nm' - else: - print(f'unrecognized unit {unit[1]} for x dimension of profile {i} - conversion did not proceed') - yunit = unit[1] - - if ID_steps: - steppos = np.argmax(np.abs(np.gradient(y))) - x = x-x[steppos] - xlabel = f'distance from edge ({xunit})' - else: - xlabel = f'distance ({xunit})' - - if flatten: - slope = np.median(np.gradient(y))/np.median(np.gradient(x)) - bkg = slope*x+y[0] - y = y-bkg - - d = wt.Data(name=f'{basename}_profile{i}') - d.create_variable(dim[0], values=x, units=xunit) - d.create_channel(dim[1], values=y, units=yunit) - d.create_channel(f'{dim[1]}_rel', 
values=y-np.min(y), units=yunit) - d[dim[0]].attrs['label'] = xlabel - d[dim[1]].attrs['label'] = f'topography ({yunit})' - d[f'{dim[1]}_rel'].attrs['label'] = f'relative height ({yunit})' - d.transform(dim[0]) - data.append(d) - - if len(data) == 1: - data = data[0] - return data \ No newline at end of file diff --git a/makeitwright/process/helpers.py b/makeitwright/process/helpers.py deleted file mode 100644 index c2e5d26..0000000 --- a/makeitwright/process/helpers.py +++ /dev/null @@ -1,592 +0,0 @@ -import numpy as np -from scipy.signal import find_peaks_cwt -import matplotlib as mpl -import matplotlib.pyplot as plt -import WrightTools as wt - -def parse_args(data, *args, dtype='Axis', return_name=True): - argout = list(args) - - if dtype == 'Axis': - for i, arg in enumerate(args): - if return_name: - if isinstance(arg, int): - argout[i] = data.axes[arg].natural_name - else: - if isinstance(arg, str): - argout[i] = [i for i, axis in enumerate(data.axes) if axis.natural_name==arg][0] - - if dtype == 'Channel': - for i, arg in enumerate(args): - if return_name: - if isinstance(arg, int): - argout[i] = data.channels[arg].natural_name - else: - if isinstance(arg, str): - argout[i] = [i for i, channel in enumerate(data.channels) if channel.natural_name==arg][0] - - if len(argout) == 1: - return (argout[0],) - else: - return tuple(argout) - -def parse_kwargs(params, **kwargs): - for key, value in kwargs.items(): - params[key] = value - return params - -def get_axes(data, *keys, asindex=False): - idx = list(keys) - axdict = {ax.natural_name:i for i, ax in enumerate(data.axes)} - for i, key in enumerate(keys): - if type(key) is not int: - try: - idx[i] = axdict[key] - except KeyError: - print(f'axis {key} not found') - idx[i] = None - idx = [i for i in idx if i is not None] - if asindex: - return tuple(idx) - else: - return tuple([data.axes[i].natural_name for i in idx]) - -def get_channels(data, *keys, asindex=False): - idx = list(keys) - chdict = {ch.natural_name:i 
for i, ch in enumerate(data.channels)} - for i, key in enumerate(keys): - if type(key) is not int: - try: - idx[i] = chdict[key] - except KeyError: - print(f'axis {key} not found') - idx[i] = None - idx = [i for i in idx if i is not None] - if asindex: - return tuple(idx) - else: - return tuple([data.channels[i].natural_name for i in idx]) - -def set_label(data, key, name): - if type(key) is not str: - raise TypeError(f'key must be string, function received {type(key)}') - - if type(data) is not list: - data = [data] - - for d in data: - try: - d[key].attrs['label'] = name - except KeyError: - print(f'no object with key {key} in data {d.natural_name}') - -def find_nearest(arr, val, return_index=True): - idx = (np.abs(arr-val)).argmin() - if return_index: - return idx - else: - return arr[idx] - -def find_peaks(*data, channel=-1, axis=0, peak_width="medium", noisy=False, **kwargs): - peaks = {} - cwtargs = {} - if noisy: - cwtargs["min_snr"] = 2 - cwtargs.update(kwargs) - - for i, d in enumerate(data): - channel, = parse_args(d, channel, dtype='Channel') - axis, = parse_args(d, axis) - dname = str(i) + "_" + d.natural_name - peaks[dname] = {} - - axratio = d[axis].size/(np.max(d[axis].points) - np.min(d[axis].points)) - peak_width_vals = { - "narrow" : d[axis].size/1000, - "medium" : d[axis].size/100, - "broad" : d[axis].size/10 - } - if type(peak_width) is int or type(peak_width) is float: - width = axratio*peak_width - else: - try: - width = peak_width_vals[peak_width] - except KeyError: - print('Peak width argument not recognized. 
Select between narrow, medium, or broad.') - width = peak_width_vals["medium"] - - if len(d[channel].shape)>1: - out = d.chop(axis) - out = [spect for spect in out.values()] - for i, spect in enumerate(out): - peaks[dname][str(i)] = {} - peaks[dname][str(i)]["coords"] = [(c.natural_name, c.value) for c in spect.constants] - if np.sum(spect[channel][:]) != 0: - p = find_peaks_cwt(spect[channel].points, width, **cwtargs) - if p.size==0: - peaks[dname][str(i)]["peaks"] = None - else: - peaks[dname][str(i)]["peaks"] = np.asarray([spect[axis][idx] for idx in p]) - else: - if np.sum(d[channel][:]) != 0: - p = find_peaks_cwt(d[channel].points, width, **cwtargs) - if p.size==0: - peaks[dname] = None - else: - peaks[dname] = np.asarray([d[axis][idx] for idx in p]) - - return peaks - -def norm(arr, tmin, tmax): - diff = tmax-tmin - arr_range = np.max(arr)-np.min(arr) - norm_arr = np.nan_to_num((((arr-np.min(arr))*diff)/arr_range) + tmin) - return norm_arr - -def split_n(arr, *axes): - """ - Split an array sequentially along multiple axes. Multi-axis calls nested lists of arrays. Calling a single axis is equivalent to the numpy.split() method. - - Parameters - ---------- - arr : numpy array - The array to be split. - *axes : interable of ints - The axes along which the array will be split. - - Returns - ------- - arr : lists of numpy arrays - The split sub-arrays. - """ - axes = list(axes) - while axes: - if type(arr) is list: - spl_arr = [] - for a in arr: - spl_arr.append(split_n(a, *axes)) - arr = spl_arr - else: - arr = np.split(arr, arr.shape[axes[0]], axis=axes[0]) - del(axes[0]) - return arr - -def norm_split(split_arr, bounds): #TODO generalize to arbitrary operation - """ - Independently normalize all sub-arrays in a sequentially split numpy array. - - Parameters - ---------- - split_arr : lists of numpy arrays - The split array in the form generated by split_n. - bounds : 2-element iterable of ints - The lower and upper bounds of the normalized array, in order. 
- Returns - ------- - split_arr : lists of numpy arrays - The normalized arrays in the same format as-called. - """ - if type(split_arr) is list: - l = [] - for a in split_arr: - l.append(norm_split(a, bounds)) - split_arr = l - else: - split_arr = norm(split_arr, bounds[0], bounds[1]) - return split_arr - -def inverse_split_n(split_arr, *split_axes): - """ - Reconstruct a split array into its original form, provided the list of axes that was used to split the array via split_n. - - Parameters - ---------- - split_arr : lists of numpy arrays - The split array in the form generated by split_n. - *split_axes : int - The axes arguments called in split_n to produce split_arr, in the same order. - - Returns - ------- - split_arr : numpy array - A single array matching the original unsplit dimensionality. - """ - split_axes = list(split_axes) - while split_axes: - if type(split_arr[0]) is list: - arr = [] - for l in split_arr: - arr.append(inverse_split_n(l, split_axes[-1])) - split_arr = arr - del(split_axes[-1]) - else: - split_arr = np.concatenate(split_arr, axis=split_axes[-1]) - del(split_axes[-1]) - return split_arr - -def func_split(split_arr, func='norm', **kwargs): #TODO make func keyword able to call arbitrary external array functions - """ - Independently perform a function on all sub-arrays in a sequentially split numpy array. - - Parameters - ---------- - split_arr : lists of numpy arrays - The split array in the form generated by split_n. - bounds : 2-element iterable of ints - The lower and upper bounds of the normalized array, in order. - Returns - ------- - split_arr : lists of numpy arrays - The normalized arrays in the same format as-called. 
- """ - - params = { - 'norm':{'bounds':[0,1]}, - 'bkg_remove':{'negative':False, 'threshold':0.5, 'top_range':100}, - 'spike_filter':{'width':4} - } - params[func] = kwargs - - if type(split_arr) is list: - l = [] - for a in split_arr: - l.append(norm_split(a, func=func, **params[func])) - split_arr = l - else: - if func=='norm': - split_arr = norm(split_arr, params[func]['bounds'][0], params[func]['bounds'][1]) - if func=='bkg_remove': - pass - return split_arr - -def normalize_by_axis(data, channel, *axes, bounds=(0,1)): - """ - Normalize a channel of a data object along explicitly defined axes. - EXAMPLE: For a 3-dimensional data set with axes (x, y, z): - Normalizing by z produces independently normalized z-profiles for all (x, y). - Normalizing by (x, y) produces independently normalized xy planes for every z-slice. - Noramlizing by (x, y, z) normalized the channel as a whole. - - Parameters - ---------- - data : Data object of WrightTools data module. - The data containing the channel to be normalized. - channel : string or int - The key or index of the channel to be normalized. - *axes : iterable of strings and/or ints - The keys or indices of the axes along which to normalize the channel. - bounds : iterable of numbers, optional - The lower and upper bounds for normalization, in order. - - Returns - ------- - None. - Adds a normalized channel to the Data instance. 
- """ - axes = parse_args(data, *axes, return_name=False) - dims = [i for i, axis in enumerate(data.axes) if i not in axes] - channel, = parse_args(data, channel, dtype='Channel') - - ch_arr = data[channel][:] - ch_spl = split_n(ch_arr, *dims) - ch_spl_norm = norm_split(ch_spl, bounds) - ch_norm = inverse_split_n(ch_spl_norm, *dims) - ch_name = "norm_" - for ax in [axis.natural_name for i, axis in enumerate(data.axes) if i not in dims]: - ch_name = ch_name + data[ax].natural_name - - data.create_channel(ch_name, values=ch_norm) - -def background_mask(data, channel, *axes, negative=True): - axes = parse_args(data, *axes, return_name=False) - dims = [i for i, axis in enumerate(data.axes) if i not in axes] - channel, = parse_args(data, channel, dtype='Channel') - - ch_arr = data[channel][:] - ch_spl = split_n(ch_arr, *dims) - pass - -def get_range(*data, reference_key=0, dtype='Channel', window='default', offset=0): - ranges = [] - signed=False - default_windows = { - 'Axis' : 1, - 'Channel' : 1.1 - } - if window=='default': - window = default_windows[dtype] - - for d in data: - key, = parse_args(d, reference_key, dtype=dtype) - ranges.append([np.min(d[key][:]), np.max(d[key][:])]) - if dtype=='Channel': - if d[key].signed: - signed=True - - ranges_min, ranges_max = min([r[0] for r in ranges]), max([r[1] for r in ranges]) - if offset != 0: - ranges_max = sum([r[0] for r in ranges]) + offset*(len(data)-1) + [r[1] for r in ranges][-1] - - rng = [(ranges_min+(ranges_max-ranges_min)/2)-(window*(ranges_max-ranges_min)/2), (ranges_min+(ranges_max-ranges_min)/2)+(window*(ranges_max-ranges_min)/2)] - if signed and ranges_min*ranges_max < 0 and not offset: #make window symmetric about zero if min and max have opposite sign - return [-window*max(rng),window*max(rng)] - else: - return rng - -def imshowarr(a, rotate=False, cmap=None, ticks=None, vrange=None): - fig, ax = plt.subplots(figsize=(6.5, 6.5)) - if cmap is None: - cmap = mpl.cm.viridis - if vrange is None: - v = 
[np.min(a), np.max(a)] - else: - v = vrange - ax.imshow(a, vmin=v[0], vmax=v[1], cmap=cmap) - if ticks is None: - ax.set_xticks([]) - ax.set_yticks([]) - ax.set_aspect('equal') - -def __at(arr, val): - return (np.abs(arr-val)).argmin() - -def roi(data, ROI, return_arrs=False, verbose=False): - """ - Extract a region of interest (ROI) from data objects using a variety of operations, - without generating useless secondary data or collapsed variables. - - The extracted ROI's are of equivalent type as the input, unless return_arrs is set to True. - The returned data are new instances distinct from the input, with their own unique .wt5 files. - Further modification of the output data will not affect the input data. - - Parameters - ---------- - data : WrightTools Data, WrightTools Collection, or list of WrightTools Data - The data from which the ROI will be extracted. - Axes of all input data must have axes with indices or names matched to the ROI, but they need not have identical shapes. - - ROI : dictionary - The region of interest to extract from all input data. ROI's will be interpreted in the following ways: - String keys will be interpreted as axis names. - Integer keys will be interpreted as axis indices. - - Numerical values will extract the single point along that axis which is closest to the value, collapsing the axis. - List values will extract the points along that axis which fall within the range of two numbers (if the list contains 2 numbers), - or that lie beyond the number (if the list contains 1 number) - Certain string values will collapse the points along that axis via a specified method. Valid methods include: - 'average', which yields arrays averaged along that axis - 'median', which extracts the median along that axis - 'sum', which yields arrays summed along that axis - - Tuple values will be interpreted as an ordered sequence of the operations described above. 
- - return_arrs : boolean, optional, default=False - Specify the return format of the data. - If False, the returned data will match the format of the input data. - If True, the returned data will be formatted as a list of dictionaries - containing the variable and channel names as keys and their corresponding arrays as values. - - verbose : boolean, optional, default=False - Toggle talkback. - - Returns - ------- - out: Equivalent type to input data or list of dictionaries - New Data instance(s) containing only the regions of interest from the original(s), with collapsed variables removed. - - Examples - -------- - Using a 100mm x 100mm photograph as an example, stored as a Data instance my_data with axes ('x','y'), each in units mm. - - Calling the function "my_data_ROI = roi(my_data, ROI)" extracts the following data from my_data depending on the format of ROI: - if ROI = {'y':'sum'}: - Returns a 1D x-profile of the image. The channels will be the signals summed along the entire y-axis. - - if ROI = {'x':([20,80],'sum')}: - Returns a 1D y-profile of the image. The channels will be the summed signals along the x-axis from x=20mm - x=80mm. - The ordering of the tuple affects the outcome. As a counterexample, if my_ROI = {'x':('sum',[20,80])}: - The 1D y-profile will summed along the entire x-axis. The function will ignore the second operation because the variable was already collapsed by the first. - - if ROI = {'x':[20,80], 'y':[50]}: - Returns a cropped (60mm x 50mm) image containing points from x=20mm - x=80mm and y=50mm - y=100mm. - - if ROI = {'x':[20,80], 1:50}: - Returns a cropped (60 mm) x-profile of the pixels at the row where y (axis 1) = 50mm, containing points from x=20mm - x=80mm. 
- - See Also - -------- - WrightTools.Data.collapse - WrightTools.Data.moment - WrightTools.Data.split - """ - def __copy_attrs(data, new_data, object_key): - #do not overwrite default HDF5 parameters that come with new instances - nocopy = {key for key in new_data[object_key].attrs.keys()} - for key, value in data[object_key].attrs.items(): - if key not in nocopy: - new_data[object_key].attrs[key] = value - - operations = { - 'sum' : np.sum, - 'product' : np.prod, - 'average' : np.average, - 'std' : np.std, - 'var' : np.var, - 'median' : np.median, - 'min' : np.min, - 'max' : np.max - } - - if type(data) is not list and type(data) is not wt.Data and type(data) is not wt.Collection: - raise TypeError(f'Unsupported data type {type(data)} was passed to the function. Supported data types include WrightTools Data objects, lists of WrightTools Data objects, or WrightTools Collection objects.') - - if type(data) is wt.Collection and not return_arrs: - out = wt.Collection(name=data.natural_name) - data = [data[d] for d in data] - else: - out = [] - if type(data) is not list: - data = [data] - - for d in data: - variables = dict([(var.natural_name, d[var.natural_name][:]) for var in d.variables]) - axes = [ax.natural_name for ax in d.axes] - channels = dict([(ch.natural_name, d[ch.natural_name][:]) for ch in d.channels]) - - for key, value in ROI.items(): - axis = key - - if key not in axes: - if type(key) is int and key in range(len(d.axes)): #try indexing using the key provided if it isn't a valid axis name - axis = d.axes[key].natural_name - else: - axis = None - print(f'axis {key} not found') - - if axis is not None: - collapsed=False - axarr = variables[axis] - axidx = [i for i, dimlength in enumerate(axarr.shape) if dimlength>1] - #if there is no dimension greater than 1 in the axis array, consider it a collapsed variable - if not axidx: - collapsed=True - else: - axidx = axidx[0] - #interpret a single operation or a sequence of operations on the variable - if 
type(value) is tuple: - ops = [op for op in value] - else: - ops = [value] - #extract the ROI - for op in ops: - if not collapsed: - if type(op) is str and op in operations.keys(): - for ch, charr in channels.items(): - if charr.shape[axidx]==axarr.shape[axidx] and charr.ndim==axarr.ndim: - channels[ch] = operations[op](charr, axis=axidx) - collapsed=True - - if type(op) is int or type(op) is float: - ax0 = __at(axarr, op) - for ch, charr in channels.items(): - if charr.shape[axidx]==axarr.shape[axidx] and charr.ndim==axarr.ndim: - extracted = np.split(charr, charr.shape[axidx], axis=axidx)[ax0] - channels[ch] = np.squeeze(extracted, axis=axidx) - collapsed=True - if verbose: - print(f'ROI extracted at {axis} = {op} for data {d.natural_name}') - - if type(op) is list: - if len(op) not in [1, 2]: - print(f'specified bounds for split along axis {key} contained {len(op)} elements, but only 1 or 2 elements were expected') - bounds = sorted([__at(axarr, bound) for bound in op]) - if np.split(axarr, bounds, axis=axidx)[1].shape[axidx]==1: - collapsed=True - - for ch, charr in channels.items(): - if charr.ndim==axarr.ndim and charr.shape[axidx]==axarr.shape[axidx]: - channels[ch] = np.split(charr, bounds, axis=axidx)[1] - for var, varr in variables.items(): - if varr.ndim==axarr.ndim and varr.shape[axidx]==axarr.shape[axidx]: - variables[var] = np.split(varr, bounds, axis=axidx)[1] - if verbose: - print(f'extracted range {op[0]} to {op[-1]} along {axis} for data {d.natural_name}') - - axarr = variables[axis] - - else: - print(f'cannot interpret operation {op} for axis {axis} of data {d.natural_name} because the variable was already collapsed') - - if collapsed: - variables.pop(axis) - axes.remove(axis) - for var, varr in variables.items(): - if varr.ndim==axarr.ndim and varr.shape[axidx]==1: - variables[var] = np.squeeze(varr, axis=axidx) - for ch, charr in channels.items(): - if charr.ndim==axarr.ndim and charr.shape[axidx]==1: - channels[charr] = np.squeeze(charr, 
axis=axidx) - if verbose: - print(f'axis {axis} collapsed via operation {op} for data {d.natural_name}') - - if return_arrs: #return a dictionary of arrays deconstructed from the original data object - arrs = {} - for var, varr in variables.items(): - if var in axes: - arrs[var] = varr - for ch, charr in channels.items(): - arrs[ch] = charr - out.append(arrs) - - else: #construct new Data objects - if type(out) is wt.Collection: - out.create_data(name=d.natural_name) - d_out = out[-1] - else: - d_out = wt.Data(name=d.natural_name) - - keysHDF5 = {key for key in d_out.attrs.keys()} - - for var, varr in variables.items(): - if d[var].units is not None: - d_out.create_variable(var, values=varr, unit=d[var].units) - else: - d_out.create_variable(var, values=varr, units=None) - __copy_attrs(d, d_out, var) - - for ch, charr in channels.items(): - if d[ch].units is not None: - d_out.create_channel(ch, values=charr, units=d[ch].units) - else: - d_out.create_channel(ch, values=charr, units=None) - if d[ch].signed: - d_out[ch].signed=True - __copy_attrs(d, d_out, ch) - - for key, value in d.attrs.items(): - if key not in keysHDF5: - d_out.attrs[key] = value - d_out.transform(*axes) - if type(out) is not wt.Collection: - out.append(d_out) - - if len(out)==1: - out=out[0] - - return out - -def show(data): - if type(data) is not list: - data = [data] - return [f'{i} - name: {d.natural_name}, axes:{[ax.natural_name for ax in d.axes]}' for i, d in enumerate(data) if type(d) is wt.Data] - -def contrast(d, ch, contrast=[99,1]): - return [np.percentile(d[ch][:],min(contrast)),np.percentile(d[ch][:],max(contrast))] - -def vrange(arr, signed, window=1.1, manual_range=None): - #determine range to be plotted - vmin, vmax = np.min(arr), np.max(arr) - if signed and vmin<0: - return [-window*max([abs(vmin), abs(vmax)]), window*max([abs(vmin), abs(vmax)])] - else: - return [(vmin+(vmax-vmin)/2)-(window*(vmax-vmin)/2), (vmin+(vmax-vmin)/2)+(window*(vmax-vmin)/2)] \ No newline at end of file 
diff --git a/makeitwright/process/horiba.py b/makeitwright/process/horiba.py deleted file mode 100644 index 2f01b9d..0000000 --- a/makeitwright/process/horiba.py +++ /dev/null @@ -1,256 +0,0 @@ -import numpy as np -import WrightTools as wt -import makeitwright.styles as styles - -from . import spectralprofile -from . import hyperspectral -from . import helpers - -def central_wavelength(data): - pass - -def plot_image(data, channel, **kwargs): - params = {} - try: - unit = data['wl'].units - except KeyError: - unit = data.constants[0].units - if unit == 'wn': - params.update(styles.image_horiba_Raman) - else: - params.update(styles.image_horiba_PL) - params.update(**kwargs) - - if len(data.axes) == 3: - hyperspectral.plot_image(data, channel, **params) - else: - spectralprofile.plot_image(data, channel, **params) - -def plot_profile(data, channel, profile_axis='y', **kwargs): - params = {} - try: - unit = data['wl'].units - except KeyError: - unit = data.constants[0].units - - if data.axes[1].natural_name == 't': - params.update(styles.profile_horiba_timed_series) - elif unit == 'wn': - params.update(styles.profile_horiba_Raman) - else: - params.update(styles.profile_horiba_PL) - params.update(**kwargs) - - if len(data.axes) == 3: - hyperspectral.plot_profile(data, profile_axis, channel, **params) - else: - spectralprofile.plot_profile(data, channel, **params) - -def plot_decomposition(data, channel, **kwargs): - params = {} - try: - unit = data['wl'].units - except KeyError: - unit = data.constants[0].units - if unit == 'wn': - params.update(styles.decomposition_horiba_Raman) - else: - params.update(styles.decomposition_horiba_PL) - params.update(**kwargs) - - if len(data.axes) == 3: - hyperspectral.plot_decomposition(data, 0, 1, 2, channel, **params) - else: - spectralprofile.plot_decomposition(data, 0, 1, channel, **params) - -def fromAramis(filepath): - print("not ready yet, get to work :)") - - -def horiba_typeID(filepath): - with open(filepath) as f: - txt = 
f.readlines() - header_size = 0 - - for line in txt: - if "#" in line: - header_size += 1 - - wl_arr = np.genfromtxt(filepath, skip_header=header_size, max_rows=1) - ch_arr = np.genfromtxt(filepath, skip_header=header_size+1) - xy_cols = ch_arr.shape[1]-wl_arr.shape[0] - - dtype = None - if xy_cols==0: - dtype = 'LabramHR_spectrum' - - if xy_cols==1: - y = ch_arr[:,0][None,:] - is_survey = True - for i in range(1, y.size): - if y.flatten()[i]-y.flatten()[i-1] != 1: - is_survey = False - if is_survey: - dtype = 'LabramHR_spectrum' - else: - dtype = 'LabramHR_linescan' - - if xy_cols==2: - dtype = 'LabramHR_map' - - return dtype - - -def fromLabramHR(filepath, name=None, cps=False): - if name is None: - name = filepath.split('/')[-1].split('.')[0] - with open(filepath) as f: - txt = f.readlines() - header_size = 0 - acq = 1 - accum = 1 - spectral_units = 'nm' - - for line in txt: - if "#" in line: - header_size += 1 - if "Acq. time" in line: - acq = float(line.split('=\t')[1]) - if "Accumulations" in line: - accum= int(line.split('=\t')[1]) - if "Range (" in line: - if 'eV' in line: - spectral_units='eV' - elif 'cm' in line: - spectral_units='wn' - else: - spectral_units='nm' - if 'Spectro' in line: - if 'eV' in line: - spectral_units='eV' - elif 'cm' in line: - spectral_units='wn' - else: - spectral_units='nm' - total_acq_time = acq*accum - acq_type = {'wn':"Raman", 'nm':"PL", 'eV':"PL"} - siglabels = {'wn':"scattering intensity", 'nm':"PL intensity", 'eV':"PL intensity"} - for key, value in siglabels.items(): - if cps: - siglabels[key] = siglabels[key] + " (cps)" - else: - siglabels[key] = siglabels[key] + " (counts)" - - spectlabels = {'wn':"Raman shift (cm\u207b\u2071)", 'nm':"wavelength (nm)", 'eV':"energy (eV)"} - - wl_arr = np.genfromtxt(filepath, skip_header=header_size, max_rows=1) - ch_arr = np.genfromtxt(filepath, skip_header=header_size+1) - xy_cols = ch_arr.shape[1]-wl_arr.shape[0] - - if xy_cols==0: - sig = ch_arr[:,1] - if cps: - sig = 
sig/total_acq_time - wl = ch_arr[:,0] - d = wt.Data(name=name) - d.create_variable('wl', values=wl, units=spectral_units) - d['wl'].label = spectlabels[spectral_units] - d.create_channel('sig', values=sig) - d['sig'].label = siglabels[spectral_units] - d.create_channel('norm', values=helpers.norm(sig, 0, 1)) - d['norm'].label = 'norm. ' + siglabels[spectral_units].split(' (')[0] - d.transform('wl') - d.attrs['dtype'] = 'spectrum' - d.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] - d.attrs['exposure time (s)'] = acq - d.attrs['number of accumulations'] = accum - print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} spectrum") - - if xy_cols==1: - sig = ch_arr[:,1:].transpose() - wl = wl_arr[:,None] - y = ch_arr[:,0][None,:] - - is_survey = True - for i in range(1, y.size): - if y.flatten()[i]-y.flatten()[i-1] != 1: - is_survey = False - - if is_survey: - d = [] - for i in range(y.size): - sig_i = sig[:,i].flatten() - if cps: - sig_i = sig_i/total_acq_time - spect = wt.Data(name=f"{name}_spect{i}") - spect.create_variable('wl', values=wl.flatten(), units=spectral_units) - spect['wl'].label = spectlabels[spectral_units] - spect.create_channel(name='sig', values=sig_i) - spect['sig'].label = siglabels[spectral_units] - spect.create_channel(name='norm', values=helpers.norm(sig_i, 0, 1)) - spect['norm'].label = 'norm. 
' + siglabels[spectral_units].split(' (')[0] - spect.transform('wl') - spect.attrs['dtype'] = 'spectrum' - spect.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] - spect.attrs['exposure time (s)'] = acq - spect.attrs['number of accumulations'] = accum - d.append(spect) - print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} survey") - else: - if cps: - sig = sig/total_acq_time - d = wt.Data(name=name) - d.create_variable('wl', values=wl, units=spectral_units) - d['wl'].label = spectlabels[spectral_units] - d.create_channel('sig', values=sig) - d['sig'].label = siglabels[spectral_units] - d.create_variable('y', values=y, units='um') - d['y'].label = "y (µm)" - d.transform('wl', 'y') - d.attrs['dtype'] = 'spectralprofile' - d.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] - d.attrs['exposure time (s)'] = acq - d.attrs['number of accumulations'] = accum - print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} linescan") - - if xy_cols==2: - xidx = ch_arr[:,0] - xdim = 1 - for i in range(1,ch_arr.shape[0]): - if xidx[i] != xidx[i-1]: - xdim = xdim+1 - ydim = int(ch_arr.shape[0]/xdim) - - x = np.zeros((xdim,1,1)) - y = np.zeros((1,ydim,1)) - wl = wl_arr.reshape([1,1,wl_arr.size]) - sig = np.zeros((xdim,ydim,wl_arr.size)) - - for i in range(0, ch_arr.shape[0], ydim): x[int(i/ydim),0,0] = ch_arr[i,0] - y[0,:,0] = ch_arr[:ydim,1] - for i in range(xdim): - for j in range(ydim): - sig[i,j,:] = ch_arr[i*ydim+j,2:].reshape([1,1,wl_arr.size]) - - if cps: - sig = sig/total_acq_time - d = wt.Data(name=name) - d.create_channel('sig', values=sig) - d['sig'].label = siglabels[spectral_units] - d.create_variable('x', values=x, units='um') - d['x'].label = "x (µm)" - d.create_variable('y', values=y, units='um') - d['y'].label = "y (µm)" - d.create_variable('wl', values=wl, units=spectral_units) - d['wl'].label = spectlabels[spectral_units] - d.transform('x','y','wl') - d.attrs['dtype'] = 'hyperspectral' - 
d.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] - d.attrs['exposure time (s)'] = acq - d.attrs['number of accumulations'] = accum - print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} map") - - return d - -def fromLabramHRTimedSeries(filedir): - pass \ No newline at end of file diff --git a/makeitwright/process/hyperspectral.py b/makeitwright/process/hyperspectral.py deleted file mode 100644 index 8d7a5c3..0000000 --- a/makeitwright/process/hyperspectral.py +++ /dev/null @@ -1,329 +0,0 @@ -__name__ = "hyperspectral" -__author__ = "Chris Roy, Song Jin Research Group, Dept. of Chemistry, University of Wisconsin - Madison" - -""" -Processing and plotting methods for 3-dimensional WrightTools data objects containing two spatial axes and one non-spatial axis (i.e. a spectral axis). - -The two spatial axes are generally defined as x and y. -The third axis is referred to as "spectral axis", -but an arbitrary non-spatial or pseudo-spatial axis may be used where relevant. - -Data axes must be ordered (spatial x, spatial y, non-spatial). -""" - -#import -import numpy as np -import matplotlib as mpl -from matplotlib import pyplot as plt -from . import helpers -import makeitwright.styles as styles - -def remove_background(data, channel, threshold=0.1, negative=False, return_mask=False, max_ref_count=10): - """ - Remove background pixels from the x-y plane of the hyperspectral image using the spectrally binned image as a reference signal. - Background x-y points will be set to 0 along the entire spectral axis. - - Parameters - ---------- - data : Data object of WrightTools data module. - The data for which the background-subtracted channel will be generated. - channel : int or str - The channel that will be duplicated with subtracted background. - threshold : float between 0 and 1, optional - The fraction of the maximum reference value below which is to be considered background. - The default is 0.1. 
- negative : bool, optional - Subtract everything but the background instead. Useful if the region of interest is the signal minimum. - The default is False. - max_ref_count : int, optional - The number of highest x-y points in the image to be averaged as a reference for the channel maximum. Useful to avoid false maxima caused by spikes. - The default is 10. - - Returns - ------- - None. - Adds a background-subtracted channel to the Data instance. - """ - #parse channel argument as str - channel, = helpers.parse_args(data, channel, dtype='Channel') - - #generate a spectrally binned image of the channel - ch_arr = np.sum(data[channel][:], axis=2) - #get a representative maximum value from the reference image - if max_ref_count > ch_arr.size: - max_ref_count = int(ch_arr.size/10) - ordered = np.sort(ch_arr.flatten())[-max_ref_count:] - ch_max = np.average(ordered) - #generate a mask array - bkg = np.where(ch_arr < threshold*ch_max, 0, ch_arr) - bkg = np.where(bkg > 0, 1, bkg) - #remove background using mask - mask = np.repeat(bkg[:,:,None], data.axes[2].size, axis=2) - if negative: - mask = 1-mask - nobkg = data[channel][:] * mask - #create background-subtracted channel - data.create_channel(name=channel+"_nobkg", values=nobkg) - data[channel+'_nobkg'].signed = data[channel].signed - if return_mask: - data.create_channel(name=data[channel].natural_name+"_mask", values=mask) - -def get_profile(data, profile_axis, ROI=None): - """ - Extract profile from an arbitrary pair of points in a selected 2D subspace of the data's 3 dimensions. - Arguments - ------------------------------------ - data: WrightTools Data instance. Must have at least 3 axes. 
- ROI should be a dict object containing the beginning and end points for each relevant axis, returns data object - """ - profile_axis, = helpers.parse_args(data, profile_axis) - non_profile_axis = [axis.natural_name for axis in data.axes if axis.natural_name != profile_axis][0] - spectral_axis = [axis.natural_name for axis in data.axes if axis.natural_name != profile_axis][1] - - dims_too_low = False - if ROI is not None: - if [val for val in ROI.values() if val == 'all']: - dims_too_low = True - if spectral_axis in ROI.keys(): - if type(ROI[spectral_axis]) is int or type(ROI[spectral_axis]) is float: - dims_too_low = True - if profile_axis in ROI.keys(): - if type(ROI[profile_axis]) is int or type(ROI[profile_axis]) is float: - dims_too_low = True - if len([val for val in ROI.values() if type(val) is int or type(val) is float])>1: - dims_too_low = True - if dims_too_low: - print("Dimensionality of ROI is too low. Do not collapse any dimensions of the data before calling this method.") - return - if not dims_too_low: - out = helpers.roi(data, ROI) - else: - out = data - if len(out.axes) > 2: - out = helpers.roi(out, {non_profile_axis:'all'}) - - out.transform() - - for channel in out.channels: - ch_name = channel.natural_name - ch_values = out[ch_name][:].transpose() - out.remove_channel(ch_name, verbose=False) - out.create_channel(ch_name, values=ch_values, verbose=False) - for variable in out.variables: - var_name = variable.natural_name - var_values = out[var_name][:].transpose() - var_units = variable.units - out.remove_variable(var_name, verbose=False) - out.create_variable(var_name, values=var_values, units=var_units, verbose=False) - - out.transform(spectral_axis, profile_axis) - print(f'profile along direction <{profile_axis}> extracted') - - return out - -def plot_image(data, channel, **kwargs): - - #convert axis/channel indices to natural names - channel, = helpers.parse_args(data, channel, dtype='Channel') - non_spatial_axis = data.axes[-1].natural_name 
- - #set parameters for plotting from kwargs - params = { - "ROI" : None, - 'ticks' : 'auto', - "vrange" : None, - "title" : None - } - params.update(styles.image) - if data[channel].signed: - params["cmap"] = mpl.cm.RdBu_r - params.update(**kwargs) - - #extract ROI - if params["ROI"] is not None: - out = helpers.roi(data, params["ROI"]) - if len(out.axes) != 2: - out = helpers.roi(out, {non_spatial_axis : 'all'}) - else: - out = helpers.roi(data, {non_spatial_axis : 'all'}) - - #determine range to be plotted - if params["vrange"] is None: - vrange = helpers.get_range(out, reference_key=channel) - else: - vrange = params["vrange"] - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) - mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - ax.set_aspect("equal") - if params["title"] is not None: - ax.set_title(params["title"]) - - #set ticks - if params['ticks'] == 'auto': - ticks = np.linspace(vrange[0], vrange[1], num=11) - elif params['ticks'] is None: - ticks = [] - else: - ticks = params['ticks'] - - # plot colorbar - cbar = plt.colorbar(mesh) - cbar.set_ticks(ticks) - cbar.set_label(params["cbar_label"]) - -def plot_profile(data, profile_axis, channel, **kwargs): - - #convert axis/channel indices to natural names - profile_axis, = helpers.parse_args(data, profile_axis) - spectral_axis = data.axes[-1].natural_name - channel, = helpers.parse_args(data, channel, dtype='Channel') - non_profile_axis = [axis.natural_name for axis in data.axes[:-1] if axis.natural_name != profile_axis][0] - - #set parameters for plotting from kwargs - params = { - "ROI" : None, - 'ticks' : 'auto', - "vrange" : None, - "reference_lines" : None, - "title" : None - } - params.update(styles.profile) - if data[channel].signed: - 
params["cmap"] = mpl.cm.RdBu_r - params.update(**kwargs) - - #extract ROI - if params["ROI"] is not None: - out = helpers.roi(data, params["ROI"]) - if len(out.axes) != 2: - out = helpers.roi(out, {non_profile_axis : 'all'}) - else: - out = helpers.roi(data, {non_profile_axis : 'all'}) - out.transform(spectral_axis, profile_axis) - - #determine range to be plotted - if params["vrange"] is None: - vrange = helpers.get_range(out, reference_key=channel) - else: - vrange = params["vrange"] - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - xgrid, ygrid = np.meshgrid(out.axes[1][:], out.axes[0][:]) - try: - mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - except TypeError: - mesh = ax.pcolormesh(xgrid, ygrid, out[channel][:], cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - - if params["reference_lines"] is not None: - if type(params["reference_lines"]) is not list: - params["reference_lines"] = [params["reference_lines"]] - for reference_line in params["reference_lines"]: - ax.axvline(x=reference_line, linewidth=1, color='grey', linestyle='--', alpha=0.25) - - #set ticks - if params['ticks'] == 'auto': - ticks = np.linspace(vrange[0], vrange[1], num=11) - elif params['ticks'] is None: - ticks = [] - else: - ticks = params['ticks'] - - # plot colorbar - cbar = plt.colorbar(mesh) - cbar.set_ticks(ticks) - cbar.set_label(params["cbar_label"]) - - if params["title"] is not None: - ax.set_title(params["title"]) - -def plot_decomposition(data, x_axis, y_axis, spectral_axis, channel, **kwargs): - #convert axis/channel indices to natural names - x_axis, y_axis, spectral_axis = helpers.parse_args(data, x_axis, y_axis, spectral_axis) - channel, = helpers.parse_args(data, channel, dtype='Channel') - - #set parameters for plotting from kwargs - params = { - "ROI" : None, - 
"xrange" : None, - "vrange" : None, - "yscale" : 'linear', - "binning" : None, - "reference_lines" : None, - "xticks" : True, - "yticks" : True, - "title" : None - } - params.update(styles.decomposition) - params.update(**kwargs) - - #extract ROI - if params["ROI"] is not None: - out = helpers.roi(data, params["ROI"]) - else: - out = data - - #identify spatial ranges for indexing - xrange = helpers.get_range(out, reference_key=spectral_axis, dtype='Axis') - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - if params["binning"] is not None: - if params["binning"] == 'average': - arr_out = np.sum(out[channel][:], axis=(0,1))/(np.count_nonzero(out[channel][:])/out[channel].shape[2]) - if params["binning"] == 'sum': - arr_out = np.sum(out[channel][:], axis=(0,1)) - #determine range to be plotted - vrange = helpers.vrange(arr_out, out[channel].signed, window=1) - - ax.plot(out[spectral_axis].points, arr_out, - params["marker"], linewidth=params["linewidth"], alpha=1, color=params["color"]) - else: - #determine range to be plotted - vrange = helpers.get_range(out, reference_key=channel) - for i in range(out[x_axis].size): - for j in range(out[y_axis].size): - if np.sum(out[channel][i,j,:]) != 0: - ax.plot(out[spectral_axis].points, out[channel][i,j,:], - params["marker"], linewidth=params["linewidth"], alpha=params["alpha"], color=params["color"]) - - if params["xrange"] is not None: - xrange = params["xrange"] - ax.set_xlim(*xrange) - if not params["xticks"]: - ax.set_xticks([]) - if params["vrange"] is not None: - vrange = params["vrange"] - ax.set_ylim(*vrange) - ax.set_yscale(params["yscale"]) - if not params["yticks"]: - ax.set_yticks([]) - - if out[channel].signed: - ax.axhline(y=0, color='black', linewidth=1) - - if params["reference_lines"] is not None: - if type(params["reference_lines"]) is not list: - params["reference_lines"] = [params["reference_lines"]] - for reference_line in 
params["reference_lines"]: - ax.axvline(x=reference_line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5) - - # label plot - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - ax.set_yscale(params["yscale"]) - if params["title"] is not None: - ax.set_title(params["title"]) \ No newline at end of file diff --git a/makeitwright/process/image.py b/makeitwright/process/image.py deleted file mode 100644 index 34684bd..0000000 --- a/makeitwright/process/image.py +++ /dev/null @@ -1,112 +0,0 @@ -import numpy as np -import matplotlib as mpl -from matplotlib import pyplot as plt -from . import helpers -import makeitwright.styles as styles - -def get_pixel_location(data, pixel): - """ - Get the axis coordinates of an exact pixel in an image. - - Arguments - --------- - data : WrightTools Data - The image. - pixel : tuple (x,y) - The pixel coordinates. - - Returns - ------- - tuple (x,y) - The location of the pixel in axis coordinates. - """ - return (data.axes[0].points[pixel[0]], data.axes[1].points[pixel[1]]) - -def remove_background(data, channel, threshold=0.5, negative=False, return_mask=False, max_ref_count=500): - channel, = helpers.parse_args(data, channel, dtype='Channel') - - ch_arr = data[channel][:] - if max_ref_count > ch_arr.size: - max_ref_count = int(ch_arr.size/10) - ordered = np.sort(ch_arr.flatten())[-max_ref_count:] - ch_max = np.average(ordered) - bkg = np.where(ch_arr < threshold*ch_max, 0, ch_arr) - mask = np.where(bkg > 0, 1, bkg) - if negative: - mask = 1-mask - nobkg = ch_arr * mask - - data.create_channel(name=channel+"_nobkg", values=nobkg, units=data[channel].units) - data[channel+'_nobkg'].signed = data[channel].signed - if return_mask: - data.create_channel(name=data[channel].natural_name+"_mask", values=mask) - -def plot_image(data, channel, **kwargs): - #convert axis/channel indices to natural names - channel, = helpers.parse_args(data, channel, dtype='Channel') - - #set parameters for plotting from kwargs - 
params = { - "ROI" : None, - "vrange" : None, - "contrast" : None, - "crosshairs" : None, - "xticks" : None, - "yticks" : None, - "title" : None - } - params.update(styles.image) - if data[channel].signed: - params["cmap"] = mpl.cm.RdBu_r - params.update(**kwargs) - - #extract ROI - if params["ROI"] is not None: - out = helpers.roi(data, params["ROI"]) - else: - out = data - - if params["xlabel"] is None: - try: - params["xlabel"] = out.variables[0].attrs['label'] - except KeyError: - params["xlabel"] = 'x' - - if params["ylabel"] is None: - try: - params["ylabel"] = out.variables[1].attrs['label'] - except KeyError: - params["ylabel"] = 'y' - - #determine range to be plotted - if params["vrange"] is None: - if params["contrast"] is None: - vrange = helpers.get_range(out, reference_key=channel) - else: - vrange = helpers.contrast(out, channel, params["contrast"]) - else: - vrange = params["vrange"] - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) - ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - - if params["crosshairs"] is not None: - h, v = params["crosshairs"] - if h is not None: - ax.axvline(x=h, linewidth=1, color='white', linestyle='--', alpha=0.5) - if v is not None: - ax.axhline(y=v, linewidth=1, color='white', linestyle='--', alpha=0.5) - - if not params["xticks"] and params['xticks'] is not None: - ax.set_xticks([]) - if not params["yticks"] and params['yticks'] is not None: - ax.set_yticks([]) - - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - - ax.set_aspect("equal") - - if params["title"] is not None: - ax.set_title(params["title"]) \ No newline at end of file diff --git a/makeitwright/process/spectralprofile.py b/makeitwright/process/spectralprofile.py deleted file mode 100644 index 17606c0..0000000 --- 
a/makeitwright/process/spectralprofile.py +++ /dev/null @@ -1,312 +0,0 @@ -"""Methods for two-dimensional data consisting of a spectral axis (0) and a spatial axis (1).""" - - -import numpy as np -import matplotlib as mpl -from matplotlib import pyplot as plt -from . import helpers -import makeitwright.styles as styles - - -def remove_spectral_background(data, channel, spatial_reference_range, name=None, create_background_channel=False, talkback=True): - """ - Remove background along the spatial axis using a specified range along the other axis as reference. - Creates a new channel with the background-subtracted array. - - Arguments - --------- - data : WrightTools.Data - The data. - background_axis : str or int - The axis along which the background is. - channel : str or int - The channel to subtract the background from. - reference_axis_range : list or int - - Returns - ------- - None - Creates new background-subtracted Channels in the Data instance. - """ - #identify channel and categorize axes - channel = helpers.get_channels(data, channel)[0] - spectral_axis = helpers.get_axes(data, 0)[0] - spatial_axis = helpers.get_axes(data, 1)[0] - #construct the background array - if isinstance(spatial_reference_range, int): - spectral_background = helpers.roi(data, {spatial_axis:spatial_reference_range}, return_arrs=True)[channel].reshape(data[spectral_axis].shape) - else: - spectral_background = helpers.roi(data, {spatial_axis:(spatial_reference_range,'average')}, return_arrs=True)[channel].reshape(data[spectral_axis].shape) - spatial_points = np.ones(data[spatial_axis].shape) - background = spectral_background*spatial_points - #create background-subtracted channel - if name is None: - name = f"{channel}_bkgsub_spectral" - data.create_channel(name, values=data[channel][:]-background, units=data[channel].units) - if data[channel].signed: - data[name].signed = True - if create_background_channel: - data.create_channel(f"spectral_bkg_{channel}", values=background, 
units=data[channel.units]) - if data[channel].signed: - data[name].signed = True - - if talkback: - print(f"subtracted spectral background from data {data.natural_name}") - -def remove_spatial_background(data, channel, spectral_reference_range, name=None, create_background_channel=False, talkback=True): - """ - Remove background along the spatial axis using a specified range along the other axis as reference. - Creates a new channel with the background-subtracted array. - - Arguments - --------- - data : WrightTools.Data - The data. - background_axis : str or int - The axis along which the background is. - channel : str or int - The channel to subtract the background from. - reference_axis_range : list or int - - Returns - ------- - None - Creates new background-subtracted Channels in the Data instance. - """ - #identify channel and categorize axes - channel = helpers.get_channels(data, channel)[0] - spectral_axis = helpers.get_axes(data, 0)[0] - spatial_axis = helpers.get_axes(data, 1)[0] - #construct the background array - if isinstance(spectral_reference_range, int): - spatial_background = helpers.roi(data, {spectral_axis:spectral_reference_range}, return_arrs=True)[channel].reshape(data[spatial_axis].shape) - else: - spatial_background = helpers.roi(data, {spectral_axis:(spectral_reference_range,'average')}, return_arrs=True)[channel].reshape(data[spatial_axis].shape) - spectral_points = np.ones(data[spectral_axis].shape) - background = spatial_background*spectral_points - #create background-subtracted channel - if name is None: - name = f"{channel}_bkgsub_spatial" - data.create_channel(name, values=data[channel][:]-background, units=data[channel].units) - if data[channel].signed: - data[name].signed = True - if create_background_channel: - data.create_channel(f"spatial_bkg_{channel}", values=background, units=data[channel.units]) - if data[channel].signed: - data[name].signed = True - - if talkback: - print(f"subtracted spatial background from data 
{data.natural_name}") - -def remove_combined_background(data, channel, spectral_reference_range, spatial_reference_range, name=None, create_background_channel=False, talkback=True): - """ - Remove background from data using a range of the spectral profile along each axis as reference. The background is a matrix product of the two background arrays. - """ - def __at(arr, val): - return (np.abs(arr-val)).argmin() - #identify channel and categorize axes - channel = helpers.get_channels(data, channel)[0] - spectral_axis = helpers.get_axes(data, 0)[0] - spatial_axis = helpers.get_axes(data, 1)[0] - #extract background along each axis - if isinstance(spatial_reference_range, int): - spectral_background = helpers.roi(data, {spatial_axis:spatial_reference_range}, return_arrs=True)[channel].reshape(data[spectral_axis].shape) - else: - spectral_background = helpers.roi(data, {spatial_axis:(spatial_reference_range,'average')}, return_arrs=True)[channel].reshape(data[spectral_axis].shape) - if isinstance(spectral_reference_range, int): - spatial_background = helpers.roi(data, {spectral_axis:spectral_reference_range}, return_arrs=True)[channel].reshape(data[spatial_axis].shape) - else: - spatial_background = helpers.roi(data, {spectral_axis:(spectral_reference_range,'average')}, return_arrs=True)[channel].reshape(data[spatial_axis].shape) - #compute combined background using region of overlap as a reference for magnitude - overlap_magnitude = np.average(helpers.roi(data, {0:spectral_reference_range, 1:spatial_reference_range}, return_arrs=True)[channel]) - background = spectral_background*spatial_background - spectral_range = [__at(data[spectral_axis].points, spectral_reference_range[0]), __at(data[spectral_axis].points, spectral_reference_range[1])] - spatial_range = [__at(data[spatial_axis].points, spatial_reference_range[0]), __at(data[spatial_axis].points, spatial_reference_range[1])] - overlap_background = 
np.average(background[spectral_range[0]:spectral_range[1],spatial_range[0]:spatial_range[1]]) - overlap_ratio = overlap_magnitude/overlap_background - background *= overlap_ratio - #create background-subtracted channel - if name is None: - name = f"{channel}_bkgsub_combined" - data.create_channel(name, values=data[channel][:]-background, units=data[channel].units) - if data[channel].signed: - data[name].signed = True - if create_background_channel: - data.create_channel(f"bkg_combined_{channel}", values=background, units=data[channel.units]) - if data[channel].signed: - data[name].signed = True - - if talkback: - print(f"subtracted combined background from channel {channel} of data {data.natural_name}") - -def plot_profile(data, channel, **kwargs): - - #convert axis/channel indices to natural names - channel, = helpers.parse_args(data, channel, dtype='Channel') - - #set parameters for plotting from kwargs - params = { - 'ROI' : None, - 'xticks' : None, - 'yticks' : None, - 'cbar_ticks' : None, - 'xlabel' : None, - 'ylabel' : None, - 'cbar_label' : None, - "contrast" : None, - "vrange" : None, - "reference_lines" : None, - "title" : None - } - params.update(styles.profile) - - if data[channel].signed: - params["cmap"] = mpl.cm.RdBu_r - params.update(**kwargs) - - if params["ROI"] is not None: - out = helpers.roi(data, params["ROI"]) - else: - out = data - - #determine range to be plotted - if params["vrange"] is None: - if params["contrast"] is None: - vrange = helpers.get_range(out, reference_key=channel) - else: - vrange = helpers.contrast(out, channel, params["contrast"]) - else: - vrange = params["vrange"] - - #setup x axis - if params["xlabel"] is None: - try: - params["xlabel"] = out.variables[0].attrs['label'] - except KeyError: - params["xlabel"] = 'spectrum' - - #setup y axis - if params["ylabel"] is None: - try: - params["ylabel"] = out.variables[1].attrs['label'] - except KeyError: - params["ylabel"] = 'y' - - #setup colorbar label - if 
params["cbar_label"] is None: - try: - params["cbar_label"] = out[channel].attrs['label'] - except KeyError: - params["cbar_label"] = 'signal' - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) - #array needs to be transposed before passing to pcolormesh because apparently no matplotlib devs thought about what arrays look like - try: - mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - except TypeError: - mesh = ax.pcolormesh(xgrid, ygrid, out[channel][:], cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) - - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - - if params["title"] is not None: - ax.set_title(params["title"]) - - if params["reference_lines"] is not None: - if type(params["reference_lines"]) is not list: - params["reference_lines"] = [params["reference_lines"]] - for reference_line in params["reference_lines"]: - ax.axvline(x=reference_line, linewidth=1, color='grey', linestyle='--', alpha=0.25) - - #set ticks - if not params["xticks"] and params['xticks'] is not None: - ax.set_xticks([]) - - if not params["yticks"] and params['yticks'] is not None: - ax.set_yticks([]) - - if not params['cbar_ticks'] and params['cbar_ticks'] is not None: - ticks = [] - elif params['cbar_ticks'] is None: - ticks = np.linspace(vrange[0], vrange[1], num=11) - else: - ticks = params['cbar_ticks'] - - # plot colorbar - cbar = plt.colorbar(mesh) - cbar.set_ticks(ticks) - cbar.set_label(params["cbar_label"]) - -def plot_decomposition(data, non_spatial_axis, spatial_axis, channel, **kwargs): - #convert axis/channel indices to natural names - non_spatial_axis, spatial_axis = helpers.parse_args(data, non_spatial_axis, spatial_axis) - channel, = helpers.parse_args(data, channel, dtype='Channel') - - #set parameters for plotting from kwargs - params = { - "ROI" : None, 
- "binning" : None, - "xrange" : None, - "vrange" : None, - "yscale" : 'linear', - "reference_lines" : None, - "xticks" : True, - "yticks" : True, - "title" : None - } - params.update(styles.decomposition) - params.update(**kwargs) - - #extract ROI - if params["ROI"] is not None: - out = helpers.roi(data, params["ROI"]) - else: - out = data - - #identify spatial ranges for indexing - xrange = helpers.get_range(out, reference_key=non_spatial_axis, dtype='Axis') - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - #plot data - if params["binning"] is not None: - if params["binning"] == 'average': - arr_out = np.sum(out[channel][:], axis=1)/np.count_nonzero(np.sum(out[channel][:], axis=0)) - if params["binning"] == 'sum': - arr_out = np.sum(out[channel][:], axis=1) - #determine range to be plotted - vrange = helpers.vrange(arr_out, out[channel].signed, window=1) - - ax.plot(out[non_spatial_axis].points, arr_out, - params["marker"], linewidth=params["linewidth"], alpha=1, color=params["color"]) - else: - #determine range to be plotted - vrange = helpers.get_range(out, reference_key=channel) - for i in range(out[spatial_axis].size): - if np.sum(out[channel][:,i]) != 0: - ax.plot(out[non_spatial_axis][:].flatten(), out[channel][:,i], - params["marker"], linewidth=params["linewidth"], alpha=params["alpha"], color=params["color"]) - - if out[channel].signed: - ax.axhline(y=0, color='black', linewidth=1) - - if params["reference_lines"] is not None: - if type(params["reference_lines"]) is not list: - params["reference_lines"] = [params["reference_lines"]] - for reference_line in params["reference_lines"]: - ax.axvline(x=reference_line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5) - - if params["xrange"] is not None: - xrange = params["xrange"] - ax.set_xlim(*xrange) - if not params["xticks"]: - ax.set_xticks([]) - if params["vrange"] is not None: - vrange = params["vrange"] - ax.set_ylim(*vrange) - 
ax.set_yscale(params["yscale"]) - if not params["yticks"]: - ax.set_yticks([]) - - # label plot - ax.set_xlabel(params["xlabel"]) - ax.set_ylabel(params["ylabel"]) - if params["title"] is not None: - ax.set_title(params["title"]) \ No newline at end of file diff --git a/makeitwright/spectra.py b/makeitwright/spectra.py deleted file mode 100644 index 0b70b76..0000000 --- a/makeitwright/spectra.py +++ /dev/null @@ -1,179 +0,0 @@ -"""plotting routines for 1D data""" - -import numpy as np -import WrightTools as wt -from matplotlib import pyplot as plt -from .process import helpers -from . import styles - - -def plot_spectra(data, **kwargs): - if type(data) is wt.Collection: - data = [data[key] for key in data] - if type(data) is not list: - data = [data] - - #set parameters for plotting from kwargs - params = { - "plot_type" : "line", - "xscale" : "linear", - "xticks" : True, - "yscale" : "linear", - "yticks" : True, - "axis" : 0, - "channel" : -1, - "ROI" : None, - "xrange" : None, - "vrange" : None, - "offset" : 0, - "reference_lines" : None, - "title" : None, - "background_color" : 'default' - } - params.update(styles.spectra) - params.update(**kwargs) - - signed=False - - #parse color parameters to plot - if type(params["colors"]) is list: - colors = params["colors"] - if len(params["colors"]) < len(data): - q, r = divmod(len(data), len(colors)) - colors = q*colors+colors[:r] - else: - try: - colors = params["colors"](np.linspace(0,1,len(data))) - except: - colors = [params["colors"] for i in range(len(data))] - - #setup plot frame - fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) - - for i in range(len(data)): - #convert axis/channel indices to natural names - axis, = helpers.parse_args(data[i], params["axis"]) - - if params['channel']=='prompt': #kill this if else if all your code suddenly stops working - channel, = helpers.parse_args(data[i], input(f'select channel from {data[i].natural_name}: {[ch.natural_name for ch in 
data[i].channels]} '), dtype='Channel') - else: - channel, = helpers.parse_args(data[i], params["channel"], dtype='Channel') - if data[i][channel].signed: - signed=True - - #extract ROI - if params["ROI"] is not None: - out = helpers.roi(data[i], params["ROI"]) - else: - out = data[i] - - #plot data - if params["plot_type"] == "line": - ax.plot(out[axis][:],out[channel][:]+i*params["offset"], - linewidth=params["linewidth"], alpha=params["alpha"], color=colors[i]) - if params["plot_type"] == "scatter": - ax.scatter(out[axis][:],out[channel][:]+i*params["offset"], - marker=params["marker"], alpha=params["alpha"], color=colors[i], s=params["marker_size"]) - - if signed: - ax.axhline(y=0, color='black', linewidth=1) - - if params["reference_lines"] is not None: - if type(params["reference_lines"]) is not list: - params["reference_lines"] = [params["reference_lines"]] - for reference_line in params["reference_lines"]: - ax.axvline(x=reference_line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5) - - #adjust plot frame - if params["xrange"] is not None: - xrange = params["xrange"] - else: - xrange = helpers.get_range(*data, reference_key=params["axis"], dtype='Axis') - if params["xscale"] == 'log' and xrange[0]<=0: - xrange[0] = 0.001 - ax.set_xlim(*xrange) - ax.set_xscale(params["xscale"]) - if params["xlabel"] is None: - try: - params["xlabel"] = out[axis].attrs['label'] - except KeyError: - params["xlabel"] = 'x' - ax.set_xlabel(params["xlabel"]) - if not params["xticks"]: - ax.set_xticks([]) - - if params["vrange"] is not None: - vrange = params["vrange"] - else: - vrange = helpers.get_range(*data, reference_key=params["channel"], offset=params["offset"]) - if params["yscale"] == 'log' and vrange[0]<=0: - vrange[0] = 0.01 - ax.set_ylim(*vrange) - ax.set_yscale(params["yscale"]) - if params["ylabel"] is None: - try: - params["ylabel"] = out[channel].attrs['label'] - except KeyError: - params["ylabel"] = 'y' - ax.set_ylabel(params["ylabel"]) - if not 
params["yticks"]: - ax.set_yticks([]) - - if params["background_color"] != 'default': - if params["background_color"] == 'transparent' or params["background_color"] is None: - ax.set_alpha(0) - else: - ax.set_facecolor(params["background_color"]) - fig.set_alpha(0) - - if params["title"] is not None: - ax.set_title(params["title"]) - - plt.show() - -def plot_tandem(d1,d2, figsize=(2.6,1), axis=0, channels=(-1,-1), - xticks=True, yticks=[True,True], xlabel="wavelength (nm)", ylabels=["reflectance","absorbance"], - xrange=[400,650], vranges=[(0,1),(0,1)], colors=['coral','royalblue'], - linewidth=1, reference_lines=None): - #setup plot frame - fig, ax1 = plt.subplots(figsize=figsize) - ax2 = ax1.twinx() - - #convert axis/channel indices to natural names - axis1, = helpers.parse_args(d1, axis) - axis2, = helpers.parse_args(d2, axis) - channel1, = helpers.parse_args(d1, channels[0], dtype='Channel') - channel2, = helpers.parse_args(d2, channels[1], dtype='Channel') - - #plot data - ax1.plot(d1[axis1][:],d1[channel1][:], linewidth=linewidth, color=colors[0]) - ax2.plot(d2[axis1][:],d2[channel2][:], linewidth=linewidth, color=colors[1]) - - if reference_lines is not None: - if type(reference_lines) is not list: - reference_lines = [reference_lines] - for line in reference_lines: - ax1.axvline(x=line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5) - - #adjust plot frame - if xrange is None: - xrange = helpers.get_range(*[d1,d2], reference_key=axis, dtype='Axis') - ax1.set_xlim(*xrange) - ax1.set_xlabel(xlabel) - if not xticks: - ax1.set_xticks([]) - - for i, v in enumerate(vranges): - if v is None: - if i==0: - vranges[i] = helpers.get_range(d1, reference_key=channel1, offset=0) - if i==1: - vranges[i] = helpers.get_range(d2, reference_key=channel2, offset=0) - ax1.set_ylim(*vranges[0]) - ax2.set_ylim(*vranges[1]) - ax1.set_ylabel(ylabels[0]) - ax2.set_ylabel(ylabels[1]) - if not yticks[0]: - ax1.set_yticks([]) - if not yticks[1]: - ax2.set_yticks([]) \ 
No newline at end of file diff --git a/makeitwright/styles.py b/makeitwright/styles.py deleted file mode 100644 index 1930c97..0000000 --- a/makeitwright/styles.py +++ /dev/null @@ -1,215 +0,0 @@ -import matplotlib.cm as cm -import cmocean - - -beckerhickl_transient = { - "xlabel" : "time (ns)", - "vreflines" : 0, - "marker" : '.', - "markersize" : 3 - } - -profile = { - "fig_width" : 4, - "fig_height" : 3, - "cmap" : cm.viridis, - "xlabel" : None, - "ylabel" : None, - "cbar_label" : None, - "cbar_ticks" : None - } - -profile_andor = { - "fig_width" : 4, - "fig_height" : 3 - } - -profile_horiba = { - "fig_width" : 6.5, - "cbar_ticks" : None - } - -profile_horiba_PL = profile_horiba|{ - "fig_height" : 6.5, - "cmap" : cm.viridis, - "xlabel" : "wavelength (nm)", - "ylabel" : "y (µm)", - "cbar_label" : "PL intensity (cps)" - } - -profile_horiba_Raman = profile_horiba|{ - "fig_height" : 6.5, - "cmap" : cm.inferno, - "xlabel" : "Raman shift (cm\u207b\u2071)", - "ylabel" : "y (µm)", - "cbar_label" : "scattering intensity (cps)" - } - -profile_horiba_timed_series = profile_horiba|{ - "fig_height" : 10, - "cmap" : cm.viridis, - "xlabel" : "wavelength (nm)", - "ylabel" : "excitation time (s)", - "cbar_label" : "PL intensity (cps)" - } - -profile_iontof = { - "fig_width" : 6.5, - "fig_height" : 3.5, - "cmap" : cmocean.cm.matter, - "xlabel" : "distance (µm)", - "ylabel" : "sputtering time (s)", - "cbar_label" : "SI counts", - "cbar_ticks" : None - } - -image = { - "fig_width" : 4, - "fig_height" : 4, - "cmap" : cm.Greys_r, - "xlabel" : None, - "ylabel" : None, - "cbar_label" : "signal (a.u.)", - "ticks" : 'auto' - } - -image_andor = { - "fig_width" : 4, - "fig_height" : 4, - "xlabel" : None, - "ylabel" : None - } - -image_horiba = { - "fig_width" : 6.5, - "fig_height" : 6.5, - "ticks" : 'auto' - } - -image_horiba_PL = image_horiba|{ - "cmap" : cm.viridis, - "xlabel" : "x (µm)", - "ylabel" : "y (µm)", - "cbar_label" : "PL intensity (cps)" - } - -image_horiba_Raman = 
image_horiba|{ - "cmap" : cm.inferno, - "xlabel" : "x (µm)", - "ylabel" : "y (µm)", - "cbar_label" : "scattering intensity (cps)" - } - -image_iontof = { - "fig_width" : 6.5, - "fig_height" : 6.5, - "cmap" : cmocean.cm.matter, - "xlabel" : "x (µm)", - "ylabel" : "y (µm)", - "cbar_label" : "SI counts", - "ticks" : 'auto' - } - -decomposition = { - "fig_width" : 6.5, - "fig_height" : 3.0, - "linewidth" : 1.0, - "marker" : '-', - "color" : 'black', - "alpha" : 0.01, - "xlabel" : "spectrum (a.u.)", - "ylabel" : "signal (a.u.)" - } - -decomposition_andor = { - "fig_width" : 6.5, - "fig_height" : 3.5, - "linewidth" : 1.0, - "marker" : '-', - "color" : 'red', - "alpha" : 0.01, - "xlabel" : "wavelength (nm)", - } - -decomposition_andor_A = decomposition_andor|{ - "ylabel" : "absorbance" - } - -decomposition_andor_PL = decomposition_andor|{ - "ylabel" : "PL intensity (cps)" - } - -decomposition_andor_R = decomposition_andor|{ - "ylabel" : "reflectance" - } - -decomposition_andor_RR0 = decomposition_andor|{ - "ylabel" : "reflection contrast" - } - -decomposition_andor_T = decomposition_andor|{ - "ylabel" : "transmittance" - } - -decomposition_horiba = { - "fig_width" : 6.5, - "fig_height" : 3.5, - "linewidth" : 1.0, - "marker" : '-', - "color" : 'red', - "alpha" : 0.01 - } - -decomposition_horiba_PL = decomposition_horiba|{ - "xlabel" : "wavelength (nm)", - "ylabel" : "PL intensity (cps)", - } - -decomposition_horiba_Raman = decomposition_horiba|{ - "xlabel" : "Raman shift (cm\u207b\u2071)", - "ylabel" : "scattering intensity (cps)", - } - -decomposition_iontof = { - "fig_width" : 6.5, - "fig_height" : 2.0, - "linewidth" : 1.0, - "marker" : '.', - "color" : 'red', - "alpha" : 1, - "xlabel" : "sputtering time (s)", - "ylabel" : "SI counts" - } - -spectra = { - "plot_type" : "line", - "fig_width" : 4, - "fig_height" : 3, - "linewidth" : 2, - "marker" : '.', - "alpha" : 1, - "marker_size" : 5, - "colors" : cm.Set1, - "xlabel" : None, - "ylabel" : None - } - -spectra_TRPL = 
spectra|{ - "plot_type" : "scatter", - "yscale" : "log", - "xlabel" : "t (ns)", - "ylabel" : "norm. counts", - "colors" : cm.Set2, - "marker" : '.', - "marker_size" : 3, - "reference_lines" : 0 - } - -spectra_XRD_pattern = spectra|{ - "fig_height" : 3, - "marker" : 'o', - "marker_size" : 3, - "colors" : cm.Set1, - "xlabel" : "diffraction angle (deg. 2\u03B8)", - "ylabel" : "intensity (a.u.)" - } \ No newline at end of file diff --git a/makeitwright/process/transmittance_references/Ag-P01.csv b/makeitwright/transmittance_references/Ag-P01.csv similarity index 100% rename from makeitwright/process/transmittance_references/Ag-P01.csv rename to makeitwright/transmittance_references/Ag-P01.csv diff --git a/makeitwright/process/transmittance_references/Ag-P02.csv b/makeitwright/transmittance_references/Ag-P02.csv similarity index 100% rename from makeitwright/process/transmittance_references/Ag-P02.csv rename to makeitwright/transmittance_references/Ag-P02.csv diff --git a/makeitwright/process/transmittance_references/BK7.csv b/makeitwright/transmittance_references/BK7.csv similarity index 100% rename from makeitwright/process/transmittance_references/BK7.csv rename to makeitwright/transmittance_references/BK7.csv diff --git a/makeitwright/process/transmittance_references/CaF2.csv b/makeitwright/transmittance_references/CaF2.csv similarity index 100% rename from makeitwright/process/transmittance_references/CaF2.csv rename to makeitwright/transmittance_references/CaF2.csv diff --git a/makeitwright/process/transmittance_references/MgF2.csv b/makeitwright/transmittance_references/MgF2.csv similarity index 100% rename from makeitwright/process/transmittance_references/MgF2.csv rename to makeitwright/transmittance_references/MgF2.csv diff --git a/makeitwright/process/transmittance_references/UVAl.csv b/makeitwright/transmittance_references/UVAl.csv similarity index 100% rename from makeitwright/process/transmittance_references/UVAl.csv rename to 
makeitwright/transmittance_references/UVAl.csv diff --git a/makeitwright/process/transmittance_references/UVFS.csv b/makeitwright/transmittance_references/UVFS.csv similarity index 100% rename from makeitwright/process/transmittance_references/UVFS.csv rename to makeitwright/transmittance_references/UVFS.csv diff --git a/makeitwright/process/transmittance_references/sapphire.csv b/makeitwright/transmittance_references/sapphire.csv similarity index 100% rename from makeitwright/process/transmittance_references/sapphire.csv rename to makeitwright/transmittance_references/sapphire.csv diff --git a/makeitwright/process/xrd.py b/makeitwright/xrd.py similarity index 60% rename from makeitwright/process/xrd.py rename to makeitwright/xrd.py index 681528d..5f4cabf 100644 --- a/makeitwright/process/xrd.py +++ b/makeitwright/xrd.py @@ -3,56 +3,13 @@ from scipy.optimize import curve_fit from scipy.stats import pearsonr import WrightTools as wt -import makeitwright.spectra as spectra, styles -from .helpers import norm, roi +from .lib import spectra, styles +from .lib.helpers import norm, roi pi = np.pi -def fromBruker(*filepaths): - d = [] - for filepath in filepaths: - dtype = "Locked Coupled" - header_size=None - with open(filepath) as f: - txt = f.readlines() - for i, line in enumerate(txt): - if "ScanType" in line: - dtype = line.split('=')[-1].strip() - if "[Data]" in line: - header_size = i+2 - if header_size is None: - try: - arr = np.genfromtxt(filepath, skip_header=166, delimiter=',') - print("Data header was not identified in file. 
Data in instance may not reflect complete file information.") - except: - print("Unable to read data from file due to lack of expected data header.") - else: - arr = np.genfromtxt(filepath, skip_header=header_size, delimiter=',') - - if arr.size > 0: - deg_arr = arr[:,0].flatten() - ch_arr = arr[:,1].flatten() - pat = wt.Data(name=filepath.split('/')[-1]) - pat.create_channel('sig', values=ch_arr) - pat.create_channel('norm', values=norm(ch_arr, 1, 100)) - pat.create_channel('log', values=np.log(norm(ch_arr, 1, 100))) - if dtype=="Locked Coupled": - pat.create_variable('ang', values=deg_arr, units='deg') - pat.transform('ang') - pat.attrs['acquisition'] = 'XRD_2theta' - if dtype=="Z-Drive": - pat.create_variable('z', values=deg_arr, units='mm') - pat.transform('z') - pat.attrs['acquisition'] = 'XRD_2theta' - pat.attrs['dtype'] = 'spectrum' - d.append(pat) - else: - print(f'file {filepath} was loaded but had no values') - - return d - def get_fits(data, channel='norm', function='gauss', xrange='all'): def gauss(x, a, u, s): return a*np.exp(-((x-u)/(2*s))**2) From 288db27fa07f91b2391a314b3068c6c3727829e2 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 13:02:40 -0500 Subject: [PATCH 02/20] Create pyproject.toml --- pyproject.toml | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 pyproject.toml diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..ef3ffeb --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,32 @@ +[build-system] +requires = ["flit_core >= 3.12.0, <4"] +build-backend = "flit_core.buildapi" + +[project] +name = "makeitwright" +description = "Plotting and parsing tools for the Jin Group." 
+authors = [{name="Chris Roy"}] +maintainers = [{name="Dan Kohler"}] +dynamic = ["version"] +requires-python = ">=3.7" +readme = "README.md" +dependencies = [ + "psutil", + "wrighttools", + "cmocean", +] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)", + "Natural Language :: English", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Topic :: Scientific/Engineering", +] + +[project.optional-dependencies] +iontof = ["pySPM"] From dc6ed5b1ae1f01183adf7543f781c27c85c9aa3d Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 13:22:11 -0500 Subject: [PATCH 03/20] revise package imports --- makeitwright/__init__.py | 6 ++++++ makeitwright/andor.py | 4 ++-- makeitwright/beckerhickl.py | 33 +++------------------------------ makeitwright/horiba.py | 5 ++++- makeitwright/iontof.py | 3 +++ 5 files changed, 18 insertions(+), 33 deletions(-) diff --git a/makeitwright/__init__.py b/makeitwright/__init__.py index 0c17c79..8102a2e 100644 --- a/makeitwright/__init__.py +++ b/makeitwright/__init__.py @@ -1,2 +1,8 @@ from .__version__ import * from .lib import * + +from . import andor +from . import beckerhickl +from . import horiba +from . import iontof +from . 
import xrd diff --git a/makeitwright/andor.py b/makeitwright/andor.py index c2d74bb..5ac7b4d 100644 --- a/makeitwright/andor.py +++ b/makeitwright/andor.py @@ -1,7 +1,7 @@ import warnings import numpy as np -from .lib import image, spectralprofile +from .lib import image, spectralprofile, styles from .lib.helpers import roi, set_label, get_channels import makeitwright.lib.styles as styles @@ -275,7 +275,7 @@ def compute_reflection_contrast(sample_data, reference_data, def compute_transmittance(sample_data, reference_data, sample_channel=0, reference_channel=0, dark_reference_data=None, dark_reference_channel=0, - dark_wavelength_range=[0, CMOS_RESPONSE_CUTOFF_NM], + dark_wavelength_range=[0, config["CMOS_RESPONSE_CUTOFF_NM"]], background_ROI=None): """ Determine the spectral transmittance of a sample using the spectral transmittance of the substrate underneath. diff --git a/makeitwright/beckerhickl.py b/makeitwright/beckerhickl.py index ff05378..8ffb637 100644 --- a/makeitwright/beckerhickl.py +++ b/makeitwright/beckerhickl.py @@ -8,38 +8,11 @@ from scipy.stats import pearsonr from .lib.helpers import get_axes, get_channels, set_label, roi -import makeitwright.lib.spectra as spectra -import makeitwright.lib.styles as styles +from .lib import spectra +from .lib import styles -def fromSP130(fpath, name=None): - if fpath.split('.')[-1] != 'asc': - print(f"filetype .{fpath.split('.')[-1]} not supported") - else: - with open(fpath) as f: - txt = f.readlines() - header_size = 0 - for i, line in enumerate(txt): - if 'Title' in line.split() and name is None: - name = line.split()[-1] - if '*BLOCK' in line: - header_size = i+1 - - arr = np.genfromtxt(fpath, delimiter=',', skip_header=header_size, skip_footer=1) - t = arr[:,0] - sig = arr[:,1] - t = t-t[np.argmax(sig)] - out = wt.Data(name=name) - out.create_variable('t', values=t, units='ns') - out['t'].attrs['label'] = "time (ns)" - out.create_channel('sig', values=sig) - out['sig'].attrs['label'] = "PL counts" - 
out.transform('t') - out.create_channel('norm', values=helpers.norm(out['sig'][:], 0.01, 1)) - out['norm'].attrs['label'] = "norm. PL counts" - - return out def get_fits(data, channel='norm', function='biexp'): def exp(t, a, td): @@ -54,7 +27,7 @@ def biexp(t, a1, td1, a2, td2): fits = {} for i in range(len(data)): - out = helpers.roi(data[i], {'t':[0]}) + out = roi(data[i], {'t':[0]}) fit, cov = curve_fit(functions[function], out['t'][:], out[channel][:], bounds=(0,1000000), maxfev=1000*len(out['t'][:])) std = np.sqrt(np.diag(cov)) diff --git a/makeitwright/horiba.py b/makeitwright/horiba.py index d890ee5..780e377 100644 --- a/makeitwright/horiba.py +++ b/makeitwright/horiba.py @@ -5,7 +5,8 @@ from .lib import spectralprofile, hyperspectral def central_wavelength(data): - pass + raise NotImplementedError + def plot_image(data, channel, **kwargs): params = {} @@ -24,6 +25,7 @@ def plot_image(data, channel, **kwargs): else: spectralprofile.plot_image(data, channel, **params) + def plot_profile(data, channel, profile_axis='y', **kwargs): params = {} try: @@ -44,6 +46,7 @@ def plot_profile(data, channel, profile_axis='y', **kwargs): else: spectralprofile.plot_profile(data, channel, **params) + def plot_decomposition(data, channel, **kwargs): params = {} try: diff --git a/makeitwright/iontof.py b/makeitwright/iontof.py index a678b45..973add3 100644 --- a/makeitwright/iontof.py +++ b/makeitwright/iontof.py @@ -45,6 +45,7 @@ def relative_proportion(data, channel0, channel1): data.create_channel(ch_name, values=ch_arr, verbose=True) data[ch_name].signed = True + def plot_image(data, channel, **kwargs): params = {} @@ -55,6 +56,7 @@ def plot_image(data, channel, **kwargs): hyperspectral.plot_image(data, channel, **params) + def plot_profile(data, profile_axis, channel, **kwargs): params = {} @@ -65,6 +67,7 @@ def plot_profile(data, profile_axis, channel, **kwargs): hyperspectral.plot_profile(data, profile_axis, channel, **params) + def plot_depth_trace(data, channel, 
**kwargs): params = {} From ba37c9c1d1e4b90e367d3c63bf3c23aac479a32d Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 13:26:43 -0500 Subject: [PATCH 04/20] Update andor.py --- makeitwright/andor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/makeitwright/andor.py b/makeitwright/andor.py index 5ac7b4d..792cd40 100644 --- a/makeitwright/andor.py +++ b/makeitwright/andor.py @@ -5,6 +5,7 @@ from .lib.helpers import roi, set_label, get_channels import makeitwright.lib.styles as styles + APD_PIXEL = (1325, 1080) SLIT_PIXEL_COLUMN = 1325 CMOS_RESPONSE_CUTOFF_NM = 370 @@ -275,7 +276,7 @@ def compute_reflection_contrast(sample_data, reference_data, def compute_transmittance(sample_data, reference_data, sample_channel=0, reference_channel=0, dark_reference_data=None, dark_reference_channel=0, - dark_wavelength_range=[0, config["CMOS_RESPONSE_CUTOFF_NM"]], + dark_wavelength_range=[0, CMOS_RESPONSE_CUTOFF_NM], background_ROI=None): """ Determine the spectral transmittance of a sample using the spectral transmittance of the substrate underneath. From 0382b529638453e3295cf21dce13e63d457682b6 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 16:16:12 -0500 Subject: [PATCH 05/20] lib dir was hiding initially called lib, but was ignored due to gitignore. 
renamed to `core` to avoid confusion with packaging parsers.__init__ underwent some revision --- makeitwright/__init__.py | 2 +- makeitwright/andor.py | 6 +- makeitwright/beckerhickl.py | 9 +- makeitwright/core/__init__.py | 8 + makeitwright/core/artists.py | 441 +++++++++++++++++++ makeitwright/core/helpers.py | 592 ++++++++++++++++++++++++++ makeitwright/core/hyperspectral.py | 325 ++++++++++++++ makeitwright/core/image.py | 111 +++++ makeitwright/core/parsers/__init__.py | 160 +++++++ makeitwright/core/parsers/andor.py | 184 ++++++++ makeitwright/core/parsers/gwyddion.py | 113 +++++ makeitwright/core/parsers/horiba.py | 197 +++++++++ makeitwright/core/parsers/iontof.py | 80 ++++ makeitwright/core/parsers/sp130.py | 33 ++ makeitwright/core/parsers/xrd.py | 47 ++ makeitwright/core/spectra.py | 178 ++++++++ makeitwright/core/spectralprofile.py | 311 ++++++++++++++ makeitwright/core/styles.py | 215 ++++++++++ makeitwright/horiba.py | 4 +- makeitwright/iontof.py | 2 +- makeitwright/xrd.py | 4 +- 21 files changed, 3007 insertions(+), 15 deletions(-) create mode 100644 makeitwright/core/__init__.py create mode 100644 makeitwright/core/artists.py create mode 100644 makeitwright/core/helpers.py create mode 100644 makeitwright/core/hyperspectral.py create mode 100644 makeitwright/core/image.py create mode 100644 makeitwright/core/parsers/__init__.py create mode 100644 makeitwright/core/parsers/andor.py create mode 100644 makeitwright/core/parsers/gwyddion.py create mode 100644 makeitwright/core/parsers/horiba.py create mode 100644 makeitwright/core/parsers/iontof.py create mode 100644 makeitwright/core/parsers/sp130.py create mode 100644 makeitwright/core/parsers/xrd.py create mode 100644 makeitwright/core/spectra.py create mode 100644 makeitwright/core/spectralprofile.py create mode 100644 makeitwright/core/styles.py diff --git a/makeitwright/__init__.py b/makeitwright/__init__.py index 8102a2e..0db4ef6 100644 --- a/makeitwright/__init__.py +++ b/makeitwright/__init__.py 
@@ -1,5 +1,5 @@ from .__version__ import * -from .lib import * +from .core import * from . import andor from . import beckerhickl diff --git a/makeitwright/andor.py b/makeitwright/andor.py index 792cd40..c472b67 100644 --- a/makeitwright/andor.py +++ b/makeitwright/andor.py @@ -1,9 +1,9 @@ import warnings import numpy as np -from .lib import image, spectralprofile, styles -from .lib.helpers import roi, set_label, get_channels -import makeitwright.lib.styles as styles +from .core import image, spectralprofile, styles +from .core.helpers import roi, set_label, get_channels +import makeitwright.core.styles as styles APD_PIXEL = (1325, 1080) diff --git a/makeitwright/beckerhickl.py b/makeitwright/beckerhickl.py index 8ffb637..dfd50b0 100644 --- a/makeitwright/beckerhickl.py +++ b/makeitwright/beckerhickl.py @@ -1,15 +1,12 @@ -__name__ = "beckerhickl" -__author__ = "Chris Roy, Song Jin Research Group, Dept. of Chemistry, University of Wisconsin - Madison" - import numpy as np import matplotlib.pyplot as plt import WrightTools as wt from scipy.optimize import curve_fit from scipy.stats import pearsonr -from .lib.helpers import get_axes, get_channels, set_label, roi -from .lib import spectra -from .lib import styles +from .core.helpers import get_axes, get_channels, set_label, roi +from .core import spectra +from .core import styles diff --git a/makeitwright/core/__init__.py b/makeitwright/core/__init__.py new file mode 100644 index 0000000..60761ac --- /dev/null +++ b/makeitwright/core/__init__.py @@ -0,0 +1,8 @@ +from . import parsers +from . import artists +from . import styles +from . import helpers + +from . import spectra +from . import spectralprofile +from . 
import hyperspectral diff --git a/makeitwright/core/artists.py b/makeitwright/core/artists.py new file mode 100644 index 0000000..7595ac2 --- /dev/null +++ b/makeitwright/core/artists.py @@ -0,0 +1,441 @@ +import numpy as np +import WrightTools as wt +import matplotlib.cm as cm +from matplotlib import pyplot as plt +from .helpers import roi, parse_args +from . import styles + + +def plot(data, **kwargs): + if type(data) is wt.Collection: + data = [data[key] for key in data] + if type(data) is not list: + data = [data] + + + #set parameters for plotting from kwargs + params = { + "plot_type" : "line", + "xscale" : "linear", + "xticks" : True, + "yscale" : "linear", + "yticks" : True, + "axis" : 0, + "channel" : -1, + "ROI" : None, + "xrange" : None, + "vrange" : None, + "offset" : 0, + "reference_lines" : None, + "title" : None + } + params.update(styles.spectra) + params.update(**kwargs) + + signed=False + + colors = __parse_colors(data, params['colors']) + + #setup plot frame + fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) + + for i in range(len(data)): + #convert axis/channel indices to natural names + axis, = parse_args(data[i], params["axis"]) + + if params['channel']=='prompt': #kill this if else if all your code suddenly stops working + channel, = parse_args(data[i], input(f'select channel from {data[i].natural_name}: {[ch.natural_name for ch in data[i].channels]} '), dtype='Channel') + else: + channel, = parse_args(data[i], params["channel"], dtype='Channel') + if data[i][channel].signed: + signed=True + + #extract ROI + if params["ROI"] is not None: + out = roi(data[i], params["ROI"]) + else: + out = data[i] + + #plot data + if params["plot_type"] == "line": + ax.plot(out[axis][:],out[channel][:]+i*params["offset"], + linewidth=params["linewidth"], alpha=params["alpha"], color=colors[i]) + if params["plot_type"] == "scatter": + ax.scatter(out[axis][:],out[channel][:]+i*params["offset"], + marker=params["marker"], 
alpha=params["alpha"], color=colors[i], s=params["marker_size"]) + + if signed: + ax.axhline(y=0, color='black', linewidth=1) + + if params["reference_lines"] is not None: + if type(params["reference_lines"]) is not list: + params["reference_lines"] = [params["reference_lines"]] + for reference_line in params["reference_lines"]: + ax.axvline(x=reference_line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5) + + #adjust plot frame + if params["xrange"] is not None: + xrange = params["xrange"] + else: + xrange = __get_range(*data, reference_key=params["axis"], dtype='Axis') + if params["xscale"] == 'log' and xrange[0]<=0: + xrange[0] = 0.001 + ax.set_xlim(*xrange) + ax.set_xscale(params["xscale"]) + if params["xlabel"] is None: + try: + params["xlabel"] = out[axis].attrs['label'] + except KeyError: + params["xlabel"] = 'x' + ax.set_xlabel(params["xlabel"]) + if not params["xticks"]: + ax.set_xticks([]) + + if params["vrange"] is not None: + vrange = params["vrange"] + else: + vrange = __get_range(*data, reference_key=params["channel"], offset=params["offset"]) + if params["yscale"] == 'log' and vrange[0]<=0: + vrange[0] = 0.001 + ax.set_ylim(*vrange) + ax.set_yscale(params["yscale"]) + if params["ylabel"] is None: + try: + params["ylabel"] = out[channel].attrs['label'] + except KeyError: + params["ylabel"] = 'y' + ax.set_ylabel(params["ylabel"]) + if not params["yticks"]: + ax.set_yticks([]) + + if params["title"] is not None: + ax.set_title(params["title"]) + + plt.show() + +def plot2D(data, channel, **kwargs): + + #convert axis/channel indices to natural names + channel, = parse_args(data, channel, dtype='Channel') + + #set parameters for plotting from kwargs + params = { + "ROI" : None, + 'ticks' : 'auto', + "vrange" : None, + "reference_lines" : None, + "title" : None + } + params.update(styles.profile) + + if data[channel].signed: + params["cmap"] = cm.RdBu_r + params.update(**kwargs) + + if params["ROI"] is not None: + out = roi(data, 
params["ROI"]) + else: + out = data + + #determine range to be plotted + if params["vrange"] is None: + vrange = __get_range(out, reference_key=channel) + else: + vrange = params["vrange"] + + #setup plot frame + fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) + + #plot data + xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) + #array needs to be transposed before passing to pcolormesh because apparently no matplotlib devs thought about what arrays look like + try: + mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) + except TypeError: + mesh = ax.pcolormesh(xgrid, ygrid, out[channel][:], cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) + ax.set_xlabel(params["xlabel"]) + ax.set_ylabel(params["ylabel"]) + if params["title"] is not None: + ax.set_title(params["title"]) + + if params["reference_lines"] is not None: + if type(params["reference_lines"]) is not list: + params["reference_lines"] = [params["reference_lines"]] + for reference_line in params["reference_lines"]: + ax.axvline(x=reference_line, linewidth=1, color='grey', linestyle='--', alpha=0.25) + + #set ticks + if params['ticks'] == 'auto': + ticks = np.linspace(vrange[0], vrange[1], num=11) + elif params['ticks'] is None: + ticks = [] + else: + ticks = params['ticks'] + + # plot colorbar + cbar = plt.colorbar(mesh) + cbar.set_ticks(ticks) + cbar.set_label(params["cbar_label"]) + +def plot3D(data, profile_axis, channel, **kwargs): + + #convert axis/channel indices to natural names + profile_axis, = parse_args(data, profile_axis) + spectral_axis = data.axes[-1].natural_name + channel, = parse_args(data, channel, dtype='Channel') + non_profile_axis = [axis.natural_name for axis in data.axes[:-1] if axis.natural_name != profile_axis][0] + + #set parameters for plotting from kwargs + params = { + "ROI" : None, + 'ticks' : 'auto', + "vrange" : None, + "reference_lines" : None, + "title" : None + } + 
params.update(styles.profile) + if data[channel].signed: + params["cmap"] = cm.RdBu_r + params.update(**kwargs) + + #extract ROI + if params["ROI"] is not None: + out = roi(data, params["ROI"]) + if len(out.axes) != 2: + out = roi(out, {non_profile_axis : 'sum'}) + else: + out = roi(data, {non_profile_axis : 'sum'}) + out.transform(spectral_axis, profile_axis) + + #determine range to be plotted + if params["vrange"] is None: + vrange = __get_range(out, reference_key=channel) + else: + vrange = params["vrange"] + + #setup plot frame + fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) + + #plot data + xgrid, ygrid = np.meshgrid(out.axes[1][:], out.axes[0][:]) + try: + mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) + except TypeError: + mesh = ax.pcolormesh(xgrid, ygrid, out[channel][:], cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) + ax.set_xlabel(params["xlabel"]) + ax.set_ylabel(params["ylabel"]) + + if params["reference_lines"] is not None: + if type(params["reference_lines"]) is not list: + params["reference_lines"] = [params["reference_lines"]] + for reference_line in params["reference_lines"]: + ax.axvline(x=reference_line, linewidth=1, color='grey', linestyle='--', alpha=0.25) + + #set ticks + if params['ticks'] == 'auto': + ticks = np.linspace(vrange[0], vrange[1], num=11) + elif params['ticks'] is None: + ticks = [] + else: + ticks = params['ticks'] + + # plot colorbar + cbar = plt.colorbar(mesh) + cbar.set_ticks(ticks) + cbar.set_label(params["cbar_label"]) + + if params["title"] is not None: + ax.set_title(params["title"]) + +def image(data, channel, **kwargs): + #convert axis/channel indices to natural names + channel, = parse_args(data, channel, dtype='Channel') + + #set parameters for plotting from kwargs + params = { + "ROI" : None, + "vrange" : None, + "reference_lines" : None, + "title" : None + } + params.update(styles.image) + if 
data[channel].signed: + params["cmap"] = cm.RdBu_r + params.update(**kwargs) + + #extract ROI + if params["ROI"] is not None: + out = roi(data, params["ROI"]) + else: + out = data + + #determine range to be plotted + if params["vrange"] is None: + vrange = __get_range(out, reference_key=channel) + else: + vrange = params["vrange"] + + #setup plot frame + fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) + + #plot data + xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) + ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) + + if params["reference_lines"] is not None: + if type(params["reference_lines"]) is not list: + params["reference_lines"] = [params["reference_lines"]] + for reference_line in params["reference_lines"]: + ax.axvline(x=reference_line, linewidth=1, color='grey', linestyle='--', alpha=0.25) + + ax.set_xlabel(params["xlabel"]) + ax.set_ylabel(params["ylabel"]) + ax.set_aspect("equal") + if params["title"] is not None: + ax.set_title(params["title"]) + +def image3D(data, channel, **kwargs): + + #convert axis/channel indices to natural names + channel, = parse_args(data, channel, dtype='Channel') + non_spatial_axis = data.axes[-1].natural_name + + #set parameters for plotting from kwargs + params = { + "ROI" : None, + 'ticks' : 'auto', + "vrange" : None, + "title" : None + } + params.update(styles.image) + if data[channel].signed: + params["cmap"] = cm.RdBu_r + params.update(**kwargs) + + #extract ROI + if params["ROI"] is not None: + out = roi(data, params["ROI"]) + if len(out.axes) != 2: + out = roi(out, {non_spatial_axis : 'sum'}) + else: + out = roi(data, {non_spatial_axis : 'sum'}) + + #determine range to be plotted + if params["vrange"] is None: + vrange = __get_range(out, reference_key=channel) + else: + vrange = params["vrange"] + + #setup plot frame + fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) + + #plot data + xgrid, 
ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) + mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) + ax.set_xlabel(params["xlabel"]) + ax.set_ylabel(params["ylabel"]) + ax.set_aspect("equal") + if params["title"] is not None: + ax.set_title(params["title"]) + + #set ticks + if params['ticks'] == 'auto': + ticks = np.linspace(vrange[0], vrange[1], num=11) + elif params['ticks'] is None: + ticks = [] + else: + ticks = params['ticks'] + + # plot colorbar + cbar = plt.colorbar(mesh) + cbar.set_ticks(ticks) + cbar.set_label(params["cbar_label"]) + +def plot_tandem(d1,d2, figsize=(2.6,1), axis=0, channels=(-1,-1), + xticks=True, yticks=[True,True], xlabel="wavelength (nm)", ylabels=["reflectance","absorbance"], + xrange=[400,650], vranges=[(0,1),(0,1)], colors=['coral','royalblue'], + linewidth=1, reference_lines=None): + #setup plot frame + fig, ax1 = plt.subplots(figsize=figsize) + ax2 = ax1.twinx() + + #convert axis/channel indices to natural names + axis1, = parse_args(d1, axis) + axis2, = parse_args(d2, axis) + channel1, = parse_args(d1, channels[0], dtype='Channel') + channel2, = parse_args(d2, channels[1], dtype='Channel') + + #plot data + ax1.plot(d1[axis1][:],d1[channel1][:], linewidth=linewidth, color=colors[0]) + ax2.plot(d2[axis1][:],d2[channel2][:], linewidth=linewidth, color=colors[1]) + + if reference_lines is not None: + if type(reference_lines) is not list: + reference_lines = [reference_lines] + for line in reference_lines: + ax1.axvline(x=line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5) + + #adjust plot frame + if xrange is None: + xrange = __get_range(*[d1,d2], reference_key=axis, dtype='Axis') + ax1.set_xlim(*xrange) + ax1.set_xlabel(xlabel) + if not xticks: + ax1.set_xticks([]) + + for i, v in enumerate(vranges): + if v is None: + if i==0: + vranges[i] = __get_range(d1, reference_key=channel1, offset=0) + if i==1: + vranges[i] = __get_range(d2, 
reference_key=channel2, offset=0) + ax1.set_ylim(*vranges[0]) + ax2.set_ylim(*vranges[1]) + ax1.set_ylabel(ylabels[0]) + ax2.set_ylabel(ylabels[1]) + if not yticks[0]: + ax1.set_yticks([]) + if not yticks[1]: + ax2.set_yticks([]) + +def __parse_colors(data, colors): + if type(colors) is list: + if len(colors) < len(data): + q, r = divmod(len(data), len(colors)) + colors = q*colors+colors[:r] + else: + try: + colors = colors(np.linspace(0,1,len(data))) + except: + colors = [colors for i in range(len(data))] + return colors + +def __get_range(*data, reference_key=0, dtype='Channel', window='default', offset=0): + ranges = [] + signed=False + default_windows = { + 'Axis' : 1, + 'Channel' : 1.1 + } + if window=='default': + window = default_windows[dtype] + + for d in data: + key, = parse_args(d, reference_key, dtype=dtype) + ranges.append([np.min(d[key][:]), np.max(d[key][:])]) + if dtype=='Channel': + if d[key].signed: + signed=True + + ranges_min, ranges_max = min([r[0] for r in ranges]), max([r[1] for r in ranges]) + if offset != 0: + ranges_max = sum([r[0] for r in ranges]) + offset*(len(data)-1) + [r[1] for r in ranges][-1] + + rng = [(ranges_min+(ranges_max-ranges_min)/2)-(window*(ranges_max-ranges_min)/2), (ranges_min+(ranges_max-ranges_min)/2)+(window*(ranges_max-ranges_min)/2)] + if signed and ranges_min*ranges_max < 0 and not offset: #make window symmetric about zero if min and max have opposite sign + return [-window*max(rng),window*max(rng)] + else: + return rng + +def __contrast(d, ch, contrast=[99,1]): + return [np.percentile(d[ch][:],min(contrast)),np.percentile(d[ch][:],max(contrast))] \ No newline at end of file diff --git a/makeitwright/core/helpers.py b/makeitwright/core/helpers.py new file mode 100644 index 0000000..c2e5d26 --- /dev/null +++ b/makeitwright/core/helpers.py @@ -0,0 +1,592 @@ +import numpy as np +from scipy.signal import find_peaks_cwt +import matplotlib as mpl +import matplotlib.pyplot as plt +import WrightTools as wt + +def 
def parse_args(data, *args, dtype='Axis', return_name=True):
    """
    Convert axis/channel identifiers between integer indices and natural names.

    Parameters
    ----------
    data : WrightTools Data
        Data object whose axes/channels are referenced.
    *args : int or str
        Identifiers to convert.
    dtype : {'Axis', 'Channel'}, optional
        Which object list of `data` to resolve against. Any other value
        returns the arguments unchanged.
    return_name : bool, optional
        If True, convert indices to names; if False, convert names to indices.

    Returns
    -------
    tuple
        The converted identifiers (always a tuple, even for one argument).
    """
    argout = list(args)
    if dtype in ('Axis', 'Channel'):
        objects = data.axes if dtype == 'Axis' else data.channels
        for i, arg in enumerate(args):
            if return_name:
                if isinstance(arg, int):
                    argout[i] = objects[arg].natural_name
            elif isinstance(arg, str):
                # name -> index: first object whose natural_name matches
                argout[i] = [j for j, obj in enumerate(objects) if obj.natural_name == arg][0]
    return tuple(argout)

def parse_kwargs(params, **kwargs):
    """Overwrite entries of *params* with any keyword overrides; returns *params* (mutated in place)."""
    params.update(kwargs)
    return params

def _resolve_keys(objects, keys, asindex, kind):
    """Shared resolver for get_axes/get_channels: map mixed name/index keys onto *objects*."""
    lookup = {obj.natural_name: i for i, obj in enumerate(objects)}
    idx = []
    for key in keys:
        if type(key) is int:
            idx.append(key)
        else:
            try:
                idx.append(lookup[key])
            except KeyError:
                # unknown names are reported and dropped rather than raising
                print(f'{kind} {key} not found')
    if asindex:
        return tuple(idx)
    return tuple(objects[i].natural_name for i in idx)

def get_axes(data, *keys, asindex=False):
    """Resolve axis keys (names or indices) to names, or to indices if asindex=True."""
    return _resolve_keys(data.axes, keys, asindex, 'axis')

def get_channels(data, *keys, asindex=False):
    """Resolve channel keys (names or indices) to names, or to indices if asindex=True."""
    # BUG FIX: the missing-key message previously said "axis ... not found"
    # even though this function resolves channels.
    return _resolve_keys(data.channels, keys, asindex, 'channel')

def set_label(data, key, name):
    """
    Attach a display label to the axis/channel *key* of one or more data objects.

    Raises
    ------
    TypeError
        If *key* is not a string.
    """
    if type(key) is not str:
        raise TypeError(f'key must be string, function received {type(key)}')

    if type(data) is not list:
        data = [data]

    for d in data:
        try:
            d[key].attrs['label'] = name
        except KeyError:
            print(f'no object with key {key} in data {d.natural_name}')
def find_nearest(arr, val, return_index=True):
    """Return the index (or, if return_index=False, the value) of the element of *arr* nearest to *val*."""
    idx = (np.abs(arr - val)).argmin()
    if return_index:
        return idx
    else:
        return arr[idx]

def find_peaks(*data, channel=-1, axis=0, peak_width="medium", noisy=False, **kwargs):
    """
    Locate peaks in one or more datasets via continuous-wavelet-transform matching.

    Parameters
    ----------
    *data : WrightTools Data
        Datasets to search.
    channel, axis : int or str, optional
        Channel/axis keys, resolved independently per dataset.
    peak_width : {'narrow', 'medium', 'broad'} or number, optional
        Expected peak width; a number is interpreted in axis units.
    noisy : bool, optional
        Raise the CWT signal-to-noise requirement for noisy data.
    **kwargs
        Forwarded to scipy.signal.find_peaks_cwt.

    Returns
    -------
    dict
        Peak positions keyed by dataset (and by slice index for >1D channels).
    """
    peaks = {}
    cwtargs = {}
    if noisy:
        cwtargs["min_snr"] = 2
    cwtargs.update(kwargs)

    for i, d in enumerate(data):
        # BUG FIX: resolve into locals instead of overwriting the `channel`/`axis`
        # parameters -- otherwise every dataset after the first was resolved
        # against the first dataset's names.
        ch, = parse_args(d, channel, dtype='Channel')
        ax, = parse_args(d, axis)
        dname = str(i) + "_" + d.natural_name
        peaks[dname] = {}

        # points-per-unit of the axis, used to convert a numeric width to samples
        axratio = d[ax].size / (np.max(d[ax].points) - np.min(d[ax].points))
        peak_width_vals = {
            "narrow": d[ax].size / 1000,
            "medium": d[ax].size / 100,
            "broad": d[ax].size / 10,
        }
        if type(peak_width) is int or type(peak_width) is float:
            width = axratio * peak_width
        else:
            try:
                width = peak_width_vals[peak_width]
            except KeyError:
                print('Peak width argument not recognized. Select between narrow, medium, or broad.')
                width = peak_width_vals["medium"]

        if len(d[ch].shape) > 1:
            # multidimensional channel: chop into 1D spectra and search each
            out = d.chop(ax)
            out = [spect for spect in out.values()]
            for j, spect in enumerate(out):
                peaks[dname][str(j)] = {}
                peaks[dname][str(j)]["coords"] = [(c.natural_name, c.value) for c in spect.constants]
                if np.sum(spect[ch][:]) != 0:
                    p = find_peaks_cwt(spect[ch].points, width, **cwtargs)
                    if p.size == 0:
                        peaks[dname][str(j)]["peaks"] = None
                    else:
                        peaks[dname][str(j)]["peaks"] = np.asarray([spect[ax][idx] for idx in p])
        else:
            if np.sum(d[ch][:]) != 0:
                p = find_peaks_cwt(d[ch].points, width, **cwtargs)
                if p.size == 0:
                    peaks[dname] = None
                else:
                    peaks[dname] = np.asarray([d[ax][idx] for idx in p])

    return peaks

def norm(arr, tmin, tmax):
    """Linearly rescale *arr* onto [tmin, tmax]; constant arrays map to tmin (nan -> num)."""
    diff = tmax - tmin
    arr_range = np.max(arr) - np.min(arr)
    norm_arr = np.nan_to_num((((arr - np.min(arr)) * diff) / arr_range) + tmin)
    return norm_arr

def split_n(arr, *axes):
    """
    Split an array sequentially along multiple axes. Multi-axis calls nested lists of arrays.
    Calling a single axis is equivalent to the numpy.split() method.

    Parameters
    ----------
    arr : numpy array
        The array to be split.
    *axes : iterable of ints
        The axes along which the array will be split.

    Returns
    -------
    arr : lists of numpy arrays
        The split sub-arrays.
    """
    axes = list(axes)
    while axes:
        if type(arr) is list:
            spl_arr = []
            for a in arr:
                spl_arr.append(split_n(a, *axes))
            arr = spl_arr
        else:
            arr = np.split(arr, arr.shape[axes[0]], axis=axes[0])
        del(axes[0])
    return arr

def norm_split(split_arr, bounds):
    """
    Independently normalize all sub-arrays in a sequentially split numpy array.

    Parameters
    ----------
    split_arr : lists of numpy arrays
        The split array in the form generated by split_n.
    bounds : 2-element iterable of numbers
        The lower and upper bounds of the normalized array, in order.

    Returns
    -------
    split_arr : lists of numpy arrays
        The normalized arrays in the same format as-called.
    """
    if type(split_arr) is list:
        return [norm_split(a, bounds) for a in split_arr]
    return norm(split_arr, bounds[0], bounds[1])

def inverse_split_n(split_arr, *split_axes):
    """
    Reconstruct a split array into its original form, provided the list of axes
    that was used to split the array via split_n.

    Parameters
    ----------
    split_arr : lists of numpy arrays
        The split array in the form generated by split_n.
    *split_axes : int
        The axes arguments called in split_n to produce split_arr, in the same order.

    Returns
    -------
    split_arr : numpy array
        A single array matching the original unsplit dimensionality.
    """
    split_axes = list(split_axes)
    while split_axes:
        if type(split_arr[0]) is list:
            arr = []
            for l in split_arr:
                arr.append(inverse_split_n(l, split_axes[-1]))
            split_arr = arr
            del(split_axes[-1])
        else:
            split_arr = np.concatenate(split_arr, axis=split_axes[-1])
            del(split_axes[-1])
    return split_arr

def func_split(split_arr, func='norm', **kwargs):
    """
    Independently apply an operation to all sub-arrays of a sequentially split array.

    Parameters
    ----------
    split_arr : lists of numpy arrays
        The split array in the form generated by split_n.
    func : {'norm', 'bkg_remove', 'spike_filter'}, optional
        Operation to apply. Only 'norm' is currently implemented.
    **kwargs
        Operation parameters; unspecified ones fall back to the defaults below.

    Returns
    -------
    split_arr : lists of numpy arrays
        The processed arrays in the same nested format as-called.
    """
    params = {
        'norm': {'bounds': [0, 1]},
        'bkg_remove': {'negative': False, 'threshold': 0.5, 'top_range': 100},
        'spike_filter': {'width': 4},
    }
    # BUG FIX: merge user overrides into the defaults instead of replacing them
    # wholesale -- `params[func] = kwargs` dropped defaults like 'bounds' and
    # raised KeyError when called without kwargs.
    params[func].update(kwargs)

    if type(split_arr) is list:
        # BUG FIX: recurse into func_split itself (previously recursed into
        # norm_split with a `func` kwarg it does not accept).
        return [func_split(a, func=func, **params[func]) for a in split_arr]
    if func == 'norm':
        split_arr = norm(split_arr, params[func]['bounds'][0], params[func]['bounds'][1])
    if func == 'bkg_remove':
        pass  # TODO: not yet implemented
    return split_arr
def normalize_by_axis(data, channel, *axes, bounds=(0, 1)):
    """
    Normalize a channel of a data object along explicitly chosen axes.

    For a 3D dataset (x, y, z): normalizing by z yields independently
    normalized z-profiles for every (x, y); normalizing by (x, y) yields
    independently normalized xy planes per z-slice; normalizing by all three
    normalizes the channel globally.

    Parameters
    ----------
    data : WrightTools Data
        The data containing the channel to normalize.
    channel : str or int
        Key or index of the channel.
    *axes : str or int
        Keys or indices of the axes along which to normalize.
    bounds : 2-tuple of numbers, optional
        Lower and upper bounds of the normalized values.

    Returns
    -------
    None. A new "norm_<axes>" channel is added to *data*.
    """
    ax_idx = parse_args(data, *axes, return_name=False)
    keep_dims = [j for j, _ in enumerate(data.axes) if j not in ax_idx]
    channel, = parse_args(data, channel, dtype='Channel')

    values = data[channel][:]
    pieces = split_n(values, *keep_dims)
    pieces = norm_split(pieces, bounds)
    normed = inverse_split_n(pieces, *keep_dims)

    name = "norm_"
    for ax in [axis.natural_name for j, axis in enumerate(data.axes) if j not in keep_dims]:
        name = name + data[ax].natural_name

    data.create_channel(name, values=normed)

def background_mask(data, channel, *axes, negative=True):
    """Stub: intended to derive a background mask along chosen axes (not implemented)."""
    ax_idx = parse_args(data, *axes, return_name=False)
    keep_dims = [j for j, _ in enumerate(data.axes) if j not in ax_idx]
    channel, = parse_args(data, channel, dtype='Channel')

    values = data[channel][:]
    pieces = split_n(values, *keep_dims)
    pass

def get_range(*data, reference_key=0, dtype='Channel', window='default', offset=0):
    """
    Compute a padded display range spanning one axis/channel across datasets.

    `window` scales the span about its center (default 1.0 for axes, 1.1 for
    channels). Signed channels crossing zero return a zero-symmetric range
    unless an offset is in play.
    """
    if window == 'default':
        window = {'Axis': 1, 'Channel': 1.1}[dtype]

    extrema = []
    signed = False
    for d in data:
        key, = parse_args(d, reference_key, dtype=dtype)
        arr = d[key][:]
        extrema.append([np.min(arr), np.max(arr)])
        if dtype == 'Channel':
            if d[key].signed:
                signed = True

    lo = min([pair[0] for pair in extrema])
    hi = max([pair[1] for pair in extrema])
    if offset != 0:
        # stacked/offset display: extend the top accordingly
        hi = sum([pair[0] for pair in extrema]) + offset * (len(data) - 1) + [pair[1] for pair in extrema][-1]

    center = lo + (hi - lo) / 2
    half = window * (hi - lo) / 2
    rng = [center - half, center + half]
    if signed and lo * hi < 0 and not offset:
        # symmetrize about zero when min and max have opposite sign
        return [-window * max(rng), window * max(rng)]
    return rng

def imshowarr(a, rotate=False, cmap=None, ticks=None, vrange=None):
    """Quick-look display of a bare 2D array with imshow (square aspect, optional ticks)."""
    fig, ax = plt.subplots(figsize=(6.5, 6.5))
    cmap = mpl.cm.viridis if cmap is None else cmap
    v = [np.min(a), np.max(a)] if vrange is None else vrange
    ax.imshow(a, vmin=v[0], vmax=v[1], cmap=cmap)
    if ticks is None:
        ax.set_xticks([])
        ax.set_yticks([])
    ax.set_aspect('equal')

def __at(arr, val):
    # index of the array element nearest to val
    return (np.abs(arr - val)).argmin()
def roi(data, ROI, return_arrs=False, verbose=False):
    """
    Extract a region of interest (ROI) from data objects without generating
    useless secondary data or collapsed variables.

    Parameters
    ----------
    data : WrightTools Data, WrightTools Collection, or list of WrightTools Data
        The data from which the ROI will be extracted. Axes must be matched to
        the ROI by index or name, but shapes need not be identical.
    ROI : dict
        String keys are axis names; integer keys are axis indices. Values:
        a number extracts the nearest single point (collapsing the axis);
        a 1- or 2-element list extracts a range; the strings 'average',
        'median', 'sum' (and the other numpy reductions below) collapse the
        axis via that operation; a tuple applies a sequence of the above.
    return_arrs : bool, optional, default False
        If True, return a list of dicts of {name: array} instead of Data.
    verbose : bool, optional, default False
        Toggle talkback.

    Returns
    -------
    out : same container type as input, or list of dicts
        New Data instance(s) containing only the ROI, with collapsed
        variables removed.

    See Also
    --------
    WrightTools.Data.collapse, WrightTools.Data.moment, WrightTools.Data.split
    """
    def __copy_attrs(data, new_data, object_key):
        # do not overwrite default HDF5 parameters that come with new instances
        nocopy = {key for key in new_data[object_key].attrs.keys()}
        for key, value in data[object_key].attrs.items():
            if key not in nocopy:
                new_data[object_key].attrs[key] = value

    operations = {
        'sum': np.sum,
        'product': np.prod,
        'average': np.average,
        'std': np.std,
        'var': np.var,
        'median': np.median,
        'min': np.min,
        'max': np.max,
    }

    if type(data) is not list and type(data) is not wt.Data and type(data) is not wt.Collection:
        raise TypeError(f'Unsupported data type {type(data)} was passed to the function. Supported data types include WrightTools Data objects, lists of WrightTools Data objects, or WrightTools Collection objects.')

    if type(data) is wt.Collection and not return_arrs:
        out = wt.Collection(name=data.natural_name)
        data = [data[d] for d in data]
    else:
        out = []
        if type(data) is not list:
            data = [data]

    for d in data:
        # work on plain-array copies so the input data is never modified
        variables = dict([(var.natural_name, d[var.natural_name][:]) for var in d.variables])
        axes = [ax.natural_name for ax in d.axes]
        channels = dict([(ch.natural_name, d[ch.natural_name][:]) for ch in d.channels])

        for key, value in ROI.items():
            axis = key
            if key not in axes:
                # try indexing using the key provided if it isn't a valid axis name
                if type(key) is int and key in range(len(d.axes)):
                    axis = d.axes[key].natural_name
                else:
                    axis = None
                    print(f'axis {key} not found')

            if axis is not None:
                collapsed = False
                axarr = variables[axis]
                axidx = [i for i, dimlength in enumerate(axarr.shape) if dimlength > 1]
                # no dimension greater than 1 means the variable is already collapsed
                if not axidx:
                    collapsed = True
                else:
                    axidx = axidx[0]
                # interpret a single operation or a sequence of operations
                if type(value) is tuple:
                    ops = [op for op in value]
                else:
                    ops = [value]
                # extract the ROI
                for op in ops:
                    if not collapsed:
                        if type(op) is str and op in operations.keys():
                            for ch, charr in channels.items():
                                if charr.shape[axidx] == axarr.shape[axidx] and charr.ndim == axarr.ndim:
                                    channels[ch] = operations[op](charr, axis=axidx)
                            collapsed = True

                        if type(op) is int or type(op) is float:
                            ax0 = __at(axarr, op)
                            for ch, charr in channels.items():
                                if charr.shape[axidx] == axarr.shape[axidx] and charr.ndim == axarr.ndim:
                                    extracted = np.split(charr, charr.shape[axidx], axis=axidx)[ax0]
                                    channels[ch] = np.squeeze(extracted, axis=axidx)
                            collapsed = True
                            if verbose:
                                print(f'ROI extracted at {axis} = {op} for data {d.natural_name}')

                        if type(op) is list:
                            if len(op) not in [1, 2]:
                                print(f'specified bounds for split along axis {key} contained {len(op)} elements, but only 1 or 2 elements were expected')
                            bounds = sorted([__at(axarr, bound) for bound in op])
                            if np.split(axarr, bounds, axis=axidx)[1].shape[axidx] == 1:
                                collapsed = True

                            for ch, charr in channels.items():
                                if charr.ndim == axarr.ndim and charr.shape[axidx] == axarr.shape[axidx]:
                                    channels[ch] = np.split(charr, bounds, axis=axidx)[1]
                            for var, varr in variables.items():
                                if varr.ndim == axarr.ndim and varr.shape[axidx] == axarr.shape[axidx]:
                                    variables[var] = np.split(varr, bounds, axis=axidx)[1]
                            if verbose:
                                print(f'extracted range {op[0]} to {op[-1]} along {axis} for data {d.natural_name}')

                            axarr = variables[axis]

                    else:
                        print(f'cannot interpret operation {op} for axis {axis} of data {d.natural_name} because the variable was already collapsed')

                if collapsed:
                    variables.pop(axis)
                    axes.remove(axis)
                    for var, varr in variables.items():
                        if varr.ndim == axarr.ndim and varr.shape[axidx] == 1:
                            variables[var] = np.squeeze(varr, axis=axidx)
                    for ch, charr in channels.items():
                        if charr.ndim == axarr.ndim and charr.shape[axidx] == 1:
                            # BUG FIX: key by channel name (ch), not by the array
                            # itself -- ndarrays are unhashable, so the original
                            # `channels[charr] = ...` raised TypeError here.
                            channels[ch] = np.squeeze(charr, axis=axidx)
                    if verbose:
                        print(f'axis {axis} collapsed via operation {op} for data {d.natural_name}')

        if return_arrs:
            # return a dictionary of arrays deconstructed from the original data
            arrs = {}
            for var, varr in variables.items():
                if var in axes:
                    arrs[var] = varr
            for ch, charr in channels.items():
                arrs[ch] = charr
            out.append(arrs)

        else:
            # construct new Data objects
            if type(out) is wt.Collection:
                out.create_data(name=d.natural_name)
                d_out = out[-1]
            else:
                d_out = wt.Data(name=d.natural_name)

            keysHDF5 = {key for key in d_out.attrs.keys()}

            for var, varr in variables.items():
                if d[var].units is not None:
                    d_out.create_variable(var, values=varr, unit=d[var].units)
                else:
                    d_out.create_variable(var, values=varr, units=None)
                __copy_attrs(d, d_out, var)

            for ch, charr in channels.items():
                if d[ch].units is not None:
                    d_out.create_channel(ch, values=charr, units=d[ch].units)
                else:
                    d_out.create_channel(ch, values=charr, units=None)
                if d[ch].signed:
                    d_out[ch].signed = True
                __copy_attrs(d, d_out, ch)

            for key, value in d.attrs.items():
                if key not in keysHDF5:
                    d_out.attrs[key] = value
            d_out.transform(*axes)
            if type(out) is not wt.Collection:
                out.append(d_out)

    if len(out) == 1:
        out = out[0]

    return out

def show(data):
    """One-line summary (name and axis names) for each Data object in *data*."""
    if type(data) is not list:
        data = [data]
    return [f'{i} - name: {d.natural_name}, axes:{[ax.natural_name for ax in d.axes]}' for i, d in enumerate(data) if type(d) is wt.Data]

def contrast(d, ch, contrast=[99, 1]):
    """Percentile-based display range for channel *ch* (robust to spikes)."""
    return [np.percentile(d[ch][:], min(contrast)), np.percentile(d[ch][:], max(contrast))]

def vrange(arr, signed, window=1.1, manual_range=None):
    """Padded display range for *arr*; zero-symmetric when signed with negative values."""
    vmin, vmax = np.min(arr), np.max(arr)
    if signed and vmin < 0:
        return [-window * max([abs(vmin), abs(vmax)]), window * max([abs(vmin), abs(vmax)])]
    else:
        return [(vmin + (vmax - vmin) / 2) - (window * (vmax - vmin) / 2), (vmin + (vmax - vmin) / 2) + (window * (vmax - vmin) / 2)]
def remove_background(data, channel, threshold=0.1, negative=False, return_mask=False, max_ref_count=10):
    """
    Remove background pixels from the x-y plane of a hyperspectral image,
    using the spectrally binned image as the reference signal. Background
    (x, y) points are zeroed along the entire spectral axis.

    Parameters
    ----------
    data : WrightTools Data
        The data for which the background-subtracted channel will be generated.
    channel : int or str
        The channel to duplicate with the background removed.
    threshold : float in [0, 1], optional
        Fraction of the reference maximum below which a pixel counts as
        background. Default 0.1.
    negative : bool, optional
        Keep only the background instead (useful when the region of interest
        is the signal minimum). Default False.
    return_mask : bool, optional
        Also store the binary mask as a channel. Default False.
    max_ref_count : int, optional
        Number of brightest pixels averaged to estimate the channel maximum
        (spike-robust). Default 10.

    Returns
    -------
    None. Adds a "<channel>_nobkg" channel (and optionally a mask) to *data*.
    """
    channel, = helpers.parse_args(data, channel, dtype='Channel')

    # spectrally binned image of the channel serves as the reference
    binned = np.sum(data[channel][:], axis=2)

    # representative maximum: mean of the brightest pixels, to dodge spikes
    if max_ref_count > binned.size:
        max_ref_count = int(binned.size / 10)
    brightest = np.sort(binned.flatten())[-max_ref_count:]
    reference_max = np.average(brightest)

    # binary mask: 1 where signal survives the threshold, 0 where background
    keep = np.where(binned < threshold * reference_max, 0, binned)
    keep = np.where(keep > 0, 1, keep)

    # broadcast the 2D mask along the spectral axis and apply it
    mask = np.repeat(keep[:, :, None], data.axes[2].size, axis=2)
    if negative:
        mask = 1 - mask
    cleaned = data[channel][:] * mask

    data.create_channel(name=channel + "_nobkg", values=cleaned)
    data[channel + '_nobkg'].signed = data[channel].signed
    if return_mask:
        data.create_channel(name=data[channel].natural_name + "_mask", values=mask)
def get_profile(data, profile_axis, ROI=None):
    """
    Extract a profile along one spatial direction of a 3D hyperspectral dataset.

    Parameters
    ----------
    data : WrightTools Data
        Must have at least 3 axes (two spatial, one spectral).
    profile_axis : int or str
        The spatial axis along which the profile runs.
    ROI : dict, optional
        Region of interest (axis key -> bounds/operation) applied before
        profiling; must not collapse any dimension on its own.

    Returns
    -------
    WrightTools Data or None
        The 2D (spectral, profile) data, or None if the ROI would collapse
        a needed dimension.
    """
    profile_axis, = helpers.parse_args(data, profile_axis)
    remaining = [axis.natural_name for axis in data.axes if axis.natural_name != profile_axis]
    non_profile_axis = remaining[0]
    spectral_axis = remaining[1]

    # reject ROIs that would collapse any dimension prematurely
    dims_too_low = False
    if ROI is not None:
        if [val for val in ROI.values() if val == 'all']:
            dims_too_low = True
        if spectral_axis in ROI.keys():
            if type(ROI[spectral_axis]) is int or type(ROI[spectral_axis]) is float:
                dims_too_low = True
        if profile_axis in ROI.keys():
            if type(ROI[profile_axis]) is int or type(ROI[profile_axis]) is float:
                dims_too_low = True
        if len([val for val in ROI.values() if type(val) is int or type(val) is float]) > 1:
            dims_too_low = True
        if dims_too_low:
            print("Dimensionality of ROI is too low. Do not collapse any dimensions of the data before calling this method.")
            return
    if not dims_too_low:
        out = helpers.roi(data, ROI)
    else:
        out = data
    if len(out.axes) > 2:
        # collapse the remaining non-profile spatial axis
        out = helpers.roi(out, {non_profile_axis: 'all'})

    out.transform()

    # transpose every channel and variable so the spectral axis leads
    for channel in out.channels:
        name = channel.natural_name
        flipped = out[name][:].transpose()
        out.remove_channel(name, verbose=False)
        out.create_channel(name, values=flipped, verbose=False)
    for variable in out.variables:
        name = variable.natural_name
        flipped = out[name][:].transpose()
        units = variable.units
        out.remove_variable(name, verbose=False)
        out.create_variable(name, values=flipped, units=units, verbose=False)

    out.transform(spectral_axis, profile_axis)
    print(f'profile along direction <{profile_axis}> extracted')

    return out
def plot_image(data, channel, **kwargs):
    """
    Plot the spectrally collapsed x-y image of a hyperspectral channel with a
    colorbar. Styling defaults come from styles.image; signed channels get a
    diverging colormap.
    """
    # convert axis/channel indices to natural names
    channel, = helpers.parse_args(data, channel, dtype='Channel')
    non_spatial_axis = data.axes[-1].natural_name

    # plotting parameters: defaults < style sheet < signed override < kwargs
    params = {
        "ROI": None,
        'ticks': 'auto',
        "vrange": None,
        "title": None,
    }
    params.update(styles.image)
    if data[channel].signed:
        params["cmap"] = mpl.cm.RdBu_r
    params.update(**kwargs)

    # extract ROI, always ending with a 2D (x, y) dataset
    if params["ROI"] is not None:
        out = helpers.roi(data, params["ROI"])
        if len(out.axes) != 2:
            out = helpers.roi(out, {non_spatial_axis: 'all'})
    else:
        out = helpers.roi(data, {non_spatial_axis: 'all'})

    # color range
    if params["vrange"] is None:
        vrange = helpers.get_range(out, reference_key=channel)
    else:
        vrange = params["vrange"]

    fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height']))

    xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:])
    mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]),
                         cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1])
    ax.set_xlabel(params["xlabel"])
    ax.set_ylabel(params["ylabel"])
    ax.set_aspect("equal")
    if params["title"] is not None:
        ax.set_title(params["title"])

    # colorbar ticks
    if params['ticks'] == 'auto':
        ticks = np.linspace(vrange[0], vrange[1], num=11)
    elif params['ticks'] is None:
        ticks = []
    else:
        ticks = params['ticks']

    cbar = plt.colorbar(mesh)
    cbar.set_ticks(ticks)
    cbar.set_label(params["cbar_label"])
def plot_profile(data, profile_axis, channel, **kwargs):
    """
    Plot a 2D (spectral vs. profile) false-color map of a hyperspectral channel
    with a colorbar. Styling defaults come from styles.profile.
    """
    # convert axis/channel indices to natural names
    profile_axis, = helpers.parse_args(data, profile_axis)
    spectral_axis = data.axes[-1].natural_name
    channel, = helpers.parse_args(data, channel, dtype='Channel')
    non_profile_axis = [axis.natural_name for axis in data.axes[:-1] if axis.natural_name != profile_axis][0]

    # plotting parameters: defaults < style sheet < signed override < kwargs
    params = {
        "ROI": None,
        'ticks': 'auto',
        "vrange": None,
        "reference_lines": None,
        "title": None,
    }
    params.update(styles.profile)
    if data[channel].signed:
        params["cmap"] = mpl.cm.RdBu_r
    params.update(**kwargs)

    # extract ROI, collapsing the non-profile spatial axis
    if params["ROI"] is not None:
        out = helpers.roi(data, params["ROI"])
        if len(out.axes) != 2:
            out = helpers.roi(out, {non_profile_axis: 'all'})
    else:
        out = helpers.roi(data, {non_profile_axis: 'all'})
    out.transform(spectral_axis, profile_axis)

    # color range
    if params["vrange"] is None:
        vrange = helpers.get_range(out, reference_key=channel)
    else:
        vrange = params["vrange"]

    fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height']))

    xgrid, ygrid = np.meshgrid(out.axes[1][:], out.axes[0][:])
    try:
        mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1])
    except TypeError:
        # fall back to the untransposed array when the shapes already line up
        mesh = ax.pcolormesh(xgrid, ygrid, out[channel][:], cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1])
    ax.set_xlabel(params["xlabel"])
    ax.set_ylabel(params["ylabel"])

    if params["reference_lines"] is not None:
        refs = params["reference_lines"]
        if type(refs) is not list:
            refs = [refs]
        for ref in refs:
            ax.axvline(x=ref, linewidth=1, color='grey', linestyle='--', alpha=0.25)

    # colorbar ticks
    if params['ticks'] == 'auto':
        ticks = np.linspace(vrange[0], vrange[1], num=11)
    elif params['ticks'] is None:
        ticks = []
    else:
        ticks = params['ticks']

    cbar = plt.colorbar(mesh)
    cbar.set_ticks(ticks)
    cbar.set_label(params["cbar_label"])

    if params["title"] is not None:
        ax.set_title(params["title"])

def plot_decomposition(data, x_axis, y_axis, spectral_axis, channel, **kwargs):
    """
    Overlay the spectrum of every (x, y) pixel of a hyperspectral channel, or
    plot a single binned ('average'/'sum') spectrum. Styling defaults come
    from styles.decomposition.
    """
    # convert axis/channel indices to natural names
    x_axis, y_axis, spectral_axis = helpers.parse_args(data, x_axis, y_axis, spectral_axis)
    channel, = helpers.parse_args(data, channel, dtype='Channel')

    params = {
        "ROI": None,
        "xrange": None,
        "vrange": None,
        "yscale": 'linear',
        "binning": None,
        "reference_lines": None,
        "xticks": True,
        "yticks": True,
        "title": None,
    }
    params.update(styles.decomposition)
    params.update(**kwargs)

    if params["ROI"] is not None:
        out = helpers.roi(data, params["ROI"])
    else:
        out = data

    # default x-range spans the spectral axis
    xrange = helpers.get_range(out, reference_key=spectral_axis, dtype='Axis')

    fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height']))

    if params["binning"] is not None:
        if params["binning"] == 'average':
            # average over pixels carrying signal (nonzero entries)
            arr_out = np.sum(out[channel][:], axis=(0, 1)) / (np.count_nonzero(out[channel][:]) / out[channel].shape[2])
        if params["binning"] == 'sum':
            arr_out = np.sum(out[channel][:], axis=(0, 1))
        vrange = helpers.vrange(arr_out, out[channel].signed, window=1)
        ax.plot(out[spectral_axis].points, arr_out,
                params["marker"], linewidth=params["linewidth"], alpha=1, color=params["color"])
    else:
        vrange = helpers.get_range(out, reference_key=channel)
        for i in range(out[x_axis].size):
            for j in range(out[y_axis].size):
                if np.sum(out[channel][i, j, :]) != 0:
                    ax.plot(out[spectral_axis].points, out[channel][i, j, :],
                            params["marker"], linewidth=params["linewidth"], alpha=params["alpha"], color=params["color"])

    if params["xrange"] is not None:
        xrange = params["xrange"]
    ax.set_xlim(*xrange)
    if not params["xticks"]:
        ax.set_xticks([])
    if params["vrange"] is not None:
        vrange = params["vrange"]
    ax.set_ylim(*vrange)
    ax.set_yscale(params["yscale"])
    if not params["yticks"]:
        ax.set_yticks([])

    if out[channel].signed:
        ax.axhline(y=0, color='black', linewidth=1)

    if params["reference_lines"] is not None:
        refs = params["reference_lines"]
        if type(refs) is not list:
            refs = [refs]
        for ref in refs:
            ax.axvline(x=ref, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5)

    # label plot
    ax.set_xlabel(params["xlabel"])
    ax.set_ylabel(params["ylabel"])
    ax.set_yscale(params["yscale"])
    if params["title"] is not None:
        ax.set_title(params["title"])
def get_pixel_location(data, pixel):
    """
    Get the axis coordinates of an exact pixel in an image.

    Arguments
    ---------
    data : WrightTools Data - The image.
    pixel : tuple (x, y) - The pixel coordinates.

    Returns
    -------
    tuple (x, y) - The location of the pixel in axis coordinates.
    """
    x_points = data.axes[0].points
    y_points = data.axes[1].points
    return (x_points[pixel[0]], y_points[pixel[1]])

def remove_background(data, channel, threshold=0.5, negative=False, return_mask=False, max_ref_count=500):
    """
    Zero out the background of an image channel. A pixel counts as background
    when it falls below `threshold` times a spike-robust estimate of the
    channel maximum (the mean of the `max_ref_count` brightest pixels).
    Adds a "<channel>_nobkg" channel (and optionally the mask) to *data*.
    """
    channel, = helpers.parse_args(data, channel, dtype='Channel')

    values = data[channel][:]
    if max_ref_count > values.size:
        max_ref_count = int(values.size / 10)
    brightest = np.sort(values.flatten())[-max_ref_count:]
    reference_max = np.average(brightest)

    # binary mask: 1 where signal survives the threshold, 0 where background
    keep = np.where(values < threshold * reference_max, 0, values)
    mask = np.where(keep > 0, 1, keep)
    if negative:
        mask = 1 - mask
    cleaned = values * mask

    data.create_channel(name=channel + "_nobkg", values=cleaned, units=data[channel].units)
    data[channel + '_nobkg'].signed = data[channel].signed
    if return_mask:
        data.create_channel(name=data[channel].natural_name + "_mask", values=mask)
None, + "contrast" : None, + "crosshairs" : None, + "xticks" : None, + "yticks" : None, + "title" : None + } + params.update(styles.image) + if data[channel].signed: + params["cmap"] = mpl.cm.RdBu_r + params.update(**kwargs) + + #extract ROI + if params["ROI"] is not None: + out = helpers.roi(data, params["ROI"]) + else: + out = data + + if params["xlabel"] is None: + try: + params["xlabel"] = out.variables[0].attrs['label'] + except KeyError: + params["xlabel"] = 'x' + + if params["ylabel"] is None: + try: + params["ylabel"] = out.variables[1].attrs['label'] + except KeyError: + params["ylabel"] = 'y' + + #determine range to be plotted + if params["vrange"] is None: + if params["contrast"] is None: + vrange = helpers.get_range(out, reference_key=channel) + else: + vrange = helpers.contrast(out, channel, params["contrast"]) + else: + vrange = params["vrange"] + + #setup plot frame + fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) + + #plot data + xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) + ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) + + if params["crosshairs"] is not None: + h, v = params["crosshairs"] + if h is not None: + ax.axvline(x=h, linewidth=1, color='white', linestyle='--', alpha=0.5) + if v is not None: + ax.axhline(y=v, linewidth=1, color='white', linestyle='--', alpha=0.5) + + if not params["xticks"] and params['xticks'] is not None: + ax.set_xticks([]) + if not params["yticks"] and params['yticks'] is not None: + ax.set_yticks([]) + + ax.set_xlabel(params["xlabel"]) + ax.set_ylabel(params["ylabel"]) + + ax.set_aspect("equal") + + if params["title"] is not None: + ax.set_title(params["title"]) \ No newline at end of file diff --git a/makeitwright/core/parsers/__init__.py b/makeitwright/core/parsers/__init__.py new file mode 100644 index 0000000..c3cf90b --- /dev/null +++ b/makeitwright/core/parsers/__init__.py @@ -0,0 +1,160 @@ +import 
WrightTools as wt +import pathlib + +from psutil import virtual_memory + +from .andor import fromAndorNeo +from .gwyddion import fromGwyddion_traces +from .sp130 import fromSP130 +from .horiba import fromLabramHR, horiba_typeID +from .iontof import fromITA, ITApeaks +from .xrd import fromBruker + + +def typeID(*fpaths): + """ + Infer what kind of data the file contains. + The kind will inform on how to correctly import the data. + """ + types = {} + for fpath in map(pathlib.Path, fpaths): + if fpath.suffix == '.ita': + types[fpath] = 'iontof_SIMS' + print(f"file {fpath} is IonToF SIMS data") + + if fpath.suffix == '.txt': + with open(fpath) as f: + txt = f.read() + if "LabRAM HR" in txt: + if horiba.typeID(fpath) is not None: + types[fpath] = horiba_typeID(fpath) + if "Goniometer" in txt: + types[fpath] = 'Bruker_XRD' + if "[m]" in txt: + types[fpath] = 'Gwyddion_traces' + + if fpath.suffix == '.asc': + with open(fpath) as f: + txt = f.read() + if "*BLOCK" in txt: + types[fpath] = 'TRPL' + else: + types[fpath] = 'ASCII' + + if fpath.suffix == '.wt5': + types[fpath] = 'wt5' + + print(f"{len(types)} of {len(fpaths)} files identified as valid data types") + return types + + +def listfiles(fdir:str|pathlib.Path, pattern:str="*") -> list[pathlib.Path]: + """Generate a list of filepaths within a directory. + Includes files from nested directories. + + Parameters + ---------- + fdir: path-like + directory to walk + pattern: string + pattern used to filter files. 
default uses no filter + """ + return [ + pi for pi in filter( + lambda pi: pi.is_file(), pathlib.Path(fdir).rglob(pattern) + ) + ] + + +def parse(fdir, objective, select_types=None, keywords:list|str=[], exclude=[]): + """ + DOCUMENTATION NEEDED + """ + files = listfiles(fdir) + + include = [1 for i in range(len(files))] + if keywords: + if type(keywords) is not list: + keywords = [keywords] + for kw in keywords: + for i, f in enumerate(files): + if kw not in f: + include[i]=0 + if exclude: + if type(exclude) is not list: + exclude = [exclude] + for x in exclude: + for i, f in enumerate(files): + if x in f: + include[i]=0 + + files = [file for i, file in zip(include, files) if i] + print(f'found {sum(include)} files matching keyword specifications') + + ftypes = typeID(*files) + if select_types: + to_delete=[] + num_removed=0 + for key, value in ftypes.items(): + if value not in select_types: + to_delete.append(key) + num_removed+=1 + if to_delete: + for key in to_delete: + del(ftypes[key]) + print(f'excluded {num_removed} files that did not match specified data type(s)') + + if 'ASCII' in ftypes.values(): + if not objective: + objective = input(f'Enter objective lens magnification if all data in this directory used the same lens. 
Otherwise, press enter: ') + if not objective: + objective = 'prompt' + + #make sure call doesn't generate too much data, roughly 1 GB + too_much_data = False + if len([dtype for dtype in ftypes.values() if dtype=='iontof_SIMS']) > 1: + too_much_data = True + if len([dtype for dtype in ftypes.values() if dtype=='ASCII']) > 100: + too_much_data = True + if len(ftypes) > 200: + too_much_data = True + if sum([f.state()["st_size"] for f in files]) > virtual_memory().available: + too_much_data = True + + if too_much_data: + raise MemoryError("too much data in directory, parsing cancelled to prevent storage overflow") + + d = [] + for fpath, dtype in ftypes.items(): + basename = fpath.split('/')[-1].split('.')[0] + + if dtype.startswith('LabramHR'): + d.append(fromLabramHR(fpath, name=basename)) + + elif dtype=='Bruker_XRD': + l0 = len(d) + d = d + fromBruker(fpath) + + elif dtype=='Gwyddion_traces': + d.append(fromGwyddion_traces(fpath, name=None, ID_steps=True)) + + elif dtype=='iontof_SIMS': + d.append((fpath, ITApeaks(fpath))) + + elif dtype=='TRPL': + l0 = len(d) + d.append(fromSP130(fpath, name=basename)) + print(basename) + + elif dtype=='ASCII': + try: + d.append(fromAndorNeo(fpath, name=basename, objective_lens=objective)) + except: + print(f'attempted to extract ASCII data from path <{fpath}> but it was not recognized by the andor module') + print(basename) + + elif dtype=='wt5': + d.append(wt.open(fpath)) + if len(d)==1: + d=d[0] + return d diff --git a/makeitwright/core/parsers/andor.py b/makeitwright/core/parsers/andor.py new file mode 100644 index 0000000..53d7140 --- /dev/null +++ b/makeitwright/core/parsers/andor.py @@ -0,0 +1,184 @@ +import WrightTools as wt +import numpy as np +import pathlib +from os import fspath + + +def fromAndorNeo(fpath, name=None, objective_lens='prompt', cps=False): + """Create a data object from Andor Solis software (ascii exports). + + Parameters + ---------- + fpath : path-like + Path to file (should be .asc format). 
+ Can be either a local or remote file (http/ftp). + Can be compressed with gz/bz2, decompression based on file name. + name : string (optional) + Name to give to the created data object. If None, filename is used. + Default is None. + + Returns + ------- + data + New data object. + """ + + objective_lenses = { + '5x-Jin' : 0.893, + '20x-Jin' : 3.52, + '100x-Wright' : 18.2, + '5' : 0.893, + '20' : 3.52, + '100' : 18.2, + 5 : 0.893, + 20 : 3.52, + 100 : 18.2 + } + + # parse filepath + filepath = pathlib.Path(fpath) + + if not ".asc" in filepath.suffixes: + wt.exceptions.WrongFileTypeWarning.warn(filepath, ".asc") + # parse name + if name is None: + name = filepath.name.split("/")[-1] + + if objective_lens=='prompt': + objective_lens = input(f'enter magnification for data at {name}: ') + if not objective_lens: + objective_lens = 0 + + # create data + ds = np.DataSource(None) + f = ds.open(fspath(fpath), "rt") + axis0 = [] + arr = [] + attrs = {} + + line0 = f.readline().strip()[:-1] + line0 = [float(x) for x in line0.split(",")] # TODO: robust to space, tab, comma + axis0.append(line0.pop(0)) + arr.append(line0) + + def get_frames(f, arr, axis0): + axis0_written = False + while True: + line = f.readline().strip()[:-1] + if len(line) == 0: + break + else: + line = [float(x) for x in line.split(",")] + # signature of new frames is restart of axis0 + if not axis0_written and (line[0] == axis0[0]): + axis0_written = True + if axis0_written: + line.pop(0) + else: + axis0.append(line.pop(0)) + arr.append(line) + return arr, axis0 + + arr, axis0 = get_frames(f, arr, axis0) + nframes = len(arr) // len(axis0) + + i = 0 + while i < 3: + line = f.readline().strip() + if len(line) == 0: + i += 1 + else: + try: + key, val = line.split(":", 1) + except ValueError: + pass + else: + attrs[key.strip()] = val.strip() + + f.close() + + #create data object + arr = np.array(arr) + axis0 = np.array(axis0) + data = wt.Data(name=name) + if float(attrs["Grating Groove Density (l/mm)"]) == 0: 
+ xname = 'x' + dtype = 'image' + try: + axis0 = axis0/objective_lenses[objective_lens] + xunits = 'µm' + except KeyError: + xunits = 'px' + else: + xname = 'wl' + xunits = 'nm' + dtype = 'spectralprofile' + + axis1 = np.arange(arr.shape[-1]) + yname='y' + try: + axis1 = axis1/objective_lenses[objective_lens] + yunits = 'µm' + except KeyError: + yunits = 'px' + + axes = [xname, yname] + + if nframes == 1: + arr = np.array(arr) + data.create_variable(name=xname, values=axis0[:, None], units=xunits) + data.create_variable(name=yname, values=axis1[None, :], units=yunits) + else: + frames = np.arange(nframes) + try: + ct = float(attrs["Kinetic Cycle Time (secs)"]) + frames = frames*ct + tname = 't' + tunits = 's' + except KeyError: + tname = 'frame' + tunits = None + arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0])) + data.create_variable(name=tname, values=frames[:, None, None], units=tunits) + data.create_variable(name=xname, values=axis0[None, :, None], units=xunits) + data.create_variable(name=yname, values=axis1[None, None, :], units=yunits) + axes = [tname] + axes + + if xname=='wl': + if xunits=='nm': + data[xname].attrs['label'] = "wavelength (nm)" + if xunits=='wn': + data[xname].attrs['label'] = "wavenumber (cm-1)" + if xname=='x': + data[xname].attrs['label'] = "x (µm)" + if yname=='y': + data[yname].attrs['label'] = "y (µm)" + + data.transform(*axes) + if cps: + try: + arr = arr/float(attrs["Exposure Time (secs)"]) + except KeyError: + pass + try: + arr = arr/int(attrs["Number of Accumulations"]) + except KeyError: + pass + + data.create_channel(name='sig', values=arr, signed=False) + if cps: + data['sig'].attrs['label'] = "intensity (cps)" + else: + data['sig'].attrs['label'] = "counts" + + for key, val in attrs.items(): + data.attrs[key] = val + + # finish + print("data created at {0}".format(data.fullpath)) + print(" axes: {0}".format(data.axis_names)) + print(" shape: {0}".format(data.shape)) + data.attrs['dtype']=dtype + + return data + 
import numpy as np


def fromGwyddion_traces(filepath, name=None, convert_units=True, ID_steps=False, flatten=False):
    """
    Generate individual Data objects for a series of traces as exported from Gwyddion workup.

    Arguments
    ---------
    filepath - str - The path to where the data is located.

    Keyword Arguments
    -----------------
    name - str -
        The base name for the data.  Defaults to the file name.
    convert_units - bool -
        When True, converts the units of x and y into what is anticipated for typical AFM topography (um, nm)
    ID_steps - bool -
        When True, identifies the most significant topography change in the trace as a "step" and sets that position as 0 in the x array
    flatten - bool -
        When True, subtracts the median slope from the y trace

    Returns
    -------
    data - WrightTools Data object or list of WrightTools Data objects - the data generated from the file's arrays
    """
    import WrightTools as wt  # local import: module stays importable without WrightTools

    if name is None:
        basename = filepath.split('/')[-1].split('.')[0]
    else:
        basename = name

    # sniff the header: row count, delimiter, dimension names, and units
    header = 0
    delimiter = None
    dims = None
    units = None
    with open(filepath) as f:
        txt = f.readlines()
    for i, line in enumerate(txt):
        if '.' not in line:  # assumes each row of data has a decimal somewhere
            header += 1
        if 'x' in line:
            spl = line.split()
            dims = [(spl[j], spl[j + 1]) for j in range(0, len(spl), 2)]
        if '[' in line:
            units = [u.strip('[]') for u in line.split()]
            units = [(units[j], units[j + 1]) for j in range(0, len(units), 2)]
        if ',' in line and i > 2:
            delimiter = ','
        if i > 10:
            break

    arr = np.genfromtxt(filepath, skip_header=header, delimiter=delimiter)
    if arr.shape[1] > 2:
        profiles = np.split(arr, arr.shape[1] / 2, axis=1)
    else:
        profiles = [arr]
    profiles = [p[~np.isnan(p).any(axis=1)] for p in profiles]

    if dims is None:
        dims = [('x', 'y') for _ in range(len(profiles))]
    if units is None:
        units = [None for _ in range(len(profiles))]

    data = []
    for i, (profile, dim, unit) in enumerate(zip(profiles, dims, units)):
        x, y = profile[:, 0], profile[:, 1]

        # default unit bookkeeping; fixes NameError when convert_units=False
        # (xunit/yunit were previously only assigned inside the convert branch)
        if unit is None:
            xunit = yunit = None
        else:
            xunit, yunit = unit[0], unit[1]

        if convert_units:
            if unit is None:
                # BUG FIX: x was converted to um (and y left in m) while both
                # recorded units remained 'm'
                x = wt.units.convert(x, 'm', 'um')
                y = wt.units.convert(y, 'm', 'nm')
                xunit, yunit = 'um', 'nm'
                print('no units for x or y identified - assumed each to be meters')
            else:
                if wt.units.is_valid_conversion(unit[0], 'um'):
                    x = wt.units.convert(x, unit[0], 'um')
                    xunit = 'um'
                else:
                    print(f'unrecognized unit {unit[0]} for x dimension of profile {i} - conversion did not proceed')
                if wt.units.is_valid_conversion(unit[1], 'nm'):
                    y = wt.units.convert(y, unit[1], 'nm')
                    yunit = 'nm'
                else:
                    # BUG FIX: message previously said "x dimension"
                    print(f'unrecognized unit {unit[1]} for y dimension of profile {i} - conversion did not proceed')

        if ID_steps:
            # largest gradient magnitude marks the step edge; re-zero x there
            steppos = np.argmax(np.abs(np.gradient(y)))
            x = x - x[steppos]
            xlabel = f'distance from edge ({xunit})'
        else:
            xlabel = f'distance ({xunit})'

        if flatten:
            # subtract the median linear background
            slope = np.median(np.gradient(y)) / np.median(np.gradient(x))
            bkg = slope * x + y[0]
            y = y - bkg

        d = wt.Data(name=f'{basename}_profile{i}')
        d.create_variable(dim[0], values=x, units=xunit)
        d.create_channel(dim[1], values=y, units=yunit)
        d.create_channel(f'{dim[1]}_rel', values=y - np.min(y), units=yunit)
        d[dim[0]].attrs['label'] = xlabel
        d[dim[1]].attrs['label'] = f'topography ({yunit})'
        d[f'{dim[1]}_rel'].attrs['label'] = f'relative height ({yunit})'
        d.transform(dim[0])
        data.append(d)

    if len(data) == 1:
        data = data[0]
    return data


def horiba_typeID(filepath):
    """Classify a Horiba LabRAM HR text export.

    Returns 'LabramHR_spectrum', 'LabramHR_linescan', 'LabramHR_map', or
    None when the column layout is unrecognized.  A single leading position
    column of consecutive integers is treated as a survey (stack of spectra)
    rather than a linescan.
    """
    with open(filepath) as f:
        # original counted '#' anywhere in the file as header lines; preserved
        header_size = sum(1 for line in f if "#" in line)

    wl_arr = np.genfromtxt(filepath, skip_header=header_size, max_rows=1)
    ch_arr = np.genfromtxt(filepath, skip_header=header_size + 1)
    xy_cols = ch_arr.shape[1] - wl_arr.shape[0]

    if xy_cols == 0:
        return 'LabramHR_spectrum'
    if xy_cols == 1:
        y = ch_arr[:, 0]
        is_survey = bool(np.all(np.diff(y) == 1))
        return 'LabramHR_spectrum' if is_survey else 'LabramHR_linescan'
    if xy_cols == 2:
        return 'LabramHR_map'
    return None
time" in line: + acq = float(line.split('=\t')[1]) + if "Accumulations" in line: + accum= int(line.split('=\t')[1]) + if "Range (" in line: + if 'eV' in line: + spectral_units='eV' + elif 'cm' in line: + spectral_units='wn' + else: + spectral_units='nm' + if 'Spectro' in line: + if 'eV' in line: + spectral_units='eV' + elif 'cm' in line: + spectral_units='wn' + else: + spectral_units='nm' + total_acq_time = acq*accum + acq_type = {'wn':"Raman", 'nm':"PL", 'eV':"PL"} + siglabels = {'wn':"scattering intensity", 'nm':"PL intensity", 'eV':"PL intensity"} + for key, value in siglabels.items(): + if cps: + siglabels[key] = siglabels[key] + " (cps)" + else: + siglabels[key] = siglabels[key] + " (counts)" + + spectlabels = {'wn':"Raman shift (cm\u207b\u2071)", 'nm':"wavelength (nm)", 'eV':"energy (eV)"} + + wl_arr = np.genfromtxt(filepath, skip_header=header_size, max_rows=1) + ch_arr = np.genfromtxt(filepath, skip_header=header_size+1) + xy_cols = ch_arr.shape[1]-wl_arr.shape[0] + + if xy_cols==0: + sig = ch_arr[:,1] + if cps: + sig = sig/total_acq_time + wl = ch_arr[:,0] + d = wt.Data(name=name) + d.create_variable('wl', values=wl, units=spectral_units) + d['wl'].label = spectlabels[spectral_units] + d.create_channel('sig', values=sig) + d['sig'].label = siglabels[spectral_units] + d.create_channel('norm', values=norm(sig, 0, 1)) + d['norm'].label = 'norm. 
' + siglabels[spectral_units].split(' (')[0] + d.transform('wl') + d.attrs['dtype'] = 'spectrum' + d.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] + d.attrs['exposure time (s)'] = acq + d.attrs['number of accumulations'] = accum + print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} spectrum") + + if xy_cols==1: + sig = ch_arr[:,1:].transpose() + wl = wl_arr[:,None] + y = ch_arr[:,0][None,:] + + is_survey = True + for i in range(1, y.size): + if y.flatten()[i]-y.flatten()[i-1] != 1: + is_survey = False + + if is_survey: + d = [] + for i in range(y.size): + sig_i = sig[:,i].flatten() + if cps: + sig_i = sig_i/total_acq_time + spect = wt.Data(name=f"{name}_spect{i}") + spect.create_variable('wl', values=wl.flatten(), units=spectral_units) + spect['wl'].label = spectlabels[spectral_units] + spect.create_channel(name='sig', values=sig_i) + spect['sig'].label = siglabels[spectral_units] + spect.create_channel(name='norm', values=helpers.norm(sig_i, 0, 1)) + spect['norm'].label = 'norm. 
' + siglabels[spectral_units].split(' (')[0] + spect.transform('wl') + spect.attrs['dtype'] = 'spectrum' + spect.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] + spect.attrs['exposure time (s)'] = acq + spect.attrs['number of accumulations'] = accum + d.append(spect) + print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} survey") + else: + if cps: + sig = sig/total_acq_time + d = wt.Data(name=name) + d.create_variable('wl', values=wl, units=spectral_units) + d['wl'].label = spectlabels[spectral_units] + d.create_channel('sig', values=sig) + d['sig'].label = siglabels[spectral_units] + d.create_variable('y', values=y, units='um') + d['y'].label = "y (µm)" + d.transform('wl', 'y') + d.attrs['dtype'] = 'spectralprofile' + d.attrs['acquisition'] = 'Horiba_' + acq_type[spectral_units] + d.attrs['exposure time (s)'] = acq + d.attrs['number of accumulations'] = accum + print(f"data from file {filepath.split('/')[-1]} is {acq_type[spectral_units]} linescan") + + if xy_cols==2: + xidx = ch_arr[:,0] + xdim = 1 + for i in range(1,ch_arr.shape[0]): + if xidx[i] != xidx[i-1]: + xdim = xdim+1 + ydim = int(ch_arr.shape[0]/xdim) + + x = np.zeros((xdim,1,1)) + y = np.zeros((1,ydim,1)) + wl = wl_arr.reshape([1,1,wl_arr.size]) + sig = np.zeros((xdim,ydim,wl_arr.size)) + + for i in range(0, ch_arr.shape[0], ydim): x[int(i/ydim),0,0] = ch_arr[i,0] + y[0,:,0] = ch_arr[:ydim,1] + for i in range(xdim): + for j in range(ydim): + sig[i,j,:] = ch_arr[i*ydim+j,2:].reshape([1,1,wl_arr.size]) + + if cps: + sig = sig/total_acq_time + d = wt.Data(name=name) + d.create_channel('sig', values=sig) + d['sig'].label = siglabels[spectral_units] + d.create_variable('x', values=x, units='um') + d['x'].label = "x (µm)" + d.create_variable('y', values=y, units='um') + d['y'].label = "y (µm)" + d.create_variable('wl', values=wl, units=spectral_units) + d['wl'].label = spectlabels[spectral_units] + d.transform('x','y','wl') + d.attrs['dtype'] = 'hyperspectral' + 
try:
    import pySPM
except ImportError:
    pySPM = None  # optional dependency; open_ita raises a helpful error when absent
import numpy as np
import WrightTools as wt


def open_ita(fpath):
    """Open an IonToF .ita archive with pySPM.

    Raises
    ------
    ImportError
        When pySPM is not installed.  (Previously the function caught
        ModuleNotFoundError — which cannot occur at the call site — printed a
        message, and then crashed with UnboundLocalError on return.)
    """
    if pySPM is None:
        raise ImportError(
            "ionTOF support is optional and was not specified at install.\n"
            "to work with iontof data, please install the optional dependencies\n"
            "`pip install git+https://github.com/wright-group/makeitwright.git[iontof]`"
        )
    return pySPM.ITA(fpath)


def _peak_label(peak):
    """Best available name for a peak: assignment, description, or center mass."""
    if peak['assign']:
        return peak['assign']
    if peak['desc']:
        return peak['desc']
    return str(int(peak['cmass'])) + 'mz'


def _stack_scans(ita, idx, nscans):
    """Stack the per-scan images of peak *idx* into an (x, y, scan) array."""
    charr = ita.getImage(idx, 0)
    for i in range(1, nscans):
        charr = np.dstack((charr, ita.getImage(idx, i)))
    return charr


def fromITA(fpath, name=None, select_channels=None):
    """Load an IonToF .ita archive into a WrightTools Data object.

    Parameters
    ----------
    fpath : path-like
        Path to the .ita file.
    name : str, optional
        Name for the created Data object.  (Previously accepted but ignored.)
    select_channels : iterable, optional
        Peak ids or assignments to load; all peaks are loaded when omitted.

    Returns
    -------
    WrightTools Data with variables x, y, scan and one channel per peak.
    """
    ita = open_ita(fpath)
    ita.show_summary()
    summ = ita.get_summary()

    xarr = np.linspace(0, summ['fov'] * 1e6, num=summ['pixels']['x'])
    yarr = np.linspace(0, summ['fov'] * 1e6, num=summ['pixels']['y'])
    nscans = int(summ['Scans'])
    scarr = np.linspace(1, nscans, num=nscans)

    # select by peak id or assignment; original duplicated the stacking loop
    if select_channels is not None:
        peaks = [p for p in summ['peaks']
                 if p['id'] in select_channels or p['assign'] in select_channels]
    else:
        peaks = summ['peaks']

    charrs = {}
    for peak in peaks:
        chname = _peak_label(peak)
        charrs[chname] = _stack_scans(ita, peak['id'], nscans)
        print("channel <" + chname + "> found")

    # BUG FIX: the name argument was never forwarded to wt.Data
    d = wt.Data() if name is None else wt.Data(name=name)
    d.create_variable(name='x', values=xarr[:, None, None], units='um')
    d.create_variable(name='y', values=yarr[None, :, None], units='um')
    # NOTE(review): scan index is labeled with units 's' as in the original —
    # confirm whether these are really seconds
    d.create_variable(name='scan', values=scarr[None, None, :], units='s')
    for chname, charr in charrs.items():
        d.create_channel(name=chname, values=charr)
    d.transform('x', 'y', 'scan')

    return d


def ITApeaks(fpath):
    """Return the peak summary list stored in an .ita archive."""
    ita = open_ita(fpath)
    return ita.get_summary()['peaks']
import pathlib

import numpy as np
import WrightTools as wt
from ..helpers import norm


def fromBruker(*filepaths):
    """Parse Bruker XRD text exports into WrightTools Data objects.

    Parameters
    ----------
    *filepaths : path-like
        One or more exported scan files.

    Returns
    -------
    list of WrightTools Data, one per readable file, each with channels
    'sig', 'norm' (1–100), and 'log' (natural log of 'norm').
    """
    d = []
    for filepath in filepaths:
        dtype = "Locked Coupled"  # Bruker default when no ScanType line is present
        header_size = None
        with open(filepath) as f:
            for i, line in enumerate(f):
                if "ScanType" in line:
                    dtype = line.split('=')[-1].strip()
                if "[Data]" in line:
                    header_size = i + 2

        if header_size is None:
            # fall back to the historical fixed-size header
            try:
                arr = np.genfromtxt(filepath, skip_header=166, delimiter=',')
                print("Data header was not identified in file. Data in instance may not reflect complete file information.")
            except (OSError, ValueError):  # was a bare except: that hid real errors
                print("Unable to read data from file due to lack of expected data header.")
                arr = np.array([])  # BUG FIX: arr was left unbound -> NameError below
        else:
            arr = np.genfromtxt(filepath, skip_header=header_size, delimiter=',')

        if arr.size > 0:
            deg_arr = arr[:, 0].flatten()
            ch_arr = arr[:, 1].flatten()
            # Path(...).name works for both str and Path inputs and on Windows;
            # the original str.split('/') crashed on Path objects
            pat = wt.Data(name=pathlib.Path(filepath).name)
            pat.create_channel('sig', values=ch_arr)
            pat.create_channel('norm', values=norm(ch_arr, 1, 100))
            pat.create_channel('log', values=np.log(norm(ch_arr, 1, 100)))
            if dtype == "Locked Coupled":
                pat.create_variable('ang', values=deg_arr, units='deg')
                pat.transform('ang')
                pat.attrs['acquisition'] = 'XRD_2theta'
            if dtype == "Z-Drive":
                pat.create_variable('z', values=deg_arr, units='mm')
                pat.transform('z')
                # NOTE(review): tag copied from the 2theta branch in the
                # original — confirm whether z-scans should carry a distinct
                # acquisition label before changing it
                pat.attrs['acquisition'] = 'XRD_2theta'
            pat.attrs['dtype'] = 'spectrum'
            d.append(pat)
        else:
            print(f'file {filepath} was loaded but had no values')

    return d
"""plotting routines for 1D data"""

import numpy as np
import WrightTools as wt
from matplotlib import pyplot as plt
from . import helpers, styles


def plot_spectra(data, **kwargs):
    """Plot one or more 1D spectra on a shared frame.

    Parameters
    ----------
    data : WrightTools Data, Collection, or list of Data
        The spectra to plot; a Collection is expanded into its members.
    **kwargs
        Override any plotting parameter (plot_type, axis, channel, ROI,
        xrange/vrange, offset, scales, ticks, colors, reference_lines, ...).
        Defaults come from styles.spectra.
    """
    if type(data) is wt.Collection:
        data = [data[key] for key in data]
    if type(data) is not list:
        data = [data]

    # plotting parameters: defaults <- styles.spectra <- caller kwargs
    params = {
        "plot_type": "line",
        "xscale": "linear",
        "xticks": True,
        "yscale": "linear",
        "yticks": True,
        "axis": 0,
        "channel": -1,
        "ROI": None,
        "xrange": None,
        "vrange": None,
        "offset": 0,
        "reference_lines": None,
        "title": None,
        "background_color": 'default',
    }
    params.update(styles.spectra)
    params.update(**kwargs)

    signed = False

    # resolve one color per dataset: explicit list (cycled to length),
    # colormap callable, or a single color repeated
    if type(params["colors"]) is list:
        colors = params["colors"]
        if len(colors) < len(data):
            q, r = divmod(len(data), len(colors))
            colors = q * colors + colors[:r]
    else:
        try:
            colors = params["colors"](np.linspace(0, 1, len(data)))
        except TypeError:  # not a colormap callable -> repeat the single color
            colors = [params["colors"] for _ in range(len(data))]

    fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height']))

    for i in range(len(data)):
        # convert axis/channel indices to natural names
        axis, = helpers.parse_args(data[i], params["axis"])

        if params['channel'] == 'prompt':
            # interactive channel selection, one prompt per dataset
            channel, = helpers.parse_args(
                data[i],
                input(f'select channel from {data[i].natural_name}: {[ch.natural_name for ch in data[i].channels]} '),
                dtype='Channel')
        else:
            channel, = helpers.parse_args(data[i], params["channel"], dtype='Channel')
        if data[i][channel].signed:
            signed = True

        out = helpers.roi(data[i], params["ROI"]) if params["ROI"] is not None else data[i]

        if params["plot_type"] == "line":
            ax.plot(out[axis][:], out[channel][:] + i * params["offset"],
                    linewidth=params["linewidth"], alpha=params["alpha"], color=colors[i])
        if params["plot_type"] == "scatter":
            ax.scatter(out[axis][:], out[channel][:] + i * params["offset"],
                       marker=params["marker"], alpha=params["alpha"], color=colors[i], s=params["marker_size"])

    if signed:
        ax.axhline(y=0, color='black', linewidth=1)

    if params["reference_lines"] is not None:
        if type(params["reference_lines"]) is not list:
            params["reference_lines"] = [params["reference_lines"]]
        for reference_line in params["reference_lines"]:
            ax.axvline(x=reference_line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5)

    # adjust plot frame
    if params["xrange"] is not None:
        xrange = params["xrange"]
    else:
        xrange = helpers.get_range(*data, reference_key=params["axis"], dtype='Axis')
    if params["xscale"] == 'log' and xrange[0] <= 0:
        xrange[0] = 0.001  # log scale cannot include nonpositive limits
    ax.set_xlim(*xrange)
    ax.set_xscale(params["xscale"])
    if params["xlabel"] is None:
        try:
            params["xlabel"] = out[axis].attrs['label']
        except KeyError:
            params["xlabel"] = 'x'
    ax.set_xlabel(params["xlabel"])
    if not params["xticks"]:
        ax.set_xticks([])

    if params["vrange"] is not None:
        vrange = params["vrange"]
    else:
        vrange = helpers.get_range(*data, reference_key=params["channel"], offset=params["offset"])
    if params["yscale"] == 'log' and vrange[0] <= 0:
        vrange[0] = 0.01
    ax.set_ylim(*vrange)
    ax.set_yscale(params["yscale"])
    if params["ylabel"] is None:
        try:
            params["ylabel"] = out[channel].attrs['label']
        except KeyError:
            params["ylabel"] = 'y'
    ax.set_ylabel(params["ylabel"])
    if not params["yticks"]:
        ax.set_yticks([])

    if params["background_color"] != 'default':
        if params["background_color"] == 'transparent' or params["background_color"] is None:
            ax.set_alpha(0)
        else:
            ax.set_facecolor(params["background_color"])
            fig.set_alpha(0)

    if params["title"] is not None:
        ax.set_title(params["title"])

    plt.show()


def plot_tandem(d1, d2, figsize=(2.6, 1), axis=0, channels=(-1, -1),
                xticks=True, yticks=(True, True), xlabel="wavelength (nm)", ylabels=("reflectance", "absorbance"),
                xrange=(400, 650), vranges=((0, 1), (0, 1)), colors=('coral', 'royalblue'),
                linewidth=1, reference_lines=None):
    """Plot two 1D datasets on twinned y axes sharing one x axis.

    channels / vranges / ylabels / yticks are (left, right) pairs applying to
    d1 and d2 respectively.  Defaults are now immutable tuples; lists are
    still accepted.
    """
    # local copy: we may fill in None entries, and must not mutate the
    # caller's list (or, previously, the shared mutable default)
    vranges = list(vranges)

    fig, ax1 = plt.subplots(figsize=figsize)
    ax2 = ax1.twinx()

    # convert axis/channel indices to natural names
    axis1, = helpers.parse_args(d1, axis)
    axis2, = helpers.parse_args(d2, axis)
    channel1, = helpers.parse_args(d1, channels[0], dtype='Channel')
    channel2, = helpers.parse_args(d2, channels[1], dtype='Channel')

    ax1.plot(d1[axis1][:], d1[channel1][:], linewidth=linewidth, color=colors[0])
    # BUG FIX: the second trace was plotted against d1's axis name (axis1),
    # which breaks whenever d2's axis is named differently
    ax2.plot(d2[axis2][:], d2[channel2][:], linewidth=linewidth, color=colors[1])

    if reference_lines is not None:
        if type(reference_lines) is not list:
            reference_lines = [reference_lines]
        for line in reference_lines:
            ax1.axvline(x=line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5)

    # adjust plot frame
    if xrange is None:
        xrange = helpers.get_range(*[d1, d2], reference_key=axis, dtype='Axis')
    ax1.set_xlim(*xrange)
    ax1.set_xlabel(xlabel)
    if not xticks:
        ax1.set_xticks([])

    for i, v in enumerate(vranges):
        if v is None:
            if i == 0:
                vranges[i] = helpers.get_range(d1, reference_key=channel1, offset=0)
            if i == 1:
                vranges[i] = helpers.get_range(d2, reference_key=channel2, offset=0)
    ax1.set_ylim(*vranges[0])
    ax2.set_ylim(*vranges[1])
    ax1.set_ylabel(ylabels[0])
    ax2.set_ylabel(ylabels[1])
    if not yticks[0]:
        ax1.set_yticks([])
    if not yticks[1]:
        ax2.set_yticks([])
import helpers, styles + + +def remove_spectral_background(data, channel, spatial_reference_range, name=None, create_background_channel=False, talkback=True): + """ + Remove background along the spatial axis using a specified range along the other axis as reference. + Creates a new channel with the background-subtracted array. + + Arguments + --------- + data : WrightTools.Data - The data. + background_axis : str or int - The axis along which the background is. + channel : str or int - The channel to subtract the background from. + reference_axis_range : list or int + + Returns + ------- + None - Creates new background-subtracted Channels in the Data instance. + """ + #identify channel and categorize axes + channel = helpers.get_channels(data, channel)[0] + spectral_axis = helpers.get_axes(data, 0)[0] + spatial_axis = helpers.get_axes(data, 1)[0] + #construct the background array + if isinstance(spatial_reference_range, int): + spectral_background = helpers.roi(data, {spatial_axis:spatial_reference_range}, return_arrs=True)[channel].reshape(data[spectral_axis].shape) + else: + spectral_background = helpers.roi(data, {spatial_axis:(spatial_reference_range,'average')}, return_arrs=True)[channel].reshape(data[spectral_axis].shape) + spatial_points = np.ones(data[spatial_axis].shape) + background = spectral_background*spatial_points + #create background-subtracted channel + if name is None: + name = f"{channel}_bkgsub_spectral" + data.create_channel(name, values=data[channel][:]-background, units=data[channel].units) + if data[channel].signed: + data[name].signed = True + if create_background_channel: + data.create_channel(f"spectral_bkg_{channel}", values=background, units=data[channel.units]) + if data[channel].signed: + data[name].signed = True + + if talkback: + print(f"subtracted spectral background from data {data.natural_name}") + +def remove_spatial_background(data, channel, spectral_reference_range, name=None, create_background_channel=False, talkback=True): + 
""" + Remove background along the spatial axis using a specified range along the other axis as reference. + Creates a new channel with the background-subtracted array. + + Arguments + --------- + data : WrightTools.Data - The data. + background_axis : str or int - The axis along which the background is. + channel : str or int - The channel to subtract the background from. + reference_axis_range : list or int + + Returns + ------- + None - Creates new background-subtracted Channels in the Data instance. + """ + #identify channel and categorize axes + channel = helpers.get_channels(data, channel)[0] + spectral_axis = helpers.get_axes(data, 0)[0] + spatial_axis = helpers.get_axes(data, 1)[0] + #construct the background array + if isinstance(spectral_reference_range, int): + spatial_background = helpers.roi(data, {spectral_axis:spectral_reference_range}, return_arrs=True)[channel].reshape(data[spatial_axis].shape) + else: + spatial_background = helpers.roi(data, {spectral_axis:(spectral_reference_range,'average')}, return_arrs=True)[channel].reshape(data[spatial_axis].shape) + spectral_points = np.ones(data[spectral_axis].shape) + background = spatial_background*spectral_points + #create background-subtracted channel + if name is None: + name = f"{channel}_bkgsub_spatial" + data.create_channel(name, values=data[channel][:]-background, units=data[channel].units) + if data[channel].signed: + data[name].signed = True + if create_background_channel: + data.create_channel(f"spatial_bkg_{channel}", values=background, units=data[channel.units]) + if data[channel].signed: + data[name].signed = True + + if talkback: + print(f"subtracted spatial background from data {data.natural_name}") + +def remove_combined_background(data, channel, spectral_reference_range, spatial_reference_range, name=None, create_background_channel=False, talkback=True): + """ + Remove background from data using a range of the spectral profile along each axis as reference. 
The background is a matrix product of the two background arrays. + """ + def __at(arr, val): + return (np.abs(arr-val)).argmin() + #identify channel and categorize axes + channel = helpers.get_channels(data, channel)[0] + spectral_axis = helpers.get_axes(data, 0)[0] + spatial_axis = helpers.get_axes(data, 1)[0] + #extract background along each axis + if isinstance(spatial_reference_range, int): + spectral_background = helpers.roi(data, {spatial_axis:spatial_reference_range}, return_arrs=True)[channel].reshape(data[spectral_axis].shape) + else: + spectral_background = helpers.roi(data, {spatial_axis:(spatial_reference_range,'average')}, return_arrs=True)[channel].reshape(data[spectral_axis].shape) + if isinstance(spectral_reference_range, int): + spatial_background = helpers.roi(data, {spectral_axis:spectral_reference_range}, return_arrs=True)[channel].reshape(data[spatial_axis].shape) + else: + spatial_background = helpers.roi(data, {spectral_axis:(spectral_reference_range,'average')}, return_arrs=True)[channel].reshape(data[spatial_axis].shape) + #compute combined background using region of overlap as a reference for magnitude + overlap_magnitude = np.average(helpers.roi(data, {0:spectral_reference_range, 1:spatial_reference_range}, return_arrs=True)[channel]) + background = spectral_background*spatial_background + spectral_range = [__at(data[spectral_axis].points, spectral_reference_range[0]), __at(data[spectral_axis].points, spectral_reference_range[1])] + spatial_range = [__at(data[spatial_axis].points, spatial_reference_range[0]), __at(data[spatial_axis].points, spatial_reference_range[1])] + overlap_background = np.average(background[spectral_range[0]:spectral_range[1],spatial_range[0]:spatial_range[1]]) + overlap_ratio = overlap_magnitude/overlap_background + background *= overlap_ratio + #create background-subtracted channel + if name is None: + name = f"{channel}_bkgsub_combined" + data.create_channel(name, values=data[channel][:]-background, 
units=data[channel].units) + if data[channel].signed: + data[name].signed = True + if create_background_channel: + data.create_channel(f"bkg_combined_{channel}", values=background, units=data[channel.units]) + if data[channel].signed: + data[name].signed = True + + if talkback: + print(f"subtracted combined background from channel {channel} of data {data.natural_name}") + +def plot_profile(data, channel, **kwargs): + + #convert axis/channel indices to natural names + channel, = helpers.parse_args(data, channel, dtype='Channel') + + #set parameters for plotting from kwargs + params = { + 'ROI' : None, + 'xticks' : None, + 'yticks' : None, + 'cbar_ticks' : None, + 'xlabel' : None, + 'ylabel' : None, + 'cbar_label' : None, + "contrast" : None, + "vrange" : None, + "reference_lines" : None, + "title" : None + } + params.update(styles.profile) + + if data[channel].signed: + params["cmap"] = mpl.cm.RdBu_r + params.update(**kwargs) + + if params["ROI"] is not None: + out = helpers.roi(data, params["ROI"]) + else: + out = data + + #determine range to be plotted + if params["vrange"] is None: + if params["contrast"] is None: + vrange = helpers.get_range(out, reference_key=channel) + else: + vrange = helpers.contrast(out, channel, params["contrast"]) + else: + vrange = params["vrange"] + + #setup x axis + if params["xlabel"] is None: + try: + params["xlabel"] = out.variables[0].attrs['label'] + except KeyError: + params["xlabel"] = 'spectrum' + + #setup y axis + if params["ylabel"] is None: + try: + params["ylabel"] = out.variables[1].attrs['label'] + except KeyError: + params["ylabel"] = 'y' + + #setup colorbar label + if params["cbar_label"] is None: + try: + params["cbar_label"] = out[channel].attrs['label'] + except KeyError: + params["cbar_label"] = 'signal' + + #setup plot frame + fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) + + #plot data + xgrid, ygrid = np.meshgrid(out.axes[0][:], out.axes[1][:]) + #array needs to be transposed before 
passing to pcolormesh because apparently no matplotlib devs thought about what arrays look like + try: + mesh = ax.pcolormesh(xgrid, ygrid, np.transpose(out[channel][:]), cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) + except TypeError: + mesh = ax.pcolormesh(xgrid, ygrid, out[channel][:], cmap=params["cmap"], vmin=vrange[0], vmax=vrange[1]) + + ax.set_xlabel(params["xlabel"]) + ax.set_ylabel(params["ylabel"]) + + if params["title"] is not None: + ax.set_title(params["title"]) + + if params["reference_lines"] is not None: + if type(params["reference_lines"]) is not list: + params["reference_lines"] = [params["reference_lines"]] + for reference_line in params["reference_lines"]: + ax.axvline(x=reference_line, linewidth=1, color='grey', linestyle='--', alpha=0.25) + + #set ticks + if not params["xticks"] and params['xticks'] is not None: + ax.set_xticks([]) + + if not params["yticks"] and params['yticks'] is not None: + ax.set_yticks([]) + + if not params['cbar_ticks'] and params['cbar_ticks'] is not None: + ticks = [] + elif params['cbar_ticks'] is None: + ticks = np.linspace(vrange[0], vrange[1], num=11) + else: + ticks = params['cbar_ticks'] + + # plot colorbar + cbar = plt.colorbar(mesh) + cbar.set_ticks(ticks) + cbar.set_label(params["cbar_label"]) + +def plot_decomposition(data, non_spatial_axis, spatial_axis, channel, **kwargs): + #convert axis/channel indices to natural names + non_spatial_axis, spatial_axis = helpers.parse_args(data, non_spatial_axis, spatial_axis) + channel, = helpers.parse_args(data, channel, dtype='Channel') + + #set parameters for plotting from kwargs + params = { + "ROI" : None, + "binning" : None, + "xrange" : None, + "vrange" : None, + "yscale" : 'linear', + "reference_lines" : None, + "xticks" : True, + "yticks" : True, + "title" : None + } + params.update(styles.decomposition) + params.update(**kwargs) + + #extract ROI + if params["ROI"] is not None: + out = helpers.roi(data, params["ROI"]) + else: + out = data + + #identify 
spatial ranges for indexing + xrange = helpers.get_range(out, reference_key=non_spatial_axis, dtype='Axis') + + #setup plot frame + fig, ax = plt.subplots(figsize=(params['fig_width'], params['fig_height'])) + + #plot data + if params["binning"] is not None: + if params["binning"] == 'average': + arr_out = np.sum(out[channel][:], axis=1)/np.count_nonzero(np.sum(out[channel][:], axis=0)) + if params["binning"] == 'sum': + arr_out = np.sum(out[channel][:], axis=1) + #determine range to be plotted + vrange = helpers.vrange(arr_out, out[channel].signed, window=1) + + ax.plot(out[non_spatial_axis].points, arr_out, + params["marker"], linewidth=params["linewidth"], alpha=1, color=params["color"]) + else: + #determine range to be plotted + vrange = helpers.get_range(out, reference_key=channel) + for i in range(out[spatial_axis].size): + if np.sum(out[channel][:,i]) != 0: + ax.plot(out[non_spatial_axis][:].flatten(), out[channel][:,i], + params["marker"], linewidth=params["linewidth"], alpha=params["alpha"], color=params["color"]) + + if out[channel].signed: + ax.axhline(y=0, color='black', linewidth=1) + + if params["reference_lines"] is not None: + if type(params["reference_lines"]) is not list: + params["reference_lines"] = [params["reference_lines"]] + for reference_line in params["reference_lines"]: + ax.axvline(x=reference_line, zorder=0, linewidth=1, color='grey', linestyle='--', alpha=0.5) + + if params["xrange"] is not None: + xrange = params["xrange"] + ax.set_xlim(*xrange) + if not params["xticks"]: + ax.set_xticks([]) + if params["vrange"] is not None: + vrange = params["vrange"] + ax.set_ylim(*vrange) + ax.set_yscale(params["yscale"]) + if not params["yticks"]: + ax.set_yticks([]) + + # label plot + ax.set_xlabel(params["xlabel"]) + ax.set_ylabel(params["ylabel"]) + if params["title"] is not None: + ax.set_title(params["title"]) \ No newline at end of file diff --git a/makeitwright/core/styles.py b/makeitwright/core/styles.py new file mode 100644 index 
0000000..1930c97 --- /dev/null +++ b/makeitwright/core/styles.py @@ -0,0 +1,215 @@ +import matplotlib.cm as cm +import cmocean + + +beckerhickl_transient = { + "xlabel" : "time (ns)", + "vreflines" : 0, + "marker" : '.', + "markersize" : 3 + } + +profile = { + "fig_width" : 4, + "fig_height" : 3, + "cmap" : cm.viridis, + "xlabel" : None, + "ylabel" : None, + "cbar_label" : None, + "cbar_ticks" : None + } + +profile_andor = { + "fig_width" : 4, + "fig_height" : 3 + } + +profile_horiba = { + "fig_width" : 6.5, + "cbar_ticks" : None + } + +profile_horiba_PL = profile_horiba|{ + "fig_height" : 6.5, + "cmap" : cm.viridis, + "xlabel" : "wavelength (nm)", + "ylabel" : "y (µm)", + "cbar_label" : "PL intensity (cps)" + } + +profile_horiba_Raman = profile_horiba|{ + "fig_height" : 6.5, + "cmap" : cm.inferno, + "xlabel" : "Raman shift (cm\u207b\u2071)", + "ylabel" : "y (µm)", + "cbar_label" : "scattering intensity (cps)" + } + +profile_horiba_timed_series = profile_horiba|{ + "fig_height" : 10, + "cmap" : cm.viridis, + "xlabel" : "wavelength (nm)", + "ylabel" : "excitation time (s)", + "cbar_label" : "PL intensity (cps)" + } + +profile_iontof = { + "fig_width" : 6.5, + "fig_height" : 3.5, + "cmap" : cmocean.cm.matter, + "xlabel" : "distance (µm)", + "ylabel" : "sputtering time (s)", + "cbar_label" : "SI counts", + "cbar_ticks" : None + } + +image = { + "fig_width" : 4, + "fig_height" : 4, + "cmap" : cm.Greys_r, + "xlabel" : None, + "ylabel" : None, + "cbar_label" : "signal (a.u.)", + "ticks" : 'auto' + } + +image_andor = { + "fig_width" : 4, + "fig_height" : 4, + "xlabel" : None, + "ylabel" : None + } + +image_horiba = { + "fig_width" : 6.5, + "fig_height" : 6.5, + "ticks" : 'auto' + } + +image_horiba_PL = image_horiba|{ + "cmap" : cm.viridis, + "xlabel" : "x (µm)", + "ylabel" : "y (µm)", + "cbar_label" : "PL intensity (cps)" + } + +image_horiba_Raman = image_horiba|{ + "cmap" : cm.inferno, + "xlabel" : "x (µm)", + "ylabel" : "y (µm)", + "cbar_label" : "scattering intensity 
(cps)" + } + +image_iontof = { + "fig_width" : 6.5, + "fig_height" : 6.5, + "cmap" : cmocean.cm.matter, + "xlabel" : "x (µm)", + "ylabel" : "y (µm)", + "cbar_label" : "SI counts", + "ticks" : 'auto' + } + +decomposition = { + "fig_width" : 6.5, + "fig_height" : 3.0, + "linewidth" : 1.0, + "marker" : '-', + "color" : 'black', + "alpha" : 0.01, + "xlabel" : "spectrum (a.u.)", + "ylabel" : "signal (a.u.)" + } + +decomposition_andor = { + "fig_width" : 6.5, + "fig_height" : 3.5, + "linewidth" : 1.0, + "marker" : '-', + "color" : 'red', + "alpha" : 0.01, + "xlabel" : "wavelength (nm)", + } + +decomposition_andor_A = decomposition_andor|{ + "ylabel" : "absorbance" + } + +decomposition_andor_PL = decomposition_andor|{ + "ylabel" : "PL intensity (cps)" + } + +decomposition_andor_R = decomposition_andor|{ + "ylabel" : "reflectance" + } + +decomposition_andor_RR0 = decomposition_andor|{ + "ylabel" : "reflection contrast" + } + +decomposition_andor_T = decomposition_andor|{ + "ylabel" : "transmittance" + } + +decomposition_horiba = { + "fig_width" : 6.5, + "fig_height" : 3.5, + "linewidth" : 1.0, + "marker" : '-', + "color" : 'red', + "alpha" : 0.01 + } + +decomposition_horiba_PL = decomposition_horiba|{ + "xlabel" : "wavelength (nm)", + "ylabel" : "PL intensity (cps)", + } + +decomposition_horiba_Raman = decomposition_horiba|{ + "xlabel" : "Raman shift (cm\u207b\u2071)", + "ylabel" : "scattering intensity (cps)", + } + +decomposition_iontof = { + "fig_width" : 6.5, + "fig_height" : 2.0, + "linewidth" : 1.0, + "marker" : '.', + "color" : 'red', + "alpha" : 1, + "xlabel" : "sputtering time (s)", + "ylabel" : "SI counts" + } + +spectra = { + "plot_type" : "line", + "fig_width" : 4, + "fig_height" : 3, + "linewidth" : 2, + "marker" : '.', + "alpha" : 1, + "marker_size" : 5, + "colors" : cm.Set1, + "xlabel" : None, + "ylabel" : None + } + +spectra_TRPL = spectra|{ + "plot_type" : "scatter", + "yscale" : "log", + "xlabel" : "t (ns)", + "ylabel" : "norm. 
counts", + "colors" : cm.Set2, + "marker" : '.', + "marker_size" : 3, + "reference_lines" : 0 + } + +spectra_XRD_pattern = spectra|{ + "fig_height" : 3, + "marker" : 'o', + "marker_size" : 3, + "colors" : cm.Set1, + "xlabel" : "diffraction angle (deg. 2\u03B8)", + "ylabel" : "intensity (a.u.)" + } \ No newline at end of file diff --git a/makeitwright/horiba.py b/makeitwright/horiba.py index 780e377..04c4570 100644 --- a/makeitwright/horiba.py +++ b/makeitwright/horiba.py @@ -1,8 +1,8 @@ import numpy as np import WrightTools as wt -import makeitwright.lib.styles as styles +import makeitwright.core.styles as styles -from .lib import spectralprofile, hyperspectral +from .core import spectralprofile, hyperspectral def central_wavelength(data): raise NotImplementedError diff --git a/makeitwright/iontof.py b/makeitwright/iontof.py index 973add3..cd5794b 100644 --- a/makeitwright/iontof.py +++ b/makeitwright/iontof.py @@ -1,6 +1,6 @@ import numpy as np import cmocean -from .lib import hyperspectral, styles, helpers +from .core import hyperspectral, styles, helpers def relative_proportion(data, channel0, channel1): diff --git a/makeitwright/xrd.py b/makeitwright/xrd.py index 5f4cabf..7b6e512 100644 --- a/makeitwright/xrd.py +++ b/makeitwright/xrd.py @@ -3,8 +3,8 @@ from scipy.optimize import curve_fit from scipy.stats import pearsonr import WrightTools as wt -from .lib import spectra, styles -from .lib.helpers import norm, roi +from .core import spectra, styles +from .core.helpers import norm, roi pi = np.pi From 9b98b34fb4f882edcedf280fd32fa1708913a599 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 16:45:30 -0500 Subject: [PATCH 06/20] Create .pre-commit-config.yaml --- .pre-commit-config.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..ee53cf0 --- 
/dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,18 @@ +repos: + - repo: https://github.com/psf/black + rev: 24.10.0 # Replace by any tag/version: https://github.com/psf/black/tags + hooks: + - id: black + language_version: python3 # Should be a command that runs python3.6+ + args: ["--line-length", "99"] + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + exclude: datasets|.csv$ + - id: no-commit-to-branch + args: [-b master] + +default_language_version: + python: python3 From 6010574668352b424b39d91a67a3863079ba929a Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 16:48:38 -0500 Subject: [PATCH 07/20] some cleanup --- .pre-commit-config.yaml | 1 - makeitwright/beckerhickl.py | 2 -- makeitwright/horiba.py | 2 -- makeitwright/xrd.py | 5 +---- 4 files changed, 1 insertion(+), 9 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ee53cf0..486e1a8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,7 +10,6 @@ repos: rev: v5.0.0 hooks: - id: trailing-whitespace - exclude: datasets|.csv$ - id: no-commit-to-branch args: [-b master] diff --git a/makeitwright/beckerhickl.py b/makeitwright/beckerhickl.py index dfd50b0..7e909ff 100644 --- a/makeitwright/beckerhickl.py +++ b/makeitwright/beckerhickl.py @@ -9,8 +9,6 @@ from .core import styles - - def get_fits(data, channel='norm', function='biexp'): def exp(t, a, td): return a*np.exp(-t/td) diff --git a/makeitwright/horiba.py b/makeitwright/horiba.py index 04c4570..312593a 100644 --- a/makeitwright/horiba.py +++ b/makeitwright/horiba.py @@ -1,5 +1,3 @@ -import numpy as np -import WrightTools as wt import makeitwright.core.styles as styles from .core import spectralprofile, hyperspectral diff --git a/makeitwright/xrd.py b/makeitwright/xrd.py index 7b6e512..518c87e 100644 --- a/makeitwright/xrd.py +++ b/makeitwright/xrd.py @@ -7,14 +7,11 @@ from 
.core.helpers import norm, roi -pi = np.pi - - def get_fits(data, channel='norm', function='gauss', xrange='all'): def gauss(x, a, u, s): return a*np.exp(-((x-u)/(2*s))**2) def cauchy(x, a, u, s): - return a/(pi*s*(1+((x-u)/s)**2)) + return a/(np.pi*s*(1+((x-u)/s)**2)) functions = { 'gauss' : gauss, From ff88015e3c6ff8d23b9bbf8e733825d94262a273 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 17:01:43 -0500 Subject: [PATCH 08/20] Create pl_T_R_A.py --- examples/pl_T_R_A.py | 73 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 examples/pl_T_R_A.py diff --git a/examples/pl_T_R_A.py b/examples/pl_T_R_A.py new file mode 100644 index 0000000..305c64d --- /dev/null +++ b/examples/pl_T_R_A.py @@ -0,0 +1,73 @@ +import pathlib +import matplotlib as mpl +import makeitwright as mw + + +roi = mw.helpers.roi +parse = mw.parsers.parse +andor = mw.andor +becker = mw.beckerhickl +plot = mw.spectra.plot_spectra + +fp = pathlib.Path().expanduser().resolve() / r"Desktop/Research Data/Wright Table/Original" + +# set plotting parameters +mpl.rcParams['font.sans-serif'] = "Arial" +mpl.rcParams['font.family'] = "sans-serif" +mpl.rcParams['font.size'] = 14 +mpl.rcParams['figure.dpi'] = 300 +mpl.rcParams['lines.linewidth'] = 4 +mpl.rcParams['pcolor.shading'] = 'auto' +mpl.rcParams['figure.dpi'] = 150 + +if True: # Plot PL + data = parse(fp, keywords='4 hr.asc') + PL_ROI = roi(data, {'y': ([1021, 1047], 'average')}) + plot(PL_ROI, channel=0, xrange=[500, 850]) + PL_output = open(fp / '4hr.txt', 'w') + PL_dataTrace = zip(PL_ROI.axes[0], PL_ROI.channels[0]) + for x in PL_dataTrace: + PL_output.write(str(x[0])+'\t') + PL_output.write(str(x[1])+'\n') + PL_output.close() + +if True: # Plot T/R/A + data = parse(fp / 'For Chris/23_11_21/4ClPEASnI n1', keywords='Object 3') + R = data[2] + R_back = data[1] + T = data[4] + T_back = data[3] + + andor.compute_reflectance(R, R_back, 
dark_wavelength_range=None) + y_profile = roi(R, {'wl': ([580, 750], 'sum')}) # If need to check object area + plot(y_profile) + plot(R, channel=1, ROI={'y': ([1020, 1070], 'average')}, xrange=[580, 750]) #Currently at 10 x mag + R_ROI = roi(R, {'y': ([1020, 1070], 'average')}) + R_output = open(fp / 'For Chris/23_11_21/4ClPEASnI n1/Object 3 R processed.txt', 'w') + R_dataTrace = zip(R_ROI.axes[0], R_ROI.channels[1]) + for x in R_dataTrace: + R_output.write(str(x[0])+'\t') + R_output.write(str(x[1])+'\n') + R_output.close() + + andor.compute_transmittance(T, T_back, dark_wavelength_range=None) + # y_profile = roi(T, {'wl': ([400, 500], 'sum')}) # If need to check object area + # plot(y_profile) + plot(T, channel=1, ROI={'y': ([1020, 1070], 'average')}, xrange=[580, 750]) # Current 10x mag, 100x mag 54-70 + T_ROI = roi(T, {'y': ([1020, 1070], 'average')}) + T_output = open(fp / 'For Chris/23_11_21/4ClPEASnI n1/Object 3 T Processed.txt', 'w') + T_dataTrace = zip(T_ROI.axes[0], T_ROI.channels[1]) + for x in T_dataTrace: + T_output.write(str(x[0])+'\t') + T_output.write(str(x[1])+'\n') + T_output.close() + # + andor.compute_absorbance(R, T) + A_output = open(fp / 'For Chris/23_11_21/4ClPEASnI n1/Object 3 A processed.txt', 'w') + A_ROI = roi(T, {'y': ([1020, 1070], 'average')}) # A is channel 2 in both R and T data objects + plot(R, channel=2, ROI={'y': ([1020, 1070], 'average')}, xrange=[580, 750]) #Current 10x mag. 
can add vrange + A_dataTrace = zip(A_ROI.axes[0], A_ROI.channels[2]) + for x in A_dataTrace: + A_output.write(str(x[0])+'\t') + A_output.write(str(x[1])+'\n') + A_output.close() From e12f9d3c334ee4dab11f6ea128924763f4d93cba Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 17:01:52 -0500 Subject: [PATCH 09/20] Delete main2.py --- examples/main2.py | 82 ----------------------------------------------- 1 file changed, 82 deletions(-) delete mode 100644 examples/main2.py diff --git a/examples/main2.py b/examples/main2.py deleted file mode 100644 index 5ba3adc..0000000 --- a/examples/main2.py +++ /dev/null @@ -1,82 +0,0 @@ -import pathlib -import numpy as np -import matplotlib.cm as cms -import cmocean.cm as cmo -from scipy.signal import savgol_filter as savgol -from scipy.signal import medfilt2d -from scipy.optimize import curve_fit -from scipy.stats import pearsonr, spearmanr, ttest_ind -import WrightTools as wt - -import makeitwright.process.andor as andor -import makeitwright.process.beckerhickl as becker -import makeitwright.process.spectralprofile - -from makeitwright.process.helpers import show, roi, norm, set_label -from makeitwright.parsers import parse -from makeitwright.artists import setparams, setdpi -from makeitwright.spectra import plot_spectra as plot - -setparams() -setdpi(150) - -fp = pathlib.Path().expanduser().resolve() / r"Desktop/Research Data/Wright Table/Original" # filepath name to folder - - -# -if True: # Plot PL - data = parse(fp, keywords='4 hr.asc') - #andor.correct_PL_background(data, ybkg=[0, 20]) - #y_profile = roi(data, {'wl': ([400, 800], 'sum')}) # If need to check object area - #plot(y_profile) - PL_ROI = roi(data, {'y': ([1021, 1047], 'average')}) - plot(PL_ROI, channel=0, xrange=[500, 850]) - PL_output = open('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/4hr.txt', 'w') - PL_dataTrace = zip(PL_ROI.axes[0], PL_ROI.channels[0]) - for x in PL_dataTrace: - 
PL_output.write(str(x[0])+'\t') - PL_output.write(str(x[1])+'\n') - PL_output.close() - -# -if True: # Plot T/R/A - data = parse('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/For Chris/23_11_21/4ClPEASnI n1', keywords='Object 3') - R = data[2] - R_back = data[1] - T = data[4] - T_back = data[3] - - - andor.compute_reflectance(R, R_back, dark_wavelength_range=None) - y_profile = roi(R, {'wl': ([580, 750], 'sum')}) # If need to check object area - plot(y_profile) - plot(R, channel=1, ROI={'y': ([1020, 1070], 'average')}, xrange=[580, 750]) #Currently at 10 x mag - R_ROI = roi(R, {'y': ([1020, 1070], 'average')}) - R_output = open('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/For Chris/23_11_21/4ClPEASnI n1/Object 3 R processed.txt', 'w') - R_dataTrace = zip(R_ROI.axes[0], R_ROI.channels[1]) - for x in R_dataTrace: - R_output.write(str(x[0])+'\t') - R_output.write(str(x[1])+'\n') - R_output.close() - - andor.compute_transmittance(T, T_back, dark_wavelength_range=None) - # y_profile = roi(T, {'wl': ([400, 500], 'sum')}) # If need to check object area - # plot(y_profile) - plot(T, channel=1, ROI={'y': ([1020, 1070], 'average')}, xrange=[580, 750]) # Current 10x mag, 100x mag 54-70 - T_ROI = roi(T, {'y': ([1020, 1070], 'average')}) - T_output = open('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/For Chris/23_11_21/4ClPEASnI n1/Object 3 T Processed.txt', 'w') - T_dataTrace = zip(T_ROI.axes[0], T_ROI.channels[1]) - for x in T_dataTrace: - T_output.write(str(x[0])+'\t') - T_output.write(str(x[1])+'\n') - T_output.close() - # - andor.compute_absorbance(R, T) - A_output = open('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/For Chris/23_11_21/4ClPEASnI n1/Object 3 A processed.txt', 'w') - A_ROI = roi(T, {'y': ([1020, 1070], 'average')}) # A is channel 2 in both R and T data objects - plot(R, channel=2, ROI={'y': ([1020, 1070], 'average')}, xrange=[580, 750]) #Current 10x mag. 
can add vrange - A_dataTrace = zip(A_ROI.axes[0], A_ROI.channels[2]) - for x in A_dataTrace: - A_output.write(str(x[0])+'\t') - A_output.write(str(x[1])+'\n') - A_output.close() From 0546c0f508c24762f9d25b5c043327b1f8b16222 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 17:04:10 -0500 Subject: [PATCH 10/20] Create environment.yml --- environment.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 environment.yml diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000..be807f1 --- /dev/null +++ b/environment.yml @@ -0,0 +1,3 @@ +psutil +wrighttools +cmocean From dc3fc5455da66191a9da5e7294a3a9973a670311 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 17:18:28 -0500 Subject: [PATCH 11/20] fixes from flake8 simply example script issues --- .github/workflows/python-package-conda.yml | 34 ---------------------- environment.yml | 3 -- examples/main.py | 9 +++--- examples/workup.py | 12 +++++++- 4 files changed, 16 insertions(+), 42 deletions(-) delete mode 100644 .github/workflows/python-package-conda.yml delete mode 100644 environment.yml diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml deleted file mode 100644 index f358604..0000000 --- a/.github/workflows/python-package-conda.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: Python Package using Conda - -on: [push] - -jobs: - build-linux: - runs-on: ubuntu-latest - strategy: - max-parallel: 5 - - steps: - - uses: actions/checkout@v4 - - name: Set up Python 3.10 - uses: actions/setup-python@v3 - with: - python-version: '3.10' - - name: Add conda to system path - run: | - # $CONDA is an environment variable pointing to the root of the miniconda directory - echo $CONDA/bin >> $GITHUB_PATH - - name: Install dependencies - run: | - conda env update --file environment.yml --name base - - name: Lint with flake8 - run: 
| - conda install flake8 - # stop the build if there are Python syntax errors or undefined names - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Test with pytest - run: | - conda install pytest - pytest diff --git a/environment.yml b/environment.yml deleted file mode 100644 index be807f1..0000000 --- a/environment.yml +++ /dev/null @@ -1,3 +0,0 @@ -psutil -wrighttools -cmocean diff --git a/examples/main.py b/examples/main.py index 1210156..8d21758 100644 --- a/examples/main.py +++ b/examples/main.py @@ -1,11 +1,12 @@ import matplotlib as mpl from pathlib import Path -from makeitwright.process import andor +import makeitwright as mw -from makeitwright.process.helpers import show, roi, set_label -from makeitwright.parsers import parse -from makeitwright.artists import plot +andor = mw.andor +roi = mw.helpers.roi +parse = mw.parsers.parse +plot = mw.artists.plot user_path = Path().expanduser().resolve() diff --git a/examples/workup.py b/examples/workup.py index 07229de..57569c4 100644 --- a/examples/workup.py +++ b/examples/workup.py @@ -8,6 +8,15 @@ from scipy.signal import savgol_filter from matplotlib import pyplot as plt +import makeitwright as mw + + +parse = mw.parsers.parse +__at = mw.helpers.find_nearest() +roi = mw.helpers.roi +set_label = mw.helpers.set_label +norm = mw.helpers.norm + def lorentz_fit_2(data, channel='darksub', xrange='all', bounds=None, plot=False): @@ -136,6 +145,7 @@ def test2(x, a1, u1, s1, a2, u2, s2): def residual(a, fit): return (a-fit)/a*100 + base = pathlib.Path().expanduser().resolve() / r'OneDrive/Documents/UW/research/data local/WG-microscope/biexciton-fluence-dependent-PL_20220909' fn1 = base / "n1BA" fn2 = base / 'n2BAMA_CRRsample' @@ -201,7 +211,7 @@ def residual(a, fit): wlbkgsub = (d['sig'][:]-wlbkg)/exposure 
d.create_channel('constantbkgsub',values=constantbkgsub) d.create_channel('wlbkgsub',values=wlbkgsub) - set_label(d, 'wlbkgsub', "PL intensity (cps)") + # set_label(d, 'wlbkgsub', "PL intensity (cps)") bn1 = roi(n1raw,{'y':'sum'}) bn2 = roi(n2raw,{'y':'sum'}) From 508f0713c57ed4b854172eb9d2027fbd60262941 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 17:18:50 -0500 Subject: [PATCH 12/20] example fixes --- examples/AbsProcessing.py | 17 ++++++++++------- examples/PLProcessing.py | 13 ++++++------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/examples/AbsProcessing.py b/examples/AbsProcessing.py index a512722..934bd09 100644 --- a/examples/AbsProcessing.py +++ b/examples/AbsProcessing.py @@ -2,15 +2,18 @@ # Process Reflectance/Transmittance/Absorbance Data from Wright group Microscope import pathlib -import makeitwright.process.andor as andor -from makeitwright.process.helpers import roi -from makeitwright.parsers import parse -from makeitwright.artists import setparams, setdpi -from makeitwright.spectra import plot_spectra as plot +import makeitwright as miw +import matplotlib as mpl +# import makeitwright.process.andor as andor +# from makeitwright.process.helpers import roi +# from makeitwright.parsers import parse +# from makeitwright.artists import setparams, setdpi +# from makeitwright.spectra import plot_spectra as plot + + +mpl.rc(dpi=150) -setparams() -setdpi(150) filepath = pathlib.Path().expanduser().resolve() / "Desktop/Research Data/Wright Table/Original/test" filename_R = "PEAPbBr4 R" diff --git a/examples/PLProcessing.py b/examples/PLProcessing.py index 559fc0d..bb061bb 100644 --- a/examples/PLProcessing.py +++ b/examples/PLProcessing.py @@ -2,15 +2,14 @@ # Process PL Data from Wright group import pathlib -import makeitwright.process.andor as andor -from makeitwright.process.helpers import roi -from makeitwright.parsers import parse -from makeitwright.artists import 
setparams, setdpi -from makeitwright.spectra import plot_spectra as plot +import makeitwright as mw -setparams() -setdpi(150) +andor = mw.andor +roi = mw.helpers.roi +parse = mw.parsers.parse +plot = mw.spectra.plot_spectra + filepath = pathlib.Path().expanduser() / "Desktop" / "Research Data" / "Wright Table" / "Original" / "test" filename = "PEAPbI on FPEASnI PL 77K 4 2 hour wait for cool" From a90bcf98956c56d472e0b9dfa8c7e0369738d608 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 17:25:25 -0500 Subject: [PATCH 13/20] flake8: fix bad references --- examples/AbsProcessing.py | 12 ++++++------ makeitwright/core/parsers/__init__.py | 4 ++-- makeitwright/core/parsers/horiba.py | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/AbsProcessing.py b/examples/AbsProcessing.py index 934bd09..50bde2f 100644 --- a/examples/AbsProcessing.py +++ b/examples/AbsProcessing.py @@ -2,14 +2,14 @@ # Process Reflectance/Transmittance/Absorbance Data from Wright group Microscope import pathlib -import makeitwright as miw +import makeitwright as mw import matplotlib as mpl -# import makeitwright.process.andor as andor -# from makeitwright.process.helpers import roi -# from makeitwright.parsers import parse -# from makeitwright.artists import setparams, setdpi -# from makeitwright.spectra import plot_spectra as plot + +andor = mw.andor +roi = mw.helpers.roi +parse = mw.parsers.parse +plot = mw.spectra.plot_spectra mpl.rc(dpi=150) diff --git a/makeitwright/core/parsers/__init__.py b/makeitwright/core/parsers/__init__.py index c3cf90b..0e018fa 100644 --- a/makeitwright/core/parsers/__init__.py +++ b/makeitwright/core/parsers/__init__.py @@ -26,8 +26,8 @@ def typeID(*fpaths): with open(fpath) as f: txt = f.read() if "LabRAM HR" in txt: - if horiba.typeID(fpath) is not None: - types[fpath] = horiba_typeID(fpath) + if (htype := horiba_typeID(fpath)) is not None: + types[fpath] = htype if "Goniometer" 
in txt: types[fpath] = 'Bruker_XRD' if "[m]" in txt: diff --git a/makeitwright/core/parsers/horiba.py b/makeitwright/core/parsers/horiba.py index b2ea375..b3a2f7b 100644 --- a/makeitwright/core/parsers/horiba.py +++ b/makeitwright/core/parsers/horiba.py @@ -123,7 +123,7 @@ def fromLabramHR(filepath, name=None, cps=False): spect['wl'].label = spectlabels[spectral_units] spect.create_channel(name='sig', values=sig_i) spect['sig'].label = siglabels[spectral_units] - spect.create_channel(name='norm', values=helpers.norm(sig_i, 0, 1)) + spect.create_channel(name='norm', values=norm(sig_i, 0, 1)) spect['norm'].label = 'norm. ' + siglabels[spectral_units].split(' (')[0] spect.transform('wl') spect.attrs['dtype'] = 'spectrum' From 0f91b5316d3d47b825b4a8ad8f3a7e24040e9ab6 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 18:14:40 -0500 Subject: [PATCH 14/20] give a test --- pyproject.toml | 5 +++++ tests/import.py | 5 +++++ 2 files changed, 10 insertions(+) create mode 100644 tests/import.py diff --git a/pyproject.toml b/pyproject.toml index ef3ffeb..f7a5929 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,3 +30,8 @@ classifiers = [ [project.optional-dependencies] iontof = ["pySPM"] + +[tool.pytest.ini_options] +testpaths = [ + "tests", +] \ No newline at end of file diff --git a/tests/import.py b/tests/import.py new file mode 100644 index 0000000..0c27ebe --- /dev/null +++ b/tests/import.py @@ -0,0 +1,5 @@ +import makeitwright as mw + + +def test_import(): + mw.helpers.roi From fab83b29e0a1abbcc914d11f98e6f6a945fe6dcf Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 18:18:09 -0500 Subject: [PATCH 15/20] Create __init__.py --- tests/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/__init__.py diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 From 
760d297819ac70e07c917fe38866bde59e0f8bb7 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 18:21:44 -0500 Subject: [PATCH 16/20] rename to troubleshoot pytest --- tests/{import.py => test_import.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/{import.py => test_import.py} (100%) diff --git a/tests/import.py b/tests/test_import.py similarity index 100% rename from tests/import.py rename to tests/test_import.py From 7d11a944a7dd6a2963917e87ad413c54df7f55fb Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 21:40:04 -0500 Subject: [PATCH 17/20] Update python-app.yml --- .github/workflows/python-app.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml index 1168bd9..38c230c 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/python-app.yml @@ -28,6 +28,7 @@ jobs: python -m pip install --upgrade pip pip install flake8 pytest if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + pip install . 
- name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names From 40b52f296faa2d22152209146516c6d8bc7b0215 Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 21:42:06 -0500 Subject: [PATCH 18/20] Update andor.py --- makeitwright/andor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/makeitwright/andor.py b/makeitwright/andor.py index c472b67..77d6c56 100644 --- a/makeitwright/andor.py +++ b/makeitwright/andor.py @@ -3,7 +3,6 @@ from .core import image, spectralprofile, styles from .core.helpers import roi, set_label, get_channels -import makeitwright.core.styles as styles APD_PIXEL = (1325, 1080) From ad454eb8f91b09ffde938b0b7c70696aca1f35ac Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 21:43:54 -0500 Subject: [PATCH 19/20] Update horiba.py --- makeitwright/horiba.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/makeitwright/horiba.py b/makeitwright/horiba.py index 312593a..322a23e 100644 --- a/makeitwright/horiba.py +++ b/makeitwright/horiba.py @@ -1,6 +1,5 @@ -import makeitwright.core.styles as styles +from .core import spectralprofile, hyperspectral, styles -from .core import spectralprofile, hyperspectral def central_wavelength(data): raise NotImplementedError From fc65852c7a21fec9ee8910615a648507aecc245f Mon Sep 17 00:00:00 2001 From: Daniel Kohler <11864045+ddkohler@users.noreply.github.com> Date: Mon, 18 Aug 2025 21:45:21 -0500 Subject: [PATCH 20/20] Delete __init__.py --- tests/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/__init__.py diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29..0000000