Source code for scan_framework.models.scan_model

from artiq.experiment import *
from scan_framework.models.model import *
from scan_framework.models.hist_model import *
from scan_framework.models.fit_model import *
import numpy as np
import scipy.stats as stats
from math import *
import logging


[docs]class ScanModel(Model): """ An extension of the :class:`~scan_framework.models.model.Model` class that handles initializing and mutating scan datasets, calculating statistics on scan data, and performing fits on the scan data. All scan models inherit from this class. **Functions performed during a scan** The ScanModel class performs the following functions when a scan is run. - Initializes all datasets for a scan. - Mutates the dataset containing raw counts measured at each scan point. - Calculates statistics (means, errors, histograms) at each scan point and mutates associated datasets. - Performs a fit on the mean values using the :mod:`~scan_framework.analysis.curvefits` package and saves all data generated by the :func:`~scan_framework.analysis.curvefits.Fit.fit_data` method to datasets. - All statistical and fit data are mirrored to the `current_scan` and `current_hist` namespaces. - Gives a standardized way of fetching the main fit (e.g. pi_time or res_frequency of a transition) **Organization of datasets** Datasets are organized under the namespace by the following hierarchy. - **<namespace>** All data is stored under this location which is specified in the child class. - **<namespace>.stats** Contains statistical data, raw data from the scan, and the list of scan points. - **<namespace>.points** List of scan points. - **<namespace>.counts** Raw counts recorded at each scan point and repetition. - **<namespace>.mean** Mean count values calculated at each scan point. - **<namespace>.error** Standard deviation of each mean value in the <namespace>.mean array. - **<namespace>.hist** Binned mean values at each scan point. Each entry is the histogram at the corresponding scan point. - **<namespace>.bins** Defines the bin boundaries for histograms. - **<namespace>.nbins** The number of bins to use for histograms. - **<namespace>.fits:** Contains all fit data - **<namespace>.fits.params** The value of each fitted parameter. - **<namespace>.fits.guesses** The guess that was used for each fitted parameter. - **<namespace>.fits.errors** The estimated error in each fitted parameter. - **<namespace>.fits.fitline** The line of best fit to the data. - **<namespace>.fits.analysis** The standard error in the regression and coefficient of determination (R^2) values of the fit. - **<namespace>.plots:** Contains all data necessary to plot the scan - **<namespace>.plots.x:** x data of the plot. - **<namespace>.plots.y:** y data of the plot. - **<namespace>.plots.plot_title:** Title of the plot. - **<namespace>.plots.x_label:** Label of the x-axis. - **<namespace>.plots.y_label:** Label of the y-axis. - **<namespace>.plots.x_units:** Units to display on the x-axis. - **<namespace>.plots.y_units:** Units to display on the y-axis. - **<namespace>.plots.x_scale:** x-axis is scaled by this amount. - **<namespace>.plots.y_scale:** y-axis is scaled by this amount. - **<namespace>.plots.trigger:** Plots are redrawn by :mod:`~scan_framework.applets.plot_xy` only when trigger is set to 1. - **<namespace>.plots.fitline:** The line of best fit. - **<namespace>.%main_fit** Contains the main fitted parameter of the scan. `%main_fit` is replaced by the value of the models 'main_fit' attribute (see dynamic namespaces below). For example, a microwave frequency scan might have 'main_fit' set to `frequency` and the fitted frequency would be saved to `microwaves.3322.frequency`. - **<namespace>.defaults:** Contains default values for the main fit which are used when a fitted value does not exist. - **current_scan:** Namespace under which a mirror of all data generated is stored. This is used by the current_scan applet to display results of the current scan. **Dynamic namespace syntax** Example: >>> model = ScanModel(namespace="raman.%transition.%type") >>> model.transition = 'm1_rsb' >>> model.type = 'frequency' >>> model.bind() >>> print(model.namespace) # prints 'raman.m1_rsb.frequency' %transition and %type are tokens that name an attribute of the model. After calling self.bind() on the model, the %transition and %type tokens are replaced by self.transition and self.type and the final namespace is "raman.m1_rsb.frequency". All future datasets will be stored and accessed under the 'raman.m1_rsb.frequency' namespace. :param namespace: Dataset key under which all datasets are created, defaults to None :type namespace: string :param mirror_namespace: Dataset key under which all datasets are mirrored -- datasets under this key are plotted by the current scan applet, defaults to 'current_scan' :type mirror_namespace: string :param broadcast: If True all datasets besides the main fit dataset are broadcast when created, defaults to False :type broadcast: bool, optional :param persist: If True all datasets besides the main fit dataset are persisted, defaults to False :type persist: bool, optional :param save: If True all datasets besides the main fit dataset are archived to the hdf5 file, defaults to True :type save: bool, optional :param mirror: If False datasets will not be mirrored to the mirror_namespace, default to True :type mirror: bool, optional :param enable_histograms: If True, histogram data is generated for plotting by the current scan histogram applet -- this applet displays a histogram of the data collected at each scan point, defaults to True :type enable_histograms: bool, optional :param aggregate_histogram: If True, histogram data is generated for plotting by the current scan aggregate histogram applet -- this applet displays a histogram of all data collected, aggregated over all scan points, defaults to True :type aggregate_histogram: bool, optional :param disable_validations: If True, no fit validatons will be performed, fits will always be performed and no fit param values will be validated, defaults to False :type disable_validations: bool, optional :param fit_map: Dictionary of fit param names to dataset name mappings. Fit params are renamed according to this mapping before the fit is saved to the datasets. Keys specify to a fit param name and the corresponding value in the dictionary specifies the dataset name. Defaluts to {} :type fit_map: dict, optional :param fit_function: Specifies the fit function that will be used during fitting. Defaults to None :type fit_function: :class:`scan_framework.analysis.curvefits.FitFunction`, required if the model performs fits. :param fit_use_yerr: If set to True a weighted fit will be performed using the calculated error in the measurued value at each scan point (by default this is the standard error of the mean value at each scan point). Defaults to True. :type fit_use_yerr: bool, optional :param guess: Dictionary containing initial guesses for the fit params. Keys specify fit params and the corresponding value gives the numerical guess. Defaults to {} :type guess: dict, optional :param man_bounds: Dictionary containing manual bounds for each fit param. Keys specify fit params and the corresponding value is a list specifying the bounds -- see :class:`scan_framework.analysis.curvefits.Fit` for more details. Defaults to {} :type man_bounds: dict, optional :param man_scale: Dictionary containing manual scales for each fit param. Keys specify fit params and the corresponding value is a float specifying the scale -- see :class:`scan_framework.analysis.curvefits.Fit` for more details. Defaults to {} :type man_scale: dict, optional :param hold: Dictionary containing held values for each fit param. Keys specify fit params and the corresponding value is a float specifying the held fit param value -- see :class:`scan_framework.analysis.curvefits.Fit` for more details, defaults to None :type hold: dict, optional :param main_fit: Dataset name for the main fit param -- this name is automatically prefixed by self.namespace to give the full dataset name. Defaults to None :type main_fit: string, optional :param fits_to_save: Any fit param specified in this dictionary will broadcast, persisted, and saved to the datasets. The format of this dictionary is the same as fit_map, with keys specifying fit param names and values specifiying the corresponding dataset name. Defaults to {} :type fits_to_save: dict, optional :param validators: Dictionary containing validation rules of all soft fit validations that will be performed, defaults to None :type validators: dict, optional :param strong_validators: Dictionary containing validation rules of all strong fit validations that will be performed, defaults to None :type strong_validators: dict, optional :param pre_validators: Dictionary containing validation rules of all all pre-fit validations validations that will be performed, defaults to None :type pre_validators: dict, optional :param fit_performed: Set to true by the Scan class if fit's have been performed, defaults to None :type fit_performed: bool :param fit_valid: Set to True by the Scan class if the fit passed validations, False if it failed any validations, None if validations haven't been performed. Default to None :type fit_valid: bool :param fit_valid_pre: Set to True by the Scan class if the fit passed pre-validation, False if it falied, None if pre-validation has not yet been performed. Defaults to None :type fit_valid_pre: bool :param fit_valid_soft: Set to True by the Scan class if the fit passed soft-validation, False if it falied, None if soft-validation has not yet been performed. Defaults to None :type fit_valid_soft: bool :param fit_valid_strong: Set to True by the Scan class if the fit passed strong-validation, False if it falied, None if strong-validation has not yet been performed. Defaults to None :type fit_valid_strong: bool :param _fit_saved: Set to True by the Scan class after the main fit has been broadcast, saved, and persisted to the datasets. Defaults to None :type _fit_saved: bool :param fits_set: Dictionary of fit params that were set to the datasets during fitting -- keys specify the full dataset key and the corresponding value is set to the fitted parameter value, defaults to {} :type fits_set: dict :param fits_saved: Dictionary of fit params that were broadcast, persisted, and saved to the datasets during fitting -- keys specify the full dataset key and the corresponding value is set to the fitted parameter value, defaults to {} :type fits_saved: dict :param x_label: Label of the x-axis for the current scan plot, defaults to "" :type x_label: string, optional :param y_label: Label of the y-axis for the current scan plot, defaults to "" :type y_label: string, optional :param x_scale: Scale of the x-axis for the current scan plot, defaults to 1 :type x_scale: float, optional :param y_scale: Scale of the y-axis for the current scan plot, defaults to 1 :type y_scale: float, optional :param x_units: Unit of the x-axis for the current scan plot, defaults to "" :type x_units: string, optional :param y_units: Unit of the y-axis for the current scan plot, defaults to "" :type y_units: string, optional :param plot_title: Title of the current scan plot, defaults to "" :type plot_title: string, optional :param counts: Contains the value returned by each call to the Scan measure method() -- contains a value for each scan point, repeat, and pass. :type counts: Numpy ndarray """ # MARKED FOR REMOVAL default_fit = None # apparently no longer used, MARKED FOR REMOVAL enable_debugging = False # apparently no longer used, MARKED FOR REMOVAL x_offset = 0.0 # may no longer be used, MARKED FOR REMOVAL subspace = {} # apparently no longer used, MARKED FOR REMOVAL # datasets namespace = "" #: Dataset key under which all datasets are created mirror_namespace = 'current_scan' #: Dataset key under which all datasets are mirrored -- datasets under this key are plotted by the current scan applet broadcast = False #: If True all datasets besides the main fit dataset are broadcast when created persist = False #: If True all datasets besides the main fit dataset are persisted save = True #: If True all datasets besides the main fit dataset are save to the hdf5 file # settings mirror = True #: If False datasets will not be mirrored to the mirror_namespace enable_histograms = True #: If True, histogram data is generated for plotting by the current scan histogram applet -- this applet displays a histogram of the data collected at each scan point aggregate_histogram = True #: If True, histogram data is generated for plotting by the current scan aggregate histogram applet -- this applet displays a histogram of all data collected, aggregated over all scan points disable_validations = False #: If True, no fit validatons will be performed, fits will always be performed and no fit param values will be validated, defaults to False # fitting configuration fit_map = {} #: Dictionary of fit param names to dataset name mappings. Fit params are renamed according to this mapping before the fit is saved to the datasets. Keys specify to a fit param name and the corresponding value in the dictionary specifies the dataset name. fit_function = None #: Specifies the fit function that will be used during fitting fit_use_yerr = True #: If set to True a weighted fit will be performed using the calculated error in the measurued value at each scan point (by default this is the standard error of the mean value at each scan point) main_fit = None #: Dataset name for the main fit param -- this name is automatically prefixed by self.namespace to give the full dataset name. fits_to_save = {} #: Any fit param specified in this dictionary will broadcast, persisted, and saved to the datasets. The format of this dictionary is the same as fit_map, with keys specifying fit param names and values specifiying the corresponding dataset name. guess = {} #: Dictionary containing initial guesses for the fit params. Keys specify fit params and the corresponding value gives the numerical guess. man_bounds = {} #: Dictionary containing manual bounds for each fit param. Keys specify fit params and the corresponding value is a list specifying the bounds -- see :class:`scan_framework.analysis.curvefits.Fit` for more details. man_scale = {} #: Dictionary containing manual scales for each fit param. Keys specify fit params and the corresponding value is a float specifying the scale -- see :class:`scan_framework.analysis.curvefits.Fit` for more details. hold = None #: Dictionary containing held values for each fit param. Keys specify fit params and the corresponding value is a float specifying the held fit param value -- see :class:`scan_framework.analysis.curvefits.Fit` for more details, defaults to None # fit validation configuration validators = None #: Dictionary containing definitions all soft fit validations strong_validators = None #: Dictionary containing validation rules of all strong fit validations that will be performed. pre_validators = None #: Dictionary containing validation rules of all all pre-fit validations validations that will be performed # histogram configuration bin_start = 0 bin_end = 'auto' # plot format configuration x_label = '' #: Label of the x-axis for the current scan plot. y_label = '' #: Label of the y-axis for the current scan plot. x_scale = 1 #: Scale of the x-axis for the current scan plot. y_scale = 1 #: Scale of the y-axis for the current scan plot. x_units = '' #: Unit of the x-axis for the current scan plot. y_units = '' #: Unit of the y-axis for the current scan plot. plot_title = '' #: Title of the current scan plot. # instance variables counts = None #: Contains the value returned by each call to the Scan measure method() -- contains a value for each scan point, repeat, and pass. # state variables fits_set = {} #: Set to true by the Scan class if fit's have been performed fits_saved = {} #: Set to True by the Scan class after the fit params have saved to the datasets. fit_performed = None #: Set to true by the Scan class if fit's have been performed fit_valid = None #: Set to True by the Scan class if the fit passed validations, False if it failed any validations, None if validations haven't been performed. fit_valid_pre = None #: Set to True by the Scan class if the fit passed pre-validation, False if it falied, None if pre-validation has not yet been performed. fit_valid_soft = None #: Set to True by the Scan class if the fit passed soft-validation, False if it falied, None if soft-validation has not yet been performed. fit_valid_strong = None #: Set to True by the Scan class if the fit passed strong-validation, False if it falied, None if strong-validation has not yet been performed. _fit_saved = None #: Set to True by the Scan class after the main fit has been broadcast, saved, and persisted to the datasets. type = None #: Set by the TimeFreqScan class to either 'time' or 'frequency' to indiciate to the model if it will be processing data from a time scan or from a frequency scan.
[docs] def report(self): """Generate a report string that displays the values of the stat datasets.""" str = "" for key in ['bins', 'counts', 'error', 'hist', 'mean', 'nbins']: v = self.stat_model.get(key).items() str += "[{0}]\n {1}\n\n".format(key, v) for k, v in self.get(['points']).items(): str += "[{0}]\n {1}\n\n".format(k, v) return str
[docs] def build(self, bind=True, **kwargs): # don't bind the child model's to their namespaces yet since our namespace hasn't been set. self.fit_model = Model(self, bind=False, mirror=self.mirror, mirror_namespace=self.mirror_namespace, broadcast=self.broadcast, persist=self.persist, save=self.save ) self.stat_model = Model(self, bind=False, mirror=self.mirror, mirror_namespace=self.mirror_namespace, broadcast=self.broadcast, persist=self.persist, save=self.save ) # for monitoring histograms of each scan point if self.enable_histograms: self.hist_model = HistModel(self, bind=False, discrete=True, aggregate=True, mirror=self.mirror, x_label="PMT Counts", broadcast=self.broadcast, persist=self.persist, save=self.save ) self.defaults_model = Model(self, bind=False, mirror=False) # bind the scan model and it's child models to their namespaces super().build(bind, **kwargs) # create a logger and reset the model state. self._name = self.__class__.__name__ self.logger = logging.getLogger('scan') self.reset_state() self._fit_saved = False
[docs] def reset_state(self): """Reset state variables""" self.fit_performed = False self._fit_saved = False self.fit_valid = None self.fit_valid_pre = None self.fit_valid_soft = None self.fit_valid_strong = None
[docs] def bind(self): """Bind the scan model to it's namespace and additional sub-spaces for fits, stats, hists, and defaults.""" # map and bind the scan model namespace first because child models extend off of our namespace super().bind() # now bind the child models to their namespaces self.fit_model._namespace = self.namespace + '.fits' self.fit_model._mirror_namespace = self.mirror_namespace + '.fits' self.fit_model.bind() self.stat_model.namespace = self.namespace + '.stats' self.stat_model.mirror_namespace = self.mirror_namespace + '.stats' self.stat_model.bind() if self.enable_histograms: self.hist_model.namespace = self.namespace + '.hist' self.hist_model.mirror_namespace = 'current_hist' self.hist_model.plot_title = self.plot_title self.hist_model.bind() self.defaults_model.namespace = self.namespace + '.defaults' self.defaults_model.bind()
[docs] def attach(self, scan): """ Attach a scan to the model. Gather's parameters of the scan -- such as scan.nrepeats, scan.npasses, etc. -- and sets these as attributes of the model. """ self._scan = scan self.nrepeats = self.stat_model.nrepeats = scan.nrepeats self.nbins = self.stat_model.nbins = scan.nbins self.npasses = self.stat_model.npasses = scan.npasses self.bins = self.stat_model.bins = np.linspace(-0.5, self.nbins - 0.5, self.nbins + 1) if self.enable_histograms: if self.bin_end == 'auto' or self.bin_end == None: bin_end = self.nbins - 1 else: bin_end = self.bin_end self.hist_model.init_bins(bin_start=self.bin_start, bin_end=bin_end, nbins=self.nbins)
[docs] def load(self): """Fetches the 'x', 'means', 'errors', and 'counts' datasets and sets their values to attributes of the model.""" self.load_xs() self.load_means() self.load_errors() self.load_counts()
[docs] def init_datasets(self, shape, plot_shape, points, dimension=0): """Initializes all datasets pertaining to scans. This method is called by the scan during the initialization stage.""" self.shape = shape self.plot_shape = plot_shape # allow below to work on either 1d or 2d scans if self._scan._dim == 1: shape = np.array([shape]) shape = list(shape) # set experiment rid if not(hasattr(self._scan, 'scheduler')): raise NotImplementedError('The scan has no scheduler attribute. Did you forget to call super().build()?') self.set('rid', self._scan.scheduler.rid) # don't draw plots while initializing self.set('plots.trigger', 0) # initialize scan points self.stat_model.set('points', points) self.stat_model.points = points # self.stat_model.init(key='x', shape=shape, varname='xs', init_local=init_local) # initialize plots self.init_plots(dimension=dimension) # initialize stats self.stat_model.init('counts', shape=shape + [self.npasses * self.nrepeats], fill_value=0, dtype=np.int32) self.stat_model.init(key='mean', shape=shape, varname='means') self.stat_model.init('error', shape, 'errors') if self.enable_histograms: self.stat_model.write('nbins') self.stat_model.write('bins') self.stat_model.init('hist', shape=shape + [self.nbins], varname='hist', fill_value=0, dtype=np.int32) self.hist_model.init_datasets(broadcast=self.broadcast, persist=self.persist, save=self.save) # initialize fits self.fit_model.init('fitline', plot_shape)
[docs] def init_plots(self, dimension): """Initialize the plot datasets. :param dimension: 0 for initializing dimension 0 plots, 1 for initializing dimension 1 plots""" # --- 1D Scans --- if self._scan._dim == 1: dim0_shape = self.plot_shape dim1_shape = None # --- 2D Scans --- else: dim0_shape = self.plot_shape[0] dim1_shape = self.plot_shape # --- Dimension 0 Plots --- if dimension == 0: # data self.init('plots.x', dim0_shape, varname='x', init_local=True) self.init('plots.y', dim0_shape, varname='y', init_local=True) self.init('plots.y2', dim0_shape, varname='y', init_local=True) self.init('plots.fitline', dim0_shape, varname='fitline', init_local=True) self.init('plots.error', dim0_shape, init_local=False) # labels, etc. self.set('plots.plot_title', self.plot_title) self.set('plots.y_label', self.y_label) self.set('plots.x_label', self.x_label) self.set('plots.x_scale', self.x_scale) self.set('plots.y_scale', self.y_scale) self.set('plots.x_units', self.x_units) self.set('plots.y_units', self.y_units) # --- Dimension 1 Plots --- elif dimension == 1: # data self.init('plots.dim1.x', dim1_shape, varname='dim1_x', init_local=True) self.init('plots.dim1.y', dim1_shape, varname='dim1_y', init_local=True) self.init('plots.dim1.fitline', dim1_shape, varname='dim1_fitline', init_local=True) # labels, etc. self.set('plots.dim1.plot_title', self.plot_title) self.set('plots.dim1.y_label', self.y_label) self.set('plots.dim1.x_label', self.x_label) self.set('plots.dim1.x_scale', self.x_scale) self.set('plots.dim1.x_scale', self.y_scale) self.set('plots.dim1.x_units', self.x_units) self.set('plots.dim1.y_units', self.y_units)
[docs] def write_datasets(self, dimension): """Writes all internal values to their datasets. This method is called by the scan when it is resuming from a pause to restore previous scan values to their datasets.""" # don't draw plots while writing self.set('plots.trigger', 0) if dimension == 0: # write scan points self.stat_model.write('points') #self.write('x', 'x') # write plots self.write('plots.x', varname='x') self.write('plots.y', varname='y') self.write('plots.fitline', varname='fitline') self.set('plots.plot_title', self.plot_title) self.set('plots.y_label', self.y_label) self.set('plots.x_label', self.x_label) self.set('plots.x_scale', self.x_scale) self.set('plots.y_scale', self.y_scale) self.set('plots.x_units', self.x_units) self.set('plots.y_units', self.y_units) # write stats self.stat_model.write('counts') self.stat_model.write('mean', 'means') self.stat_model.write('error', 'errors') if self.enable_histograms: self.stat_model.write('nbins') self.stat_model.write('bins') self.stat_model.write('hist') self.hist_model.init_datasets() elif dimension is 1: # write scan points #self.write('x', 'x') # write plots self.write('plots.dim1.x', varname='dim1_x') self.write('plots.dim1.y', varname='dim1_y') self.write('plots.dim1.fitline', varname='dim1_fitline') self.set('plots.dim1.plot_title', self.plot_title) self.set('plots.dim1.y_label', self.y_label) self.set('plots.dim1.x_label', self.x_label) self.set('plots.dim1.x_scale', self.x_scale) self.set('plots.dim1.y_scale', self.y_scale) self.set('plots.dim1.x_units', self.x_units) self.set('plots.dim1.y_units', self.y_units) # draw plots when done writting self.set('plots.trigger', 1)
[docs] def mutate_datasets(self, i_point, point, counts): """Generates the mean and standard error of the mean for the measured value at the specified scan point and mutates the corresponding datasets. The `points` and `counts` datasets are also mutated with the specified scan point value and raw values measured at the specified scan point. If histograms are enabled, each measured value in `counts` will also be binned and the histogram datasets will be mutated with the binned values to updated the histogram plots. :param i_point: scan point index :param point: value of scan point :param counts: array containing all values returned by the scan's measure() method during the specified scan point """ dim = self._scan._dim # mutate the dataset containing the scan point values self.mutate_points(i_point, point) # mutate the dataset containing the array of counts measured at each repetition of the scan point if dim == 1: # mutate the counts dataset i = ((i_point, i_point + 1), (0, len(counts))) self.stat_model.mutate('counts', i, counts, update_local=False) # mutate the local counts array (so it can be written when a scan resumes) self.stat_model.counts[i_point, 0:len(counts)] = counts else: # mutate the counts dataset with counts i = ((i_point[0], i_point[0] + 1), (i_point[1], i_point[1] + 1), (0, len(counts))) self.stat_model.mutate('counts', i, counts, update_local=False) # mutate the local counts array (so it can be written when a scan resumes) self.stat_model.counts[i_point[0], i_point[1], 0:len(counts)] = counts # calculate the mean mean = self.calc_mean(counts) # mutate the dataset containing the mean values at each scan point self.mutate_means(i_point, mean) # calculate the error error = self.calc_error(counts) # mutate the dataset containing the error in the mean at each scan point self.mutate_errors(i_point, error) # histograms if self.enable_histograms: # bin counts and mutate the histogram at the current scan point self.hist_model.reset_bins() self.hist_model.mutate(counts) # mutate the time series histograms if dim == 1: # mutate the hist dataset self.stat_model.mutate('hist', i_point, self.hist_model.bins, update_local=False) # mutate the local hist array self.stat_model.hist[i_point] = self.hist_model.bins else: # mutate the hist dataset i = ((i_point[0], i_point[0]+1), (i_point[1], i_point[1] + 1)) self.stat_model.mutate('hist', i, self.hist_model.bins, update_local=False) # mutate the local hist array self.stat_model.hist[i_point[0], i_point[1]] = self.hist_model.bins return mean
[docs] def mutate_plot(self, i_point, x, y, error=None, dim=None): """Mutate the plots.x and plots.y datasets. This method is called by the scan to update the plot as the scan runs. :param i: Scan point index. Plot datasets are mutated at this index. :param x: X value to plot :param y: Y value to plot :param dim: Which dimension is being plotted. 0 for normal 1D plots. For 2D plots, dim=1 updates the dimension 1 plot, dim=0 updates the final dimension 0 plot. """ which = 'both' if self.broadcast is False: which = 'mirror' # --- Mutate Dimension 0 Plot --- if dim is None or dim == 0: # --- 1D Scans --- if self._scan._dim == 1: i = i_point # --- 2D Scans --- else: i = i_point[0] self.mutate('plots.x', i, x, varname='x', which=which) self.mutate('plots.y', i, y, varname='y', which=which) if error is not None: self.mutate('plots.error', i, error, which=which, update_local=False) # Merge Conflict 8/15/18 not sure which is correct # elif dim is 1: # self.mutate('plots.dim1.x', i, x, which=which) # self.dim1_x[i] = x # self.mutate('plots.dim1.y', i, y, which=which) # self.dim1_y[i] = y # --- Mutate Dimension 1 Plot --- else: i = ((i_point[0], i_point[0]+1), (i_point[1], i_point[1]+1)) self.mutate('plots.dim%i.x' % dim, i, x, which=which) self.mutate('plots.dim%i.y' % dim, i, y, which=which)
[docs] def get_plot_data(self, mirror): """Returns the plots.x and plots.y datasets (always dimension 0) :param mirror: True to pull from the current_scan namespace, False to pull from the actual namespace.""" x_data = self.get('plots.x', mirror=mirror) y_data = self.get('plots.y', mirror=mirror) errors = self.get('plots.error', mirror=mirror) return x_data, y_data, errors
[docs] def mutate_datasets_calc(self, i_point, point, calculation): """Mutates the statistics datasets at a specified scan point using a calculated value :param i_point: index of the current scan point :param point: value of the current scan point :param calculation: name of the calculation to perform :type calculation: string """ value, error = self.calculate(i_point, calculation) self.mutate_points(i_point, point) self.mutate_means(i_point, value) self.mutate_errors(i_point, error) return value
[docs] def mutate_points(self, i_point, point): """Mutate the 'points' dataset with the value of a scan point :param i_point: index of the scan point :param point: value of the scan point """ dim = self._scan._dim if dim == 1: i = i_point self.stat_model.mutate('points', i, point) self.stat_model.points[i] = point else: i = ((i_point[0], i_point[0] + 1), (i_point[1], i_point[1] + 1)) self.stat_model.mutate('points', i, point, update_local=False) self.stat_model.points[i_point[0], i_point[1]] = point
[docs] def mutate_means(self, i_point, mean): """"Mutate the 'means' dataset with a mean value calculated at the specified scan point :param i_point: index of the scan point :param mean: mean of the measured value at the given scan point """ dim = self._scan._dim if dim == 1: # mutate the mean dataset self.stat_model.mutate('mean', i_point, mean, update_local=False) # mutate local means array self.stat_model.means[i_point] = mean else: # mutate the mean dataset i = ((i_point[0], i_point[0] + 1), (i_point[1], i_point[1] + 1)) self.stat_model.mutate('mean', i, mean, update_local=False) # mutate local means array self.stat_model.means[i_point[0], i_point[1]] = mean
[docs] def mutate_errors(self, i_point, error): """"Mutate the 'error' dataset with the error in the mean value calculated at the specified scan point :param i_point: index of the scan point :param error: error in the mean measured value at the given scan point """ dim = self._scan._dim if dim == 1: # mutate the error dataset i = i_point self.stat_model.mutate('error', i, error, update_local=False) # mutate the local errors array self.stat_model.errors[i_point] = error else: # mutate the error dataset i = ((i_point[0], i_point[0] + 1), (i_point[1], i_point[1] + 1)) self.stat_model.mutate('error', i, error, update_local=False) # mutate the local errors array self.stat_model.errors[i_point[0], i_point[1]] = error
#def rewind(self, i_point_start): # """Set all internal data values to np.nan from i_point_start forward""" # for i_point in range(i_point_start, self._scan.npoints): # self.stat_model.means[i_point] = np.nan # self.stat_model.errors[i_point] = np.nan # self.stat_model.xs[i_point] = np.nan
[docs] def get_means(self, default=NoDefault, mirror=False): """Fetches from the datasets and returns the mean values measured at each scan point. i.e. the 'means' dataset """ return self.stat_model.get('mean', default, mirror)
[docs] def get_means_key(self, mirror=False): """Returns the dataset key of the 'means' dataset""" return self.stat_model.key('mean', mirror)
@property def errors(self): """Return the internal value of the 'errors' dataset""" return self.stat_model.errors @property def xs(self): """Return the internal value of the 'x' dataset""" return self.xs
[docs] def get_xs(self, default=NoDefault, mirror=False): """Fetches from the datasets and returns the list of scan points. i.e. the 'x' dataset """ return self.get('x', default, mirror)
[docs] def get_xs_key(self, default=NoDefault, mirror=False): """Return the dataset key of the 'x' dataset""" return self.key('x', mirror)
# [loaders]
[docs] def load_counts(self): """Loads the internal counts variable from its dataset""" self.stat_model.load('counts')
[docs] def load_xs(self): """Loads the internal xs variable from its dataset""" self.stat_model.load('x', 'xs')
[docs] def load_errors(self): """Loads the internal errors variable from its dataset""" self.stat_model.load('error', 'errors')
[docs] def load_means(self): """Loads the internal means variable from its dataset""" self.stat_model.load('mean', 'means')
# [calculators]
[docs] def calc_hist(self, counts): """Return a histogram of counts :param counts: Array of counts to be binned""" #hist, bin_edges = np.histogram(counts, **self.hist_args) hist = [0]*self.nbins for c in counts: if np.isnan(c): continue if c >= len(hist): c = len(hist) - 1 hist[c] += 1 return hist
[docs] def calc_mean(self, counts): """Calculate mean value of counts. :param counts: Array of counts""" return np.nanmean(counts)
[docs] def calc_error(self, counts): """Calculate the standard deviation of the mean :param counts: Array of counts""" return stats.sem(counts, ddof=0, nan_policy='omit')
[docs] def calc_amplitude(self, use_current_scan=False): """Calculate the 'amplitude' of the means, i.e. the maximum mean value minus the minimum mean value""" y_data = self.get_means(mirror=use_current_scan) return max(y_data) - min(y_data)
# --fitting
[docs] def _map_fit_param(self, name): """ Maps a fit param name to it's dataset key """ if self.fit_map is not None and name in self.fit_map: return self.fit_map[name] return name
@property def main_fit_param(self): p = self.main_fit if isinstance(p, list): return p[0] else: return p @property def main_fit_ds(self): p = self.main_fit if isinstance(p, list): return p[1] else: return p
[docs] def get_main_fit(self, use_fit_result=False, i=None, archive=False) -> TFloat: """Helper method. Fetches the value of the main fit from its dataset or from the fitresults. :param use_fit_result: If True, the fit param value in the models fit object is returned. Otherwise the fir param value will be fetched from the datasets. value. """ if use_fit_result: if self.main_fit_param is None: raise Exception("Can't get the main fit. The 'main_fit' attribute needs to be set in the scan model.") return self.fit.fitresults[self.main_fit_param] else: if self.main_fit_ds is None: raise Exception("Can't get the main fit. The 'main_fit' attribute needs to be set in the scan model.") if self.fit_model.default_fallback: default = self.defaults_model.get(self.main_fit_ds, archive=archive) else: default = NoDefault return self.get(self.main_fit_ds, default=default, archive=archive)
[docs] def set_main_fit(self, value): """Helper method. Broadcasts, persists, and saves to the datasets the main fit param specified by the model's main_fit attribute. :param value: value of the main fit that will be saved. """ self.set(self.main_fit_ds, value, which='main', broadcast=True, persist=True, save=True)
[docs] def get_fit(self, name, use_fit_result=False, i=None): """Helper method. Fetches the value of fit param that was found during the last fit performed. The fit param returned will either be read from the datasets or from the model's fit object attribvute. :param name: Name of the fit param :type name: string :param use_fit_result: If True, the fit param is fetched from the models' fit object (self.fit) instead of from the datasets. :type use_fit_result: bool """ if use_fit_result: return self.fit.fitresults[name] else: key = self._map_fit_param(name) if self.fit_model.default_fallback: default = self.defaults_model.get(key) else: default = NoDefault return self.fit_model.get("params."+key, default=default)
[docs] def get_fit_data(self, use_mirror): """Helper method. Returns the experimental data to use for fitting.""" x_data = self.stat_model.get('points', mirror=use_mirror) y_data = self.stat_model.get('mean', mirror=use_mirror) return x_data, y_data
[docs] def get_guess(self, x_data, y_data): """Helper method. Returns the fit guesses to use for fitting.""" return self.guess or {}
[docs] def fit_data(self, x_data, y_data, errors, fit_function, guess=None, i=None, validate=True, set=True, save=False, man_bounds={}, man_scale={}): """Perform a fit of the x values, y values, and errors to the specified fit function. :param x_data: X values of the experimental data :param y_data: Y values of the experimental data :param errors: Error in each corresponding Y value of the experimental data :param fit_function: The function being fit to the data points and errors given by x_data, y_data, and errors. :param guess: Dictionary containing the initial guess for each fit param. Keys specify fit param names and values the initial guess of that fit param. :type guess: dict :param validate: If True, fit validations will be performed :param set: If True, all generated data pertaining to the fit will be saved to the datasets under the model's namespace :param save: If True, the main fit will be saved to the datasets, as long as any defined strong validations pass. :param man_bounds: Dictionary containing the allowed bounds for each fit param. Keys specify fit param names and values are set to a list to specify the bounds of that fit param. :param man_scale: Dictionary containing the scale of each fit param. Keys specify fit param names and values are set to floats that specify the scale of that fit param. """ x_sorted = sorted(x_data) fit_performed = False saved = False errormsg = "" valid_pre = False valid_strong = False # update class variables used in validations. self.min_point = min(x_sorted) self.max_point = max(x_sorted) self.tick = x_sorted[1] - x_sorted[0] # don't validate if validations have been disabled if self.disable_validations: validate = False # - pre-validate data if validate: try: valid_pre = self.validate_fit('pre', x_data, y_data) except CantFit as msg: valid_pre, errormsg = False, msg self.fit_valid_pre = valid_pre # - fit if not validate or valid_pre: # get data to fit guess = guess or self.get_guess(x_data, y_data) hold = self.hold or {} try: yerr = errors if self.fit_use_yerr else None FitModel.fit_data(self, x_data, y_data, fit_function, hold=hold, guess=guess, yerr=yerr, man_bounds=man_bounds, man_scale=man_scale) fit_performed = True except ValueError as msg: self.logger.error("ERROR Fit Failed: {0}".format(msg)) # - post-validation & save fits if fit_performed: # append x/y dataset self.fit.fitresults['x_dataset'] = self.get_xs_key() self.fit.fitresults['y_dataset'] = self.get_means_key() self.before_validate(self.fit) # - set all fitted params to datasets if set: self.set_fits(i) # - post-validate the fit if validate: # - strong validations: # * fit params not saved if this fails. try: valid_strong = True errormsg = "" self.validate_fit('strong') except BadFit as msg: valid_strong = False errormsg = msg self.fit_valid_strong = valid_strong # - soft validations: # * fit params *are* saved if this fails # * this is not performed if the strong validations failed if valid_strong: try: valid_soft = True errormsg = "" self.validate_fit('soft') except BadFit as msg: valid_soft = False errormsg = msg self.fit_valid_soft = valid_soft # - save fitted params to datasets # is it ok to save the fit params? if save and (not validate or valid_strong): # - save the main fit if self.main_fit_param and self.main_fit_ds: self.save_fit(fitparam=self.main_fit_param, dskey=self.main_fit_ds, broadcast=True, persist=True, save=True) saved = True # - save other fits for fitparam, dskey in self.fits_to_save.items(): if fitparam and fitparam is not self.main_fit_param: self.save_fit(fitparam=fitparam, dskey=dskey, broadcast=True, persist=True, save=True) # store status of fit to class variables. self.fit_performed = fit_performed if not validate: self.fit_valid = None else: self.fit_valid = self.fit_valid_pre and self.fit_valid_strong and self.fit_valid_soft self._fit_saved = saved # tell the user about any fit validation errors or warnings that occurred. if self.fit_valid_pre is False: self.logger.warning("SKIP FIT. {}".format(errormsg)) if self.fit_valid_soft is False: self.logger.warning("INVALID FIT. {}".format(errormsg)) if self.fit_valid_strong is False: self.logger.error("INVALID FIT. {}".format(errormsg)) return fit_performed, self.fit_valid, saved, errormsg
[docs] def before_validate(self, fit): """User callback (runs on host). Executed after a fit was successfully performed by the scan model, but before fits are validated or saved to the datasets. This callback allows additional fit parameters to be calculated from the parameters of the fit function. e.g. calculating a pi time from a transition rate. Any calculated fit parameters will also be validated by any validation rules that are defined for name of the calculated parameter. :param fit: Fit object for the fit that was just performed """ pass
[docs] def validate_fit(self, rule, x_data=None, y_data=None): # check pre-validation rules if rule == 'pre': return FitModel.pre_validate(self, series={'x_data': x_data, 'y_data': y_data}, validators=self.pre_validators) elif rule == 'strong': FitModel.validate(self, self.strong_validators) elif rule == 'soft': FitModel.validate(self, self.validators)
[docs] def save_fit(self, fitparam, dskey, broadcast=False, persist=False, save=True): """Helper method. Saves the specified fit param to the datasets under the model's namespace. :param fitparam: Name of the fit param to save :type fitparam: string :param dskey: Datset key to save the fit param value to :param broadcast: Indicates if the dataset should be broadcast, defaults to False :param persist: Indicates if the dataset should be persisted, defaults to False :param save: Indicates if the dataset should be saved to the hdf5 file, defaults to True """ # get the fitted param fitval = self.fit.fitresults[fitparam] # save it to a dataset self.set(dskey, fitval, which='main', broadcast=broadcast, persist=persist, save=save) # record what's been saved if broadcast is True: self.fits_saved[self.key(dskey)] = fitval
[docs] def set_fits(self, i=None): """Helper method. Set's all data generated during fitting to datasets under the model's namespace. """ # fitted params for key, value in zip(self.fit.params._fields, self.fit.params): key = "params.{0}".format(key) if i is not None: key = "{0}.{1}".format(i, key) self.fit_model.set(key, value) # guess for key, value in zip(self.fit.params._fields, self.fit.guess): key = "guesses.{0}".format(key) if i is not None: key = "{0}.{1}".format(i, key) self.fit_model.set(key, value) # fitted param errors for key, value in zip(self.fit.errs._fields, self.fit.errs): key = "errors.{0}".format(key) if i is not None: key = "{0}.{1}".format(i, key) self.fit_model.set(key, value) # fitline key = 'fitline' if i is not None: key = "{0}.{1}".format(i, key) else: # this must update the current_scan so fitlines show up in the plots mirror = self.mirror self.mirror = True self.set('plots.fitline', self.fit.fitline_orig) self.mirror = mirror self.fit_model.set(key, self.fit.fitline_orig) # regression analysis key = 'analysis.r2' if i is not None: key = "{0}.{1}".format(i, key) self.fit_model.set(key, self.fit.r2) key = 'analysis.reg_err' if i is not None: key = "{0}.{1}".format(i, key) self.fit_model.set(key, self.fit.reg_err)
[docs] def report_fit(self): """Helper method. Prints fit results to the console""" self._scan.logger.info('') # display fitted parameters self._scan.logger.info("Fitted Parameters:") for key, param, error in zip(self.fit.params._fields, self.fit.params, self.fit.errs): unit, scaled, text = Model._format(key, param, self) error_unit, error_scaled, error_text = Model._format(key, error, self) self._scan.logger.info("{0} = {1} +/- {2} {3}".format(key, scaled, error_scaled, unit)) # display regression analysis self._scan.logger.info("r2 = {0}".format(self.fit.r2)) self._scan.logger.info("reg_err = {0}".format(self.fit.reg_err)) # display every fit that was set if self.fits_saved: self._scan.logger.info("Fits Saved:") for key, value in self.fits_saved.items(): s = key.split('.') unit, _, scaled = Model._format(s[-1], value) self._scan.logger.info("%s set to %s" % (key, scaled)) self._scan.logger.info("") # display datasets that we're updated if self.fits_set: self._scan.logger.info("Datasets Updated:") for key, value in self.fits_set.items(): s = key.split('.') unit, _, scaled = Model._format(s[-1], value) self._scan.logger.info("%s set to %s" % (key, scaled))
# --- validation helpers
[docs] def validate_in_scan_range(self, field, value, padding_left=None, padding_right=None): """Return False if the value is outside the range of scan points and add a validation error for the field. :param field: name of the field who's value will be checked :param padding_left: increase the allowable range by this amount below the scan point with the smallest value. Defaults to self._scan.tick which is the difference between adjacent scan points. :param padding_left: increase the allowable range by this amount above the scan point with the largest value. Defaults to self._scan.tick which is the difference between adjacent scan points. """ if padding_left is None: padding_left = self.tick if padding_right is None: padding_right = self.tick return self.validate_between(field, value, min_=self.min_point - self.tick, max_=self.max_point + self.tick )
# --- simulation @property def simulation_args(self): """Function arguments passed to the fit function when running simulations""" raise NotImplementedError('Your model needs to implement the simulation_args property')
[docs] def simulate(self, x, noise_level=0, simulation_args=None): simulation_args = simulation_args or self.simulation_args return FitModel.simulate(self, x, noise_level, simulation_args)