From da66d5150dc975e3aa9842cbfb3815238fada553 Mon Sep 17 00:00:00 2001 From: Andrea Alfonsi Date: Tue, 25 Sep 2018 11:44:18 -0600 Subject: [PATCH 01/95] updated submodule moose --- moose | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moose b/moose index 28027fd9b2..d25c2a9e18 160000 --- a/moose +++ b/moose @@ -1 +1 @@ -Subproject commit 28027fd9b2a77d3494698da9f68c450bccec97b7 +Subproject commit d25c2a9e185756a178aef66de71bc2cb4b3d359c From 74963b48313efe463e2d4315159940876635da51 Mon Sep 17 00:00:00 2001 From: Andrea Alfonsi Date: Tue, 25 Sep 2018 11:46:25 -0600 Subject: [PATCH 02/95] Revert "updated submodule moose" This reverts commit da66d5150dc975e3aa9842cbfb3815238fada553. --- moose | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moose b/moose index d25c2a9e18..28027fd9b2 160000 --- a/moose +++ b/moose @@ -1 +1 @@ -Subproject commit d25c2a9e185756a178aef66de71bc2cb4b3d359c +Subproject commit 28027fd9b2a77d3494698da9f68c450bccec97b7 From 5da7950d35f8be05ad865db223534ac3c3b77236 Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Sat, 24 Jul 2021 14:33:57 -0600 Subject: [PATCH 03/95] Few changes to probabilistic.py and representativity.py --- .../Models/PostProcessors/BasicStatistics.py | 72 +++++----- .../validationAlgorithms/Probabilistic.py | 91 +++++++------ .../validationAlgorithms/Representativity.py | 12 +- .../test_validation_gate_representativity.xml | 4 +- ...test_validation_gate_representativity2.xml | 124 ++++++++++++++++++ 5 files changed, 221 insertions(+), 82 deletions(-) create mode 100644 tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml diff --git a/framework/Models/PostProcessors/BasicStatistics.py b/framework/Models/PostProcessors/BasicStatistics.py index 6b9f13b6f0..15f354363a 100644 --- a/framework/Models/PostProcessors/BasicStatistics.py +++ b/framework/Models/PostProcessors/BasicStatistics.py @@ -156,45 +156,47 @@ def inputToInternal(self, currentInp): currentInput = currentInp [-1] if type(currentInp) == list else currentInp if len(currentInput) == 0: self.raiseAnError(IOError, "In post-processor " +self.name+" the input "+currentInput.name+" is empty.") - - pbWeights = None - if type(currentInput).__name__ == 'tuple': - return currentInput - # TODO: convert dict to dataset, I think this will be removed when DataSet is used by other entities that - # are currently using this Basic Statisitics PostProcessor. 
- if type(currentInput).__name__ == 'dict': - if 'targets' not in currentInput.keys(): - self.raiseAnError(IOError, 'Did not find targets in the input dictionary') - inputDataset = xr.Dataset() - for var, val in currentInput['targets'].items(): - inputDataset[var] = val - if 'metadata' in currentInput.keys(): - metadata = currentInput['metadata'] - self.pbPresent = True if 'ProbabilityWeight' in metadata else False - if self.pbPresent: - pbWeights = xr.Dataset() - self.realizationWeight = xr.Dataset() - self.realizationWeight['ProbabilityWeight'] = metadata['ProbabilityWeight']/metadata['ProbabilityWeight'].sum() - for target in self.parameters['targets']: - pbName = 'ProbabilityWeight-' + target - if pbName in metadata: - pbWeights[target] = metadata[pbName]/metadata[pbName].sum() - elif self.pbPresent: - pbWeights[target] = self.realizationWeight['ProbabilityWeight'] + if not isinstance(currentInput,xr.Dataset): + pbWeights = None + if type(currentInput).__name__ == 'tuple': + return currentInput + # TODO: convert dict to dataset, I think this will be removed when DataSet is used by other entities that + # are currently using this Basic Statisitics PostProcessor. + if type(currentInput).__name__ == 'dict': + if 'targets' not in currentInput.keys(): + self.raiseAnError(IOError, 'Did not find targets in the input dictionary') + inputDataset = xr.Dataset() + for var, val in currentInput['targets'].items(): + inputDataset[var] = val + if 'metadata' in currentInput.keys(): + metadata = currentInput['metadata'] + self.pbPresent = True if 'ProbabilityWeight' in metadata else False + if self.pbPresent: + pbWeights = xr.Dataset() + self.realizationWeight = xr.Dataset() + self.realizationWeight['ProbabilityWeight'] = metadata['ProbabilityWeight']/metadata['ProbabilityWeight'].sum() + for target in self.parameters['targets']: + pbName = 'ProbabilityWeight-' + target + if pbName in metadata: + pbWeights[target] = metadata[pbName]/metadata[pbName].sum() + elif self.pbPresent: + pbWeights[target] = self.realizationWeight['ProbabilityWeight'] + else: + self.raiseAWarning('BasicStatistics postprocessor did not detect ProbabilityWeights! Assuming unit weights instead...') else: self.raiseAWarning('BasicStatistics postprocessor did not detect ProbabilityWeights! Assuming unit weights instead...') - else: - self.raiseAWarning('BasicStatistics postprocessor did not detect ProbabilityWeights! Assuming unit weights instead...') - if 'RAVEN_sample_ID' not in inputDataset.sizes.keys(): - self.raiseAWarning('BasicStatisitics postprocessor did not detect RAVEN_sample_ID! Assuming the first dimension of given data...') - self.sampleTag = utils.first(inputDataset.sizes.keys()) - return inputDataset, pbWeights + if 'RAVEN_sample_ID' not in inputDataset.sizes.keys(): + self.raiseAWarning('BasicStatisitics postprocessor did not detect RAVEN_sample_ID! Assuming the first dimension of given data...') + self.sampleTag = utils.first(inputDataset.sizes.keys()) + return inputDataset, pbWeights - if currentInput.type not in ['PointSet','HistorySet']: - self.raiseAnError(IOError, self, 'BasicStatistics postprocessor accepts PointSet and HistorySet only! Got ' + currentInput.type) + if currentInput.type not in ['PointSet','HistorySet']: + self.raiseAnError(IOError, self, 'BasicStatistics postprocessor accepts PointSet and HistorySet only! 
Got ' + currentInput.type) - # extract all required data from input DataObjects, an input dataset is constructed - dataSet = currentInput.asDataset() + # extract all required data from input DataObjects, an input dataset is constructed + dataSet = currentInput.asDataset() + else: + dataSet = currentInput try: inputDataset = dataSet[self.parameters['targets']] except KeyError: diff --git a/framework/Models/PostProcessors/validationAlgorithms/Probabilistic.py b/framework/Models/PostProcessors/validationAlgorithms/Probabilistic.py index 82e0f70452..e17a63c362 100644 --- a/framework/Models/PostProcessors/validationAlgorithms/Probabilistic.py +++ b/framework/Models/PostProcessors/validationAlgorithms/Probabilistic.py @@ -80,20 +80,9 @@ def run(self, inputIn): @ Out, outputDict, dict, dictionary containing the post-processed results """ # inpVars, outVars, dataSet = inputIn['Data'][0] - dataSets = [data for _, _, data in inputIn['Data']] + # dataSets = [data for _, _, data in inputIn['Data']] + dataDict = {data.attrs['name']: data for _, _, data in inputIn['Data']} pivotParameter = self.pivotParameter - # # names = [dataSets[i].attrs['name'] for i in len(dataSets)] - # names = ['simulation','experiment'] - # pivotParameter = self.pivotParameter - # outs = {} - # for feat, targ in zip(self.features, self.targets): - # featData = self._getDataFromDatasets(dataSets, feat, names) - # targData = self._getDataFromDatasets(dataSets, targ, names) - # for metric in self.metrics: - # name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) - # outs[name] = metric.evaluate((featData, targData), multiOutput='raw_values') - # return outs - names = [inp[-1].attrs['name'] for inp in inputIn['Data']] if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None: if 'dynamic' not in self.dynamicType: #self.model.dataType: @@ -107,35 +96,19 @@ def run(self, inputIn): # self.raiseAnError(RuntimeError, "The pivotParameter '{}' has been inputted but PointSets have been used as input of PostProcessor '{}'".format(pivotParameter, self.name)) # if not all([True if pivotParameter in inp else False for inp in dataSets]): # self.raiseAnError(RuntimeError, "The pivotParameter '{}' not found in datasets used as input of PostProcessor '{}'".format(pivotParameter, self.name)) - evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()} + + + evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataDict, **{'dataobjectNames': names}).items()} if pivotParameter: - if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): + #if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): + if len(inputIn['Data'][0][-1]['time']) != len(list(evaluation.values())[0]): self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0]))) if pivotParameter not in evaluation: - evaluation[pivotParameter] = dataSets[0][pivotParameter] + evaluation[pivotParameter] = inputIn['Data'][0][-1]['time'] + #evaluation[pivotParameter] = dataSets[0][pivotParameter] return evaluation - # ## merge the following run method with above run method - # def run(self, datasets, **kwargs): - # """ - # Main method to "do what you do". - # @ In, datasets, list, list of datasets (data1,data2,etc.) to used. 
- # @ In, kwargs, dict, keyword arguments - # @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} - # """ - # names = kwargs.get('dataobjectNames') - # outs = {} - # for feat, targ in zip(self.features, self.targets): - # featData = self._getDataFromDatasets(datasets, feat, names) - # targData = self._getDataFromDatasets(datasets, targ, names) - # # featData = (featData[0], None) - # # targData = (targData[0], None) - # for metric in self.metrics: - # name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) - # outs[name] = metric.evaluate((featData, targData), multiOutput='raw_values') - # return outs - ### utility functions def _evaluate(self, datasets, **kwargs): """ @@ -147,8 +120,10 @@ def _evaluate(self, datasets, **kwargs): names = kwargs.get('dataobjectNames') outputDict = {} for feat, targ in zip(self.features, self.targets): - featData = self._getDataFromDatasets(datasets, feat, names) - targData = self._getDataFromDatasets(datasets, targ, names) + # featData = self._getDataFromDatasets(datasets, feat, names) + featData = self._getDataFromDataDict(datasets, feat, names) + # targData = self._getDataFromDatasets(datasets, targ, names) + targData = self._getDataFromDataDict(datasets, targ, names) for metric in self.metrics: name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) outputDict[name] = metric.evaluate((featData, targData), multiOutput='raw_values') @@ -168,17 +143,47 @@ def _getDataFromDatasets(self, datasets, var, names=None): dat = None if "|" in var and names is not None: do, feat = var.split("|") - doindex = names.index(do) - dat = datasets[doindex][feat] + doIndex = names.index(do) + dat = datasets[doIndex][feat] else: - for doindex, ds in enumerate(datasets): + for doIndex, ds in enumerate(datasets): if var in ds: dat = ds[var] break if 'ProbabilityWeight-{}'.format(feat) in datasets[names.index(do)]: - pw = datasets[doindex]['ProbabilityWeight-{}'.format(feat)].values + pw = datasets[doIndex]['ProbabilityWeight-{}'.format(feat)].values elif 'ProbabilityWeight' in datasets[names.index(do)]: - pw = datasets[doindex]['ProbabilityWeight'].values + pw = datasets[doIndex]['ProbabilityWeight'].values + dim = len(dat.shape) + # (numRealizations, numHistorySteps) for MetricDistributor + dat = dat.values + if dim == 1: + # the following reshaping does not require a copy + dat.shape = (dat.shape[0], 1) + data = dat, pw + return data + + def _getDataFromDataDict(self, datasets, var, names=None): + """ + Utility function to retrieve the data from datasets + @ In, datasets, list, list of datasets (data1,data2,etc.) to search from. + @ In, names, list, optional, list of datasets names (data1,data2,etc.). If not present, the search will be done on the full list. 
+ @ In, var, str, the variable to find (either in fromat dataobject|var or simply var) + @ Out, data, tuple(numpy.ndarray, xarray.DataArray or None), the retrived data (data, probability weights (None if not present)) + """ + pw = None + if "|" in var and names is not None: + do, feat = var.split("|") + dat = datasets[do][feat] + else: + for doIndex, ds in enumerate(datasets): + if var in ds: + dat = ds[var] + break + if 'ProbabilityWeight-{}'.format(feat) in datasets[do]: + pw = datasets[do]['ProbabilityWeight-{}'.format(feat)].values + elif 'ProbabilityWeight' in datasets[do]: + pw = datasets[do]['ProbabilityWeight'].values dim = len(dat.shape) # (numRealizations, numHistorySteps) for MetricDistributor dat = dat.values diff --git a/framework/Models/PostProcessors/validationAlgorithms/Representativity.py b/framework/Models/PostProcessors/validationAlgorithms/Representativity.py index 74a85f82a2..239d446cfc 100644 --- a/framework/Models/PostProcessors/validationAlgorithms/Representativity.py +++ b/framework/Models/PostProcessors/validationAlgorithms/Representativity.py @@ -53,12 +53,14 @@ class cls. specifying input of cls. """ specs = super(Representativity, cls).getInputSpecification() - parametersInput = InputData.parameterInputFactory("Parameters", contentType=InputTypes.StringListType) + parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType) parametersInput.addParam("type", InputTypes.StringType) specs.addSub(parametersInput) targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType) targetParametersInput.addParam("type", InputTypes.StringType) specs.addSub(targetParametersInput) + targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType) + specs.addSub(targetPivotParameterInput) return specs def __init__(self): @@ -153,10 +155,12 @@ def _handleInput(self, paramInput): """ super()._handleInput(paramInput) for child in paramInput.subparts: - if child.getName() == 'Parameters': + if child.getName() == 'featureParameters': self.Parameters = child.value elif child.getName() == 'targetParameters': self.targetParameters = child.value + elif child.getName() == 'targetPivotParameter': + self.targetPivotParameter = child.value def run(self, inputIn): """ @@ -190,7 +194,9 @@ def _evaluate(self, datasets, **kwargs): @ In, kwargs, dict, keyword arguments @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} """ - self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] + # self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] + for data in datasets: + sen = self.stat.run(data) names = kwargs.get('dataobjectNames') outs = {} for feat, targ, param, targParam in zip(self.features, self.targets, self.Parameters, self.targetParameters): diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml index 4a10bf7ad5..89cf532459 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml @@ -33,8 +33,10 @@ outputDataMC2|ans2 simIndex - outputDataMC1|x1,outputDataMC1|x2 + 
outputDataMC1|x1,outputDataMC1|x2 outputDataMC2|x1,outputDataMC2|x2 + outputDataMC1|time + outputDataMC2|time + + outputDataMC1|ans + outputDataMC2|ans2 + simIndex + + outputDataMC1|x1,outputDataMC1|x2 + outputDataMC2|x1,outputDataMC2|x2 + outputDataMC1|time + outputDataMC2|time + + + + + + + + + + + + 1 + 0.5 + + + -1 + 1 + + + + + + + 10 + + + dist1 + + + dist2 + + + + + + + inputPlaceHolder2 + poly + MC_external + outputDataMC1 + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + + + + + x1,x2 + OutputPlaceHolder + + + x1,x2 + ans + + + x1,x2 + ans2 + + + InputPlaceHolder + + + + + + + + + csv + pp1_metric + + + + From bbef143138949728c2e71fc5463a816eb18877a7 Mon Sep 17 00:00:00 2001 From: "Joshua J. Cogliati" Date: Tue, 23 Nov 2021 15:30:57 -0700 Subject: [PATCH 04/95] Switching to PostProcessorReadyInterface Adding a _runLegacy for code still using dictionaries. --- .../Models/PostProcessors/BasicStatistics.py | 75 +++++++------------ .../PostProcessors/LimitSurfaceIntegral.py | 4 +- .../Models/PostProcessors/SafestPoint.py | 2 +- framework/Samplers/AdaptiveMonteCarlo.py | 2 +- 4 files changed, 30 insertions(+), 53 deletions(-) diff --git a/framework/Models/PostProcessors/BasicStatistics.py b/framework/Models/PostProcessors/BasicStatistics.py index 6a5bca9229..0ef7a6ac94 100644 --- a/framework/Models/PostProcessors/BasicStatistics.py +++ b/framework/Models/PostProcessors/BasicStatistics.py @@ -27,14 +27,14 @@ #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessorInterface import PostProcessorInterface +from .PostProcessorReadyInterface import PostProcessorReadyInterface from utils import utils from utils import InputData, InputTypes from utils import mathUtils import Files #Internal Modules End----------------------------------------------------------- -class BasicStatistics(PostProcessorInterface): +class BasicStatistics(PostProcessorReadyInterface): """ BasicStatistics filter class. It computes all the most popular statistics """ @@ -142,6 +142,7 @@ def __init__(self): self.sampleSize = None # number of sample size self.calculations = {} self.validDataType = ['PointSet', 'HistorySet', 'DataSet'] # The list of accepted types of DataObject + self.setInputDataType('xrDataset') def inputToInternal(self, currentInp): """ @@ -150,58 +151,20 @@ def inputToInternal(self, currentInp): @ In, currentInp, object, an object that needs to be converted @ Out, (inputDataset, pbWeights), tuple, the dataset of inputs and the corresponding variable probability weight """ - # The BasicStatistics postprocessor only accept DataObjects - self.dynamic = False + # The BasicStatistics postprocessor only accept Datasets currentInput = currentInp [-1] if type(currentInp) == list else currentInp - if len(currentInput) == 0: - self.raiseAnError(IOError, "In post-processor " +self.name+" the input "+currentInput.name+" is empty.") - pbWeights = None - if type(currentInput).__name__ == 'tuple': - return currentInput - # TODO: convert dict to dataset, I think this will be removed when DataSet is used by other entities that - # are currently using this Basic Statisitics PostProcessor. 
- if type(currentInput).__name__ == 'dict': - if 'targets' not in currentInput.keys(): - self.raiseAnError(IOError, 'Did not find targets in the input dictionary') - inputDataset = xr.Dataset() - for var, val in currentInput['targets'].items(): - inputDataset[var] = val - if 'metadata' in currentInput.keys(): - metadata = currentInput['metadata'] - self.pbPresent = True if 'ProbabilityWeight' in metadata else False - if self.pbPresent: - pbWeights = xr.Dataset() - self.realizationWeight = xr.Dataset() - self.realizationWeight['ProbabilityWeight'] = metadata['ProbabilityWeight']/metadata['ProbabilityWeight'].sum() - for target in self.parameters['targets']: - pbName = 'ProbabilityWeight-' + target - if pbName in metadata: - pbWeights[target] = metadata[pbName]/metadata[pbName].sum() - elif self.pbPresent: - pbWeights[target] = self.realizationWeight['ProbabilityWeight'] - else: - self.raiseAWarning('BasicStatistics postprocessor did not detect ProbabilityWeights! Assuming unit weights instead...') - else: - self.raiseAWarning('BasicStatistics postprocessor did not detect ProbabilityWeights! Assuming unit weights instead...') - if 'RAVEN_sample_ID' not in inputDataset.sizes.keys(): - self.raiseAWarning('BasicStatisitics postprocessor did not detect RAVEN_sample_ID! Assuming the first dimension of given data...') - self.sampleTag = utils.first(inputDataset.sizes.keys()) - return inputDataset, pbWeights - - if currentInput.type not in ['PointSet','HistorySet']: - self.raiseAnError(IOError, self, 'BasicStatistics postprocessor accepts PointSet and HistorySet only! Got ' + currentInput.type) # extract all required data from input DataObjects, an input dataset is constructed - dataSet = currentInput.asDataset() + inpVars, outVars, dataSet = currentInput['Data'][0] try: inputDataset = dataSet[self.parameters['targets']] except KeyError: missing = [var for var in self.parameters['targets'] if var not in dataSet] self.raiseAnError(KeyError, "Variables: '{}' missing from dataset '{}'!".format(", ".join(missing),currentInput.name)) - self.sampleTag = currentInput.sampleTag + self.sampleTag = utils.first(dataSet.dims) - if currentInput.type == 'HistorySet': + if self.dynamic: dims = inputDataset.sizes.keys() if self.pivotParameter is None: if len(dims) > 1: @@ -212,22 +175,21 @@ def inputToInternal(self, currentInp): requested variables', ','.join(self.parameters['targets'])) else: self.dynamic = True - if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): - self.raiseAnError(IOError, "The data provided by the data objects", currentInput.name, "is not synchronized!") + #if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): + # self.raiseAnError(IOError, "The data provided by the data objects", currentInput.name, "is not synchronized!") self.pivotValue = inputDataset[self.pivotParameter].values if self.pivotValue.size != len(inputDataset.groupby(self.pivotParameter)): msg = "Duplicated values were identified in pivot parameter, please use the 'HistorySetSync'" + \ " PostProcessor to syncronize your data before running 'BasicStatistics' PostProcessor." 
self.raiseAnError(IOError, msg) # extract all required meta data - metaVars = currentInput.getVars('meta') - self.pbPresent = True if 'ProbabilityWeight' in metaVars else False + self.pbPresent = 'ProbabilityWeight' in dataSet if self.pbPresent: pbWeights = xr.Dataset() self.realizationWeight = dataSet[['ProbabilityWeight']]/dataSet[['ProbabilityWeight']].sum() for target in self.parameters['targets']: pbName = 'ProbabilityWeight-' + target - if pbName in metaVars: + if pbName in dataSet: pbWeights[target] = dataSet[pbName]/dataSet[pbName].sum() elif self.pbPresent: pbWeights[target] = self.realizationWeight['ProbabilityWeight'] @@ -1375,6 +1337,21 @@ def spearmanCorrelation(self, featVars, targVars, featSamples, targSamples, pbWe da = xr.DataArray(spearmanMat, dims=('targets','features'), coords={'targets':targVars,'features':featVars}) return da + def _runLegacy(self, inputIn): + """ + This method executes the postprocessor action with the old data format. In this case, it computes all the requested statistical FOMs + @ In, inputIn, object, object contained the data to process. (inputToInternal output) + @ Out, outputSet, xarray.Dataset or dictionary, dataset or dictionary containing the results + """ + if type(inputIn).__name__ == 'PointSet': + merged = inputIn.asDataset() + elif 'metadata' in inputIn: + merged = xr.merge([inputIn['metadata'],inputIn['targets']]) + else: + merged = xr.merge([inputIn['targets']]) + newInputIn = {'Data':[[None,None,merged]]} + return self.run(newInputIn) + def run(self, inputIn): """ This method executes the postprocessor action. In this case, it computes all the requested statistical FOMs diff --git a/framework/Models/PostProcessors/LimitSurfaceIntegral.py b/framework/Models/PostProcessors/LimitSurfaceIntegral.py index 01a0d34621..343cb2b86a 100644 --- a/framework/Models/PostProcessors/LimitSurfaceIntegral.py +++ b/framework/Models/PostProcessors/LimitSurfaceIntegral.py @@ -256,9 +256,9 @@ def run(self, input): f = np.vectorize(self.variableDist[varName].ppf, otypes=[np.float]) randomMatrix[:, index] = f(randomMatrix[:, index]) tempDict[varName] = randomMatrix[:, index] - pb = self.stat.run({'targets':{self.target:xarray.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] + pb = self.stat._runLegacy({'targets':{self.target:xarray.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] if self.errorModel: - boundError = abs(pb-self.stat.run({'targets':{self.target:xarray.DataArray(self.errorModel.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target]) + boundError = abs(pb-self.stat._runLegacy({'targets':{self.target:xarray.DataArray(self.errorModel.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target]) else: self.raiseAnError(NotImplemented, "quadrature not yet implemented") return pb, boundError diff --git a/framework/Models/PostProcessors/SafestPoint.py b/framework/Models/PostProcessors/SafestPoint.py index 6ef256707e..a755acc3f3 100644 --- a/framework/Models/PostProcessors/SafestPoint.py +++ b/framework/Models/PostProcessors/SafestPoint.py @@ -335,7 +335,7 @@ def run(self, input): rlz['ProbabilityWeight'][ncLine] = np.prod(probList) metadata = {'ProbabilityWeight':xarray.DataArray(rlz['ProbabilityWeight'])} targets = {tar:xarray.DataArray( rlz[tar]) for tar in self.controllableOrd} - rlz['ExpectedSafestPointCoordinates'] = self.stat.run({'metadata':metadata, 'targets':targets}) + 
rlz['ExpectedSafestPointCoordinates'] = self.stat._runLegacy({'metadata':metadata, 'targets':targets})
     self.raiseADebug(rlz['ExpectedSafestPointCoordinates'])
     return rlz

diff --git a/framework/Samplers/AdaptiveMonteCarlo.py b/framework/Samplers/AdaptiveMonteCarlo.py
index c702b03bd5..a38e007cdd 100644
--- a/framework/Samplers/AdaptiveMonteCarlo.py
+++ b/framework/Samplers/AdaptiveMonteCarlo.py
@@ -186,7 +186,7 @@ def localFinalizeActualSampling(self,jobObject,model,myInput):
       @ Out, None
     """
     if self.counter > 1:
-      output = self.basicStatPP.run(self._targetEvaluation)
+      output = self.basicStatPP._runLegacy(self._targetEvaluation)
       output['solutionUpdate'] = np.asarray([self.counter - 1])
       self._solutionExport.addRealization(output)
       self.checkConvergence(output)

From 73cf876abade6f45b85b893639220dc7e648fcf5 Mon Sep 17 00:00:00 2001
From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com>
Date: Tue, 30 Nov 2021 11:51:24 -0700
Subject: [PATCH 05/95] fixing factories and __init__

---
 framework/Metrics/metrics/Factory.py                    | 1 +
 framework/Models/PostProcessors/Validations/__init__.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/framework/Metrics/metrics/Factory.py b/framework/Metrics/metrics/Factory.py
index 2e705f735d..c1ecf0ead5 100644
--- a/framework/Metrics/metrics/Factory.py
+++ b/framework/Metrics/metrics/Factory.py
@@ -25,6 +25,7 @@
 from .CDFAreaDifference import CDFAreaDifference
 from .PDFCommonArea import PDFCommonArea
 from .ScipyMetric import ScipyMetric
+from .RepresentativityFactors import RepresentativityFactors
 
 factory = EntityFactory('Metrics')
 factory.registerAllSubtypes(MetricInterface)
diff --git a/framework/Models/PostProcessors/Validations/__init__.py b/framework/Models/PostProcessors/Validations/__init__.py
index 1a05f0befa..7dcc6b8031 100644
--- a/framework/Models/PostProcessors/Validations/__init__.py
+++ b/framework/Models/PostProcessors/Validations/__init__.py
@@ -19,3 +19,4 @@
 @author: wangc
 """
 from .Probabilistic import Probabilistic
+from .Representativity import Representativity
\ No newline at end of file

From fb773e803671affc7f98b0488c5b54a251b18ba1 Mon Sep 17 00:00:00 2001
From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com>
Date: Thu, 2 Dec 2021 10:46:15 -0700
Subject: [PATCH 06/95] adding the representativity again

---
 .../Validations/Representativity.py           | 265 ++++++++++++++++++
 1 file changed, 265 insertions(+)
 create mode 100644 framework/Models/PostProcessors/Validations/Representativity.py

diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py
new file mode 100644
index 0000000000..7466dbccb5
--- /dev/null
+++ b/framework/Models/PostProcessors/Validations/Representativity.py
@@ -0,0 +1,265 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" + Created on April 29, 2021 + + @author: Mohammad Abdo (@Jimmy-INL) + + This class represents a base class for the validation algorithms + It inherits from the PostProcessor directly + ##TODO: Recast it once the new PostProcesso API gets in place +""" + +#External Modules------------------------------------------------------------------------------------ +import numpy as np +import xarray as xr +#External Modules End-------------------------------------------------------------------------------- + +#Internal Modules------------------------------------------------------------------------------------ +#from utils import xmlUtils +from utils import InputData, InputTypes +#import Files +#import Distributions +#import MetricDistributor +from utils import utils +from .. import ValidationBase +# from utils.mathUtils import partialDerivative, derivatives +#Internal Modules End-------------------------------------------------------------------------------- + +class Representativity(ValidationBase): + """ + Representativity is a base class for validation problems + It represents the base class for most validation problems + """ + + @classmethod + def getInputSpecification(cls): + """ + Method to get a reference to a class that specifies the input data for + class cls. + @ In, cls, the class for which we are retrieving the specification + @ Out, specs, InputData.ParameterInput, class to use for + specifying input of cls. + """ + specs = super(Representativity, cls).getInputSpecification() + parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType) + parametersInput.addParam("type", InputTypes.StringType) + specs.addSub(parametersInput) + targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType) + targetParametersInput.addParam("type", InputTypes.StringType) + specs.addSub(targetParametersInput) + targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType) + specs.addSub(targetPivotParameterInput) + return specs + + def __init__(self): + """ + Constructor + @ In, None + @ Out, None + """ + super().__init__() + from Models.PostProcessors import factory as ppFactory # delay import to allow definition + self.printTag = 'POSTPROCESSOR Representativity' + self.dynamicType = ['static','dynamic'] # for now only static is available + self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics + self.name = 'Represntativity' + self.stat = ppFactory.returnInstance('BasicStatistics') + self.stat.what = ['NormalizedSensitivities'] # expected value calculation + + # def initialize(self, runInfo, inputs, initDict=None): + # """ + # This function is used to initialize the plugin, i.e. 
set up working dir, + # call the initializePlugin method from the plugin + # @ In, runInfo, dict, it is the run info from the jobHandler + # @ In, inputs, list, it is a list containing whatever is passed with an input role in the step + # @ In, initDict, dict, optional, dictionary of all objects available in the step is using this model + # """ + # super().initialize(runInfo, inputs, initDict) + # if self._keepInputMeta: + # ## add meta keys from input data objects + # for inputObj in inputs: + # if isinstance(inputObj, DataObject.DataObject): + # metaKeys = inputObj.getVars('meta') + # self.addMetaKeys(metaKeys) + + + # def inputToInternal(self, currentInputs): + # """ + # Method to convert an input object into the internal format that is + # understandable by this pp. + # @ In, currentInputs, list or DataObject, data object or a list of data objects + # @ Out, measureList, list of (feature, target), the list of the features and targets to measure the distance between + # """ + # if type(currentInputs) != list: + # currentInputs = [currentInputs] + # hasPointSet = False + # hasHistorySet = False + # #Check for invalid types + # for currentInput in currentInputs: + # inputType = None + # if hasattr(currentInput, 'type'): + # inputType = currentInput.type + + # if isinstance(currentInput, Files.File): + # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") + # elif isinstance(currentInput, Distributions.Distribution): + # pass #Allowed type + # elif inputType == 'HDF5': + # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") + # elif inputType == 'PointSet': + # hasPointSet = True + # elif inputType == 'HistorySet': + # hasHistorySet = True + # if self.multiOutput == 'raw_values': + # self.dynamic = True + # if self.pivotParameter not in currentInput.getVars('indexes'): + # self.raiseAnError(IOError, self, 'Pivot parameter', self.pivotParameter,'has not been found in DataObject', currentInput.name) + # if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): + # self.raiseAnError(IOError, "HistorySet", currentInput.name," is not syncronized, please use Interfaced PostProcessor HistorySetSync to pre-process it") + # pivotValues = currentInput.asDataset()[self.pivotParameter].values + # if len(self.pivotValues) == 0: + # self.pivotValues = pivotValues + # elif set(self.pivotValues) != set(pivotValues): + # self.raiseAnError(IOError, "Pivot values for pivot parameter",self.pivotParameter, "in provided HistorySets are not the same") + # else: + # self.raiseAnError(IOError, "Metric cannot process "+inputType+ " of type "+str(type(currentInput))) + # if self.multiOutput == 'raw_values' and hasPointSet and hasHistorySet: + # self.multiOutput = 'mean' + # self.raiseAWarning("Reset 'multiOutput' to 'mean', since both PointSet and HistorySet are provided as Inputs. 
Calculation outputs will be aggregated by averaging") + + # measureList = [] + + # for cnt in range(len(self.features)): + # feature = self.features[cnt] + # target = self.targets[cnt] + # featureData = self.__getMetricSide(feature, currentInputs) + # targetData = self.__getMetricSide(target, currentInputs) + # measureList.append((featureData, targetData)) + + # return measureList + + # def initialize(self, features, targets, **kwargs): + # """ + # Set up this interface for a particular activity + # @ In, features, list, list of features + # @ In, targets, list, list of targets + # @ In, kwargs, dict, keyword arguments + # """ + # super().initialize(features, targets, **kwargs) + # self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.targets), 'prefix':'nsen'}]} + # # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]} + # fakeRunInfo = {'workingDir':'','stepName':''} + # self.stat.initialize(fakeRunInfo, self.Parameters, features, **kwargs) + + def _handleInput(self, paramInput): + """ + Function to handle the parsed paramInput for this class. + @ In, paramInput, ParameterInput, the already parsed input. + @ Out, None + """ + super()._handleInput(paramInput) + for child in paramInput.subparts: + if child.getName() == 'featureParameters': + self.Parameters = child.value + elif child.getName() == 'targetParameters': + self.targetParameters = child.value + elif child.getName() == 'targetPivotParameter': + self.targetPivotParameter = child.value + + def run(self, inputIn): + """ + This method executes the postprocessor action. In this case it loads the + results to specified dataObject + @ In, inputIn, list, dictionary of data to process + @ Out, outputDict, dict, dictionary containing the post-processed results + """ + dataSets = [data for _, _, data in inputIn['Data']] + pivotParameter = self.pivotParameter + names=[] + if isinstance(inputIn['Data'][0][-1], xr.Dataset): + names = [self.getDataSetName(inp[-1]) for inp in inputIn['Data']] + # names = [inp[-1].attrs['name'] for inp in inputIn['Data']] + if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None: + if 'dynamic' not in self.dynamicType: #self.model.dataType: + self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name)) + else: + pivotParameter = self.pivotParameter + evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()}#inputIn + if pivotParameter: + if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): + self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0]))) + if pivotParameter not in evaluation: + evaluation[pivotParameter] = dataSets[0][pivotParameter] + return evaluation + + def _evaluate(self, datasets, **kwargs): + """ + Main method to "do what you do". + @ In, datasets, list, list of datasets (data1,data2,etc.) to used. 
+ @ In, kwargs, dict, keyword arguments + @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} + """ + # self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] + self.stat.run(datasets) + # for data in datasets: + # sen = self.stat.run(data) + names = kwargs.get('dataobjectNames') + outs = {} + for feat, targ, param, targParam in zip(self.features, self.targets, self.Parameters, self.targetParameters): + featData = self._getDataFromDatasets(datasets, feat, names) + targData = self._getDataFromDatasets(datasets, targ, names) + Parameters = self._getDataFromDatasets(datasets, param, names) + targetParameters = self._getDataFromDatasets(datasets, targParam, names) + # senFOMs = partialDerivative(featData.data,np.atleast_2d(Parameters.data)[0,:],'x1') + senFOMs = np.atleast_2d(Parameters[0])#.data + senMeasurables = np.atleast_2d(targetParameters[0]) + covParameters = senFOMs @ senMeasurables.T + for metric in self.metrics: + name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.name) + outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) + return outs + + def _getDataFromDatasets(self, datasets, var, names=None): + """ + Utility function to retrieve the data from datasets + @ In, datasets, list, list of datasets (data1,data2,etc.) to search from. + @ In, names, list, optional, list of datasets names (data1,data2,etc.). If not present, the search will be done on the full list. + @ In, var, str, the variable to find (either in fromat dataobject|var or simply var) + @ Out, data, tuple(numpy.ndarray, xarray.DataArray or None), the retrived data (data, probability weights (None if not present)) + """ + data = None + pw = None + dat = None + if "|" in var and names is not None: + do, feat = var.split("|") + doindex = names.index(do) + dat = datasets[doindex][feat] + else: + for doindex, ds in enumerate(datasets): + if var in ds: + dat = ds[var] + break + if 'ProbabilityWeight-{}'.format(feat) in datasets[names.index(do)]: + pw = datasets[doindex]['ProbabilityWeight-{}'.format(feat)].values + elif 'ProbabilityWeight' in datasets[names.index(do)]: + pw = datasets[doindex]['ProbabilityWeight'].values + dim = len(dat.shape) + # (numRealizations, numHistorySteps) for MetricDistributor + dat = dat.values + if dim == 1: + # the following reshaping does not require a copy + dat.shape = (dat.shape[0], 1) + data = dat, pw + return data \ No newline at end of file From edf8fcc406a841a537222a88cf3aee70fc85bed1 Mon Sep 17 00:00:00 2001 From: "Joshua J. Cogliati" Date: Thu, 2 Dec 2021 11:19:25 -0700 Subject: [PATCH 07/95] Fixing passing data. 
---
 .../Models/PostProcessors/Validations/Representativity.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py
index 7466dbccb5..99422f0980 100644
--- a/framework/Models/PostProcessors/Validations/Representativity.py
+++ b/framework/Models/PostProcessors/Validations/Representativity.py
@@ -212,7 +212,7 @@ def _evaluate(self, datasets, **kwargs):
     @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value}
     """
     # self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target]
-    self.stat.run(datasets)
+    self.stat.run({"Data":[[None, None, datasets]]})
     # for data in datasets:
     #   sen = self.stat.run(data)
     names = kwargs.get('dataobjectNames')
@@ -262,4 +262,4 @@ def _getDataFromDatasets(self, datasets, var, names=None):
       # the following reshaping does not require a copy
       dat.shape = (dat.shape[0], 1)
     data = dat, pw
-    return data
\ No newline at end of file
+    return data

From d6f903843d3cfe06bbe71dd8af05f6bda28dad62 Mon Sep 17 00:00:00 2001
From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com>
Date: Fri, 3 Dec 2021 11:04:44 -0700
Subject: [PATCH 08/95] first attempt to fix representativity

---
 .../Validations/Representativity.py | 29 ++++++++++---------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py
index 99422f0980..7c534fd68d 100644
--- a/framework/Models/PostProcessors/Validations/Representativity.py
+++ b/framework/Models/PostProcessors/Validations/Representativity.py
@@ -150,18 +150,19 @@ def __init__(self):
 
     #   return measureList
 
-  # def initialize(self, features, targets, **kwargs):
-  #   """
-  #     Set up this interface for a particular activity
-  #     @ In, features, list, list of features
-  #     @ In, targets, list, list of targets
-  #     @ In, kwargs, dict, keyword arguments
-  #   """
-  #   super().initialize(features, targets, **kwargs)
-  #   self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.targets), 'prefix':'nsen'}]}
-  #   # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]}
-  #   fakeRunInfo = {'workingDir':'','stepName':''}
-  #   self.stat.initialize(fakeRunInfo, self.Parameters, features, **kwargs)
+  def initialize(self, runInfo, inputs, initDict):
+    """
+      Method to initialize the Representativity pp.
+      @ In, runInfo, dict, dictionary of run info (e.g.
working dir, etc)
+      @ In, inputs, list, list of inputs
+      @ In, initDict, dict, dictionary with initialization options
+      @ Out, None
+    """
+    super().initialize(runInfo, inputs, initDict)
+    self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.targets), 'prefix':'nsen'}]}
+    # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]}
+    # fakeRunInfo = {'workingDir':'','stepName':''}
+    self.stat.initialize(runInfo, inputs, initDict)#self.featureParameters, self.featureParameters, **kwargs
 
   def _handleInput(self, paramInput):
     """
@@ -172,7 +173,7 @@ def _handleInput(self, paramInput):
     super()._handleInput(paramInput)
     for child in paramInput.subparts:
       if child.getName() == 'featureParameters':
-        self.Parameters = child.value
+        self.featureParameters = child.value
       elif child.getName() == 'targetParameters':
         self.targetParameters = child.value
       elif child.getName() == 'targetPivotParameter':

From d5fe3e4923a99844876defc60413c1209ee3a9ed Mon Sep 17 00:00:00 2001
From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com>
Date: Wed, 8 Dec 2021 19:39:19 -0700
Subject: [PATCH 09/95] adding features to the BS call

---
 framework/Models/PostProcessors/Validations/Representativity.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py
index 7c534fd68d..46cbc22808 100644
--- a/framework/Models/PostProcessors/Validations/Representativity.py
+++ b/framework/Models/PostProcessors/Validations/Representativity.py
@@ -159,7 +159,7 @@ def initialize(self, runInfo, inputs, initDict):
       @ Out, None
     """
     super().initialize(runInfo, inputs, initDict)
-    self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.targets), 'prefix':'nsen'}]}
+    self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.features), 'features':set(self.featureParameters),'prefix':'nsen'}]}
     # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]}
     # fakeRunInfo = {'workingDir':'','stepName':''}
     self.stat.initialize(runInfo, inputs, initDict)#self.featureParameters, self.featureParameters, **kwargs

From 9d226679f386b467924a1c6ffa8fca0941233c31 Mon Sep 17 00:00:00 2001
From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com>
Date: Tue, 25 Jan 2022 19:29:31 -0700
Subject: [PATCH 10/95] slowly adding differences from Josh's patch

---
 .../Validations/Representativity.py | 28 ++++++++++--------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py
index 46cbc22808..36e850e3c8 100644
--- a/framework/Models/PostProcessors/Validations/Representativity.py
+++ b/framework/Models/PostProcessors/Validations/Representativity.py
@@ -74,7 +74,7 @@ def __init__(self):
     self.printTag = 'POSTPROCESSOR Representativity'
     self.dynamicType = ['static','dynamic'] # for now only static is available
     self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics
-    self.name = 'Represntativity'
+    self.name = 'Representativity'
     self.stat = ppFactory.returnInstance('BasicStatistics')
     self.stat.what = ['NormalizedSensitivities'] # expected value calculation
@@ -159,7
+159,8 @@ def initialize(self, runInfo, inputs, initDict): @ Out, None """ super().initialize(runInfo, inputs, initDict) - self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.features), 'features':set(self.featureParameters),'prefix':'nsen'}]} + # self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.features), 'features':set(self.featureParameters),'prefix':'nsen'}]} + self.stat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[1] for x in self.features]), 'features':set([x.split("|")[1] for x in self.featureParameters]),'prefix':'nsen'}]} # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]} # fakeRunInfo = {'workingDir':'','stepName':''} self.stat.initialize(runInfo, inputs, initDict)#self.featureParameters, self.featureParameters, **kwargs @@ -198,11 +199,12 @@ def run(self, inputIn): else: pivotParameter = self.pivotParameter evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()}#inputIn - if pivotParameter: - if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): - self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0]))) - if pivotParameter not in evaluation: - evaluation[pivotParameter] = dataSets[0][pivotParameter] + # if pivotParameter: + # # Uncomment this to cause crash: print(dataSets[0], pivotParameter) + # if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): + # self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0]))) + # if pivotParameter not in evaluation: + # evaluation[pivotParameter] = dataSets[0][pivotParameter] return evaluation def _evaluate(self, datasets, **kwargs): @@ -213,19 +215,23 @@ def _evaluate(self, datasets, **kwargs): @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} """ # self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] - self.stat.run({"Data":[[None, None, datasets]]}) + senMeasurables = self.stat.run({"Data":[[None, None, datasets[0]]]}) + senFOMs = self.stat.run({"Data":[[None, None, datasets[1]]]}) + + # for data in datasets: # sen = self.stat.run(data) names = kwargs.get('dataobjectNames') outs = {} - for feat, targ, param, targParam in zip(self.features, self.targets, self.Parameters, self.targetParameters): + # for feat, targ, param, targParam in zip(self.features, self.targets, self.Parameters, self.targetParameters): + for feat, targ, param, targParam in zip(self.features, self.targets, self.featureParameters, self.targetParameters): featData = self._getDataFromDatasets(datasets, feat, names) targData = self._getDataFromDatasets(datasets, targ, names) Parameters = self._getDataFromDatasets(datasets, param, names) targetParameters = self._getDataFromDatasets(datasets, targParam, names) # senFOMs = partialDerivative(featData.data,np.atleast_2d(Parameters.data)[0,:],'x1') - senFOMs = np.atleast_2d(Parameters[0])#.data - senMeasurables = np.atleast_2d(targetParameters[0]) + # senFOMs = np.atleast_2d(Parameters[0])#.data + # senMeasurables = np.atleast_2d(targetParameters[0]) covParameters = senFOMs @ senMeasurables.T for metric in self.metrics: name = 
"{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.name) From 56b4401a3729fb49255c84caf5db2cd4549efab7 Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Tue, 1 Mar 2022 17:45:23 -0700 Subject: [PATCH 11/95] Adding test --- ...lidation_gate_representativityLinModel.xml | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml new file mode 100644 index 0000000000..3d26307c13 --- /dev/null +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml @@ -0,0 +1,124 @@ + + + + Representativity + mcRun, PP1 + 1 + + + + framework/PostProcessors/Validation/test_validation_gate_representativity + Mohammad Abdo (@Jimmy-INL) + 2021-04-29 + PostProcessors.Validation + + This test is aimed to show how to use the mechanics of the Validation Post-Processor. For semplicity, + this test is using the attenuation model (analytical) and simple representativity factors metrics. + The output name convention is ``feature name''\underscore``target name''\underscore``metric name''=. + + + Added Modification for new PP API + + + + + + p1,p2,F1, F2, F3 + + + + outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 + outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 + simIndex + + outputDataMC1|p1,outputDataMC1|p2 + outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|time + outputDataMC2|time + + + + + + + + + + + + 5.5 + 0.0001 + + + 8 + 0.0001 + + + + + + + 10 + + + dist1 + + + dist2 + + + + + + + inputPlaceHolder2 + linModel + MC_external + outputDataMC1 + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + + + + + p1,p2 + OutputPlaceHolder + + + p1,p2 + F1, F2, F3 + + + p1,p2 + F1, F2, F3 + + + InputPlaceHolder + + + + + + + + + csv + pp1_metric + + + + From 66faee7e520f4e4525a893401b39e135d23baa61 Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Tue, 5 Apr 2022 14:35:51 -0600 Subject: [PATCH 12/95] returning xr temperarly with dict --- framework/Models/PostProcessors/BasicStatistics.py | 4 ++-- .../test_validation_gate_representativityLinModel.xml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/framework/Models/PostProcessors/BasicStatistics.py b/framework/Models/PostProcessors/BasicStatistics.py index 0ef7a6ac94..51a4647058 100644 --- a/framework/Models/PostProcessors/BasicStatistics.py +++ b/framework/Models/PostProcessors/BasicStatistics.py @@ -1173,7 +1173,7 @@ def getCovarianceSubset(desired): if self.pivotParameter in outputSet.sizes.keys(): outputDict[self.pivotParameter] = np.atleast_1d(self.pivotValue) - return outputDict + return outputDict,outputSet def corrCoeff(self, covM): """ @@ -1359,7 +1359,7 @@ def run(self, inputIn): @ Out, outputSet, xarray.Dataset or dictionary, dataset or dictionary containing the results """ inputData = self.inputToInternal(inputIn) - outputSet = self.__runLocal(inputData) + _,outputSet = self.__runLocal(inputData) return outputSet def collectOutput(self, finishedJob, output): diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml 
b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
index 3d26307c13..98308b482e 100644
--- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
+++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
@@ -52,11 +52,11 @@
 
         5.5
-        0.0001
+        0.55
 
 
         8
-        0.0001
+        0.8
 
 

From 19cd7076367fdef8405083551cd7b2c363a21522 Mon Sep 17 00:00:00 2001
From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com>
Date: Wed, 6 Apr 2022 11:43:20 -0600
Subject: [PATCH 13/95] removing unnecessary imports for python3

---
 framework/Metrics/metrics/RepresentativityFactors.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/framework/Metrics/metrics/RepresentativityFactors.py b/framework/Metrics/metrics/RepresentativityFactors.py
index c83be45cb0..b04150115b 100644
--- a/framework/Metrics/metrics/RepresentativityFactors.py
+++ b/framework/Metrics/metrics/RepresentativityFactors.py
@@ -16,10 +16,6 @@
 
   @author: Mohammad Abdo (@Jimmy-INL)
 """
-
-#for future compatibility with Python 3--------------------------------------------------------------
-from __future__ import division, print_function, unicode_literals, absolute_import
-#End compatibility block for Python 3----------------------------------------------------------------
-
 #External Modules------------------------------------------------------------------------------------
 import numpy as np
 import copy

From 6b81b9e25a792903c9efe4147ee0a2dab0be93cd Mon Sep 17 00:00:00 2001
From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com>
Date: Tue, 12 Apr 2022 18:19:40 -0600
Subject: [PATCH 14/95] adding representativity to user_manual

---
 doc/user_manual/postprocessor.tex             | 140 +++++++++++++-----
 .../metrics/RepresentativityFactors.py        |  22 +--
 ...lidation_gate_representativityLinModel.xml |  16 +-
 3 files changed, 107 insertions(+), 71 deletions(-)

diff --git a/doc/user_manual/postprocessor.tex b/doc/user_manual/postprocessor.tex
index 72a39131ae..9ea563382c 100644
--- a/doc/user_manual/postprocessor.tex
+++ b/doc/user_manual/postprocessor.tex
@@ -452,6 +452,7 @@ \subsubsection{ComparisonStatistics}
 \end{lstlisting}
 
+
 %%%%% PP ImportanceRank %%%%%%%
 \subsubsection{ImportanceRank}
 \label{ImportanceRank}
@@ -1614,37 +1615,55 @@ \subsubsection{SampleSelector}
 \end{lstlisting}
 
 %%%%% PP Validation %%%%%%%
-\subsubsection{Validation}
-\label{subsubsec:Validation}
-
-The \xmlNode{Validation} post-processor represents a gate
-for applying a different range of algorithms to validate (e.g. compare)
-dataset and/or models (e.g. Distributions).
-The post-processor is in charge of deploying a common infrastructure
-for the user of \textbf{Validation} problems.
-Several algorithms are avaialable within this post-processor:
+%\subsubsection{Validation}
+%\label{subsubsec:Validation}
+%
+%The \xmlNode{Validation} post-processor represents a gate
+%for applying a different range of algorithms to validate (e.g. compare)
+%dataset and/or models (e.g. Distributions).
+%The post-processor is in charge of deploying a common infrastructure
+%for the user of \textbf{Validation} problems.
+%Several algorithms are avaialable within this post-processor:
+
+%%%%% PP Validation %%%%%%%
+\subsubsection{Representativity}
+\label{subsubsec:Representativity}
+The \textbf{Representativity} post-processor is one of three \textbf{Validation} post-processors; in fact, there is a post-processor interface that acts as a gate for applying these validation algorithms (i.e., representativity, Physics-guided Coverage Mapping (PCM), and Dynamic System Scaling (DSS)). The post-processor is in charge of deploying a common infrastructure for the user of \textbf{Validation} problems. The usage of this post-processor is threefold: one, to quantitatively assess whether a mock/prototype model/experiment forms a good representation of a target model; two, to determine whether a set of experiments can represent a target model and claim full coverage of the design space and scenarios; and three, if the available set of experiments is not enough to declare coverage, to determine which experiments remain to be performed in order to achieve full coverage and increase the representativity/bias factor. The representativity theory was first developed in the neutronics community \ref{} and, shortly after, was extended to thermal hydraulics \ref{}.
+So far several algorithms are implemented within this post-processor:
+
 \begin{itemize}
   \item \textbf{Probabilistic}, for Static and Time-dependent data
-  % \item \textbf{DSS}
-  % \item \textbf{Representativity}
-  % \item \textbf{PCM}
+  \item \textbf{DSS}
+  \item \textbf{Representativity}
+  \item \textbf{PCM}
 \end{itemize}
 %
-The \textbf{Validation} post-processor makes use of the \textbf{Metric} system (See Chapter \ref{sec:Metrics}) to, in conjucntion with the specific algorithm chosen from the list above,
+The \textbf{Representativity} post-processor can make use of the \textbf{Metric} system (see Chapter \ref{sec:Metrics}), in conjunction with the specific algorithm chosen from the list above,
 to report validation scores for both static and time-dependent data. Indeed, Both \textbf{PointSet} and \textbf{HistorySet} can be accepted by this post-processor (depending on which algorithm is chosen). If the name of given variable to be compared is unique, it can be used directly, otherwise the variable can be specified with $DataObjectName|InputOrOutput|VariableName$ nomenclature.
 %
-\ppType{Validation}{Validation}
+\ppType{representativity}{representativity}
 %
 \begin{itemize}
-  \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features.
+  \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features, which can be the measurables/observables of the mock model. The reader should be warned that this nomenclature differs from the machine-learning one.
+
   \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of
-    targets. \nb Each target is paired with a feature listed in xml node \xmlNode{Features}. In this case, the
-    number of targets should be equal to the number of features.
+    targets. These are the Figures of merit (FOMs) in the target model against which the mock model is being validated.
+
+  \item \xmlNode{featureParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the mock model.
+
+  \item \xmlNode{targetParameters}, \xmlDesc{comma separated string, required field}, contains a comma separated list of
+    target parameters/inputs.
+ + \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable of the moch model. Default is ``time''. + \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation). + \item \xmlNode{targetPivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable in the target model. Default is ``time''. + \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation). + \item \xmlNode{Metric}, \xmlDesc{string, required field}, specifies the \textbf{Metric} name that is defined via \textbf{Metrics} entity. In this xml-node, the following xml attributes need to be specified: \begin{itemize} @@ -1654,14 +1673,15 @@ \subsubsection{Validation} The choice of the available metrics depends on the specific validation algorithm that is chosen (see table \ref{tab:ValidationAlgorithms}) \end{itemize} -In addition to the nodes above, the user must choose a validation algorithm: -\begin{itemize} - \item \xmlNode{Probabilistic}, \xmlDesc{XML node, optional field}, specify that the validation needs to be performed - using the Probabilistic metrics: \textbf{CDFAreaDifference} (see \ref{subsubsec:metric_CDFAreaDifference}) or \textbf{PDFCommonArea} (see \ref{subsubsec:metric_PDFCommonArea}) - This xml-node accepts the following attribute: - \begin{itemize} - \item \xmlAttr{ name}, \xmlDesc{required string attribute}, the user defined name of the validation algorithm used as prefix for the output results. - \end{itemize} +%In addition to the nodes above, the user must choose a validation algorithm: +%\begin{itemize} +% \item \xmlNode{Probabilistic}, \xmlDesc{XML node, optional field}, specify that the validation needs to be performed +% using the Probabilistic metrics: \textbf{CDFAreaDifference} (see \ref{subsubsec:metric_CDFAreaDifference}) or \textbf{PDFCommonArea} (see \ref{subsubsec:metric_PDFCommonArea}) +% This xml-node accepts the following attribute: +% \begin{itemize} +% \item \xmlAttr{ name}, \xmlDesc{required string attribute}, the user defined name of the validation algorithm used as prefix for the output results. +% \end{itemize} + %\item \xmlNode{DSS}, \xmlDesc{XML node, optional field}, specify that the validation needs to be performed via DSS. %This xml-node accepts the following attribute: % \begin{itemize} @@ -1671,7 +1691,7 @@ \subsubsection{Validation} % \begin{itemize} % \item \xmlNode{myNode}, \xmlDesc{comma separated string, required field}, DESCRIPTION % \end{itemize} -\end{itemize} +%\end{itemize} \begin{table}[] \caption{Validation Algorithms and respective available metrics and DataObjects} @@ -1680,28 +1700,74 @@ \subsubsection{Validation} \hline \textbf{Validation Algorithm} & \textbf{DataObject} & \textbf{Available Metrics} \\ \hline Probabilistic & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet\end{tabular} & \begin{tabular}[c]{@{}c@{}}CDFAreaDifference\\ \\ PDFCommonArea\end{tabular} \\ \hline + +representativity & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet \\DataSet\end{tabular} & \begin{tabular}[c]{@{}c@{}}BiasFactor\end{tabular} \\ \hline + DSS & HistorySet & Not Available Yet \\ \hline +%PCM & HistorySet & Not Available Yet \\ \hline \end{tabular} \end{table} \textbf{Example:} \begin{lstlisting}[style=XML,morekeywords={subType}] - ... - - ... - - outputDataMC1|ans - outputDataMC2|ans2 - cdf_diff - pdf_area - - ... +... 
+ + + inputPlaceHolder2 + linModel + MC_external + outputDataMC1 + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + +... - ... +... + + outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 + outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 + simIndex + outputDataMC1|p1,outputDataMC1|p2 + outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|time + outputDataMC2|time + +... + +... + + + +... \end{lstlisting} + +%\textbf{Example:} +%\begin{lstlisting}[style=XML,morekeywords={subType}] +% +% ... +% +% ... +% +% outputDataMC1|ans +% outputDataMC2|ans2 +% cdf_diff +% pdf_area +% +% ... +% +% ... +% +%\end{lstlisting} %%%%% PP EconomicRatio %%%%%%% \input{EconomicRatio.tex} diff --git a/framework/Metrics/metrics/RepresentativityFactors.py b/framework/Metrics/metrics/RepresentativityFactors.py index b04150115b..1cdf03b368 100644 --- a/framework/Metrics/metrics/RepresentativityFactors.py +++ b/framework/Metrics/metrics/RepresentativityFactors.py @@ -56,29 +56,13 @@ def __init__(self): @ In, None @ Out, None """ - # Metric.__init__(self) super().__init__() # The type of given analysis self.actionType = None # True indicates the metric needs to be able to handle dynamic data - self._dynamicHandling = True + self._dynamicHandling = True # True indicates the metric needs to be able to handle pairwise data - self._pairwiseHandling = False - - def _localReadMoreXML(self, xmlNode): - """ - Method that reads the portion of the xml input that belongs to this specialized class - and initialize internal parameters - @ In, xmlNode, xml.etree.Element, Xml element node - @ Out, None - """ - paramInput = Metric.getInputSpecification()() - paramInput.parseNode(xmlNode) - for child in paramInput.subparts: - if child.getName() == "actionType": - self.order = child.value - else: - self.raiseAnError(IOError, "Unknown xml node ", child.getName(), " is provided for metric system") + self._pairwiseHandling = False def run(self, x, y, weights = None, axis = 0, **kwargs): """ @@ -106,4 +90,4 @@ def run(self, x, y, weights = None, axis = 0, **kwargs): r = (senFOMs.T @ covParameters @ senMeasurables)/\ np.sqrt(senFOMs.T @ covParameters @ senFOMs)/\ np.sqrt(senMeasurables.T @ covParameters @ senMeasurables) - return r \ No newline at end of file + return r diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml index 98308b482e..e1d60dee6c 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml @@ -25,28 +25,19 @@ p1,p2,F1, F2, F3 - outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 - outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 + outputDataMC2|F1, outputDataMC2|F2 simIndex - outputDataMC1|p1,outputDataMC1|p2 outputDataMC2|p1,outputDataMC2|p2 outputDataMC1|time outputDataMC2|time - - @@ -106,14 +97,9 @@ InputPlaceHolder - - - csv From ed2dd4fb5fe568df351827549209e272d4e9eed1 Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Tue, 12 Apr 2022 18:47:25 -0600 Subject: [PATCH 15/95] removing unnecessary commented lines --- framework/Metrics/metrics/RepresentativityFactors.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/framework/Metrics/metrics/RepresentativityFactors.py b/framework/Metrics/metrics/RepresentativityFactors.py index 
1cdf03b368..41bebb41b5 100644
--- a/framework/Metrics/metrics/RepresentativityFactors.py
+++ b/framework/Metrics/metrics/RepresentativityFactors.py
@@ -19,12 +19,10 @@
 #External Modules------------------------------------------------------------------------------------
 import numpy as np
 import copy
-#import scipy.spatial.distance as spatialDistance
 #External Modules End--------------------------------------------------------------------------------

 #Internal Modules------------------------------------------------------------------------------------
 from .MetricInterface import MetricInterface
-# from Metrics.metrics import MetricUtilities
 from utils import InputData, InputTypes
 #Internal Modules End--------------------------------------------------------------------------------

@@ -82,8 +80,6 @@ def run(self, x, y, weights = None, axis = 0, **kwargs):
     @ In, kwargs, dict, dictionary of parameters characteristic of each metric
     @ Out, value, float, metric result
     """
-    # assert (isinstance(x, np.ndarray))
-    # assert (isinstance(y, np.ndarray))
     senMeasurables = kwargs['senMeasurables']
     senFOMs = kwargs['senFOMs']
     covParameters = kwargs['covParameters']

From d780bda20f1b08c1285beb0f049397360cb04225 Mon Sep 17 00:00:00 2001
From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com>
Date: Wed, 13 Apr 2022 10:21:19 -0600
Subject: [PATCH 16/95] Updating test description

---
 .../test_validation_gate_representativityLinModel.xml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
index e1d60dee6c..259fc925ef 100644
--- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
+++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
@@ -12,9 +12,8 @@
     2021-04-29
     PostProcessors.Validation

-      This test is aimed to show how to use the mechanics of the Validation Post-Processor. For semplicity,
-      this test is using the attenuation model (analytical) and simple representativity factors metrics.
-      The output name convention is ``feature name''\underscore``target name''\underscore``metric name''=.
+      This test assesses the mechanics of the representativity workflow, one of the validation algorithms used in RAVEN.
+      This test uses a linear model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors.
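[Editor's note] The expectation of factors close to one quoted in this description follows from the formula in RepresentativityFactors.run() as it stands at this point in the series: when the mock (measurable) and target (FOM) sensitivity vectors coincide, the numerator and denominator are equal. Below is a minimal standalone sketch of that formula, not part of any patch; the sensitivity vectors and parameter covariance are made up.

# sketch_representativity.py -- illustrative only; mirrors the pre-sqrtm form of the metric
import numpy as np

senMeasurables = np.array([2.0, -3.0])     # d(feature)/d(parameter), mock model (made up)
senFOMs        = np.array([2.0, -3.0])     # d(FOM)/d(parameter), target model (made up)
covParameters  = np.array([[0.30, 0.05],
                           [0.05, 0.64]])  # parameter covariance (made up)

# same expression as RepresentativityFactors.run() at this stage of the series:
r = (senFOMs.T @ covParameters @ senMeasurables) \
    / np.sqrt(senFOMs.T @ covParameters @ senFOMs) \
    / np.sqrt(senMeasurables.T @ covParameters @ senMeasurables)
print(r)  # 1.0 -- identical sensitivities give a perfectly representative mock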
Added Modification for new PP API From 820373453ad853dfd4f2effec13142886b6aac4f Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Wed, 13 Apr 2022 10:26:58 -0600 Subject: [PATCH 17/95] more cleaning --- .../Validations/Probabilistic.py | 2 -- .../Validations/Representativity.py | 20 +------------------ 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/framework/Models/PostProcessors/Validations/Probabilistic.py b/framework/Models/PostProcessors/Validations/Probabilistic.py index 0d5970387a..76b488d840 100644 --- a/framework/Models/PostProcessors/Validations/Probabilistic.py +++ b/framework/Models/PostProcessors/Validations/Probabilistic.py @@ -125,8 +125,6 @@ def _getDataFromDataDict(self, datasets, var, names=None): pw = None if "|" in var and names is not None: do, feat = var.split("|") - # doIndex = names.index(do) - # dat = datasets[doIndex][feat] dat = datasets[do][feat] else: for doIndex, ds in enumerate(datasets): diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py index 36e850e3c8..02bce73749 100644 --- a/framework/Models/PostProcessors/Validations/Representativity.py +++ b/framework/Models/PostProcessors/Validations/Representativity.py @@ -27,14 +27,9 @@ #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ -#from utils import xmlUtils from utils import InputData, InputTypes -#import Files -#import Distributions -#import MetricDistributor from utils import utils from .. import ValidationBase -# from utils.mathUtils import partialDerivative, derivatives #Internal Modules End-------------------------------------------------------------------------------- class Representativity(ValidationBase): @@ -159,11 +154,8 @@ def initialize(self, runInfo, inputs, initDict): @ Out, None """ super().initialize(runInfo, inputs, initDict) - # self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.features), 'features':set(self.featureParameters),'prefix':'nsen'}]} self.stat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[1] for x in self.features]), 'features':set([x.split("|")[1] for x in self.featureParameters]),'prefix':'nsen'}]} - # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]} - # fakeRunInfo = {'workingDir':'','stepName':''} - self.stat.initialize(runInfo, inputs, initDict)#self.featureParameters, self.featureParameters, **kwargs + self.stat.initialize(runInfo, inputs, initDict) def _handleInput(self, paramInput): """ @@ -192,7 +184,6 @@ def run(self, inputIn): names=[] if isinstance(inputIn['Data'][0][-1], xr.Dataset): names = [self.getDataSetName(inp[-1]) for inp in inputIn['Data']] - # names = [inp[-1].attrs['name'] for inp in inputIn['Data']] if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None: if 'dynamic' not in self.dynamicType: #self.model.dataType: self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name)) @@ -214,24 +205,16 @@ def _evaluate(self, datasets, **kwargs): @ In, kwargs, dict, keyword arguments @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} """ - # 
self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] senMeasurables = self.stat.run({"Data":[[None, None, datasets[0]]]}) senFOMs = self.stat.run({"Data":[[None, None, datasets[1]]]}) - - # for data in datasets: - # sen = self.stat.run(data) names = kwargs.get('dataobjectNames') outs = {} - # for feat, targ, param, targParam in zip(self.features, self.targets, self.Parameters, self.targetParameters): for feat, targ, param, targParam in zip(self.features, self.targets, self.featureParameters, self.targetParameters): featData = self._getDataFromDatasets(datasets, feat, names) targData = self._getDataFromDatasets(datasets, targ, names) Parameters = self._getDataFromDatasets(datasets, param, names) targetParameters = self._getDataFromDatasets(datasets, targParam, names) - # senFOMs = partialDerivative(featData.data,np.atleast_2d(Parameters.data)[0,:],'x1') - # senFOMs = np.atleast_2d(Parameters[0])#.data - # senMeasurables = np.atleast_2d(targetParameters[0]) covParameters = senFOMs @ senMeasurables.T for metric in self.metrics: name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.name) @@ -263,7 +246,6 @@ def _getDataFromDatasets(self, datasets, var, names=None): elif 'ProbabilityWeight' in datasets[names.index(do)]: pw = datasets[doindex]['ProbabilityWeight'].values dim = len(dat.shape) - # (numRealizations, numHistorySteps) for MetricDistributor dat = dat.values if dim == 1: # the following reshaping does not require a copy From 9950169635bc4da4a025804e58c711678b0fe62d Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Wed, 13 Apr 2022 10:39:22 -0600 Subject: [PATCH 18/95] adding some descriptions to the inputs --- .../PostProcessors/Validations/Representativity.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py index 02bce73749..aafce11ad5 100644 --- a/framework/Models/PostProcessors/Validations/Representativity.py +++ b/framework/Models/PostProcessors/Validations/Representativity.py @@ -48,13 +48,17 @@ class cls. specifying input of cls. """ specs = super(Representativity, cls).getInputSpecification() - parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType) + parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType, + descr=r"""mock model parameters/inputs""") parametersInput.addParam("type", InputTypes.StringType) specs.addSub(parametersInput) - targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType) + targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType, + descr=r"""Target model parameters/inputs""") targetParametersInput.addParam("type", InputTypes.StringType) specs.addSub(targetParametersInput) - targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType) + targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType, + descr=r"""ID of the temporal variable of the target model. Default is ``time''. 
+ \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation).""") specs.addSub(targetPivotParameterInput) return specs From 9ed4c3619b0552764a21d285631c4f6ef8ca94a3 Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Wed, 13 Apr 2022 10:42:41 -0600 Subject: [PATCH 19/95] removing old methods --- .../Validations/Representativity.py | 73 +------------------ 1 file changed, 1 insertion(+), 72 deletions(-) diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py index aafce11ad5..deeb9b2b67 100644 --- a/framework/Models/PostProcessors/Validations/Representativity.py +++ b/framework/Models/PostProcessors/Validations/Representativity.py @@ -77,78 +77,6 @@ def __init__(self): self.stat = ppFactory.returnInstance('BasicStatistics') self.stat.what = ['NormalizedSensitivities'] # expected value calculation - # def initialize(self, runInfo, inputs, initDict=None): - # """ - # This function is used to initialize the plugin, i.e. set up working dir, - # call the initializePlugin method from the plugin - # @ In, runInfo, dict, it is the run info from the jobHandler - # @ In, inputs, list, it is a list containing whatever is passed with an input role in the step - # @ In, initDict, dict, optional, dictionary of all objects available in the step is using this model - # """ - # super().initialize(runInfo, inputs, initDict) - # if self._keepInputMeta: - # ## add meta keys from input data objects - # for inputObj in inputs: - # if isinstance(inputObj, DataObject.DataObject): - # metaKeys = inputObj.getVars('meta') - # self.addMetaKeys(metaKeys) - - - # def inputToInternal(self, currentInputs): - # """ - # Method to convert an input object into the internal format that is - # understandable by this pp. 
- # @ In, currentInputs, list or DataObject, data object or a list of data objects - # @ Out, measureList, list of (feature, target), the list of the features and targets to measure the distance between - # """ - # if type(currentInputs) != list: - # currentInputs = [currentInputs] - # hasPointSet = False - # hasHistorySet = False - # #Check for invalid types - # for currentInput in currentInputs: - # inputType = None - # if hasattr(currentInput, 'type'): - # inputType = currentInput.type - - # if isinstance(currentInput, Files.File): - # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") - # elif isinstance(currentInput, Distributions.Distribution): - # pass #Allowed type - # elif inputType == 'HDF5': - # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") - # elif inputType == 'PointSet': - # hasPointSet = True - # elif inputType == 'HistorySet': - # hasHistorySet = True - # if self.multiOutput == 'raw_values': - # self.dynamic = True - # if self.pivotParameter not in currentInput.getVars('indexes'): - # self.raiseAnError(IOError, self, 'Pivot parameter', self.pivotParameter,'has not been found in DataObject', currentInput.name) - # if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): - # self.raiseAnError(IOError, "HistorySet", currentInput.name," is not syncronized, please use Interfaced PostProcessor HistorySetSync to pre-process it") - # pivotValues = currentInput.asDataset()[self.pivotParameter].values - # if len(self.pivotValues) == 0: - # self.pivotValues = pivotValues - # elif set(self.pivotValues) != set(pivotValues): - # self.raiseAnError(IOError, "Pivot values for pivot parameter",self.pivotParameter, "in provided HistorySets are not the same") - # else: - # self.raiseAnError(IOError, "Metric cannot process "+inputType+ " of type "+str(type(currentInput))) - # if self.multiOutput == 'raw_values' and hasPointSet and hasHistorySet: - # self.multiOutput = 'mean' - # self.raiseAWarning("Reset 'multiOutput' to 'mean', since both PointSet and HistorySet are provided as Inputs. Calculation outputs will be aggregated by averaging") - - # measureList = [] - - # for cnt in range(len(self.features)): - # feature = self.features[cnt] - # target = self.targets[cnt] - # featureData = self.__getMetricSide(feature, currentInputs) - # targetData = self.__getMetricSide(target, currentInputs) - # measureList.append((featureData, targetData)) - - # return measureList - def initialize(self, runInfo, inputs, initDict): """ Method to initialize the DataMining pp. 
@@ -194,6 +122,7 @@ def run(self, inputIn): else: pivotParameter = self.pivotParameter evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()}#inputIn + ## TODO: This is a placeholder to remember the time dependent case # if pivotParameter: # # Uncomment this to cause crash: print(dataSets[0], pivotParameter) # if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): From e1d486803dfea700751c7496d1a25f6b53c9e9d5 Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Wed, 13 Apr 2022 10:48:15 -0600 Subject: [PATCH 20/95] camelBack --- framework/Models/PostProcessors/Validations/Representativity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py index deeb9b2b67..95a16eb36b 100644 --- a/framework/Models/PostProcessors/Validations/Representativity.py +++ b/framework/Models/PostProcessors/Validations/Representativity.py @@ -146,7 +146,7 @@ def _evaluate(self, datasets, **kwargs): for feat, targ, param, targParam in zip(self.features, self.targets, self.featureParameters, self.targetParameters): featData = self._getDataFromDatasets(datasets, feat, names) targData = self._getDataFromDatasets(datasets, targ, names) - Parameters = self._getDataFromDatasets(datasets, param, names) + parameters = self._getDataFromDatasets(datasets, param, names) targetParameters = self._getDataFromDatasets(datasets, targParam, names) covParameters = senFOMs @ senMeasurables.T for metric in self.metrics: From d5cc347e3b2698adfbdd51347d6d82e8ac9a478c Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Wed, 13 Apr 2022 10:53:21 -0600 Subject: [PATCH 21/95] cleaning test --- .../test_validation_gate_representativity.xml | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml index 89cf532459..046b0ec8ea 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml @@ -12,9 +12,8 @@ 2021-04-29 PostProcessors.Validation - This test is aimed to show how to use the mechanics of the Validation Post-Processor. For semplicity, - this test is using the attenuation model (analytical) and simple representativity factors metrics. - The output name convention is ``feature name''\underscore``target name''\underscore``metric name''=. + This test assesses the mechanics of the representativity workflow; one of the validation algorithms used in RAVEN. + This test uses a toy 1D slab reflective model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors. 
Added Modification for new PP API @@ -22,9 +21,6 @@ - time,phi_0,x,a_tilde,phi @@ -32,21 +28,15 @@ outputDataMC1|ans outputDataMC2|ans2 simIndex - outputDataMC1|x1,outputDataMC1|x2 outputDataMC2|x1,outputDataMC2|x2 outputDataMC1|time outputDataMC2|time - - @@ -106,14 +96,9 @@ InputPlaceHolder - - - csv From f59508be6c0cd755efe4f8034b876bbb1a7059fe Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Wed, 13 Apr 2022 16:35:18 -0600 Subject: [PATCH 22/95] trying to fix imports --- framework/Metrics/metrics/RepresentativityFactors.py | 2 +- framework/Models/PostProcessors/BasicStatistics.py | 6 +++--- .../Models/PostProcessors/Validations/Probabilistic.py | 2 +- .../Models/PostProcessors/Validations/Representativity.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/framework/Metrics/metrics/RepresentativityFactors.py b/framework/Metrics/metrics/RepresentativityFactors.py index 41bebb41b5..031fac635c 100644 --- a/framework/Metrics/metrics/RepresentativityFactors.py +++ b/framework/Metrics/metrics/RepresentativityFactors.py @@ -23,7 +23,7 @@ #Internal Modules------------------------------------------------------------------------------------ from .MetricInterface import MetricInterface -from utils import InputData, InputTypes +from ...utils import InputData, InputTypes #Internal Modules End-------------------------------------------------------------------------------- class RepresentativityFactors(MetricInterface): diff --git a/framework/Models/PostProcessors/BasicStatistics.py b/framework/Models/PostProcessors/BasicStatistics.py index 51a4647058..1cf64b1f93 100644 --- a/framework/Models/PostProcessors/BasicStatistics.py +++ b/framework/Models/PostProcessors/BasicStatistics.py @@ -28,9 +28,9 @@ #Internal Modules--------------------------------------------------------------- from .PostProcessorReadyInterface import PostProcessorReadyInterface -from utils import utils -from utils import InputData, InputTypes -from utils import mathUtils +from ...utils import utils +from ...utils import InputData, InputTypes +from ...utils import mathUtils import Files #Internal Modules End----------------------------------------------------------- diff --git a/framework/Models/PostProcessors/Validations/Probabilistic.py b/framework/Models/PostProcessors/Validations/Probabilistic.py index 76b488d840..7f6bdc637e 100644 --- a/framework/Models/PostProcessors/Validations/Probabilistic.py +++ b/framework/Models/PostProcessors/Validations/Probabilistic.py @@ -27,7 +27,7 @@ #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ -from utils import utils +from ....utils import utils from ..ValidationBase import ValidationBase #Internal Modules End-------------------------------------------------------------------------------- diff --git a/framework/Models/PostProcessors/Validations/Representativity.py b/framework/Models/PostProcessors/Validations/Representativity.py index 95a16eb36b..a3d67d31b9 100644 --- a/framework/Models/PostProcessors/Validations/Representativity.py +++ b/framework/Models/PostProcessors/Validations/Representativity.py @@ -27,8 +27,8 @@ #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ -from utils import InputData, InputTypes -from utils 
import utils +from ravenframework.utils import InputData, InputTypes +from ravenframework.utils import utils from .. import ValidationBase #Internal Modules End-------------------------------------------------------------------------------- From efb48e70805094d829cb15b0cf2eba54f577b04c Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Tue, 26 Apr 2022 11:59:55 -0600 Subject: [PATCH 23/95] remove changes from plugins --- plugins/HERON | 2 +- plugins/LOGOS | 2 +- plugins/SR2ML | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/HERON b/plugins/HERON index f9fbe2f81e..4e2700b15a 160000 --- a/plugins/HERON +++ b/plugins/HERON @@ -1 +1 @@ -Subproject commit f9fbe2f81ec16cc6209168d312425c934c134540 +Subproject commit 4e2700b15a9832390bf7ed38b6c5bdbf35605c1d diff --git a/plugins/LOGOS b/plugins/LOGOS index 014cf2e9a8..7234b8b5e8 160000 --- a/plugins/LOGOS +++ b/plugins/LOGOS @@ -1 +1 @@ -Subproject commit 014cf2e9a86edc35c73b2f5c415d29046f3e7e41 +Subproject commit 7234b8b5e80bc79526b4cbced7efd5ae482f7c44 diff --git a/plugins/SR2ML b/plugins/SR2ML index 15f441e58e..3ec137a504 160000 --- a/plugins/SR2ML +++ b/plugins/SR2ML @@ -1 +1 @@ -Subproject commit 15f441e58e828b4eec56c045acac059ec84d4933 +Subproject commit 3ec137a50496b6a28e54d5fbef66cb9ed7b0bfc2 From 3d2b542fcf7772aeb674b364aec04d80cbfc0ae5 Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Tue, 26 Apr 2022 12:02:10 -0600 Subject: [PATCH 24/95] update basic stats pp --- ravenframework/Models/PostProcessors/BasicStatistics.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ravenframework/Models/PostProcessors/BasicStatistics.py b/ravenframework/Models/PostProcessors/BasicStatistics.py index 396e3bce64..7777499767 100644 --- a/ravenframework/Models/PostProcessors/BasicStatistics.py +++ b/ravenframework/Models/PostProcessors/BasicStatistics.py @@ -27,19 +27,11 @@ #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -<<<<<<< HEAD:framework/Models/PostProcessors/BasicStatistics.py from .PostProcessorReadyInterface import PostProcessorReadyInterface from ...utils import utils from ...utils import InputData, InputTypes from ...utils import mathUtils import Files -======= -from .PostProcessorInterface import PostProcessorInterface -from ...utils import utils -from ...utils import InputData, InputTypes -from ...utils import mathUtils -from ... import Files ->>>>>>> devel:ravenframework/Models/PostProcessors/BasicStatistics.py #Internal Modules End----------------------------------------------------------- class BasicStatistics(PostProcessorReadyInterface): From 81659af50eb4bbe8b637cd6846007585d1529cdc Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Tue, 26 Apr 2022 20:28:47 -0600 Subject: [PATCH 25/95] update and clean up Basic Statistics PP --- .../Models/PostProcessors/BasicStatistics.py | 45 +++++++++---------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/ravenframework/Models/PostProcessors/BasicStatistics.py b/ravenframework/Models/PostProcessors/BasicStatistics.py index 7777499767..b08a0f556e 100644 --- a/ravenframework/Models/PostProcessors/BasicStatistics.py +++ b/ravenframework/Models/PostProcessors/BasicStatistics.py @@ -31,7 +31,7 @@ from ...utils import utils from ...utils import InputData, InputTypes from ...utils import mathUtils -import Files +from ... 
import Files #Internal Modules End----------------------------------------------------------- class BasicStatistics(PostProcessorReadyInterface): @@ -164,46 +164,39 @@ def __init__(self): self.sampleSize = None # number of sample size self.calculations = {} self.validDataType = ['PointSet', 'HistorySet', 'DataSet'] # The list of accepted types of DataObject + self.inputDataObjectName = None # name for input data object self.setInputDataType('xrDataset') - def inputToInternal(self, currentInp): + def inputToInternal(self, inputIn): """ - Method to convert an input object into the internal format that is + Method to select corresponding data from Data Objects and normalize the ProbabilityWeight of corresponding data understandable by this pp. - @ In, currentInp, object, an object that needs to be converted + @ In, inputIn, dict, a dictionary that contains the input Data Object information @ Out, (inputDataset, pbWeights), tuple, the dataset of inputs and the corresponding variable probability weight """ - # The BasicStatistics postprocessor only accept Datasets - currentInput = currentInp [-1] if type(currentInp) == list else currentInp + inpVars, outVars, dataSet = inputIn['Data'][0] pbWeights = None - # extract all required data from input DataObjects, an input dataset is constructed - inpVars, outVars, dataSet = currentInput['Data'][0] try: inputDataset = dataSet[self.parameters['targets']] except KeyError: missing = [var for var in self.parameters['targets'] if var not in dataSet] - self.raiseAnError(KeyError, "Variables: '{}' missing from dataset '{}'!".format(", ".join(missing),currentInput.name)) + self.raiseAnError(KeyError, "Variables: '{}' missing from dataset '{}'!".format(", ".join(missing),self.inputDataObjectName)) self.sampleTag = utils.first(dataSet.dims) if self.dynamic: dims = inputDataset.sizes.keys() if self.pivotParameter is None: - if len(dims) > 1: - self.raiseAnError(IOError, self, 'Time-dependent statistics is requested (HistorySet) but no pivotParameter \ - got inputted!') + self.raiseAnError(IOError, self, 'Time-dependent statistics is requested (HistorySet) but no pivotParameter \ + got inputted!') elif self.pivotParameter not in dims: self.raiseAnError(IOError, self, 'Pivot parameter', self.pivotParameter, 'is not the associated index for \ requested variables', ','.join(self.parameters['targets'])) - else: - self.dynamic = True - #if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): - # self.raiseAnError(IOError, "The data provided by the data objects", currentInput.name, "is not synchronized!") - self.pivotValue = inputDataset[self.pivotParameter].values - if self.pivotValue.size != len(inputDataset.groupby(self.pivotParameter)): - msg = "Duplicated values were identified in pivot parameter, please use the 'HistorySetSync'" + \ - " PostProcessor to syncronize your data before running 'BasicStatistics' PostProcessor." - self.raiseAnError(IOError, msg) + self.pivotValue = dataSet[self.pivotParameter].values + if self.pivotValue.size != len(dataSet.groupby(self.pivotParameter)): + msg = "Duplicated values were identified in pivot parameter, please use the 'HistorySetSync'" + \ + " PostProcessor to syncronize your data before running 'BasicStatistics' PostProcessor." 
+ self.raiseAnError(IOError, msg) # extract all required meta data self.pbPresent = 'ProbabilityWeight' in dataSet if self.pbPresent: @@ -229,6 +222,12 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ + if len(inputs)>1: + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one DataObject') + if self.pivotParameter is not None: + if not inputs[-1].checkIndexAlignment(indexesToCheck=self.pivotParameter): + self.raiseAnError(IOError, "The data provided by the input data object is not synchronized!") + self.inputDataObjectName = inputs[-1].name #construct a list of all the parameters that have requested values into self.allUsedParams self.allUsedParams = set() for metricName in self.scalarVals + self.vectorVals: @@ -1342,7 +1341,7 @@ def getCovarianceSubset(desired): if self.pivotParameter in outputSet.sizes.keys(): outputDict[self.pivotParameter] = np.atleast_1d(self.pivotValue) - return outputDict,outputSet + return outputDict def corrCoeff(self, covM): """ @@ -1528,7 +1527,7 @@ def run(self, inputIn): @ Out, outputSet, xarray.Dataset or dictionary, dataset or dictionary containing the results """ inputData = self.inputToInternal(inputIn) - _,outputSet = self.__runLocal(inputData) + outputSet = self.__runLocal(inputData) return outputSet def collectOutput(self, finishedJob, output): From 194f28250750a4c729bd764ca6290a700f9f16de Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Tue, 26 Apr 2022 23:27:04 -0600 Subject: [PATCH 26/95] update representativity --- .../Validations/Representativity.py | 68 ++++++++++++++++--- ...test_validation_gate_representativity2.xml | 20 ++---- 2 files changed, 64 insertions(+), 24 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index a3d67d31b9..df2814cbed 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -69,13 +69,24 @@ def __init__(self): @ Out, None """ super().__init__() - from Models.PostProcessors import factory as ppFactory # delay import to allow definition self.printTag = 'POSTPROCESSOR Representativity' self.dynamicType = ['static','dynamic'] # for now only static is available self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics self.name = 'Representativity' - self.stat = ppFactory.returnInstance('BasicStatistics') - self.stat.what = ['NormalizedSensitivities'] # expected value calculation + self.stat = [None, None] + self.featureDataObject = None + self.targetDataObject = None + + def getBasicStat(self): + """ + Get Basic Statistic PostProcessor + @ In, None + @ Out, stat, object, Basic Statistic PostProcessor Object + """ + from .. 
import factory as ppFactory # delay import to allow definition
+    stat = ppFactory.returnInstance('BasicStatistics')
+    stat.what = ['NormalizedSensitivities'] # expected value calculation
+    return stat

   def initialize(self, runInfo, inputs, initDict):
     """
@@ -86,8 +97,49 @@ def initialize(self, runInfo, inputs, initDict):
       @ Out, None
     """
     super().initialize(runInfo, inputs, initDict)
-    self.stat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[1] for x in self.features]), 'features':set([x.split("|")[1] for x in self.featureParameters]),'prefix':'nsen'}]}
-    self.stat.initialize(runInfo, inputs, initDict)
+    if len(inputs) != 2:
+      self.raiseAnError(IOError, "PostProcessor", self.name, "can only accept two DataObjects, but got {}!".format(str(len(inputs))))
+    params = self.features+self.targets+self.featureParameters+self.targetParameters
+    validParams = [True if "|" in x else False for x in params]
+    if not all(validParams):
+      notValid = list(np.asarray(params)[np.where(np.asarray(validParams)==False)[0]])
+      self.raiseAnError(IOError, "'Features', 'Targets', 'featureParameters', and 'targetParameters' should use 'DataObjectName|variable' format, but variables {} do not follow this rule.".format(','.join(notValid)))
+    # Assume features and targets are in the format of: DataObjectName|Variables
+    names = set([x.split("|")[0] for x in self.features] + [x.split("|")[0] for x in self.featureParameters])
+    if len(names) != 1:
+      self.raiseAnError(IOError, "'Features' and 'featureParameters' should come from the same DataObject, but they are present in different DataObjects:{}".format(','.join(names)))
+    featDataObject = list(names)[0]
+    names = set([x.split("|")[0] for x in self.targets] + [x.split("|")[0] for x in self.targetParameters])
+    if len(names) != 1:
+      self.raiseAnError(IOError, "'Targets' and 'targetParameters' should come from the same DataObject, but they are present in different DataObjects:{}".format(','.join(names)))
+    targetDataObject = list(names)[0]
+    featVars = [x.split("|")[-1] for x in self.features] + [x.split("|")[1] for x in self.featureParameters]
+    targVars = [x.split("|")[-1] for x in self.targets] + [x.split("|")[1] for x in self.targetParameters]
+
+    for i, inp in enumerate(inputs):
+      if inp.name == featDataObject:
+        self.featureDataObject = (inp, i)
+      else:
+        self.targetDataObject = (inp, i)
+
+    vars = self.featureDataObject[0].vars + self.featureDataObject[0].indexes
+    if not set(featVars).issubset(set(vars)):
+      missing = set(featVars) - set(vars)
+      self.raiseAnError(IOError, "Variables {} are missing from DataObject {}".format(','.join(missing), self.featureDataObject[0].name))
+    vars = self.targetDataObject[0].vars + self.targetDataObject[0].indexes
+    if not set(targVars).issubset(set(vars)):
+      missing = set(targVars) - set(vars)
+      self.raiseAnError(IOError, "Variables {} are missing from DataObject {}".format(','.join(missing), self.targetDataObject[0].name))
+
+    featStat = self.getBasicStat()
+    featStat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[-1] for x in self.features]), 'features':set([x.split("|")[-1] for x in self.featureParameters]),'prefix':'nsen'}]}
+    featStat.initialize(runInfo, [self.featureDataObject[0]], initDict)
+    self.stat[self.featureDataObject[-1]] = featStat
+    tartStat = self.getBasicStat()
+    tartStat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[-1] for x in self.targets]), 'features':set([x.split("|")[-1] for x in self.targetParameters]),'prefix':'nsen'}]}
+    tartStat.initialize(runInfo,
[self.targetDataObject[0]], initDict) + self.stat[self.targetDataObject[-1]] = tartStat + def _handleInput(self, paramInput): """ @@ -138,8 +190,8 @@ def _evaluate(self, datasets, **kwargs): @ In, kwargs, dict, keyword arguments @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} """ - senMeasurables = self.stat.run({"Data":[[None, None, datasets[0]]]}) - senFOMs = self.stat.run({"Data":[[None, None, datasets[1]]]}) + senMeasurables = self.stat[0].run({"Data":[[None, None, datasets[0]]]}) + senFOMs = self.stat[1].run({"Data":[[None, None, datasets[1]]]}) names = kwargs.get('dataobjectNames') outs = {} @@ -184,4 +236,4 @@ def _getDataFromDatasets(self, datasets, var, names=None): # the following reshaping does not require a copy dat.shape = (dat.shape[0], 1) data = dat, pw - return data \ No newline at end of file + return data diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml index 9b6e8dbafa..33e96e5611 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml @@ -10,7 +10,7 @@ framework/PostProcessors/Validation/test_validation_gate_representativity Mohammad Abdo (@Jimmy-INL) 2021-04-29 - PostProcessors.Validation + PostProcessors.Validation.Representativity This test is aimed to show how to use the mechanics of the Validation Post-Processor. For semplicity, this test is using the attenuation model (analytical) and simple representativity factors metrics. @@ -23,30 +23,22 @@ - x1,x2,ans,ans2 + x1, x2 + ans, ans2 - outputDataMC1|ans outputDataMC2|ans2 simIndex - outputDataMC1|x1,outputDataMC1|x2 outputDataMC2|x1,outputDataMC2|x2 outputDataMC1|time outputDataMC2|time - - @@ -106,14 +98,10 @@ InputPlaceHolder - + ans_ans2_simIndex - - csv From 3776f12090aac80a0b9c584d52515c133ba0972c Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Wed, 27 Apr 2022 12:13:24 -0600 Subject: [PATCH 27/95] update representativity --- .../Validations/Representativity.py | 31 ++++++++++++++++--- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index df2814cbed..cbf1cfe3b8 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -76,6 +76,7 @@ def __init__(self): self.stat = [None, None] self.featureDataObject = None self.targetDataObject = None + self.senPrefix = 'nsen' def getBasicStat(self): """ @@ -132,11 +133,11 @@ def initialize(self, runInfo, inputs, initDict): self.raiseAnError(IOError, "Variables {} are missing from DataObject {}".format(','.join(missing), self.targetDataObject[0].name)) featStat = self.getBasicStat() - featStat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[-1] for x in self.features]), 'features':set([x.split("|")[-1] for x in self.featureParameters]),'prefix':'nsen'}]} + featStat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[-1] for x in self.features]), 'features':set([x.split("|")[-1] for x in self.featureParameters]),'prefix':self.senPrefix}]} featStat.initialize(runInfo, [self.featureDataObject[0]], initDict) self.stat[self.featureDataObject[-1]] = featStat tartStat = self.getBasicStat() - 
tartStat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[-1] for x in self.targets]), 'features':set([x.split("|")[-1] for x in self.targetParameters]),'prefix':'nsen'}]}
+    tartStat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[-1] for x in self.targets]), 'features':set([x.split("|")[-1] for x in self.targetParameters]),'prefix':self.senPrefix}]}
     tartStat.initialize(runInfo, [self.targetDataObject[0]], initDict)
     self.stat[self.targetDataObject[-1]] = tartStat

@@ -190,8 +191,10 @@ def _evaluate(self, datasets, **kwargs):
       @ In, kwargs, dict, keyword arguments
       @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value}
     """
-    senMeasurables = self.stat[0].run({"Data":[[None, None, datasets[0]]]})
-    senFOMs = self.stat[1].run({"Data":[[None, None, datasets[1]]]})
+    sens = self.stat[self.featureDataObject[-1]].run({"Data":[[None, None, datasets[self.featureDataObject[-1]]]]})
+    senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens)
+    sens = self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]})
+    senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens)

     names = kwargs.get('dataobjectNames')
     outs = {}
@@ -202,10 +205,28 @@ def _evaluate(self, datasets, **kwargs):
       targetParameters = self._getDataFromDatasets(datasets, targParam, names)
       covParameters = senFOMs @ senMeasurables.T
       for metric in self.metrics:
-        name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.name)
+        name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name)
         outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters)
     return outs

+  def _generateSensitivityMatrix(self, outputs, inputs, sensDict):
+    """
+      Reconstruct sensitivity matrix from the Basic Statistic calculation
+      @ In, outputs, list, list of output variables
+      @ In, inputs, list, list of input variables
+      @ In, sensDict, dict, dictionary containing the sensitivities
+      @ Out, sensMatr, numpy.array, 2-D array of the reconstructed sensitivity matrix
+    """
+    sensMatr = np.zeros((len(outputs), len(inputs)))
+    inputVars = [x.split("|")[-1] for x in inputs]
+    outputVars = [x.split("|")[-1] for x in outputs]
+    for i, outVar in enumerate(outputVars):
+      for j, inpVar in enumerate(inputVars):
+        senName = "{}_{}_{}".format(self.senPrefix, outVar, inpVar)
+        # Assume static data (PointSets are provided as input)
+        sensMatr[i, j] = sensDict[senName][0]
+    return sensMatr
+
   def _getDataFromDatasets(self, datasets, var, names=None):
     """
       Utility function to retrieve the data from datasets

From 9b12b8991db8f1705ae4a52e1a98c29b642b374c Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Thu, 28 Apr 2022 11:06:57 -0600
Subject: [PATCH 28/95] adding linModel.py

---
 tests/framework/AnalyticModels/linModel.py | 46 ++++++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 tests/framework/AnalyticModels/linModel.py

diff --git a/tests/framework/AnalyticModels/linModel.py b/tests/framework/AnalyticModels/linModel.py
new file mode 100644
index 0000000000..a354fc4139
--- /dev/null
+++ b/tests/framework/AnalyticModels/linModel.py
@@ -0,0 +1,46 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#*************************************** +#* Simple analytic test ExternalModule * +#*************************************** +# +# Simulates a steady state linear model that maps $J-$parameters (i.e., $\mathbb{R}^J$) to k Responses +# +# External Modules +import numpy as np +################## + +A = np.array([[3, -3],[1,8],[-5, -5]]) +b = np.array([[0],[0],[0]]) + +def run(self,Input): + """ + Method require by RAVEN to run this as an external model. + @ In, self, object, object to store members on + @ In, Input, dict, dictionary containing inputs from RAVEN + @ Out, None + """ + self.F1,self.F2,self.F3 = main(Input) + +def main(Input): + y = A @ np.array(list(Input.values())).reshape(-1,1) + b + return y[:] + + +if __name__ == '__main__': + Input = {} + Input['x1'] = 5.5 + Input['x2'] = 8 + a,b,c = main(Input) + print(a,b,c) From 5a0964766cfd0640a5ead917972b9a2e455632cf Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 28 Apr 2022 13:52:56 -0600 Subject: [PATCH 29/95] changes to the metric --- .../Metrics/metrics/RepresentativityFactors.py | 9 ++++++--- .../PostProcessors/Validations/Representativity.py | 7 +++++-- .../test_validation_gate_representativityLinModel.xml | 2 +- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/ravenframework/Metrics/metrics/RepresentativityFactors.py b/ravenframework/Metrics/metrics/RepresentativityFactors.py index 031fac635c..b86af5cdcb 100644 --- a/ravenframework/Metrics/metrics/RepresentativityFactors.py +++ b/ravenframework/Metrics/metrics/RepresentativityFactors.py @@ -18,6 +18,8 @@ """ #External Modules------------------------------------------------------------------------------------ import numpy as np +import scipy as sp +from scipy.linalg import sqrtm import copy #External Modules End-------------------------------------------------------------------------------- @@ -83,7 +85,8 @@ def run(self, x, y, weights = None, axis = 0, **kwargs): senMeasurables = kwargs['senMeasurables'] senFOMs = kwargs['senFOMs'] covParameters = kwargs['covParameters'] - r = (senFOMs.T @ covParameters @ senMeasurables)/\ - np.sqrt(senFOMs.T @ covParameters @ senFOMs)/\ - np.sqrt(senMeasurables.T @ covParameters @ senMeasurables) + # r = (senFOMs.T @ covParameters @ senMeasurables)/\ + # np.sqrt(senFOMs.T @ covParameters @ senFOMs)/\ + # np.sqrt(senMeasurables.T @ covParameters @ senMeasurables) + r = (sp.linalg.pinv(sqrtm(senFOMs @ covParameters @ senFOMs.T)) @ sqrtm(senFOMs @ covParameters @ senMeasurables.T) @ sqrtm(senFOMs @ covParameters @ senMeasurables.T) @ sp.linalg.pinv(sqrtm(senMeasurables @ covParameters @ senMeasurables.T))).real return r diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index cbf1cfe3b8..0351299a6c 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -195,15 +195,18 @@ def _evaluate(self, datasets, **kwargs): senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens) sens = 
self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]}) senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens) - + c = np.zeros((datasets[0].dims['RAVEN_sample_ID'],len(self.featureParameters))) names = kwargs.get('dataobjectNames') outs = {} + ## TODO this loop is not needed for feat, targ, param, targParam in zip(self.features, self.targets, self.featureParameters, self.targetParameters): featData = self._getDataFromDatasets(datasets, feat, names) targData = self._getDataFromDatasets(datasets, targ, names) parameters = self._getDataFromDatasets(datasets, param, names) targetParameters = self._getDataFromDatasets(datasets, targParam, names) - covParameters = senFOMs @ senMeasurables.T + for ind,var in enumerate(self.featureParameters): + c[:,ind] = np.squeeze(self._getDataFromDatasets(datasets, var, names)[0]) + covParameters = c.T @ c for metric in self.metrics: name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml index 259fc925ef..60dc6fa455 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml @@ -26,7 +26,7 @@ outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 - outputDataMC2|F1, outputDataMC2|F2 + outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 simIndex outputDataMC1|p1,outputDataMC1|p2 outputDataMC2|p1,outputDataMC2|p2 From 9f503a048c666893b475a9ea025ac0bbadfda6f3 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 16 May 2022 10:26:55 -0600 Subject: [PATCH 30/95] updating linear representativity test --- .../test_validation_gate_representativityLinModel.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml index 60dc6fa455..1587557a83 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml @@ -53,7 +53,7 @@ - 10 + 100 dist1 From a39860d97af1ed967d54615194a9c4d550de384a Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 16 May 2022 12:20:09 -0600 Subject: [PATCH 31/95] pushing test_linModel --- .../Models/PostProcessors/Validations/Representativity.py | 6 +++--- tests/framework/AnalyticModels/linModel.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 0351299a6c..af3e8478b5 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -86,7 +86,7 @@ def getBasicStat(self): """ from .. 
import factory as ppFactory # delay import to allow definition
     stat = ppFactory.returnInstance('BasicStatistics')
-    stat.what = ['NormalizedSensitivities'] # expected value calculation
+    stat.what = ['sensitivity'] # expected value calculation
     return stat

   def initialize(self, runInfo, inputs, initDict):
@@ -133,11 +133,11 @@ def initialize(self, runInfo, inputs, initDict):
       self.raiseAnError(IOError, "Variables {} are missing from DataObject {}".format(','.join(missing), self.targetDataObject[0].name))

     featStat = self.getBasicStat()
-    featStat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[-1] for x in self.features]), 'features':set([x.split("|")[-1] for x in self.featureParameters]),'prefix':self.senPrefix}]}
+    featStat.toDo = {'sensitivity':[{'targets':set([x.split("|")[-1] for x in self.features]), 'features':set([x.split("|")[-1] for x in self.featureParameters]),'prefix':self.senPrefix}]}
     featStat.initialize(runInfo, [self.featureDataObject[0]], initDict)
     self.stat[self.featureDataObject[-1]] = featStat
     tartStat = self.getBasicStat()
-    tartStat.toDo = {'NormalizedSensitivity':[{'targets':set([x.split("|")[-1] for x in self.targets]), 'features':set([x.split("|")[-1] for x in self.targetParameters]),'prefix':self.senPrefix}]}
+    tartStat.toDo = {'sensitivity':[{'targets':set([x.split("|")[-1] for x in self.targets]), 'features':set([x.split("|")[-1] for x in self.targetParameters]),'prefix':self.senPrefix}]}
     tartStat.initialize(runInfo, [self.targetDataObject[0]], initDict)
     self.stat[self.targetDataObject[-1]] = tartStat

diff --git a/tests/framework/AnalyticModels/linModel.py b/tests/framework/AnalyticModels/linModel.py
index a354fc4139..56cc6cc188 100644
--- a/tests/framework/AnalyticModels/linModel.py
+++ b/tests/framework/AnalyticModels/linModel.py
@@ -21,7 +21,7 @@
 import numpy as np
 ##################

-A = np.array([[3, -3],[1,8],[-5, -5]])
+A = np.array([[2, -3],[1,8],[-5, -5]])
 b = np.array([[0],[0],[0]])

 def run(self,Input):

From 9e0c5e01a5970b08602970d6e452b21bbe7f90e0 Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Tue, 24 May 2022 15:44:28 -0600
Subject: [PATCH 32/95] adding helper functions and replicating metric in
 order to remove it later

---
 .../Validations/Probabilistic.py              |  28 +---
 .../Validations/Representativity.py           | 150 ++++++++++++++++--
 ...lidation_gate_representativityLinModel.xml |   2 +-
 3 files changed, 142 insertions(+), 38 deletions(-)

diff --git a/ravenframework/Models/PostProcessors/Validations/Probabilistic.py b/ravenframework/Models/PostProcessors/Validations/Probabilistic.py
index 6b8bb02cbe..b7a41d59cc 100644
--- a/ravenframework/Models/PostProcessors/Validations/Probabilistic.py
+++ b/ravenframework/Models/PostProcessors/Validations/Probabilistic.py
@@ -81,29 +81,6 @@ def run(self, inputIn):
       @ In, inputIn, list, dictionary of data to process
       @ Out, outputDict, dict, dictionary containing the post-processed results
     """
-    # inpVars, outVars, dataSet = inputIn['Data'][0]
-    # dataSets = [data for _, _, data in inputIn['Data']]
-    dataDict = {data.attrs['name']: data for _, _, data in inputIn['Data']}
-    pivotParameter = self.pivotParameter
-    names = [inp[-1].attrs['name'] for inp in inputIn['Data']]
-    if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None:
-      if 'dynamic' not in self.dynamicType: #self.model.dataType:
-        self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name))
-    # else:
-    #   pivotParameter 
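# A rough sketch of the dictionary the 'sensitivity' request above ends up
# producing: one scalar per (output, input) pair, keyed "<prefix>_<out>_<in>"
# (senPrefix is 'nsen' at this point in the series). The least-squares fit is
# only an illustration; for the linear test model F = A p + b it recovers the
# rows of A.
import numpy as np

rng = np.random.default_rng(42)
A = np.array([[2, -3], [1, 8], [-5, -5]])
p = rng.normal([5.5, 8.0], [0.55, 0.8], size=(1000, 2))   # sampled parameters
F = p @ A.T                                               # measurables, b = 0
coeffs, *_ = np.linalg.lstsq(np.c_[p, np.ones(len(p))], F, rcond=None)
sens = {'nsen_F{}_p{}'.format(i + 1, j + 1): coeffs[j, i]  # dF_i/dp_j ~ A[i, j]
        for i in range(3) for j in range(2)}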
= self.pivotParameter - # # check if pivotParameter - # if pivotParameter: - # # in case of dataobjects we check that the dataobject is either an HistorySet or a DataSet - # if isinstance(inputIn['Data'][0][-1], xr.Dataset) and not all([True if inp.type in ['HistorySet', 'DataSet'] else False for inp in inputIn]): - # self.raiseAnError(RuntimeError, "The pivotParameter '{}' has been inputted but PointSets have been used as input of PostProcessor '{}'".format(pivotParameter, self.name)) - # if not all([True if pivotParameter in inp else False for inp in dataSets]): - # self.raiseAnError(RuntimeError, "The pivotParameter '{}' not found in datasets used as input of PostProcessor '{}'".format(pivotParameter, self.name)) - - - evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataDict, **{'dataobjectNames': names}).items()} - - if pivotParameter: - #if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): dataDict = {self.getDataSetName(data): data for _, _, data in inputIn['Data']} pivotParameter = self.pivotParameter names = [self.getDataSetName(inp[-1]) for inp in inputIn['Data']] @@ -117,7 +94,6 @@ def run(self, inputIn): self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0]))) if pivotParameter not in evaluation: evaluation[pivotParameter] = inputIn['Data'][0][-1]['time'] - #evaluation[pivotParameter] = dataSets[0][pivotParameter] return evaluation ### utility functions @@ -131,9 +107,7 @@ def _evaluate(self, datasets, **kwargs): names = kwargs.get('dataobjectNames') outputDict = {} for feat, targ in zip(self.features, self.targets): - # featData = self._getDataFromDatasets(datasets, feat, names) featData = self._getDataFromDataDict(datasets, feat, names) - # targData = self._getDataFromDatasets(datasets, targ, names) targData = self._getDataFromDataDict(datasets, targ, names) for metric in self.metrics: name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) @@ -168,4 +142,4 @@ def _getDataFromDataDict(self, datasets, var, names=None): # the following reshaping does not require a copy dat.shape = (dat.shape[0], 1) data = dat, pw - return data + return data \ No newline at end of file diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index af3e8478b5..8411ff395f 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -24,6 +24,8 @@ #External Modules------------------------------------------------------------------------------------ import numpy as np import xarray as xr +import scipy as sp +from scipy.linalg import sqrtm #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ @@ -86,7 +88,7 @@ def getBasicStat(self): """ from .. 
import factory as ppFactory # delay import to allow definition stat = ppFactory.returnInstance('BasicStatistics') - stat.what = ['sensitivity'] # expected value calculation + stat.what = ['NormalizedSensitivities'] # expected value calculation return stat def initialize(self, runInfo, inputs, initDict): @@ -191,28 +193,74 @@ def _evaluate(self, datasets, **kwargs): @ In, kwargs, dict, keyword arguments @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} """ + self._addRefValues(datasets[0], self.featureParameters, self.features) + self._addRefValues(datasets[1], self.targetParameters, self.targets) + self._computeErrors(datasets[0],self.featureParameters, self.features) + self._computeErrors(datasets[1],self.targetParameters, self.targets) + self._addRefValues(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters],['err_' + s2.split("|")[-1] for s2 in self.features]) + self._addRefValues(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targets]) + UParVar = self._computeUncertaintyinParametersErrorMatrix(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters]) + UMeasurablesVar = self._computeUncertaintyinParametersErrorMatrix(datasets[0],['err_' + s.split("|")[-1] for s in self.features]) + UFOMsVar = self._computeUncertaintyinParametersErrorMatrix(datasets[1],['err_' + s.split("|")[-1] for s in self.targets]) + sens = self.stat[self.featureDataObject[-1]].run({"Data":[[None, None, datasets[self.featureDataObject[-1]]]]}) - senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens) + senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens, datasets) + sens = self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]}) - senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens) - c = np.zeros((datasets[0].dims['RAVEN_sample_ID'],len(self.featureParameters))) + senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens, datasets) + r,r_exact = self._calculateBiasFactor(senMeasurables, senFOMs, UParVar, UMeasurablesVar) + + names = kwargs.get('dataobjectNames') outs = {} - ## TODO this loop is not needed for feat, targ, param, targParam in zip(self.features, self.targets, self.featureParameters, self.targetParameters): featData = self._getDataFromDatasets(datasets, feat, names) targData = self._getDataFromDatasets(datasets, targ, names) parameters = self._getDataFromDatasets(datasets, param, names) targetParameters = self._getDataFromDatasets(datasets, targParam, names) - for ind,var in enumerate(self.featureParameters): - c[:,ind] = np.squeeze(self._getDataFromDatasets(datasets, var, names)[0]) - covParameters = c.T @ c + # covParameters = senFOMs @ senMeasurables.T for metric in self.metrics: name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) + + + + # ## Analysis: + # 1. Identify Models: Mock experiment, and Target model. + # 2. Generate Data + samples = datasets[0] + # data = self._getDataFromDatasets(featu,) + # 3. Propagate error from parameters to experiment and target outputs. 
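# A runnable companion to this step, mirroring the commented-out
# _propagateErrors helper further below. The linear map and the sampling
# distribution are assumptions borrowed from the linModel test.
import numpy as np

def propagateErrors(A, b, samples):
  """
    Push parameter samples through y = A @ p + b and summarize the outputs.
    @ In, A, np.ndarray, model matrix (nOutputs x nParams)
    @ In, b, np.ndarray, constant offset (nOutputs,)
    @ In, samples, np.ndarray, parameter realizations (nSamples x nParams)
    @ Out, (avg, cov, outs), tuple, output mean, covariance, and raw samples
  """
  outs = samples @ A.T + b
  return np.average(outs, axis=0), np.cov(outs.T), outs

rng = np.random.default_rng(0)
samples = rng.multivariate_normal([5.5, 8.0], np.diag([0.3, 0.6]), size=1000)
avg, cov, outs = propagateErrors(np.array([[2, -3], [1, 8], [-5, -5]]),
                                 np.zeros(3), samples)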
+ _, _, mes_samples = self._propagateErrors(samples)#par, par_var,Exp_A,Exp_b,samples + # Avg = + + + Fsim_check, CFsim,_ = propagateErrors(par, par_var, Exp_A, Exp_b,samples) + _,_,FOM_samples = Propagate_errors(par, par_var,Tar_A,Tar_b,samples) + Upar, Upar_var = Transform_to_error_space_stoch(par, par_var, par, samples) + Umes, Umes_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) + # Umes, Umes_var = Transform_to_error_space_stoch(Fmes, CFmes, Fmes, mes_samples) + UF, UF_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) + # 4. Normalize the data (transform to relative errors) + expNormalizedSen = normalizeSensetivities(par, F, G) + mesParametersNormalizedSen = normalizeSensetivities(par, F, G) + nSF = normalizeSensetivities(par, F, G) + nSFOM = normalizeSensetivities(par, FOM, G) + # 5. Compute correction in parameters + par_tilde, par_var_tilde = Parameter_correction_theory(par, Upar, Upar_var, Umes, Umes_var, expNormalizedSen) + pm_tilde, Cpm_tilde = Parameter_correction_theory(par, Upar, Upar_var,UF, UF_var,mesParametersNormalizedSen) + # 6. Compute correction in targets + FOMsim_tilde_theory, FOMsim_var_tilde_theory, UFOMsim_var_tilde_theory, Umes_var, UFOM_var_tilde_no_Umes_var, Inner1 = Target_correction_theory(par, FOM, Upar, Upar_var, Umes, Umes_var, mesParametersNormalizedSen, expNormalizedSen) + # 7. Computer representativity factor + r,r_exact, UFOMsim_var_tilde_rep,UFOMsim_var_tilde_rep_exact = Representativity(par, Upar, Upar_var, F, nSF, nSFOM, Umes_var) + print('==== Representativity ====') + print('r') + print(r) + print('UFOMsim_var_tilde_rep') + print(UFOMsim_var_tilde_rep) return outs - def _generateSensitivityMatrix(self, outputs, inputs, sensDict): + def _generateSensitivityMatrix(self, outputs, inputs, sensDict, datasets, normalize=True): """ Reconstruct sensitivity matrix from the Basic Statistic calculation @ In, inputs, list, list of input variables @@ -227,7 +275,10 @@ def _generateSensitivityMatrix(self, outputs, inputs, sensDict): for j, inpVar in enumerate(inputVars): senName = "{}_{}_{}".format(self.senPrefix, outVar, inpVar) # Assume static data (PointSets are provided as input) - sensMatr[i, j] = sensDict[senName][0] + if not normalize: + sensMatr[i, j] = sensDict[senName][0] + else: + sensMatr[i, j] = sensDict[senName][0]* datasets[0][inpVar].meanValue / datasets[0][outVar].meanValue return sensMatr def _getDataFromDatasets(self, datasets, var, names=None): @@ -261,3 +312,82 @@ def _getDataFromDatasets(self, datasets, var, names=None): dat.shape = (dat.shape[0], 1) data = dat, pw return data + + def _addRefValues(self, datasets, features, targets): + for var in [x.split("|")[-1] for x in features + targets]: #datasets.data_vars + datasets[var].attrs['meanValue'] = np.mean(datasets[var].values) + for var2 in [x.split("|")[-1] for x in features + targets]: + if var == var2: + datasets[var2].attrs['var'] = np.var(datasets[var].values) + else: + datasets[var2].attrs['cov_'+str(var)] = np.cov(datasets[var2].values,datasets[var].values) + return datasets + + def _computeErrors(self,datasets,features,targets): + for var in [x.split("|")[-1] for x in features + targets]: + datasets['err_'+str(var)] = (datasets[var].values - datasets[var].attrs['meanValue'])/datasets[var].attrs['meanValue'] + # for var2 in [x.split("|")[-1] for x in features + targets]: + # datasets[var].attrs['err_cov_'+str(var)] = np.cov((datasets[var2].values - datasets[var2].attrs['meanValue'])/datasets[var2].attrs['meanValue'],(datasets[var2].values - 
datasets[var2].attrs['meanValue'])/datasets[var].attrs['meanValue']) + + # def _propagateErrors(self,data): + # # par = [data[var.split("|")[1]] for var in self.featureParameters] + # # par_var = xr.cov(par[0],par[1]) + # # Trans_samples = np.zeros((np.shape(data)[0],np.shape(A)[0])) + # for ind,samp in enumerate(data): + # Trans_samples[ind,:] = linModel(A,samp,b) + # Avg = np.average(Trans_samples, axis=0) + # C = np.cov(Trans_samples.T) + # return Avg, C, Trans_samples + + def _computeUncertaintyinParametersErrorMatrix(self, data, parameters): + + uncertMatr = np.zeros((len(parameters), len(parameters))) + # inputVars = [x.split("|")[-1] for x in parameters] + # outputVars = [x.split("|")[-1] for x in outputs] + for i, var1 in enumerate(parameters): + for j, var2 in enumerate(parameters): + if var1 == var2: + uncertMatr[i, j] = data[var1].attrs['var'] + else: + uncertMatr[i, j] = data[var1].attrs['cov_'+var2][0,1] + return uncertMatr + + def _ParameterCorrectionTheory(par, Upar, UparVar, Umes, UmesVar, normalizedSen): + pass + + def _calculateBiasFactor(self, normalizedSenExp, normalizedSenTar, UparVar, UmesVar=None): + # Compute representativity (#eq 79) + r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T))).real + rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar))).real + return r, rExact + +def run2(self): + # ## Analysis: + # 1. Identify Models: Mock experiment, and Target model. + # 2. Generate Data + samples = genData(par, par_var, nSamples) + # 3. Propagate error from parameters to experiment and target outputs. + _, _, mes_samples = Propagate_errors(par, par_var,Exp_A,Exp_b,samples) + Fsim_check, CFsim,_ = Propagate_errors(par, par_var, Exp_A, Exp_b,samples) + _,_,FOM_samples = Propagate_errors(par, par_var,Tar_A,Tar_b,samples) + Upar, Upar_var = Transform_to_error_space_stoch(par, par_var, par, samples) + Umes, Umes_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) + # Umes, Umes_var = Transform_to_error_space_stoch(Fmes, CFmes, Fmes, mes_samples) + UF, UF_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) + # 4. Normalize the data (transform to relative errors) + expNormalizedSen = normalizeSensetivities(par, F, G) + mesParametersNormalizedSen = normalizeSensetivities(par, F, G) + nSF = normalizeSensetivities(par, F, G) + nSFOM = normalizeSensetivities(par, FOM, G) + # 5. Compute correction in parameters + par_tilde, par_var_tilde = Parameter_correction_theory(par, Upar, Upar_var, Umes, Umes_var, expNormalizedSen) + pm_tilde, Cpm_tilde = Parameter_correction_theory(par, Upar, Upar_var,UF, UF_var,mesParametersNormalizedSen) + # 6. Compute correction in targets + FOMsim_tilde_theory, FOMsim_var_tilde_theory, UFOMsim_var_tilde_theory, Umes_var, UFOM_var_tilde_no_Umes_var, Inner1 = Target_correction_theory(par, FOM, Upar, Upar_var, Umes, Umes_var, mesParametersNormalizedSen, expNormalizedSen) + # 7. 
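# A quick sanity check of the bias factor in eq. 79, as a minimal sketch with
# assumed matrices: when the mock experiment and the target share the same
# normalized sensitivities and no measurement noise is added, r must be the
# identity.
import numpy as np
import scipy as sp
from scipy.linalg import sqrtm

S = np.array([[1.0, 0.5], [0.2, 2.0]])   # common sensitivity matrix (assumed)
C = np.diag([0.01, 0.04])                # parameter covariance (assumed)
chol = sqrtm(S @ C @ S.T)
r = (sp.linalg.pinv(chol) @ chol @ chol @ sp.linalg.pinv(chol)).real
assert np.allclose(r, np.eye(2))         # perfect representativity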
Computer representativity factor + r,r_exact, UFOMsim_var_tilde_rep,UFOMsim_var_tilde_rep_exact = Representativity(par, Upar, Upar_var, F, nSF, nSFOM, Umes_var) + print('==== Representativity ====') + print('r') + print(r) + print('UFOMsim_var_tilde_rep') + print(UFOMsim_var_tilde_rep) \ No newline at end of file diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml index 1587557a83..7e0bd69ff2 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml @@ -53,7 +53,7 @@ - 100 + 1000 dist1 From 21408dbd75b518b7de7240d8730d53c099518a22 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 25 May 2022 22:36:53 -0600 Subject: [PATCH 33/95] removing the metric gradually --- .../Validations/Representativity.py | 96 +++++++++---------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 8411ff395f..5529e4ecf8 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -211,54 +211,54 @@ def _evaluate(self, datasets, **kwargs): r,r_exact = self._calculateBiasFactor(senMeasurables, senFOMs, UParVar, UMeasurablesVar) - names = kwargs.get('dataobjectNames') - outs = {} - for feat, targ, param, targParam in zip(self.features, self.targets, self.featureParameters, self.targetParameters): - featData = self._getDataFromDatasets(datasets, feat, names) - targData = self._getDataFromDatasets(datasets, targ, names) - parameters = self._getDataFromDatasets(datasets, param, names) - targetParameters = self._getDataFromDatasets(datasets, targParam, names) - # covParameters = senFOMs @ senMeasurables.T - for metric in self.metrics: - name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) - outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) - - - - # ## Analysis: - # 1. Identify Models: Mock experiment, and Target model. - # 2. Generate Data - samples = datasets[0] - # data = self._getDataFromDatasets(featu,) - # 3. Propagate error from parameters to experiment and target outputs. - _, _, mes_samples = self._propagateErrors(samples)#par, par_var,Exp_A,Exp_b,samples - # Avg = - - - Fsim_check, CFsim,_ = propagateErrors(par, par_var, Exp_A, Exp_b,samples) - _,_,FOM_samples = Propagate_errors(par, par_var,Tar_A,Tar_b,samples) - Upar, Upar_var = Transform_to_error_space_stoch(par, par_var, par, samples) - Umes, Umes_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) - # Umes, Umes_var = Transform_to_error_space_stoch(Fmes, CFmes, Fmes, mes_samples) - UF, UF_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) - # 4. Normalize the data (transform to relative errors) - expNormalizedSen = normalizeSensetivities(par, F, G) - mesParametersNormalizedSen = normalizeSensetivities(par, F, G) - nSF = normalizeSensetivities(par, F, G) - nSFOM = normalizeSensetivities(par, FOM, G) - # 5. 
Compute correction in parameters - par_tilde, par_var_tilde = Parameter_correction_theory(par, Upar, Upar_var, Umes, Umes_var, expNormalizedSen) - pm_tilde, Cpm_tilde = Parameter_correction_theory(par, Upar, Upar_var,UF, UF_var,mesParametersNormalizedSen) - # 6. Compute correction in targets - FOMsim_tilde_theory, FOMsim_var_tilde_theory, UFOMsim_var_tilde_theory, Umes_var, UFOM_var_tilde_no_Umes_var, Inner1 = Target_correction_theory(par, FOM, Upar, Upar_var, Umes, Umes_var, mesParametersNormalizedSen, expNormalizedSen) - # 7. Computer representativity factor - r,r_exact, UFOMsim_var_tilde_rep,UFOMsim_var_tilde_rep_exact = Representativity(par, Upar, Upar_var, F, nSF, nSFOM, Umes_var) - print('==== Representativity ====') - print('r') - print(r) - print('UFOMsim_var_tilde_rep') - print(UFOMsim_var_tilde_rep) - return outs + # names = kwargs.get('dataobjectNames') + # outs = {} + # for feat, targ, param, targParam in zip(self.features, self.targets, self.featureParameters, self.targetParameters): + # featData = self._getDataFromDatasets(datasets, feat, names) + # targData = self._getDataFromDatasets(datasets, targ, names) + # parameters = self._getDataFromDatasets(datasets, param, names) + # targetParameters = self._getDataFromDatasets(datasets, targParam, names) + # # covParameters = senFOMs @ senMeasurables.T + # for metric in self.metrics: + # name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) + # outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) + + + + # # ## Analysis: + # # 1. Identify Models: Mock experiment, and Target model. + # # 2. Generate Data + # samples = datasets[0] + # # data = self._getDataFromDatasets(featu,) + # # 3. Propagate error from parameters to experiment and target outputs. + # _, _, mes_samples = self._propagateErrors(samples)#par, par_var,Exp_A,Exp_b,samples + # # Avg = + + + # Fsim_check, CFsim,_ = propagateErrors(par, par_var, Exp_A, Exp_b,samples) + # _,_,FOM_samples = Propagate_errors(par, par_var,Tar_A,Tar_b,samples) + # Upar, Upar_var = Transform_to_error_space_stoch(par, par_var, par, samples) + # Umes, Umes_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) + # # Umes, Umes_var = Transform_to_error_space_stoch(Fmes, CFmes, Fmes, mes_samples) + # UF, UF_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) + # # 4. Normalize the data (transform to relative errors) + # expNormalizedSen = normalizeSensetivities(par, F, G) + # mesParametersNormalizedSen = normalizeSensetivities(par, F, G) + # nSF = normalizeSensetivities(par, F, G) + # nSFOM = normalizeSensetivities(par, FOM, G) + # # 5. Compute correction in parameters + # par_tilde, par_var_tilde = Parameter_correction_theory(par, Upar, Upar_var, Umes, Umes_var, expNormalizedSen) + # pm_tilde, Cpm_tilde = Parameter_correction_theory(par, Upar, Upar_var,UF, UF_var,mesParametersNormalizedSen) + # # 6. Compute correction in targets + # FOMsim_tilde_theory, FOMsim_var_tilde_theory, UFOMsim_var_tilde_theory, Umes_var, UFOM_var_tilde_no_Umes_var, Inner1 = Target_correction_theory(par, FOM, Upar, Upar_var, Umes, Umes_var, mesParametersNormalizedSen, expNormalizedSen) + # # 7. 
Computer representativity factor + # r,r_exact, UFOMsim_var_tilde_rep,UFOMsim_var_tilde_rep_exact = Representativity(par, Upar, Upar_var, F, nSF, nSFOM, Umes_var) + # print('==== Representativity ====') + # print('r') + # print(r) + # print('UFOMsim_var_tilde_rep') + # print(UFOMsim_var_tilde_rep) + return #outs def _generateSensitivityMatrix(self, outputs, inputs, sensDict, datasets, normalize=True): """ From 289f026f318ac23ef67acdc84a6724866010de77 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 26 May 2022 16:39:02 -0600 Subject: [PATCH 34/95] changing required metrics to zero_to_infinity --- .../Models/PostProcessors/Metric.py | 2 +- .../Models/PostProcessors/ValidationBase.py | 2 +- .../Validations/Representativity.py | 29 +++++++++++-------- ...lidation_gate_representativityLinModel.xml | 6 ++-- 4 files changed, 22 insertions(+), 17 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Metric.py b/ravenframework/Models/PostProcessors/Metric.py index fd882d4fbc..8cc796fcaa 100644 --- a/ravenframework/Models/PostProcessors/Metric.py +++ b/ravenframework/Models/PostProcessors/Metric.py @@ -89,7 +89,7 @@ def __init__(self): self.pivotParameter = None self.pivotValues = [] # assembler objects to be requested - self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity) + self.addAssemblerObject('Metric', InputData.Quantity.zero_to_infinity) def __getMetricSide(self, metricDataName, currentInputs): """ diff --git a/ravenframework/Models/PostProcessors/ValidationBase.py b/ravenframework/Models/PostProcessors/ValidationBase.py index f00c26c455..3b12c1a455 100644 --- a/ravenframework/Models/PostProcessors/ValidationBase.py +++ b/ravenframework/Models/PostProcessors/ValidationBase.py @@ -91,7 +91,7 @@ def __init__(self): self.targets = None # list of target variables self.pivotValues = None # pivot values (present if dynamic == True) - self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity) + self.addAssemblerObject('Metric', InputData.Quantity.zero_to_infinity) self.addAssemblerObject('PreProcessor', InputData.Quantity.zero_to_infinity) ## dataset option self.setInputDataType('xrDataset') diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 5529e4ecf8..a2ddedf8c2 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -73,7 +73,7 @@ def __init__(self): super().__init__() self.printTag = 'POSTPROCESSOR Representativity' self.dynamicType = ['static','dynamic'] # for now only static is available - self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics + # self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics self.name = 'Representativity' self.stat = [None, None] self.featureDataObject = None @@ -212,16 +212,21 @@ def _evaluate(self, datasets, **kwargs): # names = kwargs.get('dataobjectNames') - # outs = {} - # for feat, targ, param, targParam in zip(self.features, self.targets, self.featureParameters, self.targetParameters): - # featData = self._getDataFromDatasets(datasets, feat, names) - # targData = self._getDataFromDatasets(datasets, targ, names) - # parameters = self._getDataFromDatasets(datasets, param, names) - # targetParameters = self._getDataFromDatasets(datasets, targParam, names) - # # covParameters = senFOMs @ senMeasurables.T - # for metric in self.metrics: - # name = 
"{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) - # outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) + outs = {} + for i,targ in enumerate(self.targets): + for j,feat in enumerate(self.features): + # featData = self._getDataFromDatasets(datasets, feat, names) + # targData = self._getDataFromDatasets(datasets, targ, names) + # parameters = self._getDataFromDatasets(datasets, param, names) + # targetParameters = self._getDataFromDatasets(datasets, targParam, names) + # covParameters = senFOMs @ senMeasurables.T + name1 = "BiasFactor_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1]) + name2 = "ExactBiasFactor_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1]) + outs[name1] = r_exact[i,j] + outs[name2] = r[i,j] + # for metric in self.metrics: + # name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) + # outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) @@ -258,7 +263,7 @@ def _evaluate(self, datasets, **kwargs): # print(r) # print('UFOMsim_var_tilde_rep') # print(UFOMsim_var_tilde_rep) - return #outs + return outs def _generateSensitivityMatrix(self, outputs, inputs, sensDict, datasets, normalize=True): """ diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml index 7e0bd69ff2..0cb35ef46e 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml @@ -27,7 +27,7 @@ outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 - simIndex + outputDataMC1|p1,outputDataMC1|p2 outputDataMC2|p1,outputDataMC2|p2 outputDataMC1|time @@ -35,9 +35,9 @@ - + From 06e21462cbecfd66be5ea71bd9176a90887b5d69 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 30 May 2022 18:44:20 -0600 Subject: [PATCH 35/95] adding initial representativity _evaluate function --- .../Validations/Representativity.py | 70 +++++++------------ ...lidation_gate_representativityLinModel.xml | 4 ++ 2 files changed, 29 insertions(+), 45 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index a2ddedf8c2..f3778597f0 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -72,13 +72,12 @@ def __init__(self): """ super().__init__() self.printTag = 'POSTPROCESSOR Representativity' - self.dynamicType = ['static','dynamic'] # for now only static is available - # self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics + self.dynamicType = ['static'] # for now only static is available self.name = 'Representativity' self.stat = [None, None] self.featureDataObject = None self.targetDataObject = None - self.senPrefix = 'nsen' + self.senPrefix = 'sen' def getBasicStat(self): """ @@ -193,64 +192,45 @@ def _evaluate(self, datasets, **kwargs): @ In, kwargs, dict, keyword arguments @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} """ - self._addRefValues(datasets[0], self.featureParameters, self.features) - 
self._addRefValues(datasets[1], self.targetParameters, self.targets) + # # ## Analysis: + # # 1. Compute mean and variance: + # For mock model + self._computeMoments(datasets[0], self.featureParameters, self.features) + # For target model + self._computeMoments(datasets[1], self.targetParameters, self.targets) + # # 2. Propagate error from parameters to experiment and target outputs. + # For mock model self._computeErrors(datasets[0],self.featureParameters, self.features) + # For target model self._computeErrors(datasets[1],self.targetParameters, self.targets) - self._addRefValues(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters],['err_' + s2.split("|")[-1] for s2 in self.features]) - self._addRefValues(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targets]) + # # 3. Compute mean and variance in the error space: + self._computeMoments(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters],['err_' + s2.split("|")[-1] for s2 in self.features]) + self._computeMoments(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targets]) + # # 4. Compute Uncertainties in parameters UParVar = self._computeUncertaintyinParametersErrorMatrix(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters]) + # # 5. Compute Uncertainties in outputs + # Outputs of Mock model (Measurables F_i) UMeasurablesVar = self._computeUncertaintyinParametersErrorMatrix(datasets[0],['err_' + s.split("|")[-1] for s in self.features]) + # Outputs of Target model (Targets FOM_i) UFOMsVar = self._computeUncertaintyinParametersErrorMatrix(datasets[1],['err_' + s.split("|")[-1] for s in self.targets]) - + # # 6. Compute Normalized Uncertainties + # In mock experiment outputs (measurables) sens = self.stat[self.featureDataObject[-1]].run({"Data":[[None, None, datasets[self.featureDataObject[-1]]]]}) senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens, datasets) - + # In target outputs (FOMs) sens = self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]}) senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens, datasets) + # # 7. Compute representativities r,r_exact = self._calculateBiasFactor(senMeasurables, senFOMs, UParVar, UMeasurablesVar) - - - # names = kwargs.get('dataobjectNames') + # # 8. Create outputs outs = {} for i,targ in enumerate(self.targets): for j,feat in enumerate(self.features): - # featData = self._getDataFromDatasets(datasets, feat, names) - # targData = self._getDataFromDatasets(datasets, targ, names) - # parameters = self._getDataFromDatasets(datasets, param, names) - # targetParameters = self._getDataFromDatasets(datasets, targParam, names) - # covParameters = senFOMs @ senMeasurables.T name1 = "BiasFactor_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1]) name2 = "ExactBiasFactor_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1]) outs[name1] = r_exact[i,j] outs[name2] = r[i,j] - # for metric in self.metrics: - # name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) - # outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) - - - - # # ## Analysis: - # # 1. Identify Models: Mock experiment, and Target model. - # # 2. 
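# Steps 2-4 above in miniature: transform raw samples to relative errors
# around their mean, then take the covariance of those errors. The two-column
# sample array is an assumption used purely for illustration.
import numpy as np

rng = np.random.default_rng(1)
p = rng.normal([5.5, 8.0], [0.55, 0.8], size=(1000, 2))   # raw parameter samples
err = (p - p.mean(axis=0)) / p.mean(axis=0)               # as in _computeErrors
UparVar = np.cov(err.T)   # as in _computeUncertaintyinParametersErrorMatrix
# the diagonal holds relative variances (~0.01 here); off-diagonals ~ 0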
Generate Data - # samples = datasets[0] - # # data = self._getDataFromDatasets(featu,) - # # 3. Propagate error from parameters to experiment and target outputs. - # _, _, mes_samples = self._propagateErrors(samples)#par, par_var,Exp_A,Exp_b,samples - # # Avg = - - - # Fsim_check, CFsim,_ = propagateErrors(par, par_var, Exp_A, Exp_b,samples) - # _,_,FOM_samples = Propagate_errors(par, par_var,Tar_A,Tar_b,samples) - # Upar, Upar_var = Transform_to_error_space_stoch(par, par_var, par, samples) - # Umes, Umes_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) - # # Umes, Umes_var = Transform_to_error_space_stoch(Fmes, CFmes, Fmes, mes_samples) - # UF, UF_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) - # # 4. Normalize the data (transform to relative errors) - # expNormalizedSen = normalizeSensetivities(par, F, G) - # mesParametersNormalizedSen = normalizeSensetivities(par, F, G) - # nSF = normalizeSensetivities(par, F, G) - # nSFOM = normalizeSensetivities(par, FOM, G) + ## TODO: # # 5. Compute correction in parameters # par_tilde, par_var_tilde = Parameter_correction_theory(par, Upar, Upar_var, Umes, Umes_var, expNormalizedSen) # pm_tilde, Cpm_tilde = Parameter_correction_theory(par, Upar, Upar_var,UF, UF_var,mesParametersNormalizedSen) @@ -318,7 +298,7 @@ def _getDataFromDatasets(self, datasets, var, names=None): data = dat, pw return data - def _addRefValues(self, datasets, features, targets): + def _computeMoments(self, datasets, features, targets): for var in [x.split("|")[-1] for x in features + targets]: #datasets.data_vars datasets[var].attrs['meanValue'] = np.mean(datasets[var].values) for var2 in [x.split("|")[-1] for x in features + targets]: diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml index 0cb35ef46e..8a4f37fe37 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml @@ -96,6 +96,10 @@ InputPlaceHolder + + BiasFactor_F1_F1,BiasFactor_F1_F2,BiasFactor_F1_F3,BiasFactor_F2_F1,BiasFactor_F2_F2,BiasFactor_F2_F3,BiasFactor_F3_F1,BiasFactor_F3_F2,BiasFactor_F3_F3, + ExactBiasFactor_F1_F1,ExactBiasFactor_F1_F2,ExactBiasFactor_F1_F3,ExactBiasFactor_F2_F1,ExactBiasFactor_F2_F2,ExactBiasFactor_F2_F3,ExactBiasFactor_F3_F1,ExactBiasFactor_F3_F2,ExactBiasFactor_F3_F3 + From cdf78114e959461ee6fc8cf7b6c34f2a11129cdb Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 30 May 2022 19:04:01 -0600 Subject: [PATCH 36/95] adding pysensors to the dependencies --- dependencies.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/dependencies.xml b/dependencies.xml index 423dcd7466..f2dab84b0c 100644 --- a/dependencies.xml +++ b/dependencies.xml @@ -66,6 +66,7 @@ Note all install methods after "main" take 1.1 + From 2e7670af9bc1b9b0fa9e1325e6ddf50491eca64a Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 7 Jun 2022 12:26:24 -0600 Subject: [PATCH 37/95] Adding reduced covariance (corrected Uncertainty) --- .../Validations/Representativity.py | 59 ++++++++----------- ...lidation_gate_representativityLinModel.xml | 6 +- 2 files changed, 26 insertions(+), 39 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index f3778597f0..6133ac8ce8 100644 --- 
a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -221,15 +221,27 @@ def _evaluate(self, datasets, **kwargs): sens = self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]}) senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens, datasets) # # 7. Compute representativities - r,r_exact = self._calculateBiasFactor(senMeasurables, senFOMs, UParVar, UMeasurablesVar) - # # 8. Create outputs + r,rExact = self._calculateBiasFactor(senMeasurables, senFOMs, UParVar, UMeasurablesVar) + # # 8. Compute corrected Uncertainties + UtarVarTilde = self._calculateCovofTargetErrorsfromBiasFactor(senFOMs,UParVar,r) + UtarVarTildeExact = self._calculateCovofTargetErrorsfromBiasFactor(senFOMs,UParVar,rExact) + # # 9. Create outputs outs = {} for i,targ in enumerate(self.targets): for j,feat in enumerate(self.features): - name1 = "BiasFactor_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1]) - name2 = "ExactBiasFactor_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1]) - outs[name1] = r_exact[i,j] - outs[name2] = r[i,j] + name1 = "BiasFactor_Mock{}_Tar{}".format(feat.split("|")[-1], targ.split("|")[-1]) + name2 = "ExactBiasFactor_Mock{}_Tar{}".format(feat.split("|")[-1], targ.split("|")[-1]) + outs[name1] = r[i,j] + outs[name2] = rExact[i,j] + for k,tar in enumerate(self.targets): + if k == i: + name3 = "CorrectedVar_Tar{}".format(tar.split("|")[-1]) + name4 = "ExactCorrectedVar_Tar{}".format(tar.split("|")[-1]) + else: + name3 = "CorrectedCov_Tar{}_Tar{}".format(targ.split("|")[-1], tar.split("|")[-1]) + name4 = "ExactCorrectedCov_Tar{}_Tar{}".format(targ.split("|")[-1], tar.split("|")[-1]) + outs[name3] = UtarVarTilde[i,k] + outs[name4] = UtarVarTildeExact[i,k] ## TODO: # # 5. Compute correction in parameters # par_tilde, par_var_tilde = Parameter_correction_theory(par, Upar, Upar_var, Umes, Umes_var, expNormalizedSen) @@ -346,33 +358,8 @@ def _calculateBiasFactor(self, normalizedSenExp, normalizedSenTar, UparVar, Umes rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar))).real return r, rExact -def run2(self): - # ## Analysis: - # 1. Identify Models: Mock experiment, and Target model. - # 2. Generate Data - samples = genData(par, par_var, nSamples) - # 3. Propagate error from parameters to experiment and target outputs. - _, _, mes_samples = Propagate_errors(par, par_var,Exp_A,Exp_b,samples) - Fsim_check, CFsim,_ = Propagate_errors(par, par_var, Exp_A, Exp_b,samples) - _,_,FOM_samples = Propagate_errors(par, par_var,Tar_A,Tar_b,samples) - Upar, Upar_var = Transform_to_error_space_stoch(par, par_var, par, samples) - Umes, Umes_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) - # Umes, Umes_var = Transform_to_error_space_stoch(Fmes, CFmes, Fmes, mes_samples) - UF, UF_var = Transform_to_error_space_stoch(F, F_var, F, mes_samples) - # 4. Normalize the data (transform to relative errors) - expNormalizedSen = normalizeSensetivities(par, F, G) - mesParametersNormalizedSen = normalizeSensetivities(par, F, G) - nSF = normalizeSensetivities(par, F, G) - nSFOM = normalizeSensetivities(par, FOM, G) - # 5. 
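# The corrected covariance of eq. 80 at its two extremes, on assumed toy
# values: a bias factor equal to the identity wipes out the target
# uncertainty, while a zero bias factor leaves the prior untouched.
import numpy as np
from scipy.linalg import sqrtm

senTar = np.array([[1.0, 0.5], [0.2, 2.0]])   # assumed normalized sensitivities
UparVar = np.diag([0.01, 0.04])               # assumed parameter-error covariance
chol = sqrtm(senTar @ UparVar @ senTar.T).real
for r in (np.eye(2), np.zeros((2, 2))):
  UtarVarTilde = chol @ (np.eye(2) - r @ r.T) @ chol
  # r = I -> UtarVarTilde ~ 0 ; r = 0 -> UtarVarTilde = senTar @ UparVar @ senTar.T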
Compute correction in parameters
-    par_tilde, par_var_tilde = Parameter_correction_theory(par, Upar, Upar_var, Umes, Umes_var, expNormalizedSen)
-    pm_tilde, Cpm_tilde = Parameter_correction_theory(par, Upar, Upar_var,UF, UF_var,mesParametersNormalizedSen)
-    # 6. Compute correction in targets
-    FOMsim_tilde_theory, FOMsim_var_tilde_theory, UFOMsim_var_tilde_theory, Umes_var, UFOM_var_tilde_no_Umes_var, Inner1 = Target_correction_theory(par, FOM, Upar, Upar_var, Umes, Umes_var, mesParametersNormalizedSen, expNormalizedSen)
-    # 7. Computer representativity factor
-    r,r_exact, UFOMsim_var_tilde_rep,UFOMsim_var_tilde_rep_exact = Representativity(par, Upar, Upar_var, F, nSF, nSFOM, Umes_var)
-    print('==== Representativity ====')
-    print('r')
-    print(r)
-    print('UFOMsim_var_tilde_rep')
-    print(UFOMsim_var_tilde_rep)
\ No newline at end of file
+  def _calculateCovofTargetErrorsfromBiasFactor(self, normalizedSenTar, UparVar, r):
+    # re-compute Utar_var_tilde from r (#eq 80)
+    chol = sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T).real
+    UtarVarTilde = chol @ (np.eye(np.shape(r)[0]) - r @ r.T) @ chol
+    return UtarVarTilde
\ No newline at end of file
diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
index 8a4f37fe37..2b8fece95f 100644
--- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
+++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
@@ -53,7 +53,7 @@
     
-      1000
+      100
       dist1

@@ -97,8 +97,8 @@
         InputPlaceHolder
         
-          BiasFactor_F1_F1,BiasFactor_F1_F2,BiasFactor_F1_F3,BiasFactor_F2_F1,BiasFactor_F2_F2,BiasFactor_F2_F3,BiasFactor_F3_F1,BiasFactor_F3_F2,BiasFactor_F3_F3,
-          ExactBiasFactor_F1_F1,ExactBiasFactor_F1_F2,ExactBiasFactor_F1_F3,ExactBiasFactor_F2_F1,ExactBiasFactor_F2_F2,ExactBiasFactor_F2_F3,ExactBiasFactor_F3_F1,ExactBiasFactor_F3_F2,ExactBiasFactor_F3_F3
+          BiasFactor_MockF1_TarF1,BiasFactor_MockF1_TarF2,BiasFactor_MockF1_TarF3,BiasFactor_MockF2_TarF1,BiasFactor_MockF2_TarF2,BiasFactor_MockF2_TarF3,BiasFactor_MockF3_TarF1,BiasFactor_MockF3_TarF2,BiasFactor_MockF3_TarF3,
+          ExactBiasFactor_MockF1_TarF1,ExactBiasFactor_MockF1_TarF2,ExactBiasFactor_MockF1_TarF3,ExactBiasFactor_MockF2_TarF1,ExactBiasFactor_MockF2_TarF2,ExactBiasFactor_MockF2_TarF3,ExactBiasFactor_MockF3_TarF1,ExactBiasFactor_MockF3_TarF2,ExactBiasFactor_MockF3_TarF3

From f5696b5a6c082a96ff83b21a8c76186dbfa2afb8 Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Wed, 8 Jun 2022 14:15:43 -0600
Subject: [PATCH 38/95] reverting dependencies.xml

---
 dependencies.xml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/dependencies.xml b/dependencies.xml
index f2dab84b0c..423dcd7466 100644
--- a/dependencies.xml
+++ b/dependencies.xml
@@ -66,7 +66,6 @@ Note all install methods after "main" take
   1.1
   
-  
 

From 49640659a37407ffd90235aca6e66705f2daf1 Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Mon, 13 Jun 2022 15:14:12 -0600
Subject: [PATCH 39/95] reporting more outputs

---
 .../Validations/Representativity.py           | 134 ++++++++++++++----
 ...lidation_gate_representativityLinModel.xml |  47 +++++-
 2 files changed, 154 insertions(+), 27 deletions(-)

diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py
index 6133ac8ce8..f7f67b2555 100644
--- a/ravenframework/Models/PostProcessors/Validations/Representativity.py
+++ 
b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -38,6 +38,12 @@ class Representativity(ValidationBase): """ Representativity is a base class for validation problems It represents the base class for most validation problems + + @ Authors: Mohammad abdo (@Jimmy-INL) + Congjian Wang (@) + Andrea Alfonsi (@) + Aaron Epiney (@) + """ @classmethod @@ -196,23 +202,33 @@ def _evaluate(self, datasets, **kwargs): # # 1. Compute mean and variance: # For mock model self._computeMoments(datasets[0], self.featureParameters, self.features) + measurableNames = [s.split("|")[-1] for s in self.features] + measurables = [datasets[0][var].meanValue for var in measurableNames] # For target model self._computeMoments(datasets[1], self.targetParameters, self.targets) + FOMNames = [s.split("|")[-1] for s in self.targets] + FOMs = np.atleast_2d([datasets[1][var].meanValue for var in FOMNames]).reshape(-1,1) # # 2. Propagate error from parameters to experiment and target outputs. # For mock model self._computeErrors(datasets[0],self.featureParameters, self.features) + measurableErrorNames = ['err_' + s.split("|")[-1] for s in self.features] + FOMErrorNames = ['err_' + s.split("|")[-1] for s in self.targets] + self._computeMoments(datasets[0], measurableErrorNames, measurableErrorNames) + UMeasurables = np.atleast_2d([datasets[0][var].meanValue for var in measurableErrorNames]).reshape(-1,1) # For target model self._computeErrors(datasets[1],self.targetParameters, self.targets) + self._computeMoments(datasets[1], FOMErrorNames, FOMErrorNames) + UFOMs = np.atleast_2d([datasets[1][var].meanValue for var in FOMErrorNames]).reshape(-1,1) # # 3. Compute mean and variance in the error space: self._computeMoments(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters],['err_' + s2.split("|")[-1] for s2 in self.features]) self._computeMoments(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targets]) # # 4. Compute Uncertainties in parameters - UParVar = self._computeUncertaintyinParametersErrorMatrix(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters]) + UparVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters]) # # 5. Compute Uncertainties in outputs # Outputs of Mock model (Measurables F_i) - UMeasurablesVar = self._computeUncertaintyinParametersErrorMatrix(datasets[0],['err_' + s.split("|")[-1] for s in self.features]) + UMeasurablesVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.features]) # Outputs of Target model (Targets FOM_i) - UFOMsVar = self._computeUncertaintyinParametersErrorMatrix(datasets[1],['err_' + s.split("|")[-1] for s in self.targets]) + UFOMsVar = self._computeUncertaintyMatrixInErrors(datasets[1],['err_' + s.split("|")[-1] for s in self.targets]) # # 6. Compute Normalized Uncertainties # In mock experiment outputs (measurables) sens = self.stat[self.featureDataObject[-1]].run({"Data":[[None, None, datasets[self.featureDataObject[-1]]]]}) @@ -221,13 +237,33 @@ def _evaluate(self, datasets, **kwargs): sens = self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]}) senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens, datasets) # # 7. 
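# How the relative (normalized) sensitivities used in this step are obtained
# from absolute ones; a short numpy sketch with assumed means, matching the
# normalize branch of _generateSensitivityMatrix shown further below.
import numpy as np

sensAbs = np.array([[2., -3.], [1., 8.], [-5., -5.]])   # dF_i/dp_j (assumed)
pMean = np.array([5.5, 8.0])                            # parameter means (assumed)
fMean = sensAbs @ pMean                                 # output means for b = 0
sensRel = sensAbs * pMean[None, :] / fMean[:, None]     # (dF/F) / (dp/p)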
Compute representativities - r,rExact = self._calculateBiasFactor(senMeasurables, senFOMs, UParVar, UMeasurablesVar) + r,rExact = self._calculateBiasFactor(senMeasurables, senFOMs, UparVar, UMeasurablesVar) # # 8. Compute corrected Uncertainties - UtarVarTilde = self._calculateCovofTargetErrorsfromBiasFactor(senFOMs,UParVar,r) - UtarVarTildeExact = self._calculateCovofTargetErrorsfromBiasFactor(senFOMs,UParVar,rExact) + UtarVarTilde = self._calculateCovofTargetErrorsfromBiasFactor(senFOMs,UparVar,r) + UtarVarTildeExact = self._calculateCovofTargetErrorsfromBiasFactor(senFOMs,UparVar,rExact) + # # 9 Compute Corrected Targets, + # for var in self.targets: + # self._getDataFromDatasets(datasets, var, names=None) + parametersNames = [s.split("|")[-1] for s in self.featureParameters] + par = np.atleast_2d([datasets[0][var].meanValue for var in parametersNames]).reshape(-1,1) + correctedTargets, correctedTargetCovariance, correctedTargetErrorCov, UtarVarTilde_no_Umes_var, Inner1 = self._targetCorrection(FOMs, UparVar, UMeasurables, UMeasurablesVar, senFOMs, senMeasurables) + correctedParameters, correctedParametersCovariance = self._parameterCorrection(par, UparVar, UMeasurables, UMeasurablesVar, senMeasurables) # # 9. Create outputs outs = {} + for i,param in enumerate(self.featureParameters): + name4 = "CorrectedParameters_{}".format(param.split("|")[-1]) + outs[name4] = correctedParameters[i] + for j, param2 in enumerate(self.featureParameters): + if param == param2: + name5 = "VarianceInCorrectedParameters_{}".format(param.split("|")[-1]) + outs[name5] = correctedParametersCovariance[i,i] + else: + name6 = "CovarianceInCorrectedParameters_{}_{}".format(param.split("|")[-1],param2.split("|")[-1]) + outs[name6] = correctedParametersCovariance[i,j] + for i,targ in enumerate(self.targets): + name3 = "CorrectedTargets_{}".format(targ.split("|")[-1]) + outs[name3] = correctedTargets[i] for j,feat in enumerate(self.features): name1 = "BiasFactor_Mock{}_Tar{}".format(feat.split("|")[-1], targ.split("|")[-1]) name2 = "ExactBiasFactor_Mock{}_Tar{}".format(feat.split("|")[-1], targ.split("|")[-1]) @@ -278,6 +314,22 @@ def _generateSensitivityMatrix(self, outputs, inputs, sensDict, datasets, normal sensMatr[i, j] = sensDict[senName][0]* datasets[0][inpVar].meanValue / datasets[0][outVar].meanValue return sensMatr + # def _generateMatrixFromDataset(self,dataset,rows,cols): + # """ + # Reconstruct sensitivity matrix from the Basic Statistic calculation + # @ In, rows, list, list of rows names + # @ In, cols, list, list of colums names + # @ In, + # @ Out, matr, numpy.array, 2-D array of the reconstructed matrix + # """ + # matr = np.zeros((len(rows), len(cols))) + # inputVars = [x.split("|")[-1] for x in rows] + # outputVars = [x.split("|")[-1] for x in cols] + # for i, outVar in enumerate(outputVars): + # for j, inpVar in enumerate(inputVars): + # matr[i, j] = dataset[senName] + # return matr + def _getDataFromDatasets(self, datasets, var, names=None): """ Utility function to retrieve the data from datasets @@ -323,24 +375,10 @@ def _computeMoments(self, datasets, features, targets): def _computeErrors(self,datasets,features,targets): for var in [x.split("|")[-1] for x in features + targets]: datasets['err_'+str(var)] = (datasets[var].values - datasets[var].attrs['meanValue'])/datasets[var].attrs['meanValue'] - # for var2 in [x.split("|")[-1] for x in features + targets]: - # datasets[var].attrs['err_cov_'+str(var)] = np.cov((datasets[var2].values - 
datasets[var2].attrs['meanValue'])/datasets[var2].attrs['meanValue'],(datasets[var2].values - datasets[var2].attrs['meanValue'])/datasets[var].attrs['meanValue']) - - # def _propagateErrors(self,data): - # # par = [data[var.split("|")[1]] for var in self.featureParameters] - # # par_var = xr.cov(par[0],par[1]) - # # Trans_samples = np.zeros((np.shape(data)[0],np.shape(A)[0])) - # for ind,samp in enumerate(data): - # Trans_samples[ind,:] = linModel(A,samp,b) - # Avg = np.average(Trans_samples, axis=0) - # C = np.cov(Trans_samples.T) - # return Avg, C, Trans_samples - def _computeUncertaintyinParametersErrorMatrix(self, data, parameters): + def _computeUncertaintyMatrixInErrors(self, data, parameters): uncertMatr = np.zeros((len(parameters), len(parameters))) - # inputVars = [x.split("|")[-1] for x in parameters] - # outputVars = [x.split("|")[-1] for x in outputs] for i, var1 in enumerate(parameters): for j, var2 in enumerate(parameters): if var1 == var2: @@ -349,9 +387,6 @@ def _computeUncertaintyinParametersErrorMatrix(self, data, parameters): uncertMatr[i, j] = data[var1].attrs['cov_'+var2][0,1] return uncertMatr - def _ParameterCorrectionTheory(par, Upar, UparVar, Umes, UmesVar, normalizedSen): - pass - def _calculateBiasFactor(self, normalizedSenExp, normalizedSenTar, UparVar, UmesVar=None): # Compute representativity (#eq 79) r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T))).real @@ -362,4 +397,53 @@ def _calculateCovofTargetErrorsfromBiasFactor(self, normalizedSenTar, UparVar, r # re-compute Utar_var_tilde from r (#eq 80) chol = sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T).real UtarVarTilde = chol @ (np.eye(np.shape(r)[0]) - r @ r.T) @ chol - return UtarVarTilde \ No newline at end of file + return UtarVarTilde + + def _parameterCorrection(self, par, UparVar, Umes, UmesVar, normalizedSen): #eq 48 and eq 67 + # Compute adjusted par #eq 48 + UparTilde = UparVar @ normalizedSen.T @ np.linalg.pinv(normalizedSen @ UparVar @ normalizedSen.T + UmesVar) @ Umes + + # back transform to parameters + parTilde = UparTilde * par + par + + # Compute adjusted par_var #eq 67 + UparVarTilde = UparVar - UparVar @ normalizedSen.T @ np.linalg.pinv(normalizedSen @ UparVar @ normalizedSen.T + UmesVar) @ normalizedSen @ UparVar + + # back transform the variance + UparVarTildeDiag = np.diagonal(UparVarTilde) + for ind,c in enumerate(UparVarTildeDiag): + if c<0: + UparVarTilde[ind,ind] = 0 + UparVarTildeDiag2 = np.sqrt(UparVarTildeDiag) + UparVarTildeDiag3 = UparVarTildeDiag2 * np.squeeze(par) + parVarTilde = np.square(UparVarTildeDiag3) + parVarTilde = np.diag(parVarTilde) + return parTilde, parVarTilde + + def _targetCorrection(self, FOMs, UparVar, Umes, UmesVar, normalizedSenTar, normalizedSenExp): + # Compute adjusted target #eq 71 + UtarTilde = normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenTar.T + UmesVar) @ Umes + # back transform to parameters + tarTilde = UtarTilde * FOMs + FOMs + + # Compute adjusted par_var #eq 74 + UtarVarTilde = normalizedSenTar @ UparVar @ normalizedSenTar.T - normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenTar.T + UmesVar) @ normalizedSenExp @ UparVar @ normalizedSenTar.T + + # back transform the variance + UtarVarTildeDiag = 
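# The GLLS-style update of _parameterCorrection (eqs. 48 and 67) in isolation,
# with assumed toy matrices: the measured errors pull the parameter errors
# toward the data, and the posterior variances can only shrink.
import numpy as np

S = np.array([[1.0, 0.5], [0.2, 2.0]])   # normalized sensitivities (assumed)
C = np.diag([0.01, 0.04])                # prior parameter-error covariance
Cm = 0.001 * np.eye(2)                   # measurement-error covariance (assumed)
Umes = np.array([[0.02], [-0.01]])       # observed relative errors (assumed)
gain = C @ S.T @ np.linalg.pinv(S @ C @ S.T + Cm)
UparTilde = gain @ Umes                  # eq 48
UparVarTilde = C - gain @ S @ C          # eq 67
assert np.all(np.diag(UparVarTilde) <= np.diag(C) + 1e-12)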
np.diagonal(UtarVarTilde) + for ind,c in enumerate(UtarVarTildeDiag): + if c<0: + UtarVarTilde[ind,ind] = 0 + UtarVarTildeDiag2 = np.sqrt(UtarVarTildeDiag) + UtarVarTildeDiag3 = UtarVarTildeDiag2 * np.squeeze(FOMs) + tarVarTilde = np.square(UtarVarTildeDiag3) + tarVarTilde = np.diag(tarVarTilde) + + # Compute adjusted par_var neglecting UmesVar (to compare to representativity) + # The representativity (#eq 79 negelcts UmesVar) + Inner1 = (normalizedSenExp @ UparVar) @ normalizedSenExp.T + UtarVarztilde_no_UmesVar = (normalizedSenTar @ UparVar @ normalizedSenTar.T)\ + - (normalizedSenTar @ UparVar @ normalizedSenExp.T)\ + @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenExp.T)\ + @ (normalizedSenExp @ UparVar @ normalizedSenTar.T) + return tarTilde, tarVarTilde, UtarVarTilde, UtarVarztilde_no_UmesVar, Inner1 \ No newline at end of file diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml index 2b8fece95f..f267161e2b 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml @@ -97,8 +97,51 @@ InputPlaceHolder - BiasFactor_MockF1_TarF1,BiasFactor_MockF1_TarF2,BiasFactor_MockF1_TarF3,BiasFactor_MockF2_TarF1,BiasFactor_MockF2_TarF2,BiasFactor_MockF2_TarF3,BiasFactor_MockF3_TarF1,BiasFactor_MockF3_TarF2,BiasFactor_MockF3_TarF3, - ExactBiasFactor_MockF1_TarF1,ExactBiasFactor_MockF1_TarF2,ExactBiasFactor_MockF1_TarF3,ExactBiasFactor_MockF2_TarF1,ExactBiasFactor_MockF2_TarF2,ExactBiasFactor_MockF2_TarF3,ExactBiasFactor_MockF3_TarF1,ExactBiasFactor_MockF3_TarF2,ExactBiasFactor_MockF3_TarF3 + BiasFactor_MockF1_TarF1, + BiasFactor_MockF1_TarF2, + BiasFactor_MockF1_TarF3, + BiasFactor_MockF2_TarF1, + BiasFactor_MockF2_TarF2, + BiasFactor_MockF2_TarF3, + BiasFactor_MockF3_TarF1, + BiasFactor_MockF3_TarF2, + BiasFactor_MockF3_TarF3, + ExactBiasFactor_MockF1_TarF1, + ExactBiasFactor_MockF1_TarF2, + ExactBiasFactor_MockF1_TarF3, + ExactBiasFactor_MockF2_TarF1, + ExactBiasFactor_MockF2_TarF2, + ExactBiasFactor_MockF2_TarF3, + ExactBiasFactor_MockF3_TarF1, + ExactBiasFactor_MockF3_TarF2, + ExactBiasFactor_MockF3_TarF3, + CorrectedParameters_p1, + CorrectedParameters_p2, + CorrectedTargets_F1, + CorrectedTargets_F2, + CorrectedTargets_F3, + VarianceInCorrectedParameters_p1, + VarianceInCorrectedParameters_p2, + CovarianceInCorrectedParameters_p1_p2, + CovarianceInCorrectedParameters_p2_p1, + CorrectedVar_TarF1, + CorrectedVar_TarF2, + CorrectedVar_TarF3, + ExactCorrectedVar_TarF1, + ExactCorrectedVar_TarF2, + ExactCorrectedVar_TarF3, + CorrectedCov_TarF1_TarF2, + CorrectedCov_TarF2_TarF1, + CorrectedCov_TarF1_TarF3, + CorrectedCov_TarF3_TarF1, + CorrectedCov_TarF2_TarF3, + CorrectedCov_TarF3_TarF2, + ExactCorrectedCov_TarF1_TarF2, + ExactCorrectedCov_TarF2_TarF1, + ExactCorrectedCov_TarF1_TarF3, + ExactCorrectedCov_TarF3_TarF1, + ExactCorrectedCov_TarF2_TarF3, + ExactCorrectedCov_TarF3_TarF2 From 0b09fe7e1b51cb8d0138993a4feeb066ac0b617d Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 14 Jun 2022 11:07:06 -0600 Subject: [PATCH 40/95] documenting outs --- .../Validations/Representativity.py | 43 ++++++------------- 1 file changed, 14 insertions(+), 29 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py 
b/ravenframework/Models/PostProcessors/Validations/Representativity.py index f7f67b2555..97c0f41ef5 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -248,7 +248,21 @@ def _evaluate(self, datasets, **kwargs): par = np.atleast_2d([datasets[0][var].meanValue for var in parametersNames]).reshape(-1,1) correctedTargets, correctedTargetCovariance, correctedTargetErrorCov, UtarVarTilde_no_Umes_var, Inner1 = self._targetCorrection(FOMs, UparVar, UMeasurables, UMeasurablesVar, senFOMs, senMeasurables) correctedParameters, correctedParametersCovariance = self._parameterCorrection(par, UparVar, UMeasurables, UMeasurablesVar, senMeasurables) + # # 9. Create outputs + """ + Assuming the number of parameters is P, + the number of measurables in the mock/prototype experiment is M, + and the number of figures of merit (FOMs) is F, the representativity outcomes to be reported are: + + BiasFactor: $R \in \mathbb{R}^{M \times F}$ reported element by element as BiasFactor_MockFi_TarFj + ExactBiasFactor: same as the bias factor, but also accounting for the uncertainty in the measurables. + CorrectedParameters: the corrected (adjusted) parameters at which to perform the measurements, $parTilde \in \mathbb{R}^{P}$ + UncertaintyinCorrectedParameters: $parTildeVar \in \mathbb{R}^{P \times P}$ + CorrectedTargets: $TarTilde \in \mathbb{R}^{F}$ + UncertaintyinCorrectedTargets: $TarTildeVar \in \mathbb{R}^{F \times F}$ + ExactUncertaintyinCorrectedTargets: $TarTildeVar \in \mathbb{R}^{F \times F}$ + """ outs = {} for i,param in enumerate(self.featureParameters): name4 = "CorrectedParameters_{}".format(param.split("|")[-1]) @@ -278,19 +292,6 @@ def _evaluate(self, datasets, **kwargs): name4 = "ExactCorrectedCov_Tar{}_Tar{}".format(targ.split("|")[-1], tar.split("|")[-1]) outs[name3] = UtarVarTilde[i,k] outs[name4] = UtarVarTildeExact[i,k] - ## TODO: - # # 5. Compute correction in parameters - # par_tilde, par_var_tilde = Parameter_correction_theory(par, Upar, Upar_var, Umes, Umes_var, expNormalizedSen) - # pm_tilde, Cpm_tilde = Parameter_correction_theory(par, Upar, Upar_var,UF, UF_var,mesParametersNormalizedSen) - # # 6. Compute correction in targets - # FOMsim_tilde_theory, FOMsim_var_tilde_theory, UFOMsim_var_tilde_theory, Umes_var, UFOM_var_tilde_no_Umes_var, Inner1 = Target_correction_theory(par, FOM, Upar, Upar_var, Umes, Umes_var, mesParametersNormalizedSen, expNormalizedSen) - # # 7. 
Computer representativity factor - # r,r_exact, UFOMsim_var_tilde_rep,UFOMsim_var_tilde_rep_exact = Representativity(par, Upar, Upar_var, F, nSF, nSFOM, Umes_var) - # print('==== Representativity ====') - # print('r') - # print(r) - # print('UFOMsim_var_tilde_rep') - # print(UFOMsim_var_tilde_rep) return outs def _generateSensitivityMatrix(self, outputs, inputs, sensDict, datasets, normalize=True): @@ -314,22 +315,6 @@ def _generateSensitivityMatrix(self, outputs, inputs, sensDict, datasets, normal sensMatr[i, j] = sensDict[senName][0]* datasets[0][inpVar].meanValue / datasets[0][outVar].meanValue return sensMatr - # def _generateMatrixFromDataset(self,dataset,rows,cols): - # """ - # Reconstruct sensitivity matrix from the Basic Statistic calculation - # @ In, rows, list, list of rows names - # @ In, cols, list, list of colums names - # @ In, - # @ Out, matr, numpy.array, 2-D array of the reconstructed matrix - # """ - # matr = np.zeros((len(rows), len(cols))) - # inputVars = [x.split("|")[-1] for x in rows] - # outputVars = [x.split("|")[-1] for x in cols] - # for i, outVar in enumerate(outputVars): - # for j, inpVar in enumerate(inputVars): - # matr[i, j] = dataset[senName] - # return matr - def _getDataFromDatasets(self, datasets, var, names=None): """ Utility function to retrieve the data from datasets From 0f0ad010ee2d107937307f891bb1a58a4d6ab5ea Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 21 Jun 2022 17:45:25 -0600 Subject: [PATCH 41/95] adding two different models for experiment and target --- .../Validations/Representativity.py | 6 +- .../{linModel.py => expLinModel.py} | 18 ++- tests/framework/AnalyticModels/tarLinModel.py | 59 +++++++++ ...lidation_gate_representativityLinModel.xml | 121 +++++++++++------- 4 files changed, 151 insertions(+), 53 deletions(-) rename tests/framework/AnalyticModels/{linModel.py => expLinModel.py} (65%) create mode 100644 tests/framework/AnalyticModels/tarLinModel.py diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 97c0f41ef5..251691dd19 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -232,10 +232,10 @@ def _evaluate(self, datasets, **kwargs): # # 6. Compute Normalized Uncertainties # In mock experiment outputs (measurables) sens = self.stat[self.featureDataObject[-1]].run({"Data":[[None, None, datasets[self.featureDataObject[-1]]]]}) - senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens, datasets) + senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens, datasets[0]) # In target outputs (FOMs) sens = self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]}) - senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens, datasets) + senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens, datasets[1]) # # 7. Compute representativities r,rExact = self._calculateBiasFactor(senMeasurables, senFOMs, UparVar, UMeasurablesVar) # # 8. 
Compute corrected Uncertainties @@ -312,7 +312,7 @@ def _generateSensitivityMatrix(self, outputs, inputs, sensDict, datasets, normal if not normalize: sensMatr[i, j] = sensDict[senName][0] else: - sensMatr[i, j] = sensDict[senName][0]* datasets[0][inpVar].meanValue / datasets[0][outVar].meanValue + sensMatr[i, j] = sensDict[senName][0]* datasets[inpVar].meanValue / datasets[outVar].meanValue return sensMatr def _getDataFromDatasets(self, datasets, var, names=None): diff --git a/tests/framework/AnalyticModels/linModel.py b/tests/framework/AnalyticModels/expLinModel.py similarity index 65% rename from tests/framework/AnalyticModels/linModel.py rename to tests/framework/AnalyticModels/expLinModel.py index 56cc6cc188..26b48ff3ff 100644 --- a/tests/framework/AnalyticModels/linModel.py +++ b/tests/framework/AnalyticModels/expLinModel.py @@ -21,8 +21,8 @@ import numpy as np ################## -A = np.array([[2, -3],[1,8],[-5, -5]]) -b = np.array([[0],[0],[0]]) +# A = np.array([[2, -3],[1,8],[-5, -5]]) +# b = np.array([[0],[0],[0]]) def run(self,Input): """ @@ -34,12 +34,24 @@ def run(self,Input): self.F1,self.F2,self.F3 = main(Input) def main(Input): - y = A @ np.array(list(Input.values())).reshape(-1,1) + b + # y = A @ np.array(list(Input.values())).reshape(-1,1) + b + m = len([key for key in Input.keys() if 'e' in key]) # number of measurables (experiment responses) + n = len([par for par in Input.keys() if 'p' in par]) # number of parameters + A = np.array([Input['e1'],Input['e2'],Input['e3']]).reshape(-1,n) + b = Input['bE'].reshape(-1,1) + x = np.atleast_2d(np.array([Input['p1'],Input['p2']])).reshape(-1,1) + assert np.shape(A)[1] == n # A must have one column per parameter + assert np.shape(A)[0] == np.shape(b)[0] == m # A and b must have one row per measurable + y = A @ x + b return y[:] if __name__ == '__main__': Input = {} + Input['e1'] = [2,-3] + Input['e2'] = [1,8] + Input['e3'] = [-5, -5] + Input['bE'] = np.array([[0],[0],[0]]) - Input['x1'] = 5.5 - Input['x2'] = 8 + Input['p1'] = 5.5 + Input['p2'] = 8 a,b,c = main(Input) diff --git a/tests/framework/AnalyticModels/tarLinModel.py b/tests/framework/AnalyticModels/tarLinModel.py new file mode 100644 index 0000000000..f51f9ef381 --- /dev/null +++ b/tests/framework/AnalyticModels/tarLinModel.py @@ -0,0 +1,59 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#*************************************** +#* Simple analytic test ExternalModule * +#*************************************** +# +# Simulates a steady-state linear model that maps $J$ parameters (i.e., $\mathbb{R}^J$) to k responses +# +# External Modules +import numpy as np +################## +# Author: Mohammad Abdo (@Jimmy-INL) + +# A = np.array([[2, -3],[1,8],[-5, -5]]) +# b = np.array([[0],[0],[0]]) + +def run(self,Input): + """ + Method required by RAVEN to run this as an external model.
+ @ In, self, object, object to store members on + @ In, Input, dict, dictionary containing inputs from RAVEN + @ Out, None + """ + self.FOM1,self.FOM2,self.FOM3 = main(Input) + +def main(Input): + # y = A @ np.array(list(Input.values())).reshape(-1,1) + b + m = len([key for key in Input.keys() if 'o' in key]) # number of experiments + n = len([par for par in Input.keys() if 'p' in par]) # number of parameters + A = np.array([Input['o1'],Input['o2'],Input['o3']]).reshape(-1,n) + b = Input['bT'].reshape(-1,1) + x = np.atleast_2d(np.array([Input['p1'],Input['p2']])).reshape(-1,1) + assert(np.shape(A)[1],np.shape(b)[0]) + assert(np.shape(A)[0],np.shape(b)[0],m) + y = A @ x + b + return y[:] + + +if __name__ == '__main__': + Input = {} + Input['o1'] = [2,-3] + Input['o2'] = [1,8] + Input['o3'] = [-5, -5] + Input['bT'] = np.array([[0],[0],[0]]) + Input['p1'] = 5.5 + Input['p2'] = 8 + a,b,c = main(Input) + print(a,b,c) diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml index f267161e2b..cb38dfe134 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml @@ -2,7 +2,7 @@ Representativity - mcRun, PP1 + mcRunExp, mcRunTar, PP1 1 @@ -21,12 +21,15 @@ - - p1,p2,F1, F2, F3 + + p1, p2, e1, e2, e3, bE, F1, F2, F3 + + + p1, p2, o1, o2, o3, bT, FOM1, FOM2, FOM3 outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 - outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 + outputDataMC2|FOM1, outputDataMC2|FOM2, outputDataMC2|FOM3 outputDataMC1|p1,outputDataMC1|p2 outputDataMC2|p1,outputDataMC2|p2 @@ -51,7 +54,7 @@ - + 100 @@ -61,15 +64,39 @@ dist2 + 2,-3 + 1, 8 + -5,-5 + 0,0,0 + + + + 100 + + + dist1 + + + dist2 + + 2,-3 + 1, 8 + -5,-5 + 0,0,0 - + inputPlaceHolder2 linModel - MC_external + ExperimentMCSampler outputDataMC1 + + + inputPlaceHolder2 + tarModel + TargetMCSampler outputDataMC2 @@ -92,56 +119,56 @@ p1,p2 - F1, F2, F3 + FOM1, FOM2, FOM3 InputPlaceHolder - BiasFactor_MockF1_TarF1, - BiasFactor_MockF1_TarF2, - BiasFactor_MockF1_TarF3, - BiasFactor_MockF2_TarF1, - BiasFactor_MockF2_TarF2, - BiasFactor_MockF2_TarF3, - BiasFactor_MockF3_TarF1, - BiasFactor_MockF3_TarF2, - BiasFactor_MockF3_TarF3, - ExactBiasFactor_MockF1_TarF1, - ExactBiasFactor_MockF1_TarF2, - ExactBiasFactor_MockF1_TarF3, - ExactBiasFactor_MockF2_TarF1, - ExactBiasFactor_MockF2_TarF2, - ExactBiasFactor_MockF2_TarF3, - ExactBiasFactor_MockF3_TarF1, - ExactBiasFactor_MockF3_TarF2, - ExactBiasFactor_MockF3_TarF3, + BiasFactor_MockF1_TarFOM1, + BiasFactor_MockF1_TarFOM2, + BiasFactor_MockF1_TarFOM3, + BiasFactor_MockF2_TarFOM1, + BiasFactor_MockF2_TarFOM2, + BiasFactor_MockF2_TarFOM3, + BiasFactor_MockF3_TarFOM1, + BiasFactor_MockF3_TarFOM2, + BiasFactor_MockF3_TarFOM3, + ExactBiasFactor_MockF1_TarFOM1, + ExactBiasFactor_MockF1_TarFOM2, + ExactBiasFactor_MockF1_TarFOM3, + ExactBiasFactor_MockF2_TarFOM1, + ExactBiasFactor_MockF2_TarFOM2, + ExactBiasFactor_MockF2_TarFOM3, + ExactBiasFactor_MockF3_TarFOM1, + ExactBiasFactor_MockF3_TarFOM2, + ExactBiasFactor_MockF3_TarFOM3, CorrectedParameters_p1, CorrectedParameters_p2, - CorrectedTargets_F1, - CorrectedTargets_F2, - CorrectedTargets_F3, + CorrectedTargets_FOM1, + CorrectedTargets_FOM2, + CorrectedTargets_FOM3, VarianceInCorrectedParameters_p1, VarianceInCorrectedParameters_p2, 
CovarianceInCorrectedParameters_p1_p2, CovarianceInCorrectedParameters_p2_p1, - CorrectedVar_TarF1, - CorrectedVar_TarF2, - CorrectedVar_TarF3, - ExactCorrectedVar_TarF1, - ExactCorrectedVar_TarF2, - ExactCorrectedVar_TarF3, - CorrectedCov_TarF1_TarF2, - CorrectedCov_TarF2_TarF1, - CorrectedCov_TarF1_TarF3, - CorrectedCov_TarF3_TarF1, - CorrectedCov_TarF2_TarF3, - CorrectedCov_TarF3_TarF2, - ExactCorrectedCov_TarF1_TarF2, - ExactCorrectedCov_TarF2_TarF1, - ExactCorrectedCov_TarF1_TarF3, - ExactCorrectedCov_TarF3_TarF1, - ExactCorrectedCov_TarF2_TarF3, - ExactCorrectedCov_TarF3_TarF2 + CorrectedVar_TarFOM1, + CorrectedVar_TarFOM2, + CorrectedVar_TarFOM3, + ExactCorrectedVar_TarFOM1, + ExactCorrectedVar_TarFOM2, + ExactCorrectedVar_TarFOM3, + CorrectedCov_TarFOM1_TarFOM2, + CorrectedCov_TarFOM2_TarFOM1, + CorrectedCov_TarFOM1_TarFOM3, + CorrectedCov_TarFOM3_TarFOM1, + CorrectedCov_TarFOM2_TarFOM3, + CorrectedCov_TarFOM3_TarFOM2, + ExactCorrectedCov_TarFOM1_TarFOM2, + ExactCorrectedCov_TarFOM2_TarFOM1, + ExactCorrectedCov_TarFOM1_TarFOM3, + ExactCorrectedCov_TarFOM3_TarFOM1, + ExactCorrectedCov_TarFOM2_TarFOM3, + ExactCorrectedCov_TarFOM3_TarFOM2 From 570036bb2d26e60ad819ff138dec0dfade49ad29 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 30 Aug 2022 21:35:11 -0600 Subject: [PATCH 42/95] updating scipy --- dependencies.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dependencies.xml b/dependencies.xml index 423dcd7466..a9308cdd90 100644 --- a/dependencies.xml +++ b/dependencies.xml @@ -37,7 +37,7 @@ Note all install methods after "main" take
2.10 1.18 - 1.2 + 1.5 0.24 1.1 0.16 From 52d3dedc7e78154a8daef5a4ebdaa85110a6b5e6 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 1 Sep 2022 13:47:57 -0600 Subject: [PATCH 43/95] deleting representativity Lin test --- dependencies.xml | 4 +- ...lidation_gate_representativityLinModel.xml | 183 ------------------ 2 files changed, 2 insertions(+), 185 deletions(-) delete mode 100644 tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml diff --git a/dependencies.xml b/dependencies.xml index a9308cdd90..cf4623ca42 100644 --- a/dependencies.xml +++ b/dependencies.xml @@ -35,7 +35,7 @@ Note all install methods after "main" take -->
- 2.10 + 3.6 1.18 1.5 0.24 @@ -59,7 +59,7 @@ Note all install methods after "main" take - 1.9 + 1.12 2.9 diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml deleted file mode 100644 index cb38dfe134..0000000000 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml +++ /dev/null @@ -1,183 +0,0 @@ - - - - Representativity - mcRunExp, mcRunTar, PP1 - 1 - - - - framework/PostProcessors/Validation/test_validation_gate_representativity - Mohammad Abdo (@Jimmy-INL) - 2021-04-29 - PostProcessors.Validation - - This test assesses the mechanics of the representativity workflow; one of the validation algorithms used in RAVEN. - This test a linear model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors. - - - Added Modification for new PP API - - - - - - p1, p2, e1, e2, e3, bE, F1, F2, F3 - - - p1, p2, o1, o2, o3, bT, FOM1, FOM2, FOM3 - - - outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 - outputDataMC2|FOM1, outputDataMC2|FOM2, outputDataMC2|FOM3 - - outputDataMC1|p1,outputDataMC1|p2 - outputDataMC2|p1,outputDataMC2|p2 - outputDataMC1|time - outputDataMC2|time - - - - - - - - 5.5 - 0.55 - - - 8 - 0.8 - - - - - - - 100 - - - dist1 - - - dist2 - - 2,-3 - 1, 8 - -5,-5 - 0,0,0 - - - - 100 - - - dist1 - - - dist2 - - 2,-3 - 1, 8 - -5,-5 - 0,0,0 - - - - - - inputPlaceHolder2 - linModel - ExperimentMCSampler - outputDataMC1 - - - inputPlaceHolder2 - tarModel - TargetMCSampler - outputDataMC2 - - - outputDataMC1 - outputDataMC2 - pp1 - pp1_metric - pp1_metric_dump - - - - - - p1,p2 - OutputPlaceHolder - - - p1,p2 - F1, F2, F3 - - - p1,p2 - FOM1, FOM2, FOM3 - - - InputPlaceHolder - - BiasFactor_MockF1_TarFOM1, - BiasFactor_MockF1_TarFOM2, - BiasFactor_MockF1_TarFOM3, - BiasFactor_MockF2_TarFOM1, - BiasFactor_MockF2_TarFOM2, - BiasFactor_MockF2_TarFOM3, - BiasFactor_MockF3_TarFOM1, - BiasFactor_MockF3_TarFOM2, - BiasFactor_MockF3_TarFOM3, - ExactBiasFactor_MockF1_TarFOM1, - ExactBiasFactor_MockF1_TarFOM2, - ExactBiasFactor_MockF1_TarFOM3, - ExactBiasFactor_MockF2_TarFOM1, - ExactBiasFactor_MockF2_TarFOM2, - ExactBiasFactor_MockF2_TarFOM3, - ExactBiasFactor_MockF3_TarFOM1, - ExactBiasFactor_MockF3_TarFOM2, - ExactBiasFactor_MockF3_TarFOM3, - CorrectedParameters_p1, - CorrectedParameters_p2, - CorrectedTargets_FOM1, - CorrectedTargets_FOM2, - CorrectedTargets_FOM3, - VarianceInCorrectedParameters_p1, - VarianceInCorrectedParameters_p2, - CovarianceInCorrectedParameters_p1_p2, - CovarianceInCorrectedParameters_p2_p1, - CorrectedVar_TarFOM1, - CorrectedVar_TarFOM2, - CorrectedVar_TarFOM3, - ExactCorrectedVar_TarFOM1, - ExactCorrectedVar_TarFOM2, - ExactCorrectedVar_TarFOM3, - CorrectedCov_TarFOM1_TarFOM2, - CorrectedCov_TarFOM2_TarFOM1, - CorrectedCov_TarFOM1_TarFOM3, - CorrectedCov_TarFOM3_TarFOM1, - CorrectedCov_TarFOM2_TarFOM3, - CorrectedCov_TarFOM3_TarFOM2, - ExactCorrectedCov_TarFOM1_TarFOM2, - ExactCorrectedCov_TarFOM2_TarFOM1, - ExactCorrectedCov_TarFOM1_TarFOM3, - ExactCorrectedCov_TarFOM3_TarFOM1, - ExactCorrectedCov_TarFOM2_TarFOM3, - ExactCorrectedCov_TarFOM3_TarFOM2 - - - - - - - csv - pp1_metric - - - - From dc19f37a0aec84ebea71951422b771b2e2041988 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 7 
Sep 2022 11:40:46 -0600 Subject: [PATCH 44/95] Fixing an issue in BS that affected the test /tests/framework/user_guide/raven_tutorial/PostProcess.xml --- ravenframework/Models/PostProcessors/BasicStatistics.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ravenframework/Models/PostProcessors/BasicStatistics.py b/ravenframework/Models/PostProcessors/BasicStatistics.py index b08a0f556e..c8466ee767 100644 --- a/ravenframework/Models/PostProcessors/BasicStatistics.py +++ b/ravenframework/Models/PostProcessors/BasicStatistics.py @@ -224,9 +224,6 @@ def initialize(self, runInfo, inputs, initDict): """ if len(inputs)>1: self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one DataObject') - if self.pivotParameter is not None: - if not inputs[-1].checkIndexAlignment(indexesToCheck=self.pivotParameter): - self.raiseAnError(IOError, "The data provided by the input data object is not synchronized!") self.inputDataObjectName = inputs[-1].name #construct a list of all the parameters that have requested values into self.allUsedParams self.allUsedParams = set() @@ -245,6 +242,8 @@ def initialize(self, runInfo, inputs, initDict): inputObj = inputs[-1] if type(inputs) == list else inputs if inputObj.type == 'HistorySet': self.dynamic = True + if not inputObj.checkIndexAlignment(indexesToCheck=self.pivotParameter): + self.raiseAnError(IOError, "The data provided by the input data object is not synchronized!") inputMetaKeys = [] outputMetaKeys = [] for metric, infos in self.toDo.items(): From df439856a47877b17cd066e0f180d64834ad93e1 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 7 Sep 2022 11:50:28 -0600 Subject: [PATCH 45/95] adding the old dependencies --- dependencies.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dependencies.xml b/dependencies.xml index cf4623ca42..3d339e082e 100644 --- a/dependencies.xml +++ b/dependencies.xml @@ -45,7 +45,7 @@ Note all install methods after "main" take 3.2 1.6 - 2.0 + 2.7 3 From 34a0c0562dc32d6923da1f1540797920dba83731 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 7 Sep 2022 11:53:34 -0600 Subject: [PATCH 46/95] updating dependencies --- dependencies.xml | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/dependencies.xml b/dependencies.xml index 3d339e082e..a31a154149 100644 --- a/dependencies.xml +++ b/dependencies.xml @@ -35,7 +35,7 @@ Note all install methods after "main" take -->
- 3.6 + 1.18 1.5 0.24 @@ -43,9 +43,10 @@ Note all install methods after "main" take 0.16 1.5 3.2 - + 0.12 1.6 - 2.7 + 2.3 + 2.7 3 @@ -59,13 +60,22 @@ Note all install methods after "main" take - 1.12 + 1.12 + + 2.9 1.1 + 0.9.39 + 6.4 + + + + + 3
@@ -76,4 +86,12 @@ Note all install methods after "main" take remove remove + + remove + remove + remove + remove + remove + remove +
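The docstrings added in the next patch describe the bias-factor computation (eq. 79) only in prose. As an illustration of the construction those docstrings refer to, here is a minimal, self-contained numpy/scipy sketch; the sensitivity and covariance values are toy numbers invented for this note (not RAVEN data or API), and since sqrtm(X) @ sqrtm(X) recovers X, the implemented product reduces to (S_t U S_t^T)^(-1/2) (S_t U S_e^T) (S_e U S_e^T)^(-1/2):

import numpy as np
from scipy.linalg import sqrtm, pinv

senExp = np.array([[2., -3.], [1., 8.]])   # toy normalized sensitivities (2 responses x 2 parameters)
senTar = senExp.copy()                     # identical target model -> perfect representativity
UparVar = np.diag([0.01, 0.01])            # parameter error variance-covariance

TUT = senTar @ UparVar @ senTar.T
TUE = senTar @ UparVar @ senExp.T
EUE = senExp @ UparVar @ senExp.T
# eq. 79 as in _calculateBiasFactor (measurement uncertainty neglected)
r = (pinv(sqrtm(TUT)) @ sqrtm(TUE) @ sqrtm(TUE) @ pinv(sqrtm(EUE))).real
print(np.round(r, 8))                      # -> identity matrix

With identical mock and target sensitivities the factor collapses to the identity, which is the perfect-representativity limit that the "Perfect" gold files added later in this series approximate up to sampling noise.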
From d0afbdf1e3c923157a3c178a3d2f494402debd07 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 29 Sep 2022 15:30:35 -0600 Subject: [PATCH 47/95] addressing some of Congjian's comments about the manual, and dockstrings --- doc/user_manual/PostProcessors/Validation.tex | 20 +----- .../Validations/Representativity.py | 67 ++++++++++++++++++- 2 files changed, 67 insertions(+), 20 deletions(-) diff --git a/doc/user_manual/PostProcessors/Validation.tex b/doc/user_manual/PostProcessors/Validation.tex index 49f6db313d..0a881d0e74 100644 --- a/doc/user_manual/PostProcessors/Validation.tex +++ b/doc/user_manual/PostProcessors/Validation.tex @@ -9,7 +9,7 @@ \subsubsection{Validation PostProcessors} \begin{itemize} \item \textbf{Probabilistic}, using probabilistic method for validation, can be used for both static and time-dependent problems. \item \textbf{PPDSS}, using dynamic system scaling method for validation, can only be used for time-dependent problems. - % \item \textbf{Representativity} + \item \textbf{Representativity}, using the representativity (bias) factor for validation; currently it can only be used for static data. % \item \textbf{PCM} \end{itemize} % @@ -173,23 +173,15 @@ \subsubsection{Validation PostProcessors} \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of targets. These are the Figures of merit (FOMs) in the target model against which the mock model is being validated. - \item \xmlNode{featureParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputrs to the mock model. + \item \xmlNode{featureParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the mock model. \item \xmlNode{targetParameters}, \xmlDesc{comma separated string, required field}, contains a comma separated list of target parameters/inputs. - \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable of the moch model. Default is ``time''. + \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable of the mock model. Default is ``time''. \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation). \item \xmlNode{targetPivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable in the target model. Default is ``time''. \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation). - - \item \xmlNode{Metric}, \xmlDesc{string, required field}, specifies the \textbf{Metric} name that is defined via - \textbf{Metrics} entity. In this xml-node, the following xml attributes need to be specified: - \begin{itemize} - \item \xmlAttr{class}, \xmlDesc{required string attribute}, the class of this metric (e.g. Metrics) - \item \xmlAttr{type}, \xmlDesc{required string attribute}, the sub-type of this Metric (e.g. SKL, Minkowski) - \end{itemize} - The choice of the available metrics depends on the specific validation algorithm that is chosen (see table \ref{tab:ValidationAlgorithms}) \end{itemize} @@ -226,18 +218,12 @@ \subsubsection{Validation PostProcessors} outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 - simIndex outputDataMC1|p1,outputDataMC1|p2 outputDataMC2|p1,outputDataMC2|p2 outputDataMC1|time - outputDataMC2|time ... ... - - - -... 
\end{lstlisting} diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 251691dd19..73348a5787 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -232,9 +232,11 @@ def _evaluate(self, datasets, **kwargs): # # 6. Compute Normalized Uncertainties # In mock experiment outputs (measurables) sens = self.stat[self.featureDataObject[-1]].run({"Data":[[None, None, datasets[self.featureDataObject[-1]]]]}) + # normalize sensitivities senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens, datasets[0]) # In target outputs (FOMs) sens = self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]}) + # normalize sensitivities senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens, datasets[1]) # # 7. Compute representativities r,rExact = self._calculateBiasFactor(senMeasurables, senFOMs, UparVar, UMeasurablesVar) @@ -348,6 +350,13 @@ def _getDataFromDatasets(self, datasets, var, names=None): return data def _computeMoments(self, datasets, features, targets): + """ + A utility function to compute moments, mean value, variance and covariance + @ In, datasets, xarray datasets, datasets containing prototype (mock) data and target data + @ In, features, names of feature variables: measurables + @ In, targets, names of target variables: figures of merit (FOMs) + @ out, datasets, xarray datasets, datasets after adding moments + """ for var in [x.split("|")[-1] for x in features + targets]: #datasets.data_vars datasets[var].attrs['meanValue'] = np.mean(datasets[var].values) for var2 in [x.split("|")[-1] for x in features + targets]: @@ -358,11 +367,23 @@ def _computeMoments(self, datasets, features, targets): return datasets def _computeErrors(self,datasets,features,targets): + """ + A utility function to transform variables to the relative error of these variable + @ In, datasets, xarray datasets, datasets containing prototype (mock) data and target data + @ In, features, names of feature variables: measurables + @ In, targets, names of target variables: figures of merit (FOMs) + @ out, datasets, xarray datasets, datasets after computing errors in each variable + """ for var in [x.split("|")[-1] for x in features + targets]: datasets['err_'+str(var)] = (datasets[var].values - datasets[var].attrs['meanValue'])/datasets[var].attrs['meanValue'] def _computeUncertaintyMatrixInErrors(self, data, parameters): - + """ + A utility function to variance and covariance of variables in the error space + @ In, data, xarray dataset, data containing either prototype (mock) data or target data + @ In, parameters, names of parameters/inputs to each model + @ out, uncertMatr, np.array, The variance covariance matrix of errors + """ uncertMatr = np.zeros((len(parameters), len(parameters))) for i, var1 in enumerate(parameters): for j, var2 in enumerate(parameters): @@ -373,18 +394,44 @@ def _computeUncertaintyMatrixInErrors(self, data, parameters): return uncertMatr def _calculateBiasFactor(self, normalizedSenExp, normalizedSenTar, UparVar, UmesVar=None): + """ + A utility function to compute the bias factor (i.e., representativity factor) + @ In, normalizedSenExp, np.array, the normalized sensitivities of the mock/prototype measurables + @ In, normalizedSenTar, np.array, the normalized sensitivities of the target 
variables/Figures of merit (FOMs) with respect to the parameters + @ In, UparVar, np.array, variance covariance matrix of the parameter errors + @ In, UmesVar, np.array, variance covariance matrix of the measurables error, default is None + @ Out, r, np.array, the representativity (bias factor) matrix neglecting uncertainties in measurables + @ Out, rExact, np.array, the representativity (bias factor) matrix considering uncertainties in measurables + """ # Compute representativity (#eq 79) r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T))).real rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar))).real return r, rExact def _calculateCovofTargetErrorsfromBiasFactor(self, normalizedSenTar, UparVar, r): + """ + A utility function to compute the variance covariance matrix of the target errors from the bias factors + @ In, normalizedSenTar, np.array, the normalized sensitivities of the targets + @ In, UparVar, np.array, the variance covariance matrix of the parameters in the error space + @ In, r, np.array, the bias factor matrix + @ Out, UtarVarTilde, np.array, the variance covariance matrix of errors in the corrected targets + """ # re-compute Utar_var_tilde from r (#eq 80) chol = sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T).real UtarVarTilde = chol @ (np.eye(np.shape(r)[0]) - r @ r.T) @ chol return UtarVarTilde def _parameterCorrection(self, par, UparVar, Umes, UmesVar, normalizedSen): #eq 48 and eq 67 + """ + A utility function that computes the correction in parameters + @ In, par, np.array, the parameters (inputs) of the mock experiment + @ In, UparVar, np.array, variance covariance matrix of the parameters in the error space + @ In, Umes, np.array, the error in measurements + @ In, UmesVar, np.array, variance covariance matrix of the measurables in the error space + @ In, normalizedSen, np.array, the normalized sensitivity matrix + @ Out, parTilde, np.array, the corrected parameters + @ Out, parVarTilde, np.array, the variance covariance matrix of the corrected parameters (uncertainty in the corrected parameters) + """ # Compute adjusted par #eq 48 UparTilde = UparVar @ normalizedSen.T @ np.linalg.pinv(normalizedSen @ UparVar @ normalizedSen.T + UmesVar) @ Umes @@ -406,6 +453,20 @@ def _parameterCorrection(self, par, UparVar, Umes, UmesVar, normalizedSen): #eq return parTilde, parVarTilde def _targetCorrection(self, FOMs, UparVar, Umes, UmesVar, normalizedSenTar, normalizedSenExp): + """ + A utility function to compute corrections in targets based on the representativity analysis + @ In, FOMs, np.array, target outputs (figures of merit) + @ In, UparVar, np.array, variance covariance matrix of the parameters in the error space + @ In, Umes, np.array, the error in measurements + @ In, UmesVar, np.array, variance covariance matrix of the measurables in the error space + @ In, normalizedSenTar, np.array, normalized sensitivities of the target outputs w.r.t. parameters + @ In, normalizedSenExp, np.array, normalized sensitivities of the mock prototype/experiment outputs (measurements) w.r.t. parameters + @ Out, tarTilde, np.array, corrected targets (FOMs) + @ Out, tarVarTilde, np.array, variance covariance matrix for the corrected targets + @ Out, UtarVarTilde, np.array, variance covariance matrix for the corrected targets in error space + @ Out, UtarVarztilde_no_UmesVar, np.array, variance covariance matrix for the corrected targets in error space assuming no uncertainty in the measurables (UmesVar neglected) + @ Out, propagetedExpUncert, np.array, propagated variance covariance matrix of experiments due to parameter uncertainties + """ # Compute adjusted target #eq 71 UtarTilde = normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenTar.T + UmesVar) @ Umes # back transform to targets tarTilde = UtarTilde * FOMs + FOMs @@ -426,9 +487,9 @@ def _targetCorrection(self, FOMs, UparVar, Umes, UmesVar, normalizedSenTar, norm # Compute adjusted par_var neglecting UmesVar (to compare to representativity) # The representativity (#eq 79 neglects UmesVar) - Inner1 = (normalizedSenExp @ UparVar) @ normalizedSenExp.T + propagetedExpUncert = (normalizedSenExp @ UparVar) @ normalizedSenExp.T UtarVarztilde_no_UmesVar = (normalizedSenTar @ UparVar @ normalizedSenTar.T)\ - (normalizedSenTar @ UparVar @ normalizedSenExp.T)\ @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenExp.T)\ @ (normalizedSenExp @ UparVar @ normalizedSenTar.T) - return tarTilde, tarVarTilde, UtarVarTilde, UtarVarztilde_no_UmesVar, Inner1 \ No newline at end of file + return tarTilde, tarVarTilde, UtarVarTilde, UtarVarztilde_no_UmesVar, propagetedExpUncert \ No newline at end of file From 3c7c56f7d823c91f1ae6de1c8df8d0884e44e1cd Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 4 Oct 2022 08:50:58 -0600 Subject: [PATCH 48/95] few changes to the manual --- doc/user_manual/PostProcessors/Validation.tex | 16 +++---- doc/user_manual/raven_user_manual.bib | 46 +++++++++++++++++++ 2 files changed, 54 insertions(+), 8 deletions(-) diff --git a/doc/user_manual/PostProcessors/Validation.tex b/doc/user_manual/PostProcessors/Validation.tex index 0a881d0e74..fd906cc802 100644 --- a/doc/user_manual/PostProcessors/Validation.tex +++ b/doc/user_manual/PostProcessors/Validation.tex @@ -23,7 +23,7 @@ \subsubsection{Validation PostProcessors} \hline \textbf{Validation Algorithm} & \textbf{DataObject} & \textbf{Available Metrics} \\ \hline Probabilistic & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet\end{tabular} & \begin{tabular}[c]{@{}c@{}}CDFAreaDifference\\ \\ PDFCommonArea\end{tabular} \\ \hline -Representativity & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet \\DataSet\end{tabular} & \begin{tabular}[c]{@{}c@{}}BiasFactor\end{tabular} \\ \hline +Representativity & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet \\DataSet\end{tabular} & \begin{tabular}[c]{@{}c@{}}\end{tabular} \\ \hline PPDSS & HistorySet & DSS \\ \hline \end{tabular} \end{table} @@ -156,13 +156,13 @@ \subsubsection{Validation PostProcessors} post-processor interface that acts as a gate for applying these validation algorithms (i.e., representativity, Physics-guided Convergence Mapping (PCM), and Dynamic System Scaling (DSS)). The post-processor is in charge of deploying a common infrastructure for the user of \textbf{Validation} problems. -The usage of this post-processor is three fold. one, to quantitatively assess if a mock/prototype model/experiment -form a good representation of a target model. 
Two, if a set of experiments can represent a target model and can -claim a full coverage of the design space and scenarios, and three, if the available set of experiments are not -enough to declare coverage what are the remaining experiments required in order to achieve full coverage and -increase the representativity/bias factor. The representativity theory was first founded in the -Neutronics community \ref{} then shortly after, was transformed to the thermal hydraulics \ref{}. -So far several algorithms are implemented within this post-processor: +%The usage of this post-processor is three fold. one, to quantitatively assess if a mock/prototype model/experiment +%form a good representation of a target model. Two, if a set of experiments can represent a target model and can +%claim a full coverage of the design space and scenarios, and three, if the available set of experiments are not +%enough to declare coverage what are the remaining experiments required in order to achieve full coverage and +%increase the representativity/bias factor. +The representativity theory was first founded in the +Neutronics community \cite{Gandini, palmiotti1, palmiotti2}, then lately, was transformed to the thermal hydraulics \cite{Epiney1, Epiney2}. So far, several algorithms are implemented within this post-processor: % \ppType{Representativity}{Representativity} % diff --git a/doc/user_manual/raven_user_manual.bib b/doc/user_manual/raven_user_manual.bib index 0766c78752..66b13c904f 100644 --- a/doc/user_manual/raven_user_manual.bib +++ b/doc/user_manual/raven_user_manual.bib @@ -112,3 +112,49 @@ @TechReport{RAVENtheoryManual year = {2016}, key = {INL/EXT-16-38178} } + +@book{Gandini, + title={Uncertainty analysis and experimental data transposition methods based on perturbation theory}, + author={Gandini, A}, + journal={Uncertainty Analysis}, + pages={217--258}, + year={1988}, + publisher={CRC Press, Boca Raton, Fla, USA} +} + +@article{palmiotti1, + title={A global approach to the physics validation of simulation codes for future nuclear systems}, + author={Palmiotti, Giuseppe and Salvatores, Massimo and Aliberti, Gerardo and Hiruta, Hikarui and McKnight, R and Oblozinsky, P and Yang, WS}, + journal={Annals of Nuclear Energy}, + volume={36}, + number={3}, + pages={355--361}, + year={2009}, + publisher={Elsevier} +} + +@article{palmiotti2, + title={The role of experiments and of sensitivity analysis in simulation validation strategies with emphasis on reactor physics}, + author={Palmiotti, Giuseppe and Salvatores, Massimo}, + journal={Annals of Nuclear Energy}, + volume={52}, + pages={10--21}, + year={2013}, + publisher={Elsevier} +} + +@article{Epiney1, + title={A Systematic Approach to Inform Experiment Design Through Modern Modeling and Simulation Methods}, + author={Epiney, A and Rabiti, C and Davis, C}, + year={2019} +} + +@inproceedings{Epiney2, + title={Representativity Analysis Applied to TREAT Water Loop LOCA Experiment Design}, + author={Epiney, Aaron S and Woolstenhulme, Nicolas}, + booktitle={International Conference on Nuclear Engineering}, + volume={83785}, + pages={V003T13A055}, + year={2020}, + organization={American Society of Mechanical Engineers} +} \ No newline at end of file From cdd6c51bb0c34952130a84fdf7d6b4b1cb36fb50 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 25 Jan 2023 09:07:32 -0700 Subject: [PATCH 49/95] updating dependencies --- dependencies.xml | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/dependencies.xml 
b/dependencies.xml index a31a154149..b29ebc7270 100644 --- a/dependencies.xml +++ b/dependencies.xml @@ -36,18 +36,26 @@ Note all install methods after "main" take
- 1.18 - 1.5 - 0.24 - 1.1 - 0.16 + 1.21 + 1.7 + 1.0 + 1.3 + + 0.19 1.5 - 3.2 - 0.12 - 1.6 - 2.3 - 2.7 - 3 + 3.3 + 0.13 + 2.2 + 2.9 + + 3.7 + 3 @@ -60,14 +68,15 @@ Note all install methods after "main" take - 1.12 + 2.1 + 1.13 - 2.9 + 2.22 1.1 - 0.9.39 + 0.9 6.4 From 573692f57a315f9ce32fbf806e73a3770cbcd4df Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 2 Feb 2023 14:14:34 -0700 Subject: [PATCH 50/95] adding tests and golded files --- .../pp1_metric_dump.csv | 2 ++ .../pp1_metric_dump.csv | 2 ++ .../pp1_metric_dump.csv | 2 ++ .../framework/PostProcessors/Validation/tests | 21 +++++++++++++++++++ 4 files changed, 27 insertions(+) create mode 100644 tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv create mode 100644 tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv create mode 100644 tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv new file mode 100644 index 0000000000..06916e151e --- /dev/null +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv @@ -0,0 +1,2 @@ +BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 +0.956931548906,0.158944501164,-0.119856086224,0.16791486128,0.37959479339,0.453551518937,-0.120864717061,0.460352644806,0.663418240664,0.677393800129,0.112080613376,-0.085428449283,0.118557800967,0.267118040533,0.319022128683,-0.0891847494166,0.326728461496,0.472040049741,5.50515445786,8.21264618362,-12.7004102917,68.6723913603,-66.9629170421,0.116530335,0.30564001965,0.0,0.0,-1.9388073124e-10,-5.5616475517e-11,-3.72335944868e-11,0.0194254248685,0.0038238410237,0.00210312019109,-9.30123543881e-11,-9.30123545459e-11,-5.77709779258e-11,-5.7770978011e-11,-4.255110962e-11,-4.25511095561e-11,0.00766855194786,0.00766855194786,0.00356093767321,0.00356093767321,0.00248057657059,0.00248057657059 diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv 
b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv new file mode 100644 index 0000000000..6cf5fc8e7e --- /dev/null +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv @@ -0,0 +1,2 @@ +BiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM1,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM1 +0.999917799082,0.707120008205,5.50515445786,8.21264618362,-12.7004102917,0.213154131346,0.34725436153,0.0,0.0,6.38668105814e-06,0.0194240671795 diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv new file mode 100644 index 0000000000..2b17d95a32 --- /dev/null +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv @@ -0,0 +1,2 @@ +BiasFactor_MockF1_TarFOM1 +0.698423539624 diff --git a/tests/framework/PostProcessors/Validation/tests b/tests/framework/PostProcessors/Validation/tests index 0420a95157..127517cb4b 100644 --- a/tests/framework/PostProcessors/Validation/tests +++ b/tests/framework/PostProcessors/Validation/tests @@ -20,4 +20,25 @@ rel_err = 0.00001 zero_threshold = 1e-9 [../] + [./test_validation_representativity1] + type = 'RavenFramework' + input = 'test_representativity_perfectLinExpToTarget.xml' + csv = 'RepresentativityPerfectMatch/pp1_metric_dump.csv' + rel_err = 0.00001 + zero_threshold = 1e-9 + [../] + [./test_validation_representativity2] + type = 'RavenFramework' + input = 'test_representativity_singlePerfectLinExpToTarget.xml' + csv = 'RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv' + rel_err = 0.00001 + zero_threshold = 1e-9 + [../] + [./test_validation_representativity3] + type = 'RavenFramework' + input = 'test_representativity_rankDifficientLinExpToTarget.xml' + csv = 'RepresentativityrankDifficient/pp1_metric_dump.csv' + rel_err = 0.00001 + zero_threshold = 1e-9 + [../] [] From 98c115fcbec1e0785ceea0c3408e4950967afd75 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Sun, 5 Feb 2023 09:51:23 -0700 Subject: [PATCH 51/95] addressing few comments from wang --- .../Validations/Representativity.py | 32 ++++++++----------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 73348a5787..5f512625ba 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -11,15 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
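# A quick orientation note on the error-space convention used throughout this file:
# quantities are first mapped to relative errors, err(x) = (x - E[x]) / E[x] (see
# _computeErrors), adjusted in that space, and then mapped back via
# xTilde = errTilde * E[x] + E[x] (the same pattern as parTilde = UparTilde * par + par).
# The snippet below is a tiny standalone illustration with made-up numbers; it is
# not part of the RAVEN API.
import numpy as np
samples = np.array([9.5, 10.0, 10.5])
mean = samples.mean()                  # E[x] = 10.0
err = (samples - mean) / mean          # relative errors: [-0.05, 0.0, 0.05]
errTilde = 0.5 * err                   # pretend the adjustment halves the errors
adjusted = errTilde * mean + mean      # back-transform: [9.75, 10.0, 10.25]
print(adjusted)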
-""" - Created on April 29, 2021 - - @author: Mohammad Abdo (@Jimmy-INL) - - This class represents a base class for the validation algorithms - It inherits from the PostProcessor directly - ##TODO: Recast it once the new PostProcesso API gets in place -""" #External Modules------------------------------------------------------------------------------------ import numpy as np @@ -30,7 +21,6 @@ #Internal Modules------------------------------------------------------------------------------------ from ravenframework.utils import InputData, InputTypes -from ravenframework.utils import utils from .. import ValidationBase #Internal Modules End-------------------------------------------------------------------------------- @@ -40,9 +30,9 @@ class Representativity(ValidationBase): It represents the base class for most validation problems @ Authors: Mohammad abdo (@Jimmy-INL) - Congjian Wang (@) - Andrea Alfonsi (@) - Aaron Epiney (@) + Congjian Wang (@wangcj05) + Andrea Alfonsi (@aalfonsi) + Aaron Epiney (@AaronEpiney) """ @@ -62,7 +52,6 @@ class cls. specs.addSub(parametersInput) targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType, descr=r"""Target model parameters/inputs""") - targetParametersInput.addParam("type", InputTypes.StringType) specs.addSub(targetParametersInput) targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType, descr=r"""ID of the temporal variable of the target model. Default is ``time''. @@ -163,13 +152,17 @@ def _handleInput(self, paramInput): self.targetParameters = child.value elif child.getName() == 'targetPivotParameter': self.targetPivotParameter = child.value + _, notFound = paramInput.findNodesAndExtractValues(['featureParameters', + 'targetParameters']) + # notFound must be empty + assert(not notFound) def run(self, inputIn): """ - This method executes the postprocessor action. In this case it loads the - results to specified dataObject - @ In, inputIn, list, dictionary of data to process - @ Out, outputDict, dict, dictionary containing the post-processed results + This method executes the postprocessor action. In this case it computes representativity/bias factors, corrected data, etc. 
+ + @ In, inputIn, dictionary of data to process + @ Out, evaluation, dict, dictionary containing the post-processed results """ dataSets = [data for _, _, data in inputIn['Data']] pivotParameter = self.pivotParameter @@ -181,7 +174,8 @@ def run(self, inputIn): self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name)) else: pivotParameter = self.pivotParameter - evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()}#inputIn + evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()} + ## TODO: This is a placeholder to remember the time dependent case # if pivotParameter: # # Uncomment this to cause crash: print(dataSets[0], pivotParameter) From b17288205b56e5b4f06913b364889d40926c5a9f Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Sun, 5 Feb 2023 12:44:37 -0700 Subject: [PATCH 52/95] addressing some manual comments from wang --- doc/user_manual/PostProcessors/Validation.tex | 93 +++++++++---------- doc/user_manual/postprocessor.tex | 1 - doc/user_manual/raven_user_manual.bib | 43 +++++++++ 3 files changed, 86 insertions(+), 51 deletions(-) diff --git a/doc/user_manual/PostProcessors/Validation.tex b/doc/user_manual/PostProcessors/Validation.tex index 85acb5c098..b8f2b7466b 100644 --- a/doc/user_manual/PostProcessors/Validation.tex +++ b/doc/user_manual/PostProcessors/Validation.tex @@ -9,12 +9,12 @@ \subsubsection{Validation PostProcessors} \begin{itemize} \item \textbf{Probabilistic}, using probabilistic method for validation, can be used for both static and time-dependent problems. \item \textbf{PPDSS}, using dynamic system scaling method for validation, can only be used for time-dependent problems. - % \item \textbf{Representativity} + \item \textbf{Representativity} \item \textbf{PCM}, using Physics-guided Coverage Mapping method for validation, can only be used for static problems. \end{itemize} % -The choices of the available metrics and acceptable data objects are specified in table \ref{tab:ValidationAlgorithms}. +The choices of the available metrics and acceptable data objects are specified in table~\ref{tab:ValidationAlgorithms}. \begin{table}[] \caption{Validation Algorithms and respective available metrics and DataObjects} @@ -23,7 +23,7 @@ \subsubsection{Validation PostProcessors} \hline \textbf{Validation Algorithm} & \textbf{DataObject} & \textbf{Available Metrics} \\ \hline Probabilistic & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet\end{tabular} & \begin{tabular}[c]{@{}c@{}}CDFAreaDifference\\ \\ PDFCommonArea\end{tabular} \\ \hline -Representativity & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet \\DataSet\end{tabular} & \begin{tabular}[c]{@{}c@{}}BiasFactor\end{tabular} \\ \hline +Representativity & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet\end{tabular} & \begin{tabular}[c]{@{}c@{}}\end{tabular} \\ \hline PPDSS & HistorySet & DSS \\ \hline PCM & PointSet & (not applicable) \\ \hline \end{tabular} @@ -106,7 +106,7 @@ \subsubsection{Validation PostProcessors} \item \xmlAttr{type}, \xmlDesc{required string attribute}, the sub-type of this Metric (e.g., SKL, Minkowski) \end{itemize} \nb The choice of the available metric is \xmlString{DSS}, please - refer to \ref{sec:Metrics} for detailed descriptions about this metric. + refer to~\ref{sec:Metrics} for detailed descriptions about this metric. 
\item \xmlNode{pivotParameterFeature}, \xmlDesc{string, required field}, specifies the pivotParameter for a feature . The feature pivot parameter is the shared index of the output variables in the data object. \item \xmlNode{pivotParameterTarget}, \xmlDesc{string, required field}, specifies the pivotParameter for a target . The target pivot parameter is the shared index of the output variables in the data object. \item \xmlNode{separateFeatureData}, \xmlDesc{string, optional field}, specifies the custom feature interval to apply DSS postprocessing. The string should contain three parts; start time, `|', and end time all in one. For example, 0.0|0.5. @@ -248,7 +248,7 @@ \subsubsection{Validation PostProcessors} number of measurements should be equal to the number of features and in the same order as the features listed in \xmlNode{Features}. \end{itemize} -The output of PCM is comma separated list of strings in the format of ``pri\textunderscore post\textunderscore stdReduct\textunderscore [targetName]'', +The output of PCM is comma separated list of strings in the format of ``pri\textunderscore post\textunderscore stdReduct\textunderscore [targetName]'', where [targetName] is the $VariableName$ specified in DataObject of \xmlNode{Targets}. @@ -280,7 +280,7 @@ \subsubsection{Validation PostProcessors} claim a full coverage of the design space and scenarios, and three, if the available set of experiments are not enough to declare coverage what are the remaining experiments required in order to achieve full coverage and increase the representativity/bias factor. The representativity theory was first founded in the -Neutronics community \ref{} then shortly after, was transformed to the thermal hydraulics \ref{}. +Neutronics community \cite{Palmiotti1,Palmiotti2} then shortly after, was transformed to the thermal hydraulics \cite{RepAaron}. So far several algorithms are implemented within this post-processor: % \ppType{Representativity}{Representativity} @@ -301,20 +301,6 @@ \subsubsection{Validation PostProcessors} \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation). \item \xmlNode{targetPivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable in the target model. Default is ``time''. \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation). - - \item \xmlNode{Metric}, \xmlDesc{string, required field}, specifies the \textbf{Metric} name that is defined via - \textbf{Metrics} entity. In this xml-node, the following xml attributes need to be specified: - \begin{itemize} - \item \xmlAttr{class}, \xmlDesc{required string attribute}, the class of this metric (e.g. Metrics) - \item \xmlAttr{type}, \xmlDesc{required string attribute}, the sub-type of this Metric (e.g. SKL, Minkowski) - \end{itemize} - The choice of the available metrics depends on the specific validation algorithm that is chosen (see table \ref{tab:ValidationAlgorithms}) -\end{itemize} - - -The \textbf{Represntativity} post-processor can make use of the \textbf{Metric} system (See Chapter \ref{sec:Metrics}), -in conjunction with the specific algorithm chosen from the list above, -to report validation scores for both static and time-dependent data. Indeed, Both \textbf{PointSet} and \textbf{HistorySet} can be accepted by this post-processor. 
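For reference, the bias (representativity) factor matrix reported by this post-processor takes the form (this display summarizes what \texttt{Representativity.py} implements for eq.~79; it is not quoted verbatim from the cited references):
\begin{equation*}
  r = \left(S_t \Sigma_p S_t^{T}\right)^{-1/2} \left(S_t \Sigma_p S_e^{T}\right) \left(S_e \Sigma_p S_e^{T}\right)^{-1/2},
\end{equation*}
where $S_e$ and $S_t$ are the normalized sensitivities of the mock-experiment measurables and of the target FOMs with respect to the shared parameters, and $\Sigma_p$ is the variance-covariance matrix of the parameter errors; the ``exact'' variant additionally includes the measurement error covariance inside the last inverse square root.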
If the name of given variable to be compared is unique, it can be used directly, otherwise the variable can be specified with $DataObjectName|InputOrOutput|VariableName$ nomenclature. @@ -324,39 +310,46 @@ \subsubsection{Validation PostProcessors} ... - - inputPlaceHolder2 - linModel - MC_external - outputDataMC1 - outputDataMC2 - - - outputDataMC1 - outputDataMC2 - pp1 - pp1_metric - pp1_metric_dump - + + + inputPlaceHolder2 + linModel + ExperimentMCSampler + outputDataMC1 + + + + inputPlaceHolder2 + tarModel + TargetMCSampler + outputDataMC2 + + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + ... -... - - outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 - outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 - simIndex - outputDataMC1|p1,outputDataMC1|p2 - outputDataMC2|p1,outputDataMC2|p2 - outputDataMC1|time - outputDataMC2|time - -... - -... - - - + + p1, p2, e1, e2, e3, bE, F1, F2, F3 + + + p1, p2, o1, o2, o3, bT, FOM1, FOM2, FOM3 + + + outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 + outputDataMC2|FOM1, outputDataMC2|FOM2, outputDataMC2|FOM3 + outputDataMC1|p1,outputDataMC1|p2 + outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|time + outputDataMC2|time + + ... \end{lstlisting} diff --git a/doc/user_manual/postprocessor.tex b/doc/user_manual/postprocessor.tex index 9382de7552..2f533a96fd 100644 --- a/doc/user_manual/postprocessor.tex +++ b/doc/user_manual/postprocessor.tex @@ -72,7 +72,6 @@ \subsection{PostProcessor} %%%%% PP ComparisonStatistics %%%%%%% \input{PostProcessors/ComparisonStatistics.tex} - %%%%% PP ImportanceRank %%%%%%% \input{PostProcessors/ImportanceRank.tex} diff --git a/doc/user_manual/raven_user_manual.bib b/doc/user_manual/raven_user_manual.bib index 0766c78752..041136a0f3 100644 --- a/doc/user_manual/raven_user_manual.bib +++ b/doc/user_manual/raven_user_manual.bib @@ -112,3 +112,46 @@ @TechReport{RAVENtheoryManual year = {2016}, key = {INL/EXT-16-38178} } +@proceedings{RepAaron, + author = {Epiney, Aaron S. and Woolstenhulme, Nicolas}, + title = {Representativity Analysis Applied to TREAT Water Loop LOCA Experiment Design}, + volume = {Volume 3: Student Paper Competition; Thermal-Hydraulics; Verification and Validation}, + series = {International Conference on Nuclear Engineering}, + year = {2020}, + month = {08}, + abstract = {The Transient Reactor Test (TREAT) Facility at Idaho National Laboratory (INL) started testing new fuels and reactor technologies once again in 2018 and new experiments and tests are currently being designed like for example the water loop “TREAT Water Environment Recirculating Loop” (TWERL). During the design of such experiments, the designer must assess how close the experiment reproduces the physics (and other important phenomena) happening during a transient of interest compared to the full-size reactor the experiment attempts representing. Traditionally, to assess this “representativity” of the experiment, scaling theory involving expert judgment is needed. This paper presents a step towards a systematic modeling and simulation (M\\&S) informed methodology for experiment design. The new methodology compares a model of the full system and a model of the mock-up facility that are subject to the same perturbations. In this way, the “overlap” of the perturbed experiment and full-size facility model outputs can be analyzed and the “representativity” of the experiment determined. 
The paper presents a RELAP5-3D analysis, where TWERL LOCA calculations are compared to prototypic PWR LOCA calculations with respect to representativity. To inform the design of the TWERL experiments, i.e. to find the most “representative” configuration for the TWERL loop, different design parameters for TWERL have been optimized in the study.}, + doi = {10.1115/ICONE2020-16914}, + url = {https://doi.org/10.1115/ICONE2020-16914}, + note = {V003T13A055}, + eprint = {https://asmedigitalcollection.asme.org/ICONE/proceedings-pdf/ICONE2020/83785/V003T13A055/6580006/v003t13a055-icone2020-16914.pdf}, +} + +@article{Palmiotti1, +title = {A global approach to the physics validation of simulation codes for future nuclear systems}, +journal = {Annals of Nuclear Energy}, +volume = {36}, +number = {3}, +pages = {355-361}, +year = {2009}, +note = {PHYSOR 2008}, +issn = {0306-4549}, +doi = {https://doi.org/10.1016/j.anucene.2008.11.012}, +url = {https://www.sciencedirect.com/science/article/pii/S0306454908002995}, +author = {Giuseppe Palmiotti and Massimo Salvatores and Gerardo Aliberti and H. Hiruta and R. McKnight and P. Oblozinsky and W.S. Yang}, +abstract = {This paper presents a global approach to the validation of the parameters that enter into the neutronics simulation tools for advanced fast reactors with the objective to reduce the uncertainties associated to crucial design parameters. This global approach makes use of sensitivity/uncertainty methods; statistical data adjustments; integral experiment selection, analysis and “representativity” quantification with respect to a reference system; scientifically based cross-section covariance data and appropriate methods for their use in multigroup calculations. This global approach has been applied to the uncertainty reduction on the criticality of the Advanced Burner Reactor (both metal and oxide core versions) presently investigated in the frame of the GNEP initiative. The results obtained are very encouraging and allow to indicate some possible improvements of the ENDF/B-VII data file.} +} + +@article{Palmiotti2, +title = {The role of experiments and of sensitivity analysis in simulation validation strategies with emphasis on reactor physics}, +journal = {Annals of Nuclear Energy}, +volume = {52}, +pages = {10-21}, +year = {2013}, +note = {Nuclear Reactor Safety Simulation and Uncertainty Analysis}, +issn = {0306-4549}, +doi = {https://doi.org/10.1016/j.anucene.2012.06.002}, +url = {https://www.sciencedirect.com/science/article/pii/S0306454912001843}, +author = {Giuseppe Palmiotti and Massimo Salvatores}, +keywords = {Reactor design, Simulation validation strategies, Sensitivity analysis}, +abstract = {The complementary role of experiments and of sensitivity analysis has been and still is a key feature of validation strategies used in the field of simulation tools for nuclear reactor design. The present paper gives a summary of the development of more and more sophisticated validation strategies up to the present trend for science-based validation approaches. 
Most examples and some very recent original developments are given, mostly in the field of neutronics that has traditionally provided cutting edge advances for simulation tools validation.} +} \ No newline at end of file From 0d532f47c091f66ab156394d7b797e3a8b0aa4ee Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Sun, 5 Feb 2023 17:57:32 -0700 Subject: [PATCH 53/95] removing duplicated Representativity.py --- .../validationAlgorithms/Representativity.py | 247 ------------------ 1 file changed, 247 deletions(-) delete mode 100644 framework/Models/PostProcessors/validationAlgorithms/Representativity.py diff --git a/framework/Models/PostProcessors/validationAlgorithms/Representativity.py b/framework/Models/PostProcessors/validationAlgorithms/Representativity.py deleted file mode 100644 index 239d446cfc..0000000000 --- a/framework/Models/PostProcessors/validationAlgorithms/Representativity.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" - Created on April 29, 2021 - - @author: Mohammad Abdo (@Jimmy-INL) - - This class represents a base class for the validation algorithms - It inherits from the PostProcessor directly - ##TODO: Recast it once the new PostProcesso API gets in place -""" - -#External Modules------------------------------------------------------------------------------------ -import numpy as np -import xarray as xr -#External Modules End-------------------------------------------------------------------------------- - -#Internal Modules------------------------------------------------------------------------------------ -#from utils import xmlUtils -from utils import InputData, InputTypes -#import Files -#import Distributions -#import MetricDistributor -from utils import utils -from ..Validation import Validation -# from utils.mathUtils import partialDerivative, derivatives -#Internal Modules End-------------------------------------------------------------------------------- - -class Representativity(Validation): - """ - Representativity is a base class for validation problems - It represents the base class for most validation problems - """ - - @classmethod - def getInputSpecification(cls): - """ - Method to get a reference to a class that specifies the input data for - class cls. - @ In, cls, the class for which we are retrieving the specification - @ Out, specs, InputData.ParameterInput, class to use for - specifying input of cls. 
- """ - specs = super(Representativity, cls).getInputSpecification() - parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType) - parametersInput.addParam("type", InputTypes.StringType) - specs.addSub(parametersInput) - targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType) - targetParametersInput.addParam("type", InputTypes.StringType) - specs.addSub(targetParametersInput) - targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType) - specs.addSub(targetPivotParameterInput) - return specs - - def __init__(self): - """ - Constructor - @ In, None - @ Out, None - """ - super().__init__() - from Models.PostProcessors import factory as ppFactory # delay import to allow definition - self.printTag = 'POSTPROCESSOR Representativity' - self.dynamicType = ['static','dynamic'] # for now only static is available - self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics - self.name = 'Represntativity' - self.stat = ppFactory.returnInstance('BasicStatistics') - self.stat.what = ['NormalizedSensitivities'] # expected value calculation - - - # def inputToInternal(self, currentInputs): - # """ - # Method to convert an input object into the internal format that is - # understandable by this pp. - # @ In, currentInputs, list or DataObject, data object or a list of data objects - # @ Out, measureList, list of (feature, target), the list of the features and targets to measure the distance between - # """ - # if type(currentInputs) != list: - # currentInputs = [currentInputs] - # hasPointSet = False - # hasHistorySet = False - # #Check for invalid types - # for currentInput in currentInputs: - # inputType = None - # if hasattr(currentInput, 'type'): - # inputType = currentInput.type - - # if isinstance(currentInput, Files.File): - # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") - # elif isinstance(currentInput, Distributions.Distribution): - # pass #Allowed type - # elif inputType == 'HDF5': - # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") - # elif inputType == 'PointSet': - # hasPointSet = True - # elif inputType == 'HistorySet': - # hasHistorySet = True - # if self.multiOutput == 'raw_values': - # self.dynamic = True - # if self.pivotParameter not in currentInput.getVars('indexes'): - # self.raiseAnError(IOError, self, 'Pivot parameter', self.pivotParameter,'has not been found in DataObject', currentInput.name) - # if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): - # self.raiseAnError(IOError, "HistorySet", currentInput.name," is not syncronized, please use Interfaced PostProcessor HistorySetSync to pre-process it") - # pivotValues = currentInput.asDataset()[self.pivotParameter].values - # if len(self.pivotValues) == 0: - # self.pivotValues = pivotValues - # elif set(self.pivotValues) != set(pivotValues): - # self.raiseAnError(IOError, "Pivot values for pivot parameter",self.pivotParameter, "in provided HistorySets are not the same") - # else: - # self.raiseAnError(IOError, "Metric cannot process "+inputType+ " of type "+str(type(currentInput))) - # if self.multiOutput == 'raw_values' and hasPointSet and hasHistorySet: - # self.multiOutput = 'mean' - # self.raiseAWarning("Reset 'multiOutput' to 'mean', since both PointSet and HistorySet are provided as Inputs. 
Calculation outputs will be aggregated by averaging") - - # measureList = [] - - # for cnt in range(len(self.features)): - # feature = self.features[cnt] - # target = self.targets[cnt] - # featureData = self.__getMetricSide(feature, currentInputs) - # targetData = self.__getMetricSide(target, currentInputs) - # measureList.append((featureData, targetData)) - - # return measureList - - # def initialize(self, features, targets, **kwargs): - # """ - # Set up this interface for a particular activity - # @ In, features, list, list of features - # @ In, targets, list, list of targets - # @ In, kwargs, dict, keyword arguments - # """ - # super().initialize(features, targets, **kwargs) - # self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.targets), 'prefix':'nsen'}]} - # # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]} - # fakeRunInfo = {'workingDir':'','stepName':''} - # self.stat.initialize(fakeRunInfo, self.Parameters, features, **kwargs) - - def _handleInput(self, paramInput): - """ - Function to handle the parsed paramInput for this class. - @ In, paramInput, ParameterInput, the already parsed input. - @ Out, None - """ - super()._handleInput(paramInput) - for child in paramInput.subparts: - if child.getName() == 'featureParameters': - self.Parameters = child.value - elif child.getName() == 'targetParameters': - self.targetParameters = child.value - elif child.getName() == 'targetPivotParameter': - self.targetPivotParameter = child.value - - def run(self, inputIn): - """ - This method executes the postprocessor action. In this case it loads the - results to specified dataObject - @ In, inputIn, list, dictionary of data to process - @ Out, outputDict, dict, dictionary containing the post-processed results - """ - dataSets = [data for _, _, data in inputIn['Data']] - pivotParameter = self.pivotParameter - names=[] - if isinstance(inputIn['Data'][0][-1], xr.Dataset): - names = [inp[-1].attrs['name'] for inp in inputIn['Data']] - if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None: - if 'dynamic' not in self.dynamicType: #self.model.dataType: - self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name)) - else: - pivotParameter = self.pivotParameter - evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()} - if pivotParameter: - if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): - self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0]))) - if pivotParameter not in evaluation: - evaluation[pivotParameter] = dataSets[0][pivotParameter] - return evaluation - - def _evaluate(self, datasets, **kwargs): - """ - Main method to "do what you do". - @ In, datasets, list, list of datasets (data1,data2,etc.) to used. 
- @ In, kwargs, dict, keyword arguments - @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} - """ - # self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] - for data in datasets: - sen = self.stat.run(data) - names = kwargs.get('dataobjectNames') - outs = {} - for feat, targ, param, targParam in zip(self.features, self.targets, self.Parameters, self.targetParameters): - featData = self._getDataFromDatasets(datasets, feat, names) - targData = self._getDataFromDatasets(datasets, targ, names) - Parameters = self._getDataFromDatasets(datasets, param, names) - targetParameters = self._getDataFromDatasets(datasets, targParam, names) - # senFOMs = partialDerivative(featData.data,np.atleast_2d(Parameters.data)[0,:],'x1') - senFOMs = np.atleast_2d(Parameters[0])#.data - senMeasurables = np.atleast_2d(targetParameters[0]) - covParameters = senFOMs @ senMeasurables.T - for metric in self.metrics: - name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.name) - outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) - return outs - - def _getDataFromDatasets(self, datasets, var, names=None): - """ - Utility function to retrieve the data from datasets - @ In, datasets, list, list of datasets (data1,data2,etc.) to search from. - @ In, names, list, optional, list of datasets names (data1,data2,etc.). If not present, the search will be done on the full list. - @ In, var, str, the variable to find (either in fromat dataobject|var or simply var) - @ Out, data, tuple(numpy.ndarray, xarray.DataArray or None), the retrived data (data, probability weights (None if not present)) - """ - data = None - pw = None - dat = None - if "|" in var and names is not None: - do, feat = var.split("|") - doindex = names.index(do) - dat = datasets[doindex][feat] - else: - for doindex, ds in enumerate(datasets): - if var in ds: - dat = ds[var] - break - if 'ProbabilityWeight-{}'.format(feat) in datasets[names.index(do)]: - pw = datasets[doindex]['ProbabilityWeight-{}'.format(feat)].values - elif 'ProbabilityWeight' in datasets[names.index(do)]: - pw = datasets[doindex]['ProbabilityWeight'].values - dim = len(dat.shape) - # (numRealizations, numHistorySteps) for MetricDistributor - dat = dat.values - if dim == 1: - # the following reshaping does not require a copy - dat.shape = (dat.shape[0], 1) - data = dat, pw - return data \ No newline at end of file From e480c125339afb8a2e91f5cfe11d71653259b22e Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Sun, 5 Feb 2023 21:49:45 -0700 Subject: [PATCH 54/95] Removed old Representativity.py --- .../validationAlgorithms/Representativity.py | 247 ++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100644 framework/Models/PostProcessors/validationAlgorithms/Representativity.py diff --git a/framework/Models/PostProcessors/validationAlgorithms/Representativity.py b/framework/Models/PostProcessors/validationAlgorithms/Representativity.py new file mode 100644 index 0000000000..239d446cfc --- /dev/null +++ b/framework/Models/PostProcessors/validationAlgorithms/Representativity.py @@ -0,0 +1,247 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + Created on April 29, 2021 + + @author: Mohammad Abdo (@Jimmy-INL) + + This class represents a base class for the validation algorithms + It inherits from the PostProcessor directly + ##TODO: Recast it once the new PostProcesso API gets in place +""" + +#External Modules------------------------------------------------------------------------------------ +import numpy as np +import xarray as xr +#External Modules End-------------------------------------------------------------------------------- + +#Internal Modules------------------------------------------------------------------------------------ +#from utils import xmlUtils +from utils import InputData, InputTypes +#import Files +#import Distributions +#import MetricDistributor +from utils import utils +from ..Validation import Validation +# from utils.mathUtils import partialDerivative, derivatives +#Internal Modules End-------------------------------------------------------------------------------- + +class Representativity(Validation): + """ + Representativity is a base class for validation problems + It represents the base class for most validation problems + """ + + @classmethod + def getInputSpecification(cls): + """ + Method to get a reference to a class that specifies the input data for + class cls. + @ In, cls, the class for which we are retrieving the specification + @ Out, specs, InputData.ParameterInput, class to use for + specifying input of cls. + """ + specs = super(Representativity, cls).getInputSpecification() + parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType) + parametersInput.addParam("type", InputTypes.StringType) + specs.addSub(parametersInput) + targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType) + targetParametersInput.addParam("type", InputTypes.StringType) + specs.addSub(targetParametersInput) + targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType) + specs.addSub(targetPivotParameterInput) + return specs + + def __init__(self): + """ + Constructor + @ In, None + @ Out, None + """ + super().__init__() + from Models.PostProcessors import factory as ppFactory # delay import to allow definition + self.printTag = 'POSTPROCESSOR Representativity' + self.dynamicType = ['static','dynamic'] # for now only static is available + self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics + self.name = 'Represntativity' + self.stat = ppFactory.returnInstance('BasicStatistics') + self.stat.what = ['NormalizedSensitivities'] # expected value calculation + + + # def inputToInternal(self, currentInputs): + # """ + # Method to convert an input object into the internal format that is + # understandable by this pp. 
+ # @ In, currentInputs, list or DataObject, data object or a list of data objects + # @ Out, measureList, list of (feature, target), the list of the features and targets to measure the distance between + # """ + # if type(currentInputs) != list: + # currentInputs = [currentInputs] + # hasPointSet = False + # hasHistorySet = False + # #Check for invalid types + # for currentInput in currentInputs: + # inputType = None + # if hasattr(currentInput, 'type'): + # inputType = currentInput.type + + # if isinstance(currentInput, Files.File): + # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") + # elif isinstance(currentInput, Distributions.Distribution): + # pass #Allowed type + # elif inputType == 'HDF5': + # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") + # elif inputType == 'PointSet': + # hasPointSet = True + # elif inputType == 'HistorySet': + # hasHistorySet = True + # if self.multiOutput == 'raw_values': + # self.dynamic = True + # if self.pivotParameter not in currentInput.getVars('indexes'): + # self.raiseAnError(IOError, self, 'Pivot parameter', self.pivotParameter,'has not been found in DataObject', currentInput.name) + # if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): + # self.raiseAnError(IOError, "HistorySet", currentInput.name," is not syncronized, please use Interfaced PostProcessor HistorySetSync to pre-process it") + # pivotValues = currentInput.asDataset()[self.pivotParameter].values + # if len(self.pivotValues) == 0: + # self.pivotValues = pivotValues + # elif set(self.pivotValues) != set(pivotValues): + # self.raiseAnError(IOError, "Pivot values for pivot parameter",self.pivotParameter, "in provided HistorySets are not the same") + # else: + # self.raiseAnError(IOError, "Metric cannot process "+inputType+ " of type "+str(type(currentInput))) + # if self.multiOutput == 'raw_values' and hasPointSet and hasHistorySet: + # self.multiOutput = 'mean' + # self.raiseAWarning("Reset 'multiOutput' to 'mean', since both PointSet and HistorySet are provided as Inputs. Calculation outputs will be aggregated by averaging") + + # measureList = [] + + # for cnt in range(len(self.features)): + # feature = self.features[cnt] + # target = self.targets[cnt] + # featureData = self.__getMetricSide(feature, currentInputs) + # targetData = self.__getMetricSide(target, currentInputs) + # measureList.append((featureData, targetData)) + + # return measureList + + # def initialize(self, features, targets, **kwargs): + # """ + # Set up this interface for a particular activity + # @ In, features, list, list of features + # @ In, targets, list, list of targets + # @ In, kwargs, dict, keyword arguments + # """ + # super().initialize(features, targets, **kwargs) + # self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.targets), 'prefix':'nsen'}]} + # # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]} + # fakeRunInfo = {'workingDir':'','stepName':''} + # self.stat.initialize(fakeRunInfo, self.Parameters, features, **kwargs) + + def _handleInput(self, paramInput): + """ + Function to handle the parsed paramInput for this class. + @ In, paramInput, ParameterInput, the already parsed input. 
+ @ Out, None + """ + super()._handleInput(paramInput) + for child in paramInput.subparts: + if child.getName() == 'featureParameters': + self.Parameters = child.value + elif child.getName() == 'targetParameters': + self.targetParameters = child.value + elif child.getName() == 'targetPivotParameter': + self.targetPivotParameter = child.value + + def run(self, inputIn): + """ + This method executes the postprocessor action. In this case it loads the + results to specified dataObject + @ In, inputIn, list, dictionary of data to process + @ Out, outputDict, dict, dictionary containing the post-processed results + """ + dataSets = [data for _, _, data in inputIn['Data']] + pivotParameter = self.pivotParameter + names=[] + if isinstance(inputIn['Data'][0][-1], xr.Dataset): + names = [inp[-1].attrs['name'] for inp in inputIn['Data']] + if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None: + if 'dynamic' not in self.dynamicType: #self.model.dataType: + self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name)) + else: + pivotParameter = self.pivotParameter + evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()} + if pivotParameter: + if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): + self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0]))) + if pivotParameter not in evaluation: + evaluation[pivotParameter] = dataSets[0][pivotParameter] + return evaluation + + def _evaluate(self, datasets, **kwargs): + """ + Main method to "do what you do". + @ In, datasets, list, list of datasets (data1,data2,etc.) to used. + @ In, kwargs, dict, keyword arguments + @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} + """ + # self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] + for data in datasets: + sen = self.stat.run(data) + names = kwargs.get('dataobjectNames') + outs = {} + for feat, targ, param, targParam in zip(self.features, self.targets, self.Parameters, self.targetParameters): + featData = self._getDataFromDatasets(datasets, feat, names) + targData = self._getDataFromDatasets(datasets, targ, names) + Parameters = self._getDataFromDatasets(datasets, param, names) + targetParameters = self._getDataFromDatasets(datasets, targParam, names) + # senFOMs = partialDerivative(featData.data,np.atleast_2d(Parameters.data)[0,:],'x1') + senFOMs = np.atleast_2d(Parameters[0])#.data + senMeasurables = np.atleast_2d(targetParameters[0]) + covParameters = senFOMs @ senMeasurables.T + for metric in self.metrics: + name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.name) + outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) + return outs + + def _getDataFromDatasets(self, datasets, var, names=None): + """ + Utility function to retrieve the data from datasets + @ In, datasets, list, list of datasets (data1,data2,etc.) to search from. + @ In, names, list, optional, list of datasets names (data1,data2,etc.). If not present, the search will be done on the full list. 
+ @ In, var, str, the variable to find (either in fromat dataobject|var or simply var) + @ Out, data, tuple(numpy.ndarray, xarray.DataArray or None), the retrived data (data, probability weights (None if not present)) + """ + data = None + pw = None + dat = None + if "|" in var and names is not None: + do, feat = var.split("|") + doindex = names.index(do) + dat = datasets[doindex][feat] + else: + for doindex, ds in enumerate(datasets): + if var in ds: + dat = ds[var] + break + if 'ProbabilityWeight-{}'.format(feat) in datasets[names.index(do)]: + pw = datasets[doindex]['ProbabilityWeight-{}'.format(feat)].values + elif 'ProbabilityWeight' in datasets[names.index(do)]: + pw = datasets[doindex]['ProbabilityWeight'].values + dim = len(dat.shape) + # (numRealizations, numHistorySteps) for MetricDistributor + dat = dat.values + if dim == 1: + # the following reshaping does not require a copy + dat.shape = (dat.shape[0], 1) + data = dat, pw + return data \ No newline at end of file From 20562747b3b6a82dfdee56a2331367ffa25d3d10 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 6 Feb 2023 17:10:52 -0700 Subject: [PATCH 55/95] deleting duplicated representativity.py --- .../validationAlgorithms/Representativity.py | 247 ------------------ 1 file changed, 247 deletions(-) delete mode 100644 framework/Models/PostProcessors/validationAlgorithms/Representativity.py diff --git a/framework/Models/PostProcessors/validationAlgorithms/Representativity.py b/framework/Models/PostProcessors/validationAlgorithms/Representativity.py deleted file mode 100644 index 239d446cfc..0000000000 --- a/framework/Models/PostProcessors/validationAlgorithms/Representativity.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" - Created on April 29, 2021 - - @author: Mohammad Abdo (@Jimmy-INL) - - This class represents a base class for the validation algorithms - It inherits from the PostProcessor directly - ##TODO: Recast it once the new PostProcesso API gets in place -""" - -#External Modules------------------------------------------------------------------------------------ -import numpy as np -import xarray as xr -#External Modules End-------------------------------------------------------------------------------- - -#Internal Modules------------------------------------------------------------------------------------ -#from utils import xmlUtils -from utils import InputData, InputTypes -#import Files -#import Distributions -#import MetricDistributor -from utils import utils -from ..Validation import Validation -# from utils.mathUtils import partialDerivative, derivatives -#Internal Modules End-------------------------------------------------------------------------------- - -class Representativity(Validation): - """ - Representativity is a base class for validation problems - It represents the base class for most validation problems - """ - - @classmethod - def getInputSpecification(cls): - """ - Method to get a reference to a class that specifies the input data for - class cls. - @ In, cls, the class for which we are retrieving the specification - @ Out, specs, InputData.ParameterInput, class to use for - specifying input of cls. - """ - specs = super(Representativity, cls).getInputSpecification() - parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType) - parametersInput.addParam("type", InputTypes.StringType) - specs.addSub(parametersInput) - targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType) - targetParametersInput.addParam("type", InputTypes.StringType) - specs.addSub(targetParametersInput) - targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType) - specs.addSub(targetPivotParameterInput) - return specs - - def __init__(self): - """ - Constructor - @ In, None - @ Out, None - """ - super().__init__() - from Models.PostProcessors import factory as ppFactory # delay import to allow definition - self.printTag = 'POSTPROCESSOR Representativity' - self.dynamicType = ['static','dynamic'] # for now only static is available - self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics - self.name = 'Represntativity' - self.stat = ppFactory.returnInstance('BasicStatistics') - self.stat.what = ['NormalizedSensitivities'] # expected value calculation - - - # def inputToInternal(self, currentInputs): - # """ - # Method to convert an input object into the internal format that is - # understandable by this pp. 
- # @ In, currentInputs, list or DataObject, data object or a list of data objects - # @ Out, measureList, list of (feature, target), the list of the features and targets to measure the distance between - # """ - # if type(currentInputs) != list: - # currentInputs = [currentInputs] - # hasPointSet = False - # hasHistorySet = False - # #Check for invalid types - # for currentInput in currentInputs: - # inputType = None - # if hasattr(currentInput, 'type'): - # inputType = currentInput.type - - # if isinstance(currentInput, Files.File): - # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") - # elif isinstance(currentInput, Distributions.Distribution): - # pass #Allowed type - # elif inputType == 'HDF5': - # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") - # elif inputType == 'PointSet': - # hasPointSet = True - # elif inputType == 'HistorySet': - # hasHistorySet = True - # if self.multiOutput == 'raw_values': - # self.dynamic = True - # if self.pivotParameter not in currentInput.getVars('indexes'): - # self.raiseAnError(IOError, self, 'Pivot parameter', self.pivotParameter,'has not been found in DataObject', currentInput.name) - # if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): - # self.raiseAnError(IOError, "HistorySet", currentInput.name," is not syncronized, please use Interfaced PostProcessor HistorySetSync to pre-process it") - # pivotValues = currentInput.asDataset()[self.pivotParameter].values - # if len(self.pivotValues) == 0: - # self.pivotValues = pivotValues - # elif set(self.pivotValues) != set(pivotValues): - # self.raiseAnError(IOError, "Pivot values for pivot parameter",self.pivotParameter, "in provided HistorySets are not the same") - # else: - # self.raiseAnError(IOError, "Metric cannot process "+inputType+ " of type "+str(type(currentInput))) - # if self.multiOutput == 'raw_values' and hasPointSet and hasHistorySet: - # self.multiOutput = 'mean' - # self.raiseAWarning("Reset 'multiOutput' to 'mean', since both PointSet and HistorySet are provided as Inputs. Calculation outputs will be aggregated by averaging") - - # measureList = [] - - # for cnt in range(len(self.features)): - # feature = self.features[cnt] - # target = self.targets[cnt] - # featureData = self.__getMetricSide(feature, currentInputs) - # targetData = self.__getMetricSide(target, currentInputs) - # measureList.append((featureData, targetData)) - - # return measureList - - # def initialize(self, features, targets, **kwargs): - # """ - # Set up this interface for a particular activity - # @ In, features, list, list of features - # @ In, targets, list, list of targets - # @ In, kwargs, dict, keyword arguments - # """ - # super().initialize(features, targets, **kwargs) - # self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.targets), 'prefix':'nsen'}]} - # # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]} - # fakeRunInfo = {'workingDir':'','stepName':''} - # self.stat.initialize(fakeRunInfo, self.Parameters, features, **kwargs) - - def _handleInput(self, paramInput): - """ - Function to handle the parsed paramInput for this class. - @ In, paramInput, ParameterInput, the already parsed input. 
- @ Out, None - """ - super()._handleInput(paramInput) - for child in paramInput.subparts: - if child.getName() == 'featureParameters': - self.Parameters = child.value - elif child.getName() == 'targetParameters': - self.targetParameters = child.value - elif child.getName() == 'targetPivotParameter': - self.targetPivotParameter = child.value - - def run(self, inputIn): - """ - This method executes the postprocessor action. In this case it loads the - results to specified dataObject - @ In, inputIn, list, dictionary of data to process - @ Out, outputDict, dict, dictionary containing the post-processed results - """ - dataSets = [data for _, _, data in inputIn['Data']] - pivotParameter = self.pivotParameter - names=[] - if isinstance(inputIn['Data'][0][-1], xr.Dataset): - names = [inp[-1].attrs['name'] for inp in inputIn['Data']] - if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None: - if 'dynamic' not in self.dynamicType: #self.model.dataType: - self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name)) - else: - pivotParameter = self.pivotParameter - evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()} - if pivotParameter: - if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): - self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0]))) - if pivotParameter not in evaluation: - evaluation[pivotParameter] = dataSets[0][pivotParameter] - return evaluation - - def _evaluate(self, datasets, **kwargs): - """ - Main method to "do what you do". - @ In, datasets, list, list of datasets (data1,data2,etc.) to used. - @ In, kwargs, dict, keyword arguments - @ Out, outputDict, dict, dictionary containing the results {"feat"_"target"_"metric_name":value} - """ - # self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] - for data in datasets: - sen = self.stat.run(data) - names = kwargs.get('dataobjectNames') - outs = {} - for feat, targ, param, targParam in zip(self.features, self.targets, self.Parameters, self.targetParameters): - featData = self._getDataFromDatasets(datasets, feat, names) - targData = self._getDataFromDatasets(datasets, targ, names) - Parameters = self._getDataFromDatasets(datasets, param, names) - targetParameters = self._getDataFromDatasets(datasets, targParam, names) - # senFOMs = partialDerivative(featData.data,np.atleast_2d(Parameters.data)[0,:],'x1') - senFOMs = np.atleast_2d(Parameters[0])#.data - senMeasurables = np.atleast_2d(targetParameters[0]) - covParameters = senFOMs @ senMeasurables.T - for metric in self.metrics: - name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.name) - outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters) - return outs - - def _getDataFromDatasets(self, datasets, var, names=None): - """ - Utility function to retrieve the data from datasets - @ In, datasets, list, list of datasets (data1,data2,etc.) to search from. - @ In, names, list, optional, list of datasets names (data1,data2,etc.). If not present, the search will be done on the full list. 
- @ In, var, str, the variable to find (either in fromat dataobject|var or simply var) - @ Out, data, tuple(numpy.ndarray, xarray.DataArray or None), the retrived data (data, probability weights (None if not present)) - """ - data = None - pw = None - dat = None - if "|" in var and names is not None: - do, feat = var.split("|") - doindex = names.index(do) - dat = datasets[doindex][feat] - else: - for doindex, ds in enumerate(datasets): - if var in ds: - dat = ds[var] - break - if 'ProbabilityWeight-{}'.format(feat) in datasets[names.index(do)]: - pw = datasets[doindex]['ProbabilityWeight-{}'.format(feat)].values - elif 'ProbabilityWeight' in datasets[names.index(do)]: - pw = datasets[doindex]['ProbabilityWeight'].values - dim = len(dat.shape) - # (numRealizations, numHistorySteps) for MetricDistributor - dat = dat.values - if dim == 1: - # the following reshaping does not require a copy - dat.shape = (dat.shape[0], 1) - data = dat, pw - return data \ No newline at end of file From 10c719d3776cb282c81da3cb09f9347ed73e001e Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 6 Feb 2023 19:29:51 -0700 Subject: [PATCH 56/95] deleting unnecessary metric --- ravenframework/Metrics/metrics/Factory.py | 1 - .../metrics/RepresentativityFactors.py | 92 ------------------- 2 files changed, 93 deletions(-) delete mode 100644 ravenframework/Metrics/metrics/RepresentativityFactors.py diff --git a/ravenframework/Metrics/metrics/Factory.py b/ravenframework/Metrics/metrics/Factory.py index a53c4cb70d..f37d0cb576 100644 --- a/ravenframework/Metrics/metrics/Factory.py +++ b/ravenframework/Metrics/metrics/Factory.py @@ -25,7 +25,6 @@ from .CDFAreaDifference import CDFAreaDifference from .PDFCommonArea import PDFCommonArea from .ScipyMetric import ScipyMetric -from .RepresentativityFactors import RepresentativityFactors from .DSS import DSS factory = EntityFactory('Metrics') diff --git a/ravenframework/Metrics/metrics/RepresentativityFactors.py b/ravenframework/Metrics/metrics/RepresentativityFactors.py deleted file mode 100644 index b86af5cdcb..0000000000 --- a/ravenframework/Metrics/metrics/RepresentativityFactors.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2017 Battelle Energy Alliance, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -Created on April 29 2021 - -@author: Mohammad Abdo (@Jimmy-INL) -""" -#External Modules------------------------------------------------------------------------------------ -import numpy as np -import scipy as sp -from scipy.linalg import sqrtm -import copy -#External Modules End-------------------------------------------------------------------------------- - -#Internal Modules------------------------------------------------------------------------------------ -from .MetricInterface import MetricInterface -from ...utils import InputData, InputTypes -#Internal Modules End-------------------------------------------------------------------------------- - -class RepresentativityFactors(MetricInterface): - """ - RepresntativityFactors is the metric class used to quantitatively - assess the relativeness of a mock experiment to the target plant. - """ - availScaling ={} - - @classmethod - def getInputSpecification(cls): - """ - Method to get a reference to a class that specifies the input data for - class cls. - @ In, cls, the class for which we are retrieving the specification - @ Out, inputSpecification, InputData.ParameterInput, class to use for - specifying input of cls. - """ - inputSpecification = super(RepresentativityFactors, cls).getInputSpecification() - actionTypeInput = InputData.parameterInputFactory("actionType", contentType=InputTypes.StringType) - inputSpecification.addSub(actionTypeInput) - - return inputSpecification - - def __init__(self): - """ - Constructor - @ In, None - @ Out, None - """ - super().__init__() - # The type of given analysis - self.actionType = None - # True indicates the metric needs to be able to handle dynamic data - self._dynamicHandling = True - # True indicates the metric needs to be able to handle pairwise data - self._pairwiseHandling = False - - def run(self, x, y, weights = None, axis = 0, **kwargs): - """ - This method computes DSS distance between two inputs x and y based on given metric - @ In, x, numpy.ndarray, array containing data of x, if 1D array is provided, - the array will be reshaped via x.reshape(-1,1), shape (n_samples, ), if 2D - array is provided, shape (n_samples, n_time_steps) - @ In, y, numpy.ndarray, array containing data of y, if 1D array is provided, - the array will be reshaped via y.reshape(-1,1), shape (n_samples, ), if 2D - array is provided, shape (n_samples, n_time_steps) - @ In, weights, array_like (numpy.array or list), optional, weights associated - with input, shape (n_samples) if axis = 0, otherwise shape (n_time_steps) - @ In, axis, integer, optional, axis along which a metric is performed, default is 0, - i.e. the metric will performed along the first dimension (the "rows"). - If metric postprocessor is used, the first dimension is the RAVEN_sample_ID, - and the second dimension is the pivotParameter if HistorySet is provided. 
- @ In, kwargs, dict, dictionary of parameters characteristic of each metric - @ Out, value, float, metric result - """ - senMeasurables = kwargs['senMeasurables'] - senFOMs = kwargs['senFOMs'] - covParameters = kwargs['covParameters'] - # r = (senFOMs.T @ covParameters @ senMeasurables)/\ - # np.sqrt(senFOMs.T @ covParameters @ senFOMs)/\ - # np.sqrt(senMeasurables.T @ covParameters @ senMeasurables) - r = (sp.linalg.pinv(sqrtm(senFOMs @ covParameters @ senFOMs.T)) @ sqrtm(senFOMs @ covParameters @ senMeasurables.T) @ sqrtm(senFOMs @ covParameters @ senMeasurables.T) @ sp.linalg.pinv(sqrtm(senMeasurables @ covParameters @ senMeasurables.T))).real - return r From eb0fde7722634e5fd5c5e6e4756673b27c549ca0 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 6 Feb 2023 19:36:23 -0700 Subject: [PATCH 57/95] modifying dockstring --- .../Models/PostProcessors/Validations/Representativity.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 5f512625ba..9c476e079d 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -29,7 +29,7 @@ class Representativity(ValidationBase): Representativity is a base class for validation problems It represents the base class for most validation problems - @ Authors: Mohammad abdo (@Jimmy-INL) + @ Authors: Mohammad Abdo (@Jimmy-INL) Congjian Wang (@wangcj05) Andrea Alfonsi (@aalfonsi) Aaron Epiney (@AaronEpiney) @@ -486,4 +486,4 @@ def _targetCorrection(self, FOMs, UparVar, Umes, UmesVar, normalizedSenTar, norm - (normalizedSenTar @ UparVar @ normalizedSenExp.T)\ @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenExp.T)\ @ (normalizedSenExp @ UparVar @ normalizedSenTar.T) - return tarTilde, tarVarTilde, UtarVarTilde, UtarVarztilde_no_UmesVar, propagetedExpUncert \ No newline at end of file + return tarTilde, tarVarTilde, UtarVarTilde, UtarVarztilde_no_UmesVar, propagetedExpUncert From d7297bccb06bfe3ee88983223353b600ec4d95ea Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Fri, 10 Feb 2023 12:03:42 -0700 Subject: [PATCH 58/95] adding tests --- ...representativity_perfectLinExpToTarget.xml | 181 +++++++++++++++++ ...ntativity_rankDifficientLinExpToTarget.xml | 183 ++++++++++++++++++ ...entativity_singlePerfectLinExpToTarget.xml | 139 +++++++++++++ 3 files changed, 503 insertions(+) create mode 100644 tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml create mode 100644 tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml create mode 100644 tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml diff --git a/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml new file mode 100644 index 0000000000..f460811f08 --- /dev/null +++ b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml @@ -0,0 +1,181 @@ + + + + RepresentativityPerfectMatch + mcRunExp, mcRunTar, PP1 + 1 + + + + framework/PostProcessors/Validation/test_validation_representativity1 + Mohammad Abdo (@Jimmy-INL) + 2021-04-29 + PostProcessors.Validation + + This test assesses the mechanics of the representativity workflow; one of the validation algorithms 
used in RAVEN. + This test a linear model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors. + + + Added Modification for new PP API + + + + + + p1, p2, e1, e2, e3, bE, F1, F2, F3 + + + p1, p2, o1, o2, o3, bT, FOM1, FOM2, FOM3 + + + outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 + outputDataMC2|FOM1, outputDataMC2|FOM2, outputDataMC2|FOM3 + outputDataMC1|p1,outputDataMC1|p2 + outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|time + outputDataMC2|time + + + + + + 5.5 + 0.55 + + + 8 + 0.8 + + + + + + + 100 + + + dist1 + + + dist2 + + 2,-3 + 1, 8 + -5,-5 + 0,0,0 + + + + 100 + + + dist1 + + + dist2 + + 2,-3 + 1, 8 + -5,-5 + 0,0,0 + + + + + + + inputPlaceHolder2 + linModel + ExperimentMCSampler + outputDataMC1 + + + + inputPlaceHolder2 + tarModel + TargetMCSampler + outputDataMC2 + + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + + + + + p1,p2 + OutputPlaceHolder + + + p1,p2 + F1, F2, F3 + + + p1,p2 + FOM1, FOM2, FOM3 + + + InputPlaceHolder + + BiasFactor_MockF1_TarFOM1, + BiasFactor_MockF1_TarFOM2, + BiasFactor_MockF1_TarFOM3, + BiasFactor_MockF2_TarFOM1, + BiasFactor_MockF2_TarFOM2, + BiasFactor_MockF2_TarFOM3, + BiasFactor_MockF3_TarFOM1, + BiasFactor_MockF3_TarFOM2, + BiasFactor_MockF3_TarFOM3, + ExactBiasFactor_MockF1_TarFOM1, + ExactBiasFactor_MockF1_TarFOM2, + ExactBiasFactor_MockF1_TarFOM3, + ExactBiasFactor_MockF2_TarFOM1, + ExactBiasFactor_MockF2_TarFOM2, + ExactBiasFactor_MockF2_TarFOM3, + ExactBiasFactor_MockF3_TarFOM1, + ExactBiasFactor_MockF3_TarFOM2, + ExactBiasFactor_MockF3_TarFOM3, + CorrectedParameters_p1, + CorrectedParameters_p2, + CorrectedTargets_FOM1, + CorrectedTargets_FOM2, + CorrectedTargets_FOM3, + VarianceInCorrectedParameters_p1, + VarianceInCorrectedParameters_p2, + CovarianceInCorrectedParameters_p1_p2, + CovarianceInCorrectedParameters_p2_p1, + CorrectedVar_TarFOM1, + CorrectedVar_TarFOM2, + CorrectedVar_TarFOM3, + ExactCorrectedVar_TarFOM1, + ExactCorrectedVar_TarFOM2, + ExactCorrectedVar_TarFOM3, + CorrectedCov_TarFOM1_TarFOM2, + CorrectedCov_TarFOM2_TarFOM1, + CorrectedCov_TarFOM1_TarFOM3, + CorrectedCov_TarFOM3_TarFOM1, + CorrectedCov_TarFOM2_TarFOM3, + CorrectedCov_TarFOM3_TarFOM2, + ExactCorrectedCov_TarFOM1_TarFOM2, + ExactCorrectedCov_TarFOM2_TarFOM1, + ExactCorrectedCov_TarFOM1_TarFOM3, + ExactCorrectedCov_TarFOM3_TarFOM1, + ExactCorrectedCov_TarFOM2_TarFOM3, + ExactCorrectedCov_TarFOM3_TarFOM2 + + + + + + + csv + pp1_metric + + + + diff --git a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml new file mode 100644 index 0000000000..dd520e2276 --- /dev/null +++ b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml @@ -0,0 +1,183 @@ + + + + RepresentativityrankDifficient + mcRunExp, mcRunTar, PP1 + 1 + + + + framework/PostProcessors/Validation/test_validation_representativity3 + Mohammad Abdo (@Jimmy-INL) + 2021-04-29 + PostProcessors.Validation + + This test assesses the mechanics of the representativity workflow; one of the validation algorithms used in RAVEN. + This test a linear model as both the mock experiment and the target plant models. 
The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors. + + + Added Modification for new PP API + + + + + + p1, p2, e1, bE, F1 + + + p1, p2, o1, bT, FOM1 + + + outputDataMC1|F1 + outputDataMC2|FOM1 + + outputDataMC1|p1,outputDataMC1|p2 + outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|time + outputDataMC2|time + + + + + + + + 5.5 + 0.55 + + + 8 + 0.8 + + + + + + + 100 + + + dist1 + + + dist2 + + 2,-3 + + 0 + + + + 100 + + + dist1 + + + dist2 + + 2,3 + + 0 + + + + + + inputPlaceHolder2 + linModel + ExperimentMCSampler + outputDataMC1 + + + inputPlaceHolder2 + tarModel + TargetMCSampler + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + + + + + p1,p2 + OutputPlaceHolder + + + p1,p2 + F1 + + + p1,p2 + FOM1 + + + InputPlaceHolder + + BiasFactor_MockF1_TarFOM1, + + ExactBiasFactor_MockF1_TarFOM1, + + CorrectedParameters_p1, + CorrectedParameters_p2, + CorrectedTargets_FOM1, + + VarianceInCorrectedParameters_p1, + VarianceInCorrectedParameters_p2, + CovarianceInCorrectedParameters_p1_p2, + CovarianceInCorrectedParameters_p2_p1, + CorrectedVar_TarFOM1, + + ExactCorrectedVar_TarFOM1, + + + + + + + + + + csv + pp1_metric + + + + diff --git a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml new file mode 100644 index 0000000000..eeee32fd72 --- /dev/null +++ b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml @@ -0,0 +1,139 @@ + + + + RepresentativityPerfectSingleMeasurable + mcRunExp, mcRunTar, PP1 + 1 + + + + framework/PostProcessors/Validation/test_validation_representativity2 + Mohammad Abdo (@Jimmy-INL) + 2021-04-29 + PostProcessors.Validation + + This test assesses the mechanics of the representativity workflow; one of the validation algorithms used in RAVEN. + This test a linear model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors. 
+ + + Added Modification for new PP API + + + + + + p1, p2, e1, bE, F1 + + + p1, p2, o1, bT, FOM1 + + + outputDataMC1|F1 + outputDataMC2|FOM1 + outputDataMC1|p1,outputDataMC1|p2 + outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|time + outputDataMC2|time + + + + + + 5.5 + 0.55 + + + 8 + 0.8 + + + + + + + 100 + + + dist1 + + + dist2 + + 2,-3 + 0 + + + + 100 + + + dist1 + + + dist2 + + 2,-3 + 0 + + + + + + inputPlaceHolder2 + linModel + ExperimentMCSampler + outputDataMC1 + + + inputPlaceHolder2 + tarModel + TargetMCSampler + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + + + + + p1,p2 + OutputPlaceHolder + + + p1,p2 + F1 + + + p1,p2 + FOM1 + + + InputPlaceHolder + + BiasFactor_MockF1_TarFOM1, + ExactBiasFactor_MockF1_TarFOM1, + CorrectedParameters_p1, + CorrectedParameters_p2, + CorrectedTargets_FOM1, + VarianceInCorrectedParameters_p1, + VarianceInCorrectedParameters_p2, + CovarianceInCorrectedParameters_p1_p2, + CovarianceInCorrectedParameters_p2_p1, + CorrectedVar_TarFOM1, + ExactCorrectedVar_TarFOM1, + + + + + + + csv + pp1_metric + + + From 211e54b3ebf557842ce9be6f3d4a05293fb27378 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Fri, 10 Feb 2023 18:56:24 -0700 Subject: [PATCH 59/95] updating dependencies --- dependencies.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dependencies.xml b/dependencies.xml index b29ebc7270..409bdb12b0 100644 --- a/dependencies.xml +++ b/dependencies.xml @@ -49,7 +49,7 @@ Note all install methods after "main" take --> 0.19 1.5 - 3.3 + 3.5 0.13 2.2 2.9 From 44214f6c9e4fb4c0dd25c4f1a93cf2e9715079e6 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Wed, 15 Feb 2023 13:25:10 -0700 Subject: [PATCH 60/95] adding analytic models --- .../AnalyticModels/singleExpLinModel.py | 56 ++++++++++++++++++ .../AnalyticModels/singleTarLinModel.py | 58 +++++++++++++++++++ 2 files changed, 114 insertions(+) create mode 100644 tests/framework/AnalyticModels/singleExpLinModel.py create mode 100644 tests/framework/AnalyticModels/singleTarLinModel.py diff --git a/tests/framework/AnalyticModels/singleExpLinModel.py b/tests/framework/AnalyticModels/singleExpLinModel.py new file mode 100644 index 0000000000..462b1466aa --- /dev/null +++ b/tests/framework/AnalyticModels/singleExpLinModel.py @@ -0,0 +1,56 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#*************************************** +#* Simple analytic test ExternalModule * +#*************************************** +# +# Simulates a steady state linear model that maps $J-$parameters (i.e., $\mathbb{R}^J$) to k Responses +# +# External Modules +import numpy as np +################## + +# A = np.array([[2, -3],[1,8],[-5, -5]]) +# b = np.array([[0],[0],[0]]) + +def run(self,Input): + """ + Method require by RAVEN to run this as an external model. 
+    @ In, self, object, object to store members on
+    @ In, Input, dict, dictionary containing inputs from RAVEN
+    @ Out, None
+  """
+  self.F1 = main(Input)
+
+def main(Input):
+  # y = A @ x + b, where A stacks the experiment response rows and b is the bias term
+  m = len([key for key in Input.keys() if key.startswith('e')]) # number of experiment responses (e1, e2, ...)
+  n = len([par for par in Input.keys() if par.startswith('p')]) # number of parameters (p1, p2, ...)
+  A = np.array([Input['e1']]).reshape(-1,n)
+  b = Input['bE'].reshape(-1,1)
+  x = np.atleast_2d(np.array([Input['p1'],Input['p2']])).reshape(-1,1)
+  assert np.shape(A)[1] == np.shape(x)[0] # columns of A must match the number of parameters
+  assert np.shape(A)[0] == np.shape(b)[0] == m # rows of A, bias entries, and responses must agree
+  y = A @ x + b
+  return y[:]
+
+
+if __name__ == '__main__':
+  Input = {}
+  Input['e1'] = [2,-3]
+  Input['bE'] = np.array([[0]])
+  Input['p1'] = 5.5
+  Input['p2'] = 8
+  a = main(Input)
+  print(a)
diff --git a/tests/framework/AnalyticModels/singleTarLinModel.py b/tests/framework/AnalyticModels/singleTarLinModel.py
new file mode 100644
index 0000000000..c53f4e205c
--- /dev/null
+++ b/tests/framework/AnalyticModels/singleTarLinModel.py
@@ -0,0 +1,56 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#***************************************
+#* Simple analytic test ExternalModule *
+#***************************************
+#
+# Simulates a steady-state linear model that maps $J$ parameters (i.e., $\mathbb{R}^J$) to $k$ responses
+#
+# External Modules
+import numpy as np
+##################
+# Author: Mohammad Abdo (@Jimmy-INL)
+
+# A = np.array([[2, -3],[1,8],[-5, -5]])
+# b = np.array([[0],[0],[0]])
+
+def run(self,Input):
+  """
+    Method required by RAVEN to run this as an external model.
+    @ In, self, object, object to store members on
+    @ In, Input, dict, dictionary containing inputs from RAVEN
+    @ Out, None
+  """
+  self.FOM1 = main(Input)
+
+def main(Input):
+  m = len([key for key in Input.keys() if key.startswith('o')]) # number of target responses (o1, o2, ...)
+  n = len([par for par in Input.keys() if par.startswith('p')]) # number of parameters (p1, p2, ...)
+  A = np.array([Input['o1']]).reshape(-1,n)
+  b = Input['bT'].reshape(-1,1)
+  x = np.atleast_2d(np.array([Input['p1'],Input['p2']])).reshape(-1,1)
+  assert np.shape(A)[1] == np.shape(x)[0] # columns of A must match the number of parameters
+  assert np.shape(A)[0] == np.shape(b)[0] == m # rows of A, bias entries, and responses must agree
+  y = A @ x + b
+  return y[:]
+
+
+if __name__ == '__main__':
+  Input = {}
+  Input['o1'] = [2,-3]
+  Input['bT'] = np.array([[0]])
+  Input['p1'] = 5.5
+  Input['p2'] = 8
+  a = main(Input)
+  print(a)
From 34d2d8c4bc163d111fed0ca35c2d0abd58c47bef Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Mon, 20 Feb 2023 13:41:25 -0700
Subject: [PATCH 61/95] updating dependencies with devel

---
 dependencies_new.xml | 77 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 dependencies_new.xml

diff --git a/dependencies_new.xml b/dependencies_new.xml
new file mode 100644
index 0000000000..81843fe32e
--- /dev/null
+++ b/dependencies_new.xml
@@ -0,0 +1,77 @@
+
+
+
+ + + + + + + + + + + + 3.9 + + + + + + + + + + + + + + + + + + 1.3 + 6.0 + + + 1.1 +
+ + remove + remove + remove + remove + remove + remove + +
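A note on the analytic models added in PATCH 60 above: the shape checks of the form assert(np.shape(A)[1],np.shape(b)[0]) assert the truth of a tuple, which is always non-empty and therefore always passes, so they can never catch a shape mismatch. Below is a minimal sketch of checks that actually fail on bad shapes; the prefix-based key filtering is an assumption for illustration, not the models' exact substring test.

import numpy as np

def main(Input):
  """
    Linear mock-model evaluation, y = A x + b, with shape checks that can fail.
    @ In, Input, dict, dictionary containing inputs from RAVEN
    @ Out, y, np.ndarray, column vector of responses
  """
  n = len([par for par in Input if par.startswith('p')])  # number of parameters (assumed 'p*' naming)
  A = np.array([Input['e1']]).reshape(-1, n)              # one sensitivity row per experiment
  b = np.atleast_2d(Input['bE']).reshape(-1, 1)
  x = np.array([Input['p1'], Input['p2']], dtype=float).reshape(-1, 1)
  assert A.shape[1] == x.shape[0] == n, "A needs one column per parameter"
  assert A.shape[0] == b.shape[0], "b needs one entry per response row"
  return A @ x + b

# with the values used in the representativity tests, A = [[2, -3]],
# x = [5.5, 8], b = [[0]]  ->  y = 2*5.5 - 3*8 = -13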
From bcf7b77153a20b4ddcb09e515841eb80e501ad6c Mon Sep 17 00:00:00 2001 From: Jimmy-INL <52417034+Jimmy-INL@users.noreply.github.com> Date: Mon, 20 Feb 2023 15:31:48 -0700 Subject: [PATCH 62/95] Delete dependencies_new.xml --- dependencies_new.xml | 77 -------------------------------------------- 1 file changed, 77 deletions(-) delete mode 100644 dependencies_new.xml diff --git a/dependencies_new.xml b/dependencies_new.xml deleted file mode 100644 index 81843fe32e..0000000000 --- a/dependencies_new.xml +++ /dev/null @@ -1,77 +0,0 @@ - - -
- - - - - - - - - - - - 3.9 - - - - - - - - - - - - - - - - - - 1.3 - 6.0 - - - 1.1 -
- - remove - remove - remove - remove - remove - remove - -
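For context on the run() functions that PATCH 63 below documents: RAVEN drives an ExternalModel by calling run(self, Input) once per sample and collecting the outputs from attributes set on self under the variable names declared in the test XML. A minimal self-contained sketch of that convention follows; the hard-coded single-row A is an assumption for illustration, mirroring the e1 = [2, -3] row used by the tests.

import numpy as np

def run(self, Input):
  """
    Method required by RAVEN to run this as an external model.
    @ In, self, object, object to store members on
    @ In, Input, dict, sampled variables, e.g. {'p1': 5.5, 'p2': 8, ...}
    @ Out, None, the response is attached to self under its declared name
  """
  x = np.array([Input['p1'], Input['p2']], dtype=float).reshape(-1, 1)
  A = np.array([[2.0, -3.0]])    # single-experiment sensitivity row (assumed)
  b = 0.0
  self.F1 = (A @ x + b).item()   # RAVEN reads self.F1 back as the output variable F1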
From bac01e9cd9f632aa34231b4b50b533d699281e36 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Tue, 21 Feb 2023 08:37:13 -0700 Subject: [PATCH 63/95] adding docstrings to external models --- .../Validations/Representativity.py | 6 +++-- .../AnalyticModels/singleExpLinModel.py | 25 ++++++------------- .../AnalyticModels/singleTarLinModel.py | 25 ++++++------------- ...representativity_perfectLinExpToTarget.xml | 7 ++++-- ...ntativity_rankDifficientLinExpToTarget.xml | 7 ++++-- ...entativity_singlePerfectLinExpToTarget.xml | 7 ++++-- 6 files changed, 34 insertions(+), 43 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 9c476e079d..b1cbdcc2f5 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -397,9 +397,11 @@ def _calculateBiasFactor(self, normalizedSenExp, normalizedSenTar, UparVar, Umes @ Out, r, np.array, the representativity (bias factor) matrix neglecting uncertainties in measurables @ Out, rExact, np.array, the representativity (bias factor) matrix considering uncertainties in measurables """ + if UmesVar is None: + UmesVar = np.zeros((len(normalizedSenExp), len(normalizedSenExp))) # Compute representativity (#eq 79) - r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T))).real - rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar))).real + r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T))).real + rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar))).real return r, rExact def _calculateCovofTargetErrorsfromBiasFactor(self, normalizedSenTar, UparVar, r): diff --git a/tests/framework/AnalyticModels/singleExpLinModel.py b/tests/framework/AnalyticModels/singleExpLinModel.py index 462b1466aa..b9aea8c6a4 100644 --- a/tests/framework/AnalyticModels/singleExpLinModel.py +++ b/tests/framework/AnalyticModels/singleExpLinModel.py @@ -20,10 +20,7 @@ # External Modules import numpy as np ################## - -# A = np.array([[2, -3],[1,8],[-5, -5]]) -# b = np.array([[0],[0],[0]]) - +# Author: Mohammad Abdo (@Jimmy-INL) def run(self,Input): """ Method require by RAVEN to run this as an external model. 
@@ -34,23 +31,17 @@ def run(self,Input): self.F1 = main(Input) def main(Input): - # y = A @ np.array(list(Input.values())).reshape(-1,1) + b + """ + Experiment Model evaluation method + @ In, Input, dict, dictionary containing inputs from RAVEN + @ Out, y[:], floats, list of response values from the linear model $ y = Ax+b $ + """ m = len([key for key in Input.keys() if 'e' in key]) # number of experiments n = len([par for par in Input.keys() if 'p' in par]) # number of parameters A = np.array([Input['e1']]).reshape(-1,n) b = Input['bE'].reshape(-1,1) x = np.atleast_2d(np.array([Input['p1'],Input['p2']])).reshape(-1,1) - assert(np.shape(A)[1],np.shape(b)[0]) + assert(np.shape(A)[1],np.shape(b)[0],n) assert(np.shape(A)[0],np.shape(b)[0],m) y = A @ x + b - return y[:] - - -if __name__ == '__main__': - Input = {} - Input['e1'] = [2,-3] - Input['bE'] = np.array([[0],[0],[0]]) - Input['x1'] = 5.5 - Input['x2'] = 8 - a = main(Input) - print(a) + return y[:] \ No newline at end of file diff --git a/tests/framework/AnalyticModels/singleTarLinModel.py b/tests/framework/AnalyticModels/singleTarLinModel.py index c53f4e205c..9c049ab367 100644 --- a/tests/framework/AnalyticModels/singleTarLinModel.py +++ b/tests/framework/AnalyticModels/singleTarLinModel.py @@ -21,10 +21,6 @@ import numpy as np ################## # Author: Mohammad Abdo (@Jimmy-INL) - -# A = np.array([[2, -3],[1,8],[-5, -5]]) -# b = np.array([[0],[0],[0]]) - def run(self,Input): """ Method require by RAVEN to run this as an external model. @@ -35,24 +31,17 @@ def run(self,Input): self.FOM1 = main(Input) def main(Input): + """ + Target Model evaluation method + @ In, Input, dict, dictionary containing inputs from RAVEN + @ Out, y[:], floats, list of response values from the linear model $ y = Ax+b $ + """ m = len([key for key in Input.keys() if 'o' in key]) # number of experiments n = len([par for par in Input.keys() if 'p' in par]) # number of parameters A = np.array([Input['o1']]).reshape(-1,n) b = Input['bT'].reshape(-1,1) x = np.atleast_2d(np.array([Input['p1'],Input['p2']])).reshape(-1,1) - assert(np.shape(A)[1],np.shape(b)[0]) + assert(np.shape(A)[1],np.shape(b)[0],n) assert(np.shape(A)[0],np.shape(b)[0],m) y = A @ x + b - return y[:] - - -if __name__ == '__main__': - Input = {} - Input['o1'] = [2,-3] - Input['o2'] = [1,8] - Input['o3'] = [-5, -5] - Input['bT'] = np.array([[0],[0],[0]]) - Input['p1'] = 5.5 - Input['p2'] = 8 - a,b,c = main(Input) - print(a,b,c) + return y[:] \ No newline at end of file diff --git a/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml index f460811f08..d049fde50b 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml @@ -22,10 +22,12 @@ - p1, p2, e1, e2, e3, bE, F1, F2, F3 + p1, p2, e1, e2, e3, bE + F1, F2, F3 - p1, p2, o1, o2, o3, bT, FOM1, FOM2, FOM3 + p1, p2, o1, o2, o3, bT + FOM1, FOM2, FOM3 outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 @@ -51,6 +53,7 @@ + 42 100 diff --git a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml index dd520e2276..ac649af433 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml +++ 
b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml
@@ -22,10 +22,12 @@
-        p1, p2, e1, bE, F1
+        p1, p2, e1, e2, e3, bE
+        F1, F2, F3
-        p1, p2, o1, bT, FOM1
+        p1, p2, o1, o2, o3, bT
+        FOM1, FOM2, FOM3
         outputDataMC1|F1
         outputDataMC2|FOM1
         outputDataMC1|p1,outputDataMC1|p2
         outputDataMC2|p1,outputDataMC2|p2
         outputDataMC1|time
         outputDataMC2|time
@@ -56,6 +58,7 @@
+      42
       100
diff --git a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml
index eeee32fd72..a855794241 100644
--- a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml
+++ b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml
@@ -22,10 +22,12 @@
-        p1, p2, e1, bE, F1
+        p1, p2, e1, bE
+        F1
-        p1, p2, o1, bT, FOM1
+        p1, p2, o1, bT
+        FOM1
         outputDataMC1|F1
         outputDataMC2|FOM1
         outputDataMC1|p1,outputDataMC1|p2
         outputDataMC2|p1,outputDataMC2|p2
         outputDataMC1|time
         outputDataMC2|time
@@ -51,6 +53,7 @@
+      42
       100
From 74c7e53fa0bcf61575b9e85fab9b0e3de10dcc93 Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Wed, 22 Mar 2023 15:58:49 -0600
Subject: [PATCH 64/95] addressing Congjian's comments about metric occurrence
---
 .../Models/PostProcessors/Metric.py | 2 +-
 .../pp1_metric_dump.csv | 4 +-
 ...ntativity_rankDifficientLinExpToTarget.xml | 57 +++++++++----------
 3 files changed, 29 insertions(+), 34 deletions(-)

diff --git a/ravenframework/Models/PostProcessors/Metric.py b/ravenframework/Models/PostProcessors/Metric.py
index 8cc796fcaa..fd882d4fbc 100644
--- a/ravenframework/Models/PostProcessors/Metric.py
+++ b/ravenframework/Models/PostProcessors/Metric.py
@@ -89,7 +89,7 @@ def __init__(self):
     self.pivotParameter = None
     self.pivotValues = []
     # assembler objects to be requested
-    self.addAssemblerObject('Metric', InputData.Quantity.zero_to_infinity)
+    self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity)

   def __getMetricSide(self, metricDataName, currentInputs):
     """
diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv
index 2b17d95a32..5eefd68761 100644
--- a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv
+++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv
@@ -1,2 +1,2 @@
-BiasFactor_MockF1_TarFOM1
-0.698423539624
+BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 +0.128430862967,0.689134736572,-0.0927298672854,0.128430862967,0.689134736572,-0.0927298672854,0.582902422521,0.000702674732941,0.812541858656,0.0905312547636,0.486858549858,-0.0657937469498,0.0905312547636,0.486858549858,-0.0657937469498,0.412552681203,-0.000828229250926,0.575604210418,5.50515445786,8.21264618362,34.6822822385,68.6723913603,-66.9629170421,0.11687181512,0.305599328839,0.0,0.0,-1.04930626376e-17,-1.59128115159e-17,-9.13773877431e-18,0.00239229818393,0.00382232231092,0.00210266379545,-1.2547237984e-17,-1.25687885235e-17,-9.6828258244e-18,-9.7450612549e-18,-1.12497918092e-17,-1.12847924134e-17,0.00285784685918,0.00285784685918,0.00220866990912,0.00220866990912,0.0024774248481,0.0024774248481 diff --git a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml index ac649af433..f4fe39367e 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml @@ -21,18 +21,17 @@ - + p1, p2, e1, e2, e3, bE F1, F2, F3 - + p1, p2, o1, o2, o3, bT FOM1, FOM2, FOM3 - outputDataMC1|F1 - outputDataMC2|FOM1 - + outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 + outputDataMC2|FOM1, outputDataMC2|FOM2, outputDataMC2|FOM3 outputDataMC1|p1,outputDataMC1|p2 outputDataMC2|p1,outputDataMC2|p2 outputDataMC1|time @@ -40,10 +39,6 @@ - - 5.5 @@ -68,9 +63,9 @@ dist2 2,-3 - - 0 + 2,-3 + -5,-5 + 0,0,0 @@ -83,9 +78,9 @@ dist2 2,3 - - 0 + 1, 8 + -5,-5 + 0,0,0 @@ -118,60 +113,60 @@ p1,p2 - F1 + F1, F2, F3 p1,p2 - FOM1 + FOM1, FOM2, FOM3 InputPlaceHolder BiasFactor_MockF1_TarFOM1, - + BiasFactor_MockF3_TarFOM3, ExactBiasFactor_MockF1_TarFOM1, - + ExactBiasFactor_MockF3_TarFOM3, CorrectedParameters_p1, CorrectedParameters_p2, CorrectedTargets_FOM1, - + CorrectedTargets_FOM2, + CorrectedTargets_FOM3, VarianceInCorrectedParameters_p1, VarianceInCorrectedParameters_p2, CovarianceInCorrectedParameters_p1_p2, CovarianceInCorrectedParameters_p2_p1, CorrectedVar_TarFOM1, - + CorrectedVar_TarFOM2, + CorrectedVar_TarFOM3, ExactCorrectedVar_TarFOM1, - - - + 
ExactCorrectedCov_TarFOM3_TarFOM2

From cadd061ac828ac61072f6bd1e2104b582ee2297e Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Thu, 23 Mar 2023 10:12:32 -0600
Subject: [PATCH 65/95] clarifications to validation.tex
---
 doc/user_manual/PostProcessors/Validation.tex | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/doc/user_manual/PostProcessors/Validation.tex b/doc/user_manual/PostProcessors/Validation.tex
index 8e788c9fe3..3542d04280 100644
--- a/doc/user_manual/PostProcessors/Validation.tex
+++ b/doc/user_manual/PostProcessors/Validation.tex
@@ -282,15 +282,14 @@ \subsubsection{Validation PostProcessors}
 %
 \begin{itemize}
-   \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features, which can be the measuables/observables of the mock model. Reader should be warned that this nomenclature is different than the Machine learning nomenclature.
+   \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features, which can be the measurables/observables of the mock model. The reader should be warned that this nomenclature differs from the machine learning nomenclature.

  \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of targets. These are the Figures of merit (FOMs) in the target model against which the mock model is being validated.

-  \item \xmlNode{featureParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the mock model.
+  \item \xmlNode{featureParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the mock/prototype model.

-  \item \xmlNode{targetParameters}, \xmlDesc{comma separated string, required field}, contains a comma separated list of
-  target parameters/inputs.
+  \item \xmlNode{targetParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the target model.

  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable of the mock model. Default is ``time''. \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation).
@@ -331,10 +330,12 @@ \subsubsection{Validation PostProcessors}
 ...
-        p1, p2, e1, e2, e3, bE, F1, F2, F3
+        p1, p2, e1, e2, e3, bE
+        F1, F2, F3
-        p1, p2, o1, o2, o3, bT, FOM1, FOM2, FOM3
+        p1, p2, o1, o2, o3, bT
+        FOM1, FOM2, FOM3
         outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3
From bc4261f0da8db4dab73b97a4949a7faf2403fe15 Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Wed, 29 Mar 2023 12:11:35 -0600
Subject: [PATCH 66/95] adding the format DS|Input/Output|name
---
 .../PostProcessors/Validations/Representativity.py | 14 ++++++--------
 ...test_representativity_perfectLinExpToTarget.xml |  8 ++++----
 ...presentativity_rankDifficientLinExpToTarget.xml |  8 ++++----
 ...epresentativity_singlePerfectLinExpToTarget.xml |  8 ++++----
 4 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py
index b1cbdcc2f5..8a44db1476 100644
--- a/ravenframework/Models/PostProcessors/Validations/Representativity.py
+++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py
@@ -11,6 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. +# @ Authors: Mohammad Abdo (@Jimmy-INL) +# Congjian Wang (@wangcj05) +# Andrea Alfonsi (@aalfonsi) +# Aaron Epiney (@AaronEpiney) #External Modules------------------------------------------------------------------------------------ import numpy as np @@ -28,12 +32,6 @@ class Representativity(ValidationBase): """ Representativity is a base class for validation problems It represents the base class for most validation problems - - @ Authors: Mohammad Abdo (@Jimmy-INL) - Congjian Wang (@wangcj05) - Andrea Alfonsi (@aalfonsi) - Aaron Epiney (@AaronEpiney) - """ @classmethod @@ -110,8 +108,8 @@ def initialize(self, runInfo, inputs, initDict): if len(names) != 1: self.raiseAnError(IOError, "'Targets' and 'targetParameters' should come from the same DataObjects, but they present in differet DataObjects:{}".fortmat(','.join(names))) targetDataObject = list(names)[0] - featVars = [x.split("|")[-1] for x in self.features] + [x.split("|")[1] for x in self.featureParameters] - targVars = [x.split("|")[-1] for x in self.targets] + [x.split("|")[1] for x in self.targetParameters] + featVars = [x.split("|")[-1] for x in self.features] + [x.split("|")[-1] for x in self.featureParameters] + targVars = [x.split("|")[-1] for x in self.targets] + [x.split("|")[-1] for x in self.targetParameters] for i, inp in enumerate(inputs): if inp.name == featDataObject: diff --git a/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml index d049fde50b..9315292765 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml @@ -30,10 +30,10 @@ FOM1, FOM2, FOM3 - outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 - outputDataMC2|FOM1, outputDataMC2|FOM2, outputDataMC2|FOM3 - outputDataMC1|p1,outputDataMC1|p2 - outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|Output|F1, outputDataMC1|Output|F2, outputDataMC1|Output|F3 + outputDataMC2|Output|FOM1, outputDataMC2|Output|FOM2, outputDataMC2|Output|FOM3 + outputDataMC1|Input|p1,outputDataMC1|Input|p2 + outputDataMC2|Input|p1,outputDataMC2|Input|p2 outputDataMC1|time outputDataMC2|time diff --git a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml index f4fe39367e..c99119ee55 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml @@ -30,10 +30,10 @@ FOM1, FOM2, FOM3 - outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 - outputDataMC2|FOM1, outputDataMC2|FOM2, outputDataMC2|FOM3 - outputDataMC1|p1,outputDataMC1|p2 - outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|Output|F1, outputDataMC1|Output|F2, outputDataMC1|Output|F3 + outputDataMC2|Output|FOM1, outputDataMC2|Output|FOM2, outputDataMC2|Output|FOM3 + outputDataMC1|Input|p1,outputDataMC1|Input|p2 + outputDataMC2|Input|p1,outputDataMC2|Input|p2 outputDataMC1|time outputDataMC2|time diff --git a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml 
b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml index a855794241..0763066dbe 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml @@ -30,10 +30,10 @@ FOM1 - outputDataMC1|F1 - outputDataMC2|FOM1 - outputDataMC1|p1,outputDataMC1|p2 - outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|Output|F1 + outputDataMC2|Output|FOM1 + outputDataMC1|Input|p1,outputDataMC1|Input|p2 + outputDataMC2|Input|p1,outputDataMC2|Input|p2 outputDataMC1|time outputDataMC2|time From eeec9da258652ad1336d7c546a1b3fd8461eaca8 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Fri, 31 Mar 2023 11:59:17 -0600 Subject: [PATCH 67/95] changing the names of input nodes --- .../Models/PostProcessors/ValidationBase.py | 32 ++++---- .../PostProcessors/Validations/PPDSS.py | 18 ++-- .../PhysicsGuidedCoverageMapping.py | 6 +- .../Validations/Probabilistic.py | 4 +- .../Validations/Representativity.py | 82 +++++++++---------- ...representativity_perfectLinExpToTarget.xml | 8 +- ...ntativity_rankDifficientLinExpToTarget.xml | 8 +- ...entativity_singlePerfectLinExpToTarget.xml | 8 +- .../Validation/test_validation_dss.xml | 12 +-- .../Validation/test_validation_gate_pcm.xml | 32 ++++---- .../test_validation_gate_probabilistic.xml | 7 +- ...validation_gate_probabilistic_time_dep.xml | 4 +- 12 files changed, 106 insertions(+), 115 deletions(-) diff --git a/ravenframework/Models/PostProcessors/ValidationBase.py b/ravenframework/Models/PostProcessors/ValidationBase.py index 87948f36ce..2b01e99cbb 100644 --- a/ravenframework/Models/PostProcessors/ValidationBase.py +++ b/ravenframework/Models/PostProcessors/ValidationBase.py @@ -58,9 +58,9 @@ class cls. 
specs.addSub(preProcessorInput) pivotParameterInput = InputData.parameterInputFactory("pivotParameter", contentType=InputTypes.StringType) specs.addSub(pivotParameterInput) - featuresInput = InputData.parameterInputFactory("Features", contentType=InputTypes.StringListType) + featuresInput = InputData.parameterInputFactory("prototypeOutputs", contentType=InputTypes.StringListType) specs.addSub(featuresInput) - targetsInput = InputData.parameterInputFactory("Targets", contentType=InputTypes.StringListType) + targetsInput = InputData.parameterInputFactory("targetOutputs", contentType=InputTypes.StringListType) specs.addSub(targetsInput) metricInput = InputData.parameterInputFactory("Metric", contentType=InputTypes.StringType) metricInput.addParam("class", InputTypes.StringType) @@ -85,8 +85,8 @@ def __init__(self): self.dataType = ['static', 'dynamic'] # the type of data can be passed in (static aka PointSet, dynamic aka HistorySet) (if both are present the validation algorithm can work for both data types) self.acceptableMetrics = [] # if not populated all types of metrics are accepted, otherwise list the metrics (see Probablistic.py for an example) - self.features = None # list of feature variables - self.targets = None # list of target variables + self.prototypeOutputs = None # list of feature variables + self.targetOutputs = None # list of target variables self.pivotValues = None # pivot values (present if dynamic == True) self.addAssemblerObject('Metric', InputData.Quantity.zero_to_infinity) @@ -126,14 +126,14 @@ def _handleInput(self, paramInput): for child in paramInput.subparts: if child.getName() == 'pivotParameter': self.pivotParameter = child.value - elif child.getName() == 'Features': - self.features = child.value - elif child.getName() == 'Targets': - self.targets = child.value + elif child.getName() == 'prototypeOutputs': + self.prototypeOutputs = child.value + elif child.getName() == 'targetOutputs': + self.targetOutputs = child.value if 'static' not in self.dataType and self.pivotParameter is None: self.raiseAnError(IOError, "The validation algorithm '{}' is a dynamic model ONLY but no node has been inputted".format(self._type)) - if not self.features: - self.raiseAnError(IOError, "XML node 'Features' is required but not provided") + # if not self.features: + # self.raiseAnError(IOError, "XML node 'prototypeParameters' is required but not provided") def initialize(self, runInfo, inputs, initDict): """ @@ -152,20 +152,20 @@ def initialize(self, runInfo, inputs, initDict): if len(inputs) > 1: # if inputs > 1, check if the | is present to understand where to get the features and target - notStandard = [k for k in self.features + self.targets if "|" not in k] + notStandard = [k for k in self.prototypeOutputs + self.targetOutputs if "|" not in k] if notStandard: self.raiseAnError(IOError, "# Input Datasets/DataObjects > 1! features and targets must use the syntax DataObjectName|feature to be usable! 
Not standard features are: {}!".format(",".join(notStandard))) # now lets check that the variables are in the dataobjects if isinstance(inputs[0], DataObjects.DataSet): do = [inp.name for inp in inputs] if len(inputs) > 1: - allFound = [feat.split("|")[0].strip() in do for feat in self.features] - allFound += [targ.split("|")[0].strip() in do for targ in self.targets] + allFound = [feat.split("|")[0].strip() in do for feat in self.prototypeOutputs] + allFound += [targ.split("|")[0].strip() in do for targ in self.targetOutputs] if not all(allFound): - self.raiseAnError(IOError, "Targets and Features are linked to DataObjects that have not been listed as inputs in the Step. Please check input!") + self.raiseAnError(IOError, "targetParameters and prototypeParameters are linked to DataObjects that have not been listed as inputs in the Step. Please check input!") # check variables for indx, dobj in enumerate(do): - variables = [var.split("|")[-1].strip() for var in (self.features + self.targets) if dobj in var] + variables = [var.split("|")[-1].strip() for var in (self.prototypeOutputs + self.targetOutputs) if dobj in var] if not utils.isASubset(variables,inputs[indx].getVars()): self.raiseAnError(IOError, "The variables '{}' not found in input DataObjet '{}'!".format(",".join(list(set(list(inputs[indx].getVars())) - set(variables))), dobj)) @@ -186,7 +186,7 @@ def _getDataFromDataDict(self, datasets, var, names=None): """ pw = None if "|" in var and names is not None: - do, feat = var.split("|") + do, _, feat = var.split("|") dat = datasets[do][feat] else: for doIndex, ds in enumerate(datasets): diff --git a/ravenframework/Models/PostProcessors/Validations/PPDSS.py b/ravenframework/Models/PostProcessors/Validations/PPDSS.py index e5c395c5af..5639b9d084 100644 --- a/ravenframework/Models/PostProcessors/Validations/PPDSS.py +++ b/ravenframework/Models/PostProcessors/Validations/PPDSS.py @@ -96,8 +96,8 @@ def __init__(self): self.name = 'PPDSS' # Postprocessor name self.dynamic = True # Must be time-dependent? self.dynamicType = ['dynamic'] # Specification of dynamic type - self.features = None # list of feature variables - self.targets = None # list of target variables + self.prototypeOutputs = None # list of feature variables + self.targetOutputs = None # list of target variables self.multiOutput = 'raw_values' # defines aggregating of multiple outputs for HistorySet # currently allow raw_values self.pivotParameterFeature = None # Feature pivot parameter variable @@ -124,10 +124,10 @@ def _handleInput(self, paramInput): if child.getName() == 'Metric': if 'type' not in child.parameterValues.keys() or 'class' not in child.parameterValues.keys(): self.raiseAnError(IOError, 'Tag Metric must have attributes "class" and "type"') - elif child.getName() == 'Features': - self.features = child.value - elif child.getName() == 'Targets': - self.targets = child.value + elif child.getName() == 'prototypeOutputs': + self.prototypeOutputs = child.value + elif child.getName() == 'targetOutputs': + self.targetOutputs = child.value elif child.getName() == 'multiOutput': self.multiOutput = child.value elif child.getName() == 'pivotParameterFeature': @@ -192,10 +192,10 @@ def _evaluate(self, datasets, **kwargs): """ realizations = [] realizationArray = [] - if len(self.features) > 1 or len(self.targets) > 1: + if len(self.prototypeOutputs) > 1 or len(self.targetOutputs) > 1: self.raiseAnError(IOError, "The number of inputs for features or targets is greater than 1. 
Please restrict to one set per step.") - feat = self.features[0] - targ = self.targets[0] + feat = self.prototypeOutputs[0] + targ = self.targetOutputs[0] scaleRatioBeta = self.scaleRatioBeta scaleRatioOmega = self.scaleRatioOmega nameFeat = feat.split("|") diff --git a/ravenframework/Models/PostProcessors/Validations/PhysicsGuidedCoverageMapping.py b/ravenframework/Models/PostProcessors/Validations/PhysicsGuidedCoverageMapping.py index b3934330db..e404a2754d 100644 --- a/ravenframework/Models/PostProcessors/Validations/PhysicsGuidedCoverageMapping.py +++ b/ravenframework/Models/PostProcessors/Validations/PhysicsGuidedCoverageMapping.py @@ -76,7 +76,7 @@ def _handleInput(self, paramInput): self.measurements = child.value # Number of Features responses must equal to number of Measurements responses # Number of samples between Features and Measurements can be different - if len(self.features) != len(self.measurements): + if len(self.prototypeOutputs) != len(self.measurements): self.raiseAnError(IOError, 'The number of variables found in XML node "Features" is not equal the number of variables found in XML node "Measurements"') def run(self, inputIn): @@ -109,7 +109,7 @@ def _evaluate(self, datasets, **kwargs): msrData = [] featPW = [] msrPW = [] - for feat, msr in zip(self.features, self.measurements): + for feat, msr in zip(self.prototypeOutputs, self.measurements): featDataProb = self._getDataFromDataDict(datasets, feat, names) msrDataProb = self._getDataFromDataDict(datasets, msr, names) # M>=1 Feature arrays (1D) to 2D array with dimension (N, M) @@ -139,7 +139,7 @@ def _evaluate(self, datasets, **kwargs): # For each Target/Application model/response, calculate an uncertainty reduction fraction # using all available Features/Experiments - for targ in self.targets: + for targ in self.targetOutputs: targDataProb = self._getDataFromDataDict(datasets, targ, names) # Data values in Data, =targ, feat, msr targData = targDataProb[0] diff --git a/ravenframework/Models/PostProcessors/Validations/Probabilistic.py b/ravenframework/Models/PostProcessors/Validations/Probabilistic.py index b7a41d59cc..18fc92955a 100644 --- a/ravenframework/Models/PostProcessors/Validations/Probabilistic.py +++ b/ravenframework/Models/PostProcessors/Validations/Probabilistic.py @@ -106,7 +106,7 @@ def _evaluate(self, datasets, **kwargs): """ names = kwargs.get('dataobjectNames') outputDict = {} - for feat, targ in zip(self.features, self.targets): + for feat, targ in zip(self.prototypeOutputs, self.targetOutputs): featData = self._getDataFromDataDict(datasets, feat, names) targData = self._getDataFromDataDict(datasets, targ, names) for metric in self.metrics: @@ -124,7 +124,7 @@ def _getDataFromDataDict(self, datasets, var, names=None): """ pw = None if "|" in var and names is not None: - do, feat = var.split("|") + do, _, feat = var.split("|") dat = datasets[do][feat] else: for doIndex, ds in enumerate(datasets): diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 8a44db1476..420c22737c 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -44,13 +44,13 @@ class cls. specifying input of cls. 
""" specs = super(Representativity, cls).getInputSpecification() - parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType, + prototypeParameters = InputData.parameterInputFactory("prototypeParameters", contentType=InputTypes.StringListType, descr=r"""mock model parameters/inputs""") - parametersInput.addParam("type", InputTypes.StringType) - specs.addSub(parametersInput) - targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType, + prototypeParameters.addParam("type", InputTypes.StringType) + specs.addSub(prototypeParameters) + targetParameters = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType, descr=r"""Target model parameters/inputs""") - specs.addSub(targetParametersInput) + specs.addSub(targetParameters) targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType, descr=r"""ID of the temporal variable of the target model. Default is ``time''. \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation).""") @@ -94,22 +94,22 @@ def initialize(self, runInfo, inputs, initDict): super().initialize(runInfo, inputs, initDict) if len(inputs) != 2: self.raiseAnError(IOError, "PostProcessor", self.name, "can only accept two DataObjects, but got {}!".format(str(len(inputs)))) - params = self.features+self.targets+self.featureParameters+self.targetParameters + params = self.prototypeOutputs+self.targetOutputs+self.prototypeParameters+self.targetParameters validParams = [True if "|" in x else False for x in params] if not all(validParams): notValid = list(np.asarray(params)[np.where(np.asarray(validParams)==False)[0]]) - self.raiseAnError(IOError, "'Features', 'Targets', 'featureParameters', and 'targetParameters' should use 'DataObjectName|variable' format, but variables {} do not follow this rule.".format(','.join(notValid))) + self.raiseAnError(IOError, "'prototypeParameters', 'targetParameters', 'prototypeOutputs', and 'targetOutputs' should use 'DataObjectName|Input or Output|variable' format, but variables {} do not follow this rule.".format(','.join(notValid))) # Assume features and targets are in the format of: DataObjectName|Variables - names = set([x.split("|")[0] for x in self.features] + [x.split("|")[0] for x in self.featureParameters]) + names = set([x.split("|")[0] for x in self.prototypeOutputs] + [x.split("|")[0] for x in self.prototypeParameters]) if len(names) != 1: - self.raiseAnError(IOError, "'Features' and 'featureParameters' should come from the same DataObjects, but they present in differet DataObjects:{}".fortmat(','.join(names))) + self.raiseAnError(IOError, "'prototypeOutputs' and 'prototypeParameters' should come from the same DataObjects, but they present in differet DataObjects:{}".fortmat(','.join(names))) featDataObject = list(names)[0] - names = set([x.split("|")[0] for x in self.targets] + [x.split("|")[0] for x in self.targetParameters]) + names = set([x.split("|")[0] for x in self.targetOutputs] + [x.split("|")[0] for x in self.targetParameters]) if len(names) != 1: - self.raiseAnError(IOError, "'Targets' and 'targetParameters' should come from the same DataObjects, but they present in differet DataObjects:{}".fortmat(','.join(names))) + self.raiseAnError(IOError, "'targetOutputs' and 'targetParameters' should come from the same DataObjects, but they present in differet 
DataObjects:{}".fortmat(','.join(names))) targetDataObject = list(names)[0] - featVars = [x.split("|")[-1] for x in self.features] + [x.split("|")[-1] for x in self.featureParameters] - targVars = [x.split("|")[-1] for x in self.targets] + [x.split("|")[-1] for x in self.targetParameters] + featVars = [x.split("|")[-1] for x in self.prototypeOutputs] + [x.split("|")[-1] for x in self.prototypeParameters] + targVars = [x.split("|")[-1] for x in self.targetOutputs] + [x.split("|")[-1] for x in self.targetParameters] for i, inp in enumerate(inputs): if inp.name == featDataObject: @@ -127,11 +127,11 @@ def initialize(self, runInfo, inputs, initDict): self.raiseAnError(IOError, "Variables {} are missing from DataObject {}".format(','.join(missing), self.targetDataObject[0].name)) featStat = self.getBasicStat() - featStat.toDo = {'sensitivity':[{'targets':set([x.split("|")[-1] for x in self.features]), 'features':set([x.split("|")[-1] for x in self.featureParameters]),'prefix':self.senPrefix}]} + featStat.toDo = {'sensitivity':[{'targets':set([x.split("|")[-1] for x in self.prototypeOutputs]), 'features':set([x.split("|")[-1] for x in self.prototypeParameters]),'prefix':self.senPrefix}]} featStat.initialize(runInfo, [self.featureDataObject[0]], initDict) self.stat[self.featureDataObject[-1]] = featStat tartStat = self.getBasicStat() - tartStat.toDo = {'sensitivity':[{'targets':set([x.split("|")[-1] for x in self.targets]), 'features':set([x.split("|")[-1] for x in self.targetParameters]),'prefix':self.senPrefix}]} + tartStat.toDo = {'sensitivity':[{'targets':set([x.split("|")[-1] for x in self.targetOutputs]), 'features':set([x.split("|")[-1] for x in self.targetParameters]),'prefix':self.senPrefix}]} tartStat.initialize(runInfo, [self.targetDataObject[0]], initDict) self.stat[self.targetDataObject[-1]] = tartStat @@ -144,13 +144,13 @@ def _handleInput(self, paramInput): """ super()._handleInput(paramInput) for child in paramInput.subparts: - if child.getName() == 'featureParameters': - self.featureParameters = child.value + if child.getName() == 'prototypeParameters': + self.prototypeParameters = child.value elif child.getName() == 'targetParameters': self.targetParameters = child.value elif child.getName() == 'targetPivotParameter': self.targetPivotParameter = child.value - _, notFound = paramInput.findNodesAndExtractValues(['featureParameters', + _, notFound = paramInput.findNodesAndExtractValues(['prototypeParameters', 'targetParameters']) # notFound must be empty assert(not notFound) @@ -167,7 +167,7 @@ def run(self, inputIn): names=[] if isinstance(inputIn['Data'][0][-1], xr.Dataset): names = [self.getDataSetName(inp[-1]) for inp in inputIn['Data']] - if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None: + if len(inputIn['Data'][0][-1].indexes) > 1 and self.pivotParameter is None: if 'dynamic' not in self.dynamicType: #self.model.dataType: self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name)) else: @@ -193,52 +193,52 @@ def _evaluate(self, datasets, **kwargs): # # ## Analysis: # # 1. 
Compute mean and variance: # For mock model - self._computeMoments(datasets[0], self.featureParameters, self.features) - measurableNames = [s.split("|")[-1] for s in self.features] + self._computeMoments(datasets[0], self.prototypeParameters, self.prototypeOutputs) + measurableNames = [s.split("|")[-1] for s in self.prototypeOutputs] measurables = [datasets[0][var].meanValue for var in measurableNames] # For target model - self._computeMoments(datasets[1], self.targetParameters, self.targets) - FOMNames = [s.split("|")[-1] for s in self.targets] + self._computeMoments(datasets[1], self.targetParameters, self.targetOutputs) + FOMNames = [s.split("|")[-1] for s in self.targetOutputs] FOMs = np.atleast_2d([datasets[1][var].meanValue for var in FOMNames]).reshape(-1,1) # # 2. Propagate error from parameters to experiment and target outputs. # For mock model - self._computeErrors(datasets[0],self.featureParameters, self.features) - measurableErrorNames = ['err_' + s.split("|")[-1] for s in self.features] - FOMErrorNames = ['err_' + s.split("|")[-1] for s in self.targets] + self._computeErrors(datasets[0],self.prototypeParameters, self.prototypeOutputs) + measurableErrorNames = ['err_' + s.split("|")[-1] for s in self.prototypeOutputs] + FOMErrorNames = ['err_' + s.split("|")[-1] for s in self.targetOutputs] self._computeMoments(datasets[0], measurableErrorNames, measurableErrorNames) UMeasurables = np.atleast_2d([datasets[0][var].meanValue for var in measurableErrorNames]).reshape(-1,1) # For target model - self._computeErrors(datasets[1],self.targetParameters, self.targets) + self._computeErrors(datasets[1],self.targetParameters, self.targetOutputs) self._computeMoments(datasets[1], FOMErrorNames, FOMErrorNames) UFOMs = np.atleast_2d([datasets[1][var].meanValue for var in FOMErrorNames]).reshape(-1,1) # # 3. Compute mean and variance in the error space: - self._computeMoments(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters],['err_' + s2.split("|")[-1] for s2 in self.features]) - self._computeMoments(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targets]) + self._computeMoments(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeParameters],['err_' + s2.split("|")[-1] for s2 in self.prototypeOutputs]) + self._computeMoments(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targetOutputs]) # # 4. Compute Uncertainties in parameters - UparVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.featureParameters]) + UparVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeParameters]) # # 5. Compute Uncertainties in outputs # Outputs of Mock model (Measurables F_i) - UMeasurablesVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.features]) + UMeasurablesVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeOutputs]) # Outputs of Target model (Targets FOM_i) - UFOMsVar = self._computeUncertaintyMatrixInErrors(datasets[1],['err_' + s.split("|")[-1] for s in self.targets]) + UFOMsVar = self._computeUncertaintyMatrixInErrors(datasets[1],['err_' + s.split("|")[-1] for s in self.targetOutputs]) # # 6. 
Compute Normalized Uncertainties # In mock experiment outputs (measurables) sens = self.stat[self.featureDataObject[-1]].run({"Data":[[None, None, datasets[self.featureDataObject[-1]]]]}) # normalize sensitivities - senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens, datasets[0]) + senMeasurables = self._generateSensitivityMatrix(self.prototypeOutputs, self.prototypeParameters, sens, datasets[0]) # In target outputs (FOMs) sens = self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]}) # normalize sensitivities - senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens, datasets[1]) + senFOMs = self._generateSensitivityMatrix(self.targetOutputs, self.targetParameters, sens, datasets[1]) # # 7. Compute representativities r,rExact = self._calculateBiasFactor(senMeasurables, senFOMs, UparVar, UMeasurablesVar) # # 8. Compute corrected Uncertainties UtarVarTilde = self._calculateCovofTargetErrorsfromBiasFactor(senFOMs,UparVar,r) UtarVarTildeExact = self._calculateCovofTargetErrorsfromBiasFactor(senFOMs,UparVar,rExact) # # 9 Compute Corrected Targets, - # for var in self.targets: + # for var in self.targetOutputs: # self._getDataFromDatasets(datasets, var, names=None) - parametersNames = [s.split("|")[-1] for s in self.featureParameters] + parametersNames = [s.split("|")[-1] for s in self.prototypeParameters] par = np.atleast_2d([datasets[0][var].meanValue for var in parametersNames]).reshape(-1,1) correctedTargets, correctedTargetCovariance, correctedTargetErrorCov, UtarVarTilde_no_Umes_var, Inner1 = self._targetCorrection(FOMs, UparVar, UMeasurables, UMeasurablesVar, senFOMs, senMeasurables) correctedParameters, correctedParametersCovariance = self._parameterCorrection(par, UparVar, UMeasurables, UMeasurablesVar, senMeasurables) @@ -258,10 +258,10 @@ def _evaluate(self, datasets, **kwargs): ExactUncertaintyinCorrectedTargets:$TarTildeVar \in \mathbb{R}^{F \times F}$ """ outs = {} - for i,param in enumerate(self.featureParameters): + for i,param in enumerate(self.prototypeParameters): name4 = "CorrectedParameters_{}".format(param.split("|")[-1]) outs[name4] = correctedParameters[i] - for j, param2 in enumerate(self.featureParameters): + for j, param2 in enumerate(self.prototypeParameters): if param == param2: name5 = "VarianceInCorrectedParameters_{}".format(param.split("|")[-1]) outs[name5] = correctedParametersCovariance[i,i] @@ -269,15 +269,15 @@ def _evaluate(self, datasets, **kwargs): name6 = "CovarianceInCorrectedParameters_{}_{}".format(param.split("|")[-1],param2.split("|")[-1]) outs[name6] = correctedParametersCovariance[i,j] - for i,targ in enumerate(self.targets): + for i,targ in enumerate(self.targetOutputs): name3 = "CorrectedTargets_{}".format(targ.split("|")[-1]) outs[name3] = correctedTargets[i] - for j,feat in enumerate(self.features): + for j,feat in enumerate(self.prototypeOutputs): name1 = "BiasFactor_Mock{}_Tar{}".format(feat.split("|")[-1], targ.split("|")[-1]) name2 = "ExactBiasFactor_Mock{}_Tar{}".format(feat.split("|")[-1], targ.split("|")[-1]) outs[name1] = r[i,j] outs[name2] = rExact[i,j] - for k,tar in enumerate(self.targets): + for k,tar in enumerate(self.targetOutputs): if k == i: name3 = "CorrectedVar_Tar{}".format(tar.split("|")[-1]) name4 = "ExactCorrectedVar_Tar{}".format(tar.split("|")[-1]) diff --git a/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml 
b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml index 9315292765..2091b5d338 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml @@ -30,12 +30,10 @@ FOM1, FOM2, FOM3 - outputDataMC1|Output|F1, outputDataMC1|Output|F2, outputDataMC1|Output|F3 - outputDataMC2|Output|FOM1, outputDataMC2|Output|FOM2, outputDataMC2|Output|FOM3 - outputDataMC1|Input|p1,outputDataMC1|Input|p2 + outputDataMC1|Output|F1, outputDataMC1|Output|F2, outputDataMC1|Output|F3 + outputDataMC2|Output|FOM1, outputDataMC2|Output|FOM2, outputDataMC2|Output|FOM3 + outputDataMC1|Input|p1,outputDataMC1|Input|p2 outputDataMC2|Input|p1,outputDataMC2|Input|p2 - outputDataMC1|time - outputDataMC2|time diff --git a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml index c99119ee55..78d5c68866 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml @@ -30,12 +30,10 @@ FOM1, FOM2, FOM3 - outputDataMC1|Output|F1, outputDataMC1|Output|F2, outputDataMC1|Output|F3 - outputDataMC2|Output|FOM1, outputDataMC2|Output|FOM2, outputDataMC2|Output|FOM3 - outputDataMC1|Input|p1,outputDataMC1|Input|p2 + outputDataMC1|Output|F1, outputDataMC1|Output|F2, outputDataMC1|Output|F3 + outputDataMC2|Output|FOM1, outputDataMC2|Output|FOM2, outputDataMC2|Output|FOM3 + outputDataMC1|Input|p1,outputDataMC1|Input|p2 outputDataMC2|Input|p1,outputDataMC2|Input|p2 - outputDataMC1|time - outputDataMC2|time diff --git a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml index 0763066dbe..99059ed01e 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml @@ -30,12 +30,10 @@ FOM1 - outputDataMC1|Output|F1 - outputDataMC2|Output|FOM1 - outputDataMC1|Input|p1,outputDataMC1|Input|p2 + outputDataMC1|Output|F1 + outputDataMC2|Output|FOM1 + outputDataMC1|Input|p1,outputDataMC1|Input|p2 outputDataMC2|Input|p1,outputDataMC2|Input|p2 - outputDataMC1|time - outputDataMC2|time diff --git a/tests/framework/PostProcessors/Validation/test_validation_dss.xml b/tests/framework/PostProcessors/Validation/test_validation_dss.xml index eedbef4a3d..3c49299214 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_dss.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_dss.xml @@ -29,8 +29,8 @@ sigma,rho,beta,x2,y2,z2,time2,x0,y0,z0 - outMC1|x1 - outMC2|x2 + outMC1|x1 + outMC2|x2 dss time1 time2 @@ -39,8 +39,8 @@ 1 - outMC1|x1 - outMC2|x2 + outMC1|x1 + outMC2|x2 dss time1 time2 @@ -51,8 +51,8 @@ 1 - outMC1|y1 - outMC2|y2 + outMC1|y1 + outMC2|y2 dss time1 time2 diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_pcm.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_pcm.xml index 306f0179fc..690216da05 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_pcm.xml +++ 
b/tests/framework/PostProcessors/Validation/test_validation_gate_pcm.xml @@ -13,12 +13,12 @@ PostProcessors.Validation.PhysicsGuidedCoverageMapping This test is aimed to show how PCM works. - For simplicity, this test is using a linear model + For simplicity, this test is using a linear model as experiment (Feature) and application (Target) models. The linear model has two input variables and four responses, all of which (F2, F3, F4) serve as three Targets and (F1, F2) as two Features. Coordinates of F2 are twice of F1, of F4 are orthorgnal to F1, and of F3 are in between. - The output is a fraction value reflecting the uncertainty reduction fraction + The output is a fraction value reflecting the uncertainty reduction fraction using Feature to validate Target comparing to the Target prior. The output name convention is 'pri_post_stdReduct_'+"Target name". @@ -41,9 +41,9 @@ F1,F2,F3,F4 - outputDataMC1|F1,outputDataMC1|F2 - outputDataMC2|F2,outputDataMC2|F3,outputDataMC2|F4 - msrData|F1,msrData|F2 + outputDataMC1|Output|F1,outputDataMC1|Output|F2 + outputDataMC2|Output|F2,outputDataMC2|Output|F3,outputDataMC2|Output|F4 + msrData|Output|F1,msrData|Output|F2
@@ -76,8 +76,8 @@ x2_dist - - + + 20 @@ -87,8 +87,8 @@ x2_msr_dist - - + + @@ -109,7 +109,7 @@ msr MC_msr msrData - + outputDataMC1 outputDataMC2 @@ -128,26 +128,26 @@ x1,x2 OutputPlaceHolder - + x1,x2 - F1,F2 + F1,F2 x1,x2 F2,F3,F4 - + x1,x2 - F1,F2 - + F1,F2 + InputPlaceHolder pri_post_stdReduct_F2,pri_post_stdReduct_F3,pri_post_stdReduct_F4 - + csv pcm_metric diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_probabilistic.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_probabilistic.xml index 9a7c63f60b..2e6392391d 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_probabilistic.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_probabilistic.xml @@ -26,11 +26,8 @@ x1,x2,ans,ans2 - outputDataMC1|ans - outputDataMC2|ans2 - + outputDataMC1|Output|ans + outputDataMC2|Output|ans2 cdf_diff pdf_area diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_probabilistic_time_dep.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_probabilistic_time_dep.xml index 9f4aaecca6..181d3a09a8 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_probabilistic_time_dep.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_probabilistic_time_dep.xml @@ -32,8 +32,8 @@ - simulation|ans - experiment|ans2 + simulation|Output|ans + experiment|output|ans2 time cdf_diff pdf_area From b596c3c482872aab48693331d1f7a85eaf915ca3 Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Tue, 11 Apr 2023 10:16:42 -0600 Subject: [PATCH 68/95] add initSeed and regold --- .../gold/RepresentativityPerfectMatch/pp1_metric_dump.csv | 2 +- .../RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv | 2 +- .../gold/RepresentativityrankDifficient/pp1_metric_dump.csv | 2 +- .../Validation/test_representativity_perfectLinExpToTarget.xml | 1 + .../test_representativity_rankDifficientLinExpToTarget.xml | 1 + .../test_representativity_singlePerfectLinExpToTarget.xml | 1 + 6 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv index 06916e151e..ad9fb60b90 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv @@ -1,2 +1,2 @@ 
BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 -0.956931548906,0.158944501164,-0.119856086224,0.16791486128,0.37959479339,0.453551518937,-0.120864717061,0.460352644806,0.663418240664,0.677393800129,0.112080613376,-0.085428449283,0.118557800967,0.267118040533,0.319022128683,-0.0891847494166,0.326728461496,0.472040049741,5.50515445786,8.21264618362,-12.7004102917,68.6723913603,-66.9629170421,0.116530335,0.30564001965,0.0,0.0,-1.9388073124e-10,-5.5616475517e-11,-3.72335944868e-11,0.0194254248685,0.0038238410237,0.00210312019109,-9.30123543881e-11,-9.30123545459e-11,-5.77709779258e-11,-5.7770978011e-11,-4.255110962e-11,-4.25511095561e-11,0.00766855194786,0.00766855194786,0.00356093767321,0.00356093767321,0.00248057657059,0.00248057657059 +0.952211359555,0.250271687715,-0.216304652393,0.185531760198,0.0387392998064,0.813521241802,-0.133761425828,0.709880942747,0.399897080092,0.677393804148,0.112080602506,-0.0854284430575,0.118557786105,0.267118080728,0.319022105663,-0.0891847386449,0.326728432364,0.472040066425,5.50515445786,8.21264618362,-12.7004102917,68.6723913603,-66.9629170421,0.116530335,0.30564001965,0.0,0.0,-0.000195556692406,-2.65834682239e-08,-2.17694487548e-05,0.0194254248685,0.0038238410237,0.00210312019109,-2.28003840267e-06,-2.28003840267e-06,6.52469263184e-05,6.52469263184e-05,7.60728236044e-07,7.60728236044e-07,0.00766855194786,0.00766855194786,0.00356093767321,0.00356093767321,0.00248057657059,0.00248057657059 diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv index 6cf5fc8e7e..b506d1d03d 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv @@ -1,2 +1,2 @@ BiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM1,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM1 
-0.999917799082,0.707120008205,5.50515445786,8.21264618362,-12.7004102917,0.213154131346,0.34725436153,0.0,0.0,6.38668105814e-06,0.0194240671795 +0.999917799082,0.707120008205,5.50515445786,8.21264618362,-12.7004102917,0.213154131346,0.34725436153,0.0,0.0,6.38668105813e-06,0.0194240671795 diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv index 5eefd68761..78daf4e6ba 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv @@ -1,2 +1,2 @@ BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 -0.128430862967,0.689134736572,-0.0927298672854,0.128430862967,0.689134736572,-0.0927298672854,0.582902422521,0.000702674732941,0.812541858656,0.0905312547636,0.486858549858,-0.0657937469498,0.0905312547636,0.486858549858,-0.0657937469498,0.412552681203,-0.000828229250926,0.575604210418,5.50515445786,8.21264618362,34.6822822385,68.6723913603,-66.9629170421,0.11687181512,0.305599328839,0.0,0.0,-1.04930626376e-17,-1.59128115159e-17,-9.13773877431e-18,0.00239229818393,0.00382232231092,0.00210266379545,-1.2547237984e-17,-1.25687885235e-17,-9.6828258244e-18,-9.7450612549e-18,-1.12497918092e-17,-1.12847924134e-17,0.00285784685918,0.00285784685918,0.00220866990912,0.00220866990912,0.0024774248481,0.0024774248481 +0.0506288727737,0.801890136639,-0.250799305044,0.206232836721,0.576379338449,0.0653395778339,0.582902433954,0.000702671972469,0.812541845471,0.0905312478225,0.486858551554,-0.0657937441882,0.0905312478225,0.486858551554,-0.0657937441882,0.412552689318,-0.000828231210454,0.575604201073,5.50515445786,8.21264618362,34.6822822385,68.6723913603,-66.9629170421,0.11687181512,0.305599328839,0.0,0.0,-4.22254225102e-05,-7.29426797661e-08,-8.4047595698e-05,0.00239229820168,0.00382232232854,0.00210266381305,1.7555727946e-06,1.7555727946e-06,-5.95730270385e-05,-5.95730270385e-05,2.47680165956e-06,2.47680165956e-06,0.00285784687706,0.00285784687706,0.00220866992683,0.00220866992683,0.00247742486608,0.00247742486608 diff --git 
a/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml index 2091b5d338..8909070621 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_perfectLinExpToTarget.xml @@ -68,6 +68,7 @@ 100 + 2019 dist1 diff --git a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml index 78d5c68866..2dd331b8df 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml @@ -68,6 +68,7 @@ 100 + 2019 dist1 diff --git a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml index 99059ed01e..18f619497d 100644 --- a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml +++ b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml @@ -66,6 +66,7 @@ 100 + 2019 dist1 From 408940dba5f4403a38d681b7075b1c36b146bafe Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Tue, 11 Apr 2023 13:34:39 -0600 Subject: [PATCH 69/95] add dedug info --- .../Validations/Representativity.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 420c22737c..5c09d945d8 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -395,11 +395,36 @@ def _calculateBiasFactor(self, normalizedSenExp, normalizedSenTar, UparVar, Umes @ Out, r, np.array, the representativity (bias factor) matrix neglecting uncertainties in measurables @ Out, rExact, np.array, the representativity (bias factor) matrix considering uncertainties in measurables """ + + + UmesVar_r = np.array([[ 0.03603028, 0.01503859, 0.00722245], [ 0.01503859, 0.00766866, 0.00507709], [ 0.00722245, 0.00507709, 0.00424897]]) + normalizedSenTar_r = np.array([[-0.86540007, 1.86540007], [ 0.08002442, 0.91997558], [ 0.41033666, 0.58966334]]) + normalizedSenExp_r = np.array([[-0.80794013, 1.80794013], [ 0.07731272, 0.92268728], [ 0.40131466, 0.59868534]]) + UparVar_r=np.array([[ 0.00774389, -0.00049797], [-0.00049797, 0.00903588]]) + r_r = np.array([[ 0.95693156, 0.16791481, -0.12086468], [ 0.15894451, 0.37959475, 0.46035267], [-0.11985607, 0.45355145, 0.66341829]]) + rE_r = np.array([[ 0.67739381, 0.11855779, -0.08918473], [ 0.1120806 , 0.2671181 , 0.3267284 ], [-0.08542844, 0.31902209, 0.47204009]]) if UmesVar is None: UmesVar = np.zeros((len(normalizedSenExp), len(normalizedSenExp))) # Compute representativity (#eq 79) r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T))).real rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ 
sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar))).real + print('UmesVar', UmesVar) + print('normalizedSenExp',normalizedSenExp) + print("normalizedSenTar",normalizedSenTar) + print("UparVar", UparVar) + print('r',r ) + print('rExact', rExact) + exp = sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T) + tar = sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T) + print('Exp cond: sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T)', np.linalg.cond(exp)) + print('Tar cond: sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)', np.linalg.cond(tar)) + print('UmesVar Error', UmesVar-UmesVar_r) + print('normalizedSenExp Error',normalizedSenExp-normalizedSenExp_r) + print("normalizedSenTar Error",normalizedSenTar-normalizedSenTar_r) + print("UparVar Error", UparVar-UparVar_r) + print('r Error',r -r_r) + print('rExact Error', rExact-rE_r) + return r, rExact def _calculateCovofTargetErrorsfromBiasFactor(self, normalizedSenTar, UparVar, r): From 09f760843095d64598b8e0bc043fa4051163286c Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Sat, 6 May 2023 18:34:44 -0600 Subject: [PATCH 70/95] adding rtol to the pseudo inverse and negelecting correlation in covariance matrix --- .../Models/PostProcessors/Validations/Representativity.py | 6 ++++-- .../gold/RepresentativityPerfectMatch/pp1_metric_dump.csv | 2 +- .../pp1_metric_dump.csv | 2 +- .../gold/RepresentativityrankDifficient/pp1_metric_dump.csv | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 420c22737c..400301da31 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -216,9 +216,11 @@ def _evaluate(self, datasets, **kwargs): self._computeMoments(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targetOutputs]) # # 4. Compute Uncertainties in parameters UparVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeParameters]) + UparVar = np.diag(np.diag(UparVar)) # # 5. Compute Uncertainties in outputs # Outputs of Mock model (Measurables F_i) UMeasurablesVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeOutputs]) + UMeasurablesVar = np.diag(np.diag(UMeasurablesVar)) # Outputs of Target model (Targets FOM_i) UFOMsVar = self._computeUncertaintyMatrixInErrors(datasets[1],['err_' + s.split("|")[-1] for s in self.targetOutputs]) # # 6. 
Compute Normalized Uncertainties @@ -398,8 +400,8 @@ def _calculateBiasFactor(self, normalizedSenExp, normalizedSenTar, UparVar, Umes if UmesVar is None: UmesVar = np.zeros((len(normalizedSenExp), len(normalizedSenExp))) # Compute representativity (#eq 79) - r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T))).real - rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T)) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar))).real + r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=1e-4) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T),rtol=1e-4)).real + rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=1e-4) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar),rtol=1e-4)).real return r, rExact def _calculateCovofTargetErrorsfromBiasFactor(self, normalizedSenTar, UparVar, r): diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv index 06916e151e..4472ee5a84 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv @@ -1,2 +1,2 @@ BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 
-0.956931548906,0.158944501164,-0.119856086224,0.16791486128,0.37959479339,0.453551518937,-0.120864717061,0.460352644806,0.663418240664,0.677393800129,0.112080613376,-0.085428449283,0.118557800967,0.267118040533,0.319022128683,-0.0891847494166,0.326728461496,0.472040049741,5.50515445786,8.21264618362,-12.7004102917,68.6723913603,-66.9629170421,0.116530335,0.30564001965,0.0,0.0,-1.9388073124e-10,-5.5616475517e-11,-3.72335944868e-11,0.0194254248685,0.0038238410237,0.00210312019109,-9.30123543881e-11,-9.30123545459e-11,-5.77709779258e-11,-5.7770978011e-11,-4.255110962e-11,-4.25511095561e-11,0.00766855194786,0.00766855194786,0.00356093767321,0.00356093767321,0.00248057657059,0.00248057657059 +0.956932087764,0.159161787732,-0.119563064908,0.167699113517,0.379596817967,0.453629602147,-0.121159615409,0.460275888308,0.663417726361,0.614868146447,0.167744524268,0.011528548694,0.332238541657,0.277889631541,0.258901218507,0.0301375584694,0.360345785632,0.475713880249,5.50515445786,8.21264618362,-12.7004102917,68.6723913603,-66.9629170421,0.164423936118,0.171483721392,0.0,0.0,5.39443865651e-17,1.27204174207e-17,8.06249737133e-18,0.0140274750642,0.00213562754926,0.00163006250251,2.15716686445e-17,2.15622760665e-17,1.07433546312e-17,1.07350376715e-17,7.76942825882e-18,7.73195322856e-18,0.00421132452039,0.00421132452039,0.000781759501857,0.000781759501857,0.00141042089218,0.00141042089218 diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv index 6cf5fc8e7e..8132eabb81 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv @@ -1,2 +1,2 @@ BiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM1,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM1 -0.999917799082,0.707120008205,5.50515445786,8.21264618362,-12.7004102917,0.213154131346,0.34725436153,0.0,0.0,6.38668105814e-06,0.0194240671795 +0.999910325902,0.699796578127,5.50515445786,8.21264618362,-12.7004102917,0.217892904797,0.354562331426,0.0,0.0,6.67895679264e-06,0.0190039413823 diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv index 5eefd68761..87c0a4d144 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv @@ -1,2 +1,2 @@ 
BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 -0.128430862967,0.689134736572,-0.0927298672854,0.128430862967,0.689134736572,-0.0927298672854,0.582902422521,0.000702674732941,0.812541858656,0.0905312547636,0.486858549858,-0.0657937469498,0.0905312547636,0.486858549858,-0.0657937469498,0.412552681203,-0.000828229250926,0.575604210418,5.50515445786,8.21264618362,34.6822822385,68.6723913603,-66.9629170421,0.11687181512,0.305599328839,0.0,0.0,-1.04930626376e-17,-1.59128115159e-17,-9.13773877431e-18,0.00239229818393,0.00382232231092,0.00210266379545,-1.2547237984e-17,-1.25687885235e-17,-9.6828258244e-18,-9.7450612549e-18,-1.12497918092e-17,-1.12847924134e-17,0.00285784685918,0.00285784685918,0.00220866990912,0.00220866990912,0.0024774248481,0.0024774248481 +0.127840517432,0.689133318553,-0.0935525059172,0.127840517432,0.689133318553,-0.0935525059172,0.583161908487,0.0020982313425,0.812353239616,0.122597896477,0.549076304254,-0.0456197323653,0.122597896477,0.549076304254,-0.0456197323653,0.396997212211,0.134821784747,0.500408142154,5.50515445786,8.21264618362,34.6822822385,68.6723913603,-66.9629170421,0.150276288105,0.180581151315,0.0,0.0,1.69224440881e-17,2.21181422082e-17,1.50557652274e-17,0.00188720226778,0.00234533276428,0.00192221135148,1.92347002822e-17,1.91949397426e-17,1.59433557204e-17,1.5845519454e-17,1.80608965817e-17,1.79696824372e-17,0.0019201706747,0.0019201706747,0.00187419840372,0.00187419840372,0.00175247224665,0.00175247224665 From 493cd25b5a9a77d2c3df8dc7742250d8883bc48f Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Sat, 6 May 2023 19:24:43 -0600 Subject: [PATCH 71/95] regolding --- .../PostProcessors/Validations/Representativity.py | 12 ++---------- .../RepresentativityPerfectMatch/pp1_metric_dump.csv | 2 +- .../pp1_metric_dump.csv | 2 +- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 1812f6d1fc..8caa050d68 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -397,19 +397,11 @@ def _calculateBiasFactor(self, normalizedSenExp, normalizedSenTar, UparVar, Umes @ Out, r, np.array, the 
representativity (bias factor) matrix neglecting uncertainties in measurables @ Out, rExact, np.array, the representativity (bias factor) matrix considering uncertainties in measurables """ - - - UmesVar_r = np.array([[ 0.03603028, 0.01503859, 0.00722245], [ 0.01503859, 0.00766866, 0.00507709], [ 0.00722245, 0.00507709, 0.00424897]]) - normalizedSenTar_r = np.array([[-0.86540007, 1.86540007], [ 0.08002442, 0.91997558], [ 0.41033666, 0.58966334]]) - normalizedSenExp_r = np.array([[-0.80794013, 1.80794013], [ 0.07731272, 0.92268728], [ 0.40131466, 0.59868534]]) - UparVar_r=np.array([[ 0.00774389, -0.00049797], [-0.00049797, 0.00903588]]) - r_r = np.array([[ 0.95693156, 0.16791481, -0.12086468], [ 0.15894451, 0.37959475, 0.46035267], [-0.11985607, 0.45355145, 0.66341829]]) - rE_r = np.array([[ 0.67739381, 0.11855779, -0.08918473], [ 0.1120806 , 0.2671181 , 0.3267284 ], [-0.08542844, 0.31902209, 0.47204009]]) if UmesVar is None: UmesVar = np.zeros((len(normalizedSenExp), len(normalizedSenExp))) # Compute representativity (#eq 79) - r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=1e-4) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T),rtol=1e-4)).real - rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=1e-4) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sqrtm(normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar),rtol=1e-4)).real + r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=1e-4) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T),rtol=1e-4)).real + rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=1e-4) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar),rtol=1e-4)).real return r, rExact def _calculateCovofTargetErrorsfromBiasFactor(self, normalizedSenTar, UparVar, r): diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv index 4472ee5a84..746d7087d9 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv @@ -1,2 +1,2 @@ 
BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 -0.956932087764,0.159161787732,-0.119563064908,0.167699113517,0.379596817967,0.453629602147,-0.121159615409,0.460275888308,0.663417726361,0.614868146447,0.167744524268,0.011528548694,0.332238541657,0.277889631541,0.258901218507,0.0301375584694,0.360345785632,0.475713880249,5.50515445786,8.21264618362,-12.7004102917,68.6723913603,-66.9629170421,0.164423936118,0.171483721392,0.0,0.0,5.39443865651e-17,1.27204174207e-17,8.06249737133e-18,0.0140274750642,0.00213562754926,0.00163006250251,2.15716686445e-17,2.15622760665e-17,1.07433546312e-17,1.07350376715e-17,7.76942825882e-18,7.73195322856e-18,0.00421132452039,0.00421132452039,0.000781759501857,0.000781759501857,0.00141042089218,0.00141042089218 +0.956932087764,0.159161787732,-0.119563064908,0.167699113517,0.379596817967,0.453629602147,-0.121159615409,0.460275888308,0.663417726361,0.614868146447,0.167744524268,0.011528548694,0.332238541657,0.277889631541,0.258901218507,0.0301375584694,0.360345785632,0.475713880249,5.50515445786,8.21264618362,-12.7004102917,68.6723913603,-66.9629170421,0.164423936118,0.171483721392,0.0,0.0,-2.11385525993e-17,-1.65952116054e-18,1.78861964506e-18,0.0140274750642,0.00213562754926,0.00163006250251,-8.73323937893e-18,-8.65159192269e-18,-4.14928652802e-18,-4.23960447815e-18,-1.81362205386e-19,-3.02031782984e-19,0.00421132452039,0.00421132452039,0.000781759501857,0.000781759501857,0.00141042089218,0.00141042089218 diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv index 87c0a4d144..b5a2c7c87b 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv @@ -1,2 +1,2 @@ 
BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 -0.127840517432,0.689133318553,-0.0935525059172,0.127840517432,0.689133318553,-0.0935525059172,0.583161908487,0.0020982313425,0.812353239616,0.122597896477,0.549076304254,-0.0456197323653,0.122597896477,0.549076304254,-0.0456197323653,0.396997212211,0.134821784747,0.500408142154,5.50515445786,8.21264618362,34.6822822385,68.6723913603,-66.9629170421,0.150276288105,0.180581151315,0.0,0.0,1.69224440881e-17,2.21181422082e-17,1.50557652274e-17,0.00188720226778,0.00234533276428,0.00192221135148,1.92347002822e-17,1.91949397426e-17,1.59433557204e-17,1.5845519454e-17,1.80608965817e-17,1.79696824372e-17,0.0019201706747,0.0019201706747,0.00187419840372,0.00187419840372,0.00175247224665,0.00175247224665 +0.127840517432,0.689133318553,-0.0935525059172,0.127840517432,0.689133318553,-0.0935525059172,0.583161908487,0.0020982313425,0.812353239616,0.122597896477,0.549076304254,-0.0456197323653,0.122597896477,0.549076304254,-0.0456197323653,0.396997212211,0.134821784747,0.500408142154,5.50515445786,8.21264618362,34.6822822385,68.6723913603,-66.9629170421,0.150276288105,0.180581151315,0.0,0.0,1.47450969503e-17,1.55223124996e-17,1.3892035355e-17,0.00188720226778,0.00234533276428,0.00192221135148,1.47523391354e-17,1.4612477848e-17,1.35617810137e-17,1.35064844719e-17,1.45875164807e-17,1.46752478244e-17,0.0019201706747,0.0019201706747,0.00187419840372,0.00187419840372,0.00175247224665,0.00175247224665 From 415da982ffcc473a93e105aed8547bf8b09788b1 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Sat, 6 May 2023 19:36:12 -0600 Subject: [PATCH 72/95] updating dependencies --- dependencies.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dependencies.xml b/dependencies.xml index 409bdb12b0..070a4bcab0 100644 --- a/dependencies.xml +++ b/dependencies.xml @@ -36,7 +36,7 @@ Note all install methods after "main" take
- 1.21 + 1.22 1.7 1.0 1.3 @@ -47,12 +47,12 @@ Note all install methods after "main" take AttributeError: 'EntryPoints' object has no attribute 'get' so should be skipped. --> - 0.19 + 2023 1.5 3.5 0.13 2.2 - 2.9 + 2.10 3.7 3 @@ -68,7 +68,7 @@ Note all install methods after "main" take - 2.1 + 2.2 1.13 @@ -84,7 +84,7 @@ Note all install methods after "main" take - 3 + 4
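Patches 70 and 71 above converge on the final form of the bias-factor computation (eq. 79): the plain cross-covariance term `normalizedSenTar @ UparVar @ normalizedSenExp.T` in the middle, with truncated pseudo-inverses (`rtol`) applied to the two matrix square roots. The sketch below is a minimal standalone version of that expression, assuming as inputs the reference matrices hardcoded for debugging in patch 69; the function name `biasFactor` and the free-standing layout are ours, mirroring `_calculateBiasFactor`.

```python
# Standalone sketch of eq. 79 as it stands after patch 71: truncated
# pseudo-inverses around the plain cross-covariance term.
import numpy as np
import scipy as sp
from scipy.linalg import sqrtm

def biasFactor(senExp, senTar, UparVar, UmesVar=None, tol=1e-4):
  """r neglects measurement uncertainty; rExact folds UmesVar into the experiment term."""
  if UmesVar is None:
    UmesVar = np.zeros((len(senExp), len(senExp)))
  tarHalfInv = sp.linalg.pinv(sqrtm(senTar @ UparVar @ senTar.T), rtol=tol)
  cross = senTar @ UparVar @ senExp.T
  r = (tarHalfInv @ cross @ sp.linalg.pinv(sqrtm(senExp @ UparVar @ senExp.T), rtol=tol)).real
  rExact = (tarHalfInv @ cross @ sp.linalg.pinv(sqrtm(senExp @ UparVar @ senExp.T + UmesVar), rtol=tol)).real
  return r, rExact

# Reference values printed for debugging in patch 69.
senExp = np.array([[-0.80794013, 1.80794013], [0.07731272, 0.92268728], [0.40131466, 0.59868534]])
senTar = np.array([[-0.86540007, 1.86540007], [0.08002442, 0.91997558], [0.41033666, 0.58966334]])
UparVar = np.array([[0.00774389, -0.00049797], [-0.00049797, 0.00903588]])
UmesVar = np.array([[0.03603028, 0.01503859, 0.00722245],
                    [0.01503859, 0.00766866, 0.00507709],
                    [0.00722245, 0.00507709, 0.00424897]])
r, rExact = biasFactor(senExp, senTar, UparVar, UmesVar)
```

With these inputs, `r` and `rExact` should land close to the `r_r` and `rE_r` reference matrices printed in patch 69, up to the effect of the `rtol` truncation.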
From 2940523787417c6eca18cde27440321635cc720e Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Sat, 6 May 2023 20:32:15 -0600 Subject: [PATCH 73/95] removing unnecessary tests --- .../test_validation_gate_representativity.xml | 109 ----------------- ...test_validation_gate_representativity2.xml | 112 ------------------ 2 files changed, 221 deletions(-) delete mode 100644 tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml delete mode 100644 tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml deleted file mode 100644 index 046b0ec8ea..0000000000 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml +++ /dev/null @@ -1,109 +0,0 @@ - - - - Representativity - mcRun, PP1 - 1 - - - - framework/PostProcessors/Validation/test_validation_gate_representativity - Mohammad Abdo (@Jimmy-INL) - 2021-04-29 - PostProcessors.Validation - - This test assesses the mechanics of the representativity workflow; one of the validation algorithms used in RAVEN. - This test uses a toy 1D slab reflective model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors. - - - Added Modification for new PP API - - - - - - time,phi_0,x,a_tilde,phi - - - outputDataMC1|ans - outputDataMC2|ans2 - simIndex - outputDataMC1|x1,outputDataMC1|x2 - outputDataMC2|x1,outputDataMC2|x2 - outputDataMC1|time - outputDataMC2|time - - - - - - - - - - 1 - 0.5 - - - -1 - 1 - - - - - - - 10 - - - dist1 - - - dist2 - - - - - - - inputPlaceHolder2 - slab - MC_external - outputDataMC1 - outputDataMC2 - - - outputDataMC1 - outputDataMC2 - pp1 - pp1_metric - pp1_metric_dump - - - - - - x1,x2 - OutputPlaceHolder - - - x1,x2 - ans - - - x1,x2 - ans2 - - - InputPlaceHolder - - - - - - csv - pp1_metric - - - - diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml deleted file mode 100644 index 33e96e5611..0000000000 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml +++ /dev/null @@ -1,112 +0,0 @@ - - - - Representativity - mcRun, PP1 - 1 - - - - framework/PostProcessors/Validation/test_validation_gate_representativity - Mohammad Abdo (@Jimmy-INL) - 2021-04-29 - PostProcessors.Validation.Representativity - - This test is aimed to show how to use the mechanics of the Validation Post-Processor. For semplicity, - this test is using the attenuation model (analytical) and simple representativity factors metrics. - The output name convention is ``feature name''\underscore``target name''\underscore``metric name''=. 
- - - Added Modification for new PP API - - - - - - x1, x2 - ans, ans2 - - - outputDataMC1|ans - outputDataMC2|ans2 - simIndex - outputDataMC1|x1,outputDataMC1|x2 - outputDataMC2|x1,outputDataMC2|x2 - outputDataMC1|time - outputDataMC2|time - - - - - - - - - - 1 - 0.5 - - - -1 - 1 - - - - - - - 10 - - - dist1 - - - dist2 - - - - - - - inputPlaceHolder2 - poly - MC_external - outputDataMC1 - outputDataMC2 - - - outputDataMC1 - outputDataMC2 - pp1 - pp1_metric - pp1_metric_dump - - - - - - x1,x2 - OutputPlaceHolder - - - x1,x2 - ans - - - x1,x2 - ans2 - - - InputPlaceHolder - ans_ans2_simIndex - - - - - - csv - pp1_metric - - - - From 296a4da82a36deddfc27e4feba1d4a9523919c41 Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Fri, 12 May 2023 11:03:11 -0600 Subject: [PATCH 74/95] updating manual --- doc/user_manual/PostProcessors/Validation.tex | 364 +++++++++--------- 1 file changed, 182 insertions(+), 182 deletions(-) diff --git a/doc/user_manual/PostProcessors/Validation.tex b/doc/user_manual/PostProcessors/Validation.tex index 3542d04280..2aec9db716 100644 --- a/doc/user_manual/PostProcessors/Validation.tex +++ b/doc/user_manual/PostProcessors/Validation.tex @@ -10,11 +10,11 @@ \subsubsection{Validation PostProcessors} \item \textbf{Probabilistic}, using probabilistic method for validation, can be used for both static and time-dependent problems. \item \textbf{PPDSS}, using dynamic system scaling method for validation, can only be used for time-dependent problems. \item \textbf{Representativity}, using represntativity (bias) factor for validation, currently, can be used for static data. - \item \textbf{PCM}, using Physics-guided Coverage Mapping method for validation, can only be used for static problems. + % \item \textbf{PCM} \end{itemize} % -The choices of the available metrics and acceptable data objects are specified in table~\ref{tab:ValidationAlgorithms}. +The choices of the available metrics and acceptable data objects are specified in table \ref{tab:ValidationAlgorithms}. \begin{table}[] \caption{Validation Algorithms and respective available metrics and DataObjects} @@ -23,9 +23,8 @@ \subsubsection{Validation PostProcessors} \hline \textbf{Validation Algorithm} & \textbf{DataObject} & \textbf{Available Metrics} \\ \hline Probabilistic & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet\end{tabular} & \begin{tabular}[c]{@{}c@{}}CDFAreaDifference\\ \\ PDFCommonArea\end{tabular} \\ \hline -Representativity & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet\end{tabular} & \begin{tabular}[c]{@{}c@{}}\end{tabular} \\ \hline +Representativity & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet \\DataSet\end{tabular} & \begin{tabular}[c]{@{}c@{}}\end{tabular} \\ \hline PPDSS & HistorySet & DSS \\ \hline -PCM & PointSet & (not applicable) \\ \hline \end{tabular} \end{table} @@ -91,12 +90,10 @@ \subsubsection{Validation PostProcessors} % \begin{itemize} - \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features. Make sure the feature data are normalized by a nominal value. - To enable user defined time interval selection, this postprocessor will only consider the first feature name provided. If user provides more than one, - it will output an error. - \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, specifies the names of the targets. Make sure the feature data are normalized by a nominal value. \nb Each target is paired with a feature listed in xml node \xmlNode{Features}. 
- To enable user defined time interval selection, this postprocessor will only consider the first feature name provided. If user provides more than one, - it will output an error. + \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features. + \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of + targets. \nb Each target is paired with a feature listed in xml node \xmlNode{Features}. In this case, the + number of targets should be equal to the number of features. \item \xmlNode{pivotParameter}, \xmlDesc{string, required field if HistorySet is used}, specifies the pivotParameter for a . The pivot parameter is the shared index of the output variables in the data object. \item \xmlNode{Metric}, \xmlDesc{string, required field}, specifies the \textbf{Metric} name that is defined via @@ -106,21 +103,10 @@ \subsubsection{Validation PostProcessors} \item \xmlAttr{type}, \xmlDesc{required string attribute}, the sub-type of this Metric (e.g., SKL, Minkowski) \end{itemize} \nb The choice of the available metric is \xmlString{DSS}, please - refer to~\ref{sec:Metrics} for detailed descriptions about this metric. + refer to \ref{sec:Metrics} for detailed descriptions about this metric. \item \xmlNode{pivotParameterFeature}, \xmlDesc{string, required field}, specifies the pivotParameter for a feature . The feature pivot parameter is the shared index of the output variables in the data object. \item \xmlNode{pivotParameterTarget}, \xmlDesc{string, required field}, specifies the pivotParameter for a target . The target pivot parameter is the shared index of the output variables in the data object. - \item \xmlNode{separateFeatureData}, \xmlDesc{string, optional field}, specifies the custom feature interval to apply DSS postprocessing. The string should contain three parts; start time, `|', and end time all in one. For example, 0.0|0.5. - The start and end time should be in ratios or raw values of the full interval. In this case 0.5 would be either the midpoint time or time 0.5 of the given time units. This node is not required and if not provided, the default is the full time interval. - the following attributes need to be specified: - \begin{itemize} - \item \xmlAttr{type}, \xmlDesc{optional string attribute}, options are `ratio' or `raw\_values'. The default is `ratio'. - \end{itemize} - \item \xmlNode{separateTargetData}, \xmlDesc{string, optional field}, specifies the custom target interval to apply DSS postprocessing. The string should contain three parts; start time, `|', and end time all in one. For example, 0.0|0.5. - The start and end time should be in ratios or raw values of the full interval. In this case 0.5 would be either the midpoint time or time 0.5 of the given time units. This node is not required and if not provided, the default is the full time interval. - the following attributes need to be specified: - \begin{itemize} - \item \xmlAttr{type}, \xmlDesc{optional string attribute}, options are `ratio' or `raw\_values'. The default is `ratio'. - \end{itemize} + \item \xmlNode{multiOutput}, \xmlDesc{string, required field}, to extract raw values for the HistorySet. The user must use ‘raw values’ for the full set of metrics’ calculations to be dumped. \item \xmlNode{scale}, \xmlDesc{string, required field}, specifies the type of time scaling. 
The following are the options for scaling (specific definitions for each scaling type is provided in \ref{sec:dssdoc}): \begin{itemize} \item \textbf{DataSynthesis}, calculating the distortion for two data sets without applying other scaling ratios. @@ -130,27 +116,13 @@ \subsubsection{Validation PostProcessors} \item \textbf{omega\_strain}, calculating the distortion for two data sets with scaling ratios for agent of changes. \item \textbf{identity}, calculating the distortion for two data sets with scaling ratios of 1. \end{itemize} - \item \xmlNode{scaleBeta}, \xmlDesc{float, required field}, specifies the parameter of interest scaling ratio between the feature and target. - \item \xmlNode{scaleOmega}, \xmlDesc{float, required field}, specifies the agents of change scaling ratio between the feature and target. -\end{itemize} - -The output \textbf{DataObjects} has required and optional components to provide the user the flexibility to obtain desired postprocessed data. The following are information about DSS output \textbf{DataObjects}: -\begin{itemize} - \item \xmlNode{Output}, \xmlDesc{string, required field}, specifies the string of postprocessed results to output. The following is the list of DSS output names: - \begin{itemize} - \item \textbf{pivot\_parameter}, provides the pivot parameter used to postprocess feature and target input data. - \item \textbf{total\_distance\_targetName\_featureName}, provides the total metric distance of the whole time interval. `targetName' and `featureName' are the string names of the input target and feature. - \item \textbf{feature\_beta\_targetName\_featureName}, provides the normalized feature data provided from \textbf{DataObjects} input. `targetName' and `featureName' are the string names of the input target and feature. - \item \textbf{target\_beta\_targetName\_featureName}, provides the normalized target data provided from \textbf{DataObjects} input. `targetName' and `featureName' are the string names of the input target and feature. - \item \textbf{feature\_omega\_targetName\_featureName}, provides the normalized feature first order derivative data. `targetName' and `featureName' are the string names of the input target and feature. - \item \textbf{target\_omega\_targetName\_featureName}, provides the normalized target first order derivative data. `targetName' and `featureName' are the string names of the input target and feature. - \item \textbf{feature\_D\_targetName\_featureName}, provides the feature temporal displacement rate (second order term) data. `targetName' and `featureName' are the string names of the input target and feature. - \item \textbf{target\_D\_targetName\_featureName}, provides the target temporal displacement rate (second order term) data. `targetName' and `featureName' are the string names of the input target and feature. - \item \textbf{process\_time\_targetName\_featureName}, provides the shared process time data. `targetName' and `featureName' are the string names of the input target and feature. - \item \textbf{standard\_error\_targetName\_featureName}, provides the standard error of the overall transient data. `targetName' and `featureName' are the string names of the input target and feature. - \end{itemize} + \item \xmlNode{scaleBeta}, \xmlDesc{float or comma separated list of floats, required field}, specifies the parameter of interest scaling ratio between the feature and target. + To provide more than one scaling factor, separate by adding a comma in between each number. 
Providing more than one scaling factor presumes there are more than one parameter to be post-processed. + If so, \xmlNode{Features}, \xmlNode{Targets}, and \xmlNode{scaleOmega} must have the same number scaling factors. + \item \xmlNode{scaleOmega}, \xmlDesc{float or comma separated list of floats, required field}, specifies the agents of change scaling ratio between the feature and target. + To provide more than one scaling factor, separate by adding a comma in between each number. Providing more than one scaling factor presumes there are more than one parameter to be post-processed. + If so, \xmlNode{Features}, \xmlNode{Targets}, and \xmlNode{scaleBeta} must have the same number scaling factors. \end{itemize} -pivot parameter must be named `pivot\_parameter' and this array is assigned within the post-processor algorithm. \textbf{Example:} \begin{lstlisting}[style=XML,morekeywords={subType}] @@ -162,106 +134,15 @@ \subsubsection{Validation PostProcessors} ... ... - - outMC1|x1 - outMC2|x2 - dss - time1 - time2 - DataSynthesis - 1 - 1 - - outMC1|x1 - outMC2|x2 + outMC1|x1,outMC1|y1 + outMC2|x2,outMC2|y2 dss time1 time2 - 0.0|0.5 - 0.0|0.5 DataSynthesis - 1 - 1 - - - outMC1|x1 - outMC2|x2 - dss - time1 - time2 - 0.2475|0.495 - 0.3475|0.695 - DataSynthesis - 1 - 1 - - ... - - ... - - ... - - - dss_x2_x1,total_distance_x2_x1,process_time_x2_x1,standard_deviation_x2_x1 - - - pivot_parameter - - - - - dss_x2_x1,total_distance_x2_x1,process_time_x2_x1,standard_deviation_x2_x1 - - - pivot_parameter - - - - - dss_y2_y1,total_distance_y2_y1,process_time_y2_y1,standard_deviation_y2_y1 - - - pivot_parameter - - - ... - - ... - -\end{lstlisting} - -\paragraph{PCM} -\textbf{PCM} evaluates the uncertainty reduction fraction when using Feature(s) to validate each Target via Physics-guided Coverage Mapping (PCM) method. - -% -\ppType{PCM}{PhysicsGuidedCoverageMapping} -% - -\begin{itemize} - \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features. - \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of - targets. \nb Each target will be validated using all features listed in xml node \xmlNode{Features}. The - number of targets is not necessarily equal to the number of features. - \item \xmlNode{Measurements}, \xmlDesc{comma separated string, required field}, contains a comma separated list of - measurements of the features. \nb Each measurement correspond to a feature listed in xml node \xmlNode{Features}. The - number of measurements should be equal to the number of features and in the same order as the features listed in \xmlNode{Features}. -\end{itemize} - -The output of PCM is comma separated list of strings in the format of ``pri\textunderscore post\textunderscore stdReduct\textunderscore [targetName]'', -where [targetName] is the $VariableName$ specified in DataObject of \xmlNode{Targets}. - - -\textbf{Example:} -\begin{lstlisting}[style=XML,morekeywords={subType}] - - ... - - ... - - outputDataMC1|F1,outputDataMC1|F2 - outputDataMC2|F2,outputDataMC2|F3,outputDataMC2|F4 - msrData|F1,msrData|F2 + 1,1 + 1,1 ... @@ -275,77 +156,196 @@ \subsubsection{Validation PostProcessors} post-processor interface that acts as a gate for applying these validation algorithms (i.e., representativity, Physics-guided Convergence Mapping (PCM), and Dynamic System Scaling (DSS)). The post-processor is in charge of deploying a common infrastructure for the user of \textbf{Validation} problems. 
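The central quantity this post-processor reports is the representativity (bias) factor of eq.~79, computed in \texttt{\_calculateBiasFactor}. For a single measurable $F$ and a single figure of merit $FOM$, that matrix expression collapses to a scalar correlation coefficient between the two normalized sensitivity profiles; as a sketch, writing $S_F$ and $S_{FOM}$ for the normalized sensitivity row vectors and $\Sigma_p$ for the parameter covariance (\texttt{UparVar}; these symbols are ours, not the manual's),
\[
  r \;=\; \frac{S_{FOM}\,\Sigma_p\,S_{F}^{T}}
               {\sqrt{\left(S_{FOM}\,\Sigma_p\,S_{FOM}^{T}\right)\left(S_{F}\,\Sigma_p\,S_{F}^{T}\right)}},
\]
so $r \rightarrow 1$ indicates that the mock experiment fully represents the target, consistent with the near-unit \texttt{BiasFactor\_MockF1\_TarFOM1} values in the single-measurable gold files above.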
-The representativity theory was first founded in the Neutronics community~\cite{Gandini, palmiotti1, palmiotti2}, then lately, was transformed to the thermal hydraulics~\cite{Epiney1, Epiney2}. - +%The usage of this post-processor is three fold. one, to quantitatively assess if a mock/prototype model/experiment +%form a good representation of a target model. Two, if a set of experiments can represent a target model and can +%claim a full coverage of the design space and scenarios, and three, if the available set of experiments are not +%enough to declare coverage what are the remaining experiments required in order to achieve full coverage and +%increase the representativity/bias factor. +The representativity theory was first founded in the +Neutronics community \cite{Gandini, palmiotti1, palmiotti2}, then lately, was transformed to the thermal hydraulics \cite{Epiney1, Epiney2}. So far, several algorithms are implemented within this post-processor: % \ppType{Representativity}{Representativity} % \begin{itemize} - \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features, which can be the measuables/observables of the mock model. Reader should be warned that this nomenclature is different than the machine learning nomenclature. + \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features, which can be the measuables/observables of the mock model. Reader should be warned that this nomenclature is different than the Machine learning nomenclature. \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of targets. These are the Figures of merit (FOMs) in the target model against which the mock model is being validated. - \item \xmlNode{featureParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the mock/prototype model. + \item \xmlNode{featureParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the mock model. - \item \xmlNode{targetParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the target model. + \item \xmlNode{targetParameters}, \xmlDesc{comma separated string, required field}, contains a comma separated list of + target parameters/inputs. \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable of the mock model. Default is ``time''. \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation). \item \xmlNode{targetPivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable in the target model. Default is ``time''. \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation). +\end{itemize} + + +The \textbf{Represntativity} post-processor can make use of the \textbf{Metric} system (See Chapter \ref{sec:Metrics}), +in conjunction with the specific algorithm chosen from the list above, +to report validation scores for both static and time-dependent data. Indeed, Both \textbf{PointSet} and \textbf{HistorySet} can be accepted by this post-processor. If the name of given variable to be compared is unique, it can be used directly, otherwise the variable can be specified with $DataObjectName|InputOrOutput|VariableName$ nomenclature. 
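The $DataObjectName|InputOrOutput|VariableName$ lookup can be pictured as a small resolver over the input datasets. The sketch below is a trimmed, hypothetical adaptation of the \texttt{\_getDataFromDatasets} helper that patch 75 later removes, keeping only the name-resolution logic (probability-weight handling omitted):

\begin{lstlisting}[language=Python]
# Resolve a possibly qualified "dataObject|variable" name against a
# list of datasets; `names` holds the data object names, in order.
def resolveVariable(datasets, names, var):
  if "|" in var and names is not None:
    do, feat = var.split("|")[0], var.split("|")[-1]  # tolerates the 3-part form
    return datasets[names.index(do)][feat]
  for ds in datasets:  # unqualified name: first dataset that provides it
    if var in ds:
      return ds[var]
  return None
\end{lstlisting}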
+The \xmlNode{Output} node of the \xmlNode{PointSet} the \xmlNode{Representativity} Postprocessor, accepts outputs like: +\begin{itemize} + \item BiasFactor\_Mock\{prototype output $var_i$ name\}\_Tar\{target output $var_j$ name\}: + + representativity or bias factor of prototype output (measurable) i with respect to target j + + assuming no measurement uncertainty in the mock model + + (i.e., BiasFactor\_MockF1\_TarFOM2). + + \item ExactBiasFactor\_Mock\{prototype output $var_i$ name\}\_Tar\{target output $var_j$ name\}: + + representativity or bias factor of prototype output (measurable) i with respect to target j + + considering measurements uncertainty in the mock model + + (i.e., ExactBiasFactor\_MockF1\_TarFOM2). + + \item CorrectedParameters\_\{parameter output $var_i$ name\}: + + the adjusted/corrected value of parameter i due to the analysis + + (i.e., CorrectedParameters\_p1) + + \item CorrectedTargets\_{output $var_i$ name}: + + the adjusted/corrected value of target i due to the analysis + + (i.e., CorrectedTargets\_FOM1) + + \item VarianceInCorrectedParameter\_\{parameter output $var_i$ name\}: + + variance in corrected parameter i (squared uncertainty) + + (i.e., VarianceInCorrectedParameter\_p1) + + \item CovarianceInCorrectedParameters\_\{parameter $var_i$ name\}\_\{parameter $var_j$ name\}: + + Covariance between parameter i and parameter j + + (i.e., CovarianceInCorrectred\_p1) + + \item CorrectedVar\_Tar\{target $vat_i$ name\}: + + variance in corrected target i + + (i.e., CorrectedVar\_TarFOM1) + + \item ExactCorrectedVar\_Tar\{target $var_i$ name\}: + + exact variance in corrected target i considering uncertainty in measurements in the mock model. + + (i.e., ExactCorrectedVar\_TarFOM1) + + \item CorrectedCov\_Tar\{target $var_i$ name\}\_Tar\{target $var_j$ name\}: + + covariance between corrected target i and corrected target j + + (i.e., CorrectedCov\_TarFOM1) + + \item ExactCorrectedCov\_Tar\{target $var_i$ name\}\_Tar\{target $var_j$ name\} + + exact covariance between corrected target i and corrected target j considering uncertainties in measurements in the mock model. + + (i.e., ExactCorrectedCov\_TarFom1\_TarFOM2) + +\nb{all variable names proceeded by 'Exact' takes into account the measurement uncertainties in the mock experiment} + +\end{itemize} + + \textbf{Example:} \begin{lstlisting}[style=XML,morekeywords={subType}] ... - - - inputPlaceHolder2 - linModel - ExperimentMCSampler - outputDataMC1 - - - - inputPlaceHolder2 - tarModel - TargetMCSampler - outputDataMC2 - - - - outputDataMC1 - outputDataMC2 - pp1 - pp1_metric - pp1_metric_dump - + + inputPlaceHolder2 + linModel + MC_external + outputDataMC1 + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + ... - - p1, p2, e1, e2, e3, bE - F1, F2, F3 - - - p1, p2, o1, o2, o3, bT - FOM1, FOM2, FOM3 - - - outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 - outputDataMC2|FOM1, outputDataMC2|FOM2, outputDataMC2|FOM3 - outputDataMC1|p1,outputDataMC1|p2 - outputDataMC2|p1,outputDataMC2|p2 - outputDataMC1|time - outputDataMC2|time - - +... + + outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 + outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 + outputDataMC1|p1,outputDataMC1|p2 + outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|time + +... + +... 
+ + InputPlaceHolder + + BiasFactor_MockF1_TarFOM1, + BiasFactor_MockF1_TarFOM2, + BiasFactor_MockF1_TarFOM3, + BiasFactor_MockF2_TarFOM1, + BiasFactor_MockF2_TarFOM2, + BiasFactor_MockF2_TarFOM3, + BiasFactor_MockF3_TarFOM1, + BiasFactor_MockF3_TarFOM2, + BiasFactor_MockF3_TarFOM3, + ExactBiasFactor_MockF1_TarFOM1, + ExactBiasFactor_MockF1_TarFOM2, + ExactBiasFactor_MockF1_TarFOM3, + ExactBiasFactor_MockF2_TarFOM1, + ExactBiasFactor_MockF2_TarFOM2, + ExactBiasFactor_MockF2_TarFOM3, + ExactBiasFactor_MockF3_TarFOM1, + ExactBiasFactor_MockF3_TarFOM2, + ExactBiasFactor_MockF3_TarFOM3, + CorrectedParameters_p1, + CorrectedParameters_p2, + CorrectedTargets_FOM1, + CorrectedTargets_FOM2, + CorrectedTargets_FOM3, + VarianceInCorrectedParameters_p1, + VarianceInCorrectedParameters_p2, + CovarianceInCorrectedParameters_p1_p2, + CovarianceInCorrectedParameters_p2_p1, + CorrectedVar_TarFOM1, + CorrectedVar_TarFOM2, + CorrectedVar_TarFOM3, + ExactCorrectedVar_TarFOM1, + ExactCorrectedVar_TarFOM2, + ExactCorrectedVar_TarFOM3, + CorrectedCov_TarFOM1_TarFOM2, + CorrectedCov_TarFOM2_TarFOM1, + CorrectedCov_TarFOM1_TarFOM3, + CorrectedCov_TarFOM3_TarFOM1, + CorrectedCov_TarFOM2_TarFOM3, + CorrectedCov_TarFOM3_TarFOM2, + ExactCorrectedCov_TarFOM1_TarFOM2, + ExactCorrectedCov_TarFOM2_TarFOM1, + ExactCorrectedCov_TarFOM1_TarFOM3, + ExactCorrectedCov_TarFOM3_TarFOM1, + ExactCorrectedCov_TarFOM2_TarFOM3, + ExactCorrectedCov_TarFOM3_TarFOM2 + + ... \end{lstlisting} From 9675884e7cf4d85a9460838dbe682b5b3d51b5dc Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 15 May 2023 08:40:17 -0600 Subject: [PATCH 75/95] regolding and checking rank dificient covariance matrices --- .../Validations/Probabilistic.py | 4 -- .../Validations/Representativity.py | 53 +++++-------------- tests/framework/AnalyticModels/expLinModel.py | 4 +- .../pp1_metric_dump.csv | 2 +- .../pp1_metric_dump.csv | 2 +- .../pp1_metric_dump.csv | 2 +- 6 files changed, 18 insertions(+), 49 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Probabilistic.py b/ravenframework/Models/PostProcessors/Validations/Probabilistic.py index 18fc92955a..a123a32968 100644 --- a/ravenframework/Models/PostProcessors/Validations/Probabilistic.py +++ b/ravenframework/Models/PostProcessors/Validations/Probabilistic.py @@ -15,10 +15,6 @@ Created on April 04, 2021 @author: alfoa - - This class represents a base class for the validation algorithms - It inherits from the PostProcessor directly - ##TODO: Recast it once the new PostProcesso API gets in place """ #External Modules------------------------------------------------------------------------------------ diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 8caa050d68..5b0eabf840 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -11,10 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# @ Authors: Mohammad Abdo (@Jimmy-INL) -# Congjian Wang (@wangcj05) -# Andrea Alfonsi (@aalfonsi) -# Aaron Epiney (@AaronEpiney) +""" + Created on April 04, 2021 + + @ Authors: Mohammad Abdo (@Jimmy-INL) + Congjian Wang (@wangcj05) + Andrea Alfonsi (@aalfonsi) +""" #External Modules------------------------------------------------------------------------------------ import numpy as np @@ -216,7 +219,8 @@ def _evaluate(self, datasets, **kwargs): self._computeMoments(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targetOutputs]) # # 4. Compute Uncertainties in parameters UparVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeParameters]) - UparVar = np.diag(np.diag(UparVar)) + if np.linalg.matrix_rank(UparVar) < np.shape(UparVar)[0]: + UparVar = UparVar + np.diag(np.ones(np.shape(UparVar)[0])*np.finfo(np.float32).eps) # # 5. Compute Uncertainties in outputs # Outputs of Mock model (Measurables F_i) UMeasurablesVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeOutputs]) @@ -311,38 +315,6 @@ def _generateSensitivityMatrix(self, outputs, inputs, sensDict, datasets, normal sensMatr[i, j] = sensDict[senName][0]* datasets[inpVar].meanValue / datasets[outVar].meanValue return sensMatr - def _getDataFromDatasets(self, datasets, var, names=None): - """ - Utility function to retrieve the data from datasets - @ In, datasets, list, list of datasets (data1,data2,etc.) to search from. - @ In, names, list, optional, list of datasets names (data1,data2,etc.). If not present, the search will be done on the full list. - @ In, var, str, the variable to find (either in fromat dataobject|var or simply var) - @ Out, data, tuple(numpy.ndarray, xarray.DataArray or None), the retrived data (data, probability weights (None if not present)) - """ - data = None - pw = None - dat = None - if "|" in var and names is not None: - do, feat = var.split("|") - doindex = names.index(do) - dat = datasets[doindex][feat] - else: - for doindex, ds in enumerate(datasets): - if var in ds: - dat = ds[var] - break - if 'ProbabilityWeight-{}'.format(feat) in datasets[names.index(do)]: - pw = datasets[doindex]['ProbabilityWeight-{}'.format(feat)].values - elif 'ProbabilityWeight' in datasets[names.index(do)]: - pw = datasets[doindex]['ProbabilityWeight'].values - dim = len(dat.shape) - dat = dat.values - if dim == 1: - # the following reshaping does not require a copy - dat.shape = (dat.shape[0], 1) - data = dat, pw - return data - def _computeMoments(self, datasets, features, targets): """ A utility function to compute moments, mean value, variance and covariance @@ -370,7 +342,7 @@ def _computeErrors(self,datasets,features,targets): """ for var in [x.split("|")[-1] for x in features + targets]: datasets['err_'+str(var)] = (datasets[var].values - datasets[var].attrs['meanValue'])/datasets[var].attrs['meanValue'] - + return datasets def _computeUncertaintyMatrixInErrors(self, data, parameters): """ A utility function to variance and covariance of variables in the error space @@ -400,8 +372,9 @@ def _calculateBiasFactor(self, normalizedSenExp, normalizedSenTar, UparVar, Umes if UmesVar is None: UmesVar = np.zeros((len(normalizedSenExp), len(normalizedSenExp))) # Compute representativity (#eq 79) - r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=1e-4) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ 
sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T),rtol=1e-4)).real - rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=1e-4) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar),rtol=1e-4)).real + tol = 1e-6 + r = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=tol) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T),rtol=tol)).real + rExact = (sp.linalg.pinv(sqrtm(normalizedSenTar @ UparVar @ normalizedSenTar.T),rtol=tol) @ (normalizedSenTar @ UparVar @ normalizedSenExp.T) @ sp.linalg.pinv(sqrtm(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar),rtol=tol)).real return r, rExact def _calculateCovofTargetErrorsfromBiasFactor(self, normalizedSenTar, UparVar, r): diff --git a/tests/framework/AnalyticModels/expLinModel.py b/tests/framework/AnalyticModels/expLinModel.py index 26b48ff3ff..035c1831f9 100644 --- a/tests/framework/AnalyticModels/expLinModel.py +++ b/tests/framework/AnalyticModels/expLinModel.py @@ -52,7 +52,7 @@ def main(Input): Input['e2'] = [1,8] Input['e3'] = [-5, -5] Input['bE'] = np.array([[0],[0],[0]]) - Input['x1'] = 5.5 - Input['x2'] = 8 + Input['p1'] = 5.5 + Input['p2'] = 8 a,b,c = main(Input) print(a,b,c) diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv index 746d7087d9..e28c0e54e5 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectMatch/pp1_metric_dump.csv @@ -1,2 +1,2 @@ BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 
-0.956932087764,0.159161787732,-0.119563064908,0.167699113517,0.379596817967,0.453629602147,-0.121159615409,0.460275888308,0.663417726361,0.614868146447,0.167744524268,0.011528548694,0.332238541657,0.277889631541,0.258901218507,0.0301375584694,0.360345785632,0.475713880249,5.50515445786,8.21264618362,-12.7004102917,68.6723913603,-66.9629170421,0.164423936118,0.171483721392,0.0,0.0,-2.11385525993e-17,-1.65952116054e-18,1.78861964506e-18,0.0140274750642,0.00213562754926,0.00163006250251,-8.73323937893e-18,-8.65159192269e-18,-4.14928652802e-18,-4.23960447815e-18,-1.81362205386e-19,-3.02031782984e-19,0.00421132452039,0.00421132452039,0.000781759501857,0.000781759501857,0.00141042089218,0.00141042089218 +0.956931548103,0.158944500315,-0.119856079619,0.167914857535,0.379594787803,0.453551486076,-0.120864711712,0.460352642151,0.663418263045,0.622953692665,0.168703460234,0.00999759077449,0.333561740878,0.275229496201,0.254849386412,0.0278303625985,0.355873728293,0.470485463976,5.50515445786,8.21264618362,-12.7004102917,68.6723913603,-66.9629170421,0.16494093266,0.172276908973,0.0,0.0,-1.77349989183e-16,-1.24861665582e-17,6.93021929434e-18,0.0143995547479,0.0021311702406,0.00158930335822,-5.43546114151e-17,-5.44174372803e-17,-1.1382507111e-17,-1.12204825963e-17,2.16377372229e-18,2.23066101281e-18,0.00429411398383,0.00429411398383,0.000763476797026,0.000763476797026,0.00137548132103,0.00137548132103 diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv index 8132eabb81..b506d1d03d 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityPerfectSingleMeasurable/pp1_metric_dump.csv @@ -1,2 +1,2 @@ BiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM1,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM1 -0.999910325902,0.699796578127,5.50515445786,8.21264618362,-12.7004102917,0.217892904797,0.354562331426,0.0,0.0,6.67895679264e-06,0.0190039413823 +0.999917799082,0.707120008205,5.50515445786,8.21264618362,-12.7004102917,0.213154131346,0.34725436153,0.0,0.0,6.38668105813e-06,0.0194240671795 diff --git a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv index b5a2c7c87b..8c304c2ccd 100644 --- a/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv +++ b/tests/framework/PostProcessors/Validation/gold/RepresentativityrankDifficient/pp1_metric_dump.csv @@ -1,2 +1,2 @@ 
BiasFactor_MockF1_TarFOM1,BiasFactor_MockF1_TarFOM2,BiasFactor_MockF1_TarFOM3,BiasFactor_MockF2_TarFOM1,BiasFactor_MockF2_TarFOM2,BiasFactor_MockF2_TarFOM3,BiasFactor_MockF3_TarFOM1,BiasFactor_MockF3_TarFOM2,BiasFactor_MockF3_TarFOM3,ExactBiasFactor_MockF1_TarFOM1,ExactBiasFactor_MockF1_TarFOM2,ExactBiasFactor_MockF1_TarFOM3,ExactBiasFactor_MockF2_TarFOM1,ExactBiasFactor_MockF2_TarFOM2,ExactBiasFactor_MockF2_TarFOM3,ExactBiasFactor_MockF3_TarFOM1,ExactBiasFactor_MockF3_TarFOM2,ExactBiasFactor_MockF3_TarFOM3,CorrectedParameters_p1,CorrectedParameters_p2,CorrectedTargets_FOM1,CorrectedTargets_FOM2,CorrectedTargets_FOM3,VarianceInCorrectedParameters_p1,VarianceInCorrectedParameters_p2,CovarianceInCorrectedParameters_p1_p2,CovarianceInCorrectedParameters_p2_p1,CorrectedVar_TarFOM1,CorrectedVar_TarFOM2,CorrectedVar_TarFOM3,ExactCorrectedVar_TarFOM1,ExactCorrectedVar_TarFOM2,ExactCorrectedVar_TarFOM3,CorrectedCov_TarFOM1_TarFOM2,CorrectedCov_TarFOM2_TarFOM1,CorrectedCov_TarFOM1_TarFOM3,CorrectedCov_TarFOM3_TarFOM1,CorrectedCov_TarFOM2_TarFOM3,CorrectedCov_TarFOM3_TarFOM2,ExactCorrectedCov_TarFOM1_TarFOM2,ExactCorrectedCov_TarFOM2_TarFOM1,ExactCorrectedCov_TarFOM1_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM1,ExactCorrectedCov_TarFOM2_TarFOM3,ExactCorrectedCov_TarFOM3_TarFOM2 -0.127840517432,0.689133318553,-0.0935525059172,0.127840517432,0.689133318553,-0.0935525059172,0.583161908487,0.0020982313425,0.812353239616,0.122597896477,0.549076304254,-0.0456197323653,0.122597896477,0.549076304254,-0.0456197323653,0.396997212211,0.134821784747,0.500408142154,5.50515445786,8.21264618362,34.6822822385,68.6723913603,-66.9629170421,0.150276288105,0.180581151315,0.0,0.0,1.47450969503e-17,1.55223124996e-17,1.3892035355e-17,0.00188720226778,0.00234533276428,0.00192221135148,1.47523391354e-17,1.4612477848e-17,1.35617810137e-17,1.35064844719e-17,1.45875164807e-17,1.46752478244e-17,0.0019201706747,0.0019201706747,0.00187419840372,0.00187419840372,0.00175247224665,0.00175247224665 +0.128430862967,0.689134736572,-0.0927298672854,0.128430862967,0.689134736572,-0.0927298672854,0.582902422521,0.000702674732939,0.812541858656,0.12322065411,0.553725781283,-0.0465852501921,0.12322065411,0.553725781283,-0.0465852501921,0.391200280662,0.128987035362,0.494626127249,5.50515445786,8.21264618362,34.6822822385,68.6723913603,-66.9629170421,0.149346499179,0.179681468724,0.0,0.0,-3.39552860427e-18,-2.97038521552e-18,-3.81029875244e-18,0.00183645364595,0.00231975244857,0.00186608541152,-2.86314843314e-18,-2.88211952166e-18,-3.60551750031e-18,-3.52137194756e-18,-2.84730452175e-18,-2.78886225425e-18,0.00187787000787,0.00187787000787,0.00182011762032,0.00182011762032,0.0017035765031,0.0017035765031 From a77cdb62aaee3318bda32a4ee30bff3500bb883f Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Thu, 18 May 2023 15:28:06 -0600 Subject: [PATCH 76/95] enhancing poorly written functions --- .../Validations/Representativity.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 5b0eabf840..c194c3b868 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -196,27 +196,27 @@ def _evaluate(self, datasets, **kwargs): # # ## Analysis: # # 1. 
Compute mean and variance: # For mock model - self._computeMoments(datasets[0], self.prototypeParameters, self.prototypeOutputs) + datasets[0] = self._computeMoments(datasets[0], self.prototypeParameters, self.prototypeOutputs) measurableNames = [s.split("|")[-1] for s in self.prototypeOutputs] measurables = [datasets[0][var].meanValue for var in measurableNames] # For target model - self._computeMoments(datasets[1], self.targetParameters, self.targetOutputs) + datasets[1] = self._computeMoments(datasets[1], self.targetParameters, self.targetOutputs) FOMNames = [s.split("|")[-1] for s in self.targetOutputs] FOMs = np.atleast_2d([datasets[1][var].meanValue for var in FOMNames]).reshape(-1,1) # # 2. Propagate error from parameters to experiment and target outputs. # For mock model - self._computeErrors(datasets[0],self.prototypeParameters, self.prototypeOutputs) + datasets[0] = self._computeErrors(datasets[0],self.prototypeParameters, self.prototypeOutputs) measurableErrorNames = ['err_' + s.split("|")[-1] for s in self.prototypeOutputs] FOMErrorNames = ['err_' + s.split("|")[-1] for s in self.targetOutputs] - self._computeMoments(datasets[0], measurableErrorNames, measurableErrorNames) + datasets[0] = self._computeMoments(datasets[0], measurableErrorNames, measurableErrorNames) UMeasurables = np.atleast_2d([datasets[0][var].meanValue for var in measurableErrorNames]).reshape(-1,1) # For target model - self._computeErrors(datasets[1],self.targetParameters, self.targetOutputs) - self._computeMoments(datasets[1], FOMErrorNames, FOMErrorNames) + datasets[1] = self._computeErrors(datasets[1],self.targetParameters, self.targetOutputs) + datasets[1] = self._computeMoments(datasets[1], FOMErrorNames, FOMErrorNames) UFOMs = np.atleast_2d([datasets[1][var].meanValue for var in FOMErrorNames]).reshape(-1,1) # # 3. Compute mean and variance in the error space: - self._computeMoments(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeParameters],['err_' + s2.split("|")[-1] for s2 in self.prototypeOutputs]) - self._computeMoments(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targetOutputs]) + datasets[0] = self._computeMoments(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeParameters],['err_' + s2.split("|")[-1] for s2 in self.prototypeOutputs]) + datasets[1] = self._computeMoments(datasets[1],['err_' + s.split("|")[-1] for s in self.targetParameters],['err_' + s2.split("|")[-1] for s2 in self.targetOutputs]) # # 4. 
Compute Uncertainties in parameters UparVar = self._computeUncertaintyMatrixInErrors(datasets[0],['err_' + s.split("|")[-1] for s in self.prototypeParameters]) if np.linalg.matrix_rank(UparVar) < np.shape(UparVar)[0]: From dd9116747323053dba5b68e6eb58e3c3f4ae1d4e Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 5 Jun 2023 10:51:04 -0600 Subject: [PATCH 77/95] changing self.sampleTag to directly assign to "RAVEN_sample_ID" --- ravenframework/Models/PostProcessors/BasicStatistics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ravenframework/Models/PostProcessors/BasicStatistics.py b/ravenframework/Models/PostProcessors/BasicStatistics.py index c8466ee767..bfb7093537 100644 --- a/ravenframework/Models/PostProcessors/BasicStatistics.py +++ b/ravenframework/Models/PostProcessors/BasicStatistics.py @@ -182,7 +182,7 @@ def inputToInternal(self, inputIn): except KeyError: missing = [var for var in self.parameters['targets'] if var not in dataSet] self.raiseAnError(KeyError, "Variables: '{}' missing from dataset '{}'!".format(", ".join(missing),self.inputDataObjectName)) - self.sampleTag = utils.first(dataSet.dims) + self.sampleTag = 'RAVEN_sample_ID' if self.dynamic: dims = inputDataset.sizes.keys() From 1f8796410c3f5b2b6e30d0af064e15808436e3bd Mon Sep 17 00:00:00 2001 From: Jimmy-INL Date: Mon, 5 Jun 2023 11:17:00 -0600 Subject: [PATCH 78/95] making manual modifications for DSS --- doc/user_manual/PostProcessors/Validation.tex | 7 +++---- ravenframework/Models/PostProcessors/ValidationBase.py | 4 ++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/doc/user_manual/PostProcessors/Validation.tex b/doc/user_manual/PostProcessors/Validation.tex index 2aec9db716..c4bd6b43da 100644 --- a/doc/user_manual/PostProcessors/Validation.tex +++ b/doc/user_manual/PostProcessors/Validation.tex @@ -90,10 +90,9 @@ \subsubsection{Validation PostProcessors} % \begin{itemize} - \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features. - \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of - targets. \nb Each target is paired with a feature listed in xml node \xmlNode{Features}. In this case, the - number of targets should be equal to the number of features. + \item \xmlNode{prototypeOutputs}, \xmlDesc{comma separated string, required field}, contains a comma separated list of strings specifying the names of the prototype/mock model outputs. + \item \xmlNode{targetOutputs}, \xmlDesc{comma separated string, required field}, contains a comma separated list of + strings specifying target outputs. \item \xmlNode{pivotParameter}, \xmlDesc{string, required field if HistorySet is used}, specifies the pivotParameter for a . The pivot parameter is the shared index of the output variables in the data object. 
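The rank check retained above in Representativity._evaluate (PATCH 76) protects the downstream pseudo-inverse and matrix square root against a singular parameter covariance by adding a tiny diagonal jitter. A minimal standalone sketch of that idea, assuming only NumPy (the function name is illustrative, not RAVEN API):

import numpy as np

def regularizeIfSingular(cov, eps=np.finfo(np.float32).eps):
  """
    Add a small diagonal jitter to a rank-deficient covariance matrix so that
    later pinv/sqrtm calls stay numerically stable (sketch of the guard above).
    @ In, cov, np.array, square covariance matrix
    @ In, eps, float, optional, jitter magnitude
    @ Out, cov, np.array, regularized covariance matrix
  """
  if np.linalg.matrix_rank(cov) < cov.shape[0]:
    cov = cov + np.diag(np.ones(cov.shape[0]) * eps)
  return cov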
\item \xmlNode{Metric}, \xmlDesc{string, required field}, specifies the \textbf{Metric} name that is defined via
diff --git a/ravenframework/Models/PostProcessors/ValidationBase.py b/ravenframework/Models/PostProcessors/ValidationBase.py
index 2b01e99cbb..16b1c92837 100644
--- a/ravenframework/Models/PostProcessors/ValidationBase.py
+++ b/ravenframework/Models/PostProcessors/ValidationBase.py
@@ -132,8 +132,8 @@ def _handleInput(self, paramInput):
        self.targetOutputs = child.value
    if 'static' not in self.dataType and self.pivotParameter is None:
      self.raiseAnError(IOError, "The validation algorithm '{}' is a dynamic model ONLY but no node has been inputted".format(self._type))
-    # if not self.features:
-    #   self.raiseAnError(IOError, "XML node 'prototypeParameters' is required but not provided")
+    if not self.prototypeOutputs:
+      self.raiseAnError(IOError, "XML node 'prototypeOutputs' is required but not provided")

  def initialize(self, runInfo, inputs, initDict):
    """
From 6f0d9ddcf08a996905b593fc2375313bf58bcf54 Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Mon, 5 Jun 2023 11:35:01 -0600
Subject: [PATCH 79/95] updating linExpModel

---
 tests/framework/AnalyticModels/expLinModel.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/tests/framework/AnalyticModels/expLinModel.py b/tests/framework/AnalyticModels/expLinModel.py
index 035c1831f9..c713656980 100644
--- a/tests/framework/AnalyticModels/expLinModel.py
+++ b/tests/framework/AnalyticModels/expLinModel.py
@@ -34,6 +34,12 @@ def run(self,Input):
  self.F1,self.F2,self.F3 = main(Input)

def main(Input):
+  """
+    This method computes linear responses based on Inputs. i.e., $$y = A @ x$$
+
+    @ In, Input, dict, dictionary containing inputs from RAVEN
+    @ Out, y[:], elements of response vector y
+  """
  # y = A @ np.array(list(Input.values())).reshape(-1,1) + b
  m = len([key for key in Input.keys() if 'e' in key]) # number of experiments
  n = len([par for par in Input.keys() if 'p' in par]) # number of parameters
From be6e8115ecaa7063e6060c69ec8284b6c12ccd9a Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Mon, 5 Jun 2023 11:37:34 -0600
Subject: [PATCH 80/95] updating ExpModel and TarModel

---
 tests/framework/AnalyticModels/expLinModel.py | 1 -
 tests/framework/AnalyticModels/tarLinModel.py | 7 ++++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/tests/framework/AnalyticModels/expLinModel.py b/tests/framework/AnalyticModels/expLinModel.py
index c713656980..bc8537cd3d 100644
--- a/tests/framework/AnalyticModels/expLinModel.py
+++ b/tests/framework/AnalyticModels/expLinModel.py
@@ -40,7 +40,6 @@ def main(Input):
    @ In, Input, dict, dictionary containing inputs from RAVEN
    @ Out, y[:], elements of response vector y
  """
-  # y = A @ np.array(list(Input.values())).reshape(-1,1) + b
  m = len([key for key in Input.keys() if 'e' in key]) # number of experiments
  n = len([par for par in Input.keys() if 'p' in par]) # number of parameters
  A = np.array([Input['e1'],Input['e2'],Input['e3']]).reshape(-1,n)
diff --git a/tests/framework/AnalyticModels/tarLinModel.py b/tests/framework/AnalyticModels/tarLinModel.py
index f51f9ef381..a529f24ce1 100644
--- a/tests/framework/AnalyticModels/tarLinModel.py
+++ b/tests/framework/AnalyticModels/tarLinModel.py
@@ -35,7 +35,12 @@ def run(self,Input):
  self.FOM1,self.FOM2,self.FOM3 = main(Input)

def main(Input):
-  # y = A @ np.array(list(Input.values())).reshape(-1,1) + b
+  """
+    This method computes linear responses of the target application based on Inputs. i.e., $$y = A @ x$$
+
+    @ In, Input, dict, dictionary containing inputs from RAVEN
+    @ Out, y[:], elements of response vector y
+  """
  m = len([key for key in Input.keys() if 'o' in key]) # number of experiments
  n = len([par for par in Input.keys() if 'p' in par]) # number of parameters
  A = np.array([Input['o1'],Input['o2'],Input['o3']]).reshape(-1,n)
From 0fd1ddb980aecbdfbd44868f5982d61efee8e00c Mon Sep 17 00:00:00 2001
From: Jimmy-INL
Date: Mon, 5 Jun 2023 11:48:00 -0600
Subject: [PATCH 81/95] enhancing tests description

---
 .../test_representativity_rankDifficientLinExpToTarget.xml | 3 ++-
 .../test_representativity_singlePerfectLinExpToTarget.xml  | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml
index 2dd331b8df..fe37603f48 100644
--- a/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml
+++ b/tests/framework/PostProcessors/Validation/test_representativity_rankDifficientLinExpToTarget.xml
@@ -13,7 +13,8 @@
    PostProcessors.Validation
    This test assesses the mechanics of the representativity workflow; one of the validation algorithms used in RAVEN.
-    This test a linear model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors.
+    This tests a linear model as both the mock experiment and the target plant models. The linear operators describing the physics are rank deficient. This is done intentionally by making two experiments (out of the three) identical.
+
    Added Modification for new PP API
diff --git a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml
index 18f619497d..536256055d 100644
--- a/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml
+++ b/tests/framework/PostProcessors/Validation/test_representativity_singlePerfectLinExpToTarget.xml
@@ -13,7 +13,7 @@
    PostProcessors.Validation
    This test assesses the mechanics of the representativity workflow; one of the validation algorithms used in RAVEN.
-    This test a linear model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors.
+    This tests a linear model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and Figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors. This test includes a single experiment.
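The bias factors these two tests exercise follow eq. 79 as computed in _calculateBiasFactor earlier in this series. A condensed standalone sketch of that computation (argument names are illustrative; assumes SciPy >= 1.7, where scipy.linalg.pinv accepts rtol; with UmesVar supplied this mirrors the "exact" variant):

import numpy as np
from scipy.linalg import pinv, sqrtm

def biasFactor(senTar, senExp, UparVar, UmesVar=None, tol=1e-6):
  """
    Representativity (bias) factor between target and experiment responses (cf. eq. 79).
    @ In, senTar, np.array, normalized target sensitivity matrix
    @ In, senExp, np.array, normalized experiment sensitivity matrix
    @ In, UparVar, np.array, parameter variance-covariance matrix in the error space
    @ In, UmesVar, np.array, optional, measurement variance-covariance matrix
    @ In, tol, float, optional, relative tolerance of the pseudo-inverse
    @ Out, r, np.array, bias factor matrix
  """
  if UmesVar is None:
    UmesVar = np.zeros((senExp.shape[0], senExp.shape[0]))
  left = pinv(sqrtm(senTar @ UparVar @ senTar.T), rtol=tol)
  right = pinv(sqrtm(senExp @ UparVar @ senExp.T + UmesVar), rtol=tol)
  return (left @ (senTar @ UparVar @ senExp.T) @ right).real

For a perfectly matched single experiment (senTar equal to senExp and UmesVar zero) the factor reduces to one, which is what the gold values above approach.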
Added Modification for new PP API From c504f74bbdc928fa209bb90775f05f22a81318db Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Wed, 7 Jun 2023 09:35:27 -0600 Subject: [PATCH 82/95] add sampleTag for LimitSurface and SafestPoint PP --- ravenframework/Models/PostProcessors/LimitSurfaceIntegral.py | 4 ++-- .../Models/PostProcessors/PostProcessorInterface.py | 1 + ravenframework/Models/PostProcessors/SafestPoint.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ravenframework/Models/PostProcessors/LimitSurfaceIntegral.py b/ravenframework/Models/PostProcessors/LimitSurfaceIntegral.py index dab2d047e4..f6ba7e93c2 100644 --- a/ravenframework/Models/PostProcessors/LimitSurfaceIntegral.py +++ b/ravenframework/Models/PostProcessors/LimitSurfaceIntegral.py @@ -256,9 +256,9 @@ def run(self, input): f = np.vectorize(self.variableDist[varName].ppf, otypes=[np.float64]) randomMatrix[:, index] = f(randomMatrix[:, index]) tempDict[varName] = randomMatrix[:, index] - pb = self.stat._runLegacy({'targets':{self.target:xarray.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] + pb = self.stat._runLegacy({'targets':{self.target:xarray.DataArray(self.functionS.evaluate(tempDict)[self.target], dims=self.sampleTag)}})[self.computationPrefix +"_"+self.target] if self.errorModel: - boundError = abs(pb-self.stat._runLegacy({'targets':{self.target:xarray.DataArray(self.errorModel.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target]) + boundError = abs(pb-self.stat._runLegacy({'targets':{self.target:xarray.DataArray(self.errorModel.evaluate(tempDict)[self.target], dims=self.sampleTag)}})[self.computationPrefix +"_"+self.target]) else: self.raiseAnError(NotImplemented, "quadrature not yet implemented") return pb, boundError diff --git a/ravenframework/Models/PostProcessors/PostProcessorInterface.py b/ravenframework/Models/PostProcessors/PostProcessorInterface.py index ed21f8cd3f..bc6574bb78 100644 --- a/ravenframework/Models/PostProcessors/PostProcessorInterface.py +++ b/ravenframework/Models/PostProcessors/PostProcessorInterface.py @@ -67,6 +67,7 @@ def __init__(self): ## One possible solution is all postpocessors return a list of realizations, and we only ## use addRealization method to add the collections into the DataObjects self.outputMultipleRealizations = False + self.sampleTag = 'RAVEN_sample_ID' # raven sample tag used to store data def _handleInput(self, paramInput): """ diff --git a/ravenframework/Models/PostProcessors/SafestPoint.py b/ravenframework/Models/PostProcessors/SafestPoint.py index bbce61b0a7..45fdb471c4 100644 --- a/ravenframework/Models/PostProcessors/SafestPoint.py +++ b/ravenframework/Models/PostProcessors/SafestPoint.py @@ -334,7 +334,7 @@ def run(self, input): rlz[self.outputName][ncLine] = np.prod(probList) rlz['ProbabilityWeight'][ncLine] = np.prod(probList) metadata = {'ProbabilityWeight':xarray.DataArray(rlz['ProbabilityWeight'])} - targets = {tar:xarray.DataArray( rlz[tar]) for tar in self.controllableOrd} + targets = {tar:xarray.DataArray( rlz[tar], dims=self.sampleTag) for tar in self.controllableOrd} rlz['ExpectedSafestPointCoordinates'] = self.stat._runLegacy({'metadata':metadata, 'targets':targets}) self.raiseADebug(rlz['ExpectedSafestPointCoordinates']) return rlz From abf38df033303f11cd2ee41423782e0826ee5135 Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Thu, 21 Sep 2023 09:11:31 -0600 Subject: [PATCH 83/95] fix issues in pcm pp --- .../PhysicsGuidedCoverageMapping.py | 4 +-- 
.../test_validation_gate_pcm_Snapshot.xml | 34 +++++++++---------- .../test_validation_gate_pcm_Static.xml | 20 +++++------ .../test_validation_gate_pcm_Tdep.xml | 24 ++++++------- 4 files changed, 41 insertions(+), 41 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/PhysicsGuidedCoverageMapping.py b/ravenframework/Models/PostProcessors/Validations/PhysicsGuidedCoverageMapping.py index e9b898bd77..113540542c 100644 --- a/ravenframework/Models/PostProcessors/Validations/PhysicsGuidedCoverageMapping.py +++ b/ravenframework/Models/PostProcessors/Validations/PhysicsGuidedCoverageMapping.py @@ -383,7 +383,7 @@ def pcmTdep(featData, msrData, targData, recError): featPW = [] msrPW = [] - for feat, msr, targ in zip(self.features, self.measurements, self.targets): + for feat, msr, targ in zip(self.prototypeOutputs, self.measurements, self.targetOutputs): featDataProb = self._getDataFromDataDict(datasets, feat, names) msrDataProb = self._getDataFromDataDict(datasets, msr, names) # read targets' data @@ -465,7 +465,7 @@ def pcmTdep(featData, msrData, targData, recError): msrData = np.array(msrData).T targData = np.array(targData).T outputArray = PCM(featData, msrData, targData) - for targ in self.targets: + for targ in self.targetOutputs: name = "static_pri_post_stdReduct_" + targ.split('|')[-1] outputDict[name] = np.asarray(outputArray) diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Snapshot.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Snapshot.xml index 7af5690a21..e32a69b4e7 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Snapshot.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Snapshot.xml @@ -12,14 +12,14 @@ 2022-12-05 PostProcessors.Validation.PhysicsGuidedCoverageMapping - This test is aimed to show how snapshot_PCM works.This test is using SETH-C and SETH-D data + This test is aimed to show how snapshot_PCM works.This test is using SETH-C and SETH-D data as experiment (Feature) and application (Target) models.It basically runs a loop of static_PCM. - In each iteration of the loop,one execution of static_PCM is applied. + In each iteration of the loop,one execution of static_PCM is applied. Here, temperatures from one timestep in SETH-C are used as experiemnt responses (Features); temperatures from the corresponding timestep in SETH-D are used as application responses (Target) - The output is a fraction value reflecting the uncertainty reduction fraction - of Target Posterior comparing to the Target prior, - which includes uncertainty reductions along timesteps + The output is a fraction value reflecting the uncertainty reduction fraction + of Target Posterior comparing to the Target prior, + which includes uncertainty reductions along timesteps and has two columns:'time' and 'snapshot_pri_post_stdReduct'. 
@@ -31,8 +31,8 @@ time - exp|TempC - app|TempD + exp|TempC + app|TempD msr|TempMsrC Snapshot @@ -60,32 +60,32 @@ - time + time - TempC + TempC - time + time - TempD - + TempD + - time + time - TempMsrC - + TempMsrC + InputPlaceHolder - time + time snapshot_pri_post_stdReduct - + csv pcmSnapshot_metric diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Static.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Static.xml index b11a26a609..c8612ebaeb 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Static.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Static.xml @@ -13,11 +13,11 @@ PostProcessors.Validation.PhysicsGuidedCoverageMapping This test is aimed to show how PCM works. - This test is using SETH-C and SETH-D data + This test is using SETH-C and SETH-D data as experiment (Feature) and application (Target) models. Here, three timesteps' samples from SETH-C are used as experiment responses (Features), one timestep's samples from SETH-D are used as application responses (Target) - The output is a fraction value reflecting the uncertainty reduction fraction + The output is a fraction value reflecting the uncertainty reduction fraction using Feature to validate Target comparing to the Target prior. The output name convention is 'pri_post_stdReduct_'+"Target name". @@ -31,8 +31,8 @@ - expData|time20s,expData|time40s,expData|time50s - appData|time20s + expData|time20s,expData|time40s,expData|time50s + appData|time20s msrData|time20s,msrData|time40s,msrData|time50s Static @@ -59,21 +59,21 @@ - time20s,time40s,time50s + time20s,time40s,time50s - time20s - + time20s + - time20s,time40s,time50s - + time20s,time40s,time50s + InputPlaceHolder static_pri_post_stdReduct_time20s - + csv pcmStatic_metric diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Tdep.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Tdep.xml index 25a7ff60e4..4658563c13 100644 --- a/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Tdep.xml +++ b/tests/framework/PostProcessors/Validation/test_validation_gate_pcm_Tdep.xml @@ -12,7 +12,7 @@ 2023-04-23 PostProcessors.Validation.PhysicsGuidedCoverageMapping - This test is aimed to show how Tdep_PCM works.This test uses the coefficients of SETH-C and SETH-D data + This test is aimed to show how Tdep_PCM works.This test uses the coefficients of SETH-C and SETH-D data based on their U subspace as experiment (Feature) and application (Target) models. 
Here, coefficients of SETH-C are used as experiemnt responses (Features); coefficients of SETH-C and SETH-D temperatures data are used as application responses (Target) @@ -28,8 +28,8 @@ time - exp|TempC - app|TempD + exp|TempC + app|TempD msr|TempMsrC Tdep 0.001 @@ -58,22 +58,22 @@ - time + time - TempC + TempC - time + time - TempD - + TempD + - time + time - TempMsrC - + TempMsrC + time @@ -83,7 +83,7 @@ - + csv pcmTdep_metric From 57b4678480481aa6aac39a5dfc8fbac86075912c Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Thu, 21 Sep 2023 13:12:43 -0600 Subject: [PATCH 84/95] fix issues when retrieve variables from data object --- ravenframework/Models/PostProcessors/ValidationBase.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ravenframework/Models/PostProcessors/ValidationBase.py b/ravenframework/Models/PostProcessors/ValidationBase.py index 16b1c92837..e24c735906 100644 --- a/ravenframework/Models/PostProcessors/ValidationBase.py +++ b/ravenframework/Models/PostProcessors/ValidationBase.py @@ -186,10 +186,12 @@ def _getDataFromDataDict(self, datasets, var, names=None): """ pw = None if "|" in var and names is not None: - do, _, feat = var.split("|") + info = var.split("|") + do = info[0] + feat = info[1] dat = datasets[do][feat] else: - for doIndex, ds in enumerate(datasets): + for _, ds in enumerate(datasets): if var in ds: dat = ds[var] break From 8fe2c47616b14c4e408f36cb7390eb82514d0802 Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Thu, 21 Sep 2023 13:43:29 -0600 Subject: [PATCH 85/95] some fix, still need to update self.stat.inputToInternal --- .../SubdomainBasicStatistics.py | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/ravenframework/Models/PostProcessors/SubdomainBasicStatistics.py b/ravenframework/Models/PostProcessors/SubdomainBasicStatistics.py index 646294e2e1..76ce10d2ed 100644 --- a/ravenframework/Models/PostProcessors/SubdomainBasicStatistics.py +++ b/ravenframework/Models/PostProcessors/SubdomainBasicStatistics.py @@ -21,13 +21,12 @@ #External Modules End----------------------------------------------------------- #Internal Modules--------------------------------------------------------------- -from .PostProcessorInterface import PostProcessorInterface +from .PostProcessorReadyInterface import PostProcessorReadyInterface from .BasicStatistics import BasicStatistics -from ...utils import utils from ...utils import InputData, InputTypes #Internal Modules End----------------------------------------------------------- -class SubdomainBasicStatistics(PostProcessorInterface): +class SubdomainBasicStatistics(PostProcessorReadyInterface): """ Subdomain basic statitistics class. 
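The _getDataFromDataDict fix above (PATCH 84) makes variable lookups tolerant of both DataObject|variable and DataObject|space|variable specifications; combined with the later hardening in PATCH 87 that reads the last field, the parsing reduces to this standalone sketch (illustrative only):

def splitVariableSpec(var):
  """
    Split 'do|var' or 'do|space|var' into the data object name and the variable name.
    @ In, var, str, '|'-separated variable specification
    @ Out, (do, feat), tuple, data object name and variable name
  """
  info = var.split("|")
  return info[0], info[-1]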
It computes all statistics on subdomains """ @@ -76,6 +75,9 @@ def __init__(self): self.validDataType = ['PointSet', 'HistorySet', 'DataSet'] self.outputMultipleRealizations = True self.printTag = 'PostProcessor SUBDOMAIN STATISTICS' + self.inputDataObjectName = None # name for input data object + self.setInputDataType('xrDataset') + self.sampleTag = 'RAVEN_sample_ID' def inputToInternal(self, currentInp): """ @@ -88,15 +90,12 @@ def inputToInternal(self, currentInp): cellIDs = self.gridEntity.returnCellIdsWithCoordinates() dimensionNames = self.gridEntity.returnParameter('dimensionNames') self.dynamic = False - currentInput = currentInp [-1] if type(currentInp) == list else currentInp - if len(currentInput) == 0: - self.raiseAnError(IOError, "In post-processor " +self.name+" the input "+currentInput.name+" is empty.") - if currentInput.type not in ['PointSet','HistorySet']: - self.raiseAnError(IOError, self, 'This Postprocessor accepts PointSet and HistorySet only! Got ' + currentInput.type) # extract all required data from input DataObjects, an input dataset is constructed - dataSet = currentInput.asDataset() - processedDataSet, pbWeights = self.stat.inputToInternal(currentInput) + inpVars, outVars, dataSet = currentInp['Data'][0] + processedDataSet, pbWeights = self.stat.inputToInternal(currentInp) + self.sampleSize = dataSet.sizes[self.sampleTag] + for cellId, verteces in cellIDs.items(): # create masks maskDataset = None @@ -115,9 +114,9 @@ def inputToInternal(self, currentInp): # check if at least sample is available (for scalar quantities) and at least 2 samples for derivative quantities setWhat = set(self.stat.what) minimumNumberOfSamples = 2 if len(setWhat.intersection(set(self.stat.vectorVals))) > 0 else 1 - if len(cellDataset[currentInput.sampleTag]) < minimumNumberOfSamples: + if self.sampleSize < minimumNumberOfSamples: self.raiseAnError(RuntimeError,"Number of samples in cell " - f"{cellId} < {minimumNumberOfSamples}. Found {len(cellDataset[currentInput.sampleTag])}" + f"{cellId} < {minimumNumberOfSamples}. Found {self.sampleSize}" " samples within the cell. Please make the evaluation grid coarser or increase number of samples!") # store datasets From 5a92e6b08d52a4ddffe255736b33daf646d33d52 Mon Sep 17 00:00:00 2001 From: congjian wang Date: Thu, 21 Sep 2023 15:38:56 -0600 Subject: [PATCH 86/95] fix SubdomainBasicStatistics --- .../Models/PostProcessors/BasicStatistics.py | 12 ++++++++++++ .../PostProcessors/SubdomainBasicStatistics.py | 6 ++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/ravenframework/Models/PostProcessors/BasicStatistics.py b/ravenframework/Models/PostProcessors/BasicStatistics.py index cefff68496..f7adf968b5 100644 --- a/ravenframework/Models/PostProcessors/BasicStatistics.py +++ b/ravenframework/Models/PostProcessors/BasicStatistics.py @@ -212,6 +212,18 @@ def inputToInternal(self, inputIn): return inputDataset, pbWeights + + def resetProbabilityWeight(self, pbWeights): + """ + Reset probability weight using given pbWeights + @ In, pbWeights, xr.Dataset, dataset contains probability weights and + variable probability weight + @ Out, None + """ + if 'ProbabilityWeight' in pbWeights: + self.realizationWeight = xr.Dataset() + self.realizationWeight['ProbabilityWeight'] = pbWeights['ProbabilityWeight'] + def initialize(self, runInfo, inputs, initDict): """ Method to initialize the BasicStatistic pp. 
In here the working dir is diff --git a/ravenframework/Models/PostProcessors/SubdomainBasicStatistics.py b/ravenframework/Models/PostProcessors/SubdomainBasicStatistics.py index 76ce10d2ed..2537a7d87b 100644 --- a/ravenframework/Models/PostProcessors/SubdomainBasicStatistics.py +++ b/ravenframework/Models/PostProcessors/SubdomainBasicStatistics.py @@ -168,7 +168,8 @@ def run(self, inputIn): midPoint = self.gridEntity.returnCellsMidPoints(returnDict=True) firstPass = True for i, (cellId, data) in enumerate(inputData.items()): - cellData = self.stat.inputToInternal(data) + cellData = data + self.stat.resetProbabilityWeight(data[1]) res = self.stat._runLocal(cellData) for k in res: if firstPass: @@ -181,8 +182,9 @@ def run(self, inputIn): results[k][i] = np.atleast_1d(midPoint[cellId][k]) firstPass = False outputRealization['data'] = results + indexes = inputIn['Data'][0][-1].indexes if self.stat.dynamic: - dims = dict.fromkeys(results.keys(), inputIn[-1].indexes if type(inputIn) == list else inputIn.indexes) + dims = dict.fromkeys(results.keys(), indexes) for k in list(midPoint.values())[0]: dims[k] = [] outputRealization['dims'] = dims From 80bfa385ba8281caf5c711f38813ce5d3c88cd3a Mon Sep 17 00:00:00 2001 From: congjian wang Date: Thu, 21 Sep 2023 15:43:21 -0600 Subject: [PATCH 87/95] fix validation base --- ravenframework/Models/PostProcessors/ValidationBase.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ravenframework/Models/PostProcessors/ValidationBase.py b/ravenframework/Models/PostProcessors/ValidationBase.py index e24c735906..121bd0251d 100644 --- a/ravenframework/Models/PostProcessors/ValidationBase.py +++ b/ravenframework/Models/PostProcessors/ValidationBase.py @@ -188,7 +188,7 @@ def _getDataFromDataDict(self, datasets, var, names=None): if "|" in var and names is not None: info = var.split("|") do = info[0] - feat = info[1] + feat = info[-1] dat = datasets[do][feat] else: for _, ds in enumerate(datasets): From 91215f092b7a534c17dfc3b8b09bd4a622aeb96e Mon Sep 17 00:00:00 2001 From: congjian wang Date: Thu, 21 Sep 2023 16:55:12 -0600 Subject: [PATCH 88/95] update manual --- doc/user_manual/PostProcessors/Validation.tex | 242 ++++++++++-------- 1 file changed, 130 insertions(+), 112 deletions(-) diff --git a/doc/user_manual/PostProcessors/Validation.tex b/doc/user_manual/PostProcessors/Validation.tex index 02794fdc86..c5e7084082 100644 --- a/doc/user_manual/PostProcessors/Validation.tex +++ b/doc/user_manual/PostProcessors/Validation.tex @@ -9,7 +9,7 @@ \subsubsection{Validation PostProcessors} \begin{itemize} \item \textbf{Probabilistic}, using probabilistic method for validation, can be used for both static and time-dependent problems. \item \textbf{PPDSS}, using dynamic system scaling method for validation, can only be used for time-dependent problems. - \item \textbf{Representativity}, using represntativity (bias) factor for validation, currently, can be used for static data. + \item \textbf{Representativity}, using representativity (bias) factor for validation, currently, can be used for static data. \item \textbf{PCM}, using Physics-guided Coverage Mapping method for validation, can be used for static and time-dependent problems. \end{itemize} % @@ -17,6 +17,7 @@ \subsubsection{Validation PostProcessors} The choices of the available metrics and acceptable data objects are specified in table \ref{tab:ValidationAlgorithms}. 
\begin{table}[] +\centering \caption{Validation Algorithms and respective available metrics and DataObjects} \label{tab:ValidationAlgorithms} \begin{tabular}{|c|c|c|} @@ -25,12 +26,13 @@ \subsubsection{Validation PostProcessors} Probabilistic & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet\end{tabular} & \begin{tabular}[c]{@{}c@{}}CDFAreaDifference\\ \\ PDFCommonArea\end{tabular} \\ \hline Representativity & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet \\DataSet\end{tabular} & \begin{tabular}[c]{@{}c@{}}\end{tabular} \\ \hline PPDSS & HistorySet & DSS \\ \hline +PCM & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet\end{tabular} & \begin{tabular}[c]{@{}c@{}}\end{tabular} \\ \hline \end{tabular} \end{table} These post-processors can accept multiple \textbf{DataObjects} as inputs. When multiple DataObjects are provided, The user can use $DataObjectName|InputOrOutput|VariableName$ nomenclature to specify the variable -in \xmlNode{Features} and \xmlNode{Targets} for comparison. +in \xmlNode{prototypeOutputs} and \xmlNode{targetOutputs} for comparison. \paragraph{Probabilistic} The \textbf{Probabilistic} specify that the validation needs to be performed @@ -42,10 +44,10 @@ \subsubsection{Validation PostProcessors} % \begin{itemize} - \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features. - \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of - targets. \nb Each target is paired with a feature listed in xml node \xmlNode{Features}. In this case, the - number of targets should be equal to the number of features. + \item \xmlNode{prototypeOutputs}, \xmlDesc{comma separated string, required field}, specifies the names of the prototype outputs. + \item \xmlNode{targetOutputs}, \xmlDesc{comma separated string, required field}, contains a comma separated list of + targetOutputs. \nb Each target output is paired with a feature listed in xml node \xmlNode{prototypeOutputs}. In this case, the + number of target outputs should be equal to the number of prototype outputs. \item \xmlNode{pivotParameter}, \xmlDesc{string, required field if HistorySet is used}, specifies the pivotParameter for a . The pivot parameter is the shared index of the output variables in the data object. \item \xmlNode{Metric}, \xmlDesc{string, required field}, specifies the \textbf{Metric} name that is defined via @@ -70,8 +72,8 @@ \subsubsection{Validation PostProcessors} ... - outputDataMC1|ans - outputDataMC2|ans2 + outputDataMC1|ans + outputDataMC2|ans2 cdf_diff pdf_area @@ -117,10 +119,10 @@ \subsubsection{Validation PostProcessors} \end{itemize} \item \xmlNode{scaleBeta}, \xmlDesc{float or comma separated list of floats, required field}, specifies the parameter of interest scaling ratio between the feature and target. To provide more than one scaling factor, separate by adding a comma in between each number. Providing more than one scaling factor presumes there are more than one parameter to be post-processed. - If so, \xmlNode{Features}, \xmlNode{Targets}, and \xmlNode{scaleOmega} must have the same number scaling factors. + If so, \xmlNode{prototypeOutputs}, \xmlNode{targetOutputs}, and \xmlNode{scaleOmega} must have the same number scaling factors. \item \xmlNode{scaleOmega}, \xmlDesc{float or comma separated list of floats, required field}, specifies the agents of change scaling ratio between the feature and target. 
To provide more than one scaling factor, separate by adding a comma in between each number. Providing more than one scaling factor presumes there are more than one parameter to be post-processed. - If so, \xmlNode{Features}, \xmlNode{Targets}, and \xmlNode{scaleBeta} must have the same number scaling factors. + If so, \xmlNode{prototypeOutputs}, \xmlNode{targetOutputs}, and \xmlNode{scaleBeta} must have the same number scaling factors. \end{itemize} \textbf{Example:} @@ -134,8 +136,8 @@ \subsubsection{Validation PostProcessors} ... - outMC1|x1,outMC1|y1 - outMC2|x2,outMC2|y2 + outMC1|x1,outMC1|y1 + outMC2|x2,outMC2|y2 dss time1 time2 @@ -166,12 +168,12 @@ \subsubsection{Validation PostProcessors} % \begin{itemize} - \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features, which can be the measuables/observables of the mock model. Reader should be warned that this nomenclature is different than the Machine learning nomenclature. + \item \xmlNode{prototypeOutputs}, \xmlDesc{comma separated string, required field}, specifies the names of the prototypeOutputs, which can be the measuables/observables of the mock model. Reader should be warned that this nomenclature is different than the Machine learning nomenclature. - \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of - targets. These are the Figures of merit (FOMs) in the target model against which the mock model is being validated. + \item \xmlNode{targetOutputs}, \xmlDesc{comma separated string, required field}, contains a comma separated list of + targetOutputs. These are the Figures of merit (FOMs) in the target model against which the mock model is being validated. - \item \xmlNode{featureParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the mock model. + \item \xmlNode{prototypeParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the mock model. \item \xmlNode{targetParameters}, \xmlDesc{comma separated string, required field}, contains a comma separated list of target parameters/inputs. @@ -260,6 +262,92 @@ \subsubsection{Validation PostProcessors} \end{itemize} +\textbf{Example: Representativity} +\begin{lstlisting}[style=XML,morekeywords={subType}] + + ... + + + inputPlaceHolder2 + linModel + MC_external + outputDataMC1 + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + + ... + + ... + + outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 + outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 + outputDataMC1|p1,outputDataMC1|p2 + outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|time + + ... + + ... 
+ + InputPlaceHolder + + BiasFactor_MockF1_TarFOM1, + BiasFactor_MockF1_TarFOM2, + BiasFactor_MockF1_TarFOM3, + BiasFactor_MockF2_TarFOM1, + BiasFactor_MockF2_TarFOM2, + BiasFactor_MockF2_TarFOM3, + BiasFactor_MockF3_TarFOM1, + BiasFactor_MockF3_TarFOM2, + BiasFactor_MockF3_TarFOM3, + ExactBiasFactor_MockF1_TarFOM1, + ExactBiasFactor_MockF1_TarFOM2, + ExactBiasFactor_MockF1_TarFOM3, + ExactBiasFactor_MockF2_TarFOM1, + ExactBiasFactor_MockF2_TarFOM2, + ExactBiasFactor_MockF2_TarFOM3, + ExactBiasFactor_MockF3_TarFOM1, + ExactBiasFactor_MockF3_TarFOM2, + ExactBiasFactor_MockF3_TarFOM3, + CorrectedParameters_p1, + CorrectedParameters_p2, + CorrectedTargets_FOM1, + CorrectedTargets_FOM2, + CorrectedTargets_FOM3, + VarianceInCorrectedParameters_p1, + VarianceInCorrectedParameters_p2, + CovarianceInCorrectedParameters_p1_p2, + CovarianceInCorrectedParameters_p2_p1, + CorrectedVar_TarFOM1, + CorrectedVar_TarFOM2, + CorrectedVar_TarFOM3, + ExactCorrectedVar_TarFOM1, + ExactCorrectedVar_TarFOM2, + ExactCorrectedVar_TarFOM3, + CorrectedCov_TarFOM1_TarFOM2, + CorrectedCov_TarFOM2_TarFOM1, + CorrectedCov_TarFOM1_TarFOM3, + CorrectedCov_TarFOM3_TarFOM1, + CorrectedCov_TarFOM2_TarFOM3, + CorrectedCov_TarFOM3_TarFOM2, + ExactCorrectedCov_TarFOM1_TarFOM2, + ExactCorrectedCov_TarFOM2_TarFOM1, + ExactCorrectedCov_TarFOM1_TarFOM3, + ExactCorrectedCov_TarFOM3_TarFOM1, + ExactCorrectedCov_TarFOM2_TarFOM3, + ExactCorrectedCov_TarFOM3_TarFOM2 + + + ... + + \end{lstlisting} \paragraph{PCM} \textbf{PCM} evaluates the uncertainty reduction fraction and obtain posterior distribution of Target @@ -269,13 +357,13 @@ \subsubsection{Validation PostProcessors} \begin{itemize} \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, defaulted as `time', and required by Snapshot and Tdep PCM. - \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features. - \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of - targets. \nb Each target will be validated using all features listed in xml node \xmlNode{Features}. The - number of targets is not necessarily equal to the number of features. + \item \xmlNode{prototypeOutputs}, \xmlDesc{comma separated string, required field}, specifies the names of the prototypeOutputs. + \item \xmlNode{targetOutputs}, \xmlDesc{comma separated string, required field}, contains a comma separated list of + targetOutputs. \nb Each target will be validated using all prototypeOutputs listed in xml node \xmlNode{prototypeOutputs}. The + number of targetOutputs is not necessarily equal to the number of prototypeOutputs. \item \xmlNode{Measurements}, \xmlDesc{comma separated string, required field}, contains a comma separated list of - measurements of the features. \nb Each measurement correspond to a feature listed in xml node \xmlNode{Features}. The - number of measurements should be equal to the number of features and in the same order as the features listed in \xmlNode{Features}. + measurements of the features. \nb Each measurement correspond to a feature listed in xml node \xmlNode{prototypeOutputs}. The + number of measurements should be equal to the number of prototypeOutputs and in the same order as the prototypeOutputs listed in \xmlNode{prototypeOutputs}. \item \xmlNode{pcmType}, \xmlDesc{string, required field}, contains the string given by users to choose the version of PCM to be applied. 
\nb It has three options: `Static', `Snapshot', and `Tdep', corresponding to the three PCM versions. \item \xmlNode{ReconstructionError}, \xmlDesc{float, optional field}, contains the value given by users to determind the @@ -283,97 +371,27 @@ \subsubsection{Validation PostProcessors} \end{itemize} The output of Static PCM is comma separated list of strings in the format of ``pri\textunderscore post\textunderscore stdReduct\textunderscore [targetName]'', -where [targetName] is the $VariableName$ specified in DataObject of \xmlNode{Targets}. +where [targetName] is the $VariableName$ specified in DataObject of \xmlNode{targetOutputs}. The output of Snapshot PCM includes two comma separated lists ``time'' and ``snapshot\textunderscore pri\textunderscore post\textunderscore stdReduct'', -which corresponding to the timesteps and uncertainty reduction fraction of the time-series Target data specified in DataObject of \xmlNode{Targets}. +which corresponding to the timesteps and uncertainty reduction fraction of the time-series Target data specified in DataObject of \xmlNode{targetOutputs}. The output of Tdep PCM includes three comma separated lists ``time'', ``Tdep\textunderscore post\textunderscore mean'', and ``Error'', -which corresponding to the timesteps, posterior mean, and error between posterior and prior Target data specified in DataObject of \xmlNode{Targets}. +which corresponding to the timesteps, posterior mean, and error between posterior and prior Target data specified in DataObject of \xmlNode{targetOutputs}. \textbf{Example: Static PCM} \begin{lstlisting}[style=XML,morekeywords={subType}] - -... - - - inputPlaceHolder2 - linModel - MC_external - outputDataMC1 - outputDataMC2 - - - outputDataMC1 - outputDataMC2 - pp1 - pp1_metric - pp1_metric_dump - - -... - -... - - outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 - outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 - outputDataMC1|p1,outputDataMC1|p2 - outputDataMC2|p1,outputDataMC2|p2 - outputDataMC1|time - -... - -... - - InputPlaceHolder - - BiasFactor_MockF1_TarFOM1, - BiasFactor_MockF1_TarFOM2, - BiasFactor_MockF1_TarFOM3, - BiasFactor_MockF2_TarFOM1, - BiasFactor_MockF2_TarFOM2, - BiasFactor_MockF2_TarFOM3, - BiasFactor_MockF3_TarFOM1, - BiasFactor_MockF3_TarFOM2, - BiasFactor_MockF3_TarFOM3, - ExactBiasFactor_MockF1_TarFOM1, - ExactBiasFactor_MockF1_TarFOM2, - ExactBiasFactor_MockF1_TarFOM3, - ExactBiasFactor_MockF2_TarFOM1, - ExactBiasFactor_MockF2_TarFOM2, - ExactBiasFactor_MockF2_TarFOM3, - ExactBiasFactor_MockF3_TarFOM1, - ExactBiasFactor_MockF3_TarFOM2, - ExactBiasFactor_MockF3_TarFOM3, - CorrectedParameters_p1, - CorrectedParameters_p2, - CorrectedTargets_FOM1, - CorrectedTargets_FOM2, - CorrectedTargets_FOM3, - VarianceInCorrectedParameters_p1, - VarianceInCorrectedParameters_p2, - CovarianceInCorrectedParameters_p1_p2, - CovarianceInCorrectedParameters_p2_p1, - CorrectedVar_TarFOM1, - CorrectedVar_TarFOM2, - CorrectedVar_TarFOM3, - ExactCorrectedVar_TarFOM1, - ExactCorrectedVar_TarFOM2, - ExactCorrectedVar_TarFOM3, - CorrectedCov_TarFOM1_TarFOM2, - CorrectedCov_TarFOM2_TarFOM1, - CorrectedCov_TarFOM1_TarFOM3, - CorrectedCov_TarFOM3_TarFOM1, - CorrectedCov_TarFOM2_TarFOM3, - CorrectedCov_TarFOM3_TarFOM2, - ExactCorrectedCov_TarFOM1_TarFOM2, - ExactCorrectedCov_TarFOM2_TarFOM1, - ExactCorrectedCov_TarFOM1_TarFOM3, - ExactCorrectedCov_TarFOM3_TarFOM1, - ExactCorrectedCov_TarFOM2_TarFOM3, - ExactCorrectedCov_TarFOM3_TarFOM2 - - -... - + + ... + + ... 
+ + outputDataMC1|F1,outputDataMC1|F2 + outputDataMC2|F2,outputDataMC2|F3,outputDataMC2|F4 + msrData|F1,msrData|F2 + + ... + + ... + \end{lstlisting} \textbf{Example: Snapshot PCM} @@ -384,8 +402,8 @@ \subsubsection{Validation PostProcessors} ... time - exp|TempC - app|TempD + exp|TempC + app|TempD msr|TempMsrC Snapshot @@ -403,8 +421,8 @@ \subsubsection{Validation PostProcessors} ... time - exp|TempC - app|TempD + exp|TempC + app|TempD msr|TempMsrC Tdep 0.001 From 8c271cb7ac9e5f6192238c559eaf0ac890065a6c Mon Sep 17 00:00:00 2001 From: mohammad-abdo Date: Thu, 28 Sep 2023 12:29:39 -0600 Subject: [PATCH 89/95] modifying rquations 71,74,79 --- .../Models/PostProcessors/Validations/Representativity.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index c194c3b868..6b8b349de5 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -437,12 +437,12 @@ def _targetCorrection(self, FOMs, UparVar, Umes, UmesVar, normalizedSenTar, norm @ Out, propagetedExpUncert, np.array, propagated variance covariance matrix of experiments due to parameter uncertainties """ # Compute adjusted target #eq 71 - UtarTilde = normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenTar.T + UmesVar) @ Umes + UtarTilde = normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar) @ Umes # back transform to parameters tarTilde = UtarTilde * FOMs + FOMs # Compute adjusted par_var #eq 74 - UtarVarTilde = normalizedSenTar @ UparVar @ normalizedSenTar.T - normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenTar.T + UmesVar) @ normalizedSenExp @ UparVar @ normalizedSenTar.T + UtarVarTilde = normalizedSenTar @ UparVar @ normalizedSenTar.T - normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar) @ normalizedSenExp @ UparVar @ normalizedSenTar.T # back transform the variance UtarVarTildeDiag = np.diagonal(UtarVarTilde) @@ -457,8 +457,8 @@ def _targetCorrection(self, FOMs, UparVar, Umes, UmesVar, normalizedSenTar, norm # Compute adjusted par_var neglecting UmesVar (to compare to representativity) # The representativity (#eq 79 negelcts UmesVar) propagetedExpUncert = (normalizedSenExp @ UparVar) @ normalizedSenExp.T - UtarVarztilde_no_UmesVar = (normalizedSenTar @ UparVar @ normalizedSenTar.T)\ + UtarVartilde_no_UmesVar = (normalizedSenTar @ UparVar @ normalizedSenTar.T)\ - (normalizedSenTar @ UparVar @ normalizedSenExp.T)\ @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenExp.T)\ @ (normalizedSenExp @ UparVar @ normalizedSenTar.T) - return tarTilde, tarVarTilde, UtarVarTilde, UtarVarztilde_no_UmesVar, propagetedExpUncert + return tarTilde, tarVarTilde, UtarVarTilde, UtarVartilde_no_UmesVar, propagetedExpUncert From 1d33eaa729b50e790a32a5124cf1fec56743a6a3 Mon Sep 17 00:00:00 2001 From: Congjian Wang Date: Thu, 28 Sep 2023 12:45:24 -0600 Subject: [PATCH 90/95] fix equation 71 and 74 --- .../Models/PostProcessors/Validations/Representativity.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py 
b/ravenframework/Models/PostProcessors/Validations/Representativity.py index c194c3b868..6b8b349de5 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -437,12 +437,12 @@ def _targetCorrection(self, FOMs, UparVar, Umes, UmesVar, normalizedSenTar, norm @ Out, propagetedExpUncert, np.array, propagated variance covariance matrix of experiments due to parameter uncertainties """ # Compute adjusted target #eq 71 - UtarTilde = normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenTar.T + UmesVar) @ Umes + UtarTilde = normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar) @ Umes # back transform to parameters tarTilde = UtarTilde * FOMs + FOMs # Compute adjusted par_var #eq 74 - UtarVarTilde = normalizedSenTar @ UparVar @ normalizedSenTar.T - normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenTar.T + UmesVar) @ normalizedSenExp @ UparVar @ normalizedSenTar.T + UtarVarTilde = normalizedSenTar @ UparVar @ normalizedSenTar.T - normalizedSenTar @ UparVar @ normalizedSenExp.T @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenExp.T + UmesVar) @ normalizedSenExp @ UparVar @ normalizedSenTar.T # back transform the variance UtarVarTildeDiag = np.diagonal(UtarVarTilde) @@ -457,8 +457,8 @@ def _targetCorrection(self, FOMs, UparVar, Umes, UmesVar, normalizedSenTar, norm # Compute adjusted par_var neglecting UmesVar (to compare to representativity) # The representativity (#eq 79 negelcts UmesVar) propagetedExpUncert = (normalizedSenExp @ UparVar) @ normalizedSenExp.T - UtarVarztilde_no_UmesVar = (normalizedSenTar @ UparVar @ normalizedSenTar.T)\ + UtarVartilde_no_UmesVar = (normalizedSenTar @ UparVar @ normalizedSenTar.T)\ - (normalizedSenTar @ UparVar @ normalizedSenExp.T)\ @ np.linalg.pinv(normalizedSenExp @ UparVar @ normalizedSenExp.T)\ @ (normalizedSenExp @ UparVar @ normalizedSenTar.T) - return tarTilde, tarVarTilde, UtarVarTilde, UtarVarztilde_no_UmesVar, propagetedExpUncert + return tarTilde, tarVarTilde, UtarVarTilde, UtarVartilde_no_UmesVar, propagetedExpUncert From 5c29aec243e5ec4915476b10c6ef365bdf737d67 Mon Sep 17 00:00:00 2001 From: congjian wang Date: Mon, 16 Oct 2023 21:04:46 -0600 Subject: [PATCH 91/95] address comments --- doc/user_manual/raven_user_manual.bib | 3 ++- .../Models/PostProcessors/Validations/Representativity.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/user_manual/raven_user_manual.bib b/doc/user_manual/raven_user_manual.bib index 66b13c904f..55c4a9c84f 100644 --- a/doc/user_manual/raven_user_manual.bib +++ b/doc/user_manual/raven_user_manual.bib @@ -146,6 +146,7 @@ @article{palmiotti2 @article{Epiney1, title={A Systematic Approach to Inform Experiment Design Through Modern Modeling and Simulation Methods}, author={Epiney, A and Rabiti, C and Davis, C}, + journal={Proc. 18th Int. Topl. Mtg. 
on Nuclear Reactor Thermal Hydraulics (NURETH-18)} year={2019} } @@ -157,4 +158,4 @@ @inproceedings{Epiney2 pages={V003T13A055}, year={2020}, organization={American Society of Mechanical Engineers} -} \ No newline at end of file +} diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 6b8b349de5..92ad6b5b67 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -29,6 +29,7 @@ #Internal Modules------------------------------------------------------------------------------------ from ravenframework.utils import InputData, InputTypes from .. import ValidationBase +from .. import factory as ppFactory # delay import to allow definition #Internal Modules End-------------------------------------------------------------------------------- class Representativity(ValidationBase): @@ -81,7 +82,6 @@ def getBasicStat(self): @ In, None @ Out, stat, object, Basic Statistic PostProcessor Object """ - from .. import factory as ppFactory # delay import to allow definition stat = ppFactory.returnInstance('BasicStatistics') stat.what = ['NormalizedSensitivities'] # expected value calculation return stat From 396dbc5c7e4ef7bdb14d9466045e9adf6679717a Mon Sep 17 00:00:00 2001 From: congjian wang Date: Tue, 17 Oct 2023 09:50:29 -0600 Subject: [PATCH 92/95] delay import factory to allow definition --- .../Models/PostProcessors/Validations/Representativity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index 92ad6b5b67..ef2e7ea31e 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -29,7 +29,6 @@ #Internal Modules------------------------------------------------------------------------------------ from ravenframework.utils import InputData, InputTypes from .. import ValidationBase -from .. import factory as ppFactory # delay import to allow definition #Internal Modules End-------------------------------------------------------------------------------- class Representativity(ValidationBase): @@ -68,6 +67,7 @@ def __init__(self): @ Out, None """ super().__init__() + from .. import factory as ppFactory # delay import to allow definition self.printTag = 'POSTPROCESSOR Representativity' self.dynamicType = ['static'] # for now only static is available self.name = 'Representativity' From 645644e192c3188faf63431bd54258fa0648e5db Mon Sep 17 00:00:00 2001 From: congjian wang Date: Tue, 17 Oct 2023 10:03:03 -0600 Subject: [PATCH 93/95] revert changes --- .../Models/PostProcessors/Validations/Representativity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py index ef2e7ea31e..6b8b349de5 100644 --- a/ravenframework/Models/PostProcessors/Validations/Representativity.py +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -67,7 +67,6 @@ def __init__(self): @ Out, None """ super().__init__() - from .. 
From 6d7d77bb2f80c4a8a73165d9952038e7b3e1e16f Mon Sep 17 00:00:00 2001
From: mohammad-abdo
Date: Sun, 5 Nov 2023 22:24:53 -0700
Subject: [PATCH 94/95] addressing Josh's comment by returning a different
 variable instead of modifying the input dataset

---
 .../Validations/Representativity.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py
index 6b8b349de5..293ea47d11 100644
--- a/ravenframework/Models/PostProcessors/Validations/Representativity.py
+++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py
@@ -323,14 +323,15 @@ def _computeMoments(self, datasets, features, targets):
       @ In, targets, names of target variables: figures of merit (FOMs)
       @ out, datasets, xarray datasets, datasets after adding moments
     """
-    for var in [x.split("|")[-1] for x in features + targets]: #datasets.data_vars
-      datasets[var].attrs['meanValue'] = np.mean(datasets[var].values)
+    moments = datasets.copy()
+    for var in [x.split("|")[-1] for x in features + targets]:
+      moments[var].attrs['meanValue'] = np.mean(datasets[var].values)
       for var2 in [x.split("|")[-1] for x in features + targets]:
         if var == var2:
-          datasets[var2].attrs['var'] = np.var(datasets[var].values)
+          moments[var2].attrs['var'] = np.var(datasets[var].values)
         else:
-          datasets[var2].attrs['cov_'+str(var)] = np.cov(datasets[var2].values,datasets[var].values)
-    return datasets
+          moments[var2].attrs['cov_'+str(var)] = np.cov(datasets[var2].values,datasets[var].values)
+    return moments

   def _computeErrors(self,datasets,features,targets):
     """
@@ -340,9 +341,10 @@ def _computeErrors(self,datasets,features,targets):
       @ In, targets, names of target variables: figures of merit (FOMs)
       @ out, datasets, xarray datasets, datasets after computing errors in each variable
     """
+    errors = datasets.copy()
     for var in [x.split("|")[-1] for x in features + targets]:
-      datasets['err_'+str(var)] = (datasets[var].values - datasets[var].attrs['meanValue'])/datasets[var].attrs['meanValue']
-    return datasets
+      errors['err_'+str(var)] = (datasets[var].values - datasets[var].attrs['meanValue'])/datasets[var].attrs['meanValue']
+    return errors
   def _computeUncertaintyMatrixInErrors(self, data, parameters):
     """
       A utility function to compute the variance and covariance of variables in the error space
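The refactor in patch 94 follows a simple rule: compute on datasets.copy() and return the copy, so the caller's Dataset is never mutated. xarray's copy() is shallow by default, which should be cheap here, since the underlying sample arrays stay shared and only new variables and attributes are added. A small self-contained sketch of the same relative-error transform on synthetic data (the variable names expA and fomB are invented):

```python
# Sketch of the non-mutating error computation from patch 94, on synthetic
# data; this is an illustration of the pattern, not the RAVEN method itself.
import numpy as np
import xarray as xr

rng = np.random.default_rng(0)
ds = xr.Dataset({
    'expA': ('RAVEN_sample_ID', rng.normal(10.0, 1.0, 100)),
    'fomB': ('RAVEN_sample_ID', rng.normal(5.0, 0.5, 100)),
})

def compute_errors(datasets, variables):
    """Return a new Dataset holding relative errors; the input is untouched."""
    errors = datasets.copy()  # shallow copy: new container, shared data arrays
    for var in variables:
        mean = float(np.mean(datasets[var].values))
        errors[var].attrs['meanValue'] = mean
        # relative deviation from the sample mean, per realization
        errors['err_' + var] = (datasets[var] - mean) / mean
    return errors

out = compute_errors(ds, ['expA', 'fomB'])
assert 'err_expA' in out and 'err_expA' not in ds  # original left unmodified
print(float(out['err_fomB'].std()))
```

One remaining nit the patch does not touch: the docstrings still advertise datasets as the output, while the functions now return moments and errors.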
From 548f9f76eb0b7f086f39fa9595556b7be62f3acd Mon Sep 17 00:00:00 2001
From: Congjian Wang
Date: Mon, 6 Nov 2023 14:58:52 -0700
Subject: [PATCH 95/95] fix docs

---
 doc/user_manual/raven_user_manual.bib | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/user_manual/raven_user_manual.bib b/doc/user_manual/raven_user_manual.bib
index 55c4a9c84f..137aa3ef1a 100644
--- a/doc/user_manual/raven_user_manual.bib
+++ b/doc/user_manual/raven_user_manual.bib
@@ -146,7 +146,7 @@
 @article{Epiney1,
   title={A Systematic Approach to Inform Experiment Design Through Modern Modeling and Simulation Methods},
   author={Epiney, A and Rabiti, C and Davis, C},
-  journal={Proc. 18th Int. Topl. Mtg. on Nuclear Reactor Thermal Hydraulics (NURETH-18)}
+  journal={Proc. 18th Int. Topl. Mtg. on Nuclear Reactor Thermal Hydraulics (NURETH-18)},
   year={2019}
 }