diff --git a/doc/user_manual/couplingAcode.tex b/doc/user_manual/couplingAcode.tex
index 217eb9f1cc..ae079e1207 100644
--- a/doc/user_manual/couplingAcode.tex
+++ b/doc/user_manual/couplingAcode.tex
@@ -17,7 +17,7 @@ \section{Advanced Users: How to couple a new code}
 path/to/raven/distribution/raven/framework/CodeInterfaces/
 \end{lstlisting}
 At the initialization stage, RAVEN imports all the Interfaces that are contained in this directory and performs some preliminary cross-checks.
-\\It is important to notice that the name of class in the Interface module is the one the user needs to specify when the new interface 
+\\It is important to note that the name of the class in the Interface module is the one the user needs to specify when the new interface
 needs to be used. For example, if the Interface module contains the class ``NewCode'', the \textit{subType} in the \xmlNode{Code} block will be ``NewCode'':
 \begin{lstlisting}[language=python]
 class NewCode(CodeInterfaceBase):
 ...
@@ -158,6 +158,9 @@ \subsection{Pre-requisites.}
   result1, result2, result3
   aValue1, aValue2, aValue3
 \end{lstlisting}
+Note that, in general, RAVEN accepts both floats and strings as data types in the CSV.
+However, if the CSV produced by running the code has a large number of columns (say, over 1000), it
+is necessary to include only floats and to change the CSV loading utility; see Section \ref{subsubsec:setCsvLoadUtil} below.
 %%%%%%%
 \subsection{Code Interface Creation}
 \label{subsec:codeinterfacecreation}
@@ -179,7 +182,7 @@ \subsection{Code Interface Creation}
 from CodeInterfaceBaseClass import CodeInterfaceBase
 class NewCode(CodeInterfaceBase):
   ...
-  def initialize(self, runInfoDict, oriInputFiles) 
+  def initialize(self, runInfoDict, oriInputFiles)
   def finalizeCodeOutput(self, command, output, workingDir)
   def getInputExtension(self)
   def checkForOutputFailure(self, output, workingDir)
@@ -268,7 +271,7 @@ \subsubsection{Method: \texttt{createNewInput}}
   input files. This list of files is the one the code interface needs to use to print the new perturbed list of files.
   Indeed, RAVEN already changes the file location in sub-directories and the Code Interface does not need to change the filename or location of the files. For example, the files are going to have a absolute path as following:
-  $.\\ path\_to\_working\_directory\\stepName\\anUniqueIdentifier\\filename.extension$. In case of sampling, the 
+  $.\\ path\_to\_working\_directory\\stepName\\anUniqueIdentifier\\filename.extension$. In case of sampling, the
   ``\textit{anUniqueIdentifier}'' is going to be an integer (e.g. 1).
 \item \textbf{\texttt{oriInputFiles}} , data type = list, List of the original input files;
 \item \textbf{\texttt{samplerType}} , data type = string, Sampler type (e.g. MonteCarlo,
@@ -318,7 +321,7 @@ \subsubsection{Method: \texttt{initialize}}
 def initialize(self, runInfoDict, oriInputFiles)
 \end{lstlisting}
 The \textbf{initialize} function is an optional method. If present, it is called
-by RAVEN code at the begin of each Step (once per step) involving the particular Code Interface. 
+by the RAVEN code at the beginning of each Step (once per step) involving the particular Code Interface.
 This method is generally indicated to retrieve information from the RunInfo and/or the Input files.
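+For instance, a minimal sketch of such an \textbf{initialize} implementation (the attribute names
+used here are purely illustrative) could be:
+\begin{lstlisting}[language=python]
+from CodeInterfaceBaseClass import CodeInterfaceBase
+class NewCode(CodeInterfaceBase):
+  ...
+  def initialize(self, runInfoDict, oriInputFiles):
+    # keep whatever is needed by later calls (e.g. createNewInput, finalizeCodeOutput)
+    self._runInfo = runInfoDict       # illustrative attribute name
+    self._origInputs = oriInputFiles  # illustrative attribute name
+\end{lstlisting}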
\\RAVEN is going to call this function passing in the following arguments: \begin{itemize} @@ -390,7 +393,7 @@ \subsubsection{Method: \texttt{setRunOnShell}} some specific use cases, the following argument may need to be setted by the code interface developers: \begin{itemize} \item{shell}, the default value is \textbf{True}. If shell is \textbf{True}, the specified command - generated by RAVEN will be executed through the shell. This will allow RAVEN to have an enhanced + generated by RAVEN will be executed through the shell. This will allow RAVEN to have an enhanced control flow with convenient access to other shell features such as shell pipes, filename wildcards, environment variable expansion, and expansion of ``~'' to a user's home directory. If shell is \textbf{False}, all the shell based features are disabled. In other words, the users could not use the @@ -406,6 +409,21 @@ \subsubsection{Method: \texttt{setRunOnShell}} \end{lstlisting} \end{itemize} +\subsubsection{Method: \texttt{setCsvLoadUtil}} +\label{subsubsec:setCsvLoadUtil} +\begin{lstlisting}[language=python] +self.setCsvLoadUtil('pandas') +\end{lstlisting} +The default CSV loader in RAVEN is pandas, which allows arbitrary data types in the CSV, generally +strings and floats. However, arbitrary data can be challenging to load if there are a large number +of columns in the code's output CSV that RAVEN attempts to read in. As a rule of thumb, if there are +over 1000 columns in a typical output CSV for your Code, the resulting values should only be floats +and integers (not strings), and this method should be called during the CodeInterface construction +or initialization +to set the loading utility to \texttt{numpy}. While RAVEN's \texttt{numpy} CSV loading is notably +faster than RAVEN's \texttt{pandas} CSV loading, it does not allow the flexibility of string entries +except in the CSV header. + \subsection{Tools for Developing Code Interfaces} To make generating a code interface as simple as possible, there are several tools RAVEN makes available within the Code Interface objects. diff --git a/framework/CodeInterfaceBaseClass.py b/framework/CodeInterfaceBaseClass.py index 81af90dc22..06c927ad53 100644 --- a/framework/CodeInterfaceBaseClass.py +++ b/framework/CodeInterfaceBaseClass.py @@ -24,6 +24,7 @@ #Internal Modules------------------------------------------------------------------------------------ from utils import utils +import CsvLoader #Internal Modules End-------------------------------------------------------------------------------- class CodeInterfaceBase(utils.metaclass_insert(abc.ABCMeta,object)): @@ -44,6 +45,7 @@ def __init__(self): self.inputExtensions = [] # list of input extensions self._runOnShell = True # True if the specified command by the code interfaces will be executed through shell. 
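+    # NOTE (editorial sketch): the _csvLoadUtil attribute added just below selects how RAVEN reads
+    # this interface's output CSVs: 'pandas' (the default) tolerates string entries, while 'numpy'
+    # assumes all-float data and is considerably faster for very wide CSVs (see setCsvLoadUtil).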
    self._ravenWorkingDir = None # location of RAVEN's main working directory
+    self._csvLoadUtil = 'pandas' # utility to use to load CSVs

   def setRunOnShell(self, shell=True):
     """
@@ -61,6 +63,27 @@ def getRunOnShell(self):
     """
     return self._runOnShell

+  def getCsvLoadUtil(self):
+    """
+      Returns the string representation of the CSV loading utility to use
+      @ In, None
+      @ Out, getCsvLoadUtil, str, name of utility to use
+    """
+    # default to pandas, overwrite to 'numpy' if all of the following:
+    # - all entries are guaranteed to be floats
+    # - the results CSVs have a large number of headers (>1000)
+    return self._csvLoadUtil
+
+  def setCsvLoadUtil(self, util):
+    """
+      Sets the CSV loading utility to use
+      @ In, util, str, name of utility to use
+      @ Out, None
+    """
+    ok = CsvLoader.CsvLoader.acceptableUtils
+    if util not in ok:
+      raise TypeError(f'Unrecognized CSV loading utility: "{util}"! Expected one of: {ok}')
+    self._csvLoadUtil = util
+
   def genCommand(self, inputFiles, executable, flags=None, fileArgs=None, preExec=None):
     """
       This method is used to retrieve the command (in tuple format) needed to launch the Code.
diff --git a/framework/CodeInterfaces/CobraTF/CTFinterface.py b/framework/CodeInterfaces/CobraTF/CTFinterface.py
index f7de107a8f..b02aa1909f 100644
--- a/framework/CodeInterfaces/CobraTF/CTFinterface.py
+++ b/framework/CodeInterfaces/CobraTF/CTFinterface.py
@@ -17,15 +17,25 @@
 """
 from __future__ import division, print_function, unicode_literals, absolute_import
+import os
 from ctfdata import ctfdata
 from CodeInterfaceBaseClass import CodeInterfaceBase
 from GenericCodeInterface import GenericParser
-import os

 class CTF(CodeInterfaceBase):
   """
     this class is used a part of a code dictionary to specialize Model.Code for CTF (Cobra-TF)
   """
+  def __init__(self):
+    """
+      Constructor.
+      @ In, None
+      @ Out, None
+    """
+    CodeInterfaceBase.__init__(self)
+    # CTF creates enormous CSVs that are all floats, so we use numpy to speed up the loading
+    self.setCsvLoadUtil('numpy')
+
   def finalizeCodeOutput(self,command,output,workingDir):
     """
       This method is called by the RAVEN code at the end of each code run to create CSV files containing the code output results.
diff --git a/framework/CsvLoader.py b/framework/CsvLoader.py
index bcbd323cb8..df34a42c3c 100644
--- a/framework/CsvLoader.py
+++ b/framework/CsvLoader.py
@@ -21,10 +21,10 @@
 from __future__ import division, print_function, unicode_literals, absolute_import
 #End compatibility block for Python 3----------------------------------------------------------------

 #External Modules------------------------------------------------------------------------------------
 import numpy as np
-from scipy.interpolate import interp1d
-import copy
+import pandas as pd
 #External Modules End--------------------------------------------------------------------------------

 #Internal Modules------------------------------------------------------------------------------------
@@ -36,43 +36,91 @@ class CsvLoader(MessageHandler.MessageUser):
   """
     Class aimed to load the CSV files
   """
-  def __init__(self,messageHandler):
+  acceptableUtils = ['pandas', 'numpy']
+
+  def __init__(self, messageHandler):
     """
       Constructor
       @ In, messageHandler, MessageHandler, the message handler
       @ Out, None
     """
-    self.allOutParam = False # all output parameters?
- self.allFieldNames = [] - self.type = 'CsvLoader' - self.printTag = self.type - self.messageHandler = messageHandler + self.type = 'CsvLoader' # naming type for this class + self.printTag = self.type # message handling representation + self.allOutParam = False # all output parameters? + self.allFieldNames = [] # "header" of the CSV file + self.messageHandler = messageHandler # message handling utility + + def loadCsvFile(self, myFile, nullOK=None, utility='pandas'): + """ + Function to load a csv file into realization format + It also retrieves the headers + The format of the csv must be comma-separated (pandas readable) + @ In, myFile, string, Input file name (absolute path) + @ In, nullOK, bool, indicates if null values are acceptable + @ In, utility, str, indicates which utility should be used to load the csv + @ Out, loadCsvFile, pandas.DataFrame or numpy.ndarray, the loaded data + """ + if utility == 'pandas': + return self._loadCsvPandas(myFile, nullOK=nullOK) + elif utility == 'numpy': + return self._loadCsvNumpy(myFile, nullOK=nullOK) + else: + self.raiseAnError(RuntimeError, f'Unrecognized CSV loading utility: "{utility}"') + + def _loadCsvPandas(self, myFile, nullOK=None): + """ + Function to load a csv file into realization format + It also retrieves the headers + The format of the csv must be comma-separated (pandas readable) + @ In, myFile, string, Input file name (absolute path) + @ In, nullOK, bool, indicates if null values are acceptable + @ Out, df, pandas.DataFrame, the loaded data + """ + # first try reading the file + try: + df = pd.read_csv(myFile) + except pd.errors.EmptyDataError: + # no data in file + self.raiseAWarning(f'Tried to read data from "{myFile}", but the file is empty!') + return + else: + self.raiseADebug(f'Reading data from "{myFile}"') + # check for NaN contents -> this isn't allowed in RAVEN currently, although we might need to change this for ND + if (not nullOK) and (pd.isnull(df).values.sum() != 0): + bad = pd.isnull(df).any(1).nonzero()[0][0] + self.raiseAnError(IOError, f'Invalid data in input file: row "{bad+1}" in "{myFile}"') + self.allFieldNames = list(df.columns) + return df - def loadCsvFile(self,myFile): + def _loadCsvNumpy(self, myFile, nullOK=None): """ - Function to load a csv file into a numpy array (2D) + Function to load a csv file into realization format It also retrieves the headers - The format of the csv must be: - STRING,STRING,STRING,STRING - FLOAT ,FLOAT ,FLOAT ,FLOAT - ... 
- FLOAT ,FLOAT ,FLOAT ,FLOAT - @ In, fileIn, string, Input file name (absolute path) - @ Out, data, numpy.ndarray, the loaded data + The format of the csv must be comma-separated with all floats after header row + @ In, myFile, string, Input file name (absolute path) + @ In, nullOK, bool, indicates if null values are acceptable + @ Out, data, np.ndarray, the loaded data """ - # open file - myFile.open(mode='rb') - # read the field names - head = myFile.readline().decode() - self.allFieldNames = head.split(',') - for index in range(len(self.allFieldNames)): - self.allFieldNames[index] = self.allFieldNames[index].strip() - # load the table data (from the csv file) into a numpy nd array - data = np.loadtxt(myFile,dtype='float',delimiter=',',ndmin=2,skiprows=1) - # close file - myFile.close() + with open(myFile, 'rb') as f: + head = f.readline().decode() + self.allFieldNames = list(x.strip() for x in head.split(',')) + data = np.loadtxt(myFile, dtype=float, delimiter=',', ndmin=2, skiprows=1) return data + def toRealization(self, data): + """ + Converts data from the "loadCsvFile" format to a realization-style format (dictionary + currently) + @ In, data, pandas.DataFrame or np.ndarray, result of loadCsvFile + @ Out, rlz, dict, realization + """ + rlz = {} + if isinstance(data, pd.DataFrame): + rlz = dict((header, np.array(data[header])) for header in self.allFieldNames) + elif isinstance(data, np.ndarray): + rlz = dict((header, entry) for header, entry in zip(self.allFieldNames, data.T)) + return rlz + def getAllFieldNames(self): """ Function to get all field names found in the csv file diff --git a/framework/DataObjects/DataSet.py b/framework/DataObjects/DataSet.py index 85f20c0a7a..a4ecdc74ff 100644 --- a/framework/DataObjects/DataSet.py +++ b/framework/DataObjects/DataSet.py @@ -35,6 +35,8 @@ from .DataObject import DataObject except ValueError: from DataObject import DataObject + +import CsvLoader from utils import utils, cached_ndarray, xmlUtils, mathUtils # for profiling with kernprof @@ -1802,19 +1804,8 @@ def _readPandasCSV(self, fname, nullOK=None): # datasets can have them because we don't have a 2d+ CSV storage strategy yet else: nullOK = True - # first try reading the file - try: - df = pd.read_csv(fname) - except pd.errors.EmptyDataError: - # no data in file - self.raiseAWarning('Tried to read data from "{}", but the file is empty!'.format(fname+'.csv')) - return - else: - self.raiseADebug('Reading data from "{}.csv"'.format(fname)) - # check for NaN contents -> this isn't allowed in RAVEN currently, although we might need to change this for ND - if (not nullOK) and (pd.isnull(df).values.sum() != 0): - bad = pd.isnull(df).any(1).nonzero()[0][0] - self.raiseAnError(IOError,'Invalid data in input file: row "{}" in "{}"'.format(bad+1,fname)) + loader = CsvLoader.CsvLoader(self.messageHandler) + df = loader.loadCsvFile(fname, nullOK=nullOK) return df def _resetScaling(self): diff --git a/framework/DataObjects/HistorySet.py b/framework/DataObjects/HistorySet.py index 092e46ff67..1b89ddfbe6 100644 --- a/framework/DataObjects/HistorySet.py +++ b/framework/DataObjects/HistorySet.py @@ -206,7 +206,7 @@ def _selectiveRealization(self,rlz): # TODO someday needs to be implemented for when ND data is collected! For now, use base class. 
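+    # NOTE (editorial): the selection below now keeps native values (val[0] / val[indic]) instead of
+    # casting with float(), so that string-valued outputs introduced by this change are preserved.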
      # TODO externalize it in the DataObject base class
    toRemove = []
-    for var,val in rlz.items():
+    for var, val in rlz.items():
      if var in self.protectedTags:
        continue
      # only modify it if it is not already scalar
@@ -227,9 +227,9 @@ def _selectiveRealization(self,rlz):
            val = val.values
          # FIXME this is largely a biproduct of old length-one-vector approaches in the deprecataed data objects
          if val.size == 1:
-            rlz[var] = float(val)
+            rlz[var] = val[0]
          else:
-            rlz[var] = float(val[indic])
+            rlz[var] = val[indic]
        elif method in ['inputPivotValue']:
          pivotParam = self.getDimensions(var)
          assert(len(pivotParam) == 1) # TODO only handle History for now
diff --git a/framework/DataObjects/PointSet.py b/framework/DataObjects/PointSet.py
index 3390dea76c..56c9c92008 100644
--- a/framework/DataObjects/PointSet.py
+++ b/framework/DataObjects/PointSet.py
@@ -118,7 +118,7 @@ def _selectiveRealization(self,rlz):
    # data was previously formatted by _formatRealization
    # then select the point we want
    toRemove = []
-    for var,val in rlz.items():
+    for var, val in rlz.items():
      if var in self.protectedTags:
        continue
      # only modify it if it is not already scalar
@@ -134,7 +134,7 @@ def _selectiveRealization(self,rlz):
          else:
            toRemove.append(var)
            continue
-        if method in ['inputRow','outputRow']:
+        if method in ['inputRow', 'outputRow']:
          # zero-d xarrays give false behavior sometimes
          # TODO formatting should not be necessary once standardized history,float realizations are established
          if type(val) == list:
@@ -143,9 +143,9 @@ def _selectiveRealization(self,rlz):
            val = val.values
          # FIXME this is largely a biproduct of old length-one-vector approaches in the deprecataed data objects
          if val.size == 1:
-            rlz[var] = float(val)
+            rlz[var] = val[0]
          else:
-            rlz[var] = float(val[indic])
+            rlz[var] = val[indic]
        elif method in ['inputPivotValue', 'outputPivotValue']:
          pivotParam = self.getDimensions(var)
          assert(len(pivotParam) == 1) # TODO only handle History for now
diff --git a/framework/Models/Code.py b/framework/Models/Code.py
index e015726d0a..a4507db458 100644
--- a/framework/Models/Code.py
+++ b/framework/Models/Code.py
@@ -604,7 +605,6 @@ def evaluateSample(self, myInput, samplerType, kwargs):
      ## If the run was successful
      if returnCode == 0:
-        returnDict = {}
        ## This may be a tautology at this point --DPM 4/12/17
        ## Special case for RAVEN interface. Added ravenCase flag --ALFOA 09/17/17
        if outputFile is not None and not ravenCase:
@@ -613,16 +613,12 @@ def evaluateSample(self, myInput, samplerType, kwargs):
            outFile.initialize(outputFile+'.csv',self.messageHandler,path=metaData['subDirectory'])

          csvLoader = CsvLoader.CsvLoader(self.messageHandler)
-          csvData = csvLoader.loadCsvFile(outFile)
-          if np.isnan(csvData).all():
-            self.raiseAnError(IOError, 'The data collected from', outputFile+'.csv', 'only contain "NAN"')
-          headers = csvLoader.getAllFieldNames()
-
-          ## Numpy by default iterates over rows, thus we transpose the data and
-          ## zip it with the headers in order to do store it very cleanly into a
-          ## dictionary.
-          for header,data in zip(headers, csvData.T):
-            returnDict[header] = data
+          # does this CodeInterface produce CSV files so large (or so restricted) that we need to
+          # assume all-float data and use numpy, or can we use the more flexible pandas loader?
+          loadUtility = self.code.getCsvLoadUtil()
+          csvData = csvLoader.loadCsvFile(outFile.getAbsFile(), nullOK=False, utility=loadUtility)
+          returnDict = csvLoader.toRealization(csvData)
+
        if not ravenCase:
          self._replaceVariablesNamesWithAliasSystem(returnDict, 'inout', True)
        returnDict.update(kwargs)
diff --git a/tests/framework/CodeInterfaceTests/CobraTF/test1_cobratf_code_interface.xml b/tests/framework/CodeInterfaceTests/CobraTF/test1_cobratf_code_interface.xml
index 587f1dd912..5407f79394 100644
--- a/tests/framework/CodeInterfaceTests/CobraTF/test1_cobratf_code_interface.xml
+++ b/tests/framework/CodeInterfaceTests/CobraTF/test1_cobratf_code_interface.xml
@@ -1,5 +1,5 @@
-
+
 framework/CodeInterfaceTests/CobraTF.test1
 jyoo,alptezbasaran
@@ -43,7 +43,7 @@
 0.907
 0.1
 1.0
-0.8
+0.8
diff --git a/tests/framework/DataObjects/StringIO/input.i b/tests/framework/DataObjects/StringIO/input.i
new file mode 100644
index 0000000000..94428b9380
--- /dev/null
+++ b/tests/framework/DataObjects/StringIO/input.i
@@ -0,0 +1 @@
+x0 = $RAVEN-x0:0$
diff --git a/tests/framework/DataObjects/StringIO/makes_strings.py b/tests/framework/DataObjects/StringIO/makes_strings.py
new file mode 100644
index 0000000000..0dd53061f8
--- /dev/null
+++ b/tests/framework/DataObjects/StringIO/makes_strings.py
@@ -0,0 +1,81 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+  A Code model for testing string outputs
+"""
+
+import sys
+import numpy as np
+
+def read(file):
+  """
+    Reads in input file.
+    @ In, file, string, file to read
+    @ Out, data, dict, input params
+  """
+  data = {}
+  for line in open(file, 'r'):
+    line = line.split('#')[0].strip()
+    if line == '':
+      continue
+    arg, val = (a.strip() for a in line.split('='))
+    data[arg] = float(val)
+  return data
+
+def main(input):
+  """
+    Runs the case.
+    @ In, input, dict, from input file
+    @ Out, result, dict, results
+  """
+  x0 = input['x0']
+  # single string output
+  ss = 'lower' if x0 < 0.5 else 'upper'
+  # vector string output
+  time = np.arange(5)
+  st = np.array(list(ss))
+  # single float output
+  fs = 0 if x0 < 0.5 else 1
+  ft = 100 * fs + np.arange(5)
+  result = {'x0': x0, 'time': time, 'ss': ss, 'st': st, 'fs': fs, 'ft': ft}
+  return result
+
+def write(data, file):
+  """
+    Writes the results to a CSV file.
+ @ In, file, string, file to write to + @ In, data, dict, results from run + @ Out, None + """ + x0 = data['x0'] + time = data['time'] + ss = data['ss'] + st = data['st'] + fs = data['fs'] + ft = data['ft'] + with open(file + '.csv', 'w') as f: + f.writelines('time,x0,ss,st,fs,ft\n') + for t in data['time']: + f.writelines(f'{time[t]},{x0},{ss},{st[t]},{fs},{ft[t]}\n') + +#can be used as a code as well +if __name__=="__main__": + # read arguments + inFile = sys.argv[sys.argv.index('-i')+1] + outFile = sys.argv[sys.argv.index('-o')+1] + + input = read(inFile) # construct the input + res = main(input) # run the code + write(res, outFile) # write the results diff --git a/tests/framework/DataObjects/gold/StringIO/data.csv b/tests/framework/DataObjects/gold/StringIO/data.csv new file mode 100644 index 0000000000..2704af2547 --- /dev/null +++ b/tests/framework/DataObjects/gold/StringIO/data.csv @@ -0,0 +1,11 @@ +RAVEN_sample_ID,time,x0,ss,fs,st,ft,PointProbability,prefix,ProbabilityWeight,ProbabilityWeight-x0 +0,0,0.1,lower,0,l,0,1.0,1,0.5,0.5 +0,1,0.1,lower,0,o,1,1.0,1,0.5,0.5 +0,2,0.1,lower,0,w,2,1.0,1,0.5,0.5 +0,3,0.1,lower,0,e,3,1.0,1,0.5,0.5 +0,4,0.1,lower,0,r,4,1.0,1,0.5,0.5 +1,0,0.9,upper,1,u,100,1.0,2,0.5,0.5 +1,1,0.9,upper,1,p,101,1.0,2,0.5,0.5 +1,2,0.9,upper,1,p,102,1.0,2,0.5,0.5 +1,3,0.9,upper,1,e,103,1.0,2,0.5,0.5 +1,4,0.9,upper,1,r,104,1.0,2,0.5,0.5 diff --git a/tests/framework/DataObjects/gold/StringIO/histories_0.csv b/tests/framework/DataObjects/gold/StringIO/histories_0.csv new file mode 100644 index 0000000000..3f87f2b257 --- /dev/null +++ b/tests/framework/DataObjects/gold/StringIO/histories_0.csv @@ -0,0 +1,6 @@ +time,st,ft +0,l,0 +1,o,1 +2,w,2 +3,e,3 +4,r,4 diff --git a/tests/framework/DataObjects/gold/StringIO/histories_1.csv b/tests/framework/DataObjects/gold/StringIO/histories_1.csv new file mode 100644 index 0000000000..b96e0a01bd --- /dev/null +++ b/tests/framework/DataObjects/gold/StringIO/histories_1.csv @@ -0,0 +1,6 @@ +time,st,ft +0,u,100 +1,p,101 +2,p,102 +3,e,103 +4,r,104 diff --git a/tests/framework/DataObjects/gold/StringIO/points.csv b/tests/framework/DataObjects/gold/StringIO/points.csv new file mode 100644 index 0000000000..d79f5db406 --- /dev/null +++ b/tests/framework/DataObjects/gold/StringIO/points.csv @@ -0,0 +1,3 @@ +x0,ss,fs,st,ft,PointProbability,prefix,ProbabilityWeight,ProbabilityWeight-x0 +0.1,lower,0,r,4,1.0,1,0.5,0.5 +0.9,upper,1,r,104,1.0,2,0.5,0.5 diff --git a/tests/framework/DataObjects/string_io.xml b/tests/framework/DataObjects/string_io.xml new file mode 100644 index 0000000000..9095ed223a --- /dev/null +++ b/tests/framework/DataObjects/string_io.xml @@ -0,0 +1,97 @@ + + + + + framework/DataObjects.StringIO + talbpaul + 2020-07-07 + DataObjects.DataSet + + Tests that strings can be part of the output space. 
+ + + + + StringIO + sample, print + + + + + template + code + grid + ps + hs + ds + + + ps + hs + ds + points + histories + data + + + + + + x0 + ss,fs,st,ft + + + x0,ss,fs + st,ft + + + x0 + ss,fs,st,ft + ss,fs,st,ft + + + + + input.i + + + + + + dist + 0.1 0.9 + + + + + + + 0 + 1 + + + + + + StringIO/makes_strings.py + + + + + + + + + csv + ps + + + csv + hs + + + csv + ds + + + diff --git a/tests/framework/DataObjects/tests b/tests/framework/DataObjects/tests index 7d7df62904..735c683734 100644 --- a/tests/framework/DataObjects/tests +++ b/tests/framework/DataObjects/tests @@ -83,6 +83,12 @@ heavy = true max_time = 400 [../] + + [./StringIO] + type = 'RavenFramework' + input = 'string_io.xml' + csv = 'StringIO/points.csv StringIO/histories_0.csv StringIO/histories_1.csv StringIO/data.csv' + [../] []
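For reference, a short sketch of how the refactored CsvLoader introduced in this change is meant to be driven (the message handler instance and file path below are placeholders, not part of this patch):

    from CsvLoader import CsvLoader

    loader = CsvLoader(messageHandler)   # messageHandler: an existing RAVEN MessageHandler instance (placeholder)
    data = loader.loadCsvFile('out.csv', nullOK=False, utility='numpy')  # 'pandas' is the default utility
    rlz = loader.toRealization(data)     # dict of {column header: numpy array}, as consumed in Models/Code.py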