Merge pull request #28989 from loganharbour/better_json
Store tests by relative instead of absolute paths in the previous results file
loganharbour authored Nov 4, 2024
2 parents fa4a6be + 6d407e2 commit 7c0efca
Showing 4 changed files with 44 additions and 44 deletions.
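For orientation, here is a minimal sketch of how a consumer might read the relocated entries in `.previous_test_results.json` after this change. The output directory, folder key, and test name below are hypothetical; only the keys that appear in this diff (`root_dir`, `input_file_name`, `tests`, `spec_file`) are assumed, and the idea that the relative folder keys resolve against `root_dir` is an assumption, not something stated in the diff.

```python
import json
import os

# Hypothetical output directory (e.g. from --sep-files -o <output_dir>)
results_path = os.path.join('/path/to/output_dir', '.previous_test_results.json')

with open(results_path) as f:
    results = json.load(f)

# Top-level metadata recorded by initializeResults() in this diff
root_dir = results['root_dir']            # the test root directory
spec_name = results['input_file_name']    # e.g. 'tests'

# Folder entries are now keyed by relative path rather than absolute path
folder_entry = results['tests']['tests/test_harness']   # hypothetical folder key
spec_file = folder_entry['spec_file']                    # path to the test spec file
test_entry = folder_entry['tests']['always_ok']          # hypothetical short test name

# Assumption: relative folder keys can be resolved against root_dir
abs_test_dir = os.path.join(root_dir, 'tests', 'test_harness')
```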
40 changes: 22 additions & 18 deletions python/TestHarness/TestHarness.py
@@ -231,7 +231,6 @@ def __init__(self, argv, moose_dir, app_name=None, moose_python=None):
# Finally load the plugins!
self.factory.loadPlugins(dirs, 'testers', "IS_TESTER")

self._infiles = ['tests']
self.parse_errors = []
self.test_table = []
self.num_passed = 0
@@ -368,14 +367,12 @@ def findAndRunTests(self, find_only=False):
self.preRun()
self.start_time = datetime.datetime.now()
launched_tests = []
if self.options.input_file_name != '':
self._infiles = self.options.input_file_name.split(',')

if self.options.spec_file and os.path.isdir(self.options.spec_file):
search_dir = self.options.spec_file
elif self.options.spec_file and os.path.isfile(self.options.spec_file):
search_dir = os.path.dirname(self.options.spec_file)
self._infiles = [os.path.basename(self.options.spec_file)]
assert self.options.input_file_name == os.path.basename(self.options.spec_file)
else:
search_dir = os.getcwd()

@@ -426,7 +423,7 @@ def findAndRunTests(self, find_only=False):
testroot_params["root_params"] = root_params

# See if there were other arguments (test names) passed on the command line
if file in self._infiles \
if file == self.options.input_file_name \
and os.path.abspath(os.path.join(dirpath, file)) not in launched_tests:

if self.notMySpecFile(dirpath, file):
@@ -524,10 +521,8 @@ def augmentParameters(self, filename, tester, testroot_params={}):
test_dir = os.path.abspath(os.path.dirname(filename))
relative_path = test_dir.replace(self.run_tests_dir, '')
first_directory = relative_path.split(os.path.sep)[1] # Get first directory
for infile in self._infiles:
if infile in relative_path:
relative_path = relative_path.replace('/' + infile + '/', ':')
break
if self.options.input_file_name in relative_path:
relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
relative_path = re.sub('^[/:]*', '', relative_path) # Trim slashes and colons
relative_hitpath = os.path.join(*params['hit_path'].split(os.sep)[2:]) # Trim root node "[Tests]"
formatted_name = relative_path + '.' + relative_hitpath
@@ -768,7 +763,7 @@ def cleanup(self):
tester_dirs[tester.getTestDir()] = (tester_dirs.get(tester.getTestDir(), 0) + total_time)
for k, v in tester_dirs.items():
rel_spec_path = f'{os.path.sep}'.join(k.split(os.path.sep)[-2:])
dag_table.append([f'{rel_spec_path}{os.path.sep}{self._infiles[0]}', f'{v:.3f}'])
dag_table.append([f'{rel_spec_path}{os.path.sep}{self.options.input_file_name}', f'{v:.3f}'])

sorted_table = sorted(dag_table, key=lambda dag_table: float(dag_table[1]), reverse=True)
if sorted_table[0:self.options.longest_jobs]:
@@ -858,10 +853,7 @@ def initializeResults(self):
sys.exit(1)

# Adhere to previous input file syntax, or set the default
_input_file_name = 'tests'
if self.options.input_file_name:
_input_file_name = self.options.input_file_name
self.options.input_file_name = self.options.results_storage.get('input_file_name', _input_file_name)
self.options.input_file_name = self.options.results_storage.get('input_file_name', self.options.input_file_name)

# Done working with existing storage
return
@@ -877,6 +869,8 @@ def initializeResults(self):
# Record the input file name that was used
storage['input_file_name'] = self.options.input_file_name

# The test root directory
storage['root_dir'] = self._rootdir
# Record that we are using --sep-files
storage['sep_files'] = self.options.sep_files

@@ -1036,7 +1030,7 @@ def parseCLArgs(self, argv):
parser.add_argument('-t', '--timing', action='store_true', dest='timing', help='Report Timing information for passing tests')
parser.add_argument('--longest-jobs', action='store', dest='longest_jobs', type=int, default=0, help='Print the longest running jobs upon completion')
parser.add_argument('-s', '--scale', action='store_true', dest='scaling', help='Scale problems that have SCALE_REFINE set')
parser.add_argument('-i', nargs=1, action='store', type=str, dest='input_file_name', default='', help='The test specification file to look for')
parser.add_argument('-i', nargs=1, action='store', type=str, dest='input_file_name', help='The test specification file to look for (default: tests)')
parser.add_argument('--libmesh_dir', nargs=1, action='store', type=str, dest='libmesh_dir', help='Currently only needed for bitten code coverage')
parser.add_argument('--skip-config-checks', action='store_true', dest='skip_config_checks', help='Skip configuration checks (all tests will run regardless of restrictions)')
parser.add_argument('--parallel', '-p', nargs='?', action='store', type=int, dest='parallel', const=1, help='Number of processors to use when running mpiexec')
@@ -1160,9 +1154,15 @@ def checkAndUpdateCLArgs(self):
if opts.check_input and opts.enable_recover:
print('ERROR: --check-input and --recover cannot be used simultaneously')
sys.exit(1)
if opts.spec_file and not os.path.exists(opts.spec_file):
print('ERROR: --spec-file supplied but path does not exist')
sys.exit(1)
if opts.spec_file:
if not os.path.exists(opts.spec_file):
print('ERROR: --spec-file supplied but path does not exist')
sys.exit(1)
if os.path.isfile(opts.spec_file):
if opts.input_file_name:
print('ERROR: Cannot use -i with --spec-file being a file')
sys.exit(1)
self.options.input_file_name = os.path.basename(opts.spec_file)
if opts.verbose and opts.quiet:
print('Do not be an oxymoron with --verbose and --quiet')
sys.exit(1)
@@ -1192,6 +1192,10 @@ def checkAndUpdateCLArgs(self):
if opts.libmesh_dir:
self.libmesh_dir = opts.libmesh_dir

# Set default
if not self.options.input_file_name:
self.options.input_file_name = 'tests'

def postRun(self, specs, timing):
return

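A rough worked example of the name formatting in `augmentParameters` above, now driven by the single `-i` value instead of the old `self._infiles` list; the paths and the spec block name are hypothetical:

```python
import os
import re

# Hypothetical inputs; the real values come from the harness at run time.
run_tests_dir = '/home/user/moose/test'
test_dir = '/home/user/moose/test/tests/kernels/simple_diffusion'
input_file_name = 'tests'      # the single -i value (defaults to 'tests')
relative_hitpath = 'test'      # hypothetical test block name from the spec file

relative_path = test_dir.replace(run_tests_dir, '')   # '/tests/kernels/simple_diffusion'
if input_file_name in relative_path:
    # '/tests/' collapses to ':' -> ':kernels/simple_diffusion'
    relative_path = relative_path.replace('/' + input_file_name + '/', ':')
relative_path = re.sub('^[/:]*', '', relative_path)   # 'kernels/simple_diffusion'

formatted_name = relative_path + '.' + relative_hitpath
print(formatted_name)   # 'kernels/simple_diffusion.test'
```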
4 changes: 1 addition & 3 deletions python/TestHarness/schedulers/Job.py
@@ -764,9 +764,7 @@ def storeResults(self, scheduler):
joint_status = self.getJointStatus()

# Base job data
job_data = {'name' : self.getTestNameShort(),
'long_name' : self.getTestName(),
'timing' : self.timer.totalTimes(),
job_data = {'timing' : self.timer.totalTimes(),
'status' : joint_status.status,
'status_message' : joint_status.message,
'fail' : self.isFail(),
15 changes: 8 additions & 7 deletions python/TestHarness/testers/Tester.py
@@ -180,25 +180,26 @@ def getResultsEntry(self, options, create, graceful=False):
""" Get the entry in the results storage for this tester """
tests = options.results_storage['tests']

test_dir = self.getTestDir()
short_name = self.getTestNameShort()
test_dir = self.getTestName()[:(-len(short_name) - 1)]
test_dir_entry = tests.get(test_dir)
if not test_dir_entry:
if not create:
if graceful:
return None, None
raise Exception(f'Test folder {test_dir} not in results')
tests[test_dir] = {}
tests[test_dir] = {'tests': {}}
test_dir_entry = tests[test_dir]

test_name = self.getTestName()
test_name_entry = test_dir_entry.get(test_name)
test_name_entry = test_dir_entry['tests'].get(short_name)
test_dir_entry['spec_file'] = self.getSpecFile()
if not test_name_entry:
if not create:
if graceful:
return test_dir_entry, None
raise Exception(f'Test {test_dir}/{test_name} not in results')
test_dir_entry[test_name] = {}
return test_dir_entry, test_dir_entry.get(test_name)
raise Exception(f'Test {test_dir}/{short_name} not in results')
test_dir_entry['tests'][short_name] = {}
return test_dir_entry, test_dir_entry['tests'][short_name]

# Return a tuple (status, message, caveats) for this tester as found
# in the previous results
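A small worked example of how `getResultsEntry` now derives the relative folder key and the short test name, using a hypothetical test name:

```python
# Hypothetical values mirroring what getTestName()/getTestNameShort() might return
full_name = 'tests/test_harness.always_ok'   # relative folder + '.' + short name
short_name = 'always_ok'

# Folder key used in results_storage['tests'] (relative, not absolute)
test_dir = full_name[:(-len(short_name) - 1)]
print(test_dir)    # 'tests/test_harness'

# Entries are then stored as:
#   results_storage['tests'][test_dir] = {
#       'spec_file': '<path to the test spec>',
#       'tests': {short_name: {...per-test data...}},
#   }
```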
29 changes: 13 additions & 16 deletions python/TestHarness/tests/test_WriteResults.py
@@ -17,31 +17,28 @@ def tearDown(self):
"""
self.setUp()

def checkFilesExist(self, output_dir, tests, output_object_names):
def checkFilesExist(self, output_dir, tests, output_object_names, spec_file):
# The directories within the test directory where these tests reside
test_folders = ['tests', 'test_harness']
test_folder = os.path.join('tests', 'test_harness')
# The complete path to the directory where the tests reside
test_base_path = os.path.join(os.getenv('MOOSE_DIR'), 'test', *test_folders)
test_base_path = os.path.join(os.getenv('MOOSE_DIR'), 'test', test_folder)
# The complete path where the output should reside
output_base_path = os.path.join(output_dir, *test_folders)
output_base_path = os.path.join(output_dir, test_folder)

# Load the previous results
with open(os.path.join(output_dir, '.previous_test_results.json')) as f:
results = json.load(f)
test_results = results['tests']
# We should only have one test spec
self.assertEqual(1, len(test_results))
# The test spec should be in the results
self.assertIn(test_base_path, test_results)
test_spec_results = test_results[test_base_path]
# The test folder should be in the results
test_spec_results = test_results[test_folder]
# The number of tests in the test spec should be the number provided
self.assertEqual(len(tests), len(test_spec_results))
self.assertEqual(len(tests), len(test_spec_results['tests']))
# Test spec should match
self.assertEqual(os.path.join(test_base_path, spec_file), test_spec_results['spec_file'])

for test in tests:
# The test name should be in the test spec results
test_name_short = f'{"/".join(test_folders)}.{test}'
self.assertIn(test_name_short, test_spec_results)
test_results = test_spec_results[test_name_short]
# Test should be in the results
test_results = test_spec_results['tests'][test]
# Get the output files from the test spec
result_output_files = test_results['output_files']
# Make sure each output file exists and is set in the results file
@@ -59,8 +56,8 @@ def testWriteAll(self):
with tempfile.TemporaryDirectory() as output_dir:
with self.assertRaises(subprocess.CalledProcessError):
self.runTests('--no-color', '-i', 'diffs', '--sep-files', '-o', output_dir, tmp_output=False)
self.checkFilesExist(output_dir, ['csvdiff', 'exodiff'], ['runner_run', 'tester'])
self.checkFilesExist(output_dir, ['csvdiff', 'exodiff'], ['runner_run', 'tester'], 'diffs')

with tempfile.TemporaryDirectory() as output_dir:
self.runTests('--no-color', '-i', 'always_ok', '--sep-files', '-o', output_dir, tmp_output=False)
self.checkFilesExist(output_dir, ['always_ok'], ['runner_run'])
self.checkFilesExist(output_dir, ['always_ok'], ['runner_run'], 'always_ok')
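To summarize the layout change that the updated assertions encode, a before/after sketch of the previous-results structure; all paths and names are hypothetical, and the per-test payloads (timing, status, output files, ...) are elided:

```python
# Old layout: folders keyed by absolute path, tests keyed by their long names
old_results = {
    'tests': {
        '/abs/path/to/moose/test/tests/test_harness': {
            'tests/test_harness.always_ok': {},
        },
    },
}

# New layout: folders keyed by relative path, with the spec file recorded and
# tests keyed by their short names
new_results = {
    'root_dir': '/abs/path/to/moose/test',
    'tests': {
        'tests/test_harness': {
            'spec_file': '/abs/path/to/moose/test/tests/test_harness/always_ok',
            'tests': {'always_ok': {}},
        },
    },
}
```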
