uibenchjanktests: Rework to allow listing subtests
Rework the uibenchjanktests workload to allow specifying a list of
subtests. The activity will be re-launched for each provided subtest. If
none are specified, all available tests will be run in alphabetical order.
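
For illustration, a run could be configured along these lines (shown as the Python
dict a YAML agenda entry would parse into; the 'params' key and subtest names are
assumptions and depend on the agenda format and APK version):

    workload_spec = {
        'name': 'uibenchjanktests',
        'params': {
            # run only these subtests, restarting the app between them
            'tests': ['testClippedListView', 'testSlowBindRecyclerView'],
        },
    }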

The workload output will now include metrics with their respective test
names as classifiers.
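
A made-up example of the shape of a single resulting metric, mirroring the
classifiers argument added to context.add_metric() in the diff below:

    metric = {
        'name': 'gfx-avg-frame-time-95',                      # example metric name
        'value': 12.5,                                        # invented value
        'classifiers': {'test_name': 'testClippedListView'},  # new per-subtest tag
    }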

Add a 'full' parameter to revert to the old default 'full run'
behaviour, with no restarts between subtests.
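
Roughly, the two modes differ only in the 'class' argument handed to
'am instrument'; a sketch with assumed package, class and method names:

    package = 'com.android.uibench.janktests'     # assumed test package
    klass = 'UiBenchJankTests'                    # assumed default test class
    full_target = '{}.{}'.format(package, klass)  # whole suite, one launch
    per_test_target = '{}.{}#{}'.format(package, klass, 'testInvalidate')  # one subtest per launch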

Signed-off-by: Kajetan Puchalski <[email protected]>
mrkajetanp authored and marcbonnici committed Jun 30, 2023
1 parent 951eec9 commit bf72a57
Showing 1 changed file with 78 additions and 32 deletions.
110 changes: 78 additions & 32 deletions wa/workloads/uibenchjanktests/__init__.py
@@ -14,16 +14,21 @@
#
import re

from wa import Parameter, ApkWorkload, PackageHandler, TestPackageHandler
from wa import Parameter, ApkWorkload, PackageHandler, TestPackageHandler, ConfigError
from wa.utils.types import list_or_string
from wa.framework.exception import WorkloadError


class Uibenchjanktests(ApkWorkload):

name = 'uibenchjanktests'
description = """
Runs a particular test of the UIBench JankTests_ test suite. The suite
is provided by Google as an automated version of the UIBench testbench
for the Android UI.
Runs a particular test (or list of tests) of the UIBench JankTests_
test suite. The suite is provided by Google as an automated version
of the UIBench testbench for the Android UI.
The workload supports running the default set of tests without
restarting the app or running an arbitrary set of tests with
restarting the app in between each test.
.. _JankTests: https://android.googlesource.com/platform/platform_testing/+/master/tests/jank/uibench/src/com/android/uibench/janktests
"""
@@ -37,15 +37,26 @@ class Uibenchjanktests(ApkWorkload):
r'INSTRUMENTATION_STATUS: (?P<name>[\w-]+)=(?P<value>[-+\d.]+)')

parameters = [
Parameter('test', kind=str,
description='Test to be run. Defaults to full run.'),
Parameter('tests', kind=list_or_string,
description="""
Tests to be run. Defaults to running every available
subtest in alphabetical order. The app will be restarted
for each subtest, unlike when using full=True.
""", default=None, aliases=['test']),
Parameter('full', kind=bool, default=False,
description="""
Runs the full suite of tests that the app defaults to
when no subtests are specified. The actual tests and their
order might depend on the version of the app. The subtests
will be run back to back without restarting the app in between.
"""),
Parameter('wait', kind=bool, default=True,
description='Forces am instrument to wait until the '
'instrumentation terminates before terminating itself. The '
'net effect is to keep the shell open until the tests have '
'finished. This flag is not required, but if you do not use '
'it, you will not see the results of your tests.'),
Parameter('raw', kind=bool, default=False,
Parameter('raw', kind=bool, default=True,
description='Outputs results in raw format. Use this flag '
'when you want to collect performance measurements, so that '
'they are not formatted as test results. This flag is '
@@ -92,40 +92,70 @@ def __init__(self, target, **kwargs):
instrument_wait=self.wait,
no_hidden_api_checks=self.no_hidden_api_checks)

def validate(self):
if self.full and self.tests is not None:
raise ConfigError("Can't select subtests while 'full' is True")

def initialize(self, context):
super(Uibenchjanktests, self).initialize(context)
self.dut_apk.initialize(context)
self.dut_apk.initialize_package(context)
if 'class' not in self.apk.args:
class_for_method = dict(self.apk.apk_info.methods)
class_for_method[None] = self._DEFAULT_CLASS
try:
method = class_for_method[self.test]
except KeyError as e:
msg = 'Unknown test "{}". Known tests:\n\t{}'
known_tests = '\n\t'.join(
m for m in class_for_method.keys()
if m is not None and m.startswith('test'))
raise ValueError(msg.format(e, known_tests))
klass = '{}.{}'.format(self.package_names[0], method)

if self.test:
klass += '#{}'.format(self.test)
self.apk.args['class'] = klass

self.output = {}

# Full run specified, don't select subtests
if self.full:
self.apk.args['class'] = '{}.{}'.format(
self.package_names[0], self._DEFAULT_CLASS
)
return

self.available_tests = {
test: cl for test, cl in self.apk.apk_info.methods
if test.startswith('test')
}

# default to running all tests in alphabetical order
# pylint: disable=access-member-before-definition
if not self.tests:
self.tests = sorted(self.available_tests.keys())
# raise error if any of the tests are not available
elif any([t not in self.available_tests for t in self.tests]):
msg = 'Unknown test(s) specified. Known tests: {}'
known_tests = '\n'.join(self.available_tests.keys())
raise ValueError(msg.format(known_tests))

def run(self, context):
self.apk.start_activity()
self.apk.wait_instrument_over()
# Full run, just run the activity directly
if self.full:
self.apk.start_activity()
self.apk.wait_instrument_over()
self.output['full'] = self.apk.instrument_output
return

for test in self.tests:
self.apk.args['class'] = '{}.{}#{}'.format(
self.package_names[0],
self.available_tests[test], test
)
self.apk.setup(context)
self.apk.start_activity()
try:
self.apk.wait_instrument_over()
except WorkloadError as e:
self.logger.warning(str(e))
self.output[test] = self.apk.instrument_output

def update_output(self, context):
super(Uibenchjanktests, self).update_output(context)
output = self.apk.instrument_output
for section in self._OUTPUT_SECTION_REGEX.finditer(output):
if int(section.group('code')) != -1:
msg = 'Run failed (INSTRUMENTATION_STATUS_CODE: {}). See log.'
raise RuntimeError(msg.format(section.group('code')))
for metric in self._OUTPUT_GFXINFO_REGEX.finditer(section.group()):
context.add_metric(metric.group('name'), metric.group('value'))
for test, test_output in self.output.items():
for section in self._OUTPUT_SECTION_REGEX.finditer(test_output):
if int(section.group('code')) != -1:
msg = 'Run failed (INSTRUMENTATION_STATUS_CODE: {}). See log.'
raise RuntimeError(msg.format(section.group('code')))
for metric in self._OUTPUT_GFXINFO_REGEX.finditer(section.group()):
context.add_metric(metric.group('name'), metric.group('value'),
classifiers={'test_name': test})

def teardown(self, context):
super(Uibenchjanktests, self).teardown(context)
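
For reference, a standalone sketch of the raw instrumentation output line that
the gfxinfo regex above matches; the sample metric name and value are invented:

    import re

    sample = 'INSTRUMENTATION_STATUS: gfx-avg-frame-time-95=12.5'
    pattern = re.compile(r'INSTRUMENTATION_STATUS: (?P<name>[\w-]+)=(?P<value>[-+\d.]+)')
    match = pattern.search(sample)
    if match:
        print(match.group('name'), match.group('value'))  # gfx-avg-frame-time-95 12.5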
