#! /usr/bin/env python3
#
# Copyright 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Regenerate some ART test related files.

# This script handles only a subset of ART run-tests at the moment; additional
# cases will be added later.

import argparse
import copy
import collections
import itertools
import json
import logging
import os
import re
import sys
import textwrap
import xml.dom.minidom

logging.basicConfig(format='%(levelname)s: %(message)s')

ME = os.path.basename(sys.argv[0])

# Common advisory placed at the top of all generated files.
ADVISORY = f"Generated by `{ME}`. Do not edit manually."

# Default indentation unit.
INDENT = "  "

# Indentation unit for XML files.
XML_INDENT = "    "

def reindent(str, indent = ""):
  """Reindent literal string while removing common leading spaces."""
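  # For example (illustrative values), reindent("  a\nc  b\n", "> ") is not used here;
  # a simpler case: reindent("  a\n  b\n", "> ") first dedents the string to "a\nb\n",
  # then prefixes each line, yielding "> a\n> b\n".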
  return textwrap.indent(textwrap.dedent(str), indent)

def copyright_header_text(year):
  """Return the copyright header text used in XML files."""
  return reindent(f"""\
    Copyright (C) {year} The Android Open Source Project

        Licensed under the Apache License, Version 2.0 (the "License");
        you may not use this file except in compliance with the License.
        You may obtain a copy of the License at

             http://www.apache.org/licenses/LICENSE-2.0

        Unless required by applicable law or agreed to in writing, software
        distributed under the License is distributed on an "AS IS" BASIS,
        WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        See the License for the specific language governing permissions and
        limitations under the License.
    """, " ")

def split_list(l, n):
  """Return a list of `n` sublists of (contiguous) elements of list `l`."""
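  # For example (illustrative values), split_list(["a", "b", "c"], 2) returns
  # [["a", "b"], ["c"]].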
  assert n > 0
  (d, m) = divmod(len(l), n)
  # If the length of `l` is divisible by `n`, use that quotient (`d`) as the size of each sublist;
  # otherwise, use the next integer value (`d + 1`).
  s = d if m == 0 else d + 1
  result = [l[i:i + s] for i in range(0, len(l), s)]
  assert len(result) == n
  return result

# The prefix used in the Soong module name of all ART run-tests.
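# For example, the run-test `001-HelloWorld` is built as the Soong module
# `art-run-test-001-HelloWorld`.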
ART_RUN_TEST_MODULE_NAME_PREFIX = "art-run-test-"

# Number of shards used to declare ART run-tests in the sharded ART MTS test plan.
NUM_MTS_ART_RUN_TEST_SHARDS = 1

# Name of the ART MTS test list containing "eng-only" test modules,
# which require a device-under-test running a `userdebug` or `eng`
# build.
ENG_ONLY_TEST_LIST_NAME = "mts-art-tests-list-eng-only"

# Name of Lint baseline filename used in certain ART run-tests,
# e.g. for the `NewApi` check (see e.g. b/268261262).
LINT_BASELINE_FILENAME = "lint-baseline.xml"

# Curated list of tests that have a custom `run` script, but that are
# known to work fine with the default test execution strategy (i.e.
# when ignoring their `run` script), even if not exactly as they would
# with the original ART run-test harness.
runnable_test_exceptions = frozenset([
  "055-enum-performance",
  "059-finalizer-throw",
  "080-oom-throw",
  "133-static-invoke-super",
  "159-app-image-fields",
  "160-read-barrier-stress",
  "163-app-image-methods",
  "165-lock-owner-proxy",
  "168-vmstack-annotated",
  "176-app-image-string",
  "304-method-tracing",
  "628-vdex",
  "643-checker-bogus-ic",
  "676-proxy-jit-at-first-use",
  "677-fsi2",
  "678-quickening",
  "818-clinit-nterp",
  "821-madvise-willneed",
  "1004-checker-volatile-ref-load",
  "1338-gc-no-los",
])

# Known slow tests, for which the timeout value is raised.
known_slow_tests = frozenset([
  "080-oom-throw",
  "099-vmdebug",
  "109-suspend-check",
  "175-alloc-big-bignums",
])

# Known failing ART run-tests.
# TODO(rpl): Investigate and address the causes of failures.
known_failing_tests = frozenset([
  "004-SignalTest",
  "004-UnsafeTest",
  "051-thread",
  "086-null-super",
  "087-gc-after-link",
  "136-daemon-jni-shutdown",
  "139-register-natives",
  "148-multithread-gc-annotations",
  "149-suspend-all-stress",
  "150-loadlibrary",
  "154-gc-loop",
  "169-threadgroup-jni",
  "177-visibly-initialized-deadlock",
  "179-nonvirtual-jni",
  "203-multi-checkpoint",
  "305-other-fault-handler",
  # 449-checker-bce: Dependency on `libarttest`.
  "449-checker-bce",
  "454-get-vreg",
  "461-get-reference-vreg",
  "466-get-live-vreg",
  "497-inlining-and-class-loader",
  "530-regression-lse",
  "555-UnsafeGetLong-regression",
  # 596-monitor-inflation: Dependency on `libarttest`.
  "596-monitor-inflation",
  "602-deoptimizeable",
  "604-hot-static-interface",
  "616-cha-native",
  "616-cha-regression-proxy-method",
  # 623-checker-loop-regressions: Dependency on `libarttest`.
  "623-checker-loop-regressions",
  "626-set-resolved-string",
  "642-fp-callees",
  "647-jni-get-field-id",
  "655-jit-clinit",
  "656-loop-deopt",
  "664-aget-verifier",
  # 680-checker-deopt-dex-pc-0: Dependency on `libarttest`.
  "680-checker-deopt-dex-pc-0",
  "685-deoptimizeable",
  "687-deopt",
  "693-vdex-inmem-loader-evict",
  "708-jit-cache-churn",
  # 716-jli-jit-samples: Dependency on `libarttest`.
  "716-jli-jit-samples",
  "717-integer-value-of",
  "720-thread-priority",
  # 730-cha-deopt: Fails with:
  #
  #   Test command execution failed with status FAILED: CommandResult: exit code=1, out=, err=Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: length=0; index=0
  #           at Main.main(Main.java:24)
  #
  "730-cha-deopt",
  # 813-fp-args: Dependency on `libarttest`.
  "813-fp-args",
  # 821-many-args: Fails with:
  #
  #   Test command execution failed with status FAILED: CommandResult: exit code=1, out=, err=Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: length=0; index=0
  #           at Main.main(Main.java:20)
  #
  "821-many-args",
  # 823-cha-inlining: Dependency on `libarttest`.
  "823-cha-inlining",
  # 826-infinite-loop: The test expects an argument passed to `Main.main` (the test library,
  # usually `arttestd` or `arttest`), but the ART run-test TradeFed test runner
  # (`com.android.tradefed.testtype.ArtRunTest`) does not implement this yet.
  "826-infinite-loop",
  # 832-cha-recursive: Dependency on `libarttest`.
  "832-cha-recursive",
  # 837-deopt: Dependency on `libarttest`.
  "837-deopt",
  # 844-exception: Dependency on `libarttest`.
  "844-exception",
  # 844-exception2: Dependency on `libarttest`.
  "844-exception2",
  # 966-default-conflict: Dependency on `libarttest`.
  "966-default-conflict",
  # These tests need native code.
  "993-breakpoints-non-debuggable",
  # 1002-notify-startup: Dependency on `libarttest` + custom `check` script.
  "1002-notify-startup",
  "1337-gc-coverage",
  "1339-dead-reference-safe",
  "1945-proxy-method-arguments",
  "2011-stack-walk-concurrent-instrument",
  "2033-shutdown-mechanics",
  "2036-jni-filechannel",
  "2037-thread-name-inherit",
  # 2040-huge-native-alloc: Fails with:
  #
  #   Test command execution failed with status FAILED: CommandResult: exit code=1, out=, err=Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: length=0; index=0
  #           at Main.main(Main.java:56)
  #
  "2040-huge-native-alloc",
  "2235-JdkUnsafeTest",
  "2243-single-step-default",
  "2262-miranda-methods",
  "2262-default-conflict-methods",
  # 2275-pthread-name: Dependency on `libarttest`.
  "2275-pthread-name",
])

# These ART run-tests are new and have not had enough post-submit runs
# to meet pre-submit SLOs. Monitor their post-submit runs before
# removing them from this set (in order to promote them to
# presubmits).
postsubmit_only_tests = frozenset([
])

known_failing_on_hwasan_tests = frozenset([
  "CtsJdwpTestCases", # times out
  # apexd fails to unmount com.android.runtime on ASan builds.
  "art_standalone_dexopt_chroot_setup_tests",
])

# ART gtests that do not need root access to the device.
art_gtest_user_module_names = [
    "art_libnativebridge_cts_tests",
    "art_standalone_artd_tests",
    "art_standalone_cmdline_tests",
    "art_standalone_compiler_tests",
    "art_standalone_dex2oat_cts_tests",
    "art_standalone_dex2oat_tests",
    "art_standalone_dexdump_tests",
    "art_standalone_dexlist_tests",
    "art_standalone_libartbase_tests",
    "art_standalone_libartpalette_tests",
    "art_standalone_libartservice_tests",
    "art_standalone_libarttools_tests",
    "art_standalone_libdexfile_support_tests",
    "art_standalone_libdexfile_tests",
    "art_standalone_libprofile_tests",
    "art_standalone_oatdump_tests",
    "art_standalone_odrefresh_tests",
    "art_standalone_runtime_tests",
    "art_standalone_sigchain_tests",
    "libnativebridge-lazy-tests",
    "libnativebridge-tests",
    "libnativeloader_test",
]

# ART gtests that need root access to the device.
art_gtest_eng_only_module_names = [
    "art_standalone_dexopt_chroot_setup_tests",
    "art_standalone_dexoptanalyzer_tests",
    "art_standalone_profman_tests",
    "libnativeloader_e2e_tests",
]

# All supported ART gtests.
art_gtest_module_names = sorted(art_gtest_user_module_names + art_gtest_eng_only_module_names)

# These ART gtests are new and have not had enough post-submit runs
# to meet pre-submit SLOs. Monitor their post-submit runs before
# removing them from this set (in order to promote them to
# presubmits).
art_gtest_postsubmit_only_module_names = [
]

# ART gtests not supported in MTS.
art_gtest_modules_excluded_from_mts = [
    # TODO(b/347717488): Consider adding this test to ART MTS.
    "libnativebridge-tests",
]

# ART gtests supported in MTS that do not need root access to the device.
art_gtest_mts_user_module_names = [t for t in art_gtest_user_module_names
                                   if t not in art_gtest_modules_excluded_from_mts]

# ART gtests supported in presubmits.
art_gtest_presubmit_module_names = [t for t in art_gtest_module_names
                                    if t not in art_gtest_postsubmit_only_module_names]

# ART gtests supported in Mainline presubmits.
art_gtest_mainline_presubmit_module_names = copy.copy(art_gtest_presubmit_module_names)

# ART gtests supported in postsubmits.
unknown_art_gtest_postsubmit_only_module_names = [t for t in art_gtest_postsubmit_only_module_names
                                                  if t not in art_gtest_module_names]
if unknown_art_gtest_postsubmit_only_module_names:
  logging.error(textwrap.dedent("""\
  The following `art_gtest_postsubmit_only_module_names` elements are not part of
  `art_gtest_module_names`: """) + str(unknown_art_gtest_postsubmit_only_module_names))
  sys.exit(1)
art_gtest_postsubmit_module_names = copy.copy(art_gtest_postsubmit_only_module_names)

# Tests exhibiting flaky behavior, currently excluded from MTS for
# the sake of stability / confidence (b/209958457).
flaky_tests_excluded_from_mts = {
    "CtsLibcoreFileIOTestCases": [
        ("android.cts.FileChannelInterProcessLockTest#" + m) for m in [
         "test_lockJJZ_Exclusive_asyncChannel",
         "test_lockJJZ_Exclusive_syncChannel",
         "test_lock_differentChannelTypes",
         "test_lockJJZ_Shared_asyncChannel",
         "test_lockJJZ_Shared_syncChannel",
        ]
    ],
    "CtsLibcoreTestCases": [
        ("com.android.org.conscrypt.javax.net.ssl.SSLSocketVersionCompatibilityTest#" + m + c)
        for (m, c) in itertools.product(
            [
                "test_SSLSocket_interrupt_read_withoutAutoClose",
                "test_SSLSocket_setSoWriteTimeout",
            ],
            [
                "[0: TLSv1.2 client, TLSv1.2 server]",
                "[1: TLSv1.2 client, TLSv1.3 server]",
                "[2: TLSv1.3 client, TLSv1.2 server]",
                "[3: TLSv1.3 client, TLSv1.3 server]",
            ]
        )
    ] + [
        ("libcore.dalvik.system.DelegateLastClassLoaderTest#" + m) for m in [
            "testLookupOrderNodelegate_getResource",
            "testLookupOrder_getResource",
        ]
    ]
}

# Tests excluded from all test mapping test groups.
#
# Example of admissible values in this dictionary:
#
#   "art_standalone_cmdline_tests": ["CmdlineParserTest#TestCompilerOption"],
#   "art_standalone_dexopt_chroot_setup_tests": ["DexoptChrootSetupTest#HelloWorld"],
#
failing_tests_excluded_from_test_mapping = {
  # Empty.
}

# Tests failing because of linking issues, currently excluded from MTS
# and Mainline presubmits to minimize noise in continuous runs while
# we investigate.
#
# Example of admissible values in this dictionary: same as for
# `failing_tests_excluded_from_test_mapping` (see above).
#
# TODO(b/247108425): Address the linking issues and re-enable these
# tests.
failing_tests_excluded_from_mts_and_mainline_presubmits = {
    "art_standalone_compiler_tests": ["JniCompilerTest*"],
    "art_standalone_libartpalette_tests": ["PaletteClientJniTest*"],
}

failing_tests_excluded_from_mainline_presubmits = (
  failing_tests_excluded_from_test_mapping |
  failing_tests_excluded_from_mts_and_mainline_presubmits
)

# Is `run_test` a Checker test (i.e. a test containing Checker
# assertions)?
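# For example, `643-checker-bogus-ic` is a Checker test, while
# `055-enum-performance` is not.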
def is_checker_test(run_test):
  return re.match("^[0-9]+-checker-", run_test)

def gen_mts_test_list_file(tests, test_list_file, copyright_year, configuration_description,
                           tests_description, comments = []):
  """Generate an ART MTS test list file."""
  root = xml.dom.minidom.Document()

  advisory_header = root.createComment(f" {ADVISORY} ")
  root.appendChild(advisory_header)
  copyright_header = root.createComment(copyright_header_text(copyright_year))
  root.appendChild(copyright_header)

  configuration = root.createElement("configuration")
  root.appendChild(configuration)
  configuration.setAttribute("description", configuration_description)

  def append_option(name, value):
    option = root.createElement("option")
    option.setAttribute("name", name)
    option.setAttribute("value", value)
    configuration.appendChild(option)

  def append_comment(comment):
    xml_comment = root.createComment(f" {comment} ")
    configuration.appendChild(xml_comment)

  # Test declarations.
  # ------------------

  test_declarations_comments = [tests_description + "."]
  test_declarations_comments.extend(comments)
  for c in test_declarations_comments:
    append_comment(c)
  for t in tests:
    append_option("compatibility:include-filter", t)

  # `MainlineTestModuleController` configurations.
  # ----------------------------------------------

  module_controller_configuration_comments = [
      f"Enable MainlineTestModuleController for {tests_description}."]
  module_controller_configuration_comments.extend(comments)
  for c in module_controller_configuration_comments:
    append_comment(c)
  for t in tests:
    append_option("compatibility:module-arg", f"{t}:enable:true")
  for t in tests:
    if t in ["CtsLibcoreTestCases", "CtsLibcoreOjTestCases"]:
      append_comment("core-test-mode=mts tells ExpectationBasedFilter to exclude @NonMts Tests")
      append_option("compatibility:module-arg", f"{t}:instrumentation-arg:core-test-mode:=mts")

  xml_str = root.toprettyxml(indent = XML_INDENT, encoding = "utf-8")

  with open(test_list_file, "wb") as f:
    logging.debug(f"Writing `{test_list_file}`.")
    f.write(xml_str)

class Generator:
  def __init__(self, top_dir):
    """Generator of ART test files for an Android source tree anchored at `top_dir`."""
    # Path to the Android top source tree.
    self.top_dir = top_dir
    # Path to the ART directory.
    self.art_dir = os.path.join(top_dir, "art")
    # Path to the ART tests top-level directory.
    self.art_test_dir = os.path.join(self.art_dir, "test")
    # Path to the MTS configuration directory.
    self.mts_config_dir = os.path.join(
        top_dir, "test", "mts", "tools", "mts-tradefed", "res", "config")
    # Path to the ART JVM TI CTS tests top-level directory.
    self.jvmti_cts_test_dir = os.path.join(top_dir, "cts/hostsidetests/jvmti/run-tests")

  # Return the list of ART run-tests (in short form, i.e. `001-HelloWorld`,
  # not `art-run-test-001-HelloWorld`).
  def enumerate_run_tests(self):
    return sorted([run_test
                   for run_test in os.listdir(self.art_test_dir)
                   if re.match("^[0-9]{3,}-", run_test)])

  # Return the list of ART JVM TI CTS tests.
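  # For example, a test directory named `test-912` (hypothetical name) would be
  # reported as `CtsJvmtiRunTest912HostTestCases`.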
  def enumerate_jvmti_cts_tests(self):
    return sorted([re.sub(r"test-(\d+)", r"CtsJvmtiRunTest\1HostTestCases", cts_jvmti_test_dir)
                   for cts_jvmti_test_dir in os.listdir(self.jvmti_cts_test_dir)
                   if re.match(r"^test-\d+$", cts_jvmti_test_dir)])

  # Return the metadata of a test, if any.
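  # A `test-metadata.json` file typically looks like this (illustrative example
  # assembled from the keys this script consumes):
  #
  #   {
  #     "test_suites": ["cts", "mcts-art"],
  #     "build-param": {"experimental": "var-handles"},
  #     "run-param": {"default-run": "true"}
  #   }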
  def get_test_metadata(self, run_test):
    run_test_path = os.path.join(self.art_test_dir, run_test)
    metadata_file = os.path.join(run_test_path, "test-metadata.json")
    metadata = {}
    if os.path.exists(metadata_file):
      with open(metadata_file, "r") as f:
        try:
          metadata = json.load(f)
        except json.decoder.JSONDecodeError:
          logging.error(f"Unable to parse test metadata file `{metadata_file}`")
          raise
    return metadata

  # Can the build script of `run_test` be safely ignored?
  def can_ignore_build_script(self, run_test):
    # Check whether there are test metadata with build parameters
    # enabling us to safely ignore the build script.
    metadata = self.get_test_metadata(run_test)
    build_param = metadata.get("build-param", {})
    # Ignore build scripts that only prevent building for the JVM
    # and/or request VarHandle support: Soong builds JARs with
    # VarHandle support by default (i.e. with an API level greater
    # than or equal to 28), so such build scripts can be safely
    # ignored.
    experimental_var_handles = {"experimental": "var-handles"}
    jvm_supported_false = {"jvm-supported": "false"}
    if (build_param == experimental_var_handles or
        build_param == jvm_supported_false or
        build_param == experimental_var_handles | jvm_supported_false):
      return True
    return False

  # Can `run_test` be built with Soong?
  # TODO(b/147814778): Add build support for more tests.
  def is_soong_buildable(self, run_test):
    run_test_path = os.path.join(self.art_test_dir, run_test)

    # Skip tests with non-default build rules, unless these build
    # rules can be safely ignored.
    if (os.path.isfile(os.path.join(run_test_path, "generate-sources")) or
        os.path.isfile(os.path.join(run_test_path, "javac_post.sh"))):
      return False
    if os.path.isfile(os.path.join(run_test_path, "build.py")):
      if not self.can_ignore_build_script(run_test):
        return False
    # Skip tests with sources outside the `src` directory.
    for subdir in ["jasmin",
                   "jasmin-multidex",
                   "smali",
                   "smali-ex",
                   "smali-multidex",
                   "src-aotex",
                   "src-bcpex",
                   "src-ex",
                   "src-ex2",
                   "src-multidex"]:
      if os.path.isdir(os.path.join(run_test_path, subdir)):
        return False
    # Skip tests that have both an `src` directory and an `src-art` directory.
    if os.path.isdir(os.path.join(run_test_path, "src")) and \
       os.path.isdir(os.path.join(run_test_path, "src-art")):
      return False
    # Skip tests that have neither an `src` directory nor an `src-art` directory.
    if not os.path.isdir(os.path.join(run_test_path, "src")) and \
       not os.path.isdir(os.path.join(run_test_path, "src-art")):
      return False
    # Skip tests with a copy of `sun.misc.Unsafe`.
    if os.path.isfile(os.path.join(run_test_path, "src", "sun", "misc", "Unsafe.java")):
      return False
    # Skip tests with Hidden API specs.
    if os.path.isfile(os.path.join(run_test_path, "hiddenapi-flags.csv")):
      return False
    # All other tests are considered buildable.
    return True

  # Can the run script of `run_test` be safely ignored?
  def can_ignore_run_script(self, run_test):
    # Unconditionally consider some identified tests that have a
    # (not-yet-handled) custom `run` script as runnable.
    #
    # TODO(rpl): Get rid of this exception mechanism by supporting
    # these tests' `run` scripts properly.
    if run_test in runnable_test_exceptions:
      return True
    # Check whether there are test metadata with run parameters
    # enabling us to safely ignore the run script.
    metadata = self.get_test_metadata(run_test)
    run_param = metadata.get("run-param", {})
    if run_param.get("default-run", ""):
      return True
    return False

  # Generate a Blueprint property group as a string, i.e. something looking like
  # this:
  #
  #   ```
  #       <group_name>: {
  #         <key0>: "<value0>",
  #         ...
  #         <keyN>: "<valueN>",
  #       }
  #   ```
  #
  # where `(key0, value0), ..., (keyN, valueN)` are key-value pairs in `props`.
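  #
  # For example (an illustrative sketch based on the call sites below),
  # `gen_prop_group("lint", {"baseline_filename": "lint-baseline.xml"})` yields
  # a fragment along these lines:
  #
  #   ```
  #       lint: {
  #           baseline_filename: "lint-baseline.xml",
  #       },
  #   ```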
  def gen_prop_group(self, group_name, props):
    props_joined = """,
            """.join([f"{k}: \"{v}\"" for (k, v) in props.items()])
    return f"""
          {group_name}: {{
              {props_joined},
          }},"""

  def gen_libs_list_impl(self, library_type, libraries):
    if len(libraries) == 0:
      return ""
    libraries_joined = """,
              """.join(libraries)
    return f"""
          {library_type}: [
              {libraries_joined},
          ],"""

  def gen_libs_list(self, libraries):
    return self.gen_libs_list_impl("libs", libraries)

  def gen_static_libs_list(self, libraries):
    return self.gen_libs_list_impl("static_libs", libraries)

  def gen_java_library_rule(self, name, src_dir, libraries, extra_props):
    return f"""\


      // Library with {src_dir}/ sources for the test.
      java_library {{
          name: "{name}",
          defaults: ["art-run-test-defaults"],{self.gen_libs_list(libraries)}
          srcs: ["{src_dir}/**/*.java"],{extra_props}
      }}"""

  # Can `run_test` be successfully run with TradeFed?
  # TODO(b/147812905): Add run-time support for more tests.
  def is_tradefed_runnable(self, run_test):
    run_test_path = os.path.join(self.art_test_dir, run_test)

    # Skip tests with non-default run rules, unless these run rules
    # can be safely ignored.
    if os.path.isfile(os.path.join(run_test_path, "run.py")):
      if not self.can_ignore_run_script(run_test):
        return False
    # Skip tests known to fail.
    if run_test in known_failing_tests:
      return False
    # All other tests are considered runnable.
    return True

  def is_slow(self, run_test):
    return run_test in known_slow_tests

  def regen_bp_files(self, run_tests, buildable_tests):
    for run_test in run_tests:
      # Remove any previously generated file.
      bp_file = os.path.join(self.art_test_dir, run_test, "Android.bp")
      if os.path.exists(bp_file):
        logging.debug(f"Removing `{bp_file}`.")
        os.remove(bp_file)

    for run_test in buildable_tests:
      self.regen_bp_file(run_test)

  def regen_bp_file(self, run_test):
    """Regenerate Blueprint file for an ART run-test."""

    run_test_path = os.path.join(self.art_test_dir, run_test)
    bp_file = os.path.join(run_test_path, "Android.bp")

    # Optional test metadata (JSON file).
    metadata = self.get_test_metadata(run_test)
    test_suites = metadata.get("test_suites", [])
    is_cts_test = "cts" in test_suites
    is_mcts_test = "mcts-art" in test_suites

    # For now we make it mandatory for an ART CTS test to be an ART
    # MCTS test and vice versa.
    if is_cts_test != is_mcts_test:
      (present, absent) = ("cts", "mcts-art") if is_cts_test else ("mcts-art", "cts")
      logging.error(f"Inconsistent test suites state in metadata for ART run-test `{run_test}`: " +
                    f"`test_suites` contains `{present}` but not `{absent}`")
      sys.exit(1)

    # Do not package non-runnable ART run-tests in ART MTS (see b/363075236).
    if self.is_tradefed_runnable(run_test):
      test_suites.append("mts-art")

    run_test_module_name = ART_RUN_TEST_MODULE_NAME_PREFIX + run_test

    # Set the test configuration template.
    if self.is_tradefed_runnable(run_test):
      if is_cts_test:
        test_config_template = "art-run-test-target-cts-template"
      elif self.is_slow(run_test):
        test_config_template = "art-run-test-target-slow-template"
      else:
        test_config_template = "art-run-test-target-template"
    else:
      test_config_template = "art-run-test-target-no-test-suite-tag-template"

    # Define the `test_suites` property, if test suites are present in
    # the test's metadata.
    test_suites_prop = ""
    if test_suites:
      test_suites_joined = """,
              """.join([f"\"{s}\"" for s in test_suites])
      test_suites_prop = f"""\

          test_suites: [
              {test_suites_joined},
          ],"""

    include_srcs_prop = ""
    if is_checker_test(run_test):
      include_srcs_prop = """\

          // Include the Java source files in the test's artifacts, to make Checker assertions
          // available to the TradeFed test runner.
          include_srcs: true,"""

    # Set the version of the SDK to compile the Java test module
    # against, if needed.
    sdk_version_prop = ""
    if is_cts_test:
      # Have CTS and MCTS test modules use the test API
      # (`test_current`) so that they do not depend on the framework
      # private platform API (`private`), which is the default.
      sdk_version_prop = """
          sdk_version: "test_current","""

    # The default source directory is `src`, except if `src-art` exists.
    if os.path.isdir(os.path.join(run_test_path, "src-art")):
      source_dir = "src-art"
    else:
      source_dir = "src"

    src_library_rules = []
    test_libraries = []
    extra_props = ""
    # Honor the Lint baseline file, if present.
    if os.path.isfile(os.path.join(run_test_path, LINT_BASELINE_FILENAME)):
      extra_props += self.gen_prop_group("lint", {"baseline_filename": LINT_BASELINE_FILENAME})
    if os.path.isdir(os.path.join(run_test_path, "src2")):
      test_library = f"{run_test_module_name}-{source_dir}"
      src_library_rules.append(
          self.gen_java_library_rule(test_library, source_dir, test_libraries, extra_props))
      test_libraries.append(f"\"{test_library}\"")
      source_dir = "src2"

    with open(bp_file, "w") as f:
      logging.debug(f"Writing `{bp_file}`.")
      f.write(textwrap.dedent(f"""\
      // {ADVISORY}

      // Build rules for ART run-test `{run_test}`.

      package {{
          // See: http://go/android-license-faq
          // A large-scale-change added 'default_applicable_licenses' to import
          // all of the 'license_kinds' from "art_license"
          // to get the below license kinds:
          //   SPDX-license-identifier-Apache-2.0
          default_applicable_licenses: ["art_license"],
      }}{''.join(src_library_rules)}

      // Test's Dex code.
      java_test {{
          name: "{run_test_module_name}",
          defaults: ["art-run-test-defaults"],
          test_config_template: ":{test_config_template}",
          srcs: ["{source_dir}/**/*.java"],{self.gen_static_libs_list(test_libraries)}
          data: [
              ":{run_test_module_name}-expected-stdout",
              ":{run_test_module_name}-expected-stderr",
          ],{test_suites_prop}{include_srcs_prop}{sdk_version_prop}
      }}
      """))

      def add_expected_output_genrule(type_str):
        type_str_long = "standard output" if type_str == "stdout" else "standard error"
        in_file = os.path.join(run_test_path, f"expected-{type_str}.txt")
        if os.path.islink(in_file):
          # Genrules are sandboxed, so if we just added the symlink to the srcs list, it would
          # be a dangling symlink in the sandbox. Instead, if we see a symlink, depend on the
          # genrule from the test that the symlink is pointing to instead of the symlink itself.
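          # For instance (hypothetical link), `expected-stdout.txt -> ../2262-miranda-methods/expected-stdout.txt`
          # would make this genrule depend on `art-run-test-2262-miranda-methods-expected-stdout`.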
          link_target = os.readlink(in_file)
          basename = os.path.basename(in_file)
          match = re.fullmatch(r'\.\./([a-zA-Z0-9_-]+)/' + re.escape(basename), link_target)
          if not match:
            sys.exit(f"Error: expected symlink to be '../something/{basename}', got {link_target}")
          f.write(textwrap.dedent(f"""\

            // Test's expected {type_str_long}.
            genrule {{
                name: "{run_test_module_name}-expected-{type_str}",
                out: ["{run_test_module_name}-expected-{type_str}.txt"],
                srcs: [":{ART_RUN_TEST_MODULE_NAME_PREFIX}{match.group(1)}-expected-{type_str}"],
                cmd: "cp -f $(in) $(out)",
            }}
          """))
        else:
          f.write(textwrap.dedent(f"""\

            // Test's expected {type_str_long}.
            genrule {{
                name: "{run_test_module_name}-expected-{type_str}",
                out: ["{run_test_module_name}-expected-{type_str}.txt"],
                srcs: ["expected-{type_str}.txt"],
                cmd: "cp -f $(in) $(out)",
            }}
          """))

      add_expected_output_genrule("stdout")
      add_expected_output_genrule("stderr")


  def regen_test_mapping_file(self, art_run_tests):
    """Regenerate ART's `TEST_MAPPING`."""

    # See go/test-mapping#attributes and
    # https://source.android.com/docs/core/tests/development/test-mapping
    # for more information about Test Mapping test groups.

    # ART run-tests used in `*presubmit` test groups, used both in pre- and post-submit runs.
    presubmit_run_test_module_names = [ART_RUN_TEST_MODULE_NAME_PREFIX + t
                                       for t in art_run_tests
                                       if t not in postsubmit_only_tests]
    # ART run-tests used in the `postsubmit` test group, used in post-submit runs only.
    postsubmit_run_test_module_names = [ART_RUN_TEST_MODULE_NAME_PREFIX + t
                                        for t in art_run_tests
                                        if t in postsubmit_only_tests]

    def gen_tests_dict(tests, excluded_test_cases = {}, excluded_test_modules = [], suffix = ""):
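      # For instance (illustrative values taken from the exclusion lists above), with
      #   tests = ["art_standalone_compiler_tests"] and
      #   excluded_test_cases = {"art_standalone_compiler_tests": ["JniCompilerTest*"]},
      # this returns:
      #   [{"name": "art_standalone_compiler_tests",
      #     "options": [{"exclude-filter": "JniCompilerTest*"}]}]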
      return [
          ({"name": t + suffix,
            "options": [
                {"exclude-filter": e}
                for e in excluded_test_cases[t]
            ]}
           if t in excluded_test_cases
           else {"name": t + suffix})
          for t in tests
          if t not in excluded_test_modules
      ]

    # Mainline presubmits.
    mainline_presubmit_apex_suffix = "[com.google.android.art.apex]"
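    # With this suffix, a Mainline presubmit entry looks like, e.g.,
    # `art-run-test-001-HelloWorld[com.google.android.art.apex]`.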
    mainline_other_presubmit_tests = []
    mainline_presubmit_tests = (mainline_other_presubmit_tests + presubmit_run_test_module_names +
                                art_gtest_mainline_presubmit_module_names)
    mainline_presubmit_tests_dict = \
      gen_tests_dict(mainline_presubmit_tests,
                     failing_tests_excluded_from_mainline_presubmits,
                     [],
                     mainline_presubmit_apex_suffix)

    # ART Mainline presubmit tests, without the APEX suffix.
    art_mainline_presubmit_tests_dict = \
        gen_tests_dict(mainline_presubmit_tests,
                       failing_tests_excluded_from_mainline_presubmits,
                       [],
                       "")

    # Android Virtualization Framework presubmits
    avf_presubmit_tests = ["ComposHostTestCases"]
    avf_presubmit_tests_dict = gen_tests_dict(avf_presubmit_tests,
                                              failing_tests_excluded_from_test_mapping)

    # Presubmits.
    other_presubmit_tests = [
        "ArtServiceTests",
        "BootImageProfileTest",
        "CtsJdwpTestCases",
        "art-apex-update-rollback",
        "art_standalone_dexpreopt_tests",
    ]
    presubmit_tests = (other_presubmit_tests + presubmit_run_test_module_names +
                       art_gtest_presubmit_module_names)
    presubmit_tests_dict = gen_tests_dict(presubmit_tests,
                                          failing_tests_excluded_from_test_mapping)
    hwasan_presubmit_tests_dict = gen_tests_dict(presubmit_tests,
                                                 failing_tests_excluded_from_test_mapping,
                                                 known_failing_on_hwasan_tests)

    # Postsubmits.
    postsubmit_tests = postsubmit_run_test_module_names + art_gtest_postsubmit_module_names
    postsubmit_tests_dict = gen_tests_dict(postsubmit_tests,
                                           failing_tests_excluded_from_test_mapping)

    # Use an `OrderedDict` container to preserve the order in which items are inserted.
    # Do not produce an entry for a test group if it is empty.
    test_mapping_dict = collections.OrderedDict([
        (test_group_name, test_group_dict)
        for (test_group_name, test_group_dict)
        in [
            ("art-mainline-presubmit", art_mainline_presubmit_tests_dict),
            ("mainline-presubmit", mainline_presubmit_tests_dict),
            ("presubmit", presubmit_tests_dict),
            ("hwasan-presubmit", hwasan_presubmit_tests_dict),
            ("avf-presubmit", avf_presubmit_tests_dict),
            ("postsubmit", postsubmit_tests_dict),
        ]
        if test_group_dict
    ])
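    # The resulting `TEST_MAPPING` contents have the following shape (illustrative
    # excerpt only):
    #
    #   {
    #     "presubmit": [
    #       {"name": "CtsJdwpTestCases"},
    #       ...
    #     ]
    #   }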
    test_mapping_contents = json.dumps(test_mapping_dict, indent = INDENT)

    test_mapping_file = os.path.join(self.art_dir, "TEST_MAPPING")
    with open(test_mapping_file, "w") as f:
      logging.debug(f"Writing `{test_mapping_file}`.")
      f.write(f"// {ADVISORY}\n")
      f.write(test_mapping_contents)
      f.write("\n")

  def create_mts_test_shard(self, tests_description, tests, shard_num, copyright_year,
                            comments = []):
    """Factory method instantiating an `MtsTestShard`."""
    return self.MtsTestShard(self.mts_config_dir, tests_description, tests, shard_num,
                             copyright_year, comments)

  class MtsTestShard:
    """Class encapsulating data and generation logic for an ART MTS test shard."""

    def __init__(self, mts_config_dir, tests_description, tests, shard_num, copyright_year,
                 comments):
      self.mts_config_dir = mts_config_dir
      self.tests_description = tests_description
      self.tests = tests
      self.shard_num = shard_num
      self.copyright_year = copyright_year
      self.comments = comments

    def shard_id(self):
      return f"{self.shard_num:02}"

    def test_plan_name(self):
      return "mts-art-shard-" + self.shard_id()

    def test_list_name(self):
      return "mts-art-tests-list-user-shard-" + self.shard_id()
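    # For example, shard 0 uses the test plan name `mts-art-shard-00` and the test
    # list name `mts-art-tests-list-user-shard-00`.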

    def regen_test_plan_file(self):
      """Regenerate ART MTS test plan file shard (`mts-art-shard-<shard_num>.xml`)."""
      root = xml.dom.minidom.Document()

      advisory_header = root.createComment(f" {ADVISORY} ")
      root.appendChild(advisory_header)
      copyright_header = root.createComment(copyright_header_text(self.copyright_year))
      root.appendChild(copyright_header)

      configuration = root.createElement("configuration")
      root.appendChild(configuration)
      configuration.setAttribute(
          "description",
          f"Run {self.test_plan_name()} from a preexisting MTS installation.")

      # Included XML files.
      included_xml_files = ["mts", self.test_list_name()]
      # Special case for the test plan of shard 03 (ART gtests), where we also
      # include ART MTS eng-only tests.
      #
      # TODO(rpl): Restructure the MTS generation logic to avoid special-casing
      # at that level of the generator.
      if self.shard_num == 3:
        included_xml_files.append(ENG_ONLY_TEST_LIST_NAME)
      for xml_file in included_xml_files:
        include = root.createElement("include")
        include.setAttribute("name", xml_file)
        configuration.appendChild(include)

      # Test plan name.
      option = root.createElement("option")
      option.setAttribute("name", "plan")
      option.setAttribute("value", self.test_plan_name())
      configuration.appendChild(option)

      xml_str = root.toprettyxml(indent = XML_INDENT, encoding = "utf-8")

      test_plan_file = os.path.join(self.mts_config_dir, self.test_plan_name() + ".xml")
      with open(test_plan_file, "wb") as f:
        logging.debug(f"Writing `{test_plan_file}`.")
        f.write(xml_str)

    def regen_test_list_file(self):
      """Regenerate ART MTS test list file (`mts-art-tests-list-user-shard-<shard_num>.xml`)."""
      configuration_description = \
        f"List of ART MTS tests that do not need root access (shard {self.shard_id()})"
      test_list_file = os.path.join(self.mts_config_dir, self.test_list_name() + ".xml")
      gen_mts_test_list_file(self.tests, test_list_file, self.copyright_year,
                             configuration_description, self.tests_description, self.comments)

  def regen_mts_art_tests_list_user_file(self, num_mts_art_run_test_shards):
    """Regenerate ART MTS test list file (`mts-art-tests-list-user.xml`)."""
    root = xml.dom.minidom.Document()

    advisory_header = root.createComment(f" {ADVISORY} ")
    root.appendChild(advisory_header)
    copyright_header = root.createComment(copyright_header_text(2020))
    root.appendChild(copyright_header)

    configuration = root.createElement("configuration")
    root.appendChild(configuration)
    configuration.setAttribute("description", "List of ART MTS tests that do not need root access.")

    # Included XML files.
    for s in range(num_mts_art_run_test_shards):
      include = root.createElement("include")
      include.setAttribute("name", f"mts-art-tests-list-user-shard-{s:02}")
      configuration.appendChild(include)

    def append_test_exclusion(test):
      option = root.createElement("option")
      option.setAttribute("name", "compatibility:exclude-filter")
      option.setAttribute("value", test)
      configuration.appendChild(option)

    # Excluded flaky tests.
    xml_comment = root.createComment(" Excluded flaky tests (b/209958457). ")
    configuration.appendChild(xml_comment)
    for module in flaky_tests_excluded_from_mts:
      for testcase in flaky_tests_excluded_from_mts[module]:
        append_test_exclusion(f"{module} {testcase}")

    # Excluded failing tests.
    xml_comment = root.createComment(" Excluded failing tests (b/247108425). ")
    configuration.appendChild(xml_comment)
    for module in failing_tests_excluded_from_mts_and_mainline_presubmits:
      for testcase in failing_tests_excluded_from_mts_and_mainline_presubmits[module]:
        append_test_exclusion(f"{module} {testcase}")

    xml_str = root.toprettyxml(indent = XML_INDENT, encoding = "utf-8")

    mts_art_tests_list_user_file = os.path.join(self.mts_config_dir, "mts-art-tests-list-user.xml")
    with open(mts_art_tests_list_user_file, "wb") as f:
      logging.debug(f"Writing `{mts_art_tests_list_user_file}`.")
      f.write(xml_str)

  def regen_art_mts_files(self, art_run_tests, art_jvmti_cts_tests):
    """Regenerate ART MTS definition files."""

    # Remove any previously generated MTS ART test plan shard (`mts-art-shard-[0-9]+.xml`)
    # and any test list shard (`mts-art-tests-list-user-shard-[0-9]+.xml`).
    old_test_plan_shards = sorted([
        test_plan_shard
        for test_plan_shard in os.listdir(self.mts_config_dir)
        if re.match("^mts-art-(tests-list-user-)?shard-[0-9]+.xml$", test_plan_shard)])
    for shard in old_test_plan_shards:
      shard_path = os.path.join(self.mts_config_dir, shard)
      if os.path.exists(shard_path):
        logging.debug(f"Removing `{shard_path}`.")
        os.remove(shard_path)

    mts_test_shards = []

    # ART run-tests shard(s).
    art_run_test_module_names = [ART_RUN_TEST_MODULE_NAME_PREFIX + t for t in art_run_tests]
    art_run_test_shards = split_list(art_run_test_module_names, NUM_MTS_ART_RUN_TEST_SHARDS)
    for i in range(len(art_run_test_shards)):
      art_tests_shard_i_tests = art_run_test_shards[i]
      art_tests_shard_i = self.create_mts_test_shard(
          "ART run-tests", art_tests_shard_i_tests, i, 2020,
          ["TODO(rpl): Find a way to express this list in a more concise fashion."])
      mts_test_shards.append(art_tests_shard_i)

    # CTS Libcore non-OJ tests (`CtsLibcoreTestCases`) shard.
    cts_libcore_tests_shard_num = len(mts_test_shards)
    cts_libcore_tests_shard = self.create_mts_test_shard(
        "CTS Libcore non-OJ tests", ["CtsLibcoreTestCases"], cts_libcore_tests_shard_num, 2020)
    mts_test_shards.append(cts_libcore_tests_shard)

    # Other CTS tests shard.
    other_cts_tests_shard_num = len(mts_test_shards)
    other_cts_libcore_tests_shard_tests = [
        "CtsLibcoreApiEvolutionTestCases",
        "CtsLibcoreFileIOTestCases",
        "CtsLibcoreJsr166TestCases",
        "CtsLibcoreLegacy22TestCases",
        "CtsLibcoreOjTestCases",
        "CtsLibcoreWycheproofBCTestCases",
        "MtsLibcoreOkHttpTestCases",
        "MtsLibcoreBouncyCastleTestCases",
    ]
    other_cts_tests_shard_tests = art_jvmti_cts_tests + other_cts_libcore_tests_shard_tests
    other_cts_tests_shard = self.create_mts_test_shard(
        "Other CTS tests", other_cts_tests_shard_tests, other_cts_tests_shard_num, 2021)
    mts_test_shards.append(other_cts_tests_shard)

    # ART gtests shard.
    art_gtests_shard_num = len(mts_test_shards)
    art_gtests_shard_tests = art_gtest_mts_user_module_names
    art_gtests_shard = self.create_mts_test_shard(
        "ART gtests", art_gtests_shard_tests, art_gtests_shard_num, 2022)
    mts_test_shards.append(art_gtests_shard)

    for s in mts_test_shards:
      s.regen_test_plan_file()
      s.regen_test_list_file()

    # Generate the MTS test list file of "eng-only" tests (tests that
    # need root access to the device-under-test and are not part of
    # "user" test plans).
    #
    # TODO(rpl): Refactor the MTS file generation logic to better
    # handle the special case of "eng-only" tests, which do not play
    # well with `MtsTestShard` at the moment.
    eng_only_test_list_file = os.path.join(self.mts_config_dir, ENG_ONLY_TEST_LIST_NAME + ".xml")
    gen_mts_test_list_file(
        art_gtest_eng_only_module_names, eng_only_test_list_file,
        copyright_year = 2020,
        configuration_description = "List of ART MTS tests that need root access.",
        tests_description = "ART gtests")

    self.regen_mts_art_tests_list_user_file(len(mts_test_shards))

  def regen_test_files(self, regen_art_mts):
    """Regenerate ART test files.

    Args:
      regen_art_mts: If true, also regenerate the ART MTS definition.
    """
    run_tests = self.enumerate_run_tests()

    # Create a list of the tests that can currently be built, and for
    # which a Blueprint file is to be generated.
    buildable_tests = list(filter(self.is_soong_buildable, run_tests))

    # Create a list of the tests that can be built and run
    # (successfully). These tests are to be added to ART's
    # `TEST_MAPPING` file and also tagged as part of TradeFed's
    # `art-target-run-test` test suite via the `test-suite-tag` option
    # in their configuration file.
    expected_succeeding_tests = list(filter(self.is_tradefed_runnable,
                                            buildable_tests))

    # Regenerate Blueprint files.
    # ---------------------------

    self.regen_bp_files(run_tests, buildable_tests)

    buildable_tests_percentage = int(len(buildable_tests) * 100 / len(run_tests))

    print(f"Generated Blueprint files for {len(buildable_tests)} ART run-tests out of"
          f" {len(run_tests)} ({buildable_tests_percentage}%).")

    # Regenerate `TEST_MAPPING` file.
    # -------------------------------

    # Note: We only include ART run-tests expected to succeed for now.
    num_expected_succeeding_tests = len(expected_succeeding_tests)

    presubmit_run_tests = set(expected_succeeding_tests).difference(postsubmit_only_tests)
    num_presubmit_run_tests = len(presubmit_run_tests)
    presubmit_run_tests_percentage = int(
        num_presubmit_run_tests * 100 / num_expected_succeeding_tests)

    num_mainline_presubmit_run_tests = num_presubmit_run_tests
    mainline_presubmit_run_tests_percentage = presubmit_run_tests_percentage

    postsubmit_run_tests = set(expected_succeeding_tests).intersection(postsubmit_only_tests)
    num_postsubmit_run_tests = len(postsubmit_run_tests)
    postsubmit_run_tests_percentage = int(
        num_postsubmit_run_tests * 100 / num_expected_succeeding_tests)

    self.regen_test_mapping_file(expected_succeeding_tests)

    expected_succeeding_tests_percentage = int(
        num_expected_succeeding_tests * 100 / len(run_tests))

    num_gtests = len(art_gtest_module_names)

    num_presubmit_gtests = len(art_gtest_presubmit_module_names)
    presubmit_gtests_percentage = int(num_presubmit_gtests * 100 / num_gtests)

    num_mainline_presubmit_gtests = len(art_gtest_mainline_presubmit_module_names)
    mainline_presubmit_gtests_percentage = int(num_mainline_presubmit_gtests * 100 / num_gtests)

    num_postsubmit_gtests = len(art_gtest_postsubmit_module_names)
    postsubmit_gtests_percentage = int(num_postsubmit_gtests * 100 / num_gtests)

    print(f"Generated TEST_MAPPING entries for {num_expected_succeeding_tests} ART run-tests out"
          f" of {len(run_tests)} ({expected_succeeding_tests_percentage}%):")
    for (num_tests, test_kind, tests_percentage, test_group_name) in [
        (num_mainline_presubmit_run_tests, "ART run-tests", mainline_presubmit_run_tests_percentage,
         "art-mainline-presubmit"),
        (num_mainline_presubmit_run_tests, "ART run-tests", mainline_presubmit_run_tests_percentage,
         "mainline-presubmit"),
        (num_presubmit_run_tests, "ART run-tests", presubmit_run_tests_percentage, "presubmit"),
        (num_postsubmit_run_tests, "ART run-tests", postsubmit_run_tests_percentage, "postsubmit"),
        (num_mainline_presubmit_gtests, "ART gtests", mainline_presubmit_gtests_percentage,
         "mainline-presubmit"),
        (num_presubmit_gtests, "ART gtests", presubmit_gtests_percentage, "presubmit"),
        (num_postsubmit_gtests, "ART gtests", postsubmit_gtests_percentage, "postsubmit"),
    ]:
      print(
          f"  {num_tests:3d} {test_kind} ({tests_percentage}%) in `{test_group_name}` test group.")
    print("""  Note: Tests in `*presubmit` test groups are executed in pre- and
        post-submit test runs. Tests in the `postsubmit` test group
        are only executed in post-submit test runs.""")

    # Regenerate ART MTS definition (optional).
    # -----------------------------------------

    if regen_art_mts:
      self.regen_art_mts_files(expected_succeeding_tests, self.enumerate_jvmti_cts_tests())
      print(f"Generated ART MTS entries for {num_expected_succeeding_tests} ART run-tests out"
            f" of {len(run_tests)} ({expected_succeeding_tests_percentage}%).")

def main():
  if "ANDROID_BUILD_TOP" not in os.environ:
    logging.error("ANDROID_BUILD_TOP environment variable is not set; did you forget to run `lunch`?")
    sys.exit(1)

  parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      description=textwrap.dedent("Regenerate some ART test related files."),
      epilog=textwrap.dedent("""\
        Regenerate ART run-tests Blueprint files, ART's `TEST_MAPPING` file, and
        optionally the ART MTS (Mainline Test Suite) definition.
        """))
  parser.add_argument("-m", "--regen-art-mts", help="regenerate the ART MTS definition as well",
                      action="store_true")
  parser.add_argument("-v", "--verbose", help="enable verbose output", action="store_true")
  args = parser.parse_args()

  if args.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  generator = Generator(os.path.join(os.environ["ANDROID_BUILD_TOP"]))
  generator.regen_test_files(args.regen_art_mts)


if __name__ == "__main__":
  main()