# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Module containing utility functions for reporting results."""


import logging
import os
import re

from pylib import constants
from pylib.results.flakiness_dashboard import results_uploader
from pylib.utils import logging_utils


def _LogToFile(results, test_type, suite_name):
  """Log results to local files which can be used for aggregation later."""
  log_file_path = os.path.join(constants.GetOutDirectory(), 'test_logs')
  if not os.path.exists(log_file_path):
    os.mkdir(log_file_path)
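  # Derive the log file name from the test type, replacing any non-word
  # characters with underscores (e.g. 'Unit test' -> 'unit_test.log').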
  full_file_name = os.path.join(
      log_file_path, re.sub(r'\W', '_', test_type).lower() + '.log')
  if not os.path.exists(full_file_name):
    with open(full_file_name, 'w') as log_file:
      print(
          '\n%s results for %s build %s:' %
          (test_type, os.environ.get('BUILDBOT_BUILDERNAME'),
           os.environ.get('BUILDBOT_BUILDNUMBER')),
          file=log_file)

  logging.info('Writing results to %s.', full_file_name)
  with open(full_file_name, 'a') as log_file:
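    # Truncate suite names longer than 25 characters; 'suite_name[25:]' is
    # falsy for short names, so the ellipsis is only added when truncating.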
    shortened_suite_name = suite_name[:25] + (suite_name[25:] and '...')
    print(
        '%s%s' % (shortened_suite_name.ljust(30), results.GetShortForm()),
        file=log_file)


def _LogToFlakinessDashboard(results, test_type, test_package,
                             flakiness_server):
  """Upload results to the flakiness dashboard."""
  logging.info('Upload results for test type "%s", test package "%s" to %s',
               test_type, test_package, flakiness_server)

  try:
    # TODO(jbudorick): remove Instrumentation once instrumentation tests
    # switch to platform mode.
    if test_type in ('instrumentation', 'Instrumentation'):
      if flakiness_server == constants.UPSTREAM_FLAKINESS_SERVER:
        assert test_package in ['ContentShellTest',
                                'ChromePublicTest',
                                'ChromeSyncShellTest',
                                'SystemWebViewShellLayoutTest',
                                'WebViewInstrumentationTest']
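        # Note: rstrip('test') removes any trailing 't', 'e' or 's'
        # characters rather than the literal 'Test' suffix, e.g.
        # 'SystemWebViewShellLayoutTest' maps to
        # 'systemwebviewshelllayou_instrumentation_tests'.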
        dashboard_test_type = ('%s_instrumentation_tests' %
                               test_package.lower().rstrip('test'))
      # Downstream server.
      else:
        dashboard_test_type = 'Chromium_Android_Instrumentation'

    elif test_type == 'gtest':
      dashboard_test_type = test_package

    else:
      logging.warning('Invalid test type: %s', test_type)
      return

    results_uploader.Upload(
        results, flakiness_server, dashboard_test_type)

  except Exception:  # pylint: disable=broad-except
    logging.exception('Failure while logging to %s', flakiness_server)


def LogFull(results, test_type, test_package, annotation=None,
            flakiness_server=None):
  """Log the test results for the test suite.

  The results will be logged in three different ways:
    1. Log to stdout.
    2. Log to local files for aggregating multiple test steps
       (on buildbots only).
    3. Log to the flakiness dashboard (on buildbots only).

  Args:
    results: An instance of TestRunResults.
    test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
    test_package: Test package name (e.g. 'ipc_tests' for gtests,
                  'ContentShellTest' for instrumentation tests).
    annotation: If an instrumentation test type, a list of annotations
                (e.g. ['Feature', 'SmallTest']).
    flakiness_server: If provided, upload the results to the flakiness
                      dashboard at this URL.
  """
  # pylint doesn't like how colorama sets up its color enums.
  # pylint: disable=no-member
  black_on_white = (logging_utils.BACK.WHITE, logging_utils.FORE.BLACK)
  with logging_utils.OverrideColor(logging.CRITICAL, black_on_white):
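    # Dump the detailed per-test logs only when at least one test failed.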
    if not results.DidRunPass():
      logging.critical('*' * 80)
      logging.critical('Detailed Logs')
      logging.critical('*' * 80)
      for line in results.GetLogs().splitlines():
        logging.critical(line)
    logging.critical('*' * 80)
    logging.critical('Summary')
    logging.critical('*' * 80)
    # Color the entire summary uniformly, depending on whether any line
    # contains 'FAILED'.
    if any('FAILED' in line for line in results.GetGtestForm().splitlines()):
      # Red on white, dim.
      color = (logging_utils.BACK.WHITE, logging_utils.FORE.RED,
               logging_utils.STYLE.DIM)
    else:
      # Green on white, dim.
      color = (logging_utils.BACK.WHITE, logging_utils.FORE.GREEN,
               logging_utils.STYLE.DIM)
    with logging_utils.OverrideColor(logging.CRITICAL, color):
      for line in results.GetGtestForm().splitlines():
        logging.critical(line)
    logging.critical('*' * 80)

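  # BUILDBOT_BUILDERNAME is only set in the buildbot environment, so local
  # file logging and dashboard uploads are skipped for local runs.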
  if os.environ.get('BUILDBOT_BUILDERNAME'):
    # It is possible to have multiple buildbot steps for the same
    # instrumentation test package using different annotations.
    if annotation and len(annotation) == 1:
      suite_name = annotation[0]
    else:
      suite_name = test_package
    _LogToFile(results, test_type, suite_name)

    if flakiness_server:
      _LogToFlakinessDashboard(results, test_type, test_package,
                               flakiness_server)
136