xref: /aosp_15_r20/external/bazelbuild-rules_android/test/bashunit/unittest.bash (revision 9e965d6fece27a77de5377433c2f7e6999b8cc0b)
1#!/bin/bash
2#
3# Copyright 2015 The Bazel Authors. All rights reserved.
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9#    http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17# Common utility file for Bazel shell tests
18#
19# unittest.bash: a unit test framework in Bash.
20#
21# A typical test suite looks like so:
22#
23#   ------------------------------------------------------------------------
24#   #!/bin/bash
25#
26#   source path/to/unittest.bash || exit 1
27#
28#   # Test that foo works.
29#   function test_foo() {
30#     foo >$TEST_log || fail "foo failed";
31#     expect_log "blah" "Expected to see 'blah' in output of 'foo'."
32#   }
33#
34#   # Test that bar works.
35#   function test_bar() {
36#     bar 2>$TEST_log || fail "bar failed";
37#     expect_not_log "ERROR" "Unexpected error from 'bar'."
38#     ...
39#     assert_equals $x $y
40#   }
41#
42#   run_suite "Test suite for blah"
43#   ------------------------------------------------------------------------
44#
45# Each test function is considered to pass iff fail() is not called
46# while it is active.  fail() may be called directly, or indirectly
47# via other assertions such as expect_log().  run_suite must be called
48# at the very end.
49#
50# A test suite may redefine functions "set_up" and/or "tear_down";
51# these functions are executed before and after each test function,
# respectively.  Similarly, "cleanup" and "timeout" may be redefined,
# and these functions are called upon exit (of any kind) or a timeout.
54#
55# The user can pass --test_filter to blaze test to select specific tests
56# to run with Bash globs. A union of tests matching any of the provided globs
57# will be run. Additionally the user may define TESTS=(test_foo test_bar ...) to
58# specify a subset of test functions to execute, for example, a working set
59# during debugging. By default, all functions called test_* will be executed.
60#
61# This file provides utilities for assertions over the output of a
62# command.  The output of the command under test is directed to the
63# file $TEST_log, and then the expect_log* assertions can be used to
64# test for the presence of certain regular expressions in that file.
65#
66# The test framework is responsible for restoring the original working
67# directory before each test.
68#
69# The order in which test functions are run is not defined, so it is
70# important that tests clean up after themselves.
71#
72# Each test will be run in a new subshell.
73#
74# Functions named __* are not intended for use by clients.
75#
76# This framework implements the "test sharding protocol".
77#
78
# Refuse to run under any shell other than bash; the framework relies on
# bashisms (arrays, [[ ]], FUNCNAME, PIPESTATUS, ...).
[[ -n "$BASH_VERSION" ]] ||
  { echo "unittest.bash only works with bash!" >&2; exit 1; }

# Signal to code under test that it is running inside a Bazel shell test.
export BAZEL_SHELL_TEST=1

# Absolute directory containing this script, robust against callers that `cd`.
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

# Load the environment support utilities.
source "${DIR}/unittest_utils.sh" || { echo "unittest_utils.sh not found" >&2; exit 1; }
88
#### Global variables:

TEST_name=""                    # The name of the current test.

TEST_log=$TEST_TMPDIR/log       # The log file over which the
                                # expect_log* assertions work.  Must
                                # be absolute to be robust against
                                # tests invoking 'cd'!

TEST_passed="true"              # The result of the current test;
                                # failed assertions cause this to
                                # become false.

# These variables may be overridden by the test suite:

TESTS=()                        # A subset or "working set" of test
                                # functions that should be run.  By
                                # default, all tests called test_* are
                                # run.

_TEST_FILTERS=()                # List of globs to use to filter the tests.
                                # If non-empty, all tests matching at least one
                                # of the globs are run and test list provided in
                                # the arguments is ignored if present.

__in_tear_down=0                # Indicates whether we are in `tear_down` phase
                                # of test. Used to avoid re-entering `tear_down`
                                # on failures within it.

# Legacy: test names may also arrive as positional script arguments.
if (( $# > 0 )); then
  # Print the warning in a subshell so the IFS change does not leak out.
  (
    IFS=':'
    echo "WARNING: Passing test names in arguments (--test_arg) is deprecated, please use --test_filter='$*' instead." >&2
  )

  # Legacy behavior is to ignore missing regexp, but with errexit
  # the following line fails without || true.
  # TODO(dmarting): maybe we should revisit the way of selecting
  # test with that framework (use Bazel's environment variable instead).
  TESTS=($(for i in "$@"; do echo $i; done | grep ^test_ || true))
  if (( ${#TESTS[@]} == 0 )); then
    echo "WARNING: Arguments do not specify tests!" >&2
  fi
fi
# TESTBRIDGE_TEST_ONLY contains the value of --test_filter, if any. We want to
# preferentially use that instead of $@ to determine which tests to run.
if [[ ${TESTBRIDGE_TEST_ONLY:-} != "" ]]; then
  if (( ${#TESTS[@]} != 0 )); then
    echo "WARNING: Both --test_arg and --test_filter specified, ignoring --test_arg" >&2
    TESTS=()
  fi
  # Split TESTBRIDGE_TEST_ONLY on colon and store it in `_TEST_FILTERS` array.
  IFS=':' read -r -a _TEST_FILTERS <<< "$TESTBRIDGE_TEST_ONLY"
fi

TEST_verbose="true"             # Whether or not to be verbose.  A
                                # command; "true" or "false" are
                                # acceptable.  The default is: true.

TEST_script="$0"                # Full path to test script
# Check if the script path is absolute, if not prefix the PWD.
if [[ ! "$TEST_script" = /* ]]; then
  TEST_script="${PWD}/$0"
fi
153
154
155#### Internal functions
156
# Usage: __show_log
# Dumps $TEST_log (or a placeholder when the file is absent) framed by
# separator lines, for inclusion in failure output.
function __show_log() {
    local top="-- Test log: -----------------------------------------------------------"
    local bottom="------------------------------------------------------------------------"
    echo "$top"
    # NB: the `||` arm also fires if `cat` itself fails, which keeps this
    # reporting helper from tripping errexit while a failure is being printed.
    [[ -e $TEST_log ]] && cat "$TEST_log" || echo "(Log file did not exist.)"
    echo "$bottom"
}
162
# Usage: __pad <title> <pad-char>
# Print $title padded to 80 columns with $pad_char.
function __pad() {
    local heading=$1
    local fill=$2
    # `head` closes its input after 80 bytes, so the writers inside the group
    # may die with SIGPIPE mid-write; `|| true` swallows that expected failure.
    {
        printf '%s%s %s ' "$fill" "$fill" "$heading"
        printf '%80s' ' ' | tr ' ' "$fill"
    } | head -c 80 || true
    echo
}
176
177#### Exported functions
178
# Usage: init_test ...
# Deprecated.  Kept only for backward compatibility; does nothing.
function init_test() {
    true
}
184
185
# Usage: set_up
# Hook invoked before every test function.  Test suites redefine this to
# provision per-test fixtures; the default is a successful no-op.
function set_up() {
    true
}
191
# Usage: tear_down
# Hook invoked after every test function.  Test suites redefine this to
# release per-test fixtures; the default is a successful no-op.
function tear_down() {
    true
}
197
# Usage: cleanup
# Hook invoked upon eventual exit of the test suite.  May be redefined by
# the test suite; the default is a successful no-op.
function cleanup() {
    true
}
204
# Usage: timeout
# Hook invoked upon early exit from a test due to timeout.  May be
# redefined by the test suite; the default is a successful no-op.
function timeout() {
    true
}
210
# Usage: testenv_set_up
# Hook invoked prior to set_up.  Reserved for testenv.sh; the default is a
# successful no-op.
function testenv_set_up() {
    true
}
216
# Usage: testenv_tear_down
# Hook invoked after tear_down.  Reserved for testenv.sh; the default is a
# successful no-op.
function testenv_tear_down() {
    true
}
222
# Usage: fail <message> [<message> ...]
# Print failure message with context information, and mark the test as
# a failure.  The context includes a stacktrace including the longest sequence
# of calls outside this module.  (We exclude the top and bottom portions of
# the stack because they just add noise.)  Also prints the contents of
# $TEST_log.
#
# Runs the tear-down hooks (unless already inside tear_down) and then exits
# the test subshell with status 1, so no statement after a fail() call runs.
function fail() {
    __show_log >&2
    echo "${TEST_name} FAILED: $*." >&2
    # Keep the original error message if we fail in `tear_down` after a failure.
    # ($TEST_TMPDIR/__fail is read later by run_suite to build the XML report.)
    [[ "${TEST_passed}" == "true" ]] && echo "$@" >"$TEST_TMPDIR"/__fail
    TEST_passed="false"
    __show_stack
    # Cleanup as we are leaving the subshell now
    __run_tear_down_after_failure
    exit 1
}
240
# Runs the user and testenv tear-down hooks exactly once after a failure.
# A failure raised while already inside `tear_down` must not re-enter the
# hooks, or we would recurse forever.
function __run_tear_down_after_failure() {
    if (( __in_tear_down )); then
        return
    fi
    __in_tear_down=1
    echo -e "\nTear down:\n" >&2
    tear_down
    testenv_tear_down
}
250
# Usage: warn <message>
# Print a test warning with context information.
# The context includes a stacktrace including the longest sequence
# of calls outside this module.  (We exclude the top and bottom portions of
# the stack because they just add noise.)
function warn() {
    local full_msg="${TEST_name} WARNING: $1."
    __show_log >&2
    echo "$full_msg" >&2
    __show_stack

    # Also persist the warning when a collection file has been configured.
    if [[ -n "${TEST_WARNINGS_OUTPUT_FILE:-}" ]]; then
      echo "$full_msg" >> "$TEST_WARNINGS_OUTPUT_FILE"
    fi
}
265
# Usage: __show_stack
# Prints the portion of the stack that does not belong to this module,
# i.e. the user's code that called a failing assertion.  Stack may not
# be available if Bash is reading commands from stdin; an error is
# printed in that case.
__show_stack() {
    local i=0
    local trace_found=0

    # Skip over active calls within this module:
    while (( i < ${#FUNCNAME[@]} )) && [[ ${BASH_SOURCE[i]:-} == "${BASH_SOURCE[0]}" ]]; do
       (( ++i ))
    done

    # Show all calls until the next one within this module (typically run_suite):
    while (( i < ${#FUNCNAME[@]} )) && [[ ${BASH_SOURCE[i]:-} != "${BASH_SOURCE[0]}" ]]; do
        # Read online docs for BASH_LINENO to understand the strange offset.
        # Undefined can occur in the BASH_SOURCE stack apparently when one exits from a subshell
        echo "${BASH_SOURCE[i]:-"Unknown"}:${BASH_LINENO[i - 1]:-"Unknown"}: in call to ${FUNCNAME[i]:-"Unknown"}" >&2
        (( ++i ))
        trace_found=1
    done

    # No frame outside this module was seen (e.g. the script was piped into
    # bash, so no file-based call stack exists).
    (( trace_found )) || echo "[Stack trace not available]" >&2
}
291
# Usage: expect_log <regexp> [error-message]
# Asserts that $TEST_log matches regexp.  Prints the contents of
# $TEST_log and the specified (optional) error message otherwise, and
# returns non-zero.
function expect_log() {
    local pattern=$1
    local message=${2:-Expected regexp "$pattern" not found}
    # Quote "$TEST_log": an unquoted expansion would word-split if the
    # temp directory path ever contains whitespace.
    grep -sq -- "$pattern" "$TEST_log" && return 0

    fail "$message"
    return 1
}
304
# Usage: expect_log_warn <regexp> [error-message]
# Warns if $TEST_log does not match regexp.  Prints the contents of
# $TEST_log and the specified (optional) error message on mismatch.
function expect_log_warn() {
    local pattern=$1
    local message=${2:-Expected regexp "$pattern" not found}
    # Quote "$TEST_log" so paths containing whitespace do not word-split.
    grep -sq -- "$pattern" "$TEST_log" && return 0

    warn "$message"
    return 1
}
316
317# Usage: expect_log_once <regexp> [error-message]
318# Asserts that $TEST_log contains one line matching <regexp>.
319# Prints the contents of $TEST_log and the specified (optional)
320# error message otherwise, and returns non-zero.
321function expect_log_once() {
322    local pattern=$1
323    local message=${2:-Expected regexp "$pattern" not found exactly once}
324    expect_log_n "$pattern" 1 "$message"
325}
326
# Usage: expect_log_n <regexp> <count> [error-message]
# Asserts that $TEST_log contains <count> lines matching <regexp>.
# Prints the contents of $TEST_log and the specified (optional)
# error message otherwise, and returns non-zero.
function expect_log_n() {
    local pattern=$1
    local expectednum=${2:-1}
    local message=${3:-Expected regexp "$pattern" not found exactly $expectednum times}
    local count
    # `grep -c` exits non-zero when the count is 0 but still prints "0";
    # `|| true` keeps that from tripping errexit (the old `local count=$(...)`
    # form masked the status implicitly).  "$TEST_log" is quoted against
    # word-splitting.
    count=$(grep -sc -- "$pattern" "$TEST_log" || true)
    (( count == expectednum )) && return 0
    fail "$message"
    return 1
}
340
# Usage: expect_not_log <regexp> [error-message]
# Asserts that $TEST_log does not match regexp.  Prints the contents
# of $TEST_log and the specified (optional) error message otherwise, and
# returns non-zero.
function expect_not_log() {
    local pattern=$1
    local message=${2:-Unexpected regexp "$pattern" found}
    # Quote "$TEST_log" so paths containing whitespace do not word-split.
    grep -sq -- "$pattern" "$TEST_log" || return 0

    fail "$message"
    return 1
}
353
# Usage: expect_query_targets <arguments>
# Checks that the log file contains exactly the targets in the argument list.
function expect_query_targets() {
  # Localize the loop variable so it does not leak into the caller's scope.
  local arg
  for arg in "$@"; do
    expect_log_once "^$arg$"
  done

  # Checks that the number of lines starting with '//' equals the number of
  # arguments provided, i.e. no unexpected targets were logged.
  expect_log_n "^//[^ ]*$" $#
}
365
# Usage: expect_log_with_timeout <regexp> <timeout> [error-message]
# Waits for the given regexp in the $TEST_log for up to timeout seconds.
# Prints the contents of $TEST_log and the specified (optional)
# error message otherwise, and returns non-zero.
function expect_log_with_timeout() {
    local pattern=$1
    local timeout=$2
    local message=${3:-Regexp "$pattern" not found in "$timeout" seconds}
    local count=0
    while (( count < timeout )); do
      grep -sq -- "$pattern" "$TEST_log" && return 0
      # (( ++count )) replaces the deprecated `let`; pre-increment always
      # yields a non-zero value here, so it cannot trip errexit.
      (( ++count ))
      sleep 1
    done

    # One final check after the deadline, in case the log was written
    # between the last poll and now.
    grep -sq -- "$pattern" "$TEST_log" && return 0
    fail "$message"
    return 1
}
385
# Usage: expect_cmd_with_timeout <expected> <cmd> [timeout]
# Repeats the command once a second for up to timeout seconds (10s by default),
# until the output matches the expected value. Fails and returns 1 if
# the command does not return the expected value in the end.
function expect_cmd_with_timeout() {
    local want="$1"
    local cmd="$2"
    local deadline=${3:-10}
    local waited=0
    local got=""
    while (( waited < deadline )); do
      # $cmd is intentionally unquoted so it word-splits into a command line.
      got="$($cmd)"
      if [[ "$want" == "$got" ]]; then
        return 0
      fi
      (( ++waited ))
      sleep 1
    done

    [[ "$want" == "$got" ]] && return 0
    fail "Expected '${want}' within ${deadline}s, was '${got}'"
    return 1
}
406
# Usage: assert_one_of <expected_list>... <actual>
# Asserts that actual is one of the items in expected_list
#
# Example:
#     local expected=( "foo", "bar", "baz" )
#     assert_one_of $expected $actual
function assert_one_of() {
    local candidates=("$@")
    local last_index=$(( ${#candidates[@]} - 1 ))
    # The final argument is the actual value; everything before it is the
    # list of accepted values.
    local actual=${candidates[last_index]}
    unset 'candidates[last_index]'
    for candidate in "${candidates[@]}"; do
      if [[ "$candidate" == "$actual" ]]; then
        return 0
      fi
    done

    fail "Expected one of '${candidates[*]}', was '$actual'"
    return 1
}
425
# Usage: assert_not_one_of <expected_list>... <actual>
# Asserts that actual is not one of the items in expected_list
#
# Example:
#     local unexpected=( "foo", "bar", "baz" )
#     assert_not_one_of $unexpected $actual
function assert_not_one_of() {
    local candidates=("$@")
    local last_index=$(( ${#candidates[@]} - 1 ))
    # The final argument is the actual value; everything before it is the
    # list of banned values.
    local actual=${candidates[last_index]}
    unset 'candidates[last_index]'
    for candidate in "${candidates[@]}"; do
      if [[ "$candidate" == "$actual" ]]; then
        fail "'${candidates[*]}' contains '$actual'"
        return 1
      fi
    done

    return 0
}
446
# Usage: assert_equals <expected> <actual>
# Asserts [[ expected == actual ]].
function assert_equals() {
    local want=$1
    local got=$2
    if [[ "$want" == "$got" ]]; then
      return 0
    fi

    fail "Expected '$want', was '$got'"
    return 1
}
456
# Usage: assert_not_equals <unexpected> <actual>
# Asserts [[ unexpected != actual ]].
function assert_not_equals() {
    local banned=$1
    local got=$2
    if [[ "$banned" != "$got" ]]; then
      return 0
    fi

    fail "Expected not '${banned}', was '${got}'"
    return 1
}
466
# Usage: assert_contains <regexp> <file> [error-message]
# Asserts that file matches regexp.  Prints the contents of
# file and the specified (optional) error message otherwise, and
# returns non-zero.
function assert_contains() {
    local pattern=$1
    local file=$2
    local message=${3:-Expected regexp "$pattern" not found in "$file"}
    if grep -sq -- "$pattern" "$file"; then
      return 0
    fi

    # Dump the offending file to stderr to aid debugging.
    cat "$file" >&2
    fail "$message"
    return 1
}
481
# Usage: assert_not_contains <regexp> <file> [error-message]
# Asserts that file does not match regexp.  Prints the contents of
# file and the specified (optional) error message otherwise, and
# returns non-zero.
function assert_not_contains() {
    local pattern=$1
    local file=$2
    local message=${3:-Expected regexp "$pattern" found in "$file"}

    # A missing file is its own failure mode, reported distinctly.
    if [[ ! -f "$file" ]]; then
      fail "$file is not a file: $message"
      return 1
    fi
    if ! grep -sq -- "$pattern" "$file"; then
      return 0
    fi

    cat "$file" >&2
    fail "$message"
    return 1
}
502
# Usage: assert_contains_n <regexp> <count> <file> [error-message]
# Asserts that <file> contains exactly <count> lines matching <regexp>.
# Prints the contents of the file and the (optional) error message
# otherwise, and returns non-zero.
function assert_contains_n() {
    local pattern=$1
    local expectednum=${2:-1}
    local file=$3
    local message=${4:-Expected regexp "$pattern" not found exactly $expectednum times}
    local count
    if [[ ! -f "$file" ]]; then
      fail "$file is not a file: $message"
      return 1
    fi
    count=$(grep -sc -- "$pattern" "$file")
    if (( count == expectednum )); then
      return 0
    fi

    cat "$file" >&2
    fail "$message"
    return 1
}
521
# Usage: __update_shards
# Updates the global variable TESTS if sharding is enabled, i.e.
# ($TEST_TOTAL_SHARDS > 0): keeps only every TEST_TOTAL_SHARDS-th test,
# offset by TEST_SHARD_INDEX, and touches TEST_SHARD_STATUS_FILE to signal
# to the runner that this test supports the sharding protocol.
function __update_shards() {
    [[ -z "${TEST_TOTAL_SHARDS-}" ]] && return 0

    (( TEST_TOTAL_SHARDS > 0 )) ||
      { echo "Invalid total shards ${TEST_TOTAL_SHARDS}" >&2; exit 1; }

    (( TEST_SHARD_INDEX < 0 || TEST_SHARD_INDEX >= TEST_TOTAL_SHARDS )) &&
      { echo "Invalid shard ${TEST_SHARD_INDEX}" >&2; exit 1; }

    # Keep the tests whose 1-based position modulo the shard count equals
    # this shard's index.  The stream is NUL-terminated so `read -d $'\0'`
    # succeeds even when the selection is empty.
    IFS=$'\n' read -rd $'\0' -a TESTS < <(
        for test in "${TESTS[@]}"; do echo "$test"; done |
            awk "NR % ${TEST_TOTAL_SHARDS} == ${TEST_SHARD_INDEX}" &&
            echo -en '\0')

    [[ -z "${TEST_SHARD_STATUS_FILE-}" ]] || touch "$TEST_SHARD_STATUS_FILE"
}
540
# Usage: __test_terminated <signal-number>
# Handler that is called when the test terminated unexpectedly; reports the
# signal, marks the test failed, runs the timeout hook, and exits the
# subshell with status 1.
function __test_terminated() {
    local signal=$1
    __show_log >&2
    echo "$TEST_name FAILED: terminated by signal $signal." >&2
    TEST_passed="false"
    __show_stack
    timeout
    exit 1
}
551
# Usage: __test_terminated_err
# Handler that is called when the test terminated unexpectedly due to "errexit".
function __test_terminated_err() {
    # When a subshell exits due to signal ERR, its parent shell also exits,
    # thus the signal handler is called recursively and we print out the
    # error message and stack trace multiple times. We're only interested
    # in the first one though, as it contains the most information, so ignore
    # all following.  ("$TEST_TMPDIR" is quoted against word-splitting.)
    if [[ -f "$TEST_TMPDIR/__err_handled" ]]; then
      exit 1
    fi
    __show_log >&2
    if [[ -n "$TEST_name" ]]; then
      echo -n "$TEST_name " >&2
    fi
    echo "FAILED: terminated because this command returned a non-zero status:" >&2
    touch "$TEST_TMPDIR/__err_handled"
    TEST_passed="false"
    __show_stack
    # If $TEST_name is still empty, the test suite failed before we even started
    # to run tests, so we shouldn't call tear_down.
    if [[ -n "$TEST_name" ]]; then
      __run_tear_down_after_failure
    fi
    exit 1
}
578
# Usage: __trap_with_arg <handler> <signals ...>
# Helper to install a trap handler for several signals preserving the signal
# number, so that the signal number is available to the trap handler.
function __trap_with_arg() {
    # Localize to avoid leaking `func`/`sig` into the caller's namespace.
    local func="$1" sig
    shift
    for sig; do
        # $func and $sig expand now (double quotes), baking the signal name
        # into the installed trap command.
        trap "$func $sig" "$sig"
    done
}
588
# Usage: __log_to_test_report <node> <block>
# Adds the block to the given node in the report file ($XML_OUTPUT_FILE).
# Quotes in the arguments need to be escaped.  Creates a skeleton report
# file on first use.
function __log_to_test_report() {
    local node="$1"
    local block="$2"
    if [[ ! -e "$XML_OUTPUT_FILE" ]]; then
        local xml_header='<?xml version="1.0" encoding="UTF-8"?>'
        echo "${xml_header}<testsuites></testsuites>" > "$XML_OUTPUT_FILE"
    fi

    # replace match on node with block and match
    # replacement expression only needs escaping for quotes
    # (the block text is passed via ARGV, so perl never interprets it)
    perl -e "\
\$input = @ARGV[0]; \
\$/=undef; \
open FILE, '+<$XML_OUTPUT_FILE'; \
\$content = <FILE>; \
if (\$content =~ /($node.*)\$/) { \
  seek FILE, 0, 0; \
  print FILE \$\` . \$input . \$1; \
}; \
close FILE" "$block"
}
613
# Usage: __finish_test_report <suite-name> <total> <passed>
# Stamps the suite name and the total/failed test counts onto the
# <testsuites> and <testsuite> nodes of the XML report.
function __finish_test_report() {
    local suite_name="$1"
    local total="$2"
    local passed="$3"
    local failed=$((total - passed))

    # Update the xml output with the suite name and total number of
    # passed/failed tests.  A single sed invocation with two expressions
    # replaces the previous `cat | sed | sed` pipeline (two fewer processes,
    # same left-to-right substitution semantics).
    sed \
        -e "s/<testsuites>/<testsuites tests=\"$total\" failures=\"0\" errors=\"$failed\">/" \
        -e "s/<testsuite>/<testsuite name=\"${suite_name}\" tests=\"$total\" failures=\"0\" errors=\"$failed\">/" \
        "$XML_OUTPUT_FILE" > "${XML_OUTPUT_FILE}.bak"

    rm -f "$XML_OUTPUT_FILE"
    mv "${XML_OUTPUT_FILE}.bak" "$XML_OUTPUT_FILE"
}
634
# Multi-platform timestamp function returning milliseconds since the epoch.
UNAME=$(uname -s | tr 'A-Z' 'a-z')
if [[ "$UNAME" == "linux" ]] || [[ "$UNAME" =~ msys_nt* ]]; then
    # GNU date supports %N (nanoseconds); divide down to milliseconds.
    function timestamp() {
      echo $(($(date +%s%N)/1000000))
    }
else
    function timestamp() {
      # macOS and BSDs do not have %N, so Python is the best we can do.
      # LC_ALL=C works around python 3.8 and 3.9 crash on macOS when the
      # filesystem encoding is unspecified (e.g. when LANG=en_US).
      local PYTHON=python
      command -v python3 &> /dev/null && PYTHON=python3
      LC_ALL=C "${PYTHON}" -c 'import time; print(int(round(time.time() * 1000)))'
    }
fi
651
# Usage: get_run_time <start-ms> <end-ms>
# Prints the elapsed time between two millisecond timestamps as seconds
# with millisecond precision, e.g. "1.034".
function get_run_time() {
  local ts_start=$1
  local ts_end=$2
  local run_time_ms=$((ts_end - ts_start))
  # printf zero-pads the fractional part.  The previous
  # `${run_time_ms: -3}` substring hack printed "0.5" instead of "0.005"
  # for deltas under 100ms.
  printf '%d.%03d\n' $((run_time_ms / 1000)) $((run_time_ms % 1000))
}
658
# Usage: run_suite <suite-comment>
# Must be called from the end of the user's test suite.
# Runs every selected test_* function in its own subshell (with set_up /
# tear_down hooks and signal/errexit traps), records results in the XML
# report, and calls exit with zero on success, non-zero otherwise.
function run_suite() {
  local message="$1"
  # The name of the suite should be the script being run, which
  # will be the filename with the ".sh" extension removed.
  local suite_name="$(basename "$0")"

  echo >&2
  echo "$message" >&2
  echo >&2

  __log_to_test_report "<\/testsuites>" "<testsuite></testsuite>"

  local total=0
  local passed=0

  # Ensure the user-overridable cleanup() hook runs when this process exits.
  atexit "cleanup"

  # If the user didn't specify an explicit list of tests (e.g. a
  # working set), use them all.
  if (( ${#TESTS[@]} == 0 )); then
    # Even if there aren't any tests, this needs to succeed.
    local all_tests=()
    IFS=$'\n' read -d $'\0' -ra all_tests < <(
        declare -F | awk '{print $3}' | grep ^test_ || true; echo -en '\0')

    if (( "${#_TEST_FILTERS[@]}" == 0 )); then
      # Use ${array[@]+"${array[@]}"} idiom to avoid errors when running with
      # Bash version <= 4.4 with `nounset` when `all_tests` is empty (
      # https://github.com/bminor/bash/blob/a0c0a00fc419b7bc08202a79134fcd5bc0427071/CHANGES#L62-L63).
      TESTS=("${all_tests[@]+${all_tests[@]}}")
    else
      # Keep only the tests matching at least one --test_filter glob.
      for t in "${all_tests[@]+${all_tests[@]}}"; do
        local matches=0
        for f in "${_TEST_FILTERS[@]}"; do
          # We purposely want to glob match.
          # shellcheck disable=SC2053
          [[ "$t" = $f ]] && matches=1 && break
        done
        if (( matches )); then
          TESTS+=("$t")
        fi
      done
    fi

  elif [[ -n "${TEST_WARNINGS_OUTPUT_FILE:-}" ]]; then
    if grep -q "TESTS=" "$TEST_script" ; then
      echo "TESTS variable overridden in sh_test. Please remove before submitting" \
        >> "$TEST_WARNINGS_OUTPUT_FILE"
    fi
  fi

  # Reset TESTS in the common case where it contains a single empty string.
  if [[ -z "${TESTS[*]-}" ]]; then
    TESTS=()
  fi
  local original_tests_size=${#TESTS[@]}

  __update_shards

  if [[ "${#TESTS[@]}" -ne 0 ]]; then
    for TEST_name in "${TESTS[@]}"; do
      >"$TEST_log" # Reset the log.
      TEST_passed="true"

      (( ++total ))
      if [[ "$TEST_verbose" == "true" ]]; then
          date >&2
          __pad "$TEST_name" '*' >&2
      fi

      local run_time="0.0"
      rm -f "${TEST_TMPDIR}"/{__ts_start,__ts_end}

      if [[ "$(type -t "$TEST_name")" == function ]]; then
        # Save exit handlers eventually set.
        local SAVED_ATEXIT="$ATEXIT";
        ATEXIT=

        # Run test in a subshell.
        rm -f "${TEST_TMPDIR}"/__err_handled
        __trap_with_arg __test_terminated INT KILL PIPE TERM ABRT FPE ILL QUIT SEGV

        # Remember -o pipefail value and disable it for the subshell result
        # collection.
        if [[ "${SHELLOPTS}" =~ (^|:)pipefail(:|$) ]]; then
          local __opt_switch=-o
        else
          local __opt_switch=+o
        fi
        set +o pipefail
        (
          set "${__opt_switch}" pipefail
          # if errexit is enabled, make sure we run cleanup and collect the log.
          if [[ "$-" = *e* ]]; then
            set -E
            trap __test_terminated_err ERR
          fi
          timestamp >"${TEST_TMPDIR}"/__ts_start
          testenv_set_up
          set_up
          # Invoke the test function itself.
          eval "$TEST_name"
          __in_tear_down=1
          tear_down
          testenv_tear_down
          timestamp >"${TEST_TMPDIR}"/__ts_end
          test "$TEST_passed" == "true"
        ) 2>&1 | tee "${TEST_TMPDIR}"/__log
        # Note that tee will prevent the control flow continuing if the test
        # spawned any processes which are still running and have not closed
        # their stdout.

        # Exit status of the test subshell (first pipeline stage), not of tee.
        test_subshell_status=${PIPESTATUS[0]}
        set "${__opt_switch}" pipefail
        if (( test_subshell_status != 0 )); then
          TEST_passed="false"
          # Ensure that an end time is recorded in case the test subshell
          # terminated prematurely.
          [[ -f "$TEST_TMPDIR"/__ts_end ]] || timestamp >"$TEST_TMPDIR"/__ts_end
        fi

        # Calculate run time for the testcase.
        local ts_start
        ts_start=$(<"${TEST_TMPDIR}"/__ts_start)
        local ts_end
        ts_end=$(<"${TEST_TMPDIR}"/__ts_end)
        run_time=$(get_run_time $ts_start $ts_end)

        # Eventually restore exit handlers.
        if [[ -n "$SAVED_ATEXIT" ]]; then
          ATEXIT="$SAVED_ATEXIT"
          trap "$ATEXIT" EXIT
        fi
      else # Bad test explicitly specified in $TESTS.
        fail "Not a function: '$TEST_name'"
      fi

      local testcase_tag=""

      local red='\033[0;31m'
      local green='\033[0;32m'
      local no_color='\033[0m'

      if [[ "$TEST_verbose" == "true" ]]; then
          echo >&2
      fi

      if [[ "$TEST_passed" == "true" ]]; then
        if [[ "$TEST_verbose" == "true" ]]; then
          echo -e "${green}PASSED${no_color}: ${TEST_name}" >&2
        fi
        (( ++passed ))
        testcase_tag="<testcase name=\"${TEST_name}\" status=\"run\" time=\"${run_time}\" classname=\"\"></testcase>"
      else
        echo -e "${red}FAILED${no_color}: ${TEST_name}" >&2
        # end marker in CDATA cannot be escaped, we need to split the CDATA sections
        log=$(sed 's/]]>/]]>]]&gt;<![CDATA[/g' "${TEST_TMPDIR}"/__log)
        # __fail was written by fail(); fall back to a placeholder message.
        fail_msg=$(cat "${TEST_TMPDIR}"/__fail 2> /dev/null || echo "No failure message")
        # Replacing '&' with '&amp;', '<' with '&lt;', '>' with '&gt;', and '"' with '&quot;'
        escaped_fail_msg=$(echo "$fail_msg" | sed 's/&/\&amp;/g' | sed 's/</\&lt;/g' | sed 's/>/\&gt;/g' | sed 's/"/\&quot;/g')
        testcase_tag="<testcase name=\"${TEST_name}\" status=\"run\" time=\"${run_time}\" classname=\"\"><error message=\"${escaped_fail_msg}\"><![CDATA[${log}]]></error></testcase>"
      fi

      if [[ "$TEST_verbose" == "true" ]]; then
          echo >&2
      fi
      __log_to_test_report "<\/testsuite>" "$testcase_tag"
    done
  fi

  __finish_test_report "$suite_name" $total $passed
  __pad "${passed} / ${total} tests passed." '*' >&2
  if (( original_tests_size == 0 )); then
    __pad "No tests found." '*'
    exit 1
  elif (( total != passed )); then
    __pad "There were errors." '*' >&2
    exit 1
  elif (( total == 0 )); then
    __pad "No tests executed due to sharding. Check your test's shard_count." '*'
    __pad "Succeeding anyway." '*'
  fi

  exit 0
}
846