#!/bin/bash

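# Path of the generated test script; a later CI step runs it inside the Docker container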
OUTPUT_SCRIPT=${OUTPUT_SCRIPT:-/home/circleci/project/ci_test_script.sh}

# only source if file exists
if [[ -f /home/circleci/project/env ]]; then
  source /home/circleci/project/env
fi
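# Generate the container-side test script. Inside the heredoc below, unescaped
# variables such as ${PACKAGE_TYPE} are expanded now on the host, while escaped
# ones such as \${NUMPY_PIN} are left for the container shell to expand.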
cat >"${OUTPUT_SCRIPT}" <<EOL
# =================== The following code will be executed inside Docker container ===================
set -eux -o pipefail

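# Run a command, retrying up to two more times (after 1s and then 2s) on failure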
retry () {
    "\$@"  || (sleep 1 && "\$@") || (sleep 2 && "\$@")
}

# Source the binary env file here if it exists
if [[ -e "${BINARY_ENV_FILE:-/nofile}" ]]; then
  source "${BINARY_ENV_FILE:-/nofile}"
fi

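# Strip dots and the legacy 'm'/'u' ABI suffixes from the Python version, e.g. 3.10 -> 310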
python_nodot="\$(echo $DESIRED_PYTHON | tr -d m.u)"

# Set up Python
if [[ "$PACKAGE_TYPE" == conda ]]; then
  retry conda create -qyn testenv python="$DESIRED_PYTHON"
  source activate testenv >/dev/null
elif [[ "$PACKAGE_TYPE" != libtorch ]]; then
  python_path="/opt/python/cp\$python_nodot-cp\${python_nodot}"
  # Prior to Python 3.8 paths were suffixed with an 'm'
  if [[ -d  "\${python_path}/bin" ]]; then
    export PATH="\${python_path}/bin:\$PATH"
  elif [[ -d "\${python_path}m/bin" ]]; then
    export PATH="\${python_path}m/bin:\$PATH"
  fi
fi

EXTRA_CONDA_FLAGS=""
NUMPY_PIN=""
PROTOBUF_PACKAGE="defaults::protobuf"

if [[ "\$python_nodot" = *310* ]]; then
  # Conda channel priority can otherwise resolve an older numpy release,
  # so set a lower bound suitable for Python 3.10 just to be safe
  NUMPY_PIN=">=1.21.2"
  PROTOBUF_PACKAGE="protobuf>=3.19.0"
fi

if [[ "\$python_nodot" = *39* ]]; then
  # There's an issue with conda channel priority where it'll randomly pick 1.19 over 1.20
  # we set a lower boundary here just to be safe
  NUMPY_PIN=">=1.20"
fi

# Move debug wheels out of the package dir so they don't get installed
mkdir -p /tmp/debug_final_pkgs
mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to move"

# Install the package
# These installs should not need 'retry' because they install local packages
# and aren't actually network calls
# TODO there is duplicated and inconsistent test-python-env setup across this
#   file, builder/smoke_test.sh, and builder/run_tests.sh, and also in the
#   conda build scripts themselves. These should really be consolidated
# Pick only one package of multiple available (which happens as a result of workflow re-runs)
pkg="/final_pkgs/\$(ls -1 /final_pkgs | sort | tail -1)"
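# CHANNEL selects the conda channel / pip index used below: nightly for dev builds, test otherwise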
if [[ "\$PYTORCH_BUILD_VERSION" == *dev* ]]; then
    CHANNEL="nightly"
else
    CHANNEL="test"
fi

if [[ "$PACKAGE_TYPE" == conda ]]; then
  (
    # For some reason conda likes to re-activate the conda environment when attempting this install
    # which means that a deactivate is run and some variables might not exist when that happens,
    # namely CONDA_MKL_INTERFACE_LAYER_BACKUP from libblas so let's just ignore unbound variables when
    # it comes to the conda installation commands
    set +u
    # Quote the version specs so '>' is not treated as a shell redirection
    retry conda install \${EXTRA_CONDA_FLAGS} -yq \
      "numpy\${NUMPY_PIN}" \
      "mkl>=2018" \
      ninja \
      "sympy>=1.12" \
      typing-extensions \
      \${PROTOBUF_PACKAGE}
    if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
      retry conda install -c pytorch -y cpuonly
    else
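      # Derive the dotted CUDA version from DESIRED_CUDA, e.g. cu118 -> 11.8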
      cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
      CUDA_PACKAGE="pytorch-cuda"
      retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c "pytorch-\${CHANNEL}" "pytorch-cuda=\${cu_ver}"
    fi
    conda install \${EXTRA_CONDA_FLAGS} -y "\$pkg" --offline
  )
elif [[ "$PACKAGE_TYPE" != libtorch ]]; then
  if [[ "\$BUILD_ENVIRONMENT" != *s390x* ]]; then
    if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
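      # A split build produces two wheels (torch_no_python and torch); install both together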
      pkg_no_python="\$(ls -1 /final_pkgs/torch_no_python* | sort | tail -1)"
      pkg_torch="\$(ls -1 /final_pkgs/torch-* | sort | tail -1)"
      # TODO: after folder is populated use the pypi_pkg channel instead
      pip install "\$pkg_no_python" "\$pkg_torch" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}_pypi_pkg"
      retry pip install -q numpy protobuf typing-extensions
    else
      pip install "\$pkg" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}"
      retry pip install -q numpy protobuf typing-extensions
    fi
  else
    pip install "\$pkg"
    retry pip install -q numpy protobuf typing-extensions
  fi
fi
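# libtorch ships as a zip archive rather than a wheel/conda package; unpack it and test from the extracted tree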
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
  pkg="\$(ls /final_pkgs/*-latest.zip)"
  unzip "\$pkg" -d /tmp
  cd /tmp/libtorch
fi

# Test the package
/builder/check_binary.sh

if [[ "\$GPU_ARCH_TYPE" != *s390x* && "\$GPU_ARCH_TYPE" != *xpu* && "\$GPU_ARCH_TYPE" != *rocm*  && "$PACKAGE_TYPE" != libtorch ]]; then
  # Exclude s390, xpu, rocm and libtorch builds from smoke testing
  python /builder/test/smoke_test/smoke_test.py --package=torchonly --torch-compile-check disabled
fi

# Clean temp files
cd /builder && git clean -ffdx

# =================== The above code will be executed inside Docker container ===================
EOL
echo
echo
echo "The script that will run in the next step is:"
cat "${OUTPUT_SCRIPT}"