#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build and upload docker images to Google Container Registry per matrix."""

from __future__ import print_function

import argparse
import atexit
import multiprocessing
import os
import shutil
import subprocess
import sys
import tempfile

# Language Runtime Matrix
import client_matrix

python_util_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../run_tests/python_utils'))
sys.path.append(python_util_dir)
import dockerjob
import jobset

_IMAGE_BUILDER = 'tools/run_tests/dockerize/build_interop_image.sh'
_LANGUAGES = list(client_matrix.LANG_RUNTIME_MATRIX.keys())
# All gRPC release tags, flattened, deduped and sorted.
_RELEASES = sorted(
    list(
        set(release
            for release_dict in list(client_matrix.LANG_RELEASE_MATRIX.values())
            for release in list(release_dict.keys()))))

# Destination directory inside docker image to keep extra info from build time.
_BUILD_INFO = '/var/local/build_info'

argp = argparse.ArgumentParser(
    description='Build docker images for the interop test matrix.')
argp.add_argument('--gcr_path',
                  default='gcr.io/grpc-testing',
                  help='Path of docker images in Google Container Registry')

argp.add_argument('--release',
                  default='master',
                  choices=['all', 'master'] + _RELEASES,
                  help='github commit tag to checkout. When building all '
                  'releases defined in client_matrix.py, use "all". Valid only '
                  'with --git_checkout.')

argp.add_argument('-l',
                  '--language',
                  choices=['all'] + sorted(_LANGUAGES),
                  nargs='+',
                  default=['all'],
                  help='Test languages to build docker images for.')

argp.add_argument('--git_checkout',
                  action='store_true',
                  help='Use a separate git clone tree for building the grpc '
                  'stack. Required when using the --release flag. By default, '
                  'the current tree and the sibling will be used for building '
                  'the grpc stack.')

argp.add_argument('--git_checkout_root',
                  default='/export/hda3/tmp/grpc_matrix',
                  help='Directory under which the grpc-go/java/main repo will '
                  'be cloned. Valid only with --git_checkout.')

argp.add_argument('--keep',
                  action='store_true',
                  help='keep the created local images after uploading to GCR')

argp.add_argument('--reuse_git_root',
                  default=False,
                  action='store_const',
                  const=True,
                  help='reuse the repo dir. If False, the existing git root '
                  'directory will be removed before a clean checkout, because '
                  'reusing the repo can cause git checkout errors if you '
                  'switch between releases.')

argp.add_argument(
    '--upload_images',
    action='store_true',
    help='If set, images will be uploaded to container registry after building.'
)

args = argp.parse_args()
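
# Example invocation (illustrative only; the flags are defined above and the
# script name/path depends on where this file lives in the repo):
#   python3 <path/to/this_script.py> --language go java --release all \
#       --git_checkout --upload_images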


def add_files_to_image(image, with_files, label=None):
    """Add files to a docker image.

    image: docker image name, i.e. grpc_interop_java:26328ad8
    with_files: additional files to include in the docker image.
    label: label string to attach to the image.
    """
    tag_idx = image.find(':')
    if tag_idx == -1:
        jobset.message('FAILED',
                       'invalid docker image %s' % image,
                       do_newline=True)
        sys.exit(1)
    orig_tag = '%s_' % image
    subprocess.check_output(['docker', 'tag', image, orig_tag])

    lines = ['FROM ' + orig_tag]
    if label:
        lines.append('LABEL %s' % label)

    temp_dir = tempfile.mkdtemp()
    atexit.register(lambda: subprocess.call(['rm', '-rf', temp_dir]))

    # Copy with_files inside the tmp directory, which will be the docker build
    # context.
    for f in with_files:
        shutil.copy(f, temp_dir)
        lines.append('COPY %s %s/' % (os.path.basename(f), _BUILD_INFO))

    # Create a Dockerfile.
    with open(os.path.join(temp_dir, 'Dockerfile'), 'w') as f:
        f.write('\n'.join(lines))

    jobset.message('START', 'Repackaging %s' % image, do_newline=True)
    build_cmd = ['docker', 'build', '--rm', '--tag', image, temp_dir]
    subprocess.check_output(build_cmd)
    dockerjob.remove_image(orig_tag, skip_nonexistent=True)
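

# For reference, the Dockerfile generated by add_files_to_image() above looks
# roughly like this (illustrative values; the base tag, label and copied files
# come from the caller in build_all_images_for_release()):
#
#   FROM grpc_interop_java:26328ad8_
#   LABEL release=v1.3.0
#   COPY commit_log /var/local/build_info/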


def build_image_jobspec(runtime, env, gcr_tag, stack_base):
    """Build interop docker image for a language with runtime.

    runtime: a <lang><version> string, for example go1.8.
    env: dictionary of env vars to be passed to the build script.
    gcr_tag: the tag for the docker image (e.g. v1.3.0).
    stack_base: the local gRPC repo path.
    """
    basename = 'grpc_interop_%s' % runtime
    tag = '%s/%s:%s' % (args.gcr_path, basename, gcr_tag)
    build_env = {'INTEROP_IMAGE': tag, 'BASE_NAME': basename}
    build_env.update(env)
    image_builder_path = _IMAGE_BUILDER
    # Note: 'lang' refers to the module-level loop variable set at the bottom
    # of this file.
    if client_matrix.should_build_docker_interop_image_from_release_tag(lang):
        image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER)
    build_job = jobset.JobSpec(cmdline=[image_builder_path],
                               environ=build_env,
                               shortname='build_docker_%s' % runtime,
                               timeout_seconds=30 * 60)
    build_job.tag = tag
    return build_job


def build_all_images_for_lang(lang):
    """Build all docker images for a language across releases and runtimes."""
    if not args.git_checkout:
        if args.release != 'master':
            print(
                'Cannot use --release without also enabling --git_checkout.\n')
            sys.exit(1)
        releases = [args.release]
    else:
        if args.release == 'all':
            releases = client_matrix.get_release_tags(lang)
        else:
            # Build a particular release.
            if args.release not in (['master'] +
                                    client_matrix.get_release_tags(lang)):
                jobset.message('SKIPPED',
                               '%s for %s is not defined' %
                               (args.release, lang),
                               do_newline=True)
                return []
            releases = [args.release]

    images = []
    for release in releases:
        images += build_all_images_for_release(lang, release)
    jobset.message('SUCCESS',
                   'All docker images built for %s at %s.' % (lang, releases),
                   do_newline=True)
    return images


def build_all_images_for_release(lang, release):
    """Build all docker images for a release across all runtimes."""
    docker_images = []
    build_jobs = []

    env = {}
    # If we're not using the current tree (or its sibling) for the grpc stack,
    # do a checkout.
    stack_base = ''
    if args.git_checkout:
        stack_base = checkout_grpc_stack(lang, release)
        var = {
            'go': 'GRPC_GO_ROOT',
            'java': 'GRPC_JAVA_ROOT',
            'node': 'GRPC_NODE_ROOT'
        }.get(lang, 'GRPC_ROOT')
        env[var] = stack_base

    for runtime in client_matrix.get_runtimes_for_lang_release(lang, release):
        job = build_image_jobspec(runtime, env, release, stack_base)
        docker_images.append(job.tag)
        build_jobs.append(job)

    jobset.message('START', 'Building interop docker images.', do_newline=True)
    print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))

    num_failures, _ = jobset.run(build_jobs,
                                 newline_on_success=True,
                                 maxjobs=multiprocessing.cpu_count())
    if num_failures:
        jobset.message('FAILED',
                       'Failed to build interop docker images.',
                       do_newline=True)
        docker_images_cleanup.extend(docker_images)
        sys.exit(1)

    jobset.message('SUCCESS',
                   'All docker images built for %s at %s.' % (lang, release),
                   do_newline=True)

    if release != 'master':
        commit_log = os.path.join(stack_base, 'commit_log')
        if os.path.exists(commit_log):
            for image in docker_images:
                add_files_to_image(image, [commit_log], 'release=%s' % release)
    return docker_images


def cleanup():
    if not args.keep:
        for image in docker_images_cleanup:
            dockerjob.remove_image(image, skip_nonexistent=True)


docker_images_cleanup = []
atexit.register(cleanup)


def maybe_apply_patches_on_git_tag(stack_base, lang, release):
    files_to_patch = []

    release_info = client_matrix.LANG_RELEASE_MATRIX[lang].get(release)
    if release_info:
        files_to_patch = release_info.patch
    if not files_to_patch:
        return
    patch_file_relative_path = 'patches/%s_%s/git_repo.patch' % (lang, release)
    patch_file = os.path.abspath(
        os.path.join(os.path.dirname(__file__), patch_file_relative_path))
    if not os.path.exists(patch_file):
        jobset.message('FAILED',
                       'expected patch file |%s| to exist' % patch_file)
        sys.exit(1)
    subprocess.check_output(['git', 'apply', patch_file],
                            cwd=stack_base,
                            stderr=subprocess.STDOUT)

    # TODO(jtattermusch): this really needs simplification and refactoring:
    # - "git add" and "git commit" can easily be done in a single command.
    # - it looks like the only reason for the existence of the "files_to_patch"
    #   entry is to perform "git add" - which is clumsy and fragile.
    # - we only allow a single patch with the name "git_repo.patch". A better
    #   design would be to allow multiple patches with more descriptive names.
    for repo_relative_path in files_to_patch:
        subprocess.check_output(['git', 'add', repo_relative_path],
                                cwd=stack_base,
                                stderr=subprocess.STDOUT)
    subprocess.check_output([
        'git', 'commit', '-m',
        ('Hack performed on top of the %s git '
         'tag in order to build and run the %s '
         'interop tests on that tag.' % (release, lang))
    ],
                            cwd=stack_base,
                            stderr=subprocess.STDOUT)


def checkout_grpc_stack(lang, release):
    """Invokes 'git checkout' for the lang/release and returns the directory created."""
    assert args.git_checkout and args.git_checkout_root

    if not os.path.exists(args.git_checkout_root):
        os.makedirs(args.git_checkout_root)

    repo = client_matrix.get_github_repo(lang)
    # Get the subdir name part of the repo.
    # For example, 'git@github.com:grpc/grpc-go.git' should use 'grpc-go'.
    repo_dir = os.path.splitext(os.path.basename(repo))[0]
    stack_base = os.path.join(args.git_checkout_root, repo_dir)

    # Clean up leftover repo dir if necessary.
    if not args.reuse_git_root and os.path.exists(stack_base):
        jobset.message('START', 'Removing git checkout root.', do_newline=True)
        shutil.rmtree(stack_base)

    if not os.path.exists(stack_base):
        subprocess.check_call(['git', 'clone', '--recursive', repo],
                              cwd=os.path.dirname(stack_base))

    # git checkout.
    jobset.message('START',
                   'git checkout %s from %s' % (release, stack_base),
                   do_newline=True)
    # We should NEVER do a checkout on the current tree!
    assert not os.path.dirname(__file__).startswith(stack_base)
    output = subprocess.check_output(['git', 'checkout', release],
                                     cwd=stack_base,
                                     stderr=subprocess.STDOUT)
    maybe_apply_patches_on_git_tag(stack_base, lang, release)
    commit_log = subprocess.check_output(['git', 'log', '-1'], cwd=stack_base)
    jobset.message('SUCCESS',
                   'git checkout',
                   '%s: %s' % (str(output), commit_log),
                   do_newline=True)

    # git submodule update.
    jobset.message('START',
                   'git submodule update --init at %s from %s' %
                   (release, stack_base),
                   do_newline=True)
    subprocess.check_call(['git', 'submodule', 'update', '--init'],
                          cwd=stack_base,
                          stderr=subprocess.STDOUT)
    jobset.message('SUCCESS',
                   'git submodule update --init',
                   '%s: %s' % (str(output), commit_log),
                   do_newline=True)

    # Write git log to commit_log so it can be packaged with the docker image.
    with open(os.path.join(stack_base, 'commit_log'), 'wb') as f:
        f.write(commit_log)
    return stack_base


languages = args.language if args.language != ['all'] else _LANGUAGES
for lang in languages:
    docker_images = build_all_images_for_lang(lang)
    for image in docker_images:
        if args.upload_images:
            jobset.message('START', 'Uploading %s' % image, do_newline=True)
            # docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
            assert image.startswith(args.gcr_path) and image.find(':') != -1
            subprocess.call(['gcloud', 'docker', '--', 'push', image])
        else:
            # Uploading (and overwriting images) by default can easily break things.
            print(
                'Not uploading image %s, run with --upload_images to upload.' %
                image)