# Copyright 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify multi-camera alignment using internal parameters."""


import logging
import math
import os.path

import cv2
from mobly import test_runner
import numpy as np

import its_base_test
import camera_properties_utils
import capture_request_utils
import image_processing_utils
import its_session_utils
import opencv_processing_utils

_ALIGN_ATOL_MM = 5.0  # mm
_ALIGN_RTOL = 0.01  # 1% of sensor diagonal in pixels
_CHART_DISTANCE_RTOL = 0.1
_CIRCLE_COLOR = 0  # [0: black, 255: white]
_CIRCLE_MIN_AREA = 0.005  # multiplied by image size
_CIRCLE_RTOL = 0.1  # 10%
_CM_TO_M = 1E-2
_M_TO_MM = 1E3
_MM_TO_UM = 1E3
_NAME = os.path.splitext(os.path.basename(__file__))[0]
_REFERENCE_GYRO = 1
_REFERENCE_UNDEFINED = 2
_TEST_REQUIRED_MPC = 33
_TRANS_MATRIX_REF = np.array([0, 0, 0])  # ref camera translation is (0, 0, 0)


def convert_cap_and_prep_img(cap, props, fmt, img_name):
  """Convert the capture to an RGB image and prep the image for processing.

  Args:
    cap: capture element
    props: dict of capture properties
    fmt: capture format ('raw' or 'yuv')
    img_name: name to save image as

  Returns:
    img: uint8 numpy array
  """

  img = image_processing_utils.convert_capture_to_rgb_image(cap, props=props)

  # save image
  image_processing_utils.write_image(img, img_name)

  # convert [0, 1] image to [0, 255] and cast as uint8
  img = image_processing_utils.convert_image_to_uint8(img)

  # scale to match calibration data if RAW
  if fmt == 'raw':
    img = cv2.resize(img, None, fx=2, fy=2)

  return img


def calc_pixel_size(props):
  """Calculate the average pixel size in um from camera properties.

  Args:
    props: dict; physical camera properties

  Returns:
    average of pixel width and height in um
  """
  ar = props['android.sensor.info.pixelArraySize']
  sensor_size = props['android.sensor.info.physicalSize']
  pixel_size_w = sensor_size['width'] / ar['width']
  pixel_size_h = sensor_size['height'] / ar['height']
  logging.debug('pixel size(um): %.2f x %.2f',
                pixel_size_w * _MM_TO_UM, pixel_size_h * _MM_TO_UM)
  return (pixel_size_w + pixel_size_h) / 2 * _MM_TO_UM

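# A quick numeric check of calc_pixel_size() (illustrative values, not from
# any real device): a sensor with physicalSize 6.4 x 4.8 mm and
# pixelArraySize 4000 x 3000 has 6.4/4000 = 4.8/3000 = 1.6E-3 mm pixels,
# so the function returns (1.6E-3 + 1.6E-3) / 2 * _MM_TO_UM = 1.6 um.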

def select_ids_to_test(ids, props, chart_distance):
  """Determine the best 2 cameras to test for the rig used.

  Cameras are pre-filtered to only include supportable cameras.
  Supportable cameras are: YUV(RGB), RAW(Bayer)

  Args:
    ids: list of strings; physical camera ids
    props: dict; physical camera properties dictionary
    chart_distance: float; distance to chart in meters
  Returns:
    test_ids to be tested
  """
  chart_distance = abs(chart_distance)*100  # convert M to CM
  test_ids = []
  for i in ids:
    sensor_size = props[i]['android.sensor.info.physicalSize']
    focal_l = props[i]['android.lens.info.availableFocalLengths'][0]
    diag = math.sqrt(sensor_size['height'] ** 2 + sensor_size['width'] ** 2)
    fov = round(2 * math.degrees(math.atan(diag / (2 * focal_l))), 2)
    logging.debug('Camera: %s, FoV: %.2f, chart_distance: %.1fcm', i, fov,
                  chart_distance)
    # determine best combo with rig used or recommend different rig
    if (opencv_processing_utils.FOV_THRESH_TELE < fov <
        opencv_processing_utils.FOV_THRESH_UW):
      test_ids.append(i)  # RFoV camera
    elif fov < opencv_processing_utils.FOV_THRESH_TELE40:
      logging.debug(
          'Skipping camera %s. FoV < %s is too small for testing.',
          i, opencv_processing_utils.FOV_THRESH_TELE40)
      continue  # super-TELE camera
    elif (fov <= opencv_processing_utils.FOV_THRESH_TELE and
          math.isclose(chart_distance,
                       opencv_processing_utils.CHART_DISTANCE_31CM,
                       rel_tol=_CHART_DISTANCE_RTOL)):
      test_ids.append(i)  # TELE camera in RFoV rig
    elif (fov >= opencv_processing_utils.FOV_THRESH_UW and
          math.isclose(chart_distance,
                       opencv_processing_utils.CHART_DISTANCE_22CM,
                       rel_tol=_CHART_DISTANCE_RTOL)):
      test_ids.append(i)  # WFoV camera in WFoV rig
    else:
      logging.debug('Skipping camera %s. Not appropriate for test rig.', i)

  if len(test_ids) < 2:
    raise AssertionError('Error: started with 2+ cameras, reduced to <2 based '
                         f'on FoVs. Wrong test rig? test_ids: {test_ids}')
  return test_ids[0:2]

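# Worked FoV example for select_ids_to_test() (illustrative numbers, not
# from any real device): a 6.4 x 4.8 mm sensor has an 8.0 mm diagonal, so
# with a 4.38 mm focal length fov = 2*degrees(atan(8.0/(2*4.38))) ~= 84.8,
# which is then compared against the FOV_THRESH_* constants and the chart
# distance to bin the camera for the rig in use.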

def determine_valid_out_surfaces(cam, props, fmt, cap_camera_ids, sizes):
  """Determine valid output surfaces for captures.

  Args:
    cam:                obj; camera object
    props:              dict; props for the physical cameras
    fmt:                str; capture format ('yuv' or 'raw')
    cap_camera_ids:     list; camera capture ids
    sizes:              dict; valid physical sizes for the cap_camera_ids

  Returns:
    valid out_surfaces
  """
  valid_stream_combo = False

  # try simultaneous capture
  w, h = capture_request_utils.get_available_output_sizes('yuv', props)[0]
  out_surfaces = [{'format': 'yuv', 'width': w, 'height': h},
                  {'format': fmt, 'physicalCamera': cap_camera_ids[0],
                   'width': sizes[cap_camera_ids[0]][0],
                   'height': sizes[cap_camera_ids[0]][1]},
                  {'format': fmt, 'physicalCamera': cap_camera_ids[1],
                   'width': sizes[cap_camera_ids[1]][0],
                   'height': sizes[cap_camera_ids[1]][1]}]
  valid_stream_combo = cam.is_stream_combination_supported(out_surfaces)

  # try each camera individually
  if not valid_stream_combo:
    out_surfaces = []
    for cap_id in cap_camera_ids:
      out_surface = {'format': fmt, 'physicalCamera': cap_id,
                     'width': sizes[cap_id][0],
                     'height': sizes[cap_id][1]}
      valid_stream_combo = cam.is_stream_combination_supported(out_surface)
      if valid_stream_combo:
        out_surfaces.append(out_surface)
      else:
        camera_properties_utils.skip_unless(valid_stream_combo)

  return out_surfaces


def take_images(cam, caps, props, fmt, cap_camera_ids, out_surfaces):
  """Capture images.

  Args:
    cam: obj; camera object
    caps: dict; capture results indexed by (fmt, id)
    props: dict; props for the physical cameras
    fmt: str; capture format ('yuv' or 'raw')
    cap_camera_ids: list; camera capture ids
    out_surfaces: list; valid output surfaces for caps

  Returns:
    caps: dict; capture information indexed by (fmt, cap_id)
  """

  logging.debug('out_surfaces: %s', str(out_surfaces))
  if len(out_surfaces) == 3:  # do simultaneous capture
    # Do 3A without getting the values
    cam.do_3a(lock_ae=True, lock_awb=True)
    req = capture_request_utils.auto_capture_request(props=props, do_af=True)
    _, caps[(fmt, cap_camera_ids[0])], caps[(fmt, cap_camera_ids[1])] = (
        cam.do_capture(req, out_surfaces))

  else:  # step through cameras individually
    for i, out_surface in enumerate(out_surfaces):
      # Do 3A without getting the values
      cam.do_3a(lock_ae=True, lock_awb=True)
      req = capture_request_utils.auto_capture_request(props=props, do_af=True)
      caps[(fmt, cap_camera_ids[i])] = cam.do_capture(req, out_surface)

  return caps


def undo_zoom(cap, circle):
  """Correct coordinates and size of circle for zoom.

  Assume that the maximum physical YUV image size is close to active array size.

  Args:
    cap: camera capture element
    circle: dict of circle values
  Returns:
    unzoomed circle dict
  """
  yuv_w = cap['width']
  yuv_h = cap['height']
  logging.debug('cap size: %d x %d', yuv_w, yuv_h)
  cr = cap['metadata']['android.scaler.cropRegion']
  cr_w = cr['right'] - cr['left']
  cr_h = cr['bottom'] - cr['top']

  # Offset due to aspect ratio difference of crop region and yuv
  # - fit yuv box inside of differently shaped cr box
  yuv_aspect = yuv_w / yuv_h
  relative_aspect = yuv_aspect / (cr_w/cr_h)
  if relative_aspect > 1:
    zoom_ratio = yuv_w / cr_w
    yuv_x = 0
    yuv_y = (cr_h - cr_w / yuv_aspect) / 2
  else:
    zoom_ratio = yuv_h / cr_h
    yuv_x = (cr_w - cr_h * yuv_aspect) / 2
    yuv_y = 0

  circle['x'] = cr['left'] + yuv_x + circle['x'] / zoom_ratio
  circle['y'] = cr['top'] + yuv_y + circle['y'] / zoom_ratio
  circle['r'] = circle['r'] / zoom_ratio

  logging.debug(' Calculated zoom ratio: %.3f', zoom_ratio)
  logging.debug(' Corrected circle X: %.2f', circle['x'])
  logging.debug(' Corrected circle Y: %.2f', circle['y'])
  logging.debug(' Corrected circle radius: %.2f', circle['r'])

  return circle

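# Worked example for undo_zoom() (illustrative numbers): a 1920x1080 YUV
# capture with a full 4000x3000 crop region (left = top = 0) gives
# relative_aspect = (1920/1080)/(4000/3000) > 1, so zoom_ratio =
# 1920/4000 = 0.48, yuv_x = 0 and yuv_y = (3000 - 4000/(16/9))/2 = 375.
# A circle found at (960, 540) maps back to (960/0.48, 375 + 540/0.48)
# = (2000, 1500) in pre-zoom coordinates, and its radius scales by 1/0.48.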

def convert_to_world_coordinates(x, y, r, t, k, z_w):
  """Convert x,y coordinates to world coordinates.

  Conversion equation is:
  A = [[x*r[2][0] - dot(k_row0, r_col0), x*r[2][1] - dot(k_row0, r_col1)],
       [y*r[2][0] - dot(k_row1, r_col0), y*r[2][1] - dot(k_row1, r_col1)]]
  b = [[z_w*dot(k_row0, r_col2) + dot(k_row0, t) - x*(r[2][2]*z_w + t[2])],
       [z_w*dot(k_row1, r_col2) + dot(k_row1, t) - y*(r[2][2]*z_w + t[2])]]

  [[x_w], [y_w]] = inv(A) * b

  Args:
    x: x location in pixel space
    y: y location in pixel space
    r: rotation matrix
    t: translation matrix
    k: intrinsic matrix
    z_w: z distance in world space

  Returns:
    x_w:    x in meters in world space
    y_w:    y in meters in world space
  """
  c_1 = r[2, 2] * z_w + t[2]
  k_x1 = np.dot(k[0, :], r[:, 0])
  k_x2 = np.dot(k[0, :], r[:, 1])
  k_x3 = z_w * np.dot(k[0, :], r[:, 2]) + np.dot(k[0, :], t)
  k_y1 = np.dot(k[1, :], r[:, 0])
  k_y2 = np.dot(k[1, :], r[:, 1])
  k_y3 = z_w * np.dot(k[1, :], r[:, 2]) + np.dot(k[1, :], t)

  a = np.array([[x*r[2][0]-k_x1, x*r[2][1]-k_x2],
                [y*r[2][0]-k_y1, y*r[2][1]-k_y2]])
  b = np.array([[k_x3-x*c_1], [k_y3-y*c_1]])
  return (float(v.item()) for v in np.dot(np.linalg.inv(a), b))

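# Numeric sketch for convert_to_world_coordinates() (illustrative pinhole
# values, not from any real device): with r = identity and t = 0 the
# equations reduce to x_w = z_w*(x - c_x)/f_x and y_w = z_w*(y - c_y)/f_y.
#
#   k = np.array([[1000., 0., 640.], [0., 1000., 360.], [0., 0., 1.]])
#   x_w, y_w = convert_to_world_coordinates(
#       740., 460., np.identity(3), np.zeros(3), k, 0.31)
#   # x_w = 0.31 * (740 - 640) / 1000 = 0.031 m, y_w likewise 0.031 m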

def convert_to_image_coordinates(p_w, r, t, k):
  """Project the world-coordinate point p_w back into pixel coordinates."""
  p_c = np.dot(r, p_w) + t
  p_h = np.dot(k, p_c)
  return p_h[0] / p_h[2], p_h[1] / p_h[2]

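# Round-trip sketch using the same illustrative values as above: projecting
# p_w = [0.031, 0.031, 0.31] with r = identity and t = 0 gives
# p_h = [229.4, 142.6, 0.31], so x = 229.4/0.31 = 740 and
# y = 142.6/0.31 = 460, recovering the original pixel coordinates.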

def define_reference_camera(pose_reference, cam_reference):
  """Determine the reference camera.

  Args:
    pose_reference: 0 for cameras, 1 for gyro
    cam_reference: dict with key of physical camera and value True/False
  Returns:
    i_ref: physical id of reference camera
    i_2nd: physical id of secondary camera
  """

  if pose_reference == _REFERENCE_GYRO:
    logging.debug('pose_reference is GYRO')
    keys = list(cam_reference.keys())
    i_ref = keys[0]  # pick first camera as ref
    i_2nd = keys[1]
  else:
    logging.debug('pose_reference is CAMERA')
    i_refs = [k for (k, v) in cam_reference.items() if v]
    i_ref = i_refs[0]
    if len(i_refs) > 1:
      logging.debug('Warning: more than 1 reference camera. Check translation '
                    'matrices. cam_reference: %s', str(cam_reference))
      i_2nd = i_refs[1]  # use second camera since both at same location
    else:
      i_2nd = next(k for (k, v) in cam_reference.items() if not v)
  return i_ref, i_2nd

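# Example for define_reference_camera() (hypothetical camera ids): with a
# CAMERA pose reference and cam_reference = {'2': True, '3': False},
# camera '2' (the one with zero translation) is returned as i_ref and
# camera '3' as i_2nd.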

class MultiCameraAlignmentTest(its_base_test.ItsBaseTest):
  """Test the multi camera system parameters related to camera spacing.

  Using the multi-camera physical cameras, take a picture of scene4
  (a black circle and surrounding square on a white background) with
  one of the physical cameras. Then find the circle center and radius. Using
  the parameters:
      android.lens.poseReference
      android.lens.poseTranslation
      android.lens.poseRotation
      android.lens.intrinsicCalibration
      android.lens.distortion (if available)
  project the circle center to the world coordinates for each camera.
  Compare the difference between the two cameras' circle centers in
  world coordinates.

  Reproject the world coordinates back to pixel coordinates and compare
  against originals as a correctness check.

  Compare the circle sizes if the focal lengths of the cameras are
  different using
      android.lens.availableFocalLengths.
  """

  def test_multi_camera_alignment(self):
    # capture images
    with its_session_utils.ItsSession(
        device_id=self.dut.serial,
        camera_id=self.camera_id,
        hidden_physical_id=self.hidden_physical_id) as cam:
      props = cam.get_camera_properties()
      name_with_log_path = os.path.join(self.log_path, _NAME)
      chart_distance = self.chart_distance * _CM_TO_M

      # check media performance class
      should_run = (camera_properties_utils.read_3a(props) and
                    camera_properties_utils.per_frame_control(props) and
                    camera_properties_utils.logical_multi_camera(props) and
                    camera_properties_utils.backward_compatible(props))
      media_performance_class = its_session_utils.get_media_performance_class(
          self.dut.serial)
      cameras_facing_same_direction = cam.get_facing_to_ids().get(
          props['android.lens.facing'], [])
      has_multiple_same_facing_cameras = len(cameras_facing_same_direction) > 1

      if (media_performance_class >= _TEST_REQUIRED_MPC and
          not should_run and
          cam.is_primary_camera() and
          has_multiple_same_facing_cameras and
          (props['android.lens.facing'] ==
           camera_properties_utils.LENS_FACING['BACK'])
          ):
        logging.error('Found multiple camera IDs %s facing in the same '
                      'direction as primary camera %s.',
                      cameras_facing_same_direction, self.camera_id)
        its_session_utils.raise_mpc_assertion_error(
            _TEST_REQUIRED_MPC, _NAME, media_performance_class)

      # check SKIP conditions
      camera_properties_utils.skip_unless(should_run)

      # load chart for scene
      its_session_utils.load_scene(
          cam, props, self.scene, self.tablet, self.chart_distance)

      pose_reference = props['android.lens.poseReference']

      # Convert chart_distance for lens facing back
      if (props['android.lens.facing'] ==
          camera_properties_utils.LENS_FACING['BACK']):
        # API spec defines +z as pointing out from the screen
        logging.debug('lens facing BACK')
        chart_distance *= -1

      # find physical camera IDs
      ids = camera_properties_utils.logical_multi_camera_physical_ids(props)
      physical_props = {}
      physical_ids = []
      physical_raw_ids = []
      for i in ids:
        physical_props[i] = cam.get_camera_properties_by_id(i)
        if physical_props[i][
            'android.lens.poseReference'] == _REFERENCE_UNDEFINED:
          continue
        # find YUV+RGB capable physical cameras
        if (camera_properties_utils.backward_compatible(physical_props[i]) and
            not camera_properties_utils.mono_camera(physical_props[i])):
          physical_ids.append(i)
        # find RAW+RGB capable physical cameras
        if (camera_properties_utils.backward_compatible(physical_props[i]) and
            not camera_properties_utils.mono_camera(physical_props[i]) and
            camera_properties_utils.raw16(physical_props[i])):
          physical_raw_ids.append(i)

      # determine formats and select cameras
      fmts = ['yuv']
      if len(physical_raw_ids) >= 2:
        fmts.insert(0, 'raw')  # add RAW to analysis if enough cameras
        logging.debug('Selecting RAW+RGB supported cameras')
        physical_raw_ids = select_ids_to_test(physical_raw_ids, physical_props,
                                              chart_distance)
      logging.debug('Selecting YUV+RGB cameras')
      camera_properties_utils.skip_unless(len(physical_ids) >= 2)
      physical_ids = select_ids_to_test(physical_ids, physical_props,
                                        chart_distance)

      # do captures for valid formats
      caps = {}
      for fmt in fmts:
        physical_sizes = {}
        capture_cam_ids = physical_ids
        fmt_code = capture_request_utils.FMT_CODE_YUV
        if fmt == 'raw':
          capture_cam_ids = physical_raw_ids
          fmt_code = capture_request_utils.FMT_CODE_RAW
        for physical_id in capture_cam_ids:
          configs = physical_props[physical_id][
              'android.scaler.streamConfigurationMap'][
                  'availableStreamConfigurations']
          fmt_configs = [cfg for cfg in configs if cfg['format'] == fmt_code]
          out_configs = [cfg for cfg in fmt_configs if not cfg['input']]
          out_sizes = [(cfg['width'], cfg['height']) for cfg in out_configs]
          physical_sizes[physical_id] = max(out_sizes, key=lambda item: item[1])

        out_surfaces = determine_valid_out_surfaces(
            cam, props, fmt, capture_cam_ids, physical_sizes)
        caps = take_images(
            cam, caps, physical_props, fmt, capture_cam_ids, out_surfaces)

    # process images for correctness
    for j, fmt in enumerate(fmts):
      size = {}
      k = {}
      cam_reference = {}
      r = {}
      t = {}
      circle = {}
      fl = {}
      sensor_diag = {}
      pixel_sizes = {}
      capture_cam_ids = physical_ids
      if fmt == 'raw':
        capture_cam_ids = physical_raw_ids
      logging.debug('Format: %s', str(fmt))
      for i in capture_cam_ids:
        # convert cap and prep image
        img_name = f'{name_with_log_path}_{fmt}_{i}.jpg'
        img = convert_cap_and_prep_img(
            caps[(fmt, i)], physical_props[i], fmt, img_name)
        size[i] = (caps[fmt, i]['width'], caps[fmt, i]['height'])

        # load parameters for each physical camera
        if j == 0:
          logging.debug('Camera %s', i)
        k[i] = camera_properties_utils.get_intrinsic_calibration(
            physical_props[i], caps[fmt, i]['metadata'], j == 0)
        r[i] = camera_properties_utils.get_rotation_matrix(
            physical_props[i], j == 0)
        t[i] = camera_properties_utils.get_translation_matrix(
            physical_props[i], j == 0)

        # API spec defines poseTranslation as the world coordinate p_w_cam of
        # the optics center. When applying [R|t] to go from world coordinates
        # to camera coordinates, we need -R*p_w_cam of the coordinate reported
        # in metadata.
        # i.e. for a camera with optical center at world coordinate (5, 4, 3)
        # and identity rotation, to convert a world coordinate into the
        # camera's coordinate, we need a translation vector of [-5, -4, -3]
        # so that: [I|[-5, -4, -3]^T] * [5, 4, 3]^T = [0, 0, 0]^T
        t[i] = -1.0 * np.dot(r[i], t[i])

        cam_reference[i] = bool((t[i] == _TRANS_MATRIX_REF).all())

        # Correct lens distortion to image (if available) and save before/after
        if (camera_properties_utils.distortion_correction(physical_props[i]) and
            caps[fmt, i]['metadata'] and
            fmt == 'raw'):
          cv2_distort = camera_properties_utils.get_distortion_matrix(
              physical_props[i])
          if cv2_distort is None:
            raise AssertionError(f'Camera {i} has no distortion matrix!')
          if not np.any(cv2_distort):
            logging.warning('Camera %s has distortion matrix of all zeroes', i)
          image_processing_utils.write_image(
              img/255, f'{name_with_log_path}_{fmt}_{i}.jpg')
          img = cv2.undistort(img, k[i], cv2_distort)
          image_processing_utils.write_image(
              img/255, f'{name_with_log_path}_{fmt}_correct_{i}.jpg')

        # Find the circle in the grayscale image
        circle[i] = opencv_processing_utils.find_circle(
            img, f'{name_with_log_path}_{fmt}_gray_{i}.jpg',
            _CIRCLE_MIN_AREA, _CIRCLE_COLOR)
        logging.debug('Circle radius %s: %.2f', i, circle[i]['r'])

        # Undo zoom to image (if applicable).
        if fmt == 'yuv':
          circle[i] = undo_zoom(caps[(fmt, i)], circle[i])

        # Find focal length, pixel & sensor size
        fl[i] = physical_props[i]['android.lens.info.availableFocalLengths'][0]
        pixel_sizes[i] = calc_pixel_size(physical_props[i])
        sensor_diag[i] = math.sqrt(size[i][0] ** 2 + size[i][1] ** 2)

      i_ref, i_2nd = define_reference_camera(pose_reference, cam_reference)
      logging.debug('reference camera: %s, secondary camera: %s', i_ref, i_2nd)

      # Convert circle centers to real world coordinates
      x_w = {}
      y_w = {}
      for i in [i_ref, i_2nd]:
        x_w[i], y_w[i] = convert_to_world_coordinates(
            circle[i]['x'], circle[i]['y'], r[i], t[i], k[i], chart_distance)

      # Back convert to image coordinates for correctness check
      x_p = {}
      y_p = {}
      x_p[i_2nd], y_p[i_2nd] = convert_to_image_coordinates(
          [x_w[i_ref], y_w[i_ref], chart_distance], r[i_2nd], t[i_2nd],
          k[i_2nd])
      x_p[i_ref], y_p[i_ref] = convert_to_image_coordinates(
          [x_w[i_2nd], y_w[i_2nd], chart_distance], r[i_ref], t[i_ref],
          k[i_ref])

      # Summarize results
      for i in [i_ref, i_2nd]:
        logging.debug(' Camera: %s', i)
        logging.debug(' x, y (pixels): %.1f, %.1f', circle[i]['x'],
                      circle[i]['y'])
        logging.debug(' x_w, y_w (mm): %.2f, %.2f', x_w[i] * 1.0E3,
                      y_w[i] * 1.0E3)
        logging.debug(' x_p, y_p (pixels): %.1f, %.1f', x_p[i], y_p[i])

566      err_mm = np.linalg.norm(np.array([x_w[i_ref], y_w[i_ref]]) -
567                              np.array([x_w[i_2nd], y_w[i_2nd]])) * _M_TO_MM
568      logging.debug('Center location err (mm): %.2f ATOL: %.2f',
569                    err_mm, _ALIGN_ATOL_MM)
570      if err_mm > _ALIGN_ATOL_MM:
571        raise AssertionError(
572            f'Centers {i_ref} <-> {i_2nd} too different! '
573            f'val={err_mm:.2f}, ATOL={_ALIGN_ATOL_MM} mm')
574
575      # Check projections back into pixel space
576      for i in [i_ref, i_2nd]:
577        err = np.linalg.norm(np.array([circle[i]['x'], circle[i]['y']]) -
578                             np.array([x_p[i], y_p[i]]).reshape(1, -1))
579        logging.debug('Camera %s projection error (pixels): %.1f', i, err)
580        tol = _ALIGN_RTOL * sensor_diag[i]
581        if err >= tol:
582          raise AssertionError(f'Camera {i} project location too different! '
583                               f'diff={err:.2f}, ATOL={tol:.2f} pixels')
584
585      # Check focal length and circle size if more than 1 focal length
586      if len(fl) > 1:
587        logging.debug('Circle radii (pixels); ref: %.1f, 2nd: %.1f',
588                      circle[i_ref]['r'], circle[i_2nd]['r'])
589        logging.debug('Focal lengths (diopters); ref: %.2f, 2nd: %.2f',
590                      fl[i_ref], fl[i_2nd])
591        logging.debug('Pixel size (um); ref: %.2f, 2nd: %.2f',
592                      pixel_sizes[i_ref], pixel_sizes[i_2nd])
593        if not math.isclose(circle[i_ref]['r']*pixel_sizes[i_ref]/fl[i_ref],
594                            circle[i_2nd]['r']*pixel_sizes[i_2nd]/fl[i_2nd],
595                            rel_tol=_CIRCLE_RTOL):
596          raise AssertionError(
597              f'Circle size scales improperly! RTOL: {_CIRCLE_RTOL} '
598              'Metric: radius*pixel_size/focal_length should be equal.')
599
600if __name__ == '__main__':
601  test_runner.main()
602