# Copyright 2024 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify that the switch from UW to W has similar RGB values."""


import logging
import math
import os.path
import pathlib

import cv2
from mobly import test_runner
import numpy as np

import its_base_test
import camera_properties_utils
import image_processing_utils
import its_session_utils
import opencv_processing_utils
import preview_processing_utils


_AE_ATOL = 7.0
_AE_RTOL = 0.04  # 4%
_ARUCO_MARKERS_COUNT = 4
_AWB_ATOL_AB = 10  # ATOL for A and B means in LAB color space
_AWB_ATOL_L = 3  # ATOL for L means in LAB color space
_CH_FULL_SCALE = 255
_COLORS = ('r', 'g', 'b', 'gray')
_COLOR_GRAY = _COLORS[3]
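# aeState/awbState CONVERGED and afState PASSIVE_FOCUSED all have value 2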
_CONVERGED_STATE = 2
_IMG_FORMAT = 'png'
_MP4_FORMAT = '.mp4'
_NAME = os.path.splitext(os.path.basename(__file__))[0]
_PATCH_MARGIN = 50  # pixels
_RECORDING_DURATION = 400  # milliseconds
_SENSOR_ORIENTATIONS = (90, 270)
_SKIP_INITIAL_FRAMES = 15
_ZOOM_RANGE_UW_W = (0.95, 2.05)  # UW/W crossover range
_ZOOM_STEP = 0.01


def _get_error_msg(failed_awb_msg, failed_ae_msg, failed_af_msg):
  """Returns the combined error message string.

  Args:
    failed_awb_msg: list of AWB error messages.
    failed_ae_msg: list of AE error messages.
    failed_af_msg: list of AF error messages.
  Returns:
    error_msg: str; combined error message string.
  """
  error_msg = ''
  if failed_awb_msg:
    error_msg = f'{error_msg}----AWB Check----\n'
    for msg in failed_awb_msg:
      error_msg = f'{error_msg}{msg}\n'
  if failed_ae_msg:
    error_msg = f'{error_msg}----AE Check----\n'
    for msg in failed_ae_msg:
      error_msg = f'{error_msg}{msg}\n'
  if failed_af_msg:
    error_msg = f'{error_msg}----AF Check----\n'
    for msg in failed_af_msg:
      error_msg = f'{error_msg}{msg}\n'
  return error_msg


def _check_orientation_and_flip(props, uw_img, w_img, img_name_stem):
  """Checks the sensor orientation and flips the images if needed.

  The preview stream captures are flipped based on the sensor
  orientation while using the front camera. In such cases, check the
  sensor orientation and flip the image if needed.

  Args:
    props: camera properties object.
    uw_img: image captured using UW lens.
    w_img: image captured using W lens.
    img_name_stem: prefix for the image names to be saved.

  Returns:
    numpy arrays of the (possibly mirrored) uw_img and w_img.
  """
  uw_img = (
      preview_processing_utils.mirror_preview_image_by_sensor_orientation(
          props['android.sensor.orientation'], uw_img))
  w_img = (
      preview_processing_utils.mirror_preview_image_by_sensor_orientation(
          props['android.sensor.orientation'], w_img))
  uw_img_name = f'{img_name_stem}_uw.png'
  w_img_name = f'{img_name_stem}_w.png'
  image_processing_utils.write_image(uw_img / _CH_FULL_SCALE, uw_img_name)
  image_processing_utils.write_image(w_img / _CH_FULL_SCALE, w_img_name)
  return uw_img, w_img


def _do_ae_check(uw_img, w_img, log_path, suffix):
  """Checks that the luma change is within range.

  Args:
    uw_img: image captured using UW lens.
    w_img: image captured using W lens.
    log_path: path to save the image.
    suffix: str; patch suffix to be used in file name.
  Returns:
    failed_ae_msg: list of failed AE check messages; empty if the check passes.
    uw_y_avg: average Y value for the UW lens patch.
    w_y_avg: average Y value for the W lens patch.
  """
  failed_ae_msg = []
  file_stem = f'{os.path.join(log_path, _NAME)}_{suffix}'
  uw_y = _extract_y(
      uw_img, f'{file_stem}_uw_y.png')
  uw_y_avg = np.average(uw_y)
  logging.debug('UW y_avg: %.4f', uw_y_avg)

  w_y = _extract_y(w_img, f'{file_stem}_w_y.png')
  w_y_avg = np.average(w_y)
  logging.debug('W y_avg: %.4f', w_y_avg)

  y_avg_change_percent = (abs(w_y_avg-uw_y_avg)/uw_y_avg)*100
  logging.debug('y_avg_change_percent: %.4f', y_avg_change_percent)

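  # math.isclose passes when the y_avg difference is within either the
  # relative tolerance (_AE_RTOL) or the absolute tolerance (_AE_ATOL).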
  if not math.isclose(uw_y_avg, w_y_avg, rel_tol=_AE_RTOL, abs_tol=_AE_ATOL):
    failed_ae_msg.append('y_avg change is greater than threshold value for '
                         f'patch: {suffix} '
                         f'diff: {abs(w_y_avg-uw_y_avg):.4f} '
                         f'ATOL: {_AE_ATOL} '
                         f'RTOL: {_AE_RTOL} '
                         f'uw_y_avg: {uw_y_avg:.4f} '
                         f'w_y_avg: {w_y_avg:.4f} ')
  return failed_ae_msg, uw_y_avg, w_y_avg


def _do_af_check(uw_img, w_img):
  """Checks the AF behavior between the UW and W images.

  Args:
    uw_img: image captured using UW lens.
    w_img: image captured using W lens.

  Returns:
    failed_af_msg: list of failed AF check messages; empty if the check passes.
    sharpness_uw: sharpness value for UW lens
    sharpness_w: sharpness value for W lens
  """
  failed_af_msg = []
  sharpness_uw = image_processing_utils.compute_image_sharpness(uw_img)
  logging.debug('Sharpness for UW patch: %.2f', sharpness_uw)
  sharpness_w = image_processing_utils.compute_image_sharpness(w_img)
  logging.debug('Sharpness for W patch: %.2f', sharpness_w)

  if sharpness_w < sharpness_uw:
    failed_af_msg.append('Sharpness should be higher for W lens. '
                         f'sharpness_w: {sharpness_w:.4f} '
                         f'sharpness_uw: {sharpness_uw:.4f}')
  return failed_af_msg, sharpness_uw, sharpness_w


def _do_awb_check(uw_img, w_img, cab_atol, patch_color):
  """Checks the delta Cab between the UW and W images.

  Args:
    uw_img: image captured using UW lens.
    w_img: image captured using W lens.
    cab_atol: float; threshold to use for delta Cab.
    patch_color: str; color of the patch to be tested.
  Returns:
    failed_awb_msg: list of failed AWB check messages; empty if check passes.
  """
  failed_awb_msg = []
  uw_l, uw_a, uw_b = _get_lab_means(uw_img, 'UW')
  w_l, w_a, w_b = _get_lab_means(w_img, 'W')

  # Calculate Delta Cab
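  # Delta Cab is the chromaticity difference between the UW and W patches in
  # the a*/b* plane of CIELAB; lightness (L*) is not part of this distance.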
  cab = np.sqrt(abs(uw_a - w_a)**2 + abs(uw_b - w_b)**2)
  logging.debug('delta_C: %.4f', cab)

  if cab > cab_atol:
    failed_awb_msg.append('Delta Cab is greater than the threshold value for '
                          f'patch: {patch_color} '
                          f'CAB_ATOL: {cab_atol} '
                          f'delta_cab: {cab:.4f} '
                          f'UW L, a, b means: {uw_l:.4f}, '
                          f'{uw_a:.4f}, {uw_b:.4f} '
                          f'W L, a, b means: {w_l:.4f}, {w_a:.4f}, {w_b:.4f}')
  return failed_awb_msg


def _extract_main_patch(corners, ids, img_rgb, img_path, lens_suffix):
  """Extracts the main rectangle patch from the captured frame.

  Uses the ArUco marker corners detected in the captured image to crop
  the main rectangle patch and saves the patch without the ArUco
  markers in it.

  Args:
    corners: list of detected corners.
    ids: list of int ids for each ArUco marker in the input_img.
    img_rgb: An openCV image in RGB order.
    img_path: Path to save the image.
    lens_suffix: str; suffix used to save the image.
  Returns:
    rectangle_patch: numpy float image array of the rectangle patch.
  """
  rectangle_patch = opencv_processing_utils.get_patch_from_aruco_markers(
      img_rgb, corners, ids)
  patch_path = img_path.with_name(
      f'{img_path.stem}_{lens_suffix}_patch{img_path.suffix}')
  image_processing_utils.write_image(rectangle_patch/_CH_FULL_SCALE, patch_path)
  return rectangle_patch


def _extract_y(img_uint8, file_name):
  """Converts an RGB uint8 image to YUV and returns Y.

  The Y img is saved with file_name in the test dir.

  Args:
    img_uint8: An openCV image in RGB order.
    file_name: file name along with the path to save the image.

  Returns:
    An openCV image converted to Y.
  """
  y_uint8 = opencv_processing_utils.convert_to_y(img_uint8, 'RGB')
  y_uint8 = np.expand_dims(y_uint8, axis=2)  # add plane to save image
  image_processing_utils.write_image(y_uint8/_CH_FULL_SCALE, file_name)
  return y_uint8


def _find_aruco_markers(img, img_path, lens_suffix):
  """Detects ArUco markers in the input image.

  Args:
    img: input img with ArUco markers.
    img_path: path to save the image.
    lens_suffix: suffix used to save the image.
  Returns:
    corners: list of detected corners.
    ids: list of int ids for each ArUco marker in the input_img.
  """
  aruco_path = img_path.with_name(
      f'{img_path.stem}_{lens_suffix}_aruco{img_path.suffix}')
  corners, ids, _ = opencv_processing_utils.find_aruco_markers(
      img, aruco_path)
  if len(ids) != _ARUCO_MARKERS_COUNT:
    raise AssertionError(
        f'{_ARUCO_MARKERS_COUNT} ArUco markers should be detected.')
  return corners, ids


def _get_lab_means(img, identifier):
  """Computes the means of the L*, a*, b* channels in CIELAB color space.

  Args:
    img: RGB img in numpy format.
    identifier: str; identifier for logging statement. e.g. 'UW' or 'W'

  Returns:
    mean_l, mean_a, mean_b: means of the L*, a*, b* channels.
  """
  # Convert to Lab color space
  from skimage import color  # pylint: disable=g-import-not-at-top
  img_lab = color.rgb2lab(img)

  mean_l = np.mean(img_lab[:, :, 0])  # Extract L* channel and get mean
  mean_a = np.mean(img_lab[:, :, 1])  # Extract a* channel and get mean
  mean_b = np.mean(img_lab[:, :, 2])  # Extract b* channel and get mean

  logging.debug('Lens: %s, mean_l: %.2f, mean_a: %.2f, mean_b: %.2f',
                identifier, mean_l, mean_a, mean_b)
  return mean_l, mean_a, mean_b


def _get_four_quadrant_patches(img, img_path, lens_suffix):
  """Divides the img into 4 equal parts and returns the patches.

  Args:
    img: an openCV image in RGB order.
    img_path: path to save the image.
    lens_suffix: str; suffix used to save the image.
  Returns:
    four_quadrant_patches: list of 4 patches.
  """
  num_rows = 2
  num_columns = 2
  size_x = math.floor(img.shape[1])
  size_y = math.floor(img.shape[0])
  four_quadrant_patches = []
  for i in range(0, num_rows):
    for j in range(0, num_columns):
      x = size_x / num_columns * j
      y = size_y / num_rows * i
      h = size_y / num_rows
      w = size_x / num_columns
      patch = img[int(y):int(y+h), int(x):int(x+w)]
      patch_path = img_path.with_name(
          f'{img_path.stem}_{lens_suffix}_patch_'
          f'{i}_{j}{img_path.suffix}')
      image_processing_utils.write_image(patch/_CH_FULL_SCALE, patch_path)
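      # Crop _PATCH_MARGIN pixels from each side of the quadrant so the patch
      # statistics are not affected by the quadrant borders.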
      cropped_patch = patch[_PATCH_MARGIN:-_PATCH_MARGIN,
                            _PATCH_MARGIN:-_PATCH_MARGIN]
      four_quadrant_patches.append(cropped_patch)
      cropped_patch_path = img_path.with_name(
          f'{img_path.stem}_{lens_suffix}_cropped_patch_'
          f'{i}_{j}{img_path.suffix}')
      image_processing_utils.write_image(
          cropped_patch/_CH_FULL_SCALE, cropped_patch_path)
  return four_quadrant_patches


def _get_slanted_edge_patch(img, img_path, lens_suffix):
  """Crops the central slanted edge part of the img and returns the patch.

  Args:
    img: an openCV image in RGB order.
    img_path: path to save the image.
    lens_suffix: str; suffix used to save the image. e.g. 'w' or 'uw'.

  Returns:
    slanted_edge_patch: numpy image array of the cropped central
      slanted-edge patch.
  """
  num_rows = 3
  num_columns = 5
  size_x = math.floor(img.shape[1])
  size_y = math.floor(img.shape[0])
  x = int(round(size_x / num_columns * (num_columns // 2), 0))
  y = int(round(size_y / num_rows * (num_rows // 2), 0))
  w = int(round(size_x / num_columns, 0))
  h = int(round(size_y / num_rows, 0))
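  # Take the center cell of a 3x5 grid; the slanted edge sits at the center
  # of the chart patch.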
  patch = img[y:y+h, x:x+w]
  slanted_edge_patch = patch[_PATCH_MARGIN:-_PATCH_MARGIN,
                             _PATCH_MARGIN:-_PATCH_MARGIN]
  filename_with_path = img_path.with_name(
      f'{img_path.stem}_{lens_suffix}_slanted_edge{img_path.suffix}'
  )
  image_processing_utils.write_rgb_uint8_image(
      slanted_edge_patch, filename_with_path
  )
  return slanted_edge_patch


class MultiCameraSwitchTest(its_base_test.ItsBaseTest):
  """Test that the switch from UW to W lens has similar RGB values.

  This test uses various zoom ratios within android.control.zoomRatioRange
  to capture preview frames and find the point at which the active physical
  camera changes, i.e. the crossover point from UW to W.
  It records preview at the UW/W crossover point to verify that the
  AE, AWB and AF behavior remains the same.
  """

  def test_multi_camera_switch(self):
    with its_session_utils.ItsSession(
        device_id=self.dut.serial,
        camera_id=self.camera_id,
        hidden_physical_id=self.hidden_physical_id) as cam:
      props = cam.get_camera_properties()
      props = cam.override_with_hidden_physical_camera_props(props)
      chart_distance = self.chart_distance
      failed_awb_msg = []
      failed_ae_msg = []
      failed_af_msg = []

      # check SKIP conditions
      first_api_level = its_session_utils.get_first_api_level(self.dut.serial)
      camera_properties_utils.skip_unless(
          first_api_level >= its_session_utils.ANDROID15_API_LEVEL and
          camera_properties_utils.zoom_ratio_range(props) and
          camera_properties_utils.logical_multi_camera(props) and
          camera_properties_utils.ae_regions(props))

      # Check the zoom range
      zoom_range = props['android.control.zoomRatioRange']
      logging.debug('zoomRatioRange: %s', zoom_range)
      camera_properties_utils.skip_unless(
          len(zoom_range) > 1 and
          (zoom_range[0] <= _ZOOM_RANGE_UW_W[0] <= zoom_range[1]) and
          (zoom_range[0] <= _ZOOM_RANGE_UW_W[1] <= zoom_range[1]))

      its_session_utils.load_scene(
          cam, props, self.scene, self.tablet, chart_distance)

      preview_test_size = preview_processing_utils.get_max_preview_test_size(
          cam, self.camera_id)
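      # Converge 3A before recording so the zoom sweep starts from a stable
      # AE/AWB/AF state.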
      cam.do_3a()

      try:
        # Start dynamic preview recording and collect results
        capture_results, file_list = (
            preview_processing_utils.preview_over_zoom_range(
                self.dut, cam, preview_test_size, _ZOOM_RANGE_UW_W[0],
                _ZOOM_RANGE_UW_W[1], _ZOOM_STEP, self.log_path)
        )

        physical_id_before = None
        counter = 0  # counter for the index of crossover point result
        lens_changed = False
        converged_state_counter = 0
        converged_state = False

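        # Walk through the per-frame capture results of the zoom sweep. The
        # first frame whose activePhysicalId differs from the previous frame
        # marks the UW to W crossover point.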
        for capture_result in capture_results:
          counter += 1
          ae_state = capture_result['android.control.aeState']
          awb_state = capture_result['android.control.awbState']
          af_state = capture_result['android.control.afState']
          physical_id = capture_result[
              'android.logicalMultiCamera.activePhysicalId']
          if not physical_id_before:
            physical_id_before = physical_id
          zoom_ratio = float(capture_result['android.control.zoomRatio'])
          if physical_id_before == physical_id:
            continue
          else:
            logging.debug('Active physical id changed')
            logging.debug('Crossover zoom ratio point: %f', zoom_ratio)
            physical_id_before = physical_id
            lens_changed = True
            if ae_state == awb_state == af_state == _CONVERGED_STATE:
              converged_state = True
              converged_state_counter = counter
              logging.debug('3A converged at the crossover point')
            break

        # If the frame at the crossover point was not converged, traverse
        # the capture results after the crossover point to find a converged
        # frame, which will be used for the AE, AWB and AF checks.
        if not converged_state:
          converged_state_counter = counter
          for capture_result in capture_results[converged_state_counter-1:]:
            converged_state_counter += 1
            ae_state = capture_result['android.control.aeState']
            awb_state = capture_result['android.control.awbState']
            af_state = capture_result['android.control.afState']
            zoom_ratio = float(capture_result['android.control.zoomRatio'])
            if physical_id_before == capture_result[
                'android.logicalMultiCamera.activePhysicalId']:
              if ae_state == awb_state == af_state == _CONVERGED_STATE:
                logging.debug('3A converged after crossover point.')
                logging.debug('Zoom ratio at converged state after crossover '
                              'point: %f', zoom_ratio)
                converged_state = True
                break

      except Exception as e:
        # Remove all the files except the mp4 recording in case of any error
        for filename in os.listdir(self.log_path):
          file_path = os.path.join(self.log_path, filename)
          if os.path.isfile(file_path) and not filename.endswith(_MP4_FORMAT):
            os.remove(file_path)
        raise AssertionError('Error during crossover check') from e

      # Raise error if lens did not switch within the range
      # _ZOOM_RANGE_UW_W
      # TODO(ruchamk): Add lens_changed to the CameraITS metrics
      if not lens_changed:
        e_msg = 'Crossover point not found. Try running the test again!'
        raise AssertionError(e_msg)

      # Raise error if 3A did not converge after the lens change
      if not converged_state:
        e_msg = '3A not converged after the lens change.'
        raise AssertionError(e_msg)

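      # counter is the 1-based index of the first frame captured with the W
      # lens, so index counter-2 is the last frame captured with the UW lens.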
      img_uw_file = file_list[counter-2]
      capture_result_uw = capture_results[counter-2]
      uw_phy_id = (
          capture_result_uw['android.logicalMultiCamera.activePhysicalId']
      )
      physical_props_uw = cam.get_camera_properties_by_id(uw_phy_id)
      min_focus_distance_uw = (
          physical_props_uw['android.lens.info.minimumFocusDistance']
      )
      logging.debug('Min focus distance for UW phy_id: %s is %f',
                    uw_phy_id, min_focus_distance_uw)

      logging.debug('Capture results uw crossover: %s', capture_result_uw)
      logging.debug('Capture results w crossover: %s',
                    capture_results[counter-1])
      img_w_file = file_list[converged_state_counter-1]
      capture_result_w = capture_results[converged_state_counter-1]
      logging.debug('Capture results w crossover converged: %s',
                    capture_result_w)
      w_phy_id = capture_result_w['android.logicalMultiCamera.activePhysicalId']
      physical_props_w = cam.get_camera_properties_by_id(w_phy_id)
      min_focus_distance_w = (
          physical_props_w['android.lens.info.minimumFocusDistance']
      )
      logging.debug('Min focus distance for W phy_id: %s is %f',
                    w_phy_id, min_focus_distance_w)

      # Remove unwanted frames and only save the UW and
      # W crossover point frames along with mp4 recording
      its_session_utils.remove_frame_files(self.log_path, [
          os.path.join(self.log_path, img_uw_file),
          os.path.join(self.log_path, img_w_file)])

      # Add suffix to the UW and W image files
      uw_path = pathlib.Path(os.path.join(self.log_path, img_uw_file))
      uw_name = uw_path.with_name(f'{uw_path.stem}_uw{uw_path.suffix}')
      os.rename(os.path.join(self.log_path, img_uw_file), uw_name)

      w_path = pathlib.Path(os.path.join(self.log_path, img_w_file))
      w_name = w_path.with_name(f'{w_path.stem}_w{w_path.suffix}')
      os.rename(os.path.join(self.log_path, img_w_file), w_name)

      # Convert UW and W img to numpy array
      uw_img = image_processing_utils.convert_image_to_numpy_array(
          str(uw_name))
      w_img = image_processing_utils.convert_image_to_numpy_array(
          str(w_name))

      # Check the sensor orientation and flip image
      if (props['android.lens.facing'] ==
          camera_properties_utils.LENS_FACING['FRONT']):
        img_name_stem = os.path.join(self.log_path, 'flipped_preview')
        uw_img, w_img = _check_orientation_and_flip(
            props, uw_img, w_img, img_name_stem
        )

      # Find ArUco markers in the image with UW lens
      # and extract the outer box patch
      corners, ids = _find_aruco_markers(uw_img, uw_path, 'uw')
      uw_chart_patch = _extract_main_patch(
          corners, ids, uw_img, uw_path, 'uw')
      uw_four_patches = _get_four_quadrant_patches(
          uw_chart_patch, uw_path, 'uw')

      # Find ArUco markers in the image with W lens
      # and extract the outer box patch
      corners, ids = _find_aruco_markers(w_img, w_path, 'w')
      w_chart_patch = _extract_main_patch(
          corners, ids, w_img, w_path, 'w')
      w_four_patches = _get_four_quadrant_patches(
          w_chart_patch, w_path, 'w')

      ae_uw_y_avgs = {}
      ae_w_y_avgs = {}

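      # Compare each quadrant patch of the UW frame against the corresponding
      # quadrant of the W frame; _COLORS lists the expected quadrant colors.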
      for uw_patch, w_patch, patch_color in zip(
          uw_four_patches, w_four_patches, _COLORS):
        logging.debug('Checking for quadrant color: %s', patch_color)

        # AE Check: Extract the Y component from the rectangle patch
        ae_msg, uw_y_avg, w_y_avg = _do_ae_check(
            uw_patch, w_patch, self.log_path, patch_color)
        if ae_msg:
          failed_ae_msg.append(f'{ae_msg}\n')
        ae_uw_y_avgs.update({patch_color: f'{uw_y_avg:.4f}'})
        ae_w_y_avgs.update({patch_color: f'{w_y_avg:.4f}'})

        # AWB Check: Verify that delta Cab is within the limits
        if camera_properties_utils.awb_regions(props):
          cab_atol = _AWB_ATOL_L if patch_color == _COLOR_GRAY else _AWB_ATOL_AB
          awb_msg = _do_awb_check(uw_patch, w_patch, cab_atol, patch_color)
          if awb_msg:
            failed_awb_msg.append(f'{awb_msg}\n')

      # The print statements below are used for logging purposes.
      # Do not replace them with logging.
      print(f'{_NAME}_ae_uw_y_avgs: ', ae_uw_y_avgs)
      print(f'{_NAME}_ae_w_y_avgs: ', ae_w_y_avgs)

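      # android.lens.info.minimumFocusDistance of 0 means the lens is
      # fixed-focus (FF).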
      # Skip the AF check for FF->FF lens transitions
      if min_focus_distance_w == 0:
        logging.debug('AF check skipped for this device.')
      else:
        # AF check using slanted edge
        uw_slanted_edge_patch = _get_slanted_edge_patch(
            uw_chart_patch, uw_path, 'uw')
        w_slanted_edge_patch = _get_slanted_edge_patch(
            w_chart_patch, w_path, 'w')
        failed_af_msg, sharpness_uw, sharpness_w = _do_af_check(
            uw_slanted_edge_patch, w_slanted_edge_patch)
        print(f'{_NAME}_uw_sharpness: {sharpness_uw:.4f}')
        print(f'{_NAME}_w_sharpness: {sharpness_w:.4f}')

      if failed_awb_msg or failed_ae_msg or failed_af_msg:
        error_msg = _get_error_msg(failed_awb_msg, failed_ae_msg, failed_af_msg)
        raise AssertionError(f'{_NAME} failed with the following errors:\n'
                             f'{error_msg}')


if __name__ == '__main__':
  test_runner.main()