# Copyright 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify multi-camera alignment using internal parameters."""


import logging
import math
import os.path
from mobly import test_runner
import numpy as np

import cv2
import its_base_test
import camera_properties_utils
import capture_request_utils
import image_processing_utils
import its_session_utils
import opencv_processing_utils

ALIGN_TOL_MM = 4.0  # mm
ALIGN_TOL = 0.01  # multiplied by sensor diagonal to convert to pixels
CIRCLE_COLOR = 0  # [0: black, 255: white]
CIRCLE_MIN_AREA = 0.01  # multiplied by image size
CIRCLE_RTOL = 0.1  # 10%
CM_TO_M = 1E-2
FMT_CODE_RAW = 0x20
FMT_CODE_YUV = 0x23
LENS_FACING_BACK = 1  # 0: FRONT, 1: BACK, 2: EXTERNAL
M_TO_MM = 1E3
MM_TO_UM = 1E3
NAME = os.path.splitext(os.path.basename(__file__))[0]
REFERENCE_GYRO = 1
REFERENCE_UNDEFINED = 2
TRANS_MATRIX_REF = np.array([0, 0, 0])  # ref camera translation is (0, 0, 0)


def convert_cap_and_prep_img(cap, props, fmt, img_name, debug):
  """Convert the capture to an RGB image and prepare it for processing.

  Args:
    cap: capture element
    props: dict of capture properties
    fmt: capture format ('raw' or 'yuv')
    img_name: name to save image as
    debug: boolean for debug mode

  Returns:
    img: uint8 numpy array
  """

  img = image_processing_utils.convert_capture_to_rgb_image(cap, props=props)

  # save images if debug
  if debug:
    image_processing_utils.write_image(img, img_name)

  # convert to [0, 255] images and cast as uint8
  img *= 255
  img = img.astype(np.uint8)

  # scale to match calibration data if RAW
  if fmt == 'raw':
    img = cv2.resize(img, None, fx=2, fy=2)

  return img
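# Note on the RAW branch above: cv2.resize with fx=fy=2 doubles both
# dimensions, e.g. a hypothetical 2024x1518 RAW capture becomes 4048x3036,
# the resolution the calibration data is assumed to describe.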


def calc_pixel_size(props):
  """Calculate the mean pixel size in um from camera properties.

  Args:
    props: dict of physical camera properties

  Returns:
    mean of pixel width and height in um
  """
  ar = props['android.sensor.info.pixelArraySize']
  sensor_size = props['android.sensor.info.physicalSize']
  pixel_size_w = sensor_size['width'] / ar['width']
  pixel_size_h = sensor_size['height'] / ar['height']
  logging.debug('pixel size(um): %.2f x %.2f',
                pixel_size_w * MM_TO_UM, pixel_size_h * MM_TO_UM)
  return (pixel_size_w + pixel_size_h) / 2 * MM_TO_UM
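# Worked example for calc_pixel_size() (assumed values): a 4000x3000 pixel
# array on a 5.60x4.20 mm sensor gives pixel_size_w = 5.60/4000 = 1.4e-3 mm
# and pixel_size_h = 4.20/3000 = 1.4e-3 mm, so the function returns
# (1.4e-3 + 1.4e-3) / 2 * MM_TO_UM = 1.40 um.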


def select_ids_to_test(ids, props, chart_distance):
  """Determine the best 2 cameras to test for the rig used.

  Cameras are pre-filtered to include only supportable cameras:
  YUV(RGB) and RAW(Bayer).

  Args:
    ids: unicode string; physical camera ids
    props: dict; physical camera properties dictionary
    chart_distance: float; distance to chart in meters
  Returns:
    test_ids to be tested
  """
  chart_distance = abs(chart_distance)*100  # convert m to cm
  test_ids = []
  for i in ids:
    sensor_size = props[i]['android.sensor.info.physicalSize']
    focal_l = props[i]['android.lens.info.availableFocalLengths'][0]
    diag = math.sqrt(sensor_size['height'] ** 2 + sensor_size['width'] ** 2)
    fov = round(2 * math.degrees(math.atan(diag / (2 * focal_l))), 2)
    logging.debug('Camera: %s, FoV: %.2f, chart_distance: %.1fcm', i, fov,
                  chart_distance)
    # determine best combo with rig used or recommend different rig
    if (opencv_processing_utils.FOV_THRESH_TELE < fov <
        opencv_processing_utils.FOV_THRESH_WFOV):
      test_ids.append(i)  # RFoV camera
    elif fov < opencv_processing_utils.FOV_THRESH_TELE40:
      logging.debug('Skipping camera. Not appropriate for multi-camera test.')
      continue  # super-TELE camera
    elif (fov <= opencv_processing_utils.FOV_THRESH_TELE and
          np.isclose(chart_distance,
                     opencv_processing_utils.CHART_DISTANCE_RFOV, rtol=0.1)):
      test_ids.append(i)  # TELE camera in RFoV rig
    elif (fov >= opencv_processing_utils.FOV_THRESH_WFOV and
          np.isclose(chart_distance,
                     opencv_processing_utils.CHART_DISTANCE_WFOV, rtol=0.1)):
      test_ids.append(i)  # WFoV camera in WFoV rig
    else:
      logging.debug('Skipping camera. Not appropriate for test rig.')

  e_msg = 'Error: started with 2+ cameras, reduced to <2. Wrong test rig?'
  e_msg += '\ntest_ids: %s' % str(test_ids)
  assert len(test_ids) >= 2, e_msg
  return test_ids[0:2]
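# Worked example of the FoV math in select_ids_to_test() (assumed values):
# a 6.40x4.80 mm sensor has diag = sqrt(6.40**2 + 4.80**2) = 8.00 mm; with
# focal_l = 4.00 mm, fov = 2 * degrees(atan(8.00 / (2 * 4.00))) = 90.00
# degrees, which is then classified against the FOV_THRESH_* bins above.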


def determine_valid_out_surfaces(cam, props, fmt, cap_camera_ids, sizes):
  """Determine valid output surfaces for captures.

  Args:
    cam:                obj; camera object
    props:              dict; props for the physical cameras
    fmt:                str; capture format ('yuv' or 'raw')
    cap_camera_ids:     list; camera capture ids
    sizes:              dict; valid physical sizes for the cap_camera_ids

  Returns:
    valid out_surfaces
  """
  valid_stream_combo = False

  # try simultaneous capture
  w, h = capture_request_utils.get_available_output_sizes('yuv', props)[0]
  out_surfaces = [{'format': 'yuv', 'width': w, 'height': h},
                  {'format': fmt, 'physicalCamera': cap_camera_ids[0],
                   'width': sizes[cap_camera_ids[0]][0],
                   'height': sizes[cap_camera_ids[0]][1]},
                  {'format': fmt, 'physicalCamera': cap_camera_ids[1],
                   'width': sizes[cap_camera_ids[1]][0],
                   'height': sizes[cap_camera_ids[1]][1]}]
  valid_stream_combo = cam.is_stream_combination_supported(out_surfaces)

  # fall back to trying each camera individually
  if not valid_stream_combo:
    out_surfaces = []
    for cap_id in cap_camera_ids:
      out_surface = {'format': fmt, 'physicalCamera': cap_id,
                     'width': sizes[cap_id][0],
                     'height': sizes[cap_id][1]}
      valid_stream_combo = cam.is_stream_combination_supported(out_surface)
      if valid_stream_combo:
        out_surfaces.append(out_surface)
      else:
        camera_properties_utils.skip_unless(valid_stream_combo)

  return out_surfaces


def take_images(cam, caps, props, fmt, cap_camera_ids, out_surfaces, log_path,
                debug):
  """Do image captures.

  Args:
    cam: obj; camera object
    caps: dict; capture results indexed by (fmt, id)
    props: dict; props for the physical cameras
    fmt: str; capture format ('yuv' or 'raw')
    cap_camera_ids: list; camera capture ids
    out_surfaces: list; valid output surfaces for caps
    log_path: str; location to save files
    debug: bool; True if debug mode

  Returns:
    caps: dict; capture information indexed by (fmt, cap_id)
  """

  logging.debug('out_surfaces: %s', str(out_surfaces))
  if len(out_surfaces) == 3:  # do simultaneous capture
    # Do 3A and get the values
    s, e, _, _, fd = cam.do_3a(get_results=True, lock_ae=True, lock_awb=True)
    if fmt == 'raw':
      e *= 2  # brighten RAW images

    req = capture_request_utils.manual_capture_request(s, e, fd)
    _, caps[(fmt, cap_camera_ids[0])], caps[(fmt, cap_camera_ids[1])] = (
        cam.do_capture(req, out_surfaces))

  else:  # step through cameras individually
    for i, out_surface in enumerate(out_surfaces):
      # Do 3A and get the values
      s, e, _, _, fd = cam.do_3a(get_results=True,
                                 lock_ae=True, lock_awb=True)
      if fmt == 'raw':
        e *= 2  # brighten RAW images

      req = capture_request_utils.manual_capture_request(s, e, fd)
      caps[(fmt, cap_camera_ids[i])] = cam.do_capture(req, out_surface)

  # save images if debug
  if debug:
    for i in [0, 1]:
      img = image_processing_utils.convert_capture_to_rgb_image(
          caps[(fmt, cap_camera_ids[i])], props=props[cap_camera_ids[i]])
      image_processing_utils.write_image(img, '%s_%s_%s.jpg' % (
          os.path.join(log_path, NAME), fmt, cap_camera_ids[i]))

  return caps


def undo_zoom(cap, circle):
  """Correct coordinates and size of circle for zoom.

  Assume that the maximum physical YUV image size is close to active array
  size.

  Args:
    cap: camera capture element
    circle: dict of circle values
  Returns:
    unzoomed circle dict
  """
  yuv_w = cap['width']
  yuv_h = cap['height']
  logging.debug('cap size: %d x %d', yuv_w, yuv_h)
  cr = cap['metadata']['android.scaler.cropRegion']
  cr_w = cr['right'] - cr['left']
  cr_h = cr['bottom'] - cr['top']

  # Offset due to aspect ratio difference of crop region and yuv
  # - fit yuv box inside of differently shaped cr box
  yuv_aspect = yuv_w / yuv_h
  relative_aspect = yuv_aspect / (cr_w/cr_h)
  if relative_aspect > 1:
    zoom_ratio = yuv_w / cr_w
    yuv_x = 0
    yuv_y = (cr_h - cr_w / yuv_aspect) / 2
  else:
    zoom_ratio = yuv_h / cr_h
    yuv_x = (cr_w - cr_h * yuv_aspect) / 2
    yuv_y = 0

  circle['x'] = cr['left'] + yuv_x + circle['x'] / zoom_ratio
  circle['y'] = cr['top'] + yuv_y + circle['y'] / zoom_ratio
  circle['r'] = circle['r'] / zoom_ratio

  logging.debug(' Calculated zoom ratio: %.3f', zoom_ratio)
  logging.debug(' Corrected circle X: %.2f', circle['x'])
  logging.debug(' Corrected circle Y: %.2f', circle['y'])
  logging.debug(' Corrected circle radius: %.2f', circle['r'])

  return circle
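# Worked example of the undo_zoom() letterboxing math (assumed values): a
# 1920x1080 YUV capture (16:9) inside a 2000x1500 crop region (4:3) gives
# relative_aspect > 1, so zoom_ratio = 1920/2000 = 0.96 and
# yuv_y = (1500 - 2000/(16/9))/2 = 187.5. The YUV center (960, 540) then
# maps to (left + 1000, top + 750), the center of the crop region.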


def convert_to_world_coordinates(x, y, r, t, k, z_w):
  """Convert x,y coordinates to world coordinates.

  Conversion equation is:
  A = [[x*r[2][0] - dot(k_row0, r_col0), x*r[2][1] - dot(k_row0, r_col1)],
       [y*r[2][0] - dot(k_row1, r_col0), y*r[2][1] - dot(k_row1, r_col1)]]
  b = [[z_w*dot(k_row0, r_col2) + dot(k_row0, t) - x*(r[2][2]*z_w + t[2])],
       [z_w*dot(k_row1, r_col2) + dot(k_row1, t) - y*(r[2][2]*z_w + t[2])]]

  [[x_w], [y_w]] = inv(A) * b

  Args:
    x: x location in pixel space
    y: y location in pixel space
    r: rotation matrix
    t: translation vector
    k: intrinsic matrix
    z_w: z distance in world space

  Returns:
    x_w:    x in meters in world space
    y_w:    y in meters in world space
  """
  c_1 = r[2, 2] * z_w + t[2]
  k_x1 = np.dot(k[0, :], r[:, 0])
  k_x2 = np.dot(k[0, :], r[:, 1])
  k_x3 = z_w * np.dot(k[0, :], r[:, 2]) + np.dot(k[0, :], t)
  k_y1 = np.dot(k[1, :], r[:, 0])
  k_y2 = np.dot(k[1, :], r[:, 1])
  k_y3 = z_w * np.dot(k[1, :], r[:, 2]) + np.dot(k[1, :], t)

  a = np.array([[x*r[2][0]-k_x1, x*r[2][1]-k_x2],
                [y*r[2][0]-k_y1, y*r[2][1]-k_y2]])
  b = np.array([[k_x3-x*c_1], [k_y3-y*c_1]])
  return np.dot(np.linalg.inv(a), b)
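# Sanity check for convert_to_world_coordinates() (assumed values): with
# r = np.eye(3), t = np.zeros(3) and k = [[f, 0, cx], [0, f, cy], [0, 0, 1]],
# the solution reduces to x_w = z_w*(x - cx)/f and y_w = z_w*(y - cy)/f, so
# a pixel at the principal point (cx, cy) maps to world (0, 0) at any z_w.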


def convert_to_image_coordinates(p_w, r, t, k):
  """Project world point p_w to pixel coordinates using [R|t] and K."""
  p_c = np.dot(r, p_w) + t
  p_h = np.dot(k, p_c)
  return p_h[0] / p_h[2], p_h[1] / p_h[2]
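# Sanity check for convert_to_image_coordinates() (assumed values): with the
# same identity pose, p_w = [0, 0, z] gives p_c = [0, 0, z] and
# p_h = [cx*z, cy*z, z], so the projection returns (cx, cy): a world point
# on the optical axis lands at the principal point.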


def define_reference_camera(pose_reference, cam_reference):
  """Determine the reference camera.

  Args:
    pose_reference: 0 for cameras, 1 for gyro
    cam_reference: dict with key of physical camera and value True/False
  Returns:
    i_ref: physical id of reference camera
    i_2nd: physical id of secondary camera
  """

  if pose_reference == REFERENCE_GYRO:
    logging.debug('pose_reference is GYRO')
    keys = list(cam_reference.keys())
    i_ref = keys[0]  # pick first camera as ref
    i_2nd = keys[1]
  else:
    logging.debug('pose_reference is CAMERA')
    i_refs = [k for (k, v) in cam_reference.items() if v]
    i_ref = i_refs[0]
    if len(i_refs) > 1:
      logging.debug('Warning: more than 1 reference camera. Check translation '
                    'matrices. cam_reference: %s', str(cam_reference))
      i_2nd = i_refs[1]  # use second camera since both at same location
    else:
      i_2nd = next(k for (k, v) in cam_reference.items() if not v)
  return i_ref, i_2nd
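# Usage sketch for define_reference_camera() (assumed values): with
# pose_reference = REFERENCE_GYRO and cam_reference = {'2': True, '3': False},
# the first key '2' is returned as i_ref; with a CAMERA pose reference, the
# camera whose translation is (0, 0, 0) (value True) becomes i_ref instead.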


class MultiCameraAlignmentTest(its_base_test.ItsBaseTest):
  """Test the multi camera system parameters related to camera spacing.

  Using the multi-camera physical cameras, take a picture of scene4
  (a black circle and surrounding square on a white background) with
  one of the physical cameras. Then find the circle center and radius. Using
  the parameters:
      android.lens.poseReference
      android.lens.poseTranslation
      android.lens.poseRotation
      android.lens.intrinsicCalibration
      android.lens.distortion (if available)
  project the circle center to the world coordinates for each camera.
  Compare the difference between the two cameras' circle centers in
  world coordinates.

  Reproject the world coordinates back to pixel coordinates and compare
  against originals as a correctness check.

  Compare the circle sizes if the focal lengths of the cameras are
  different using
      android.lens.availableFocalLengths.
  """

  def test_multi_camera_alignment(self):
    # capture images
    with its_session_utils.ItsSession(
        device_id=self.dut.serial,
        camera_id=self.camera_id,
        hidden_physical_id=self.hidden_physical_id) as cam:
      props = cam.get_camera_properties()
      log_path = self.log_path
      chart_distance = self.chart_distance * CM_TO_M

      # check SKIP conditions
      camera_properties_utils.skip_unless(
          camera_properties_utils.read_3a(props) and
          camera_properties_utils.per_frame_control(props) and
          camera_properties_utils.logical_multi_camera(props) and
          camera_properties_utils.backward_compatible(props))

      # load chart for scene
      its_session_utils.load_scene(
          cam, props, self.scene, self.tablet, self.chart_distance)

      debug = self.debug_mode
      pose_reference = props['android.lens.poseReference']

      # Convert chart_distance for lens facing back
      if props['android.lens.facing'] == LENS_FACING_BACK:
        # API spec defines +z as pointing out from the screen
        logging.debug('lens facing BACK')
        chart_distance *= -1

      # find physical camera IDs
      ids = camera_properties_utils.logical_multi_camera_physical_ids(props)
      physical_props = {}
      physical_ids = []
      physical_raw_ids = []
      for i in ids:
        physical_props[i] = cam.get_camera_properties_by_id(i)
        if physical_props[i][
            'android.lens.poseReference'] == REFERENCE_UNDEFINED:
          continue
        # find YUV+RGB capable physical cameras
        if (camera_properties_utils.backward_compatible(physical_props[i]) and
            not camera_properties_utils.mono_camera(physical_props[i])):
          physical_ids.append(i)
        # find RAW+RGB capable physical cameras
        if (camera_properties_utils.backward_compatible(physical_props[i]) and
            not camera_properties_utils.mono_camera(physical_props[i]) and
            camera_properties_utils.raw16(physical_props[i])):
          physical_raw_ids.append(i)

      # determine formats and select cameras
      fmts = ['yuv']
      if len(physical_raw_ids) >= 2:
        fmts.insert(0, 'raw')  # add RAW to analysis if enough cameras
        logging.debug('Selecting RAW+RGB supported cameras')
        physical_raw_ids = select_ids_to_test(physical_raw_ids, physical_props,
                                              chart_distance)
      logging.debug('Selecting YUV+RGB cameras')
      camera_properties_utils.skip_unless(len(physical_ids) >= 2)
      physical_ids = select_ids_to_test(physical_ids, physical_props,
                                        chart_distance)

      # do captures for valid formats
      caps = {}
      for fmt in fmts:
        physical_sizes = {}
        capture_cam_ids = physical_ids
        fmt_code = FMT_CODE_YUV
        if fmt == 'raw':
          capture_cam_ids = physical_raw_ids
          fmt_code = FMT_CODE_RAW
        for physical_id in capture_cam_ids:
          configs = physical_props[physical_id][
              'android.scaler.streamConfigurationMap'][
                  'availableStreamConfigurations']
          fmt_configs = [cfg for cfg in configs if cfg['format'] == fmt_code]
          out_configs = [cfg for cfg in fmt_configs if not cfg['input']]
          out_sizes = [(cfg['width'], cfg['height']) for cfg in out_configs]
          physical_sizes[physical_id] = max(out_sizes, key=lambda item: item[1])

        out_surfaces = determine_valid_out_surfaces(
            cam, props, fmt, capture_cam_ids, physical_sizes)
        caps = take_images(cam, caps, physical_props, fmt, capture_cam_ids,
                           out_surfaces, log_path, debug)

    # process images for correctness
    for j, fmt in enumerate(fmts):
      size = {}
      k = {}
      cam_reference = {}
      r = {}
      t = {}
      circle = {}
      fl = {}
      sensor_diag = {}
      pixel_sizes = {}
      capture_cam_ids = physical_ids
      if fmt == 'raw':
        capture_cam_ids = physical_raw_ids
      logging.debug('Format: %s', str(fmt))
      for i in capture_cam_ids:
        # convert cap and prep image
        img_name = '%s_%s_%s.jpg' % (os.path.join(log_path, NAME), fmt, i)
        img = convert_cap_and_prep_img(
            caps[(fmt, i)], physical_props[i], fmt, img_name, debug)
        size[i] = (caps[(fmt, i)]['width'], caps[(fmt, i)]['height'])

        # load parameters for each physical camera
        if j == 0:
          logging.debug('Camera %s', i)
        k[i] = camera_properties_utils.get_intrinsic_calibration(
            physical_props[i], j == 0)
        r[i] = camera_properties_utils.get_rotation_matrix(
            physical_props[i], j == 0)
        t[i] = camera_properties_utils.get_translation_matrix(
            physical_props[i], j == 0)

        # API spec defines poseTranslation as the world coordinate p_w_cam of
        # the optics center. When applying [R|t] to go from world coordinates
        # to camera coordinates, we need -R*p_w_cam of the coordinate reported
        # in metadata.
        # ie. for a camera with optical center at world coordinate (5, 4, 3)
        # and identity rotation, to convert a world coordinate into the
        # camera's coordinate, we need a translation vector of [-5, -4, -3]
        # so that: [I|[-5, -4, -3]^T] * [5, 4, 3]^T = [0,0,0]^T
        t[i] = -1.0 * np.dot(r[i], t[i])
        if debug and j == 1:
          logging.debug('t: %s', str(t[i]))
          logging.debug('r: %s', str(r[i]))

        cam_reference[i] = (t[i] == TRANS_MATRIX_REF).all()

        # Correct lens distortion to image (if available) and save before/after
        if (camera_properties_utils.distortion_correction(physical_props[i]) and
            camera_properties_utils.intrinsic_calibration(physical_props[i]) and
            fmt == 'raw'):
          cv2_distort = camera_properties_utils.get_distortion_matrix(
              physical_props[i])
          image_processing_utils.write_image(img/255, '%s_%s_%s.jpg' % (
              os.path.join(log_path, NAME), fmt, i))
          img = cv2.undistort(img, k[i], cv2_distort)
          image_processing_utils.write_image(img/255, '%s_%s_correct_%s.jpg' % (
              os.path.join(log_path, NAME), fmt, i))

        # Find the circles in grayscale image
        circle[i] = opencv_processing_utils.find_circle(
            img, '%s_%s_gray_%s.jpg' % (os.path.join(log_path, NAME), fmt, i),
            CIRCLE_MIN_AREA, CIRCLE_COLOR)
        logging.debug('Circle radius %s:  %.2f', i, circle[i]['r'])

        # Undo zoom to image (if applicable).
        if fmt == 'yuv':
          circle[i] = undo_zoom(caps[(fmt, i)], circle[i])

        # Find focal length, pixel & sensor size
        fl[i] = physical_props[i]['android.lens.info.availableFocalLengths'][0]
        pixel_sizes[i] = calc_pixel_size(physical_props[i])
        sensor_diag[i] = math.sqrt(size[i][0] ** 2 + size[i][1] ** 2)

      i_ref, i_2nd = define_reference_camera(pose_reference, cam_reference)
      logging.debug('reference camera: %s, secondary camera: %s', i_ref, i_2nd)

      # Convert circle centers to real world coordinates
      x_w = {}
      y_w = {}
      for i in [i_ref, i_2nd]:
        x_w[i], y_w[i] = convert_to_world_coordinates(
            circle[i]['x'], circle[i]['y'], r[i], t[i], k[i], chart_distance)

      # Back convert to image coordinates for correctness check
      x_p = {}
      y_p = {}
      x_p[i_2nd], y_p[i_2nd] = convert_to_image_coordinates(
          [x_w[i_ref], y_w[i_ref], chart_distance], r[i_2nd], t[i_2nd],
          k[i_2nd])
      x_p[i_ref], y_p[i_ref] = convert_to_image_coordinates(
          [x_w[i_2nd], y_w[i_2nd], chart_distance], r[i_ref], t[i_ref],
          k[i_ref])

      # Summarize results
      for i in [i_ref, i_2nd]:
        logging.debug(' Camera: %s', i)
        logging.debug(' x, y (pixels): %.1f, %.1f', circle[i]['x'],
                      circle[i]['y'])
        logging.debug(' x_w, y_w (mm): %.2f, %.2f', x_w[i] * M_TO_MM,
                      y_w[i] * M_TO_MM)
        logging.debug(' x_p, y_p (pixels): %.1f, %.1f', x_p[i], y_p[i])

      # Check center locations
      err = np.linalg.norm(np.array([x_w[i_ref], y_w[i_ref]]) -
                           np.array([x_w[i_2nd], y_w[i_2nd]]))
      logging.debug('Center location err (mm): %.2f', err*M_TO_MM)
      msg = 'Center locations %s <-> %s too different!' % (i_ref, i_2nd)
      msg += ' val=%.2fmm, THRESH=%.1fmm' % (err*M_TO_MM, ALIGN_TOL_MM)
      assert err*M_TO_MM < ALIGN_TOL_MM, msg

      # Check projections back into pixel space
      for i in [i_ref, i_2nd]:
        err = np.linalg.norm(np.array([circle[i]['x'], circle[i]['y']]) -
                             np.array([x_p[i], y_p[i]]))
        logging.debug('Camera %s projection error (pixels): %.1f', i, err)
        tol = ALIGN_TOL * sensor_diag[i]
        msg = 'Camera %s project locations too different!' % i
        msg += ' diff=%.2f, TOL=%.2f' % (err, tol)
        assert err < tol, msg

      # Check focal length and circle size if more than 1 focal length
      if len(fl) > 1:
        logging.debug('Circle radii (pixels); ref: %.1f, 2nd: %.1f',
                      circle[i_ref]['r'], circle[i_2nd]['r'])
        logging.debug('Focal lengths (mm); ref: %.2f, 2nd: %.2f',
                      fl[i_ref], fl[i_2nd])
        logging.debug('Pixel size (um); ref: %.2f, 2nd: %.2f',
                      pixel_sizes[i_ref], pixel_sizes[i_2nd])
        msg = 'Circle size scales improperly! RTOL=%.1f\n' % CIRCLE_RTOL
        msg += 'Metric: radius*pixel_size/focal_length should be equal.'
        assert np.isclose(circle[i_ref]['r']*pixel_sizes[i_ref]/fl[i_ref],
                          circle[i_2nd]['r']*pixel_sizes[i_2nd]/fl[i_2nd],
                          rtol=CIRCLE_RTOL), msg


if __name__ == '__main__':
  test_runner.main()