# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processing utilities using OpenCV."""


import logging
import math
import os
import pathlib
import cv2
import numpy
import scipy.spatial

import camera_properties_utils
import capture_request_utils
import error_util
import image_processing_utils

AE_AWB_METER_WEIGHT = 1000  # 1 - 1000 with 1000 the highest
ANGLE_CHECK_TOL = 1  # degrees
ANGLE_NUM_MIN = 10  # Minimum number of angles for find_angle() to be valid
ARUCO_CORNER_COUNT = 4  # total of 4 corners to an ArUco marker

TEST_IMG_DIR = os.path.join(os.environ['CAMERA_ITS_TOP'], 'test_images')
CH_FULL_SCALE = 255
CHART_FILE = os.path.join(TEST_IMG_DIR, 'ISO12233.png')
CHART_HEIGHT_31CM = 13.5  # cm
CHART_HEIGHT_22CM = 9.5  # cm
CHART_DISTANCE_31CM = 31.0  # cm
CHART_DISTANCE_22CM = 22.0  # cm
CHART_SCALE_RTOL = 0.1
CHART_SCALE_START = 0.65
CHART_SCALE_STOP = 1.35
CHART_SCALE_STEP = 0.025

CIRCLE_AR_ATOL = 0.1  # circle aspect ratio tolerance
CIRCLISH_ATOL = 0.10  # contour area vs ideal circle area & aspect ratio TOL
CIRCLISH_LOW_RES_ATOL = 0.15  # loosen for low res images
CIRCLE_MIN_PTS = 20
CIRCLE_RADIUS_NUMPTS_THRESH = 2  # contour num_pts/radius: empirically ~3x
CIRCLE_COLOR_ATOL = 0.05  # circle color fill tolerance
CIRCLE_LOCATION_VARIATION_RTOL = 0.05  # tolerance to remove similar circles

CV2_CONTRAST_ALPHA = 1.25  # contrast
CV2_CONTRAST_BETA = 0  # brightness
CV2_THRESHOLD_LOWER_BLACK = 0
CV2_LINE_THICKNESS = 3  # line thickness for drawing on images
CV2_BLACK = (0, 0, 0)
CV2_BLUE = (0, 0, 255)
CV2_RED = (255, 0, 0)  # colors defined in RGB order for drawing on RGB images
CV2_RED_NORM = tuple(numpy.array(CV2_RED) / 255)
CV2_GREEN = (0, 255, 0)
CV2_GREEN_NORM = tuple(numpy.array(CV2_GREEN) / 255)
CV2_WHITE = (255, 255, 255)
CV2_YELLOW = (255, 255, 0)
CV2_THRESHOLD_BLOCK_SIZE = 11
CV2_THRESHOLD_CONSTANT = 2

CV2_HOME_DIRECTORY = os.path.dirname(cv2.__file__)
CV2_ALTERNATE_DIRECTORY = pathlib.Path(CV2_HOME_DIRECTORY).parents[3]
HAARCASCADE_FILE_NAME = 'haarcascade_frontalface_default.xml'

FACES_ALIGNED_MIN_NUM = 2
FACE_CENTER_MATCH_TOL_X = 10  # 10 pixels or ~1.5% in 640x480 image
FACE_CENTER_MATCH_TOL_Y = 20  # 20 pixels or ~4% in 640x480 image
FACE_CENTER_MIN_LOGGING_DIST = 50
FACE_MIN_CENTER_DELTA = 15

FOV_THRESH_TELE25 = 25
FOV_THRESH_TELE40 = 40
FOV_THRESH_TELE = 60
FOV_THRESH_UW = 90

IMAGE_ROTATION_THRESHOLD = 40  # rotation tolerance in pixels

LOW_RES_IMG_THRESH = 320 * 240

NUM_AE_AWB_REGIONS = 4

SCALE_WIDE_IN_22CM_RIG = 0.67
SCALE_TELE_IN_22CM_RIG = 0.5
SCALE_TELE_IN_31CM_RIG = 0.67
SCALE_TELE40_IN_22CM_RIG = 0.33
SCALE_TELE40_IN_31CM_RIG = 0.5
SCALE_TELE25_IN_31CM_RIG = 0.33

SQUARE_AREA_MIN_REL = 0.05  # Minimum size for square relative to image area
SQUARE_CROP_MARGIN = 0  # Set to aid detection of QR codes
SQUARE_TOL = 0.05  # Square W vs H mismatch RTOL
SQUARISH_RTOL = 0.10
SQUARISH_AR_RTOL = 0.10

VGA_HEIGHT = 480
VGA_WIDTH = 640

def convert_to_y(img, color_order='RGB'):
  """Returns a Y image from a uint8 RGB or BGR ordered image.

  Args:
    img: a uint8 openCV image.
    color_order: str; 'RGB' or 'BGR' to signify color plane order.

  Returns:
    The Y plane of the input img.
  """
  if img.dtype != 'uint8':
    raise AssertionError(f'Incorrect input type: {img.dtype}! Expected: uint8')
  if color_order == 'RGB':
    y, _, _ = cv2.split(cv2.cvtColor(img, cv2.COLOR_RGB2YUV))
  elif color_order == 'BGR':
    y, _, _ = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2YUV))
  else:
    raise AssertionError(f'Undefined color order: {color_order}!')
  return y
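
# Example convert_to_y usage (a minimal sketch; assumes a file 'scene.jpg'
# exists; cv2.imread loads BGR, so it is converted to RGB first):
#   rgb = cv2.cvtColor(cv2.imread('scene.jpg'), cv2.COLOR_BGR2RGB)
#   y = convert_to_y(rgb, 'RGB')  # 2D uint8 luma plane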


def binarize_image(img_gray):
  """Returns a binarized image based on cv2 thresholds.

  Args:
    img_gray: A grayscale openCV image.
  Returns:
    An openCV image binarized to 0 (black) and 255 (white).
  """
  _, img_bw = cv2.threshold(numpy.uint8(img_gray), 0, 255,
                            cv2.THRESH_BINARY + cv2.THRESH_OTSU)
  return img_bw
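
# Example binarize_image usage (a sketch; with cv2.THRESH_OTSU the threshold
# is chosen automatically, so the 0 passed to cv2.threshold above is ignored):
#   img_gray = image_processing_utils.convert_rgb_to_grayscale(img)
#   img_bw = binarize_image(img_gray)  # every pixel is exactly 0 or 255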


def _load_opencv_haarcascade_file():
  """Return Haar Cascade file for face detection."""
  for cv2_directory in (CV2_HOME_DIRECTORY, CV2_ALTERNATE_DIRECTORY,):
    for path, _, files in os.walk(cv2_directory):
      if HAARCASCADE_FILE_NAME in files:
        haarcascade_file = os.path.join(path, HAARCASCADE_FILE_NAME)
        logging.debug('Haar Cascade file location: %s', haarcascade_file)
        return haarcascade_file
  raise error_util.CameraItsError(f'{HAARCASCADE_FILE_NAME} was '
                                  f'not found in {CV2_HOME_DIRECTORY} '
                                  f'or {CV2_ALTERNATE_DIRECTORY}')


def find_opencv_faces(img, scale_factor, min_neighbors):
  """Finds face rectangles with openCV.

  Args:
    img: numpy array; 3-D RGB image with [0,1] values
    scale_factor: float, specifies how much image size is reduced at each scale
    min_neighbors: int, specifies minimum number of neighbors to keep rectangle
  Returns:
    List of rectangles with faces
  """
  # prep opencv
  opencv_haarcascade_file = _load_opencv_haarcascade_file()
  face_cascade = cv2.CascadeClassifier(opencv_haarcascade_file)
  img_uint8 = image_processing_utils.convert_image_to_uint8(img)
  img_gray = cv2.cvtColor(img_uint8, cv2.COLOR_RGB2GRAY)

  # find face rectangles with opencv
  faces_opencv = face_cascade.detectMultiScale(
      img_gray, scale_factor, min_neighbors)
  logging.debug('%s', str(faces_opencv))
  return faces_opencv
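
# Example find_opencv_faces usage (a sketch; the parameter values below are
# assumptions — typical Haar cascade settings are scale_factor ~1.05-1.3 and
# min_neighbors ~3-6):
#   faces = find_opencv_faces(img, scale_factor=1.1, min_neighbors=4)
#   # each entry is (x, y, w, h) in pixel coordinates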


def find_all_contours(img):
  """Returns all contours found in img by OpenCV's findContours.

  Args:
    img: single-channel uint8 numpy image array, typically binarized.

  Returns:
    Contours found in img; handles the differing return signatures of
    cv2.findContours across OpenCV versions.
  """
  cv2_version = cv2.__version__
  if cv2_version.startswith('3.'):  # OpenCV 3.x returns (img, contours, hierarchy)
    _, contours, _ = cv2.findContours(img, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)
  else:  # OpenCV 2.x and 4.x return (contours, hierarchy)
    contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
  return contours


def calc_chart_scaling(chart_distance, camera_fov):
  """Returns the chart scaling factor.

  Args:
   chart_distance: float; distance in cm from camera of displayed chart
   camera_fov: float; camera field of view.

  Returns:
   chart_scaling: float; scaling factor for chart
  """
  chart_scaling = 1.0
  fov = float(camera_fov)
  is_chart_distance_22cm = math.isclose(
      chart_distance, CHART_DISTANCE_22CM, rel_tol=CHART_SCALE_RTOL)
  is_chart_distance_31cm = math.isclose(
      chart_distance, CHART_DISTANCE_31CM, rel_tol=CHART_SCALE_RTOL)

  if FOV_THRESH_TELE < fov < FOV_THRESH_UW and is_chart_distance_22cm:
    chart_scaling = SCALE_WIDE_IN_22CM_RIG
  elif FOV_THRESH_TELE40 < fov <= FOV_THRESH_TELE and is_chart_distance_22cm:
    chart_scaling = SCALE_TELE_IN_22CM_RIG
  elif fov <= FOV_THRESH_TELE40 and is_chart_distance_22cm:
    chart_scaling = SCALE_TELE40_IN_22CM_RIG
  elif ((fov <= FOV_THRESH_TELE25 and is_chart_distance_31cm) or
        chart_distance > CHART_DISTANCE_31CM):
    chart_scaling = SCALE_TELE25_IN_31CM_RIG
  elif fov <= FOV_THRESH_TELE40 and is_chart_distance_31cm:
    chart_scaling = SCALE_TELE40_IN_31CM_RIG
  elif fov <= FOV_THRESH_TELE and is_chart_distance_31cm:
    chart_scaling = SCALE_TELE_IN_31CM_RIG
  return chart_scaling
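
# Example of the scaling table above (a sketch; values follow from the
# constants defined at the top of this module):
#   calc_chart_scaling(22.0, 75)  -> 0.67 (wide FoV in 22cm rig)
#   calc_chart_scaling(31.0, 50)  -> 0.67 (tele FoV in 31cm rig)
#   calc_chart_scaling(31.0, 30)  -> 0.5  (tele40 FoV in 31cm rig)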


def scale_img(img, scale=1.0):
  """Scale image based on a real number scale factor."""
  dim = (int(img.shape[1] * scale), int(img.shape[0] * scale))
  return cv2.resize(img.copy(), dim, interpolation=cv2.INTER_AREA)


class Chart(object):
  """Definition for chart object.

  Defines PNG reference file, chart size, distance and scaling range.
  """

  def __init__(
      self,
      cam,
      props,
      log_path,
      chart_file=None,
      height=None,
      distance=None,
      scale_start=None,
      scale_stop=None,
      scale_step=None,
      rotation=None):
    """Initializes Chart class.

    Args:
     cam: open ITS session
     props: camera properties object
     log_path: log path to store the captured images.
     chart_file: str; absolute path to png file of chart
     height: float; height in cm of displayed chart
     distance: float; distance in cm from camera of displayed chart
     scale_start: float; start value for scaling for chart search
     scale_stop: float; stop value for scaling for chart search
     scale_step: float; step value for scaling for chart search
     rotation: clockwise rotation in degrees (multiple of 90) or None
    """
    self._file = chart_file or CHART_FILE
    if distance is not None and math.isclose(
        distance, CHART_DISTANCE_31CM, rel_tol=CHART_SCALE_RTOL):
      self._height = height or CHART_HEIGHT_31CM
      self._distance = distance
    else:
      self._height = height or CHART_HEIGHT_22CM
      self._distance = CHART_DISTANCE_22CM
    self._scale_start = scale_start or CHART_SCALE_START
    self._scale_stop = scale_stop or CHART_SCALE_STOP
    self._scale_step = scale_step or CHART_SCALE_STEP
    self.opt_val = None
    self.locate(cam, props, log_path, rotation)

  def _set_scale_factors_to_one(self):
    """Set scale factors to 1.0 for skipped tests."""
    self.wnorm = 1.0
    self.hnorm = 1.0
    self.xnorm = 0.0
    self.ynorm = 0.0
    self.scale = 1.0
  def _calc_scale_factors(self, cam, props, fmt, log_path, rotation):
    """Take an image with sensitivity, exposure & focus distance; find chart.

    Args:
     cam: An open its session.
     props: Properties of cam
     fmt: Image format for the capture
     log_path: log path to save the captured images.
     rotation: clockwise rotation of template in degrees (multiple of 90) or
       None

    Returns:
      template: numpy array; chart template for locator
      img_3a: numpy array; RGB image for chart location
      scale_factor: float; scaling factor for chart search
    """
    req = capture_request_utils.auto_capture_request()
    cap_chart = image_processing_utils.stationary_lens_cap(cam, req, fmt)
    img_3a = image_processing_utils.convert_capture_to_rgb_image(
        cap_chart, props)
    img_3a = image_processing_utils.rotate_img_per_argv(img_3a)
    af_scene_name = os.path.join(log_path, 'af_scene.jpg')
    image_processing_utils.write_image(img_3a, af_scene_name)
    template = cv2.imread(self._file, cv2.IMREAD_ANYDEPTH)
    if rotation is not None:
      logging.debug('Rotating template by %d degrees', rotation)
      template = numpy.rot90(template, k=rotation / 90)
    focal_l = cap_chart['metadata']['android.lens.focalLength']
    pixel_pitch = (
        props['android.sensor.info.physicalSize']['height'] / img_3a.shape[0])
    logging.debug('Chart distance: %.2fcm', self._distance)
    logging.debug('Chart height: %.2fcm', self._height)
    logging.debug('Focal length: %.2fmm', focal_l)
    logging.debug('Pixel pitch: %.2fum', pixel_pitch * 1E3)
    logging.debug('Template width: %dpixels', template.shape[1])
    logging.debug('Template height: %dpixels', template.shape[0])
    chart_pixel_h = self._height * focal_l / (self._distance * pixel_pitch)
    scale_factor = template.shape[0] / chart_pixel_h
    if rotation in (90, 270):
      # With the landscape to portrait override turned on, the width and height
      # of the active array, normally w x h, will be h x (w * (h/w)^2). Reduce
      # the applied scaling by the same factor to compensate for this, because
      # the chart will take up more of the scene. Assume w > h, since this is
      # meant for landscape sensors.
      rotate_physical_aspect = (
          props['android.sensor.info.physicalSize']['height'] /
          props['android.sensor.info.physicalSize']['width'])
      scale_factor *= rotate_physical_aspect ** 2
    logging.debug('Chart/image scale factor = %.2f', scale_factor)
    return template, img_3a, scale_factor
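
  # The template-to-scene scale factor above comes from the pinhole model:
  # chart height on sensor (pixels) = chart_height_cm * focal_length_mm
  # / (chart_distance_cm * pixel_pitch_mm). A worked sketch, with assumed
  # values for focal length and pixel pitch:
  #   chart 13.5cm tall at 31cm, focal length 4.38mm, pixel pitch 0.0029mm
  #   -> chart_pixel_h = 13.5 * 4.38 / (31.0 * 0.0029) ~= 658 pixels
  #   -> scale_factor = template.shape[0] / 658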

  def locate(self, cam, props, log_path, rotation):
    """Find the chart in the image, and append location to chart object.

    Args:
      cam: Open ITS session.
      props: Camera properties object.
      log_path: log path to store the captured images.
      rotation: clockwise rotation of template in degrees (multiple of 90) or
        None

    The values appended are:
    xnorm: float; [0, 1] left loc of chart in scene
    ynorm: float; [0, 1] top loc of chart in scene
    wnorm: float; [0, 1] width of chart in scene
    hnorm: float; [0, 1] height of chart in scene
    scale: float; scale factor to extract chart
    opt_val: float; The normalized match optimization value [0, 1]
    """
    fmt = {'format': 'yuv', 'width': VGA_WIDTH, 'height': VGA_HEIGHT}
    cam.do_3a()
    chart, scene, s_factor = self._calc_scale_factors(cam, props, fmt, log_path,
                                                      rotation)
    scale_start = self._scale_start * s_factor
    scale_stop = self._scale_stop * s_factor
    scale_step = self._scale_step * s_factor
    offset = scale_step / 2
    self.scale = s_factor
    logging.debug('scale start: %.3f, stop: %.3f, step: %.3f',
                  scale_start, scale_stop, scale_step)
    logging.debug('Used offset of %.3f to include stop value.', offset)
    max_match = []
    # convert [0.0, 1.0] image to [0, 255] and then grayscale
    scene_uint8 = image_processing_utils.convert_image_to_uint8(scene)
    scene_gray = image_processing_utils.convert_rgb_to_grayscale(scene_uint8)

    # find scene
    logging.debug('Finding chart in scene...')
    for scale in numpy.arange(scale_start, scale_stop + offset, scale_step):
      scene_scaled = scale_img(scene_gray, scale)
      if (scene_scaled.shape[0] < chart.shape[0] or
          scene_scaled.shape[1] < chart.shape[1]):
        logging.debug(
            'Skipped scale %.3f. scene_scaled shape: %s, chart shape: %s',
            scale, scene_scaled.shape, chart.shape)
        continue
      result = cv2.matchTemplate(scene_scaled, chart, cv2.TM_CCOEFF_NORMED)
      _, opt_val, _, top_left_scaled = cv2.minMaxLoc(result)
      logging.debug(' scale factor: %.3f, opt val: %.3f', scale, opt_val)
      max_match.append((opt_val, scale, top_left_scaled))

    # determine if optimization results are valid
    opt_values = [x[0] for x in max_match]
    if not opt_values or (2.0 * min(opt_values) > max(opt_values)):
      estring = ('Warning: unable to find chart in scene!\n'
                 'Check camera distance and self-reported '
                 'pixel pitch, focal length and hyperfocal distance.')
      logging.warning(estring)
      self._set_scale_factors_to_one()
    else:
      if (max(opt_values) == opt_values[0] or
          max(opt_values) == opt_values[-1]):
        logging.warning('Warning: Chart is at extreme range of locator.')
      # find max and draw bbox
      matched_scale_and_loc = max(max_match, key=lambda x: x[0])
      self.opt_val = matched_scale_and_loc[0]
      self.scale = matched_scale_and_loc[1]
      logging.debug('Optimum scale factor: %.3f', self.scale)
      logging.debug('Opt val: %.3f', self.opt_val)
      top_left_scaled = matched_scale_and_loc[2]
      logging.debug('top_left_scaled: %d, %d', top_left_scaled[0],
                    top_left_scaled[1])
      h, w = chart.shape
      bottom_right_scaled = (top_left_scaled[0] + w, top_left_scaled[1] + h)
      logging.debug('bottom_right_scaled: %d, %d', bottom_right_scaled[0],
                    bottom_right_scaled[1])
      top_left = (top_left_scaled[0] // self.scale,
                  top_left_scaled[1] // self.scale)
      bottom_right = (bottom_right_scaled[0] // self.scale,
                      bottom_right_scaled[1] // self.scale)
      self.wnorm = (bottom_right[0] - top_left[0]) / scene.shape[1]
      self.hnorm = (bottom_right[1] - top_left[1]) / scene.shape[0]
      self.xnorm = top_left[0] / scene.shape[1]
      self.ynorm = top_left[1] / scene.shape[0]
      patch = image_processing_utils.get_image_patch(
          scene_uint8, self.xnorm, self.ynorm, self.wnorm, self.hnorm) / 255
      image_processing_utils.write_image(
          patch, os.path.join(log_path, 'template_scene.jpg'))
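# Example Chart usage (a sketch; `cam` is assumed to be an open ITS session
# and `props` its camera properties, both obtained outside this module):
#   chart = Chart(cam, props, log_path, distance=31.0)
#   # normalized location and match quality are appended by locate()
#   logging.debug('%f %f %f %f %f', chart.xnorm, chart.ynorm,
#                 chart.wnorm, chart.hnorm, chart.scale)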


def component_shape(contour):
  """Measure the shape of a connected component.

  Args:
    contour: return from cv2.findContours. A list of pixel coordinates of
    the contour.

  Returns:
    The left-most, right-most, top-most and bottom-most pixel locations,
    height, width, and the center pixel location of the contour.
  """
  shape = {'left': numpy.inf, 'right': 0, 'top': numpy.inf, 'bottom': 0,
           'width': 0, 'height': 0, 'ctx': 0, 'cty': 0}
  for pt in contour:
    if pt[0][0] < shape['left']:
      shape['left'] = pt[0][0]
    if pt[0][0] > shape['right']:
      shape['right'] = pt[0][0]
    if pt[0][1] < shape['top']:
      shape['top'] = pt[0][1]
    if pt[0][1] > shape['bottom']:
      shape['bottom'] = pt[0][1]
  shape['width'] = shape['right'] - shape['left'] + 1
  shape['height'] = shape['bottom'] - shape['top'] + 1
  shape['ctx'] = (shape['left'] + shape['right']) // 2
  shape['cty'] = (shape['top'] + shape['bottom']) // 2
  return shape
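
# Example of component_shape on a hand-made contour (a sketch; contours from
# cv2.findContours have shape (N, 1, 2) with (x, y) point order):
#   contour = numpy.array([[[10, 20]], [[30, 20]], [[30, 40]], [[10, 40]]])
#   component_shape(contour)
#   -> {'left': 10, 'right': 30, 'top': 20, 'bottom': 40,
#       'width': 21, 'height': 21, 'ctx': 20, 'cty': 30}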


def find_circle_fill_metric(shape, img_bw, color):
  """Find the proportion of points matching a desired color on a shape's axes.

  Args:
    shape: dictionary returned by component_shape(...)
    img_bw: binarized numpy image array
    color: int of [0 or 255] 0 is black, 255 is white
  Returns:
    float: number of x, y axis points matching color / total x, y axis points
  """
  matching = 0
  total = 0
  for y in range(shape['top'], shape['bottom']):
    total += 1
    matching += 1 if img_bw[y][shape['ctx']] == color else 0
  for x in range(shape['left'], shape['right']):
    total += 1
    matching += 1 if img_bw[shape['cty']][x] == color else 0
  logging.debug('Found %d matching points out of %d', matching, total)
  return matching / total


def find_circle(img, img_name, min_area, color, use_adaptive_threshold=False):
  """Find the circle in the test image.

  Args:
    img: numpy image array in RGB, with pixel values in [0,255].
    img_name: string with image info of format and size.
    min_area: float of minimum area of circle to find
    color: int of [0 or 255] 0 is black, 255 is white
    use_adaptive_threshold: True if binarization should use adaptive threshold.

  Returns:
    circle = {'x', 'y', 'r', 'w', 'h', 'x_offset', 'y_offset'}
  """
  circle = {}
  img_size = img.shape
  if img_size[0]*img_size[1] >= LOW_RES_IMG_THRESH:
    circlish_atol = CIRCLISH_ATOL
  else:
    circlish_atol = CIRCLISH_LOW_RES_ATOL

  # convert to gray-scale image and binarize using adaptive/global threshold
  if use_adaptive_threshold:
    img_gray = cv2.cvtColor(img.astype(numpy.uint8), cv2.COLOR_BGR2GRAY)
    img_bw = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, CV2_THRESHOLD_BLOCK_SIZE,
                                   CV2_THRESHOLD_CONSTANT)
  else:
    img_gray = image_processing_utils.convert_rgb_to_grayscale(img)
    img_bw = binarize_image(img_gray)

  # find contours
  contours = find_all_contours(255-img_bw)

  # Check each contour and find the circle bigger than min_area
  num_circles = 0
  circle_contours = []
  logging.debug('Initial number of contours: %d', len(contours))
  min_circle_area = min_area * img_size[0] * img_size[1]
  logging.debug('Screening out circles w/ radius < %.1f (pixels) or %d pts.',
                math.sqrt(min_circle_area / math.pi), CIRCLE_MIN_PTS)
  for contour in contours:
    area = cv2.contourArea(contour)
    num_pts = len(contour)
    if (area > min_circle_area and num_pts >= CIRCLE_MIN_PTS):
      shape = component_shape(contour)
      radius = (shape['width'] + shape['height']) / 4
      colour = img_bw[shape['cty']][shape['ctx']]
      circlish = (math.pi * radius**2) / area
      aspect_ratio = shape['width'] / shape['height']
      fill = find_circle_fill_metric(shape, img_bw, color)
      logging.debug('Potential circle found. radius: %.2f, color: %d, '
                    'circlish: %.3f, ar: %.3f, pts: %d, fill metric: %.3f',
                    radius, colour, circlish, aspect_ratio, num_pts, fill)
      if (colour == color and
          math.isclose(1.0, circlish, abs_tol=circlish_atol) and
          math.isclose(1.0, aspect_ratio, abs_tol=CIRCLE_AR_ATOL) and
          num_pts/radius >= CIRCLE_RADIUS_NUMPTS_THRESH and
          math.isclose(1.0, fill, abs_tol=CIRCLE_COLOR_ATOL)):
        radii = [
            image_processing_utils.distance(
                (shape['ctx'], shape['cty']), numpy.squeeze(point))
            for point in contour
        ]
        minimum_radius, maximum_radius = min(radii), max(radii)
        logging.debug('Minimum radius: %.2f, maximum radius: %.2f',
                      minimum_radius, maximum_radius)
        if circle:
          old_circle_center = (circle['x'], circle['y'])
          new_circle_center = (shape['ctx'], shape['cty'])
          # Based on image height
          center_distance_atol = img_size[0]*CIRCLE_LOCATION_VARIATION_RTOL
          if math.isclose(
              image_processing_utils.distance(
                  old_circle_center, new_circle_center),
              0,
              abs_tol=center_distance_atol
          ) and maximum_radius - minimum_radius < circle['radius_spread']:
            logging.debug('Replacing the previously found circle. '
                          'Circle located at %s has a smaller radius spread '
                          'than the previously found circle at %s. '
                          'Current radius spread: %.2f, '
                          'previous radius spread: %.2f',
                          new_circle_center, old_circle_center,
                          maximum_radius - minimum_radius,
                          circle['radius_spread'])
            circle_contours.pop()
            circle = {}
            num_circles -= 1
        circle_contours.append(contour)

        # Populate circle dictionary
        circle['x'] = shape['ctx']
        circle['y'] = shape['cty']
        circle['r'] = (shape['width'] + shape['height']) / 4
        circle['w'] = float(shape['width'])
        circle['h'] = float(shape['height'])
        circle['x_offset'] = (shape['ctx'] - img_size[1]//2) / circle['w']
        circle['y_offset'] = (shape['cty'] - img_size[0]//2) / circle['h']
        circle['radius_spread'] = maximum_radius - minimum_radius
        logging.debug('Num pts: %d', num_pts)
        logging.debug('Aspect ratio: %.3f', aspect_ratio)
        logging.debug('Circlish value: %.3f', circlish)
        logging.debug('Location: %.1f x %.1f', circle['x'], circle['y'])
        logging.debug('Radius: %.3f', circle['r'])
        logging.debug('Circle center position w.r.t. image center: %.3fx%.3f',
                      circle['x_offset'], circle['y_offset'])
        num_circles += 1
        # if more than one circle found, break
        if num_circles == 2:
          break

  if num_circles == 0:
    image_processing_utils.write_image(img/255, img_name, True)
    if not use_adaptive_threshold:
      return find_circle(
          img, img_name, min_area, color, use_adaptive_threshold=True)
    else:
      raise AssertionError('No circle detected. '
                           'Please take pictures according to instructions.')

  if num_circles > 1:
    image_processing_utils.write_image(img/255, img_name, True)
    cv2.drawContours(img, circle_contours, -1, CV2_RED,
                     CV2_LINE_THICKNESS)
    img_name_parts = img_name.split('.')
    image_processing_utils.write_image(
        img/255, f'{img_name_parts[0]}_contours.{img_name_parts[1]}', True)
    if not use_adaptive_threshold:
      return find_circle(
          img, img_name, min_area, color, use_adaptive_threshold=True)
    raise AssertionError('More than 1 circle detected. '
                         'Background of scene may be too complex.')

  return circle
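
# Example find_circle usage (a sketch; `img` is assumed to be a [0, 255] RGB
# capture with a single dark circle on a light background; min_area is
# relative to the image area):
#   circle = find_circle(img, 'test.jpg', min_area=0.01, color=0)
#   logging.debug('center: (%d, %d), radius: %.1f',
#                 circle['x'], circle['y'], circle['r'])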


def append_circle_center_to_img(circle, img, img_name, save_img=True):
  """Append circle center and image center to image and save image.

  Draws a line from the circle center to the image center and labels the
  end points. Text positioning is adjusted depending on where the circle
  center lies relative to the image center, and text is moved left/right
  by half of its up/down movement for visual aesthetics.

  Args:
    circle: dict with circle location vals.
    img: numpy float image array in RGB, with pixel values in [0,255].
    img_name: string with image info of format and size.
    save_img: optional boolean; False to skip saving the image.
  """
  line_width_scaling_factor = 500
  text_move_scaling_factor = 3
  img_size = img.shape
  img_center_x = img_size[1]//2
  img_center_y = img_size[0]//2

  # draw line from circle to image center
  line_width = int(max(1, max(img_size)//line_width_scaling_factor))
  font_size = line_width // 2
  move_text_dist = line_width * text_move_scaling_factor
  cv2.line(img, (circle['x'], circle['y']), (img_center_x, img_center_y),
           CV2_RED, line_width)

  # adjust text location
  move_text_right_circle = -1
  move_text_right_image = 2
  if circle['x'] > img_center_x:
    move_text_right_circle = 2
    move_text_right_image = -1

  move_text_down_circle = -1
  move_text_down_image = 4
  if circle['y'] > img_center_y:
    move_text_down_circle = 4
    move_text_down_image = -1

  # add circles to end points and label
  radius_pt = line_width * 2  # makes a dot 2x line width
  filled_pt = -1  # cv2 value for a filled circle
  # circle center
  cv2.circle(img, (circle['x'], circle['y']), radius_pt, CV2_RED, filled_pt)
  text_circle_x = move_text_dist * move_text_right_circle + circle['x']
  text_circle_y = move_text_dist * move_text_down_circle + circle['y']
  cv2.putText(img, 'circle center', (text_circle_x, text_circle_y),
              cv2.FONT_HERSHEY_SIMPLEX, font_size, CV2_RED, line_width)
  # image center
  cv2.circle(img, (img_center_x, img_center_y), radius_pt, CV2_RED, filled_pt)
  text_imgct_x = move_text_dist * move_text_right_image + img_center_x
  text_imgct_y = move_text_dist * move_text_down_image + img_center_y
  cv2.putText(img, 'image center', (text_imgct_x, text_imgct_y),
              cv2.FONT_HERSHEY_SIMPLEX, font_size, CV2_RED, line_width)
  if save_img:
    image_processing_utils.write_image(img/255, img_name, True)  # [0, 1] values


def is_circle_cropped(circle, size):
  """Determine if a circle is cropped by edge of image.

  Args:
    circle: list [x, y, radius] of circle
    size: tuple (x, y) of size of img

  Returns:
    Boolean True if selected circle is cropped
  """

  cropped = False
  circle_x, circle_y = circle[0], circle[1]
  circle_r = circle[2]
  x_min, x_max = circle_x - circle_r, circle_x + circle_r
  y_min, y_max = circle_y - circle_r, circle_y + circle_r
  if x_min < 0 or y_min < 0 or x_max > size[0] or y_max > size[1]:
    cropped = True
  return cropped
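
# Example of is_circle_cropped (a sketch; the circle's extents are compared
# against the image bounds):
#   is_circle_cropped([100, 100, 50], (640, 480))  -> False
#   is_circle_cropped([620, 100, 50], (640, 480))  -> True (x_max = 670 > 640)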


def find_white_square(img, min_area):
  """Find the white square in the test image.

  Args:
    img: numpy image array in RGB, with pixel values in [0,255].
    min_area: float of minimum area of square to find, relative to image area.

  Returns:
    square = {'angle', 'left', 'right', 'top', 'bottom', 'w', 'h'}
  """
  square = {}
  num_squares = 0
  img_size = img.shape

  # convert to gray-scale image
  img_gray = image_processing_utils.convert_rgb_to_grayscale(img)

  # otsu threshold to binarize the image
  img_bw = binarize_image(img_gray)

  # find contours
  contours = find_all_contours(img_bw)

  # Check each contour and find the square bigger than min_area
  logging.debug('Initial number of contours: %d', len(contours))
  min_area = img_size[0]*img_size[1]*min_area
  logging.debug('min_area: %.3f', min_area)
  for contour in contours:
    area = cv2.contourArea(contour)
    num_pts = len(contour)
    if (area > min_area and num_pts >= 4):
      shape = component_shape(contour)
      squarish = (shape['width'] * shape['height']) / area
      aspect_ratio = shape['width'] / shape['height']
      logging.debug('Potential square found. squarish: %.3f, ar: %.3f, pts: %d',
                    squarish, aspect_ratio, num_pts)
      if (math.isclose(1.0, squarish, abs_tol=SQUARISH_RTOL) and
          math.isclose(1.0, aspect_ratio, abs_tol=SQUARISH_AR_RTOL)):
        # Populate square dictionary
        angle = cv2.minAreaRect(contour)[-1]
        if angle < -45:
          angle += 90
        square['angle'] = angle
        square['left'] = shape['left'] - SQUARE_CROP_MARGIN
        square['right'] = shape['right'] + SQUARE_CROP_MARGIN
        square['top'] = shape['top'] - SQUARE_CROP_MARGIN
        square['bottom'] = shape['bottom'] + SQUARE_CROP_MARGIN
        square['w'] = shape['width'] + 2*SQUARE_CROP_MARGIN
        square['h'] = shape['height'] + 2*SQUARE_CROP_MARGIN
        num_squares += 1

  if num_squares == 0:
    raise AssertionError('No white square detected. '
                         'Please take pictures according to instructions.')
  if num_squares > 1:
    raise AssertionError('More than 1 white square detected. '
                         'Background of scene may be too complex.')
  return square


def get_angle(input_img):
  """Computes the angular inclination of the chessboard in input_img.

  Args:
    input_img (2D numpy.ndarray): Grayscale image stored as a 2D numpy array.
  Returns:
    Median angle of squares in degrees identified in the image, or None if
    too few squares were found.

  Angle estimation algorithm description:
    Input: 2D grayscale image of chessboard.
    Output: Angle of rotation of chessboard about the axis perpendicular to
            the chessboard. Assumes chessboard and camera are parallel to
            each other.

    1) Use adaptive threshold to make image binary
    2) Find contours
    3) Filter out small contours
    4) Filter out all non-square contours
    5) Compute most common square shape.
        The assumption here is that the most common square instances are the
        chessboard squares. We've shown that with our current tuning, we can
        robustly identify the squares on the sensor fusion chessboard.
    6) Return median angle of most common square shape.

  USAGE NOTE: This function has been tuned to work for the chessboard used in
  the sensor_fusion tests. See images in test_images/rotated_chessboard/ for
  sample captures. If this function is used with other chessboards, it may not
  work as expected.
  """
  # Tuning parameters
  square_area_min = float(input_img.shape[1] * SQUARE_AREA_MIN_REL)

  # Creates copy of image to avoid modifying original.
  img = numpy.array(input_img, copy=True)

  # Scale pixel values from 0-1 to 0-255
  img_uint8 = image_processing_utils.convert_image_to_uint8(img)
  img_thresh = cv2.adaptiveThreshold(
      img_uint8, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 201, 2)

  # Find all contours.
  contours = find_all_contours(img_thresh)

  # Filter contours to squares only.
  square_contours = []
  for contour in contours:
    rect = cv2.minAreaRect(contour)
    _, (width, height), angle = rect

    # Skip non-squares
    if not math.isclose(width, height, rel_tol=SQUARE_TOL):
      continue

    # Remove very small contours: usually just tiny dots due to noise.
    area = cv2.contourArea(contour)
    if area < square_area_min:
      continue

    square_contours.append(contour)

  areas = [cv2.contourArea(contour) for contour in square_contours]
  median_area = numpy.median(areas)

  filtered_angles = []
  for square in square_contours:
    area = cv2.contourArea(square)
    if not math.isclose(area, median_area, rel_tol=SQUARE_TOL):
      continue
    _, _, angle = cv2.minAreaRect(square)
    filtered_angles.append(angle)

  if len(filtered_angles) < ANGLE_NUM_MIN:
    logging.debug(
        'A frame had too few angles to be processed. '
        'Num of angles: %d, MIN: %d', len(filtered_angles), ANGLE_NUM_MIN)
    return None

  return numpy.median(filtered_angles)
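
# Example get_angle usage (a sketch; `img` is assumed to be a grayscale
# [0, 1] capture of the sensor_fusion chessboard):
#   angle = get_angle(img)
#   if angle is not None:
#     logging.debug('Chessboard angle: %.2f degrees', angle)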


def correct_faces_for_crop(faces, img, crop):
  """Correct face rectangles for sensor crop.

  Args:
    faces: list of dicts with face information
    img: np image array
    crop: dict of crop region size with 'top, right, left, bottom' as keys
  Returns:
    list of face locations (left, right, top, bottom) corrected
  """
  faces_corrected = []
  cw, ch = crop['right'] - crop['left'], crop['bottom'] - crop['top']
  logging.debug('crop region: %s', str(crop))
  w = img.shape[1]
  h = img.shape[0]
  for rect in [face['bounds'] for face in faces]:
    logging.debug('rect: %s', str(rect))
    left = int(round((rect['left'] - crop['left']) * w / cw))
    right = int(round((rect['right'] - crop['left']) * w / cw))
    top = int(round((rect['top'] - crop['top']) * h / ch))
    bottom = int(round((rect['bottom'] - crop['top']) * h / ch))
    faces_corrected.append([left, right, top, bottom])
  logging.debug('faces_corrected: %s', str(faces_corrected))
  return faces_corrected
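
# Worked example of the crop correction above (a sketch with assumed values):
#   crop = {'left': 200, 'top': 150, 'right': 1800, 'bottom': 1350}
#   -> cw, ch = 1600, 1200; for a 640x480 image, w/cw = 0.4 and h/ch = 0.4
#   a face edge at rect['left'] = 700 maps to (700 - 200) * 0.4 = 200 pixels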


def eliminate_duplicate_centers(coordinates_list):
  """Checks center coordinates of OpenCV's face rectangles.

  Method makes sure that the list of face rectangles' centers does not
  contain duplicates from the same face.

  Args:
    coordinates_list: list; coordinates of face rectangles' centers
  Returns:
    non_duplicate_list: list; coordinates of face rectangles' centers
    without duplicates on the same face
  """
  output = set()

  for xy1 in coordinates_list:
    for xy2 in coordinates_list:
      if scipy.spatial.distance.euclidean(xy1, xy2) < FACE_MIN_CENTER_DELTA:
        continue
      if xy1 not in output:
        output.add(xy1)
      else:
        output.add(xy2)
  return list(output)


def match_face_locations(faces_cropped, faces_opencv, img, img_name):
  """Assert face locations between two methods.

  Method determines if the center of each openCV face box is within a face
  detection face box. Uses math.hypot to measure the distance between the
  centers, as math.dist is not available for python versions before 3.8.

  Args:
    faces_cropped: list of lists with (l, r, t, b) for each face.
    faces_opencv: list of lists with (x, y, w, h) for each face.
    img: numpy [0, 1] image array
    img_name: text string with path to image file
  """
  # turn faces_opencv into list of center locations
  faces_opencv_center = [(x+w//2, y+h//2) for (x, y, w, h) in faces_opencv]
  cropped_faces_centers = [
      ((l+r)//2, (t+b)//2) for (l, r, t, b) in faces_cropped]
  faces_opencv_center.sort(key=lambda t: [t[1], t[0]])
  cropped_faces_centers.sort(key=lambda t: [t[1], t[0]])
  logging.debug('cropped face centers: %s', str(cropped_faces_centers))
  logging.debug('opencv face center: %s', str(faces_opencv_center))
  num_centers_aligned = 0

  # eliminate duplicate openCV face rectangles' centers on the same face
  faces_opencv_centers = eliminate_duplicate_centers(faces_opencv_center)
  logging.debug('opencv face centers: %s', str(faces_opencv_centers))

  for (x, y) in faces_opencv_centers:
    for (x1, y1) in cropped_faces_centers:
      centers_dist = math.hypot(x-x1, y-y1)
      if centers_dist < FACE_CENTER_MIN_LOGGING_DIST:
        logging.debug('centers_dist: %.3f', centers_dist)
      if (abs(x-x1) < FACE_CENTER_MATCH_TOL_X and
          abs(y-y1) < FACE_CENTER_MATCH_TOL_Y):
        num_centers_aligned += 1

  # If test failed, save image with green AND OpenCV red rectangles
  image_processing_utils.write_image(img, img_name)
  if num_centers_aligned < FACES_ALIGNED_MIN_NUM:
    for (x, y, w, h) in faces_opencv:
      cv2.rectangle(img, (x, y), (x+w, y+h), CV2_RED_NORM, 2)
    image_processing_utils.write_image(img, img_name)
    logging.debug('centered: %s', str(num_centers_aligned))
    raise AssertionError(f'Face rectangles in wrong location(s)! '
                         f'Found {num_centers_aligned} rectangles near cropped '
                         f'face centers, expected {FACES_ALIGNED_MIN_NUM}')


def draw_green_boxes_around_faces(img, faces_cropped, img_name):
  """Draw green boxes around face rectangles and save image.

  Args:
    img: numpy [0, 1] image array
    faces_cropped: list of lists with (l, r, t, b) for each face
    img_name: text string with path to image file
  """
  # draw boxes around faces in green and save image
  for (l, r, t, b) in faces_cropped:
    cv2.rectangle(img, (l, t), (r, b), CV2_GREEN_NORM, 2)
  image_processing_utils.write_image(img, img_name)


def find_aruco_markers(input_img, output_img_path):
  """Detects ArUco markers in the input_img.

  Finds ArUco markers in the input_img and draws the contours
  around them.

  Args:
    input_img: input img in numpy array with ArUco markers
      to be detected
    output_img_path: path of the image to be saved with contours
      around the markers detected
  Returns:
    corners: list of detected corners
    ids: list of int ids for each ArUco marker in the input_img
    rejected_params: list of rejected corners
  """
  parameters = cv2.aruco.DetectorParameters_create()
  # ArUco markers used are 4x4
  aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100)
  corners, ids, rejected_params = cv2.aruco.detectMarkers(
      input_img, aruco_dict, parameters=parameters)
  if ids is None:
    image_processing_utils.write_image(input_img/255, output_img_path)
    raise AssertionError('ArUco markers not detected.')
  logging.debug('Number of ArUco markers detected: %d', len(ids))
  logging.debug('IDs of the ArUco markers detected: %s', ids)
  logging.debug('Corners of the ArUco markers detected: %s', corners)
  cv2.aruco.drawDetectedMarkers(input_img, corners, ids)
  image_processing_utils.write_image(input_img/255, output_img_path)
  return corners, ids, rejected_params
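
# Example find_aruco_markers usage (a sketch; `img` is assumed to be a
# [0, 255] RGB capture containing the four scene markers):
#   corners, ids, _ = find_aruco_markers(img, 'aruco.jpg')
#   patch = get_patch_from_aruco_markers(img, corners, ids)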


def get_patch_from_aruco_markers(
    input_img, aruco_marker_corners, aruco_marker_ids):
  """Returns the rectangle patch from the aruco marker corners.

  Note: Refer to image used in scene7 for ArUco markers location.

  Args:
    input_img: input img in numpy array with ArUco markers
      to be detected
    aruco_marker_corners: array of aruco marker corner coordinates detected by
      opencv_processing_utils.find_aruco_markers
    aruco_marker_ids: array of ids of aruco markers detected by
      opencv_processing_utils.find_aruco_markers
  Returns:
    Numpy float image array of the rectangle patch
  """
  outer_rect_coordinates = {}
  for corner, marker_id in zip(aruco_marker_corners, aruco_marker_ids):
    corner = corner.reshape(4, 2)  # opencv returns 3D array
    index = marker_id[0]
    # Roll the array 4x to align with the coordinates of the corner adjacent
    # to the corner of the rectangle
    # Marker id: 0 => index 2 coordinates
    # Marker id: 1 => index 3 coordinates
    # Marker id: 2 => index 0 coordinates
    # Marker id: 3 => index 1 coordinates
    corner = numpy.roll(corner, 4)

    outer_rect_coordinates[index] = tuple(corner[index])

  red_corner = tuple(map(int, outer_rect_coordinates[0]))
  green_corner = tuple(map(int, outer_rect_coordinates[1]))
  gray_corner = tuple(map(int, outer_rect_coordinates[2]))
  blue_corner = tuple(map(int, outer_rect_coordinates[3]))

  logging.debug('red_corner: %s', red_corner)
  logging.debug('blue_corner: %s', blue_corner)
  logging.debug('green_corner: %s', green_corner)
  logging.debug('gray_corner: %s', gray_corner)
  # Ensure that the image is not rotated
  blue_gray_y_diff = abs(gray_corner[1] - blue_corner[1])
  red_green_y_diff = abs(green_corner[1] - red_corner[1])

  if ((blue_gray_y_diff > IMAGE_ROTATION_THRESHOLD) or
      (red_green_y_diff > IMAGE_ROTATION_THRESHOLD)):
    raise AssertionError('Image rotation is not within the threshold. '
                         f'Actual blue_gray_y_diff: {blue_gray_y_diff}, '
                         f'red_green_y_diff: {red_green_y_diff} '
                         f'Expected {IMAGE_ROTATION_THRESHOLD}')
  cv2.rectangle(input_img, red_corner, gray_corner,
                CV2_RED_NORM, CV2_LINE_THICKNESS)
  return input_img[red_corner[1]:gray_corner[1],
                   red_corner[0]:gray_corner[0]].copy()


def get_chart_boundary_from_aruco_markers(
    aruco_marker_corners, aruco_marker_ids, input_img, output_img_path):
  """Returns top left and bottom right coordinates from the aruco markers.

  Note: Refer to image used in scene8 for ArUco markers location.

  Args:
    aruco_marker_corners: array of aruco marker corner coordinates detected by
      opencv_processing_utils.find_aruco_markers.
    aruco_marker_ids: array of ids of aruco markers detected by
      opencv_processing_utils.find_aruco_markers.
    input_img: 3D RGB numpy [0, 255] uint8; input image.
    output_img_path: string; output image path.
  Returns:
    top_left: tuple; aruco marker corner coordinates in pixel.
    top_right: tuple; aruco marker corner coordinates in pixel.
    bottom_right: tuple; aruco marker corner coordinates in pixel.
    bottom_left: tuple; aruco marker corner coordinates in pixel.
  """
  outer_rect_coordinates = {}
  for corner, marker_id in zip(aruco_marker_corners, aruco_marker_ids):
    corner = corner.reshape(4, 2)  # reshape opencv 3D array to 4x2
    index = marker_id[0]
    corner = numpy.roll(corner, ARUCO_CORNER_COUNT)
    outer_rect_coordinates[index] = tuple(corner[index])
    logging.debug('Corners: %s', corner)
    logging.debug('Index: %s', index)
    logging.debug('Outer rect coordinates: %s', outer_rect_coordinates[index])
  top_left = tuple(map(int, outer_rect_coordinates[0]))
  top_right = tuple(map(int, outer_rect_coordinates[1]))
  bottom_right = tuple(map(int, outer_rect_coordinates[2]))
  bottom_left = tuple(map(int, outer_rect_coordinates[3]))

  # Outline metering rectangles with corresponding colors
  rect_w = round((bottom_right[0] - top_left[0])/NUM_AE_AWB_REGIONS)
  top_x, top_y = top_left[0], top_left[1]
  bottom_x, bottom_y = bottom_left[0], bottom_left[1]
  cv2.rectangle(
      input_img,
      (top_x, top_y), (bottom_x + rect_w, bottom_y),
      CV2_BLUE, CV2_LINE_THICKNESS)
  cv2.rectangle(
      input_img,
      (top_x + rect_w, top_y), (bottom_x + rect_w * 2, bottom_y),
      CV2_WHITE, CV2_LINE_THICKNESS)
  cv2.rectangle(
      input_img,
      (top_x + rect_w * 2, top_y), (bottom_x + rect_w * 3, bottom_y),
      CV2_BLACK, CV2_LINE_THICKNESS)
  cv2.rectangle(
      input_img,
      (top_x + rect_w * 3, top_y), bottom_right,
      CV2_YELLOW, CV2_LINE_THICKNESS)
  image_processing_utils.write_image(input_img/255, output_img_path)
  logging.debug('ArUco marker top_left: %s', top_left)
  logging.debug('ArUco marker bottom_right: %s', bottom_right)
  return top_left, top_right, bottom_right, bottom_left


def define_metering_rectangle_values(
    props, top_left, top_right, bottom_right, bottom_left, w, h):
  """Find normalized values of coordinates and return 4 metering rects.

  Args:
    props: dict; camera properties object.
    top_left: coordinates; defined by aruco markers for targeted image.
    top_right: coordinates; defined by aruco markers for targeted image.
    bottom_right: coordinates; defined by aruco markers for targeted image.
    bottom_left: coordinates; defined by aruco markers for targeted image.
    w: int; active array width in pixels.
    h: int; active array height in pixels.
  Returns:
    meter_rects: 4 metering rectangles made of (x, y, width, height, weight).
      x, y are the top left coordinates of the metering rectangle.
  """
  # If testing front camera, mirror coordinates either left/right or up/down
  # Previews are flipped on the device's natural orientation
  # For sensor orientation 90 or 270, it is up or down
  # For sensor orientation 0 or 180, it is left or right
  if (props['android.lens.facing'] ==
      camera_properties_utils.LENS_FACING['FRONT']):
    if props['android.sensor.orientation'] in (90, 270):
      tl_coordinates = (bottom_left[0], h - bottom_left[1])
      br_coordinates = (top_right[0], h - top_right[1])
      logging.debug('Found sensor orientation %d, flipping up down',
                    props['android.sensor.orientation'])
    else:
      tl_coordinates = (w - top_right[0], top_right[1])
      br_coordinates = (w - bottom_left[0], bottom_left[1])
      logging.debug('Found sensor orientation %d, flipping left right',
                    props['android.sensor.orientation'])
    logging.debug('Mirrored top-left coordinates: %s', tl_coordinates)
    logging.debug('Mirrored bottom-right coordinates: %s', br_coordinates)
  else:
    tl_coordinates, br_coordinates = top_left, bottom_right

  # Normalize coordinates' values to construct metering rectangles
  meter_rects = []
  tl_normalized_x = tl_coordinates[0] / w
  tl_normalized_y = tl_coordinates[1] / h
  br_normalized_x = br_coordinates[0] / w
  br_normalized_y = br_coordinates[1] / h
  rect_w = round((br_normalized_x - tl_normalized_x) / NUM_AE_AWB_REGIONS, 2)
  rect_h = round(br_normalized_y - tl_normalized_y, 2)
  for i in range(NUM_AE_AWB_REGIONS):
    x = round(tl_normalized_x + (rect_w * i), 2)
    y = round(tl_normalized_y, 2)
    meter_rect = [x, y, rect_w, rect_h, AE_AWB_METER_WEIGHT]
    meter_rects.append(meter_rect)
  logging.debug('metering rects: %s', meter_rects)
  return meter_rects
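
# Worked example of the normalization above (a sketch with assumed values):
#   active array 4000x3000, tl = (400, 900), br = (3600, 2100)
#   -> tl_normalized = (0.1, 0.3), br_normalized = (0.9, 0.7)
#   -> rect_w = (0.9 - 0.1) / 4 = 0.2, rect_h = 0.4
#   -> meter_rects = [[0.1, 0.3, 0.2, 0.4, 1000], [0.3, 0.3, ...], ...]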


def convert_image_to_high_contrast_black_white(
    img, contrast=CV2_CONTRAST_ALPHA, brightness=CV2_CONTRAST_BETA):
  """Convert capture to high contrast black and white image.

  Args:
    img: numpy array of image.
    contrast: gain parameter in the range [0, 3].
    brightness: bias parameter in the range [1, 100].
  Returns:
    high_contrast_img: high contrast black and white image.
  """
  copy_img = numpy.ndarray.copy(img)
  uint8_img = image_processing_utils.convert_image_to_uint8(copy_img)
  gray_img = convert_to_y(uint8_img)
  img_bw = cv2.convertScaleAbs(
      gray_img, alpha=contrast, beta=brightness)
  _, high_contrast_img = cv2.threshold(
      numpy.uint8(img_bw), CV2_THRESHOLD_LOWER_BLACK, CH_FULL_SCALE,
      cv2.THRESH_BINARY + cv2.THRESH_OTSU
  )
  high_contrast_img = numpy.expand_dims(
      (CH_FULL_SCALE - high_contrast_img), axis=2)
  return high_contrast_img
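# Example usage of the high-contrast conversion (a sketch; `img` is assumed
# to be a [0, 1] RGB capture):
#   bw = convert_image_to_high_contrast_black_white(img)
#   # bw has shape (h, w, 1) with inverted Otsu-binarized values in {0, 255}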