/cts/apps/CameraITS/tests/scene3/ |
D | test_3a_consistency.py |
   21  import numpy as np  namespace
  126  iso_exp_min = np.amin(iso_exps)
  127  iso_exp_max = np.amax(iso_exps)
  128  if not np.isclose(iso_exp_max, iso_exp_min, iso_exp_tol):
  131  g_gain_min = np.amin(g_gains)
  132  g_gain_max = np.amax(g_gains)
  133  if not np.isclose(g_gain_max, g_gain_min, _GGAIN_TOL):
  136  fd_min = np.amin(fds)
  137  fd_max = np.amax(fds)
  138  if not np.isclose(fd_max, fd_min, _FD_TOL):
  [all …]
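
The pattern above checks that repeated 3A runs agree by comparing the extremes of the ISO*exposure product, the AWB green gain, and the focus distance. A minimal sketch of that check (tolerance values and the function name are illustrative, not the test's constants; note that the third positional argument of np.isclose is rtol):

    import numpy as np

    _ISO_EXP_RTOL = 0.03  # hypothetical 3% relative tolerance on ISO*exposure
    _GGAIN_RTOL = 0.01    # hypothetical 1% relative tolerance on AWB green gain

    def three_a_consistent(iso_exps, g_gains):
        """True if repeated 3A results agree within relative tolerances."""
        iso_exp_ok = np.isclose(np.amax(iso_exps), np.amin(iso_exps),
                                rtol=_ISO_EXP_RTOL)
        g_gain_ok = np.isclose(np.amax(g_gains), np.amin(g_gains),
                               rtol=_GGAIN_RTOL)
        return bool(iso_exp_ok and g_gain_ok)

    # Three runs with nearly identical ISO*exposure products and green gains.
    print(three_a_consistent([8000.0, 8050.0, 7990.0], [2.00, 2.01, 2.00]))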
|
D | test_flip_mirror.py |
   21  import numpy as np  namespace
   73  patch.astype(np.uint8), chart.scale)
   76  if np.max(patch)-np.min(patch) < 255/8:
   82  template[:, :, np.newaxis] / 255.0,
   87  patch[:, :, np.newaxis] / 255.0,
  103  comp_chart = np.flipud(patch)
  105  comp_chart = np.fliplr(patch)
  107  comp_chart = np.flipud(np.fliplr(patch))
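
Here np.flipud / np.fliplr build flipped and mirrored variants of the captured patch so it can be matched against the chart template. A numpy-only sketch of that idea (the actual test scales the chart and uses OpenCV template matching; normalized correlation stands in for it here):

    import numpy as np

    def detect_orientation(patch, template):
        """Return 'same', 'flip', 'mirror' or 'rotate180' for the best match.

        patch and template are assumed to be same-sized float arrays.
        """
        candidates = {
            'same': patch,
            'flip': np.flipud(patch),                  # flipped vertically
            'mirror': np.fliplr(patch),                # mirrored horizontally
            'rotate180': np.flipud(np.fliplr(patch)),  # both
        }

        def ncc(a, b):  # normalized cross-correlation score
            a = a - a.mean()
            b = b - b.mean()
            denom = np.linalg.norm(a) * np.linalg.norm(b) + 1e-12
            return float((a * b).sum() / denom)

        return max(candidates, key=lambda k: ncc(candidates[k], template))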
|
D | test_lens_movement_reporting.py |
   20  import numpy as np  namespace
  142  frame_diffs = np.gradient([v['timestamp'] for v in d.values()])
  143  delta_diffs = np.amax(frame_diffs) - np.amin(frame_diffs)
  144  if not np.isclose(delta_diffs, 0, atol=FRAME_ATOL_MS):
  165  if not np.isclose(min_loc, max_loc, rtol=POSITION_RTOL):
  172  if not np.isclose(min_sharp, max_sharp, rtol=SHARPNESS_RTOL):
  180  if not np.isclose(loc, fd, rtol=POSITION_RTOL):
  187  if not np.isclose(min_loc, max_loc, rtol=POSITION_RTOL):
  194  if not np.isclose(min_sharp, max_sharp, rtol=SHARPNESS_RTOL):
  202  if not np.isclose(loc, fd, rtol=POSITION_RTOL):
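
Lines 142-144 check that frames arrived at a steady rate: np.gradient over the capture timestamps approximates the per-frame interval, and the spread between the largest and smallest interval must be close to zero. A short sketch of that check (the tolerance value is illustrative):

    import numpy as np

    _FRAME_ATOL_MS = 10.0  # hypothetical jitter tolerance in milliseconds

    def frame_intervals_uniform(timestamps_ms):
        """True if consecutive frame intervals are nearly constant."""
        frame_diffs = np.gradient(timestamps_ms)
        delta_diffs = np.amax(frame_diffs) - np.amin(frame_diffs)
        return bool(np.isclose(delta_diffs, 0, atol=_FRAME_ATOL_MS))

    print(frame_intervals_uniform([0, 33, 66, 100, 133]))  # frames ~33 ms apart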
|
D | test_lens_position.py |
   20  import numpy as np  namespace
   52  assert np.isclose(d_stat[i]['loc'], d_stat[i]['fd'],
   54  assert np.isclose(d_stat[i]['loc'], d_stat[j]['loc'],
   56  assert np.isclose(d_stat[i]['sharpness'], d_stat[j]['sharpness'],
   64  diffs = np.gradient(times)
   65  assert np.isclose(np.amin(diffs), np.amax(diffs),
   72  assert np.isclose(d_stat[i]['loc'], d_move[i]['loc'],
   89  assert np.isclose(d_stat[i]['sharpness'], d_move[i]['sharpness'],
  119  fds_f = np.arange(hyperfocal, min_fd, (min_fd-hyperfocal)/(NUM_STEPS-1))
  120  fds_f = np.append(fds_f, min_fd)
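
Lines 119-120 build the focus-distance sweep: np.arange excludes its stop value, so the minimum focus distance is appended explicitly. A sketch of the same arange/append pattern (step count and distances are illustrative):

    import numpy as np

    _NUM_STEPS = 6  # hypothetical number of lens positions

    def focus_distance_sweep(hyperfocal, min_fd, num_steps=_NUM_STEPS):
        """Evenly spaced focus distances (diopters) from hyperfocal to min_fd."""
        step = (min_fd - hyperfocal) / (num_steps - 1)
        fds = np.arange(hyperfocal, min_fd, step)  # stop value excluded
        return np.append(fds, min_fd)

    # Sweep from 0.33 D (hyperfocal) up to 10 D (minimum focus distance).
    print(focus_distance_sweep(0.33, 10.0))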
|
/cts/suite/audio_quality/test_description/processing/ |
D | calc_thd.py |
   17  import numpy as np  namespace
   25  fftData = abs(fft.fft(data * np.hanning(len(data))))
   31  np.argmax(fftData[baseI - iMargain /2: baseI + iMargain/2])
   32  peakLoc = np.argmax(fftData[:fftLen])
   53  index = np.linspace(0.0, samples, num=samples, endpoint=False)
   55  multiplier = 2.0 * np.pi * signalFrequency / float(samplingRate)
   56  data = np.sin(index * multiplier)
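
calc_thd.py windows the captured tone with np.hanning, takes the FFT magnitude, and locates the fundamental near its expected bin before looking at harmonic content. A simplified, self-contained sketch of such a THD estimate (the margin, names and example tone are illustrative, not the script's values):

    import numpy as np

    def calc_thd(data, sampling_rate, signal_frequency, margin_hz=50.0):
        """Rough THD: harmonic amplitude relative to the fundamental."""
        n = len(data)
        spectrum = np.abs(np.fft.fft(data * np.hanning(n)))[:n // 2]
        bin_hz = sampling_rate / float(n)
        base_bin = int(round(signal_frequency / bin_hz))
        margin = max(1, int(margin_hz / bin_hz))
        # Fundamental energy: strongest bin near the expected frequency.
        fund = np.max(spectrum[base_bin - margin:base_bin + margin]) ** 2
        # Harmonic energy at 2f, 3f, ... up to Nyquist.
        harm, k = 0.0, 2
        while k * base_bin + margin < n // 2:
            harm += np.max(spectrum[k * base_bin - margin:
                                    k * base_bin + margin]) ** 2
            k += 1
        return np.sqrt(harm / fund)

    # A pure 1 kHz tone should give a THD close to zero.
    fs, f0, samples = 44100, 1000.0, 44100
    t = np.arange(samples) / float(fs)
    print(calc_thd(np.sin(2.0 * np.pi * f0 * t), fs, f0))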
|
D | calc_delay.py |
   17  import numpy as np  namespace
   29  return np.dot(data0[n:N+n], data1reversed)
   57  return np.argmax(result)
   68  index = np.linspace(0.0, samples, num=samples, endpoint=False)
   70  multiplier = 2.0 * np.pi * signalFrequency / float(samplingRate)
   71  data0 = np.sin(index * multiplier)
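
calc_delay.py estimates the playback/record offset by sliding one signal over the other with np.dot and taking np.argmax of the resulting correlation. np.correlate in 'valid' mode computes the same sliding dot products, so the idea can be sketched as follows (signal parameters are illustrative):

    import numpy as np

    def estimate_delay(data0, data1):
        """Sample offset at which data1 best aligns inside data0."""
        corr = np.correlate(data0, data1, mode='valid')  # sliding dot products
        return int(np.argmax(corr))

    # A sine excerpt embedded 123 samples into a longer, otherwise silent buffer.
    fs, f0 = 44100, 440.0
    t = np.arange(4096) / float(fs)
    ref = np.sin(2.0 * np.pi * f0 * t)
    buf = np.zeros(8192)
    buf[123:123 + len(ref)] = ref
    print(estimate_delay(buf, ref))  # expected: 123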
|
D | example.py |
   18  import numpy as np  namespace
   45  stereo = stereoInt.astype(np.int16)
   48  mono = monoInt.astype(np.int16)
|
D | gen_random.py |
   18  import numpy as np  namespace
   32  result = np.zeros(samples * 2 if stereo else samples, dtype=np.int16)
   33  randomSignal = np.random.normal(scale = peakAmpl * 2 / 3, size=samples)
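
gen_random.py draws Gaussian noise with np.random.normal and stores it as 16-bit PCM. A sketch of the same pattern (the clipping step is added here for safety and is not necessarily in the original):

    import numpy as np

    def gen_random_noise(samples, peak_ampl, stereo=False):
        """Gaussian noise as int16 samples, interleaved L/R when stereo."""
        noise = np.random.normal(scale=peak_ampl * 2.0 / 3.0, size=samples)
        noise = np.clip(noise, -peak_ampl, peak_ampl)  # avoid int16 wrap-around
        result = np.zeros(samples * 2 if stereo else samples, dtype=np.int16)
        if stereo:
            result[0::2] = noise  # left channel
            result[1::2] = noise  # right channel
        else:
            result[:] = noise
        return result

    print(gen_random_noise(8, 10000)[:8])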
|
D | check_spectrum_playback.py |
   18  import numpy as np  namespace
   54  spectrum = np.sqrt(abs(Phh[iLow:iHigh]))
   55  spectrumMean = np.mean(spectrum)
   63  spectrumResult = np.zeros(len(spectrum), dtype=np.int16)
|
D | check_spectrum.py |
   18  import numpy as np  namespace
   59  amplitudeRatio = np.sqrt(abs(Pdd[iLow:iHigh]/Phh[iLow:iHigh]))
   60  ratioMean = np.mean(amplitudeRatio)
   68  RatioResult = np.zeros(len(amplitudeRatio), dtype=np.int16)
   76  monoData = np.zeros(n)
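
check_spectrum.py compares the device recording against the host signal band by band: the amplitude ratio is sqrt(Pdd/Phh) per bin, averaged over the band of interest. A numpy-only sketch using a plain periodogram in place of the script's PSD estimate (band limits and the example are illustrative):

    import numpy as np

    def band_amplitude_ratio(host, device, fs, f_low, f_high):
        """Mean device/host amplitude ratio over [f_low, f_high] Hz."""
        n = min(len(host), len(device))
        win = np.hanning(n)
        phh = np.abs(np.fft.rfft(host[:n] * win)) ** 2    # host power spectrum
        pdd = np.abs(np.fft.rfft(device[:n] * win)) ** 2  # device power spectrum
        freqs = np.fft.rfftfreq(n, d=1.0 / fs)
        band = (freqs >= f_low) & (freqs <= f_high)
        ratio = np.sqrt(pdd[band] / np.maximum(phh[band], 1e-12))
        return float(np.mean(ratio))

    # Identical signals should give a ratio of ~1 over any band.
    sig = np.random.normal(size=8192)
    print(band_amplitude_ratio(sig, sig, 44100, 500.0, 4000.0))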
|
/cts/apps/CameraITS/tests/scene2_a/ |
D | test_effects.py |
   20  import numpy as np  namespace
   92  y_min, y_max = np.amin(y)*YUV_MAX, np.amax(y)*YUV_MAX
   98  u_min, u_max = np.amin(u) * YUV_MAX, np.amax(u) * YUV_MAX
   99  v_min, v_max = np.amin(v) * YUV_MAX, np.amax(v) * YUV_MAX
  110  u_min, u_max = np.amin(u)*YUV_MAX, np.amax(u)*YUV_MAX
  111  v_min, v_max = np.amin(v)*YUV_MAX, np.amax(v)*YUV_MAX
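
The min/max values scaled by YUV_MAX above are used to validate effect modes; for the MONO effect, a genuinely monochrome frame has (nearly) constant U and V planes. A sketch of that chroma-flatness idea, assuming planes normalized to [0, 1] (the tolerance is illustrative and the real test may check additional conditions):

    import numpy as np

    YUV_MAX = 255.0
    _MONO_UV_SPREAD_TOL = 2.0  # hypothetical spread tolerance in 8-bit values

    def looks_monochrome(u_plane, v_plane):
        """True if both chroma planes are flat, i.e. the image carries no color."""
        u_min, u_max = np.amin(u_plane) * YUV_MAX, np.amax(u_plane) * YUV_MAX
        v_min, v_max = np.amin(v_plane) * YUV_MAX, np.amax(v_plane) * YUV_MAX
        return (u_max - u_min) <= _MONO_UV_SPREAD_TOL and \
               (v_max - v_min) <= _MONO_UV_SPREAD_TOL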
|
D | test_jpeg_quality.py |
   24  import numpy as np  namespace
   88  jpeg = np.concatenate((jpeg[0:i], jpeg[length:]), axis=None)
  153  luma = np.array(jpeg[luma_start: luma_start + dqt_size])
  154  chroma = np.array(jpeg[chroma_start: chroma_start + dqt_size])
  155  lumas.append(np.mean(luma))
  156  chromas.append(np.mean(chroma))
  167  matrix = np.array(jpeg[start:start + dqt_size])
  169  chromas.append(np.mean(matrix))
  174  lumas.append(np.mean(matrix))
  264  lumas = np.array(lumas)
  [all …]
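
test_jpeg_quality.py walks the JPEG byte stream, pulls the DQT quantization tables out, and averages them with np.mean; higher JPEG quality must produce smaller quantization steps. A sketch of just that final monotonicity check (the input structure here is an assumption, not the test's actual data layout):

    import numpy as np

    def quant_means_decrease_with_quality(tables_by_quality):
        """tables_by_quality: [(quality, luma_table, chroma_table), ...] ordered
        by increasing quality; tables hold the 64 DQT quantization values."""
        lumas = np.array([np.mean(np.asarray(l)) for _, l, _ in tables_by_quality])
        chromas = np.array([np.mean(np.asarray(c)) for _, _, c in tables_by_quality])
        # Higher quality -> finer quantization -> strictly smaller table means.
        return bool(np.all(np.diff(lumas) < 0) and np.all(np.diff(chromas) < 0))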
|
/cts/apps/CameraITS/tests/scene0/ |
D | test_test_patterns.py |
   20  import numpy as np  namespace
   56  np.amax(r_tile), np.amax(gr_tile), np.amax(gb_tile), np.amax(b_tile))
   58  np.amin(r_tile), np.amin(gr_tile), np.amin(gb_tile), np.amin(b_tile))
   62  return np.isclose(var_max, var_min, atol=CH_TOL)
   85  img = np.fliplr(img)
   93  np.allclose(
|
D | test_tonemap_curve.py |
   20  import numpy as np  namespace
  114  if np.allclose(COLOR_CHECKER[color], raw_means, atol=RAW_TOL):
  172  raw_means = np.array(image_processing_utils.compute_image_means(raw_patch))
  173  raw_vars = np.array(
  175  yuv_means = np.array(image_processing_utils.compute_image_means(yuv_patch))
  177  yuv_vars = np.array(
  179  if not np.allclose(raw_means, yuv_means, atol=RGB_MEAN_TOL):
  182  (str(raw_means), str(np.round(yuv_means, 3)), RGB_MEAN_TOL))
  183  if not np.allclose(raw_vars, yuv_vars, atol=RGB_VAR_TOL):
|
D | test_vibration_restriction.py |
   21  import numpy as np  namespace
   77  var_w_vibration = np.var(magnitudes)
   84  var_wo_vibration = np.var(magnitudes)
   99  var_w_vibration_restricted = np.var(magnitudes)
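
test_vibration_restriction.py compares np.var of the accelerometer magnitude trace captured with and without vibration (and again with vibration restricted by the camera). A minimal sketch of that comparison; the ratio threshold is invented for illustration:

    import numpy as np

    _VIBRATION_VAR_RATIO = 10.0  # hypothetical: vibration must raise variance 10x

    def vibration_detected(mags_with_vibration, mags_without_vibration):
        """True if the accelerometer variance jumps while the device vibrates."""
        var_w = np.var(mags_with_vibration)
        var_wo = np.var(mags_without_vibration)
        return bool(var_w > _VIBRATION_VAR_RATIO * var_wo)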
|
/cts/apps/CameraITS/tests/scene4/ |
D | test_multi_camera_alignment.py |
   21  import numpy as np  namespace
   45  TRANS_MATRIX_REF = np.array([0, 0, 0])  # translation matrix for ref cam is 000
   70  img = img.astype(np.uint8)
  119  np.isclose(chart_distance,
  123  np.isclose(chart_distance,
  297  k_x1 = np.dot(k[0, :], r[:, 0])
  298  k_x2 = np.dot(k[0, :], r[:, 1])
  299  k_x3 = z_w * np.dot(k[0, :], r[:, 2]) + np.dot(k[0, :], t)
  300  k_y1 = np.dot(k[1, :], r[:, 0])
  301  k_y2 = np.dot(k[1, :], r[:, 1])
  [all …]
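
Lines 297-301 build np.dot products of intrinsic rows with rotation columns, i.e. entries of K·[R|t], to relate image and world coordinates across cameras. The underlying pinhole projection can be sketched as follows (the matrices in the example are illustrative, not CTS reference values):

    import numpy as np

    def project_point(k, r, t, p_world):
        """Project a 3-D world point to pixels: x = K (R p + t), then divide by z."""
        p_cam = np.dot(r, p_world) + t  # world -> camera coordinates
        x = np.dot(k, p_cam)            # camera -> homogeneous pixel coordinates
        return x[:2] / x[2]             # perspective divide

    k = np.array([[1000.0, 0.0, 640.0],
                  [0.0, 1000.0, 360.0],
                  [0.0, 0.0, 1.0]])
    r = np.eye(3)
    t = np.zeros(3)
    print(project_point(k, r, t, np.array([0.1, 0.05, 1.0])))  # ~[740., 410.]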
|
/cts/tests/tests/uirendering/src/android/uirendering/cts/testclasses/ |
D | NinePatchTests.kt |
   51  val np = with(ImageDecoder.createSource(activity.resources, R.drawable.padding_0)) {  in <lambda>()  constant
   59  addCanvasClientWithoutUsingPicture(NinePatchCanvasClient(np, paint), hw)  in <lambda>()
   64  np.bitmap.recycle()  in <lambda>()
   69  val np = with(ImageDecoder.createSource(activity.resources, R.drawable.padding_0)) {  in <lambda>()  constant
   80  NinePatchCanvasClient(np, paint).draw(canvas, TEST_WIDTH, TEST_HEIGHT)  in <lambda>()
   90  for (bitmap in arrayOf(filtered, unfiltered, noPaint, np.bitmap)) {  in <lambda>()
|
/cts/apps/CameraITS/tests/scene1_1/ |
D | test_capture_result.py |
   23  import numpy as np  namespace
   48  return np.isclose(capture_request_utils.rational_to_float(n1),
   57  xs = np.array([range(lsc_map_w)] * lsc_map_h).reshape(lsc_map_h, lsc_map_w)
   58  ys = np.array([[i]*lsc_map_w for i in range(lsc_map_h)]).reshape(
   60  zs = np.array(lsc_map[ch::4]).reshape(lsc_map_h, lsc_map_w)
  123  assert not np.allclose(awb_gains, MANUAL_AWB_GAINS, atol=ISCLOSE_ATOL)
  183  assert (all([np.isclose(awb_gains[i], MANUAL_GAINS_OK[0][i],
  185  all([np.isclose(awb_gains[i], MANUAL_GAINS_OK[1][i],
  187  all([np.isclose(awb_gains[i], MANUAL_GAINS_OK[2][i],
  199  assert all([np.isclose(c[i], c[i+1], atol=ISCLOSE_ATOL)
  [all …]
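
Lines 57-60 turn the flat lens-shading map into per-channel 2-D grids for plotting: the map arrives as four interleaved gain values per grid cell, so lsc_map[ch::4] selects one channel and reshape restores the (h, w) grid. A sketch of that helper (the per-cell channel ordering is taken to be R/Geven/Godd/B per the camera2 documentation; treat it as an assumption here):

    import numpy as np

    def shading_map_channel(lsc_map, lsc_map_w, lsc_map_h, ch):
        """Return x, y index grids and the (h, w) gain surface for channel ch."""
        zs = np.array(lsc_map[ch::4]).reshape(lsc_map_h, lsc_map_w)
        xs = np.array([range(lsc_map_w)] * lsc_map_h).reshape(lsc_map_h, lsc_map_w)
        ys = np.array([[i] * lsc_map_w for i in range(lsc_map_h)]).reshape(
            lsc_map_h, lsc_map_w)
        return xs, ys, zs

    # Example: channel 0 of a flat 4x3 map (4 gains per cell).
    flat_map = [1.0] * (4 * 3 * 4)
    print(shading_map_channel(flat_map, 4, 3, 0)[2].shape)  # (3, 4)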
|
D | test_3a.py |
   20  import numpy as np  namespace
   62  assert not np.isnan(g)
   65  assert not np.isnan(x)
|
D | test_ae_af.py |
   20  import numpy as np  namespace
   91  if np.isnan(g):
   96  if np.isnan(x):
   98  if not np.isclose(awb_gains[G_CHANNEL], G_GAIN, G_GAIN_TOL):
|
D | test_auto_vs_manual.py |
   21  import numpy as np  namespace
  128  assert np.allclose(awb_xform, x, atol=AWB_MANUAL_ATOL, rtol=0), e_msg
  131  assert np.allclose(awb_gains, g, atol=AWB_MANUAL_ATOL, rtol=0), e_msg
  136  assert np.allclose(awb_xform_a, awb_xform, atol=AWB_AUTO_ATOL,
  140  assert np.allclose(awb_gains_a, awb_gains, atol=AWB_AUTO_ATOL,
|
/cts/apps/CameraITS/tools/ |
D | dng_noise_model.py |
   22  import numpy as np  namespace
  241  np.min(means), np.median(means), np.max(means))
  243  np.min(vars_), np.median(vars_), np.max(vars_))
  265  means_p = np.asarray(means_p).flatten()
  266  vars_p = np.asarray(vars_p).flatten()
  325  sens = np.asarray([e[0] for e in measured_models[pidx]])
  326  sens_sq = np.square(sens)
  329  gains = np.asarray([s[0] for s in samples[pidx]])
  330  means = np.asarray([s[1] for s in samples[pidx]])
  331  vars_ = np.asarray([s[2] for s in samples[pidx]])
  [all …]
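
dng_noise_model.py gathers (gain, mean, variance) samples per color plane with np.asarray and fits how the noise coefficients scale with sensitivity. A heavily simplified sketch: for a single gain, pixel variance is roughly linear in the mean, and np.polyfit recovers the slope (shot noise) and offset (read noise); the full tool fits across all gains and planes:

    import numpy as np

    def fit_single_gain_noise_model(means, vars_):
        """Least-squares fit of var = c1 * mean + c0 for one analog gain."""
        means = np.asarray(means).flatten()
        vars_ = np.asarray(vars_).flatten()
        c1, c0 = np.polyfit(means, vars_, 1)  # slope, intercept
        return c1, c0

    # Synthetic samples: variance grows linearly with signal plus a small floor.
    m = np.linspace(0.01, 0.5, 20)
    v = 0.002 * m + 1.0e-5
    print(fit_single_gain_noise_model(m, v))  # ~(0.002, 1e-5)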
|
/cts/apps/CameraITS/tests/sensor_fusion/ |
D | test_sensor_fusion.py |
   28  import numpy as np  namespace
  206  times = np.array([(e['time'] - gyro_events[0]['time']) * _NSEC_TO_SEC
  208  x = np.array([e['x'] for e in gyro_events])
  209  y = np.array([e['y'] for e in gyro_events])
  210  z = np.array([e['z'] for e in gyro_events])
  253  starts = np.array([start for start, exptime, readout in cam_events])
  254  max_frame_delta_ms = (np.amax(np.subtract(starts[1:], starts[0:-1])) /
  261  exptimes = np.array([exptime for start, exptime, readout in cam_events])
  262  if not np.all(exptimes == exptimes[0]):
  264  readouts = np.array([readout for start, exptime, readout in cam_events])
  [all …]
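
Lines 206-210 convert the gyro event dicts into time, x, y and z arrays (times re-based to the first event and scaled from nanoseconds to seconds), and lines 253-254 bound the gap between consecutive camera frame starts. A sketch of both helpers:

    import numpy as np

    _NSEC_TO_SEC = 1.0e-9

    def gyro_events_to_arrays(gyro_events):
        """Split [{'time', 'x', 'y', 'z'}, ...] into numpy arrays."""
        t0 = gyro_events[0]['time']
        times = np.array([(e['time'] - t0) * _NSEC_TO_SEC for e in gyro_events])
        x = np.array([e['x'] for e in gyro_events])
        y = np.array([e['y'] for e in gyro_events])
        z = np.array([e['z'] for e in gyro_events])
        return times, x, y, z

    def max_frame_delta_ms(frame_starts_ns):
        """Largest gap between consecutive frame start timestamps, in ms."""
        starts = np.asarray(frame_starts_ns, dtype=np.int64)
        return float(np.amax(np.subtract(starts[1:], starts[:-1])) * 1.0e-6)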
|
/cts/suite/audio_quality/test_description/conf/ |
D | check_conf.py |
   18  import numpy as np  namespace
   25  a = np.array([1,2,3])
|
/cts/apps/CameraITS/tests/inprog/rolling_shutter_skew/ |
D | test_rolling_shutter_skew.py |
   20  import numpy as np  namespace
  319  np_cluster = np.array([[c.x, c.y] for c in largest_cluster])
  369  img = img.astype(np.uint8)
  382  kernel = np.ones((3, 3), np.uint8)
  416  self.x = int(np.mean(contour[:, 0, 0]))
  417  self.y = int(np.mean(contour[:, 0, 1]))
  419  x_r = (np.max(contour[:, 0, 0]) - np.min(contour[:, 0, 0])) / 2.0
  420  y_r = (np.max(contour[:, 0, 1]) - np.min(contour[:, 0, 1])) / 2.0
  570  points = np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]],
  571  np.int32)
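
Lines 416-420 reduce an OpenCV contour (shape (N, 1, 2), x in [:, 0, 0], y in [:, 0, 1]) to a centre point and per-axis half-extents. A standalone sketch of that reduction:

    import numpy as np

    def contour_center_and_radii(contour):
        """Centre (x, y) and half-width/half-height of a contour array."""
        cx = int(np.mean(contour[:, 0, 0]))
        cy = int(np.mean(contour[:, 0, 1]))
        x_r = (np.max(contour[:, 0, 0]) - np.min(contour[:, 0, 0])) / 2.0
        y_r = (np.max(contour[:, 0, 1]) - np.min(contour[:, 0, 1])) / 2.0
        return cx, cy, x_r, y_r

    # A hand-built square "contour" in OpenCV's (N, 1, 2) layout.
    square = np.array([[[10, 20]], [[30, 20]], [[30, 40]], [[10, 40]]])
    print(contour_center_and_radii(square))  # (20, 30, 10.0, 10.0)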
|