/external/libxcam/tests/

  test-image-deblurring.cpp
     50: blind_deblurring (cv::Mat &input_image, cv::Mat &output_image)  in blind_deblurring() argument
     54: image_deblurring->blind_deblurring (input_image, output_image, kernel, -1, -1, false);  in blind_deblurring()
     58: non_blind_deblurring (cv::Mat &input_image, cv::Mat &output_image)  in non_blind_deblurring() argument
     61: cv::cvtColor (input_image, input_image, CV_BGR2GRAY);  in non_blind_deblurring()
     74: cv::filter2D (input_image, blurred, CV_32FC1, conv_kernel, cv::Point(-1, -1), 0, cv::BORDER_CONSTANT…  in non_blind_deblurring()
    147: cv::Mat input_image = cv::imread (file_in_name, CV_LOAD_IMAGE_COLOR);  in main() local
    149: if (input_image.empty ())  in main()
    156: blind_deblurring (input_image, output_image);  in main()
    160: non_blind_deblurring (input_image, output_image);  in main()
    162: float input_sharp = sharp->measure_sharp (input_image);  in main()
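For context, non_blind_deblurring() in this test converts the input to grayscale and then synthesizes a blurred image by convolving with a kernel under a constant border. A minimal Python/cv2 analogue of that setup (a sketch only: the 1x9 motion-blur kernel is an assumption, and the synthetic input stands in for the file the C++ test reads):

```python
import cv2
import numpy as np

# Synthetic stand-in for the image the C++ test loads with cv::imread.
input_image = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)

# Grayscale conversion followed by a constant-border convolution,
# mirroring lines 61 and 74 of test-image-deblurring.cpp.
gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY).astype(np.float32)
conv_kernel = np.ones((1, 9), np.float32) / 9.0  # assumed motion-blur kernel
blurred = cv2.filter2D(gray, cv2.CV_32F, conv_kernel,
                       anchor=(-1, -1), delta=0,
                       borderType=cv2.BORDER_CONSTANT)
```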
/external/webrtc/webrtc/modules/video_coding/codecs/vp8/

  vp8_impl.cc
    724: const VideoFrame& input_image =  in Encode() local
    727: if (quality_scaler_enabled_ && (input_image.width() != codec_.width ||  in Encode()
    728: input_image.height() != codec_.height)) {  in Encode()
    729: int ret = UpdateCodecFrameSize(input_image);  in Encode()
    738: RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));  in Encode()
    739: RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));  in Encode()
    744: const_cast<uint8_t*>(input_image.buffer(kYPlane));  in Encode()
    746: const_cast<uint8_t*>(input_image.buffer(kUPlane));  in Encode()
    748: const_cast<uint8_t*>(input_image.buffer(kVPlane));  in Encode()
    750: raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane);  in Encode()
    [all …]

  simulcast_encoder_adapter.cc
    232: const VideoFrame& input_image,  in Encode() argument
    261: int src_width = input_image.width();  in Encode()
    262: int src_height = input_image.height();  in Encode()
    284: input_image.IsZeroSize()) {  in Encode()
    285: streaminfos_[stream_idx].encoder->Encode(input_image, codec_specific_info,  in Encode()
    294: input_image.buffer(kYPlane), input_image.stride(kYPlane),  in Encode()
    295: input_image.buffer(kUPlane), input_image.stride(kUPlane),  in Encode()
    296: input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width,  in Encode()
    301: dst_frame.set_timestamp(input_image.timestamp());  in Encode()
    302: dst_frame.set_render_time_ms(input_image.render_time_ms());  in Encode()

  vp8_impl.h
     49: virtual int Encode(const VideoFrame& input_image,
     78: int UpdateCodecFrameSize(const VideoFrame& input_image);
     86: int GetEncodedPartitions(const VideoFrame& input_image,
    131: int Decode(const EncodedImage& input_image,
    149: int DecodePartitions(const EncodedImage& input_image,

  simulcast_encoder_adapter.h
     43: int Encode(const VideoFrame& input_image,
/external/tensorflow/tensorflow/compiler/tests/

  lrn_ops_test.py
     41: def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,  argument
     44: output = copy.deepcopy(input_image)
     45: batch_size = input_image.shape[0]
     46: rows = input_image.shape[1]
     47: cols = input_image.shape[2]
     48: depth = input_image.shape[3]
     55: patch = input_image[b, r, c, begin:end]
/external/tensorflow/tensorflow/python/kernel_tests/

  lrn_op_test.py
     38: def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,  argument
     41: output = copy.deepcopy(input_image)
     42: batch_size = input_image.shape[0]
     43: rows = input_image.shape[1]
     44: cols = input_image.shape[2]
     45: depth = input_image.shape[3]
     52: patch = input_image[b, r, c, begin:end]
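Both LRN tests carry the same NumPy reference implementation; the matches show only its skeleton (deep-copy the input, walk every position, slice a depth window named patch). Below is a sketch of the complete computation those loops build toward, assuming the documented tf.nn.local_response_normalization formula (the squared-sum normalization and the beta=0.5 default come from that documentation, not from the matched lines):

```python
import numpy as np

def lrn_reference(input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
                  beta=0.5):
  """NumPy reference for LRN over the depth axis of an NHWC tensor."""
  output = np.empty_like(input_image, dtype=np.float32)
  batch_size, rows, cols, depth = input_image.shape
  for b in range(batch_size):
    for r in range(rows):
      for c in range(cols):
        for d in range(depth):
          begin = max(0, d - lrn_depth_radius)
          end = min(depth, d + lrn_depth_radius + 1)
          patch = input_image[b, r, c, begin:end]
          # output = input / (bias + alpha * sum(patch^2)) ** beta
          norm = bias + alpha * np.sum(patch * patch)
          output[b, r, c, d] = input_image[b, r, c, d] / norm ** beta
  return output

x = np.random.rand(2, 4, 4, 8).astype(np.float32)
print(lrn_reference(x).shape)  # (2, 4, 4, 8)
```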
/external/webrtc/webrtc/modules/video_coding/codecs/vp9/

  vp9_impl.cc
    475: int VP9EncoderImpl::Encode(const VideoFrame& input_image,  in Encode() argument
    481: if (input_image.IsZeroSize()) {  in Encode()
    492: RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));  in Encode()
    493: RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));  in Encode()
    499: input_image_ = &input_image;  in Encode()
    503: raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane));  in Encode()
    504: raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane));  in Encode()
    505: raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane));  in Encode()
    506: raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane);  in Encode()
    507: raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);  in Encode()
    [all …]

  vp9_impl.h
     40: int Encode(const VideoFrame& input_image,
    142: int Decode(const EncodedImage& input_image,
/external/tensorflow/tensorflow/lite/tutorials/

  mnist_tflite.py
     46: def run_eval(interpreter, input_image):  argument
     62: input_image = np.reshape(input_image, input_details[0]['shape'])
     63: interpreter.set_tensor(input_details[0]['index'], input_image)
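run_eval() reshapes the incoming image to the interpreter's declared input shape and feeds it with set_tensor. A minimal sketch of that round trip, assuming a single-input classifier model; the mnist.tflite path and the argmax over the output are illustrative, not taken from the tutorial:

```python
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='mnist.tflite')  # assumed path
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Reshape to the model's declared input shape, as run_eval() does.
input_image = np.random.rand(28, 28).astype(np.float32)
input_image = np.reshape(input_image, input_details[0]['shape'])

interpreter.set_tensor(input_details[0]['index'], input_image)
interpreter.invoke()
logits = interpreter.get_tensor(output_details[0]['index'])
print(np.argmax(logits))
```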
/external/webrtc/webrtc/video/

  video_decoder.cc
     86: const EncodedImage& input_image,  in Decode() argument
     93: if (!fallback_decoder_ || input_image._frameType == kVideoFrameKey) {  in Decode()
     94: int32_t ret = decoder_->Decode(input_image, missing_frames, fragmentation,  in Decode()
    112: return fallback_decoder_->Decode(input_image, missing_frames, fragmentation,  in Decode()
/external/tensorflow/tensorflow/contrib/resampler/xla/

  resampler_ops_xla_test.py
     34: input_image = array_ops.placeholder(image_np.dtype)
     36: resampled = resampler.resampler(input_image, warp, name='resampler')
     37: out = sess.run(resampled, {input_image: image_np, warp: warp_np})
     45: input_image = array_ops.placeholder(input_np.dtype)
     50: input_image, warp, grad_output)
     53: input_image: input_np,
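The test feeds an image and a warp tensor through the contrib resampler and pulls the result out of a session. A self-contained sketch of the forward path, assuming the contrib resampler's documented convention (the warp's last dimension holds (x, y) sample coordinates and the op interpolates bilinearly); shapes here are illustrative:

```python
import numpy as np
import tensorflow as tf

image_np = np.random.rand(1, 4, 4, 1).astype(np.float32)
# One (x, y) sample location per batch element: shape [batch, 1, 2].
warp_np = np.array([[[1.5, 2.0]]], dtype=np.float32)

input_image = tf.placeholder(image_np.dtype, shape=image_np.shape)
warp = tf.placeholder(warp_np.dtype, shape=warp_np.shape)
resampled = tf.contrib.resampler.resampler(input_image, warp, name='resampler')

with tf.Session() as sess:
  out = sess.run(resampled, {input_image: image_np, warp: warp_np})
print(out.shape)  # (1, 1, 1): one bilinear sample per batch element
```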
/external/libxcam/modules/ocl/

  cl_demo_handler.cpp
     68: SmartPtr<CLImage> input_image = convert_to_climage (context, input, desc);  in prepare_parameters() local
     71: XCAM_ASSERT (input_image.ptr () && output_image.ptr ());  in prepare_parameters()
     72: XCAM_ASSERT (input_image->is_valid () && output_image->is_valid ());  in prepare_parameters()
     73: args.push_back (new CLMemArgument (input_image));  in prepare_parameters()

  cl_wavelet_denoise_handler.cpp
     60: SmartPtr<CLMemory> input_image = convert_to_clbuffer (context, input);  in prepare_arguments() local
     78: input_image->is_valid () && reconstruct_image->is_valid (),  in prepare_arguments()
     88: args.push_back (new CLMemArgument (input_image));  in prepare_arguments()
     92: args.push_back (new CLMemArgument (input_image));  in prepare_arguments()
/external/tensorflow/tensorflow/contrib/image/python/kernel_tests/

  sparse_image_warp_test.py
    111: warped_image, input_image, _ = sess.run(
    114: self.assertAllClose(warped_image, input_image)
    153: warped_image, input_image, flow = sess.run(
    158: input_image[0, 4, 4, :],
    180: input_image = self.load_image(input_file, sess)
    191: float_image = np.expand_dims(np.float32(input_image) / 255, 0)
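Line 114's assertion is the key property: when the source and destination control points coincide, sparse_image_warp should reproduce the input. A sketch of that identity case, assuming tf.contrib.image.sparse_image_warp's TF 1.x signature (image, source points, dest points) returning (warped image, dense flow); the image size and the four corner control points are illustrative:

```python
import numpy as np
import tensorflow as tf

image = tf.constant(np.random.rand(1, 8, 8, 1).astype(np.float32))
# Four non-collinear control points, shape [batch, num_points, 2].
points = tf.constant([[[1., 1.], [1., 6.], [6., 1.], [6., 6.]]])

# Identical source and destination points: the warp is (near) identity.
warped_image, flow = tf.contrib.image.sparse_image_warp(image, points, points)

with tf.Session() as sess:
  warped, original = sess.run([warped_image, image])
np.testing.assert_allclose(warped, original, atol=1e-5)
```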
/external/webrtc/webrtc/test/

  fake_encoder.cc
     48: int32_t FakeEncoder::Encode(const VideoFrame& input_image,  in Encode() argument
    102: encoded._timeStamp = input_image.timestamp();  in Encode()
    103: encoded.capture_time_ms_ = input_image.render_time_ms();  in Encode()
    200: int32_t DelayedEncoder::Encode(const VideoFrame& input_image,  in Encode() argument
    204: return FakeEncoder::Encode(input_image, codec_specific_info, frame_types);  in Encode()

  fake_encoder.h
     34: int32_t Encode(const VideoFrame& input_image,
     78: int32_t Encode(const VideoFrame& input_image,

  configurable_frame_size_encoder.h
     31: int32_t Encode(const VideoFrame& input_image,
/external/webrtc/webrtc/modules/video_coding/codecs/h264/

  h264_video_toolbox_decoder.cc
    122: const EncodedImage& input_image,  in Decode() argument
    127: RTC_DCHECK(input_image._buffer);  in Decode()
    130: if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,  in Decode()
    131: input_image._length, video_format_,  in Decode()
    147: new internal::FrameDecodeParams(callback_, input_image._timeStamp));  in Decode()

  h264_video_toolbox_encoder.cc
    236: const VideoFrame& input_image,  in Encode() argument
    239: if (input_image.IsZeroSize()) {  in Encode()
    263: if (!internal::CopyVideoFrameToPixelBuffer(input_image, pixel_buffer)) {  in Encode()
    281: CMTimeMake(input_image.render_time_ms(), 1000);  in Encode()
    291: input_image.render_time_ms(), input_image.timestamp()));  in Encode()

  h264_video_toolbox_decoder.h
     36: int Decode(const EncodedImage& input_image,

  h264_video_toolbox_encoder.h
     39: int Encode(const VideoFrame& input_image,
/external/webrtc/webrtc/

  video_decoder.h
     66: virtual int32_t Decode(const EncodedImage& input_image,
     97: int32_t Decode(const EncodedImage& input_image,
/external/tensorflow/tensorflow/contrib/receptive_field/

  README.md
     50: images = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='input_image')
     56: g.as_graph_def(), 'input_image', 'my_output_endpoint')
     79: images = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='input_image')
     85: g.as_graph_def(), 'input_image', 'InceptionResnetV2/Conv2d_7b_1x1/Relu')
    170: --input_node input_image \
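These README lines are fragments of a single snippet: a placeholder named input_image feeds a model graph, and the graph def plus the input and output node names go to the receptive-field calculator. A reconstruction of that call on a toy graph (the one-layer valid-padding convolution is an assumed stand-in for the README's real models, so the printed numbers apply only to it):

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  images = tf.placeholder(
      tf.float32, shape=(1, None, None, 3), name='input_image')
  net = tf.layers.conv2d(images, 16, 7, strides=2, padding='valid',
                         name='conv1')  # assumed toy model
  tf.nn.relu(net, name='my_output_endpoint')

rf_x, rf_y, eff_stride_x, eff_stride_y, eff_pad_x, eff_pad_y = (
    tf.contrib.receptive_field.compute_receptive_field_from_graph_def(
        g.as_graph_def(), 'input_image', 'my_output_endpoint'))
print(rf_x, eff_stride_x, eff_pad_x)  # 7 2 0 for this toy graph
```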
/external/tensorflow/tensorflow/core/api_def/base_api/

  api_def_LRNGrad.pbtxt
     11: name: "input_image"