/external/autotest/client/tests/kvm/ |
D | guest-os.cfg.sample |
    63  images += " stg24 stg25 stg26 stg27"
    92  boot_path = "images/pxeboot"
   108  #floppy = images/f8-32/ks.vfd
   109  cdrom_unattended = images/f8-32/ks.iso
   110  kernel = images/f8-32/vmlinuz
   111  initrd = images/f8-32/initrd.img
   127  #floppy = images/f8-64/ks.vfd
   128  cdrom_unattended = images/f8-64/ks.iso
   129  kernel = images/f8-64/vmlinuz
   130  initrd = images/f8-64/initrd.img
   [all …]
|
/external/skia/gyp/ |
D | images.gyp |
     6  # GYP file for images project.
    10  'target_name': 'images',
    24  '../include/images',
    33  '../include/images/SkForceLinking.h',
    34  '../src/images/SkJpegUtility.h',
    35  '../include/images/SkMovie.h',
    36  '../include/images/SkPageFlipper.h',
    38  '../src/images/bmpdecoderhelper.cpp',
    39  '../src/images/bmpdecoderhelper.h',
    41  '../src/images/SkForceLinking.cpp',
   [all …]
|
/external/opencv3/modules/photo/src/ |
D | calibrate.cpp |
    66  std::vector<Mat> images;  in process() local
    67  src.getMatVector(images);  in process()
    70  CV_Assert(images.size() == times.total());  in process()
    71  checkImageDimensions(images);  in process()
    72  CV_Assert(images[0].depth() == CV_8U);  in process()
    74  int channels = images[0].channels();  in process()
    83  sample_points.push_back(Point(rand() % images[0].cols, rand() % images[0].rows));  in process()
    86  … x_points = static_cast<int>(sqrt(static_cast<double>(samples) * images[0].cols / images[0].rows));  in process()
    88  int step_x = images[0].cols / x_points;  in process()
    89  int step_y = images[0].rows / y_points;  in process()
   [all …]
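The matched lines show CalibrateDebevec::process() validating its inputs (matching image/time counts, 8-bit depth) and choosing sample points, either at random or on a grid sized from the image aspect ratio. A minimal sketch of driving the same calibration from Python, assuming a hypothetical three-shot exposure bracket (file names and exposure times are placeholders):

    import cv2
    import numpy as np

    # Hypothetical exposure bracket; file names and times are placeholders.
    files = ["exp_0.03.jpg", "exp_0.25.jpg", "exp_2.5.jpg"]
    times = np.array([0.03, 0.25, 2.5], dtype=np.float32)
    images = [cv2.imread(f) for f in files]

    # Same preconditions the C++ code asserts before sampling.
    assert len(images) == len(times)
    assert all(im is not None and im.dtype == np.uint8 for im in images)

    # Recover the camera response curve from sampled pixels.
    calibrate = cv2.createCalibrateDebevec()
    response = calibrate.process(images, times)  # 256-entry curve per channel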
|
D | merge.cpp |
    61  std::vector<Mat> images;  in process() local
    62  src.getMatVector(images);  in process()
    65  CV_Assert(images.size() == times.total());  in process()
    66  checkImageDimensions(images);  in process()
    67  CV_Assert(images[0].depth() == CV_8U);  in process()
    69  int channels = images[0].channels();  in process()
    70  Size size = images[0].size();  in process()
    73  dst.create(images[0].size(), CV_32FCC);  in process()
    94  for(size_t i = 0; i < images.size(); i++) {  in process()
    96  split(images[i], splitted);  in process()
   [all …]
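merge.cpp is the complementary step: the validated exposures are split per channel and fused into a 32-bit float HDR destination (the CV_32FCC create() call above). Continuing the calibration sketch, under the same assumptions:

    # Fuse the bracket into a linear HDR image; the output is 32-bit float,
    # matching the CV_32FCC destination merge.cpp creates.
    merge = cv2.createMergeDebevec()
    hdr = merge.process(images, times, response)
    print(hdr.dtype)  # float32, same size as the inputs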
|
/external/autotest/client/cros/chameleon/ |
D | screen_comparison.py |
    27  @param output_dir: The directory for output images.
    51  images = [self._capturer1.capture(), self._capturer2.capture()]
    53  if None in images:
    55  tags[images.index(None)])
    59  # Sometimes the format of images got from X is not RGB,
    62  for i, image in enumerate(images):
    64  images[i] = image.convert('RGB')
    70  if images[0].size != images[1].size:
    71  message = ('Sizes of images %s and %s do not match: '
    73  (tuple(tags) + images[0].size + images[1].size))
   [all …]
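The comparer captures one frame per source, bails out if either capture failed, converts non-RGB captures (images grabbed from X are not always RGB), and rejects size mismatches before comparing pixels. A self-contained sketch of those checks with Pillow; the ImageChops diff at the end is a stand-in for the module's actual pixel comparison:

    from PIL import ImageChops

    def compare_captures(img1, img2, tags=("capturer1", "capturer2")):
        """Return an error message, or None if the two captures match."""
        images = [img1, img2]
        if None in images:
            return "Failed to capture screen: %s" % tags[images.index(None)]
        # Captures from X are not always RGB; normalize before comparing.
        images = [im.convert("RGB") if im.mode != "RGB" else im for im in images]
        if images[0].size != images[1].size:
            return ("Sizes of images %s and %s do not match: %dx%d vs %dx%d"
                    % (tuple(tags) + images[0].size + images[1].size))
        # Stand-in diff: bounding box of differing pixels, None when identical.
        if ImageChops.difference(*images).getbbox() is not None:
            return "Images differ."
        return None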
|
/external/libxml2/doc/ |
D | Makefile.am |
    23  $(wildcard tutorial/images/*.png) \
    24  $(wildcard tutorial/images/callouts/*.png) $(wildcard API*.html) \
   190  tutorial/images/blank.png \
   191  tutorial/images/callouts/1.png \
   192  tutorial/images/callouts/10.png \
   193  tutorial/images/callouts/2.png \
   194  tutorial/images/callouts/3.png \
   195  tutorial/images/callouts/4.png \
   196  tutorial/images/callouts/5.png \
   197  tutorial/images/callouts/6.png \
   [all …]
|
/external/deqp/android/cts/master/ |
D | egl-master.txt |
  1080  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.create_destroy
  1081  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.create_texture
  1082  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.texsubimage2d
  1083  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.copytexsubimage2d
  1084  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.teximage2d
  1085  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.copyteximage2d
  1086  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.create_texture_render
  1087  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.texsubimage2d_render
  1088  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.copytexsubimage2d_render
  1089  dEQP-EGL.functional.sharing.gles2.multithread.simple.images.texture_source.teximage2d_render
   [all …]
|
/external/opencv3/doc/tutorials/photo/hdr_imaging/ |
D | hdr_imaging.markdown |
     7  Today most digital images and imaging devices use 8 bits per channel thus limiting the dynamic range
    11  capture all details using a single exposure. HDR imaging works with images that use more that 8 bits
    14  There are different ways to obtain HDR images, but the most common one is to use photographs of the
    19  since images with different exposures should be registered and aligned.
    22  case images are already aligned and there are no moving objects. We also demonstrate an alternative
    29  ![](images/memorial.png)
    39  -# **Load images and exposure times**
    41  vector<Mat> images;
    43  loadExposureSeq(argv[1], images, times);
    45  Firstly we load input images and exposure times from user-defined folder. The folder should
   [all …]
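The tutorial's C++ steps (load exposures and times, estimate the response, merge, tonemap) translate directly to the Python bindings. A hedged sketch; the list-file layout, one "name exposure-denominator" pair per line, follows what this tutorial's loadExposureSeq reads, and the folder name is a placeholder:

    import os
    import cv2
    import numpy as np

    def load_exposure_seq(path, list_name="list.txt"):
        images, times = [], []
        with open(os.path.join(path, list_name)) as f:
            for line in f:
                name, val = line.split()
                images.append(cv2.imread(os.path.join(path, name)))
                times.append(1.0 / float(val))  # list stores inverse exposure times
        return images, np.asarray(times, dtype=np.float32)

    images, times = load_exposure_seq("exposures/")  # placeholder folder
    response = cv2.createCalibrateDebevec().process(images, times)
    hdr = cv2.createMergeDebevec().process(images, times, response)
    ldr = cv2.createTonemap(gamma=2.2).process(hdr)  # compress into [0, 1]
    cv2.imwrite("ldr.png", np.clip(ldr * 255, 0, 255).astype(np.uint8))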
|
/external/chromium-trace/catapult/third_party/pipeline/pipeline/ui/ |
D | jquery.treeview.css |
    14  background: url(images/treeview-default.gif) -64px -25px no-repeat;
    41  .treeview li { background: url(images/treeview-default-line.gif) 0 0 no-repeat; }
    47  .treeview li.lastCollapsable, .treeview li.lastExpandable { background-image: url(images/treeview-d…
    53  .treeview-red li { background-image: url(images/treeview-red-line.gif); }
    54  …lastCollapsable, .treeview-red li.lastExpandable { background-image: url(images/treeview-red.gif);…
    56  .treeview-black li { background-image: url(images/treeview-black-line.gif); }
    57  …stCollapsable, .treeview-black li.lastExpandable { background-image: url(images/treeview-black.gif…
    59  .treeview-gray li { background-image: url(images/treeview-gray-line.gif); }
    60  …astCollapsable, .treeview-gray li.lastExpandable { background-image: url(images/treeview-gray.gif)…
    62  .treeview-famfamfam li { background-image: url(images/treeview-famfamfam-line.gif); }
   [all …]
|
/external/chromium-trace/catapult/third_party/flot/ |
D | jquery.flot.image.js |
     1  /* Flot plugin for plotting images.
    11  There are two helpers included for retrieving images. The easiest work the way
    12  that you put in URLs instead of images in the data, like this:
    17  options are the same as you pass in to $.plot. This loads the images, replaces
    18  the URLs in the data with the corresponding images and calls "callback" when
    19  all images are loaded (or failed loading). In the callback, you can then call
    24  Image object when all images are loaded or have failed loading.
    29  images: {
    40  images: { ... }
    44  can't use images with anything else in a specific data series.
   [all …]
|
/external/opencv3/samples/cpp/tutorial_code/calib3d/camera_calibration/ |
D | VID5.xml |
     3  <images>
     4  images/CameraCalibraation/VID5/xx1.jpg
     5  images/CameraCalibraation/VID5/xx2.jpg
     6  images/CameraCalibraation/VID5/xx3.jpg
     7  images/CameraCalibraation/VID5/xx4.jpg
     8  images/CameraCalibraation/VID5/xx5.jpg
     9  images/CameraCalibraation/VID5/xx6.jpg
    10  images/CameraCalibraation/VID5/xx7.jpg
    11  images/CameraCalibraation/VID5/xx8.jpg
    12  </images>
|
/external/opencv3/doc/tutorials/calib3d/camera_calibration/ |
D | camera_calibration.markdown |
    60  noise present in our input images, so for good results you will probably need at least 10 good
    85  opt for the last one, you will need to create a configuration file where you enumerate the images to
    87  The important part to remember is that the images need to be specified using the absolute path or
   110  video file. If this fails or we have enough images then we run the calibration process. In case
   127  those images where this is true!).
   129  Then again in case of cameras we only take camera images when an input delay time is passed.
   130  This is done in order to allow user moving the chessboard around and getting different images.
   131  Similar images result in similar equations, and similar equations at the calibration step will
   132  form an ill-posed problem, so the calibration will fail. For square images the positions of the
   149  -# **Show the distortion removal for the images too**
   [all …]
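The excerpt's core argument: only keep frames where the pattern was actually found, and space captures out with an input delay so the views differ, because near-duplicate views yield near-duplicate equations and an ill-posed system. A compact sketch of where that loop ends up, assuming a 9x6 chessboard and placeholder file names:

    import cv2
    import numpy as np

    pattern = (9, 6)  # assumed inner-corner count of the chessboard
    objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

    obj_pts, img_pts = [], []
    for name in ["view%02d.jpg" % i for i in range(12)]:  # >= 10 good views
        gray = cv2.imread(name, cv2.IMREAD_GRAYSCALE)
        found, corners = cv2.findChessboardCorners(gray, pattern)
        if found:  # only images where detection succeeded contribute equations
            obj_pts.append(objp)
            img_pts.append(corners)

    rms, K, dist, rvecs, tvecs = cv2.calibrateCamera(
        obj_pts, img_pts, gray.shape[::-1], None, None)
    # The tutorial's final step: show distortion removal on the inputs too.
    undistorted = cv2.undistort(cv2.imread("view00.jpg"), K, dist)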
|
/external/opencv3/doc/py_tutorials/py_core/py_image_arithmetics/ |
D | py_image_arithmetics.markdown |
     1  Arithmetic Operations on Images {#tutorial_py_image_arithmetics}
     7  - Learn several arithmetic operations on images like addition, subtraction, bitwise operations
    14  You can add two images by OpenCV function, cv2.add() or simply by numpy operation, res = img1 + img2. Both images should be of same depth and type, or second image can just be a
    32  It will be more visible when you add two images. OpenCV function will provide a better result. So
    38  This is also image addition, but different weights are given to images so that it gives a feeling of
    39  blending or transparency. Images are added as per the equation below:
    46  Here I took two images to blend them together. First image is given a weight of 0.7 and second image
    64  ![image](images/blending.jpg)
    73  I want to put OpenCV logo above an image. If I add two images, it will change color. If I blend it,
   [all …]
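Both points in this excerpt fit in a few lines: cv2.add() saturates while the NumPy + wraps modulo 256, and addWeighted() implements the weighted blend (0.3 for the second image is assumed as the complement of the stated 0.7; file names are placeholders):

    import cv2
    import numpy as np

    # Saturating vs. wrap-around addition on one uint8 value.
    x, y = np.uint8([250]), np.uint8([10])
    print(cv2.add(x, y))  # [[255]] -- OpenCV saturates 260 to 255
    print(x + y)          # [4]     -- NumPy wraps: 260 % 256

    # Weighted blend: dst = 0.7*img1 + 0.3*img2.
    img1 = cv2.imread("scene.jpg")
    img2 = cv2.imread("logo.jpg")  # must match img1 in size and type
    dst = cv2.addWeighted(img1, 0.7, img2, 0.3, 0)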
|
/external/opencv3/modules/photo/include/opencv2/ |
D | photo.hpp |
   104  objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting> for more details.
   132  This function expected to be applied to grayscale images. For colored images look at
   159  This function expected to be applied to grayscale images. For colored images look at
   170  /** @brief Modification of fastNlMeansDenoising function for colored images
   182  @param hColor The same as h but for color components. For most images value equals 10
   192  …brief Modification of fastNlMeansDenoising function for images sequence where consequtive images h…
   194  images or for manual manipulation with colorspaces. For more details see
   198  4-channel images sequence. All images should have the same type and
   201  @param temporalWindowSize Number of surrounding images to use for target image denoising. Should
   202  be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
   [all …]
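The header documents a family of non-local-means variants: grayscale, a colored modification, and multi-frame versions that denoise one target frame from an odd-sized window of surrounding images. A sketch of calling two of them from Python, using the parameter values the documentation itself suggests (h = hColor = 10; file names are placeholders):

    import cv2

    img = cv2.imread("noisy.jpg")
    # Colored variant: h for luminance, hColor for the color components.
    den = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)

    # Multi-frame variant: denoise frames[2] from a temporal window of 5
    # frames (window size must be odd; all frames the same size and type).
    frames = [cv2.imread("frame%d.png" % i, cv2.IMREAD_GRAYSCALE)
              for i in range(5)]
    den2 = cv2.fastNlMeansDenoisingMulti(frames, 2, 5, None, 10, 7, 21)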
|
/external/opencv3/doc/py_tutorials/py_feature2d/py_features_meaning/ |
D | py_features_meaning.markdown |
    13  Most of you will have played the jigsaw puzzle games. You get a lot of small pieces of a images,
    16  jigsaw puzzles? If the computer can play jigsaw puzzles, why can't we give a lot of real-life images
    17  of a good natural scenery to computer and tell it to stitch all those images to a big single image?
    18  If the computer can stitch several natural images to one, what about giving a lot of pictures of a
    23  How can you stitch a lot of natural images to a single image?
    28  out one good feature which can be compared across several images, you can point out one. That is
    30  find them, we find the same features in other images, we align them. That's it. (In jigsaw puzzle,
    31  we look more into continuity of different images). All these abilities are present in us inherently.
    40  ![image](images/feature_building.jpg)
    58  ![image](images/feature_simple.png)
   [all …]
|
/external/autotest/client/deps/glbench/ |
D | USAGE_glbench |
     2  performance numbers to stdout and resulting images to a directory for
     6  threshold, while the resulting images have to be found in a repository of
     7  reference images. As the image name encodes the raw pixel MD5 this can be
    44  1) first try to identify known buggy images by searching in
    46  2) then identify good images by searching in
    55  Handling of reference images
    58  Good reference images themselves are located at ../glbench-images/glbench_reference_images/
    59  Images that have outstanding defects and an open bug filed are at ../glbench-images/glbench_knownba…
    60  When that bug is closed the directory should be moved to ../glbench-images/glbench_fixedbad_images/…
    62  To push out new reference images place them in the appropriate
   [all …]
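The triage order above hinges on the raw-pixel MD5 embedded in each result image's name: look for it among known-bad images first, then among good references. A hedged sketch of that lookup; the helper and the completed directory names (truncated in the text above) are assumptions:

    import hashlib
    import os

    from PIL import Image

    def pixel_md5(path):
        # The image name encodes the MD5 of the raw pixels, so hash the
        # decoded pixel bytes rather than the PNG file itself.
        return hashlib.md5(Image.open(path).convert("RGB").tobytes()).hexdigest()

    def triage(result_png, root="../glbench-images"):  # assumed layout
        md5 = pixel_md5(result_png)
        for verdict, subdir in (("known bad", "glbench_knownbad_images"),
                                ("good", "glbench_reference_images")):
            for _, _, names in os.walk(os.path.join(root, subdir)):
                if any(md5 in n for n in names):
                    return verdict
        return "unknown"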
|
/external/libvncserver/webclients/novnc/ |
D | vnc.html |
    28  <link rel="apple-touch-startup-image" href="images/screen_320x460.png" />
    30  <link rel="apple-touch-icon" href="images/screen_57x57.png">
    32  <link rel="apple-touch-icon-precomposed" href="images/screen_57x57.png" />
    52  <input type="image" alt="viewport drag" src="images/drag.png"
    56  <input type="image" alt="No mousebutton" src="images/mouse_none.png"
    58  <input type="image" alt="Left mousebutton" src="images/mouse_left.png"
    60  <input type="image" alt="Middle mousebutton" src="images/mouse_middle.png"
    62  <input type="image" alt="Right mousebutton" src="images/mouse_right.png"
    64  <input type="image" alt="Keyboard" src="images/keyboard.png"
    76  <input type="image" alt="Extra keys" src="images/showextrakeys.png"
   [all …]
|
/external/autotest/site_utils/ |
D | restart_latest.py |
    13  process only specific boards and/or a different images directory, use the -b
    16  Expected images directory structure looks like:
    39  # Path to Dev Server's images directory.
    40  DEFAULT_IMAGES_PATH = '/usr/local/google/images'
    50  ' all boards in the images directory will be processed.')
    51  parser.add_option('-i', '--images', dest='images',
    52  help='Path to Dev Server images directory. Defaults to %s' %
    57  if not os.path.exists(options.images):
    58  parser.error('The specified images directory (%s) does not exist. Please '
    59  'specify another.' % options.images)
   [all …]
|
/external/deqp/doc/testspecs/GLES31/ |
D | functional.copy_image.txt |
    25  + 2D texture images
    26  + Cube map texture images
    27  + 3D texture images
    28  + 2D texture array images
    29  + Renderbuffer images
    33  + Using images for rendering before copy
    34  + Not using images for rendering before copy
    48  Test creates two images and initializes them to random data. Both images have
    49  always npot size and texture images are always created so that they are mipmap
    59  Each test case does copying in two different ways. First they create images,
   [all …]
|
/external/opencv3/modules/photo/test/ |
D | test_hdr.cpp |
    61  void loadExposureSeq(String path, vector<Mat>& images, vector<float>& times = DEFAULT_VECTOR)  in loadExposureSeq() argument
    70  images.push_back(img);  in loadExposureSeq()
   159  vector<Mat> images;  in TEST() local
   160  loadExposureSeq((test_path + "exposures/").c_str() , images);  in TEST()
   166  merge->process(images, result);  in TEST()
   173  images.clear();  in TEST()
   174  images.push_back(uniform);  in TEST()
   176  merge->process(images, result);  in TEST()
   185  vector<Mat> images;  in TEST() local
   188  loadExposureSeq(test_path + "exposures/", images, times);  in TEST()
   [all …]
|
/external/opencv3/doc/tutorials/imgproc/morph_lines_detection/ |
D | moprh_lines_detection.md |
    20  Morphology is a set of image processing operations that process images based on predefined *structu…
    26  ![Dilation on a Binary Image](images/morph21.gif)
    28  ![Dilation on a Grayscale Image](images/morph6.gif)
    32  ![Erosion on a Binary Image](images/morph211.png)
    34  ![Erosion on a Grayscale Image](images/morph61.png)
    44  ![A Diamond-Shaped Structuring Element and its Origin](images/morph12.gif)
    59  ![](images/src.png)
    63  ![](images/gray.png)
    67  ![](images/binary.png)
    69  … music notes from the music sheet, but first let's initialize the output images that we will use f…
   [all …]
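The erosion/dilation pair in this excerpt is what isolates staff lines: an opening with a wide, 1-pixel-tall structuring element keeps only horizontal runs. A minimal sketch, assuming a binarized music sheet; the fixed kernel width is an assumption (the tutorial derives it from the image width):

    import cv2

    gray = cv2.imread("music_sheet.png", cv2.IMREAD_GRAYSCALE)  # placeholder
    _, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

    # Erosion deletes anything narrower than the element; dilation restores
    # the survivors, leaving only the horizontal lines.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (40, 1))
    horizontal = cv2.dilate(cv2.erode(bw, kernel), kernel)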
|
/external/testng/doc/samplereport/css/ |
D | maven-classic.css |
   104  background: url(../images/external-classic.png) left top no-repeat;
   108  background: url(../images/external-classic.png) right center no-repeat;
   113  background: url(../images/newwindow-classic.png) left top no-repeat;
   117  background: url(../images/newwindow-classic.png) right center no-repeat;
   145  background-image: url(../images/nw_maj_rond.gif);
   155  background-image: url(../images/sw_maj_rond.gif);
   162  background-image: url(../images/sw_med_rond.gif);
   175  background-image: url(../images/strich.gif);
   240  background-image: url(../images/icon_help_sml.gif);
   246  background: #eee url(../images/help_logo.gif) top right no-repeat !important;
   [all …]
|
/external/opencv3/samples/cpp/ |
D | pca.cpp |
    17  * on this list of images. The author recommends using
    54  static void read_imgList(const string& filename, vector<Mat>& images) {  in read_imgList() argument
    62  images.push_back(imread(line, 0));  in read_imgList()
   132  // vector to hold the images  in main()
   133  vector<Mat> images;  in main() local
   137  read_imgList(imgList, images);  in main()
   143  // Quit if there are not enough images for this demo.  in main()
   144  if(images.size() <= 1) {  in main()
   145  …string error_message = "This demo needs at least 2 images to work. Please add more images to your …  in main()
   149  // Reshape and stack images into a rowMatrix  in main()
   [all …]
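The demo's pipeline, read a list file of image paths as grayscale, refuse to run with fewer than two images, then reshape every image into one row of a data matrix for PCA, maps onto a few NumPy/OpenCV calls. A sketch with a placeholder list file:

    import cv2
    import numpy as np

    def read_img_list(list_path):
        # One path per line, loaded as grayscale (imread(line, 0) above).
        with open(list_path) as f:
            return [cv2.imread(line.strip(), cv2.IMREAD_GRAYSCALE)
                    for line in f if line.strip()]

    images = read_img_list("images.txt")  # placeholder list file
    if len(images) <= 1:
        raise RuntimeError("This demo needs at least 2 images to work.")

    # Reshape and stack: one flattened float row per image.
    data = np.vstack([im.reshape(1, -1).astype(np.float32) for im in images])
    mean, eigvecs = cv2.PCACompute(data, mean=None, maxComponents=10)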
|
/external/opencv3/modules/imgproc/doc/ |
D | colors.markdown |
    33  \f$X\f$, \f$Y\f$ and \f$Z\f$ cover the whole value range (in case of floating-point images, \f$Z\f$…
    47  …ray}{l l} 128 & \mbox{for 8-bit images} \\ 32768 & \mbox{for 16-bit images} \\ 0.5 & \mbox{for …
    54  In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
    64  - 8-bit images: \f$V \leftarrow 255 V, S \leftarrow 255 S, H \leftarrow H/2 \text{(to fit to 0 …
    65  - 16-bit images: (currently not supported) \f$V <- 65535 V, S <- 65535 S, H <- H\f$
    66  - 32-bit images: H, S, and V are left as is
    73  In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
    88  - 8-bit images: \f$V \leftarrow 255 \cdot V, S \leftarrow 255 \cdot S, H \leftarrow H/2 \; \tex…
    89  - 16-bit images: (currently not supported) \f$V <- 65535 \cdot V, S <- 65535 \cdot S, H <- H\f$
    90  - 32-bit images: H, S, V are left as is
   [all …]
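The scaling rules quoted above (for 8-bit images H is halved to fit 0..360 into 0..179, S and V are scaled by 255; 32-bit float images are left unscaled) are easy to verify on a single pure-red pixel:

    import cv2
    import numpy as np

    red = np.uint8([[[0, 0, 255]]])  # one BGR pixel
    print(cv2.cvtColor(red, cv2.COLOR_BGR2HSV))
    # [[[  0 255 255]]] -- H/2, S and V scaled by 255

    red_f = red.astype(np.float32) / 255.0
    print(cv2.cvtColor(red_f, cv2.COLOR_BGR2HSV))
    # [[[0. 1. 1.]]] -- 32-bit float: H in 0..360, S and V left in 0..1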
|
/external/opencv3/modules/stitching/src/ |
D | exposure_compensate.cpp |
    61  void ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,  in feed() argument
    67  feed(corners, images, level_masks);  in feed()
    71  void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,  in feed() argument
    79  CV_Assert(corners.size() == images.size() && images.size() == masks.size());  in feed()
    81  const int num_images = static_cast<int>(images.size());  in feed()
    85  //Rect dst_roi = resultRoi(corners, images);  in feed()
    94  if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi))  in feed()
    96  … subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i])).getMat(ACCESS_READ);  in feed()
    97  … subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);  in feed()
   163  void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,  in feed() argument
   [all …]
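GainCompensator::feed() scans every overlapping image pair, gathers intensity statistics over each overlap ROI, and solves a linear system for per-image gains. A simplified NumPy illustration of that idea, not the stitching module's exact system (which weights the terms differently), built from pairwise overlap means I and overlap sizes N:

    import numpy as np

    def estimate_gains(I, N, alpha=1.0):
        """Gains minimizing sum_{i<j} N_ij (g_i I_ij - g_j I_ji)^2
        + alpha * sum_i (g_i - 1)^2, where I[i, j] is image i's mean
        intensity over its overlap with image j and N is the overlap size."""
        n = I.shape[0]
        A = np.zeros((n, n))
        b = np.full(n, alpha)  # right-hand side from the (g_i - 1)^2 prior
        for k in range(n):
            A[k, k] = alpha
            for j in range(n):
                if j != k:
                    A[k, k] += N[k, j] * I[k, j] ** 2
                    A[k, j] -= N[k, j] * I[k, j] * I[j, k]
        return np.linalg.solve(A, b)

    # Toy case: image 1 is uniformly darker than image 0 in their overlap,
    # so it receives a gain above 1.
    I = np.array([[0.0, 120.0], [80.0, 0.0]])
    N = np.array([[0.0, 500.0], [500.0, 0.0]])
    print(estimate_gains(I, N))  # approx. [0.77, 1.15]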
|