/external/libjpeg-turbo/simd/ |
D | jcsample-sse2-64.asm |
      100  psrlw xmm6,BYTE_BIT ; xmm6={0xFF 0x00 0xFF 0x00 ..}
      130  psrlw xmm2,BYTE_BIT
      132  psrlw xmm3,BYTE_BIT
      138  psrlw xmm0,1
      139  psrlw xmm1,1
      244  psrlw xmm6,BYTE_BIT ; xmm6={0xFF 0x00 0xFF 0x00 ..}
      278  psrlw xmm4,BYTE_BIT
      280  psrlw xmm5,BYTE_BIT
      287  psrlw xmm4,BYTE_BIT
      289  psrlw xmm5,BYTE_BIT
      [all …]
|
D | jcsample-mmx.asm |
      103  psrlw mm6,BYTE_BIT ; mm6={0xFF 0x00 0xFF 0x00 ..}
      124  psrlw mm2,BYTE_BIT
      126  psrlw mm3,BYTE_BIT
      132  psrlw mm0,1
      133  psrlw mm1,1
      245  psrlw mm6,BYTE_BIT ; mm6={0xFF 0x00 0xFF 0x00 ..}
      269  psrlw mm4,BYTE_BIT
      271  psrlw mm5,BYTE_BIT
      278  psrlw mm4,BYTE_BIT
      280  psrlw mm5,BYTE_BIT
      [all …]
|
D | jcsample-sse2.asm |
      103  psrlw xmm6,BYTE_BIT ; xmm6={0xFF 0x00 0xFF 0x00 ..}
      136  psrlw xmm2,BYTE_BIT
      138  psrlw xmm3,BYTE_BIT
      144  psrlw xmm0,1
      145  psrlw xmm1,1
      258  psrlw xmm6,BYTE_BIT ; xmm6={0xFF 0x00 0xFF 0x00 ..}
      295  psrlw xmm4,BYTE_BIT
      297  psrlw xmm5,BYTE_BIT
      304  psrlw xmm4,BYTE_BIT
      306  psrlw xmm5,BYTE_BIT
      [all …]
|
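The jcsample rows above lean on two psrlw idioms: shifting by BYTE_BIT (8) either turns an all-ones register into the {0x00FF, 0x00FF, ..} byte mask noted in the comments or pulls the odd bytes of a packed row down into words, and shifting by 1 halves the sum of neighboring samples during 2:1 downsampling. A minimal C sketch of that step with SSE2 intrinsics (illustrative only, not libjpeg-turbo's actual code or names):

#include <emmintrin.h>  /* SSE2 */

/* Average each horizontal pair of bytes in 'row', roughly the shape of
 * the h2v1 downsample step the psrlw lines above come from. */
static __m128i average_pairs(__m128i row, __m128i bias)  /* bias: 0 or 1 per word */
{
  /* All-ones shifted right by 8 gives 0x00FF in every word: the
   * "psrlw xmm6,BYTE_BIT ; xmm6={0xFF 0x00 ..}" mask. */
  __m128i ones = _mm_cmpeq_epi16(row, row);
  __m128i mask = _mm_srli_epi16(ones, 8);

  __m128i even = _mm_and_si128(row, mask);   /* bytes 0,2,4,... widened to words */
  __m128i odd  = _mm_srli_epi16(row, 8);     /* bytes 1,3,5,... widened to words */

  /* (even + odd + bias) >> 1 is the "psrlw xmm0,1" averaging shift. */
  return _mm_srli_epi16(_mm_add_epi16(_mm_add_epi16(even, odd), bias), 1);
}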
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | highbd_subpel_variance_impl_sse2.asm |
      336  psrlw m1, 4
      337  psrlw m0, 4
      364  psrlw m1, 4
      365  psrlw m0, 4
      566  psrlw m1, 4
      569  psrlw m0, 4
      605  psrlw m4, 4
      608  psrlw m0, 4
      680  psrlw m1, 4
      681  psrlw m0, 4
      [all …]
|
D | quantize_ssse3_x86_64.asm |
      38   psrlw m5, 15
      41   psrlw m0, 1 ; m0 = (m0 + 1) / 2
      42   psrlw m1, 1 ; m1 = (m1 + 1) / 2
      129  psrlw m8, 1
      130  psrlw m13, 1
      230  psrlw m14, 1
      231  psrlw m13, 1
|
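In quantize_ssse3_x86_64.asm the psrlw lines are a rounding setup: an all-ones register shifted right by 15 leaves 1 in every word, and the subsequent shifts by 1 implement the "(m0 + 1) / 2" halving spelled out in the comments; the psrlw m?, 4 lines in highbd_subpel_variance_impl_sse2.asm are divide-by-16 rounding shifts in the sub-pel filtering path. A hedged intrinsics sketch of the halving idiom (not the vpx code itself):

#include <emmintrin.h>  /* SSE2 */

/* Halve a per-lane rounding constant with round-up, i.e. (round + 1) >> 1,
 * matching the "(m0 + 1) / 2" comments above. */
static __m128i halve_round(__m128i round)
{
  __m128i all_ones = _mm_cmpeq_epi16(round, round);     /* 0xFFFF per word   */
  __m128i one      = _mm_srli_epi16(all_ones, 15);      /* psrlw ...,15 -> 1 */
  return _mm_srli_epi16(_mm_add_epi16(round, one), 1);  /* psrlw ...,1       */
}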
/external/llvm/test/CodeGen/X86/ |
D | vector-popcnt-128.ll |
      60   ; SSSE3-NEXT: psrlw $4, %xmm0
      76   ; SSE41-NEXT: psrlw $4, %xmm0
      162  ; SSSE3-NEXT: psrlw $4, %xmm0
      184  ; SSE41-NEXT: psrlw $4, %xmm0
      223  ; SSE2-NEXT: psrlw $1, %xmm1
      229  ; SSE2-NEXT: psrlw $2, %xmm0
      233  ; SSE2-NEXT: psrlw $4, %xmm1
      239  ; SSE2-NEXT: psrlw $8, %xmm0
      245  ; SSE3-NEXT: psrlw $1, %xmm1
      251  ; SSE3-NEXT: psrlw $2, %xmm0
      [all …]
|
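vector-popcnt-128.ll checks the usual vector popcount lowerings: the SSSE3/SSE4.1 paths use psrlw $4 to expose the high nibble of every byte so that two pshufb lookups into a 16-entry table can count bits per nibble, while the SSE2 path falls back to the shift-and-mask ladder (psrlw $1/$2/$4/$8). A sketch of the nibble-lookup form with intrinsics:

#include <tmmintrin.h>  /* SSSE3 */

/* Per-byte population count via nibble table lookups. */
static __m128i popcnt_epi8(__m128i v)
{
  const __m128i lut = _mm_setr_epi8(0,1,1,2, 1,2,2,3, 1,2,2,3, 2,3,3,4);
  const __m128i nib = _mm_set1_epi8(0x0F);

  __m128i lo = _mm_and_si128(v, nib);
  __m128i hi = _mm_and_si128(_mm_srli_epi16(v, 4), nib);  /* the psrlw $4 */
  return _mm_add_epi8(_mm_shuffle_epi8(lut, lo),
                      _mm_shuffle_epi8(lut, hi));
}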
D | vector-shift-lshr-128.ll |
      200  ; SSE2-NEXT: psrlw $8, %xmm0
      208  ; SSE2-NEXT: psrlw $4, %xmm0
      216  ; SSE2-NEXT: psrlw $2, %xmm0
      223  ; SSE2-NEXT: psrlw $1, %xmm0
      238  ; SSE41-NEXT: psrlw $8, %xmm4
      242  ; SSE41-NEXT: psrlw $4, %xmm1
      246  ; SSE41-NEXT: psrlw $2, %xmm1
      251  ; SSE41-NEXT: psrlw $1, %xmm1
      309  ; X32-SSE-NEXT: psrlw $8, %xmm0
      317  ; X32-SSE-NEXT: psrlw $4, %xmm0
      [all …]
|
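vector-shift-lshr-128.ll exercises per-lane variable shifts, which SSE has no single instruction for; the lowering decomposes each count into its bits and applies psrlw by 8, 4, 2 and 1 only to the lanes whose corresponding count bit is set (masks/blends do the per-lane selection in the vector code). A scalar model of the decomposition:

#include <stdint.h>

/* Scalar model of the SSE2/SSE4.1 lowering above: a shift by c (0..15)
 * is assembled from conditional shifts by 8, 4, 2 and 1. */
static uint16_t lshr16_by_parts(uint16_t x, unsigned c)
{
  if (c & 8) x >>= 8;   /* psrlw $8, applied only where bit 3 of the count is set */
  if (c & 4) x >>= 4;   /* psrlw $4 */
  if (c & 2) x >>= 2;   /* psrlw $2 */
  if (c & 1) x >>= 1;   /* psrlw $1 */
  return x;
}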
D | vector-idiv-udiv-128.ll |
      155  ; SSE-NEXT: psrlw $1, %xmm0
      157  ; SSE-NEXT: psrlw $2, %xmm0
      176  ; SSE2-NEXT: psrlw $8, %xmm1
      179  ; SSE2-NEXT: psrlw $8, %xmm2
      181  ; SSE2-NEXT: psrlw $8, %xmm2
      184  ; SSE2-NEXT: psrlw $8, %xmm3
      186  ; SSE2-NEXT: psrlw $8, %xmm3
      189  ; SSE2-NEXT: psrlw $1, %xmm0
      192  ; SSE2-NEXT: psrlw $2, %xmm0
      201  ; SSE41-NEXT: psrlw $8, %xmm1
      [all …]
|
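vector-idiv-udiv-128.ll shows the standard lowering of unsigned division by a constant: a multiply-high estimate followed by a shift fixup, which is where the psrlw $1 and psrlw $2 lines come from (the psrlw $8 lines unpack bytes to words around the multiply). A scalar sketch for the 8-bit divide-by-7 case, using the classic magic multiplier 37 as an illustration of the pattern (not lifted from the test):

#include <stdint.h>

/* x / 7 for unsigned 8-bit x via multiply-high plus fixup. */
static uint8_t udiv7(uint8_t x)
{
  uint8_t q = (uint8_t)(((uint16_t)x * 37) >> 8);  /* pmulhuw-style estimate */
  uint8_t t = (uint8_t)((x - q) >> 1);             /* psrlw $1 */
  return (uint8_t)((t + q) >> 2);                  /* psrlw $2 */
}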
D | vector-bitreverse.ll |
      1025  ; SSE2-NEXT: psrlw $7, %xmm2
      1048  ; SSE2-NEXT: psrlw $1, %xmm4
      1053  ; SSE2-NEXT: psrlw $3, %xmm3
      1057  ; SSE2-NEXT: psrlw $5, %xmm0
      1072  ; SSSE3-NEXT: psrlw $4, %xmm0
      1132  ; SSE2-NEXT: psrlw $1, %xmm3
      1137  ; SSE2-NEXT: psrlw $3, %xmm0
      1142  ; SSE2-NEXT: psrlw $5, %xmm3
      1146  ; SSE2-NEXT: psrlw $7, %xmm1
      1162  ; SSSE3-NEXT: psrlw $4, %xmm0
      [all …]
|
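vector-bitreverse.ll covers two lowerings: the SSE2 path moves bit groups with paired left/right shifts and masks (the psrlw $1/$3/$5/$7 lines), while the SSSE3 path uses psrlw $4 plus two pshufb nibble lookups. A sketch of the nibble-lookup form:

#include <tmmintrin.h>  /* SSSE3 */

/* Reverse the bits of every byte: look up the reversed low and high
 * nibbles separately and recombine them. */
static __m128i bitrev_epi8(__m128i v)
{
  const __m128i rev_lo = _mm_setr_epi8(                 /* rev4(i)      */
      0x00, 0x08, 0x04, 0x0C, 0x02, 0x0A, 0x06, 0x0E,
      0x01, 0x09, 0x05, 0x0D, 0x03, 0x0B, 0x07, 0x0F);
  const __m128i rev_hi = _mm_setr_epi8(                 /* rev4(i) << 4 */
      0x00, (char)0x80, 0x40, (char)0xC0, 0x20, (char)0xA0, 0x60, (char)0xE0,
      0x10, (char)0x90, 0x50, (char)0xD0, 0x30, (char)0xB0, 0x70, (char)0xF0);
  const __m128i nib = _mm_set1_epi8(0x0F);

  __m128i lo = _mm_and_si128(v, nib);                    /* low nibbles */
  __m128i hi = _mm_and_si128(_mm_srli_epi16(v, 4), nib); /* psrlw $4    */
  /* The reversed low nibble lands in the high half of the result byte. */
  return _mm_or_si128(_mm_shuffle_epi8(rev_hi, lo),
                      _mm_shuffle_epi8(rev_lo, hi));
}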
D | vector-tzcnt-128.ll |
      309  ; SSSE3-NEXT: psrlw $4, %xmm2
      334  ; SSE41-NEXT: psrlw $4, %xmm2
      445  ; X32-SSE-NEXT: psrlw $4, %xmm2
      532  ; SSSE3-NEXT: psrlw $4, %xmm2
      557  ; SSE41-NEXT: psrlw $4, %xmm2
      645  ; X32-SSE-NEXT: psrlw $4, %xmm2
      668  ; SSE2-NEXT: psrlw $1, %xmm0
      674  ; SSE2-NEXT: psrlw $2, %xmm1
      678  ; SSE2-NEXT: psrlw $4, %xmm2
      684  ; SSE2-NEXT: psrlw $8, %xmm0
      [all …]
|
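The tzcnt tests reuse the popcount kernel above: counting trailing zeros reduces to cttz(x) = popcount((x & -x) - 1), so the psrlw $4/$1/$2/$8 sequences here are the embedded popcount. The reduction in scalar form:

#include <stdint.h>

/* Count trailing zeros by reducing to a population count: x & -x isolates
 * the lowest set bit, and subtracting 1 turns the trailing zeros into ones. */
static unsigned cttz16(uint16_t x)
{
  uint16_t m = (uint16_t)((x & (uint16_t)-x) - 1);
  unsigned n = 0;
  while (m) { n += m & 1u; m >>= 1; }  /* popcount stand-in */
  return n;                            /* yields 16 for x == 0 */
}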
D | 2012-02-23-mmx-inlineasm.ll |
      5  ; CHECK: psrlw %mm0, %mm1
      7  call void asm sideeffect "psrlw $0, %mm1", "y,~{dirflag},~{fpsr},~{flags}"(i32 8) nounwind
|
D | 2007-03-24-InlineAsmXConstraint.ll |
      8   ; CHECK: psrlw $8, %xmm0
      11  tail call void asm sideeffect "psrlw $0, %xmm0", "X,~{dirflag},~{fpsr},~{flags}"( i32 8 )
|
D | vector-rotate-128.ll |
      296  ; SSE2-NEXT: psrlw $8, %xmm0
      304  ; SSE2-NEXT: psrlw $4, %xmm0
      312  ; SSE2-NEXT: psrlw $2, %xmm0
      319  ; SSE2-NEXT: psrlw $1, %xmm0
      363  ; SSE41-NEXT: psrlw $8, %xmm4
      367  ; SSE41-NEXT: psrlw $4, %xmm2
      371  ; SSE41-NEXT: psrlw $2, %xmm2
      376  ; SSE41-NEXT: psrlw $1, %xmm2
      481  ; X32-SSE-NEXT: psrlw $8, %xmm0
      489  ; X32-SSE-NEXT: psrlw $4, %xmm0
      [all …]
|
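vector-rotate-128.ll lowers a 16-bit rotate as a right shift ORed with the complementary left shift; the variable-count versions reuse the same conditional psrlw-by-8/4/2/1 ladder as the lshr test above. For a uniform count the idiom is simply:

#include <emmintrin.h>  /* SSE2 */

/* Rotate every 16-bit lane right by r (1..15). */
static __m128i ror_epi16(__m128i v, int r)
{
  return _mm_or_si128(_mm_srli_epi16(v, r),        /* psrlw */
                      _mm_slli_epi16(v, 16 - r));  /* psllw */
}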
D | pr16807.ll |
      11  ; CHECK: psrlw
      15  ; CHECK: psrlw
|
D | vector-idiv-sdiv-128.ll |
      159  ; SSE-NEXT: psrlw $15, %xmm1
      184  ; SSE2-NEXT: psrlw $8, %xmm3
      189  ; SSE2-NEXT: psrlw $8, %xmm1
      193  ; SSE2-NEXT: psrlw $2, %xmm0
      198  ; SSE2-NEXT: psrlw $7, %xmm1
      209  ; SSE41-NEXT: psrlw $8, %xmm1
      213  ; SSE41-NEXT: psrlw $8, %xmm3
      217  ; SSE41-NEXT: psrlw $2, %xmm0
      222  ; SSE41-NEXT: psrlw $7, %xmm1
      463  ; SSE-NEXT: psrlw $15, %xmm2
      [all …]
|
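The sdiv test pairs the multiply-high estimate with psrlw $15: a logical shift right by 15 extracts a sign bit as a clean 0 or 1, which is added back so the division truncates toward zero (the $2/$7 shifts belong to the 8-bit variant). A worked scalar sketch for 16-bit divide-by-7, with 18725 = ceil(2^17 / 7) used as the illustrative magic constant:

#include <stdint.h>

/* x / 7 for signed 16-bit x: multiply-high, arithmetic shift, then add the
 * sign bit recovered by a logical shift right of 15.  The dividend and the
 * quotient estimate share a sign, so either one can supply that bit. */
static int16_t sdiv7(int16_t x)
{
  int16_t q = (int16_t)(((int32_t)x * 18725) >> 16);  /* pmulhw-style estimate */
  q = (int16_t)(q >> 1);                              /* psraw $1              */
  q = (int16_t)(q + ((uint16_t)x >> 15));             /* psrlw $15 sign fixup  */
  return q;
}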
D | lower-vec-shift.ll |
      16  ; SSE: psrlw
      17  ; SSE-NEXT: psrlw
      33  ; SSE: psrlw
      34  ; SSE-NEXT: psrlw
|
D | x86-shifts.ll |
      82   ; CHECK: psrlw
      83   ; CHECK-NEXT: psrlw
      175  ; CHECK: psrlw $3
      193  ; CHECK: psrlw $3
|
D | vec_shift2.ll |
      10  ; X32-NEXT: psrlw %xmm1, %xmm0
      17  ; X64-NEXT: psrlw %xmm1, %xmm0
|
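The remaining shift tests (lower-vec-shift.ll, x86-shifts.ll, vec_shift2.ll) collectively cover the two plain forms of psrlw: a constant count encoded as an immediate and a uniform count supplied in an XMM register. The intrinsics expose them separately:

#include <emmintrin.h>  /* SSE2 */

static __m128i srl_by_3(__m128i v)              { return _mm_srli_epi16(v, 3); }  /* psrlw $3 form          */
static __m128i srl_by_reg(__m128i v, __m128i c) { return _mm_srl_epi16(v, c);  }  /* psrlw %xmm1, %xmm0 form */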
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_quantize_ssse3_x86_64.asm |
      38   psrlw m5, 15
      40   psrlw m1, 1 ; m1 = (m1 + 1) / 2
      82   psrlw m8, 1
      83   psrlw m13, 1
      86   psrlw m0, m3, 2
      88   psrlw m0, m3, 1
      135  psrlw m14, 1
      136  psrlw m13, 1
|
/external/libyuv/files/source/ |
D | scale_win.cc |
      105  psrlw xmm0, 8 // isolate odd pixels.  in ScaleRowDown2_SSSE3()
      106  psrlw xmm1, 8                         in ScaleRowDown2_SSSE3()
      129  psrlw xmm4, 15                        in ScaleRowDown2Linear_SSSE3()
      164  psrlw xmm4, 15                        in ScaleRowDown2Box_SSSE3()
      180  psrlw xmm0, 1                         in ScaleRowDown2Box_SSSE3()
      181  psrlw xmm1, 1                         in ScaleRowDown2Box_SSSE3()
      331  psrlw xmm0, 8                         in ScaleRowDown4_SSSE3()
      356  psrlw xmm4, 15                        in ScaleRowDown4Box_SSSE3()
      387  psrlw xmm0, 4 // /16 for average of 4 * 4  in ScaleRowDown4Box_SSSE3()
      573  psrlw xmm0, 2                         in ScaleRowDown34_1_Box_SSSE3()
      [all …]
|
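The libyuv scaling rows combine the same idioms: psrlw xmm?, 8 keeps the odd bytes of each pixel pair, psrlw xmm4, 15 appears to build the small constant later used as pmaddubsw weights, and psrlw by 1, 2 or 4 divides the accumulated sums by 2, 4 or 16 (the "/16 for average of 4 * 4" comment). A rough intrinsics sketch of a 2x2 box average in that style (illustrative, not libyuv's routine):

#include <tmmintrin.h>  /* SSSE3 */

/* Average 2x2 blocks of bytes taken from two adjacent rows. */
static __m128i box2x2(__m128i row0, __m128i row1)
{
  /* Build 0x01 byte weights from all-ones, in the psrlw ...,15 style. */
  __m128i one_w = _mm_srli_epi16(_mm_cmpeq_epi16(row0, row0), 15);
  __m128i one_b = _mm_packs_epi16(one_w, one_w);

  __m128i sum0 = _mm_maddubs_epi16(row0, one_b);   /* horizontal pair sums */
  __m128i sum1 = _mm_maddubs_epi16(row1, one_b);
  __m128i sum  = _mm_add_epi16(sum0, sum1);        /* 4-pixel sums         */

  sum = _mm_srli_epi16(_mm_add_epi16(sum, _mm_set1_epi16(2)), 2); /* (s+2)>>2 */
  return _mm_packus_epi16(sum, sum);               /* back to bytes        */
}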
/external/libvpx/libvpx/third_party/libyuv/source/ |
D | scale_win.cc |
      110  psrlw xmm0, 8 // isolate odd pixels.  in ScaleRowDown2_SSSE3()
      111  psrlw xmm1, 8                         in ScaleRowDown2_SSSE3()
      133  psrlw xmm4, 15                        in ScaleRowDown2Linear_SSSE3()
      167  psrlw xmm4, 15                        in ScaleRowDown2Box_SSSE3()
      183  psrlw xmm0, 1                         in ScaleRowDown2Box_SSSE3()
      184  psrlw xmm1, 1                         in ScaleRowDown2Box_SSSE3()
      330  psrlw xmm0, 8                         in ScaleRowDown4_SSSE3()
      354  psrlw xmm4, 15                        in ScaleRowDown4Box_SSSE3()
      385  psrlw xmm0, 4 // /16 for average of 4 * 4  in ScaleRowDown4Box_SSSE3()
      568  psrlw xmm0, 2                         in ScaleRowDown34_1_Box_SSSE3()
      [all …]
|
/external/llvm/test/Analysis/CostModel/X86/ |
D | testshiftlshr.ll |
      34   ; SSE2-CODEGEN: psrlw
      46   ; SSE2-CODEGEN: psrlw
      58   ; SSE2-CODEGEN: psrlw
      214  ; SSE2-CODEGEN: psrlw
      226  ; SSE2-CODEGEN: psrlw
      238  ; SSE2-CODEGEN: psrlw
      276  ; SSE2-CODEGEN: psrlw $3
      290  ; SSE2-CODEGEN: psrlw $3
      306  ; SSE2-CODEGEN: psrlw $3
      490  ; SSE2-CODEGEN: psrlw $3
      [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | 2007-03-24-InlineAsmXConstraint.ll |
      8   ; CHECK: psrlw $8, %xmm0
      11  tail call void asm sideeffect "psrlw $0, %xmm0", "X,~{dirflag},~{fpsr},~{flags}"( i32 8 )
|
/external/mesa3d/src/mesa/x86/ |
D | read_rgba_span_x86.S |
      564  psrlw $SCALE_ADJUST, %mm0
      565  psrlw $SCALE_ADJUST, %mm2
      599  psrlw $SCALE_ADJUST, %mm0
      600  psrlw $SCALE_ADJUST, %mm2
      637  psrlw $SCALE_ADJUST, %mm0
      638  psrlw $SCALE_ADJUST, %mm2
      667  psrlw $SCALE_ADJUST, %mm0
|
/external/libvpx/libvpx/vp8/encoder/x86/ |
D | quantize_mmx.asm |
      214  psrlw mm0, 15
      216  psrlw mm1, 15
      237  psrlw mm0, 15
      239  psrlw mm1, 15
|
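In the MMX quantizer the shift count is 15, which suggests the generic mask idiom rather than arithmetic: logically shifting a 16-bit compare mask (0xFFFF / 0x0000) or a sign bit right by 15 leaves a clean 0 or 1 per lane that can be accumulated or added in. A generic SSE2 sketch of that idiom (not the vp8 code, which is MMX):

#include <emmintrin.h>  /* SSE2 */

/* psrlw ...,15 on a compare mask: 0xFFFF -> 1, 0x0000 -> 0. */
static __m128i mask_to_01(__m128i mask)
{
  return _mm_srli_epi16(mask, 15);
}

/* Example use: a per-lane 0/1 flag for "coefficient is nonzero". */
static __m128i nonzero_flags(__m128i coeffs)
{
  __m128i eqz = _mm_cmpeq_epi16(coeffs, _mm_setzero_si128());
  __m128i all = _mm_cmpeq_epi16(eqz, eqz);       /* 0xFFFF in every lane */
  return mask_to_01(_mm_xor_si128(eqz, all));    /* invert, then >> 15   */
}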