/external/libhevc/decoder/arm/ |
D | ihevcd_fmt_conv_420sp_to_rgba8888.s |
    117  VMOV.16 D0[0],R10    @//C1
    120  VMOV.16 D0[1],R10    @//C2
    123  VMOV.16 D0[2],R10    @//C3
    126  VMOV.16 D0[3],R10    @//C4
    175  @VMOV.I8 Q1,#128
    229  VMOV.I8 D17,#0
    239  VMOV.I8 D23,#0
    280  VMOV.I8 D17,#0
    290  VMOV.I8 D23,#0
    312  @VMOV.I8 Q1,#128
    [all …]
|
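The lane moves at lines 117-126 above load four 16-bit colour-conversion coefficients into D0, one halfword at a time. Below is a minimal NEON-intrinsics sketch of that pattern; the function name load_conv_coeffs and the parameters c1..c4 are placeholders of mine, not identifiers or values taken from ihevcd_fmt_conv_420sp_to_rgba8888.s.

    /* Sketch only: VMOV.16 D0[k],R10 inserts a halfword into lane k of D0,
     * which is what vset_lane_s16() compiles down to. */
    #include <arm_neon.h>

    static inline int16x4_t load_conv_coeffs(int16_t c1, int16_t c2,
                                             int16_t c3, int16_t c4)
    {
        int16x4_t coeffs = vdup_n_s16(0);
        coeffs = vset_lane_s16(c1, coeffs, 0);   /* VMOV.16 D0[0],R10  @//C1 */
        coeffs = vset_lane_s16(c2, coeffs, 1);   /* VMOV.16 D0[1],R10  @//C2 */
        coeffs = vset_lane_s16(c3, coeffs, 2);   /* VMOV.16 D0[2],R10  @//C3 */
        coeffs = vset_lane_s16(c4, coeffs, 3);   /* VMOV.16 D0[3],R10  @//C4 */
        return coeffs;
    }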
/external/libhevc/common/arm/ |
D | ihevc_sao_edge_offset_class0_chroma.s |
    82   VMOV.I8 Q1,#2        @const_2 = vdupq_n_s8(2)
    86   VMOV.I16 Q2,#0       @const_min_clip = vdupq_n_s16(0)
    90   VMOV.I16 Q3,#255     @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    96   VMOV.S8 Q4,#0xFF     @au1_mask = vdupq_n_s8(-1)
    126  VMOV.8 D8[0],r12     @vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
    127  VMOV.8 D8[1],r12     @vsetq_lane_s8(pu1_avail[0], au1_mask, 1)
    132  VMOV.16 D8[0],r12    @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
    138  VMOV.8 D9[6],r12     @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 14)
    139  VMOV.8 D9[7],r12     @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
    153  …VMOV.16 D15[3],r11  @vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 1…
    [all …]
|
D | ihevc_sao_edge_offset_class3_chroma.s |
    275  VMOV.I8 Q0,#2        @const_2 = vdupq_n_s8(2)
    276  VMOV.I16 Q1,#0       @const_min_clip = vdupq_n_s16(0)
    277  VMOV.I16 Q2,#255     @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    285  VMOV.S8 Q4,#0xFF     @au1_mask = vdupq_n_s8(-1)
    301  VMOV.8 D8[0],r8      @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
    305  VMOV.8 D8[1],r8      @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
    309  VMOV.8 D9[6],r8      @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
    310  VMOV.8 D9[7],r8      @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
    320  VMOV.I8 Q9,#0
    351  VMOV.I8 Q9,#0        @I
    [all …]
|
D | ihevc_sao_edge_offset_class2_chroma.s |
    263  VMOV.I8 Q0,#2        @const_2 = vdupq_n_s8(2)
    267  VMOV.I16 Q1,#0       @const_min_clip = vdupq_n_s16(0)
    271  VMOV.I16 Q2,#255     @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    293  VMOV.S8 Q4,#0xFF     @au1_mask = vdupq_n_s8(-1)
    307  VMOV.8 D8[0],r8      @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
    310  VMOV.8 D8[1],r8      @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
    314  VMOV.8 D9[6],r8      @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
    315  VMOV.8 D9[7],r8      @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
    364  VMOV.I8 Q9,#0
    368  …VMOV.16 D18[0],r5   @I pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd +…
    [all …]
|
D | ihevc_sao_edge_offset_class0.s |
    78   VMOV.I8 Q1,#2        @const_2 = vdupq_n_s8(2)
    82   VMOV.I16 Q2,#0       @const_min_clip = vdupq_n_s16(0)
    86   VMOV.I16 Q3,#255     @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    92   VMOV.S8 Q4,#0xFF     @au1_mask = vdupq_n_s8(-1)
    122  VMOV.8 D8[0],r12     @vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
    127  VMOV.8 D8[0],r12     @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
    133  VMOV.8 D9[7],r12     @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
    147  …VMOV.8 D15[7],r11   @vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
    161  …VMOV.8 D29[7],r11   @II Iteration vsetq_lane_u8(pu1_src_left[ht - row], pu1_cu…
    173  …VMOV.8 D14[0],r11   @pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_…
    [all …]
|
D | ihevc_sao_edge_offset_class2.s |
    181  VMOV.I8 Q0,#2        @const_2 = vdupq_n_s8(2)
    185  VMOV.I16 Q1,#0       @const_min_clip = vdupq_n_s16(0)
    189  VMOV.I16 Q2,#255     @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    203  VMOV.S8 Q4,#0xFF     @au1_mask = vdupq_n_s8(-1)
    221  … VMOV.8 d8[0],r8    @au1_mask = vsetq_lane_s8((-1||pu1_avail[0]), au1_mask, 0)
    225  VMOV.8 d9[7],r8      @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
    265  VMOV.I8 Q9,#0
    275  …VMOV.8 D18[0],r5    @I pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd +…
    290  …VMOV.8 D14[0],r4    @I sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] -…
    317  VMOV Q6,Q8           @I pu1_cur_row = pu1_next_row
    [all …]
|
D | ihevc_sao_edge_offset_class3.s |
    193  VMOV.I8 Q0,#2        @const_2 = vdupq_n_s8(2)
    197  VMOV.I16 Q1,#0       @const_min_clip = vdupq_n_s16(0)
    201  VMOV.I16 Q2,#255     @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    211  VMOV.S8 Q4,#0xFF     @au1_mask = vdupq_n_s8(-1)
    230  VMOV.8 d8[0],r8      @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
    235  VMOV.8 d9[7],r8      @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
    272  VMOV.I8 Q9,#0
    289  VMOV.8 D19[7],r8     @I vsetq_lane_u8
    305  …VMOV.8 D15[7],r8    @I sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] - pu1_src_…
    332  VMOV Q6,Q8
    [all …]
|
D | ihevc_sao_edge_offset_class1.s |
    105  VMOV.I8 Q0,#2        @const_2 = vdupq_n_s8(2)
    106  VMOV.I16 Q1,#0       @const_min_clip = vdupq_n_s16(0)
    107  VMOV.I16 Q2,#255     @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    189  VMOV Q5,Q15          @II pu1_cur_row = pu1_next_row
    254  VMOV Q5,Q9           @pu1_cur_row = pu1_next_row
    329  VMOV Q5,Q15          @II pu1_cur_row = pu1_next_row
|
D | ihevc_sao_edge_offset_class1_chroma.s |
    109  VMOV.I8 Q0,#2        @const_2 = vdupq_n_s8(2)
    110  VMOV.I16 Q1,#0       @const_min_clip = vdupq_n_s16(0)
    111  VMOV.I16 Q2,#255     @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    197  VMOV Q5,Q15          @II pu1_cur_row = pu1_next_row
    272  VMOV Q5,Q9           @pu1_cur_row = pu1_next_row
    360  VMOV Q5,Q15          @II pu1_cur_row = pu1_next_row
|
D | ihevc_sao_band_offset_chroma.s |
    141  VMOV.I8 D30,#16      @vdup_n_u8(16)
    224  VMOV.I8 D29,#16      @vdup_n_u8(16)
|
D | ihevc_sao_band_offset_luma.s | 132 VMOV.I8 D29,#16 @vdup_n_u8(16)
|
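The SAO listings above annotate each VMOV with the NEON intrinsic it stands in for (vdupq_n_s8, vdupq_n_s16, vdupq_n_u16, vsetq_lane_s8). The following is a minimal sketch of that constant and mask setup in intrinsic form; it ignores the per-class register allocation, and the function name and the free choice of bit_depth are mine rather than anything in the assembly files.

    /* Sketch only: the clip constants and the availability mask that the
     * edge-offset kernels build with VMOV, written with the intrinsics the
     * listing's own comments name. */
    #include <arm_neon.h>

    static void sao_setup_constants(const uint8_t *pu1_avail, int bit_depth)
    {
        int8x16_t  const_2        = vdupq_n_s8(2);                     /* VMOV.I8  Qx,#2    */
        int16x8_t  const_min_clip = vdupq_n_s16(0);                    /* VMOV.I16 Qx,#0    */
        uint16x8_t const_max_clip = vdupq_n_u16((1 << bit_depth) - 1); /* VMOV.I16 Qx,#255  */
        int8x16_t  au1_mask       = vdupq_n_s8(-1);                    /* VMOV.S8  Qx,#0xFF */

        /* Edge lanes are enabled or disabled from pu1_avail[], one lane at a time. */
        au1_mask = vsetq_lane_s8((int8_t)pu1_avail[0], au1_mask, 0);   /* VMOV.8 D8[0],rX   */
        au1_mask = vsetq_lane_s8((int8_t)pu1_avail[1], au1_mask, 15);  /* VMOV.8 D9[7],rX   */

        (void)const_2; (void)const_min_clip; (void)const_max_clip; (void)au1_mask;
    }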
/external/arm-neon-tests/ |
D | ref-rvct-neon.txt |
    240  VMOV/VMOVQ output:
    241  VMOV/VMOVQ:0:result_int8x8 [] = { fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffff…
    242  VMOV/VMOVQ:1:result_int16x4 [] = { fffffff0, fffffff0, fffffff0, fffffff0, }
    243  VMOV/VMOVQ:2:result_int32x2 [] = { fffffff0, fffffff0, }
    244  VMOV/VMOVQ:3:result_int64x1 [] = { fffffffffffffff0, }
    245  VMOV/VMOVQ:4:result_uint8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
    246  VMOV/VMOVQ:5:result_uint16x4 [] = { fff0, fff0, fff0, fff0, }
    247  VMOV/VMOVQ:6:result_uint32x2 [] = { fffffff0, fffffff0, }
    248  VMOV/VMOVQ:7:result_uint64x1 [] = { fffffffffffffff0, }
    249  VMOV/VMOVQ:8:result_poly8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
    [all …]
|
D | ref-rvct-neon-nofp16.txt |
    222  VMOV/VMOVQ output:
    223  VMOV/VMOVQ:0:result_int8x8 [] = { fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffff…
    224  VMOV/VMOVQ:1:result_int16x4 [] = { fffffff0, fffffff0, fffffff0, fffffff0, }
    225  VMOV/VMOVQ:2:result_int32x2 [] = { fffffff0, fffffff0, }
    226  VMOV/VMOVQ:3:result_int64x1 [] = { fffffffffffffff0, }
    227  VMOV/VMOVQ:4:result_uint8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
    228  VMOV/VMOVQ:5:result_uint16x4 [] = { fff0, fff0, fff0, fff0, }
    229  VMOV/VMOVQ:6:result_uint32x2 [] = { fffffff0, fffffff0, }
    230  VMOV/VMOVQ:7:result_uint64x1 [] = { fffffffffffffff0, }
    231  VMOV/VMOVQ:8:result_poly8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
    [all …]
|
D | ref-rvct-all.txt |
    240  VMOV/VMOVQ output:
    241  VMOV/VMOVQ:0:result_int8x8 [] = { fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffffff0, fffff…
    242  VMOV/VMOVQ:1:result_int16x4 [] = { fffffff0, fffffff0, fffffff0, fffffff0, }
    243  VMOV/VMOVQ:2:result_int32x2 [] = { fffffff0, fffffff0, }
    244  VMOV/VMOVQ:3:result_int64x1 [] = { fffffffffffffff0, }
    245  VMOV/VMOVQ:4:result_uint8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
    246  VMOV/VMOVQ:5:result_uint16x4 [] = { fff0, fff0, fff0, fff0, }
    247  VMOV/VMOVQ:6:result_uint32x2 [] = { fffffff0, fffffff0, }
    248  VMOV/VMOVQ:7:result_uint64x1 [] = { fffffffffffffff0, }
    249  VMOV/VMOVQ:8:result_poly8x8 [] = { f0, f0, f0, f0, f0, f0, f0, f0, }
    [all …]
|
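Every lane in the reference vectors above prints as f0 / fff0 / fffffff0, the bit pattern of -16 at each element width. Below is a small sketch of the kind of duplicate-one-scalar test these files appear to record; the use of -16 as the duplicated value is inferred from the printed pattern, not taken from the test sources.

    /* Sketch only: broadcast a scalar with vmov_n_* and read a lane back. */
    #include <arm_neon.h>
    #include <stdio.h>

    int main(void)
    {
        int8x8_t   r_s8  = vmov_n_s8(-16);               /* -> f0 in every lane       */
        int32x2_t  r_s32 = vmov_n_s32(-16);              /* -> fffffff0 in every lane */
        uint16x4_t r_u16 = vmov_n_u16((uint16_t)-16);    /* -> fff0 in every lane     */

        printf("result_int8x8[0]   = %x\n", (uint8_t)vget_lane_s8(r_s8, 0));
        printf("result_int32x2[0]  = %x\n", (unsigned)vget_lane_s32(r_s32, 0));
        printf("result_uint16x4[0] = %x\n", vget_lane_u16(r_u16, 0));
        return 0;
    }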
/external/llvm/test/MC/Disassembler/ARM/ |
D | fullfp16-neon-thumb.txt |
    268  # Existing VMOV(immediate, Advanced SIMD) instructions within the encoding
    274  # 2 -- VMOV op
|
D | fullfp16-neon-arm.txt |
    268  # Existing VMOV(immediate, Advanced SIMD) instructions within the encoding
    272  # 2 -- VMOV op
|
D | invalid-armv7.txt |
    391  # VMOV cmode=0b1111 op=1 is UNDEFINED
    396  # VMOV cmode=0b1111 op=1 is UNDEFINED
|
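invalid-armv7.txt rejects the Advanced SIMD modified-immediate VMOV with cmode=0b1111 and op=1, a combination the architecture leaves UNDEFINED. Below is a hedged sketch of that check; the field positions used (cmode in bits [11:8], op in bit [5] of the 32-bit word) follow my reading of the ARMv7 modified-immediate encoding and are not stated in the test file itself.

    /* Sketch only: flag the undefined cmode/op combination for an Advanced
     * SIMD modified-immediate instruction word. */
    #include <stdbool.h>
    #include <stdint.h>

    static bool asimd_mod_imm_is_undefined(uint32_t insn)
    {
        uint32_t cmode = (insn >> 8) & 0xf;   /* assumed bits [11:8] */
        uint32_t op    = (insn >> 5) & 0x1;   /* assumed bit  [5]    */
        return cmode == 0xf && op == 1;       /* "VMOV cmode=0b1111 op=1 is UNDEFINED" */
    }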
/external/llvm/test/CodeGen/ARM/ |
D | domain-conv-vmovs.ll | 106 ; + Convince LLVM to emit a VMOV to S0
|
/external/libopus/celt/arm/ |
D | celt_pitch_xcorr_arm_gnu.s |
    84   @ Unlike VMOV, VAND is a data processsing instruction (and doesn't get
    176  VMOV.S32 q15, #1
|
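The second match above, VMOV.S32 q15, #1, broadcasts the immediate 1 into each 32-bit lane of q15. In intrinsic form that is just vdupq_n_s32(1); a one-line sketch for reference, with a function name of my own choosing:

    /* Sketch only: intrinsic equivalent of "VMOV.S32 qN, #1". */
    #include <arm_neon.h>

    static inline int32x4_t splat_one_s32(void)
    {
        return vdupq_n_s32(1);
    }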
/external/pcre/dist2/src/sljit/ |
D | sljitNativeARM_T2_32.c |
    177   #define VMOV 0xee000a10                                                           macro
    1644  return push_inst32(compiler, VMOV | (1 << 20) | RT4(dst) | DN4(TMP_FREG1));       in sljit_emit_fop1_conv_sw_from_f64()
    1657  FAIL_IF(push_inst32(compiler, VMOV | RT4(src) | DN4(TMP_FREG1)));                 in sljit_emit_fop1_conv_f64_from_sw()
    1664  FAIL_IF(push_inst32(compiler, VMOV | RT4(TMP_REG1) | DN4(TMP_FREG1)));            in sljit_emit_fop1_conv_f64_from_sw()
|
D | sljitNativeARM_32.c |
    110   #define VMOV 0xee000a10                                                           macro
    2122  return push_inst(compiler, VMOV | (1 << 20) | RD(dst) | (TMP_FREG1 << 16));       in sljit_emit_fop1_conv_sw_from_f64()
    2135  FAIL_IF(push_inst(compiler, VMOV | RD(src) | (TMP_FREG1 << 16)));                 in sljit_emit_fop1_conv_f64_from_sw()
    2142  FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | (TMP_FREG1 << 16)));            in sljit_emit_fop1_conv_f64_from_sw()
|
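Both sljit back ends define VMOV as the base opcode 0xee000a10, the form that transfers data between an ARM core register and a VFP single-precision register, and OR in (1 << 20) when the transfer goes from the VFP register back to the core register. Below is a hedged C sketch of that composition for the ARM-mode encoder; the base opcode and the direction bit come from the listing, while the Rt/Sn field placement follows my reading of the ARM encoding and is not quoted from sljit.

    /* Sketch only: compose a VMOV between an ARM core register and a VFP
     * single-precision register, in the style of the sljit emitters above. */
    #include <stdint.h>

    #define VMOV_BASE 0xee000a10u   /* matches "#define VMOV 0xee000a10" in the listing */

    static uint32_t encode_vmov_core_single(unsigned rt, unsigned sn, int to_core)
    {
        uint32_t insn = VMOV_BASE;
        if (to_core)
            insn |= 1u << 20;        /* op=1: Sn -> Rt, as in conv_sw_from_f64() */
        insn |= (rt & 0xfu) << 12;   /* Rt: ARM core register (assumed bits [15:12]) */
        insn |= (sn >> 1) << 16;     /* Vn: upper bits of the S register number      */
        insn |= (sn & 1u) << 7;      /* N : low bit of the S register number         */
        return insn;
    }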
/external/llvm/lib/Target/ARM/ |
D | ARM.td |
    119  "Has slow VGETLNi32 - prefer VMOV">;
    123  "Has slow VDUP32 - prefer VMOV">;
|
D | ARMScheduleSwift.td | 615 (instregex "VMOVv", "VMOV(S|D)$", "VMOV(S|D)cc",
|
/external/valgrind/none/tests/arm/ |
D | vfp.stdout.exp |
    51    ---- VMOV (ARM core register to scalar) ----
    76    ---- VMOV (scalar to ARM core register) ----
    590   ---- VMOV (register) ----
    1032  ----- VMOV (immediate) -----
    1043  ----- VMOV (ARM core register and single register) -----
    1068  ----- VMOV (ARM two core registers and two single registers) -----
    1161  ----- VMOV (ARM two core registers and double register) -----
|
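vfp.stdout.exp groups its expected output by VMOV form. As a small illustration, the sketch below exercises the "ARM core register and single register" form through GCC inline assembly; the "t" register constraint (a VFP single-precision register) is a GCC-specific detail assumed here, not something the valgrind output states.

    /* Sketch only: VMOV Rt, Sn copies the raw bits of a single-precision
     * register into an ARM core register. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        float f = 1.5f;
        uint32_t bits;

        __asm__("vmov %0, %1" : "=r"(bits) : "t"(f));

        printf("bits of 1.5f = 0x%08x\n", bits);   /* expect 0x3fc00000 */
        return 0;
    }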
/external/llvm/test/MC/ARM/ |
D | simple-fp-encoding.s | 326 @ VMOV w/ optional data type suffix.
|