/external/valgrind/none/tests/arm64/

D | integer.c
  163  TESTINST3("add x3, x4, x5", 12345, 6789, x3, x4, x5, 0);                      in test_arith()
  164  TESTINST3("add w3, w4, w5", 12345, 6789, x3, x4, x5, 0);                      in test_arith()
  167  TESTINST3("adc x3, x4, x5", 12345, 6789, x3,x4,x5,0);                         in test_arith()
  168  TESTINST3("adc x3, x4, x5", 12345, 6789, x3,x4,x5,1);                         in test_arith()
  169  TESTINST3("adc x3, x4, x5", 0, 0xffffffffffffffffULL, x3,x4,x5,0);            in test_arith()
  170  TESTINST3("adc x3, x4, x5", 0, 0xffffffffffffffffULL, x3,x4,x5,1);            in test_arith()
  171  TESTINST3("adc x3, x4, x5", 0x31415927ULL<<32, 0x27181728ULL<<32, x3,x4,x5,0); in test_arith()
  172  TESTINST3("adc x3, x4, x5", 0x31415927ULL<<32, 0x27181728ULL<<32, x3,x4,x5,1); in test_arith()
  173  TESTINST3("adc x3, x4, x5", 0x00000000ULL<<32, 0x00000000ULL<<32, x3,x4,x5,0); in test_arith()
  174  TESTINST3("adc x3, x4, x5", 0x00000000ULL<<32, 0x00000000ULL<<32, x3,x4,x5,1); in test_arith()
  [all …]
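The adc cases above exercise AArch64 add-with-carry: the result is x4 + x5 + carry-in, with carry-out set on unsigned wraparound. A minimal standalone checker of that arithmetic (my own sketch, not the TESTINST3 harness, which executes the real instruction), fed the inputs from lines 169-170:

    #include <stdint.h>
    #include <stdio.h>

    /* Emulate "adc x3, x4, x5": result = a + b + carry_in. */
    static uint64_t adc64(uint64_t a, uint64_t b, unsigned carry_in,
                          unsigned *carry_out)
    {
        uint64_t sum = a + b;
        unsigned c = sum < a;            /* carry out of a + b */
        sum += carry_in;
        c |= sum < (uint64_t)carry_in;   /* carry out of adding carry_in */
        *carry_out = c;
        return sum;
    }

    int main(void)
    {
        unsigned c;
        /* Inputs from lines 169-170: 0 + 0xffffffffffffffff. */
        printf("%#llx C=%u\n", (unsigned long long)adc64(0, ~0ULL, 0, &c), c);
        printf("%#llx C=%u\n", (unsigned long long)adc64(0, ~0ULL, 1, &c), c);
        return 0;   /* prints 0xffffffffffffffff C=0, then 0 C=1 */
    }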
/external/libhevc/common/arm64/

D | ihevc_intra_pred_chroma_ver.s
  105  lsl x5, x4, #2                       //4nt
  113  add x5, x5, #2                       //2nt+2
  114  add x6, x0, x5                       //&src[2nt+1]
  116  add x5, x2, x3                       //pu1_dst + dst_strd
  118  add x8, x5, x3
  128  st2 {v20.8b, v21.8b}, [x5],#16
  133  st2 {v22.8b, v23.8b}, [x5], x11
  141  st2 {v20.8b, v21.8b}, [x5],#16
  146  st2 {v22.8b, v23.8b}, [x5], x11
  154  st2 {v20.8b, v21.8b}, [x5],#16
  [all …]
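The load at &src[2nt+1] and the row-by-row st2 stores implement vertical prediction: every output row is a copy of the reference row above the block (st2 because the chroma Cb/Cr samples are interleaved). A scalar sketch of the same contract in luma layout, with my own function name:

    #include <stdint.h>
    #include <string.h>

    /* Vertical intra prediction: each row repeats the top reference row,
     * which lives at src[2*nt + 1] in this reference-array layout. */
    static void intra_pred_ver(const uint8_t *src, int nt,
                               uint8_t *dst, int dst_strd)
    {
        const uint8_t *top = src + 2 * nt + 1;   /* &src[2nt+1], as in the asm */
        for (int row = 0; row < nt; row++)
            memcpy(dst + row * dst_strd, top, nt);
    }

The ihevc_intra_pred_luma_vert.s entry below is the same pattern without the chroma interleave.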
D | ihevc_sao_edge_offset_class2.s
   73  MOV x5,x7                            //Loads pu1_avail
   94  MOV x23,x5                           //Store pu1_avail in sp
  114  LDRB w10,[x5,#4]                     //pu1_avail[4]
  151  LDRB w14,[x5,#7]                     //pu1_avail[7]
  197  LDRB w11,[x5,#3]                     //pu1_avail[3]
  203  LDRB w5,[x5,#2]                      //pu1_avail[2]
  208  CMP x5,#0
  235  MOV x5,x23                           //Loads pu1_avail
  237  LDRb w20, [x5]                       //pu1_avail[0]
  245  LDRB w8,[x5,#1]                      //pu1_avail[1]
  [all …]
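The pu1_avail[] bytes gate pixels whose diagonal neighbours fall outside the current CTB; the arithmetic being vectorized is the SAO edge-offset classification (class 2 compares against the top-left and bottom-right neighbours, the class3 file below uses the other diagonal). A per-pixel scalar sketch, under the assumption that offset[2] is the zero "flat" category:

    #include <stdint.h>

    static int sign3(int v) { return (v > 0) - (v < 0); }

    /* SAO edge offset for one pixel: classify against the two neighbours
     * along the class direction, add the offset for that edge category,
     * clip to 8 bits.  offset[2] is the flat case and is 0. */
    static uint8_t sao_edge_pixel(uint8_t cur, uint8_t nbr_a, uint8_t nbr_b,
                                  const int8_t offset[5])
    {
        int edge_idx = 2 + sign3((int)cur - nbr_a) + sign3((int)cur - nbr_b);
        int v = cur + offset[edge_idx];
        return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
    }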
D | ihevc_padding.s
  115  add x5,x4,x1
  123  add x6,x5,x1
  125  st1 {v2.16b},[x5],#16                //128/8 = 16 bytes store
  126  st1 {v2.16b},[x5],#16                //128/8 = 16 bytes store
  127  st1 {v2.16b},[x5],#16                //128/8 = 16 bytes store
  128  st1 {v2.16b},[x5],#16                //128/8 = 16 bytes store
  129  st1 {v2.16b},[x5]                    //128/8 = 16 bytes store
  233  add x5,x4,x1
  241  add x6,x5,x1
  243  st1 {v2.16b},[x5],#16                //128/8 = 16 bytes store
  [all …]
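The kernel splats one vector (v2) across a row in 16-byte stores, which is the usual frame-padding shape: replicate an edge sample across the pad region so that motion compensation can read past the picture boundary. A scalar sketch of left-edge padding under that assumed contract (routine and parameter names are mine):

    #include <stdint.h>
    #include <string.h>

    /* Pad pad_size bytes to the left of every row with the row's first
     * sample, mirroring the replicated 16-byte NEON stores above. */
    static void pad_left(uint8_t *src, int src_strd, int ht, int pad_size)
    {
        for (int row = 0; row < ht; row++) {
            uint8_t *p = src + row * src_strd;
            memset(p - pad_size, p[0], pad_size);
        }
    }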
D | ihevc_sao_edge_offset_class3.s
   82  MOV x20,x5                           //Store pu1_src_top_right in sp
   85  MOV x5,x7                            //Loads pu1_avail
  114  LDRB w9,[x5,#5]                      //pu1_avail[5]
  156  LDRB w10,[x5,#6]                     //pu1_avail[6]
  208  LDRB w11,[x5,#3]                     //pu1_avail[3]
  215  LDRB w5,[x5,#2]                      //pu1_avail[2]
  217  CMP x5,#0
  244  MOV x5,x23                           //Loads pu1_avail
  246  LDRb w20, [x5]                       //pu1_avail[0]
  254  LDRB w8,[x5,#1]                      //pu1_avail[1]
  [all …]
D | ihevc_inter_pred_chroma_copy_w16out.s
  111  mov x16,x5                           // ht
  141  add x5,x0,x2                         //pu1_src +src_strd
  146  ld1 {v22.8b},[x5],x2                 //vld1_u8(pu1_src_tmp)
  151  ld1 {v24.8b},[x5],x2                 //vld1_u8(pu1_src_tmp)
  156  ld1 {v26.8b},[x5],x2                 //vld1_u8(pu1_src_tmp)
  165  sub x0,x5,x11
  185  add x5,x0,x2                         //pu1_src +src_strd
  190  ld1 {v22.8b},[x5],x2                 //vld1_u8(pu1_src_tmp)
  195  ld1 {v24.8b},[x5],x2                 //vld1_u8(pu1_src_tmp)
  205  lsl x5, x3,#1
  [all …]
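The "w16out" copy variants (this file and the luma one below) widen 8-bit samples into the 16-bit intermediate buffer used by later weighted-prediction stages; per HEVC's intermediate-precision rule the samples are pre-shifted by 14 - bit_depth = 6 for 8-bit content. A scalar sketch of that assumed contract:

    #include <stdint.h>

    /* Copy-mode inter prediction with 16-bit output: dst = src << 6. */
    static void inter_pred_copy_w16out(const uint8_t *src, int src_strd,
                                       int16_t *dst, int dst_strd,
                                       int wd, int ht)
    {
        for (int r = 0; r < ht; r++)
            for (int c = 0; c < wd; c++)
                dst[r * dst_strd + c] = (int16_t)(src[r * src_strd + c] << 6);
    }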
D | ihevc_intra_pred_luma_mode2.s
  143  lsl x5, x3, #2
  163  st1 {v16.8b},[x6],x5
  164  st1 {v17.8b},[x7],x5
  167  st1 {v18.8b},[x9],x5
  171  st1 {v19.8b},[x14],x5
  172  st1 {v20.8b},[x6],x5
  175  st1 {v21.8b},[x7],x5
  176  st1 {v22.8b},[x9],x5
  180  st1 {v23.8b},[x14],x5
  232  st1 {v16.8b},[x6],x5
  [all …]
D | ihevc_inter_pred_luma_copy_w16out.s
   91  mov x16,x5                           // ht
  110  add x5,x0,x2                         //pu1_src +src_strd
  115  ld1 {v22.8b},[x5],x2                 //vld1_u8(pu1_src_tmp)
  120  ld1 {v24.8b},[x5],x2                 //vld1_u8(pu1_src_tmp)
  125  ld1 {v26.8b},[x5],x2                 //vld1_u8(pu1_src_tmp)
  134  sub x0,x5,x11
  148  lsl x5, x3,#1
  149  adds x5, x5,#0
  161  add x10,x1,x5
  198  st1 {v2.8h},[x10],x5                 //vst1q_s16(pi2_dst_tmp, tmp)
  [all …]
D | ihevc_intra_pred_luma_vert.s
  107  lsl x5, x4, #1                       //2nt
  113  add x5, x5, #1                       //2nt+1
  114  add x6, x0, x5                       //&src[2nt+1]
  117  add x5, x2, x3
  119  add x8, x5, x3
  127  st1 {v20.8b, v21.8b}, [x5],#16
  132  st1 {v22.8b, v23.8b}, [x5], x11
  140  st1 {v20.8b, v21.8b}, [x5],#16
  145  st1 {v22.8b, v23.8b}, [x5], x11
  152  st1 {v20.8b, v21.8b}, [x5],#16
  [all …]
D | ihevc_intra_pred_luma_mode_27_to_33.s
  114  add x8,x6,x5,lsl #2                  //*gai4_ihevc_ang_table[mode]
  140  mov x5,x4
  328  csel x4, x5, x4,le                   //reload nt
  420  sub x20,x12,x5
  456  add x5,x8,#1                         //row + 1
  457  mul x5, x5, x9                       //pos = ((row + 1) * intra_pred_ang)
  458  and x5,x5,#31                        //fract = pos & (31)
  459  cmp x14,x5                           //if(fract_prev > fract)
  464  sub x20,x5,#32
  471  mov x14,x5                           //fract_prev = fract
  [all …]
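The comments spell out the angular-prediction inner loop: pos projects the row onto the reference array in 1/32-pel units and fract is the blend weight between the two bracketing reference samples. A scalar sketch of one predicted row using the standard HEVC interpolation (names are mine; the chroma and mode 19-25 files below share this arithmetic):

    #include <stdint.h>

    /* One row of angular intra prediction, modes 27-33. */
    static void intra_pred_angular_row(const uint8_t *ref, int row, int nt,
                                       int intra_pred_ang,  /* from the angle table */
                                       uint8_t *dst)
    {
        int pos   = (row + 1) * intra_pred_ang;  /* pos = ((row + 1) * intra_pred_ang) */
        int idx   = pos >> 5;                    /* integer reference index */
        int fract = pos & 31;                    /* fract = pos & (31) */
        for (int col = 0; col < nt; col++)
            dst[col] = (uint8_t)(((32 - fract) * ref[idx + col + 1]
                                  + fract * ref[idx + col + 2] + 16) >> 5);
    }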
D | ihevc_intra_pred_chroma_mode_27_to_33.s
  109  add x8,x6,x5,lsl #2                  //*gai4_ihevc_ang_table[mode]
  135  mov x5,x4
  322  csel x4, x5, x4,le                   //reload nt
  414  sub x20,x12,x5
  451  add x5,x8,#1                         //row + 1
  452  mul x5, x5, x9                       //pos = ((row + 1) * intra_pred_ang)
  453  and x5,x5,#31                        //fract = pos & (31)
  454  cmp x14,x5                           //if(fract_prev > fract)
  459  sub x20,x5,#32
  466  mov x14,x5                           //fract_prev = fract
  [all …]
D | ihevc_inter_pred_chroma_copy.s
  104  CMP x5,#0                            //checks ht == 0
  106  AND x8,x5,#3                         //ht % 4 (remainder rows)
  107  SUB x5,x5,x8                         //ht rounded down to a multiple of 4
  113  CMP x5,#0
  137  SUBS x5,x5,#4                        //ht - 4
  166  CMP x5,#0
  189  SUBS x5,x5,#4                        //ht -= 4
  212  CMP x5,#0
  234  SUBS x5,x5,#4                        //ht -= 4
D | ihevc_intra_pred_filters_luma_mode_19_to_25.s
  119  add x7, x7, x5, lsl #2               //gai4_ihevc_ang_table[mode]
  120  add x8, x8, x5, lsl #2               //gai4_ihevc_inv_ang_table
  224  add x8,x6,x5,lsl #2                  //*gai4_ihevc_ang_table[mode]
  252  mov x5,x4
  428  csel x4, x5, x4,le                   //reload nt
  527  sub x20,x12,x5
  566  add x5,x8,#1                         //row + 1
  567  mul x5, x5, x9                       //pos = ((row + 1) * intra_pred_ang)
  568  asr x14, x5, #5                      //idx = pos >> 5
  569  and x5,x5,#31                        //fract = pos & (31)
  [all …]
/external/libavc/common/armv8/

D | ih264_intra_pred_luma_4x4_av8.s
  265  ands x5, x4, #0x01
  271  sxtw x5, w5
  276  add x5, x5, x6
  279  add x5, x5, x7
  281  add x5, x5, x8
  289  add x5, x5, x6
  292  add x5, x5, x7
  295  add x5, x5, x8
  296  add x5, x5, x9
  297  add x5, x5, #4
  [all …]
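The chained sums ending in "add x5, x5, #4" have the rounding shape of DC prediction, (sum + 4) >> 3 when both borders contribute; the 8x8 file below accumulates the same way. A scalar sketch of H.264 4x4 DC prediction under that reading (an inference from the excerpt, not a line-for-line port):

    #include <stdint.h>
    #include <string.h>

    /* 4x4 DC prediction: average the available top/left neighbours,
     * fall back to 128 when neither border exists. */
    static void pred_4x4_dc(const uint8_t top[4], const uint8_t left[4],
                            int top_avail, int left_avail,
                            uint8_t *dst, int strd)
    {
        int sum = 0, cnt = 0, dc = 128;
        if (top_avail)  { for (int i = 0; i < 4; i++) sum += top[i];  cnt += 4; }
        if (left_avail) { for (int i = 0; i < 4; i++) sum += left[i]; cnt += 4; }
        if (cnt)
            dc = (sum + cnt / 2) / cnt;   /* (sum + 4) >> 3 when cnt == 8 */
        for (int r = 0; r < 4; r++)
            memset(dst + r * strd, dc, 4);
    }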
D | ih264_inter_pred_luma_copy_av8.s
   86  mov x12, x5
  102  add x5, x0, x2                       //pu1_src_tmp += src_strd
  105  ld1 {v0.s}[0], [x5], x2              //vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
  108  ld1 {v0.s}[0], [x5], x2              //vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
  111  ld1 {v0.s}[0], [x5], x2              //vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
  119  sub x0, x5, x11                      //pu1_src = pu1_src_tmp
  138  add x5, x0, x2                       //pu1_src_tmp += src_strd
  142  ld1 {v1.8b}, [x5], x2                //vld1_u8(pu1_src_tmp)
  145  ld1 {v2.8b}, [x5], x2                //vld1_u8(pu1_src_tmp)
  147  ld1 {v3.8b}, [x5], x2                //vld1_u8(pu1_src_tmp)
  [all …]
D | ih264_default_weighted_pred_av8.s
  133  st1 {v0.s}[0], [x2], x5              //store row 1 in destination
  134  st1 {v0.s}[1], [x2], x5              //store row 2 in destination
  136  st1 {v1.s}[0], [x2], x5              //store row 3 in destination
  137  st1 {v1.s}[1], [x2], x5              //store row 4 in destination
  155  st1 {v0.8b}, [x2], x5                //store row 1 in destination
  157  st1 {v1.8b}, [x2], x5                //store row 2 in destination
  158  st1 {v2.8b}, [x2], x5                //store row 3 in destination
  159  st1 {v3.8b}, [x2], x5                //store row 4 in destination
  195  st1 {v0.8b, v1.8b}, [x2], x5         //store row 1 in destination
  196  st1 {v2.8b, v3.8b}, [x2], x5         //store row 2 in destination
  [all …]
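Default weighted prediction in H.264 is the rounded average of the two reference blocks, which is what the rows stored above carry. The per-sample arithmetic as a scalar sketch (function and parameter names are mine):

    #include <stdint.h>

    /* dst = (pred0 + pred1 + 1) >> 1, sample by sample. */
    static void default_weighted_pred(const uint8_t *src1, int strd1,
                                      const uint8_t *src2, int strd2,
                                      uint8_t *dst, int dst_strd,
                                      int wd, int ht)
    {
        for (int r = 0; r < ht; r++)
            for (int c = 0; c < wd; c++)
                dst[r * dst_strd + c] = (uint8_t)
                    ((src1[r * strd1 + c] + src2[r * strd2 + c] + 1) >> 1);
    }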
D | ih264_intra_pred_luma_8x8_av8.s
  201  sxtw x5, w5
  216  sxtw x5, w5
  309  sxtw x5, w5
  314  add x5, x5, x6
  317  add x5, x5, x7
  320  add x5, x5, x8
  323  add x5, x5, x6
  326  add x5, x5, x7
  328  add x5, x5, x8
  331  add x5, x5, x6
  [all …]
/external/ceres-solver/internal/ceres/

D | cost_function_to_functor_test.cc
  161      const T* x5, T* residuals) const {                                 in operator ()()
  163          + x4[0] * x4[0] + x5[0] * x5[0];                               in operator ()()
  165          + x4[1] * x4[1] + x5[1] * x5[1];                               in operator ()()
  174      const T* x5, const T* x6, T* residuals) const {                    in operator ()()
  176          + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0];               in operator ()()
  178          + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1];               in operator ()()
  187      const T* x5, const T* x6, const T* x7, T* residuals) const {       in operator ()()
  189          + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] + x7[0] * x7[0];  in operator ()()
  191          + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] + x7[1] * x7[1];  in operator ()()
  200      const T* x5, const T* x6, const T* x7, const T* x8,                in operator ()()
  [all …]
/external/libhevc/decoder/arm64/

D | ihevcd_fmt_conv_420sp_to_420p.s
   92  mov x8, x5                           ////Load u2_width
   96  sxtw x5,w5
  100  SUB x11,x5,x8                        //// Dst Y increment
  103  sxtw x5,w5
  104  CMP x5,#0                            ////skip luma if disable_luma_copy is non-zero
  144  sxtw x5,w5
  153  SUB x11,x5,x11                       //// Dst U and V increment
  155  mov x5, x15                          ////Load pu1_dest_v
  160  csel x4, x5, x4,EQ
  161  csel x5, x3, x5,EQ
  [all …]
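The routine converts 420SP (semi-planar, interleaved UV) to 420P: the Y plane is copied and the UV plane is split, with the csel pair above swapping the U and V destinations depending on source ordering. A stride-free scalar sketch of that contract (names and the is_u_first flag are mine):

    #include <stdint.h>
    #include <string.h>

    static void fmt_conv_420sp_to_420p(const uint8_t *src_y, const uint8_t *src_uv,
                                       uint8_t *dst_y, uint8_t *dst_u, uint8_t *dst_v,
                                       int wd, int ht, int is_u_first)
    {
        if (!is_u_first) { uint8_t *t = dst_u; dst_u = dst_v; dst_v = t; }
        memcpy(dst_y, src_y, (size_t)wd * ht);            /* luma: straight copy */
        for (int i = 0; i < (wd / 2) * (ht / 2); i++) {   /* chroma: deinterleave */
            dst_u[i] = src_uv[2 * i];
            dst_v[i] = src_uv[2 * i + 1];
        }
    }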
/external/llvm/test/Bitcode/

D | binaryFloatInstructions.3.2.ll
    8  define void @fadd(float %x1, double %x2 ,half %x3, fp128 %x4, x86_fp80 %x5, ppc_fp128 %x6){
   22  ; CHECK-NEXT: %res5 = fadd x86_fp80 %x5, %x5
   23  %res5 = fadd x86_fp80 %x5, %x5
   31  …faddFloatVec(<2 x float> %x1, <3 x float> %x2 ,<4 x float> %x3, <8 x float> %x4, <16 x float> %x5){
   45  ; CHECK-NEXT: %res5 = fadd <16 x float> %x5, %x5
   46  %res5 = fadd <16 x float> %x5, %x5
   51  …ubleVec(<2 x double> %x1, <3 x double> %x2 ,<4 x double> %x3, <8 x double> %x4, <16 x double> %x5){
   65  ; CHECK-NEXT: %res5 = fadd <16 x double> %x5, %x5
   66  %res5 = fadd <16 x double> %x5, %x5
   71  …void @faddHalfVec(<2 x half> %x1, <3 x half> %x2 ,<4 x half> %x3, <8 x half> %x4, <16 x half> %x5){
  [all …]
D | binaryIntInstructions.3.2.ll
    8  define void @add(i1 %x1, i8 %x2 ,i16 %x3, i32 %x4, i64 %x5){
   22  ; CHECK-NEXT: %res5 = add i64 %x5, %x5
   23  %res5 = add i64 %x5, %x5
   37  define void @addvec8NuwNsw(<2 x i8> %x1, <3 x i8> %x2 ,<4 x i8> %x3, <8 x i8> %x4, <16 x i8> %x5){
   51  ; CHECK-NEXT: %res5 = add nuw nsw <16 x i8> %x5, %x5
   52  %res5 = add nuw nsw <16 x i8> %x5, %x5
   57  …e void @addvec16NuwNsw(<2 x i16> %x1, <3 x i16> %x2 ,<4 x i16> %x3, <8 x i16> %x4, <16 x i16> %x5){
   71  ; CHECK-NEXT: %res5 = add nuw nsw <16 x i16> %x5, %x5
   72  %res5 = add nuw nsw <16 x i16> %x5, %x5
   77  …e void @addvec32NuwNsw(<2 x i32> %x1, <3 x i32> %x2 ,<4 x i32> %x3, <8 x i32> %x4, <16 x i32> %x5){
  [all …]
/external/ceres-solver/include/ceres/

D | numeric_diff_functor.h
  183      const double* x5,                                                  in operator()
  185      return functor_(x0, x1, x2, x3, x4, x5, residuals);                in operator()
  193      const double* x5,                                                  in operator()
  196      return functor_(x0, x1, x2, x3, x4, x5, x6, residuals);            in operator()
  204      const double* x5,                                                  in operator()
  208      return functor_(x0, x1, x2, x3, x4, x5, x6, x7, residuals);        in operator()
  216      const double* x5,                                                  in operator()
  221      return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, residuals);    in operator()
  229      const double* x5,                                                  in operator()
  235      return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, residuals);  in operator()
  [all …]
/external/valgrind/coregrind/m_syswrap/

D | syscall-arm64-linux.S
   87  stp x4, x5, [sp, #-16]!
  101  ldr x5, [sp, #8]                     /* saved x1 == guest_state */
  104  ldr x0, [x5, #OFFSET_arm64_X0]
  105  ldr x1, [x5, #OFFSET_arm64_X1]
  106  ldr x2, [x5, #OFFSET_arm64_X2]
  107  ldr x3, [x5, #OFFSET_arm64_X3]
  108  ldr x4, [x5, #OFFSET_arm64_X4]
  109  ldr x5, [x5, #OFFSET_arm64_X5]
  113  ldr x5, [sp, #8]                     /* saved x1 == guest_state */
  114  str x0, [x5, #OFFSET_arm64_X0]
  [all …]
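The wrapper marshals syscall arguments out of the saved guest state, traps into the kernel, then writes the result back into guest x0. The same round trip as a C sketch with inline assembly (my own sketch, not Valgrind's code path; the struct stands in for the real OFFSET_arm64_* layout and is an assumption):

    #include <stdint.h>

    struct guest_arm64 { uint64_t x[31]; };   /* hypothetical guest register block */

    /* Load x0-x5 from the guest, issue the syscall in x8, store the
     * result back to guest x0 -- what the ldr/svc/str sequence does. */
    static uint64_t syscall_from_guest(struct guest_arm64 *gst, uint64_t sysno)
    {
        register uint64_t x8 __asm__("x8") = sysno;
        register uint64_t x0 __asm__("x0") = gst->x[0];
        register uint64_t x1 __asm__("x1") = gst->x[1];
        register uint64_t x2 __asm__("x2") = gst->x[2];
        register uint64_t x3 __asm__("x3") = gst->x[3];
        register uint64_t x4 __asm__("x4") = gst->x[4];
        register uint64_t x5 __asm__("x5") = gst->x[5];
        __asm__ volatile("svc #0"
                         : "+r"(x0)
                         : "r"(x8), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5)
                         : "memory");
        gst->x[0] = x0;   /* result back into guest x0 */
        return x0;
    }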
/external/libmpeg2/common/armv8/

D | impeg2_inter_pred.s
  110  ldr x5, [x1]                         //dst->y
  114  st1 {v0.8b, v1.8b}, [x5], x3         //Store and increment dst
  118  st1 {v0.8b, v1.8b}, [x5], x3         //Store and increment dst
  120  st1 {v0.8b, v1.8b}, [x5], x3         //Store and increment dst
  122  st1 {v0.8b, v1.8b}, [x5], x3         //Store and increment dst
  124  st1 {v0.8b, v1.8b}, [x5], x3         //Store and increment dst
  126  st1 {v0.8b, v1.8b}, [x5], x3         //Store and increment dst
  128  st1 {v0.8b, v1.8b}, [x5], x3         //Store and increment dst
  130  st1 {v0.8b, v1.8b}, [x5], x3         //Store and increment dst
  132  st1 {v0.8b, v1.8b}, [x5], x3         //Store and increment dst
  [all …]
/external/libavc/common/

D | ih264_resi_trans_quant.c
  125      WORD32 x0, x1, x2, x3, x4, x5, x6, x7;                             in ih264_resi_trans_quant_4x4() local
  135      x5 = pu1_src[1] - pu1_pred[1];                                     in ih264_resi_trans_quant_4x4()
  141      x1 = x5 + x6;                                                      in ih264_resi_trans_quant_4x4()
  142      x2 = x5 - x6;                                                      in ih264_resi_trans_quant_4x4()
  162      x5 = pi2_out_tmp[4];                                               in ih264_resi_trans_quant_4x4()
  168      x1 = x5 + x6;                                                      in ih264_resi_trans_quant_4x4()
  169      x2 = x5 - x6;                                                      in ih264_resi_trans_quant_4x4()
  273      WORD32 x0, x1, x2, x3, x4, x5, x6, x7;                             in ih264_resi_trans_quant_chroma_4x4() local
  283      x5 = pu1_src[2] - pu1_pred[2];                                     in ih264_resi_trans_quant_chroma_4x4()
  289      x1 = x5 + x6;                                                      in ih264_resi_trans_quant_chroma_4x4()
  [all …]
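The x0..x7 temporaries implement the 4x4 forward core transform on residuals (pu1_src - pu1_pred), first over rows and then, via pi2_out_tmp, over columns. One row of the standard H.264 butterfly as a sketch (variable names are mine; the quantisation step that follows in the real routine is omitted):

    #include <stdint.h>

    /* H.264 4x4 forward core transform, one row of residuals. */
    static void fwd_transform_row(const int16_t in[4], int16_t out[4])
    {
        int s0 = in[0] + in[3];   /* mirrored-pair sums */
        int s1 = in[1] + in[2];
        int d1 = in[1] - in[2];   /* mirrored-pair differences */
        int d0 = in[0] - in[3];
        out[0] = (int16_t)(s0 + s1);
        out[2] = (int16_t)(s0 - s1);
        out[1] = (int16_t)(2 * d0 + d1);
        out[3] = (int16_t)(d0 - 2 * d1);
    }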