/external/libhevc/decoder/arm64/ |
D | ihevcd_fmt_conv_420sp_to_420p.s |
    93  mov x9, x6 ////Load u2_height
   111  MOV x6,x8 //// Copying width
   115  SUB x6,x6,#16
   118  CMP x6,#16
   120  CMP x6,#0
   126  sub x20,x6,#16
   127  neg x6, x20
   128  SUB x0,x0,x6
   129  SUB x2,x2,x6
   166  MOV x6,x8 //// Copying width
   [all …]
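For orientation: this kernel repacks 4:2:0 semi-planar (NV12-style) frames into fully planar I420, and the SUB/NEG arithmetic on x6 above is tail handling for widths that are not a multiple of the 16-byte vector step. A minimal scalar C sketch of the same conversion; names and the half-stride chroma convention are illustrative, not libhevc's exact signature:

#include <stdint.h>

/* 4:2:0 semi-planar (Y plane + interleaved UV) to planar (Y, U, V).
 * Strides may be wider than wd; chroma planes are wd/2 x ht/2. */
static void fmt_conv_420sp_to_420p(const uint8_t *pu1_y, const uint8_t *pu1_uv,
                                   uint8_t *pu1_dest_y, uint8_t *pu1_dest_u,
                                   uint8_t *pu1_dest_v,
                                   int wd, int ht, int src_strd, int dst_strd)
{
    /* Luma: plain strided row copy. */
    for (int row = 0; row < ht; row++)
        for (int col = 0; col < wd; col++)
            pu1_dest_y[row * dst_strd + col] = pu1_y[row * src_strd + col];

    /* Chroma: deinterleave UVUV... into separate U and V planes,
     * assumed here to use half the luma strides. */
    for (int row = 0; row < ht / 2; row++) {
        for (int col = 0; col < wd / 2; col++) {
            pu1_dest_u[row * (dst_strd / 2) + col] = pu1_uv[row * src_strd + 2 * col];
            pu1_dest_v[row * (dst_strd / 2) + col] = pu1_uv[row * src_strd + 2 * col + 1];
        }
    }
}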
|
D | ihevcd_fmt_conv_420sp_to_420sp.s |
   100  mov x7, x6 ////Load u2_stridey
   109  MOV x6,x8 //// Copying width
   113  SUB x6,x6,#32
   122  CMP x6,#32
   124  CMP x6,#0
   130  sub x20,x6,#32
   131  neg x6, x20
   132  SUB x0,x0,x6
   133  SUB x2,x2,x6
   166  MOV x6,x8 //// Copying width
   [all …]
|
/external/libhevc/common/arm64/ |
D | ihevc_intra_pred_luma_mode_18_34.s |
   127  csel x6, x20, x6,eq
   129  csel x6, x20, x6,ne
   134  ld1 {v0.8b},[x8],x6
   136  ld1 {v1.8b},[x8],x6
   138  ld1 {v2.8b},[x8],x6
   139  ld1 {v3.8b},[x8],x6
   141  ld1 {v4.8b},[x8],x6
   142  ld1 {v5.8b},[x8],x6
   143  ld1 {v6.8b},[x8],x6
   145  ld1 {v7.8b},[x8],x6
   [all …]
|
D | ihevc_inter_pred_chroma_copy.s |
   103  LSL x12,x6,#1 //wd << 1
   123  ADD x6,x1,x3 //pu1_dst_tmp += dst_strd
   127  ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
   130  ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
   133  ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
   139  SUB x1,x6,x11 //pu1_dst = pu1_dst_tmp
   154  ADD x6,x1,x3 //pu1_dst_tmp += dst_strd
   158  ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
   177  ADD x6,x1,x3 //pu1_dst_tmp += dst_strd
   180  ST1 {v1.8b},[x6],x3 //vst1_u8(pu1_dst_tmp, tmp_src)
   [all …]
|
D | ihevc_padding.s |
   123  add x6,x5,x1
   131  add x7,x6,x1
   133  st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
   134  st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
   135  st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
   136  st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
   137  st1 {v4.16b},[x6] //128/8 = 16 bytes store
   241  add x6,x5,x1
   249  add x7,x6,x1
   251  st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
   [all …]
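The repeated stores splat one edge byte (held in v4) across an 80-byte border, five 16-byte stores per row; the padding routines replicate frame-edge pixels outward so interpolation filters can read past the picture boundary. A scalar sketch under those assumptions; the function name and pointer convention are mine:

#include <stdint.h>
#include <string.h>

/* Replicate each row's last pixel into pad_size bytes of right border.
 * pu1_row_end points one past the last valid pixel of the first row;
 * the asm splats p[-1] into a vector register and stores 16B at a time. */
static void pad_right(uint8_t *pu1_row_end, int src_strd, int ht, int pad_size)
{
    for (int row = 0; row < ht; row++) {
        uint8_t *p = pu1_row_end + row * src_strd;
        memset(p, p[-1], (size_t)pad_size);
    }
}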
|
D | ihevc_inter_pred_chroma_vert_w16out.s |
   113  mov x17,x6 // wd
   119  mov x6,x17 //loads wd
   125  tst x6,#3 //checks (wd & 3)
   127  lsl x10,x6,#1 //2*wd
   147  add x6,x0,x2 //pu1_src +src_strd
   148  ld1 {v17.8b},[x6],x2 //loads pu1_src
   152  ld1 {v4.8b},[x6],x2 //loads incremented src
   154  ld1 {v16.8b},[x6],x2 //loads incremented src
   157  ld1 {v18.8b},[x6] //loads the incremented src
   162  add x6,x1,x3 //pu1_dst + dst_strd
   [all …]
|
D | ihevc_inter_pred_chroma_vert.s |
   112  mov x17,x6 // wd
   117  mov x6,x17 //loads wd
   123  tst x6,#3 //checks (wd & 3)
   125  lsl x10,x6,#1 //2*wd
   144  add x6,x0,x2 //pu1_src +src_strd
   145  ld1 {v17.8b},[x6],x2 //loads pu1_src
   149  ld1 {v4.8b},[x6],x2 //loads incremented src
   151  ld1 {v16.8b},[x6],x2 //loads incremented src
   156  ld1 {v18.8b},[x6] //loads the incremented src
   160  add x6,x1,x3 //pu1_dst + dst_strd
   [all …]
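Both chroma vertical kernels (this one and the _w16out variant above) run a 4-tap filter down each column; the chain of ld1 loads is the sliding four-row source window. A scalar sketch of the shape of the computation; the tap values depend on the sub-pel phase, and the source-row origin and rounding are assumptions, so this is not the exact libhevc routine:

#include <stdint.h>

#define CLIP_U8(x) ((x) < 0 ? 0 : ((x) > 255 ? 255 : (x)))

/* 4-tap vertical interpolation. pu1_src is assumed to point at the
 * second row of the four-row window; taps sum to 64, hence (+32)>>6. */
static void chroma_vert_filter(const uint8_t *pu1_src, int src_strd,
                               uint8_t *pu1_dst, int dst_strd,
                               const int8_t coeff[4], int wd, int ht)
{
    for (int row = 0; row < ht; row++) {
        for (int col = 0; col < wd; col++) {
            int sum = 0;
            for (int k = 0; k < 4; k++)
                sum += coeff[k] * pu1_src[(row + k - 1) * src_strd + col];
            pu1_dst[row * dst_strd + col] = CLIP_U8((sum + 32) >> 6);
        }
    }
}

As I read it, the _w16out variant stores the unclipped 16-bit sums instead, deferring rounding to a later weighted-prediction stage.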
|
D | ihevc_inter_pred_chroma_copy_w16out.s |
   112  mov x17,x6 // wd
   130  lsl x6, x3,#1
   131  adds x6, x6,#0
   143  add x10,x1,x6
   154  st1 {v22.1d},[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
   157  st1 {v24.1d},[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
   160  st1 {v26.1d},[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
   187  add x10,x1,x6
   198  st1 {v22.1d},[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
   220  add x6,x0,x2 //pu1_src_tmp += src_strd
   [all …]
|
D | ihevc_inter_pred_luma_copy_w16out.s |
    92  mov x17,x6 // wd
   101  lsl x6, x3,#1
   102  adds x6, x6,#0
   112  add x10,x1,x6
   123  st1 {v22.d}[0],[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
   126  st1 {v24.d}[0],[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
   129  st1 {v26.d}[0],[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
   160  add x6,x0,x2 //pu1_src_tmp += src_strd
   163  ld1 {v3.8b},[x6],x2 //vld1_u8(pu1_src_tmp)
   164  ld1 {v5.8b},[x6],x2 //vld1_u8(pu1_src_tmp)
   [all …]
|
D | ihevc_intra_pred_chroma_dc.s |
   117  add x6, x0, x4,lsl #1 //&src[2nt]
   131  ld2 {v30.8b, v31.8b}, [x6], #16 //load from src[nt]
   159  ld2 {v30.8b, v31.8b}, [x6],#16 //load from src[nt]
   199  lsl x6, x3, #2
   200  csel x11, x6, x11,eq
   207  sub x6, x6, #16
   216  st2 {v16.8b, v17.8b}, [x2], x6
   217  st2 {v16.8b, v17.8b}, [x5], x6
   218  st2 {v16.8b, v17.8b}, [x8], x6
   219  st2 {v16.8b, v17.8b}, [x10], x6
   [all …]
|
D | ihevc_intra_pred_chroma_mode_18_34.s |
   127  csel x6, x20, x6,eq
   129  csel x6, x20, x6,ne
   136  ld1 {v0.8b, v1.8b},[x8],x6
   138  ld1 {v2.8b, v3.8b},[x8],x6
   140  ld1 {v4.8b, v5.8b},[x8],x6
   142  ld1 {v6.8b, v7.8b},[x8],x6
   144  ld1 {v16.8b, v17.8b},[x8],x6
   146  ld1 {v18.8b, v19.8b},[x8],x6
   148  ld1 {v20.8b, v21.8b},[x8],x6
   150  ld1 {v22.8b, v23.8b},[x8],x6
|
D | ihevc_intra_pred_chroma_planar.s |
   127  sub x6, x4, #1 //nt-1
   128  add x6, x0,x6,lsl #1 //2*(nt-1)
   129  ldr w7, [x6]
   133  add x6, x4, x4,lsl #1 //3nt
   134  add x6, x6, #1 //3nt + 1
   135  lsl x6,x6,#1 //2*(3nt + 1)
   137  add x6, x6, x0
   138  ldr w7, [x6]
   143  add x6, x4, x4 //2nt
   144  add x14, x6, #1 //2nt+1
   [all …]
|
D | ihevc_intra_pred_luma_dc.s |
   123  add x6, x0, x4 //&src[nt]
   153  ld1 {v0.8b},[x6],#8 //load from src[nt]
   163  ld1 {v0.8b},[x6],#8 //load from src[nt] (extra load for 8)
   185  ld1 {v0.8b},[x6],#8 //load from src[nt] (extra load for 16)
   201  csel x6, x20, x6,eq
   210  csel x6, x20, x6,ne //nt
   372  lsl x6, x3, #2
   373  sub x6, x6, #16
   380  st1 {v20.16b}, [x2], x6
   381  st1 {v20.16b}, [x5], x6
   [all …]
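DC prediction reduces the reference samples around the block to one average and floods the block with it; the ld1 ...,#8 loads starting at &src[nt] above accumulate the 2*nt samples feeding that average. Sketched below with the same src[nt] starting point; the exact corner handling and libhevc's extra first-row/column filtering for small luma blocks are omitted:

#include <stdint.h>

/* HEVC DC intra prediction, simplified: average 2*nt reference samples
 * (left column and top row) and fill the nt x nt block. */
static void intra_pred_dc(const uint8_t *pu1_ref, uint8_t *pu1_dst,
                          int dst_strd, int nt, int log2nt)
{
    int acc = nt;                        /* rounding term */
    for (int i = 0; i < 2 * nt; i++)
        acc += pu1_ref[nt + i];          /* the asm's 2*nt-byte run at src[nt] */
    uint8_t dc = (uint8_t)(acc >> (log2nt + 1));

    for (int row = 0; row < nt; row++)
        for (int col = 0; col < nt; col++)
            pu1_dst[row * dst_strd + col] = dc;
}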
|
D | ihevc_intra_pred_filters_luma_mode_19_to_25.s |
   129  add x6, sp, x4 //ref_temp + nt
   133  sub x6, x6, #1 //ref_temp + nt - 1
   144  st1 {v0.s}[0],[x6],#4 //ref_temp[k + nt - 1] = pu1_ref[two_nt + k]//
   149  sub x6, x6,#4
   161  st1 {v0.8b},[x6],#8
   162  st1 {v1.8b},[x6],#8
   163  st1 {v2.8b},[x6],#8
   164  st1 {v3.8b},[x6],#8
   171  st1 {v0.8b},[x6],#8
   172  st1 {v1.8b},[x6],#8
   [all …]
|
D | ihevc_intra_pred_luma_mode2.s |
   127  mov x6, x2
   139  add x7,x6,x3
   163  st1 {v16.8b},[x6],x5
   172  st1 {v20.8b},[x6],x5
   191  sub x20, x6, x14
   198  mov x6, x2
   205  add x7, x6, x3
   232  st1 {v16.8b},[x6],x5
   236  st1 {v20.8b},[x6],x5
   252  add x6,x5,x3
   [all …]
|
D | ihevc_intra_pred_luma_planar.s |
   124  sub x6, x4, #1 //nt-1
   125  add x6, x6, x0
   126  ldr w7, [x6]
   130  add x6, x4, x4,lsl #1 //3nt
   131  add x6, x6, #1 //3nt + 1
   132  add x6, x6, x0
   133  ldr w7, [x6]
   137  add x6, x4, x4 //2nt
   138  add x14, x6, #1 //2nt+1
   139  sub x6, x6, #1 //2nt-1
   [all …]
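Planar prediction blends four references per pixel; the two ldr w7 loads above pick up pu1_ref[nt-1] (bottom-left) and pu1_ref[3nt+1] (top-right) ahead of the per-pixel loop. The HEVC planar formula in scalar form; the reference-array layout (corner at index 2*nt, left column stored below it going down) is the usual libhevc convention but is asserted here, not verified:

#include <stdint.h>

/* HEVC planar intra prediction:
 * pred[y][x] = ((nt-1-x)*left[y] + (x+1)*topright
 *             + (nt-1-y)*top[x]  + (y+1)*bottomleft + nt) >> (log2nt+1) */
static void intra_pred_planar(const uint8_t *pu1_ref, uint8_t *pu1_dst,
                              int dst_strd, int nt, int log2nt)
{
    const uint8_t topright   = pu1_ref[3 * nt + 1];
    const uint8_t bottomleft = pu1_ref[nt - 1];

    for (int y = 0; y < nt; y++) {
        for (int x = 0; x < nt; x++) {
            int left = pu1_ref[2 * nt - 1 - y];
            int top  = pu1_ref[2 * nt + 1 + x];
            pu1_dst[y * dst_strd + x] = (uint8_t)(
                ((nt - 1 - x) * left + (x + 1) * topright +
                 (nt - 1 - y) * top  + (y + 1) * bottomleft + nt)
                >> (log2nt + 1));
        }
    }
}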
|
/external/libavc/common/armv8/ |
D | ih264_padding_neon_av8.s |
    93  neg x6, x1
   103  st1 {v0.8b, v1.8b}, [x4], x6
   181  sub x6, x1, #16
   231  st1 {v0.16b}, [x4], x6
   234  st1 {v2.16b}, [x4], x6 // 16 bytes store
   239  st1 {v4.16b}, [x4], x6 // 16 bytes store
   246  st1 {v6.16b}, [x4], x6 // 16 bytes store
   251  st1 {v0.16b}, [x4], x6 // 16 bytes store
   256  st1 {v2.16b}, [x4], x6 // 16 bytes store
   259  st1 {v4.16b}, [x4], x6 // 16 bytes store
   [all …]
|
D | ih264_inter_pred_luma_copy_av8.s |
   107  add x6, x1, x3 //pu1_dst_tmp += dst_strd
   111  st1 {v0.s}[0], [x6], x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
   114  st1 {v0.s}[0], [x6], x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
   117  st1 {v0.s}[0], [x6], x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
   124  sub x1, x6, x11 //pu1_dst = pu1_dst_tmp
   144  add x6, x1, x3 //pu1_dst_tmp += dst_strd
   147  st1 {v1.8b}, [x6], x3 //vst1_u8(pu1_dst_tmp, tmp_src)
   150  st1 {v2.8b}, [x6], x3 //vst1_u8(pu1_dst_tmp, tmp_src)
   152  st1 {v3.8b}, [x6], x3 //vst1_u8(pu1_dst_tmp, tmp_src)
   158  sub x1, x6, x11 //pu1_dst = pu1_dst_tmp
   [all …]
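This is a plain strided block copy; the //vst1_lane_u32 and //vst1_u8 comments give the NEON-intrinsic reading of each store, with x6 serving as pu1_dst_tmp, the per-column row cursor advanced by dst_strd. The whole kernel collapses to a scalar reference like:

#include <stdint.h>
#include <string.h>

/* Copy an ht x wd pixel window between two strided buffers. */
static void inter_pred_luma_copy(const uint8_t *pu1_src, uint8_t *pu1_dst,
                                 int src_strd, int dst_strd, int ht, int wd)
{
    for (int row = 0; row < ht; row++)
        memcpy(pu1_dst + row * dst_strd, pu1_src + row * src_strd, (size_t)wd);
}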
|
/external/llvm/test/MC/Disassembler/AMDGPU/ |
D | sopk_vi.txt |
     3  # VI: s_cmovk_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb0]
     6  # VI: s_cmpk_eq_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb1]
     9  # VI: s_cmpk_lg_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb1]
    12  # VI: s_cmpk_gt_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb2]
    15  # VI: s_cmpk_ge_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb2]
    18  # VI: s_cmpk_lt_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb3]
    21  # VI: s_cmpk_le_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb3]
    24  # VI: s_cmpk_eq_u32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb4]
    27  # VI: s_cmpk_lg_u32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb4]
    30  # VI: s_cmpk_gt_u32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb5]
    [all …]
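Each check line pairs a disassembly with its 32-bit SOPK encoding, bytes listed little-endian. SOPK packs a 16-bit immediate, a 7-bit scalar destination and a 5-bit opcode under the 0b1011 encoding prefix; the field positions below follow my reading of the GCN3 ISA documentation, so treat the layout as an assumption. Decoding the first vector by hand:

#include <stdint.h>
#include <stdio.h>

/* Decode an AMDGPU SOPK word: [31:28]=0b1011, [27:23]=opcode,
 * [22:16]=sdst, [15:0]=simm16 (field layout assumed from the ISA docs). */
int main(void)
{
    const uint8_t enc[4] = {0x06, 0x00, 0x82, 0xb0};   /* s_cmovk_i32 s2, 0x6 */
    uint32_t w = (uint32_t)enc[0] | (uint32_t)enc[1] << 8 |
                 (uint32_t)enc[2] << 16 | (uint32_t)enc[3] << 24;

    printf("prefix=0x%x op=%u sdst=s%u simm16=0x%x\n",
           w >> 28, (w >> 23) & 0x1f, (w >> 16) & 0x7f, w & 0xffff);
    /* prints: prefix=0xb op=1 sdst=s2 simm16=0x6 */
    return 0;
}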
|
/external/libmpeg2/common/armv8/ |
D | impeg2_format_conv.s |
   146  sub x7, x7, x6 //// Source increment
   148  sub x8, x8, x6 //// Destination increment
   152  mov x16, x6
   189  sub x7, x7, x6, lsr #1 //// Source increment
   191  sub x8, x8, x6 //// Destination increment
   193  lsr x6, x6, #1
   196  mov x16, x6
   319  sub x7, x7, x6 //// Source increment
   321  sub x8, x8, x6 //// Destination increment
   325  mov x16, x6
   [all …]
|
/external/lzma/Asm/x86/ |
D | 7zCrcOpt.asm |
    36  movzx x6, BYTE PTR [rD]
    39  xor x6, x3
    88  mov x6, [SRCDAT 2]
    90  CRC_XOR x6, r3, 3
    92  CRC_XOR x6, r3, 2
    96  CRC_XOR x6, r3, 1
    98  CRC_XOR x6, r1, 0
   101  CRC_XOR x6, r3, 7
   104  CRC_XOR x6, r3, 6
   106  CRC_XOR x6, r3, 5
   [all …]
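7zCrcOpt is a table-driven CRC-32 that folds several input bytes per iteration using multiple lookup tables (the CRC_XOR lines index tables 0..7). The byte-at-a-time form that its movzx/xor head and tail loops reduce to, as a sketch:

#include <stdint.h>
#include <stddef.h>

/* Classic reflected CRC-32 (polynomial 0xEDB88320), one byte per step:
 * crc = table[(crc ^ *p) & 0xff] ^ (crc >> 8). The optimized asm uses
 * 4 or 8 such tables to consume a whole word per iteration, XOR-folding
 * the per-byte lookups (the CRC_XOR macro above). */
static uint32_t crc_table[256];

static void crc_init(void)
{
    for (uint32_t i = 0; i < 256; i++) {
        uint32_t r = i;
        for (int j = 0; j < 8; j++)
            r = (r >> 1) ^ (0xEDB88320u & (0u - (r & 1)));
        crc_table[i] = r;
    }
}

static uint32_t crc_update(uint32_t crc, const uint8_t *data, size_t size)
{
    for (size_t i = 0; i < size; i++)
        crc = crc_table[(crc ^ data[i]) & 0xff] ^ (crc >> 8);
    return crc;
}

/* Usage: crc_init(); uint32_t c = crc_update(0xffffffffu, buf, n) ^ 0xffffffffu; */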
|
/external/boringssl/linux-aarch64/crypto/bn/ |
D | armv8-mont.S |
    28  mul x6,x7,x9 // ap[0]*bp[0]
    34  mul x15,x6,x4 // "tp[0]"*n0
    50  subs xzr,x6,#1 // (*)
    57  adds x6,x10,x7
    67  adds x12,x12,x6
    75  adds x6,x10,x7
    83  adds x12,x12,x6
    96  mul x6,x7,x9 // ap[0]*bp[i]
   101  adds x6,x6,x23
   105  mul x15,x6,x4
   [all …]
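armv8-mont.S is OpenSSL-style Montgomery multiplication: per word of b, accumulate ap[]*bp[i], multiply the low limb of the running total by n0 = -n^{-1} mod 2^64 (the "tp[0]"*n0 line), add that multiple of the modulus so the low limb cancels, and shift down one limb. The single-limb reduction step, sketched with __int128; this is a hypothetical helper illustrating the step, not the OpenSSL entry point:

#include <stdint.h>

typedef unsigned __int128 u128;

/* One-limb Montgomery reduction (R = 2^64): given t = a*b < n^2, n odd,
 * and n0 = -n^{-1} mod 2^64, return a*b*R^{-1} mod n. The assembly
 * performs this cancel-low-limb-and-shift dance across many limbs. */
static uint64_t mont_redc(u128 t, uint64_t n, uint64_t n0)
{
    uint64_t m = (uint64_t)t * n0;   /* cf. mul x15,x6,x4  "tp[0]"*n0 */
    u128 u = t + (u128)m * n;        /* low 64 bits of u are now zero */
    int carry = u < t;               /* overflow past 2^128           */
    uint64_t r = (uint64_t)(u >> 64);
    if (carry || r >= n)
        r -= n;                      /* result is < 2n before this    */
    return r;
}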
|
/external/libavc/common/ |
D | ih264_resi_trans_quant.c |
   125  WORD32 x0, x1, x2, x3, x4, x5, x6, x7; in ih264_resi_trans_quant_4x4() local
   136  x6 = pu1_src[2] - pu1_pred[2]; in ih264_resi_trans_quant_4x4()
   141  x1 = x5 + x6; in ih264_resi_trans_quant_4x4()
   142  x2 = x5 - x6; in ih264_resi_trans_quant_4x4()
   163  x6 = pi2_out_tmp[8]; in ih264_resi_trans_quant_4x4()
   168  x1 = x5 + x6; in ih264_resi_trans_quant_4x4()
   169  x2 = x5 - x6; in ih264_resi_trans_quant_4x4()
   273  WORD32 x0, x1, x2, x3, x4, x5, x6, x7; in ih264_resi_trans_quant_chroma_4x4() local
   284  x6 = pu1_src[4] - pu1_pred[4]; in ih264_resi_trans_quant_chroma_4x4()
   289  x1 = x5 + x6; in ih264_resi_trans_quant_chroma_4x4()
   [all …]
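The x1 = x5 + x6 / x2 = x5 - x6 pairs are the even/odd butterflies of the H.264 4x4 forward core transform, applied first to rows of the residual (src - pred), then to columns of the intermediate (x6 = pi2_out_tmp[8] reloads a column element). The standard 1-D pass on its own:

/* One 4-point pass of the H.264 forward core transform (the butterfly
 * the snippet applies to rows, then to columns). */
static void fwd_core_4(const int d[4], int out[4])
{
    int a = d[0] + d[3];       /* even part */
    int b = d[1] + d[2];
    int c = d[1] - d[2];       /* odd part  */
    int e = d[0] - d[3];

    out[0] = a + b;
    out[2] = a - b;
    out[1] = 2 * e + c;
    out[3] = e - 2 * c;
}

Quantization then scales each coefficient against the quant matrix, with the rounding and sign logic elided here.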
|
/external/boringssl/src/ssl/test/runner/poly1305/ |
D | sum_ref.go |
    48  x6 float64
   504  x6 = h6 - y6
   543  x6 += y4
   555  x6 += x7
   568  r0lowx6 = r0low * x6
   576  r0highx6 = r0high * x6
   584  sr1lowx6 = sr1low * x6
   592  sr1highx6 = sr1high * x6
   600  sr2lowx6 = sr2low * x6
   608  sr2highx6 = sr2high * x6
   [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | itrans8_dspr2.c |
   615  int x0, x1, x2, x3, x4, x5, x6, x7; in iadst8_dspr2() local
   623  x6 = input[1]; in iadst8_dspr2()
   626  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) { in iadst8_dspr2()
   639  s6 = cospi_26_64 * x6 + cospi_6_64 * x7; in iadst8_dspr2()
   640  s7 = cospi_6_64 * x6 - cospi_26_64 * x7; in iadst8_dspr2()
   648  x6 = ROUND_POWER_OF_TWO((s2 - s6), DCT_CONST_BITS); in iadst8_dspr2()
   658  s6 = -cospi_24_64 * x6 + cospi_8_64 * x7; in iadst8_dspr2()
   659  s7 = cospi_8_64 * x6 + cospi_24_64 * x7; in iadst8_dspr2()
   667  x6 = ROUND_POWER_OF_TWO((s4 - s6), DCT_CONST_BITS); in iadst8_dspr2()
   673  s6 = cospi_16_64 * (x6 + x7); in iadst8_dspr2()
   [all …]
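iadst8 is built from fixed-point plane rotations: each s6/s7 pair rotates (x6, x7) by a cospi angle, then ROUND_POWER_OF_TWO renormalizes by DCT_CONST_BITS (14 in libvpx, with cospi_k_64 = round(cos(k*pi/64) * 2^14)). The rotation primitive in isolation, using the macro as libvpx defines it; the helper name is mine:

#include <stdint.h>

#define DCT_CONST_BITS 14
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

/* Fixed-point Givens rotation used throughout the (i)dct/(i)adst stages:
 * (a, b) -> ((a*c1 + b*c2) >> 14, (a*c2 - b*c1) >> 14), where c1/c2 are
 * cospi constants scaled by 2^14. Matches the s6/s7 lines above with
 * c1 = cospi_26_64, c2 = cospi_6_64. */
static void butterfly_rotate(int32_t *a, int32_t *b, int c1, int c2)
{
    int64_t s0 = (int64_t)c1 * *a + (int64_t)c2 * *b;
    int64_t s1 = (int64_t)c2 * *a - (int64_t)c1 * *b;
    *a = (int32_t)ROUND_POWER_OF_TWO(s0, DCT_CONST_BITS);
    *b = (int32_t)ROUND_POWER_OF_TWO(s1, DCT_CONST_BITS);
}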
|