Searched refs:x6 (Results 1 – 25 of 519) sorted by relevance

/external/libxaac/decoder/armv8/
ixheaacd_sbr_qmf_analysis32_neon.s
58 LDRSH w4 , [x6]
59 ADD x6, x6, x9
60 LDRSH w8 , [x6]
61 ADD x6, x6, x9
62 LDRSH w11 , [x6]
63 ADD x6, x6, x9
64 LDRSH w12 , [x6]
65 ADD x6, x6, x9
72 LDRSH w4 , [x6]
73 ADD x6, x6, x9
[all …]
ixheaacd_apply_scale_factors.s
33 MOV x21, x6
73 SUBS x6, x11, x5, ASR #2 // 37-(scale_factor >> 2)
83 SUB x14, x6, #1 //dont do that extra LSL #1 in SMULWB
90 SMULL x6, w6, w17
95 ASR x6, x6, #16
100 ASR x6, x6, x14 // buffex1 = shx32(buffex1, shift);
115 NEGS x14, x6
122 SMULL x6, w6, w17
125 ASR x6, x6, #16
128 LSL x6, x6, #1
[all …]
/external/libhevc/common/arm64/
ihevc_intra_pred_luma_mode_18_34.s
127 csel x6, x20, x6,eq
129 csel x6, x20, x6,ne
134 ld1 {v0.8b},[x8],x6
136 ld1 {v1.8b},[x8],x6
138 ld1 {v2.8b},[x8],x6
139 ld1 {v3.8b},[x8],x6
141 ld1 {v4.8b},[x8],x6
142 ld1 {v5.8b},[x8],x6
143 ld1 {v6.8b},[x8],x6
145 ld1 {v7.8b},[x8],x6
[all …]
ihevc_padding.s
123 add x6,x5,x1
131 add x7,x6,x1
133 st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
134 st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
135 st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
136 st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
137 st1 {v4.16b},[x6] //128/8 = 16 bytes store
241 add x6,x5,x1
249 add x7,x6,x1
251 st1 {v4.16b},[x6],#16 //128/8 = 16 bytes store
[all …]
ihevc_inter_pred_chroma_vert_w16out.s
113 mov x17,x6 // wd
119 mov x6,x17 //loads wd
125 tst x6,#3 //checks (wd & 3)
127 lsl x10,x6,#1 //2*wd
147 add x6,x0,x2 //pu1_src +src_strd
148 ld1 {v17.8b},[x6],x2 //loads pu1_src
152 ld1 {v4.8b},[x6],x2 //loads incremented src
154 ld1 {v16.8b},[x6],x2 //loads incremented src
157 ld1 {v18.8b},[x6] //loads the incremented src
162 add x6,x1,x3 //pu1_dst + dst_strd
[all …]
ihevc_inter_pred_chroma_copy.s
103 LSL x12,x6,#1 //wd << 1
123 ADD x6,x1,x3 //pu1_dst_tmp += dst_strd
127 ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
130 ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
133 ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
139 SUB x1,x6,x11 //pu1_dst = pu1_dst_tmp
154 ADD x6,x1,x3 //pu1_dst_tmp += dst_strd
158 ST1 {v0.s}[0],[x6],x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
177 ADD x6,x1,x3 //pu1_dst_tmp += dst_strd
180 ST1 {v1.8b},[x6],x3 //vst1_u8(pu1_dst_tmp, tmp_src)
[all …]
ihevc_inter_pred_chroma_vert.s
112 mov x17,x6 // wd
117 mov x6,x17 //loads wd
123 tst x6,#3 //checks (wd & 3)
125 lsl x10,x6,#1 //2*wd
144 add x6,x0,x2 //pu1_src +src_strd
145 ld1 {v17.8b},[x6],x2 //loads pu1_src
149 ld1 {v4.8b},[x6],x2 //loads incremented src
151 ld1 {v16.8b},[x6],x2 //loads incremented src
156 ld1 {v18.8b},[x6] //loads the incremented src
160 add x6,x1,x3 //pu1_dst + dst_strd
[all …]
ihevc_inter_pred_chroma_copy_w16out.s
112 mov x17,x6 // wd
130 lsl x6, x3,#1
131 adds x6, x6,#0
143 add x10,x1,x6
154 st1 {v22.1d},[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
157 st1 {v24.1d},[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
160 st1 {v26.1d},[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
187 add x10,x1,x6
198 st1 {v22.1d},[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
220 add x6,x0,x2 //pu1_src_tmp += src_strd
[all …]
ihevc_inter_pred_luma_copy_w16out.s
92 mov x17,x6 // wd
101 lsl x6, x3,#1
102 adds x6, x6,#0
112 add x10,x1,x6
123 st1 {v22.d}[0],[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
126 st1 {v24.d}[0],[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
129 st1 {v26.d}[0],[x10],x6 //vst1q_lane_s64(pi2_dst_tmp, temp, 0)
160 add x6,x0,x2 //pu1_src_tmp += src_strd
163 ld1 {v3.8b},[x6],x2 //vld1_u8(pu1_src_tmp)
164 ld1 {v5.8b},[x6],x2 //vld1_u8(pu1_src_tmp)
[all …]
ihevc_intra_pred_chroma_planar.s
127 sub x6, x4, #1 //nt-1
128 add x6, x0,x6,lsl #1 //2*(nt-1)
129 ldr w7, [x6]
133 add x6, x4, x4,lsl #1 //3nt
134 add x6, x6, #1 //3nt + 1
135 lsl x6,x6,#1 //2*(3nt + 1)
137 add x6, x6, x0
138 ldr w7, [x6]
143 add x6, x4, x4 //2nt
144 add x14, x6, #1 //2nt+1
[all …]
ihevc_intra_pred_chroma_dc.s
117 add x6, x0, x4,lsl #1 //&src[2nt]
131 ld2 {v30.8b, v31.8b}, [x6], #16 //load from src[nt]
159 ld2 {v30.8b, v31.8b}, [x6],#16 //load from src[nt]
199 lsl x6, x3, #2
200 csel x11, x6, x11,eq
207 sub x6, x6, #16
216 st2 {v16.8b, v17.8b}, [x2], x6
217 st2 {v16.8b, v17.8b}, [x5], x6
218 st2 {v16.8b, v17.8b}, [x8], x6
219 st2 {v16.8b, v17.8b}, [x10], x6
[all …]
ihevc_intra_pred_chroma_mode_18_34.s
127 csel x6, x20, x6,eq
129 csel x6, x20, x6,ne
136 ld1 {v0.8b, v1.8b},[x8],x6
138 ld1 {v2.8b, v3.8b},[x8],x6
140 ld1 {v4.8b, v5.8b},[x8],x6
142 ld1 {v6.8b, v7.8b},[x8],x6
144 ld1 {v16.8b, v17.8b},[x8],x6
146 ld1 {v18.8b, v19.8b},[x8],x6
148 ld1 {v20.8b, v21.8b},[x8],x6
150 ld1 {v22.8b, v23.8b},[x8],x6
ihevc_intra_pred_luma_dc.s
123 add x6, x0, x4 //&src[nt]
153 ld1 {v0.8b},[x6],#8 //load from src[nt]
163 ld1 {v0.8b},[x6],#8 //load from src[nt] (extra load for 8)
185 ld1 {v0.8b},[x6],#8 //load from src[nt] (extra load for 16)
201 csel x6, x20, x6,eq
210 csel x6, x20, x6,ne //nt
372 lsl x6, x3, #2
373 sub x6, x6, #16
380 st1 {v20.16b}, [x2], x6
381 st1 {v20.16b}, [x5], x6
[all …]
ihevc_intra_pred_filters_luma_mode_19_to_25.s
129 add x6, sp, x4 //ref_temp + nt
133 sub x6, x6, #1 //ref_temp + nt - 1
144 st1 {v0.s}[0],[x6],#4 //ref_temp[k + nt - 1] = pu1_ref[two_nt + k]//
149 sub x6, x6,#4
161 st1 {v0.8b},[x6],#8
162 st1 {v1.8b},[x6],#8
163 st1 {v2.8b},[x6],#8
164 st1 {v3.8b},[x6],#8
171 st1 {v0.8b},[x6],#8
172 st1 {v1.8b},[x6],#8
[all …]
ihevc_intra_pred_luma_planar.s
124 sub x6, x4, #1 //nt-1
125 add x6, x6, x0
126 ldr w7, [x6]
130 add x6, x4, x4,lsl #1 //3nt
131 add x6, x6, #1 //3nt + 1
132 add x6, x6, x0
133 ldr w7, [x6]
137 add x6, x4, x4 //2nt
138 add x14, x6, #1 //2nt+1
139 sub x6, x6, #1 //2nt-1
[all …]
/external/libhevc/decoder/arm64/
ihevcd_fmt_conv_420sp_to_420p.s
93 mov x9, x6 ////Load u2_height
111 MOV x6,x8 //// Copying width
115 SUB x6,x6,#16
118 CMP x6,#16
120 CMP x6,#0
126 sub x20,x6,#16
127 neg x6, x20
128 SUB x0,x0,x6
129 SUB x2,x2,x6
166 MOV x6,x8 //// Copying width
[all …]
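
For orientation, ihevcd_fmt_conv_420sp_to_420p converts semi-planar 4:2:0 chroma (interleaved UV, NV12-style) into separate U and V planes; the matched lines above only show the width and stride bookkeeping around the NEON loop. Below is a minimal plain-C sketch of that deinterleave under that assumption; the function and parameter names are hypothetical and do not reflect the library's actual interface.

#include <stdint.h>

/* Hypothetical plain-C equivalent of a 420SP -> 420P chroma deinterleave:
 * split an interleaved UV plane into separate U and V planes.
 * Strides are in bytes; wd/ht are the chroma plane dimensions. */
static void chroma_deinterleave(const uint8_t *uv, uint8_t *u, uint8_t *v,
                                int wd, int ht, int src_strd, int dst_strd)
{
    for (int y = 0; y < ht; y++) {
        for (int x = 0; x < wd; x++) {
            u[x] = uv[2 * x];       /* even bytes -> U plane */
            v[x] = uv[2 * x + 1];   /* odd bytes  -> V plane */
        }
        uv += src_strd;
        u += dst_strd;
        v += dst_strd;
    }
}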
ihevcd_fmt_conv_420sp_to_420sp.s
100 mov x7, x6 ////Load u2_stridey
109 MOV x6,x8 //// Copying width
113 SUB x6,x6,#32
122 CMP x6,#32
124 CMP x6,#0
130 sub x20,x6,#32
131 neg x6, x20
132 SUB x0,x0,x6
133 SUB x2,x2,x6
166 MOV x6,x8 //// Copying width
[all …]
/external/libavc/common/armv8/
ih264_padding_neon_av8.s
93 neg x6, x1
103 st1 {v0.8b, v1.8b}, [x4], x6
181 sub x6, x1, #16
231 st1 {v0.16b}, [x4], x6
234 st1 {v2.16b}, [x4], x6 // 16 bytes store
239 st1 {v4.16b}, [x4], x6 // 16 bytes store
246 st1 {v6.16b}, [x4], x6 // 16 bytes store
251 st1 {v0.16b}, [x4], x6 // 16 bytes store
256 st1 {v2.16b}, [x4], x6 // 16 bytes store
259 st1 {v4.16b}, [x4], x6 // 16 bytes store
[all …]
ih264_inter_pred_luma_copy_av8.s
107 add x6, x1, x3 //pu1_dst_tmp += dst_strd
111 st1 {v0.s}[0], [x6], x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
114 st1 {v0.s}[0], [x6], x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
117 st1 {v0.s}[0], [x6], x3 //vst1_lane_u32((uint32_t *)pu1_dst_tmp, src_tmp, 0)
124 sub x1, x6, x11 //pu1_dst = pu1_dst_tmp
144 add x6, x1, x3 //pu1_dst_tmp += dst_strd
147 st1 {v1.8b}, [x6], x3 //vst1_u8(pu1_dst_tmp, tmp_src)
150 st1 {v2.8b}, [x6], x3 //vst1_u8(pu1_dst_tmp, tmp_src)
152 st1 {v3.8b}, [x6], x3 //vst1_u8(pu1_dst_tmp, tmp_src)
158 sub x1, x6, x11 //pu1_dst = pu1_dst_tmp
[all …]
/external/llvm/test/MC/Disassembler/AMDGPU/
sopk_vi.txt
3 # VI: s_cmovk_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb0]
6 # VI: s_cmpk_eq_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb1]
9 # VI: s_cmpk_lg_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb1]
12 # VI: s_cmpk_gt_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb2]
15 # VI: s_cmpk_ge_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb2]
18 # VI: s_cmpk_lt_i32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb3]
21 # VI: s_cmpk_le_i32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb3]
24 # VI: s_cmpk_eq_u32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb4]
27 # VI: s_cmpk_lg_u32 s2, 0x6 ; encoding: [0x06,0x00,0x82,0xb4]
30 # VI: s_cmpk_gt_u32 s2, 0x6 ; encoding: [0x06,0x00,0x02,0xb5]
[all …]
/external/boringssl/ios-aarch64/crypto/fipsmodule/
armv8-mont.S
27 mul x6,x7,x9 // ap[0]*bp[0]
33 mul x15,x6,x4 // "tp[0]"*n0
49 subs xzr,x6,#1 // (*)
56 adds x6,x10,x7
66 adds x12,x12,x6
74 adds x6,x10,x7
82 adds x12,x12,x6
95 mul x6,x7,x9 // ap[0]*bp[i]
100 adds x6,x6,x23
104 mul x15,x6,x4
[all …]
/external/boringssl/linux-aarch64/crypto/fipsmodule/
armv8-mont.S
28 mul x6,x7,x9 // ap[0]*bp[0]
34 mul x15,x6,x4 // "tp[0]"*n0
50 subs xzr,x6,#1 // (*)
57 adds x6,x10,x7
67 adds x12,x12,x6
75 adds x6,x10,x7
83 adds x12,x12,x6
96 mul x6,x7,x9 // ap[0]*bp[i]
101 adds x6,x6,x23
105 mul x15,x6,x4
[all …]
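
The two armv8-mont.S listings (ios-aarch64 and linux-aarch64) appear to be the same generated BoringSSL Montgomery-multiplication code built for two platforms, which is why the matched lines differ only by a one-line offset. As a rough illustration of what the "ap[0]*bp[i]" and ""tp[0]"*n0" comments refer to, here is a minimal word-serial (CIOS-style) Montgomery multiplication step in C. The names and the layout of tp are hypothetical; this is not the bn_mul_mont interface, and the final conditional subtraction of the modulus after the outer loop is not shown.

#include <stdint.h>

typedef unsigned __int128 u128;  /* GCC/Clang extension for 64x64-bit products */

/* One outer-loop iteration of word-serial (CIOS) Montgomery multiplication:
 * tp += ap * bi, then reduce by adding m*n with m = tp[0]*n0 and dropping
 * the (now zero) low word. tp has num+2 limbs; all names are hypothetical. */
static void mont_step(uint64_t *tp, const uint64_t *ap, const uint64_t *np,
                      uint64_t bi, uint64_t n0, int num)
{
    u128 acc = 0;
    for (int j = 0; j < num; j++) {          /* tp += ap * bp[i] */
        acc += (u128)ap[j] * bi + tp[j];
        tp[j] = (uint64_t)acc;
        acc >>= 64;
    }
    acc += tp[num];
    tp[num] = (uint64_t)acc;
    tp[num + 1] = (uint64_t)(acc >> 64);

    uint64_t m = tp[0] * n0;                 /* "tp[0]"*n0 */
    acc = ((u128)m * np[0] + tp[0]) >> 64;   /* low word becomes zero */
    for (int j = 1; j < num; j++) {          /* tp = (tp + m*n) >> 64 */
        acc += (u128)m * np[j] + tp[j];
        tp[j - 1] = (uint64_t)acc;
        acc >>= 64;
    }
    acc += tp[num];
    tp[num - 1] = (uint64_t)acc;
    tp[num] = tp[num + 1] + (uint64_t)(acc >> 64);
}

Running this step once per limb of b and then conditionally subtracting n leaves a*b*R^-1 mod n in tp, which is the standard CIOS result.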
/external/libmpeg2/common/armv8/
impeg2_format_conv.s
146 sub x7, x7, x6 //// Source increment
148 sub x8, x8, x6 //// Destination increment
152 mov x16, x6
189 sub x7, x7, x6, lsr #1 //// Source increment
191 sub x8, x8, x6 //// Destination increment
193 lsr x6, x6, #1
196 mov x16, x6
319 sub x7, x7, x6 //// Source increment
321 sub x8, x8, x6 //// Destination increment
325 mov x16, x6
[all …]
/external/lzma/Asm/x86/
7zCrcOpt.asm
36 movzx x6, BYTE PTR [rD]
39 xor x6, x3
88 mov x6, [SRCDAT 2]
90 CRC_XOR x6, r3, 3
92 CRC_XOR x6, r3, 2
96 CRC_XOR x6, r3, 1
98 CRC_XOR x6, r1, 0
101 CRC_XOR x6, r3, 7
104 CRC_XOR x6, r3, 6
106 CRC_XOR x6, r3, 5
[all …]
/external/libavc/common/
ih264_resi_trans_quant.c
125 WORD32 x0, x1, x2, x3, x4, x5, x6, x7; in ih264_resi_trans_quant_4x4() local
136 x6 = pu1_src[2] - pu1_pred[2]; in ih264_resi_trans_quant_4x4()
141 x1 = x5 + x6; in ih264_resi_trans_quant_4x4()
142 x2 = x5 - x6; in ih264_resi_trans_quant_4x4()
163 x6 = pi2_out_tmp[8]; in ih264_resi_trans_quant_4x4()
168 x1 = x5 + x6; in ih264_resi_trans_quant_4x4()
169 x2 = x5 - x6; in ih264_resi_trans_quant_4x4()
273 WORD32 x0, x1, x2, x3, x4, x5, x6, x7; in ih264_resi_trans_quant_chroma_4x4() local
284 x6 = pu1_src[4] - pu1_pred[4]; in ih264_resi_trans_quant_chroma_4x4()
289 x1 = x5 + x6; in ih264_resi_trans_quant_chroma_4x4()
[all …]
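
The matched C lines above show the add/sub butterflies of the forward 4x4 transform applied to the residual (src - pred). As an illustration of that pattern only, here is one row of an H.264-style forward 4x4 core transform in plain C; the function and variable names are made up for this sketch and do not come from ih264_resi_trans_quant.c, and the quantization that follows in the library routine is left out.

#include <stdint.h>

/* Illustrative only: residual computation plus one row of the H.264-style
 * forward 4x4 core transform, mirroring the "x1 = x5 + x6; x2 = x5 - x6;"
 * butterfly pattern in the listing. Quantization is not shown. */
static void fwd_tx4_row(const uint8_t *src, const uint8_t *pred, int32_t *out)
{
    int32_t d0 = src[0] - pred[0];   /* residuals */
    int32_t d1 = src[1] - pred[1];
    int32_t d2 = src[2] - pred[2];
    int32_t d3 = src[3] - pred[3];

    int32_t s0 = d0 + d3;            /* butterfly stage */
    int32_t s3 = d0 - d3;
    int32_t s1 = d1 + d2;
    int32_t s2 = d1 - d2;

    out[0] = s0 + s1;                /* core transform outputs */
    out[2] = s0 - s1;
    out[1] = (s3 << 1) + s2;
    out[3] = s3 - (s2 << 1);
}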
