/external/libhevc/common/arm/
ihevc_inter_pred_chroma_copy_w16out.s
    139  vld1.8 {d0},[r0] @vld1_u8(pu1_src_tmp)
    141  vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
    145  vld1.8 {d22},[r5],r2 @vld1_u8(pu1_src_tmp)
    149  vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp)
    150  vld1.8 {d24},[r5],r2 @vld1_u8(pu1_src_tmp)
    152  vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp)
    155  vld1.8 {d26},[r5],r2 @vld1_u8(pu1_src_tmp)
    157  vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp)
    181  vld1.8 {d0},[r0] @vld1_u8(pu1_src_tmp)
    183  vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
    [all …]

ihevc_inter_pred_luma_copy_w16out.s
    107  vld1.8 {d0},[r0] @vld1_u8(pu1_src_tmp)
    109  vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
    113  vld1.8 {d22},[r5],r2 @vld1_u8(pu1_src_tmp)
    117  vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp)
    118  vld1.8 {d24},[r5],r2 @vld1_u8(pu1_src_tmp)
    120  vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp)
    123  vld1.8 {d26},[r5],r2 @vld1_u8(pu1_src_tmp)
    125  vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp)
    152  add r6,r0,r2 @pu1_src_tmp += src_strd
    154  vld1.8 {d8},[r0]! @vld1_u8(pu1_src_tmp)
    [all …]

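The @ comments in these two copy_w16out kernels name the NEON intrinsics each instruction implements: an 8-byte vld1_u8 load followed by a vmovl_u8 widen to 16 bits, with the row pointer stepped by the source stride. A minimal C sketch of that load-and-widen step; the function name, strides and the plain vst1q_u16 store are assumptions, and the real kernels also scale the widened samples before storing, which the excerpt does not show:

#include <arm_neon.h>
#include <stdint.h>

static void copy_block_8wide_to_u16(const uint8_t *pu1_src, int src_strd,
                                    uint16_t *pi2_dst, int dst_strd, int ht)
{
    for (int row = 0; row < ht; row++)
    {
        uint8x8_t  src8  = vld1_u8(pu1_src);   /* vld1.8  {d0},[r0]  */
        uint16x8_t src16 = vmovl_u8(src8);     /* vmovl.u8 q0,d0     */
        vst1q_u16(pi2_dst, src16);             /* assumed plain store */
        pu1_src += src_strd;                   /* pu1_src_tmp += src_strd */
        pi2_dst += dst_strd;
    }
}
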
ihevc_inter_pred_chroma_copy.s
    131  vld1.32 {d0[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    132  add r5,r0,r2 @pu1_src_tmp += src_strd
    135  vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    138  vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    141  vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    148  sub r0,r5,r11 @pu1_src = pu1_src_tmp
    163  vld1.32 {d0[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    164  add r5,r0,r2 @pu1_src_tmp += src_strd
    167  vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    185  add r5,r0,r2 @pu1_src_tmp += src_strd
    [all …]

ihevc_inter_pred_luma_copy.s
    106  vld1.32 {d0[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    107  add r5,r0,r2 @pu1_src_tmp += src_strd
    110  vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    113  vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    116  vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    124  sub r0,r5,r11 @pu1_src = pu1_src_tmp
    141  add r5,r0,r2 @pu1_src_tmp += src_strd
    142  vld1.8 {d0},[r0]! @vld1_u8(pu1_src_tmp)
    145  vld1.8 {d1},[r5],r2 @vld1_u8(pu1_src_tmp)
    148  vld1.8 {d2},[r5],r2 @vld1_u8(pu1_src_tmp)
    [all …]

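For the 4-pixel-wide cases, both copy kernels fetch each row as a single 32-bit lane load, as the vld1_lane_u32 comments indicate. A hedged sketch of that path; only the load pattern comes from the listing, while the lane store and the function shape are assumptions:

#include <arm_neon.h>
#include <stdint.h>

static void copy_block_4wide(const uint8_t *pu1_src, int src_strd,
                             uint8_t *pu1_dst, int dst_strd, int ht)
{
    uint32x2_t row = vdup_n_u32(0);
    for (int i = 0; i < ht; i++)
    {
        /* vld1.32 {d0[0]},[r5],r2 -> vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0) */
        row = vld1_lane_u32((const uint32_t *)pu1_src, row, 0);
        vst1_lane_u32((uint32_t *)pu1_dst, row, 0);   /* assumed matching store */
        pu1_src += src_strd;                          /* pu1_src_tmp += src_strd */
        pu1_dst += dst_strd;
    }
}
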
ihevc_inter_pred_filters_luma_vert.s
    159  add r3,r0,r2 @pu1_src_tmp += src_strd@
    160  vld1.u8 {d1},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
    161  vld1.u8 {d0},[r0]! @src_tmp1 = vld1_u8(pu1_src_tmp)@
    163  vld1.u8 {d2},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
    165  vld1.u8 {d3},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
    167  vld1.u8 {d4},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
    169  vld1.u8 {d5},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
    171  vld1.u8 {d6},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
    173  vld1.u8 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
    175  vld1.u8 {d16},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
    [all …]

ihevc_inter_pred_filters_luma_vert_w16inp.s
    148  add r3,r0,r2 @pu1_src_tmp += src_strd@
    149  vld1.16 {d1},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
    150  vld1.16 {d0},[r0]! @src_tmp1 = vld1_u8(pu1_src_tmp)@
    152  vld1.16 {d2},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
    154  vld1.16 {d3},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
    156  vld1.16 {d4},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
    158  vld1.16 {d5},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
    160  vld1.16 {d6},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
    162  vld1.16 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
    167  vld1.16 {d16},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
    [all …]

ihevc_inter_pred_luma_vert_w16inp_w16out.s
    158  add r3,r0,r2 @pu1_src_tmp += src_strd@
    159  vld1.16 {d1},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
    160  vld1.16 {d0},[r0]! @src_tmp1 = vld1_u8(pu1_src_tmp)@
    162  vld1.16 {d2},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
    164  vld1.16 {d3},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
    166  vld1.16 {d4},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
    168  vld1.16 {d5},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
    170  vld1.16 {d6},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
    172  vld1.16 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
    177  vld1.16 {d16},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
    [all …]

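The three vertical luma filters share the same prologue: r3 is pointed one row below r0, and the following rows are pulled in with post-incremented stride loads so that eight consecutive rows (src_tmp1..src_tmp4, reused cyclically) are live for the 8-tap filter; the w16inp variants do the same with vld1.16 on 16-bit input. A small sketch of just that row-loading pattern, with the filtering itself omitted and the function name assumed:

#include <arm_neon.h>
#include <stdint.h>

static void load_eight_rows(const uint8_t *pu1_src, int src_strd, uint8x8_t rows[8])
{
    const uint8_t *pu1_src_tmp = pu1_src + src_strd;  /* add r3,r0,r2 */
    rows[0] = vld1_u8(pu1_src);                       /* vld1.u8 {d0},[r0]!   */
    for (int i = 1; i < 8; i++)
    {
        rows[i] = vld1_u8(pu1_src_tmp);               /* vld1.u8 {dN},[r3],r2 */
        pu1_src_tmp += src_strd;                      /* post-increment by the stride */
    }
}
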
ihevc_inter_pred_chroma_vert_w16out.s
    187  vld1.32 {d6[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp1, 0
    190  vld1.32 {d6[1]},[r6],r2 @loads pu1_src_tmp
    192  vld1.32 {d7[1]},[r6],r2 @loads pu1_src_tmp

ihevc_inter_pred_chroma_vert.s
    188  vld1.32 {d6[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp1, 0
    191  vld1.32 {d6[1]},[r6],r2 @loads pu1_src_tmp
    193  vld1.32 {d7[1]},[r6],r2 @loads pu1_src_tmp

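The chroma vertical kernels handle the narrow case by loading two 4-byte rows into the two 32-bit lanes of a single 64-bit register (d6[0] and d6[1] above), so one register holds a 4x2 block. A sketch of that packing, assuming it is expressed with vld1_lane_u32 as the first comment suggests:

#include <arm_neon.h>
#include <stdint.h>

static uint8x8_t load_two_4byte_rows(const uint8_t *pu1_src, int src_strd)
{
    uint32x2_t packed = vdup_n_u32(0);
    packed = vld1_lane_u32((const uint32_t *)pu1_src, packed, 0);              /* vld1.32 {d6[0]},[r0]    */
    packed = vld1_lane_u32((const uint32_t *)(pu1_src + src_strd), packed, 1); /* vld1.32 {d6[1]},[r6],r2 */
    return vreinterpret_u8_u32(packed);
}
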
/external/libvpx/config/arm-neon/vpx_dsp/arm/
vpx_convolve8_vert_filter_type2_neon.asm.S
    84   add r3, r0, r2 @pu1_src_tmp += src_strd;
    86   vld1.u8 {d1}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
    87   vld1.u8 {d0}, [r0]! @src_tmp1 = vld1_u8(pu1_src_tmp);
    89   vld1.u8 {d2}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
    92   vld1.u8 {d3}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
    95   vld1.u8 {d4}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    98   vld1.u8 {d5}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
    101  vld1.u8 {d6}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
    104  vld1.u8 {d7}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
    107  vld1.u8 {d16}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    [all …]

vpx_convolve8_avg_vert_filter_type2_neon.asm.S
    84   add r3, r0, r2 @pu1_src_tmp += src_strd;
    86   vld1.u8 {d1}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
    87   vld1.u8 {d0}, [r0]! @src_tmp1 = vld1_u8(pu1_src_tmp);
    89   vld1.u8 {d2}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
    92   vld1.u8 {d3}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
    95   vld1.u8 {d4}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    98   vld1.u8 {d5}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
    101  vld1.u8 {d6}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
    104  vld1.u8 {d7}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
    107  vld1.u8 {d16}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    [all …]

vpx_convolve8_vert_filter_type1_neon.asm.S
    84   add r3, r0, r2 @pu1_src_tmp += src_strd;
    86   vld1.u8 {d1}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
    87   vld1.u8 {d0}, [r0]! @src_tmp1 = vld1_u8(pu1_src_tmp);
    89   vld1.u8 {d2}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
    92   vld1.u8 {d3}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
    95   vld1.u8 {d4}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    98   vld1.u8 {d5}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
    101  vld1.u8 {d6}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
    104  vld1.u8 {d7}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
    107  vld1.u8 {d16}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    [all …]

vpx_convolve8_avg_vert_filter_type1_neon.asm.S
    83   add r3, r0, r2 @pu1_src_tmp += src_strd;
    85   vld1.u8 {d1}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
    86   vld1.u8 {d0}, [r0]! @src_tmp1 = vld1_u8(pu1_src_tmp);
    88   vld1.u8 {d2}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
    91   vld1.u8 {d3}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
    94   vld1.u8 {d4}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    97   vld1.u8 {d5}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
    100  vld1.u8 {d6}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
    103  vld1.u8 {d7}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
    106  vld1.u8 {d16}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    [all …]

/external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve8_vert_filter_type2_neon.asm
    77   add r3, r0, r2 ;pu1_src_tmp += src_strd;
    79   vld1.u8 {d1}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
    80   vld1.u8 {d0}, [r0]! ;src_tmp1 = vld1_u8(pu1_src_tmp);
    82   vld1.u8 {d2}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
    85   vld1.u8 {d3}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
    88   vld1.u8 {d4}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    91   vld1.u8 {d5}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
    94   vld1.u8 {d6}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
    97   vld1.u8 {d7}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
    100  vld1.u8 {d16}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    [all …]

vpx_convolve8_avg_vert_filter_type2_neon.asm
    77   add r3, r0, r2 ;pu1_src_tmp += src_strd;
    79   vld1.u8 {d1}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
    80   vld1.u8 {d0}, [r0]! ;src_tmp1 = vld1_u8(pu1_src_tmp);
    82   vld1.u8 {d2}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
    85   vld1.u8 {d3}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
    88   vld1.u8 {d4}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    91   vld1.u8 {d5}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
    94   vld1.u8 {d6}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
    97   vld1.u8 {d7}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
    100  vld1.u8 {d16}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    [all …]

vpx_convolve8_vert_filter_type1_neon.asm
    77   add r3, r0, r2 ;pu1_src_tmp += src_strd;
    79   vld1.u8 {d1}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
    80   vld1.u8 {d0}, [r0]! ;src_tmp1 = vld1_u8(pu1_src_tmp);
    82   vld1.u8 {d2}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
    85   vld1.u8 {d3}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
    88   vld1.u8 {d4}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    91   vld1.u8 {d5}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
    94   vld1.u8 {d6}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
    97   vld1.u8 {d7}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
    100  vld1.u8 {d16}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    [all …]

vpx_convolve8_avg_vert_filter_type1_neon.asm
    76   add r3, r0, r2 ;pu1_src_tmp += src_strd;
    78   vld1.u8 {d1}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
    79   vld1.u8 {d0}, [r0]! ;src_tmp1 = vld1_u8(pu1_src_tmp);
    81   vld1.u8 {d2}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
    84   vld1.u8 {d3}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
    87   vld1.u8 {d4}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    90   vld1.u8 {d5}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
    93   vld1.u8 {d6}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
    96   vld1.u8 {d7}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
    99   vld1.u8 {d16}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    [all …]

/external/libavc/common/arm/
ih264_inter_pred_luma_copy_a9q.s
    96   vld1.32 {d0[0]}, [r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    97   add r5, r0, r2 @pu1_src_tmp += src_strd
    100  vld1.32 {d0[0]}, [r5], r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    103  vld1.32 {d0[0]}, [r5], r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    106  vld1.32 {d0[0]}, [r5], r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
    114  sub r0, r5, r11 @pu1_src = pu1_src_tmp
    132  add r5, r0, r2 @pu1_src_tmp += src_strd
    133  vld1.8 {d0}, [r0]! @vld1_u8(pu1_src_tmp)
    136  vld1.8 {d1}, [r5], r2 @vld1_u8(pu1_src_tmp)
    139  vld1.8 {d2}, [r5], r2 @vld1_u8(pu1_src_tmp)
    [all …]

/external/libhevc/encoder/arm/
ihevce_decomp_pre_intra_pass_neon.c
    74   UWORD8 *pu1_src_tmp = pu1_src - 3 * src_strd; in ihevce_scaling_filter_mxn() local
    82   tmp = (i4_ftaps[3] * pu1_src_tmp[j] + in ihevce_scaling_filter_mxn()
    83   i4_ftaps[2] * (pu1_src_tmp[j - 1] + pu1_src_tmp[j + 1]) + in ihevce_scaling_filter_mxn()
    84   i4_ftaps[1] * (pu1_src_tmp[j + 2] + pu1_src_tmp[j - 2]) + in ihevce_scaling_filter_mxn()
    85   i4_ftaps[0] * (pu1_src_tmp[j + 3] + pu1_src_tmp[j - 3]) + in ihevce_scaling_filter_mxn()
    91   pu1_src_tmp += src_strd; in ihevce_scaling_filter_mxn()
    169  UWORD8 *pu1_src_tmp = pu1_src + wd_offset + ht_offset * src_strd; in ihevce_scale_by_2_neon() local
    172  pu1_src_tmp -= (3 * (1 - col_start) + src_strd * 3 * (1 - row_start)); in ihevce_scale_by_2_neon()
    175  pf_copy_2d(pu1_cpy, cpy_strd, pu1_src_tmp, src_strd, wd_tmp, ht_tmp); in ihevce_scale_by_2_neon()

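The ihevce_scaling_filter_mxn() excerpt shows a symmetric 7-tap filter: one centre tap (i4_ftaps[3]) plus three mirrored pairs, applied along each row, with the row pointer starting three rows above the block and stepping by src_strd. A self-contained sketch of that loop; the rounding/shift applied to tmp and the destination layout are not in the excerpt, so they appear here as explicit, assumed parameters:

#include <stdint.h>

static void scaling_filter_rows(const uint8_t *pu1_src, int src_strd,
                                int16_t *pi2_dst, int dst_strd,
                                int wd, int n_rows,
                                const int i4_ftaps[4], int round, int shift)
{
    const uint8_t *pu1_src_tmp = pu1_src - 3 * src_strd;   /* start three rows above */
    for (int i = 0; i < n_rows; i++)
    {
        for (int j = 0; j < wd; j++)
        {
            int tmp = i4_ftaps[3] * pu1_src_tmp[j]
                    + i4_ftaps[2] * (pu1_src_tmp[j - 1] + pu1_src_tmp[j + 1])
                    + i4_ftaps[1] * (pu1_src_tmp[j + 2] + pu1_src_tmp[j - 2])
                    + i4_ftaps[0] * (pu1_src_tmp[j + 3] + pu1_src_tmp[j - 3]);
            pi2_dst[j] = (int16_t)((tmp + round) >> shift);  /* assumed normalisation */
        }
        pu1_src_tmp += src_strd;   /* next row, as on line 91 above */
        pi2_dst += dst_strd;
    }
}
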
ihevce_scale_by_2_neon.c
    74   UWORD8 *pu1_src_tmp = pu1_src + j * src_strd - 3; in ihevce_horz_scale_neon_w16() local
    79   uint8x16x2_t src = vld2q_u8(pu1_src_tmp); in ihevce_horz_scale_neon_w16()
    107  pu1_src_tmp += 16; in ihevce_horz_scale_neon_w16()
    125  src[mod8] = vld1q_u8(pu1_src_tmp); \ in ihevce_vert_scale_neon_w16()
    126  pu1_src_tmp += src_strd; \ in ihevce_vert_scale_neon_w16()
    133  UWORD8 *pu1_src_tmp = pu1_src - 3 * src_strd + i; in ihevce_vert_scale_neon_w16() local
    218  UWORD8 *pu1_src_tmp = pu1_src + j * src_strd; in ihevce_scaling_filter_mxn_neon() local
    234  pu1_src_tmp - 3 * src_strd + i, in ihevce_scaling_filter_mxn_neon()

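In ihevce_horz_scale_neon_w16(), vld2q_u8 de-interleaves 32 consecutive source pixels into even and odd columns in a single load, which is the natural starting point for a horizontal decimate-by-2 filter. A minimal sketch of just that access; the surrounding tap filtering and the 16-byte pointer step of the real loop are not reproduced:

#include <arm_neon.h>
#include <stdint.h>

static void split_even_odd_columns(const uint8_t *pu1_src_tmp,
                                   uint8x16_t *even_cols, uint8x16_t *odd_cols)
{
    uint8x16x2_t src = vld2q_u8(pu1_src_tmp);  /* as on line 79 above */
    *even_cols = src.val[0];                   /* pixels 0,2,4,...,30 */
    *odd_cols  = src.val[1];                   /* pixels 1,3,5,...,31 */
}
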
/external/libhevc/decoder/
ihevcd_sao.c
    243  UWORD8 *pu1_src_tmp = pu1_src_luma; in ihevcd_sao_ctb() local
    260  pu1_src_tmp += MIN((WORD32)CTZ(u4_no_loop_filter_flag), tmp_wd); in ihevcd_sao_ctb()
    270  … pu1_src_copy[row * src_strd + col] = pu1_src_tmp[row * tmp_strd + col]; in ihevcd_sao_ctb()
    275  pu1_src_tmp += MIN((WORD32)CTZ(~u4_no_loop_filter_flag), tmp_wd); in ihevcd_sao_ctb()
    281  pu1_src_tmp -= sao_wd_luma; in ihevcd_sao_ctb()
    284  pu1_src_tmp += min_cu * src_strd; in ihevcd_sao_ctb()
    323  UWORD8 *pu1_src_tmp = pu1_src_luma; in ihevcd_sao_ctb() local
    338  pu1_src_tmp += MIN((WORD32)CTZ(u4_no_loop_filter_flag), tmp_wd); in ihevcd_sao_ctb()
    348  … pu1_src_tmp[row * src_strd + col] = pu1_src_copy[row * tmp_strd + col]; in ihevcd_sao_ctb()
    353  pu1_src_tmp += MIN((WORD32)CTZ(~u4_no_loop_filter_flag), tmp_wd); in ihevcd_sao_ctb()
    [all …]

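In ihevcd_sao_ctb(), each bit of u4_no_loop_filter_flag marks a column group whose pixels are backed up before SAO and written back afterwards; the first excerpted block is the backup, the second the restore. CTZ on the mask, clamped by MIN against the remaining width, gives the length of the next run of untouched columns or of columns to copy, so the row is walked run by run instead of column by column. A generic, hedged sketch of that run walk, with the per-run 2-D copy reduced to a callback and __builtin_ctz standing in for the codec's CTZ macro:

#include <stdint.h>

static int ctz32(uint32_t x) { return x ? __builtin_ctz(x) : 32; }

static void backup_marked_runs(uint32_t no_loop_filter_flags, int wd,
                               void (*backup_run)(int start_col, int len))
{
    int col = 0;
    while (col < wd)
    {
        /* run of clear bits: loop-filtered columns, nothing to back up */
        int run = ctz32(no_loop_filter_flags);
        if (run > wd - col) run = wd - col;                       /* MIN(CTZ(flag), wd)  */
        col += run;
        no_loop_filter_flags = run < 32 ? no_loop_filter_flags >> run : 0;

        /* run of set bits: columns whose pixels are copied out before SAO */
        run = ctz32(~no_loop_filter_flags);
        if (run > wd - col) run = wd - col;                       /* MIN(CTZ(~flag), wd) */
        if (run > 0) backup_run(col, run);
        col += run;
        no_loop_filter_flags = run < 32 ? no_loop_filter_flags >> run : 0;
    }
}
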
/external/libhevc/encoder/
ihevce_decomp_pre_intra_pass.c
    1603  UWORD8 *pu1_src_tmp = pu1_src - 3 * src_strd; in ihevce_scaling_filter_mxn() local
    1611  tmp = (i4_ftaps[3] * pu1_src_tmp[j] + in ihevce_scaling_filter_mxn()
    1612  i4_ftaps[2] * (pu1_src_tmp[j - 1] + pu1_src_tmp[j + 1]) + in ihevce_scaling_filter_mxn()
    1613  i4_ftaps[1] * (pu1_src_tmp[j + 2] + pu1_src_tmp[j - 2]) + in ihevce_scaling_filter_mxn()
    1614  i4_ftaps[0] * (pu1_src_tmp[j + 3] + pu1_src_tmp[j - 3]) + in ihevce_scaling_filter_mxn()
    1620  pu1_src_tmp += src_strd; in ihevce_scaling_filter_mxn()
    1700  UWORD8 *pu1_src_tmp = pu1_src + wd_offset + ht_offset * src_strd; in ihevce_scale_by_2() local
    1703  pu1_src_tmp -= (3 * (1 - col_start) + src_strd * 3 * (1 - row_start)); in ihevce_scale_by_2()
    1706  pf_copy_2d(pu1_cpy, cpy_strd, pu1_src_tmp, src_strd, wd_tmp, ht_tmp); in ihevce_scale_by_2()

hme_utils.c
    2118  U08 *pu1_src, *pu1_dst, *pu1_src_tmp; in hme_get_wt_inp() local
    2148  pu1_src_tmp = pu1_src; in hme_get_wt_inp()
    2161  pu1_dst[j] = pu1_src_tmp[j]; in hme_get_wt_inp()
    2163  pu1_src_tmp += ps_curr_layer->i4_inp_stride; in hme_get_wt_inp()
    2183  tmp = HME_INV_WT_PRED1(pu1_src_tmp[j], inv_wt, off, log_wdc); in hme_get_wt_inp()
    2186  pu1_src_tmp += ps_curr_layer->i4_inp_stride; in hme_get_wt_inp()

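hme_get_wt_inp() builds the weighted-input planes by walking each block with pu1_src_tmp and either copying samples straight through or pushing them through HME_INV_WT_PRED1, whose exact arithmetic is not part of the excerpt. A hedged sketch of that per-block copy, with the inverse-weighting step left as a caller-supplied callback and all names assumed:

#include <stdint.h>

static void wt_inp_copy_block(const uint8_t *pu1_src, int inp_stride,
                              uint8_t *pu1_dst, int dst_stride,
                              int wd, int ht,
                              uint8_t (*inv_wt_pred)(uint8_t sample)) /* NULL = plain copy */
{
    const uint8_t *pu1_src_tmp = pu1_src;
    for (int i = 0; i < ht; i++)
    {
        for (int j = 0; j < wd; j++)
            pu1_dst[j] = inv_wt_pred ? inv_wt_pred(pu1_src_tmp[j]) : pu1_src_tmp[j];
        pu1_src_tmp += inp_stride;   /* ps_curr_layer->i4_inp_stride in the source */
        pu1_dst += dst_stride;
    }
}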