Home
last modified time | relevance | path

Searched refs: vsubl_s8 (Results 1 – 19 of 19) sorted by relevance

/external/XNNPACK/src/qs8-vadd/gen/
Dminmax-neon-ld64-x32.c44 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
45 const int16x8_t vey01234567 = vsubl_s8(vy01234567, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
46 const int16x8_t vex89ABCDEF = vsubl_s8(vx89ABCDEF, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
47 const int16x8_t vey89ABCDEF = vsubl_s8(vy89ABCDEF, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
48 const int16x8_t vexGHIJKLMN = vsubl_s8(vxGHIJKLMN, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
49 const int16x8_t veyGHIJKLMN = vsubl_s8(vyGHIJKLMN, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
50 const int16x8_t vexOPQRSTUV = vsubl_s8(vxOPQRSTUV, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
51 const int16x8_t veyOPQRSTUV = vsubl_s8(vyOPQRSTUV, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
111 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
112 const int16x8_t vey01234567 = vsubl_s8(vy01234567, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
Dminmax-neon-ld64-x24.c42 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
43 const int16x8_t vey01234567 = vsubl_s8(vy01234567, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
44 const int16x8_t vex89ABCDEF = vsubl_s8(vx89ABCDEF, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
45 const int16x8_t vey89ABCDEF = vsubl_s8(vy89ABCDEF, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
46 const int16x8_t vexGHIJKLMN = vsubl_s8(vxGHIJKLMN, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
47 const int16x8_t veyGHIJKLMN = vsubl_s8(vyGHIJKLMN, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
98 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
99 const int16x8_t vey01234567 = vsubl_s8(vy01234567, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
Dminmax-neon-ld64-x16.c40 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
41 const int16x8_t vey01234567 = vsubl_s8(vy01234567, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
42 const int16x8_t vex89ABCDEF = vsubl_s8(vx89ABCDEF, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
43 const int16x8_t vey89ABCDEF = vsubl_s8(vy89ABCDEF, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
81 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
82 const int16x8_t vey01234567 = vsubl_s8(vy01234567, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
Dminmax-neon-ld64-x8.c38 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
39 const int16x8_t vey01234567 = vsubl_s8(vy01234567, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
68 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
69 const int16x8_t vey01234567 = vsubl_s8(vy01234567, vy_zero_point); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
/external/XNNPACK/src/qs8-vaddc/gen/
Dminmax-neon-ld64-x32.c45 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
46 const int16x8_t vex89ABCDEF = vsubl_s8(vx89ABCDEF, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
47 const int16x8_t vexGHIJKLMN = vsubl_s8(vxGHIJKLMN, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
48 const int16x8_t vexOPQRSTUV = vsubl_s8(vxOPQRSTUV, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
98 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
Dminmax-neon-ld64-x24.c44 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
45 const int16x8_t vex89ABCDEF = vsubl_s8(vx89ABCDEF, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
46 const int16x8_t vexGHIJKLMN = vsubl_s8(vxGHIJKLMN, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
89 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
Dminmax-neon-ld64-x16.c43 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
44 const int16x8_t vex89ABCDEF = vsubl_s8(vx89ABCDEF, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
76 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
Dminmax-neon-ld64-x8.c42 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8()
67 const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8()
/external/XNNPACK/src/qs8-vadd/
Dneon-ld64.c.in39 const int16x8_t vex${ABC[N:N+8]} = vsubl_s8(vx${ABC[N:N+8]}, vx_zero_point);
40 const int16x8_t vey${ABC[N:N+8]} = vsubl_s8(vy${ABC[N:N+8]}, vy_zero_point);
92 const int16x8_t vex${ABC[0:8]} = vsubl_s8(vx${ABC[0:8]}, vx_zero_point);
93 const int16x8_t vey${ABC[0:8]} = vsubl_s8(vy${ABC[0:8]}, vy_zero_point);
/external/libvpx/libvpx/vp8/common/arm/neon/
Dloopfiltersimplehorizontaledge_neon.c52 q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7u8)), in vp8_loop_filter_simple_horizontal_edge_neon()
54 q3s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7u8)), in vp8_loop_filter_simple_horizontal_edge_neon()
Dloopfiltersimpleverticaledge_neon.c217 q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)), in vp8_loop_filter_simple_vertical_edge_neon()
219 q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)), in vp8_loop_filter_simple_vertical_edge_neon()
Dvp8_loopfilter_neon.c75 q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)), in vp8_loop_filter_neon()
77 q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)), in vp8_loop_filter_neon()
Dmbloopfilter_neon.c80 q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)), in vp8_mbloop_filter_neon()
82 q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)), in vp8_mbloop_filter_neon()
/external/XNNPACK/src/qs8-vaddc/
Dneon-ld64.c.in43 const int16x8_t vex${ABC[N:N+8]} = vsubl_s8(vx${ABC[N:N+8]}, vx_zero_point);
89 const int16x8_t vex${ABC[0:8]} = vsubl_s8(vx${ABC[0:8]}, vx_zero_point);
/external/llvm-project/clang/test/CodeGen/
Daarch64-neon-intrinsics.c6825 return vsubl_s8(a, b); in test_vsubl_s8()
Darm_neon_intrinsics.c19590 return vsubl_s8(a, b); in test_vsubl_s8()
/external/clang/test/CodeGen/
Daarch64-neon-intrinsics.c7512 return vsubl_s8(a, b); in test_vsubl_s8()
Darm_neon_intrinsics.c22091 return vsubl_s8(a, b); in test_vsubl_s8()
/external/neon_2_sse/
DNEON_2_SSE.h655 _NEON2SSESTORAGE int16x8_t vsubl_s8(int8x8_t a, int8x8_t b); // VSUBL.S8 q0,d0,d0
4454 _NEON2SSESTORAGE int16x8_t vsubl_s8(int8x8_t a, int8x8_t b); // VSUBL.S8 q0,d0,d0
4455 _NEON2SSE_INLINE int16x8_t vsubl_s8(int8x8_t a, int8x8_t b) // VSUBL.S8 q0,d0,d0 in vsubl_s8() function