; RUN: llc -mtriple=arm-eabi -mattr=+neon -pre-RA-sched=source -disable-post-ra %s -o - \
; RUN:  | FileCheck %s

; Signed <8 x i8> division: expect lowering via NEON reciprocal estimate
; (vrecpe) on two widened halves, narrowed back with vmovn (no libcall).
define <8 x i8> @sdivi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vrecpe.f32
;CHECK: vmovn.i32
;CHECK: vrecpe.f32
;CHECK: vmovn.i32
;CHECK: vmovn.i16
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = load <8 x i8>, <8 x i8>* %B
	%tmp3 = sdiv <8 x i8> %tmp1, %tmp2
	ret <8 x i8> %tmp3
}

; Unsigned <8 x i8> division: expect reciprocal estimate plus a Newton
; refinement step (vrecps) per half, then unsigned saturating narrow.
define <8 x i8> @udivi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vrecpe.f32
;CHECK: vrecps.f32
;CHECK: vmovn.i32
;CHECK: vrecpe.f32
;CHECK: vrecps.f32
;CHECK: vmovn.i32
;CHECK: vqmovun.s16
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = load <8 x i8>, <8 x i8>* %B
	%tmp3 = udiv <8 x i8> %tmp1, %tmp2
	ret <8 x i8> %tmp3
}

; Signed <4 x i16> division: a single widened lane group suffices, so one
; estimate + refinement + narrow sequence is expected.
define <4 x i16> @sdivi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vrecpe.f32
;CHECK: vrecps.f32
;CHECK: vmovn.i32
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = load <4 x i16>, <4 x i16>* %B
	%tmp3 = sdiv <4 x i16> %tmp1, %tmp2
	ret <4 x i16> %tmp3
}

; Unsigned <4 x i16> division: needs an extra refinement step (two vrecps)
; for the wider unsigned range before narrowing.
define <4 x i16> @udivi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vrecpe.f32
;CHECK: vrecps.f32
;CHECK: vrecps.f32
;CHECK: vmovn.i32
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = load <4 x i16>, <4 x i16>* %B
	%tmp3 = udiv <4 x i16> %tmp1, %tmp2
	ret <4 x i16> %tmp3
}
