; RUN: llc < %s -march=arm -mattr=+neon | grep vldr.64 | count 4
; RUN: llc < %s -march=arm -mattr=+neon | grep vstr.64
; RUN: llc < %s -march=arm -mattr=+neon | grep vmov

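; t1: the two <4 x i16> operand loads are expected to lower to 64-bit NEON
; d-register loads (vldr.64), and the <2 x i32> store to vstr.64, which the
; greps above count across both functions.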
define void @t1(<2 x i32>* %r, <4 x i16>* %a, <4 x i16>* %b) nounwind {
entry:
	%0 = load <4 x i16>* %a, align 8		; <<4 x i16>> [#uses=1]
	%1 = load <4 x i16>* %b, align 8		; <<4 x i16>> [#uses=1]
	%2 = add <4 x i16> %0, %1		; <<4 x i16>> [#uses=1]
	%3 = bitcast <4 x i16> %2 to <2 x i32>		; <<2 x i32>> [#uses=1]
	store <2 x i32> %3, <2 x i32>* %r, align 8
	ret void
}

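; t2: the <2 x i32> result is returned in core registers, so moving it out of
; the d register presumably requires a vmov, which the vmov grep checks for.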
define <2 x i32> @t2(<4 x i16>* %a, <4 x i16>* %b) nounwind readonly {
entry:
	%0 = load <4 x i16>* %a, align 8		; <<4 x i16>> [#uses=1]
	%1 = load <4 x i16>* %b, align 8		; <<4 x i16>> [#uses=1]
	%2 = sub <4 x i16> %0, %1		; <<4 x i16>> [#uses=1]
	%3 = bitcast <4 x i16> %2 to <2 x i32>		; <<2 x i32>> [#uses=1]
	ret <2 x i32> %3
}