Lines Matching refs:__a

7   %__a = alloca <8 x i8>, align 8
10 store <8 x i8> %tmp, <8 x i8>* %__a, align 8
13 %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
24 %__a = alloca <8 x i8>, align 8
27 store <8 x i8> %tmp, <8 x i8>* %__a, align 8
30 %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
41 %__a = alloca <8 x i8>, align 8
44 store <8 x i8> %tmp, <8 x i8>* %__a, align 8
47 %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
58 %__a = alloca <4 x i16>, align 8
61 store <4 x i16> %tmp, <4 x i16>* %__a, align 8
64 %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
79 %__a = alloca <4 x i16>, align 8
82 store <4 x i16> %tmp, <4 x i16>* %__a, align 8
85 %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
100 %__a = alloca <4 x i16>, align 8
103 store <4 x i16> %tmp, <4 x i16>* %__a, align 8
106 %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
121 %__a = alloca <2 x i32>, align 8
124 store <2 x i32> %tmp, <2 x i32>* %__a, align 8
127 %tmp2 = load <2 x i32>, <2 x i32>* %__a, align 8
142 %__a = alloca <2 x i32>, align 8
145 store <2 x i32> %tmp, <2 x i32>* %__a, align 8
148 %tmp2 = load <2 x i32>, <2 x i32>* %__a, align 8
163 %__a = alloca <2 x float>, align 8
166 store <2 x float> %tmp, <2 x float>* %__a, align 8
169 %tmp2 = load <2 x float>, <2 x float>* %__a, align 8
185 %__a = alloca <1 x i64>, align 8
188 store <1 x i64> %tmp, <1 x i64>* %__a, align 8
191 %tmp2 = load <1 x i64>, <1 x i64>* %__a, align 8
207 %__a = alloca <1 x i64>, align 8
210 store <1 x i64> %tmp, <1 x i64>* %__a, align 8
213 %tmp2 = load <1 x i64>, <1 x i64>* %__a, align 8
228 %__a = alloca <16 x i8>, align 16
231 store <16 x i8> %tmp, <16 x i8>* %__a, align 16
234 %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
245 %__a = alloca <16 x i8>, align 16
248 store <16 x i8> %tmp, <16 x i8>* %__a, align 16
251 %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
262 %__a = alloca <16 x i8>, align 16
265 store <16 x i8> %tmp, <16 x i8>* %__a, align 16
268 %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
279 %__a = alloca <8 x i16>, align 16
282 store <8 x i16> %tmp, <8 x i16>* %__a, align 16
285 %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
300 %__a = alloca <8 x i16>, align 16
303 store <8 x i16> %tmp, <8 x i16>* %__a, align 16
306 %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
321 %__a = alloca <8 x i16>, align 16
324 store <8 x i16> %tmp, <8 x i16>* %__a, align 16
327 %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
342 %__a = alloca <4 x i32>, align 16
345 store <4 x i32> %tmp, <4 x i32>* %__a, align 16
348 %tmp2 = load <4 x i32>, <4 x i32>* %__a, align 16
363 %__a = alloca <4 x i32>, align 16
366 store <4 x i32> %tmp, <4 x i32>* %__a, align 16
369 %tmp2 = load <4 x i32>, <4 x i32>* %__a, align 16
384 %__a = alloca <4 x float>, align 16
387 store <4 x float> %tmp, <4 x float>* %__a, align 16
390 %tmp2 = load <4 x float>, <4 x float>* %__a, align 16
405 %__a = alloca <2 x i64>, align 16
408 store <2 x i64> %tmp, <2 x i64>* %__a, align 16
411 %tmp2 = load <2 x i64>, <2 x i64>* %__a, align 16
426 %__a = alloca <2 x i64>, align 16
429 store <2 x i64> %tmp, <2 x i64>* %__a, align 16
432 %tmp2 = load <2 x i64>, <2 x i64>* %__a, align 16
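
All of the matches above share one shape: a stack slot %__a is allocated for a NEON vector value, the incoming value %tmp is stored to it, and %tmp2 is immediately reloaded. Only the vector type changes from group to group: the 64-bit D-register types (<8 x i8>, <4 x i16>, <2 x i32>, <2 x float>, <1 x i64>) use align 8, while the 128-bit Q-register types (<16 x i8>, <8 x i16>, <4 x i32>, <4 x float>, <2 x i64>) use align 16. This is the spill/reload pattern Clang emits at -O0 for a by-value vector argument; __a is presumably the parameter name used by the arm_neon.h intrinsic wrappers in the file being searched.

A minimal self-contained sketch of the pattern, assuming a pre-opaque-pointer LLVM (the typed-pointer syntax in the matches requires one); the function name and the ret are illustrative, not taken from the matched file:

define <8 x i8> @spill_reload(<8 x i8> %tmp) {
entry:
  ; At -O0 the incoming vector is kept in a stack slot named after the
  ; wrapper's __a parameter; the Q-register groups use <16 x i8> etc.
  ; with align 16 instead.
  %__a = alloca <8 x i8>, align 8
  store <8 x i8> %tmp, <8 x i8>* %__a, align 8
  %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
  ret <8 x i8> %tmp2
}

Running mem2reg over this function (opt -passes=mem2reg) promotes %__a to an SSA value and deletes all three matched lines, which is why the pattern only survives in unoptimized output.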