Lines matching refs:i24 in an LLVM SROA test file
6 ; We fully promote these to the i24 load or store size, resulting in just masks and shifts.
24 %aiptr = bitcast [3 x i8]* %a to i24*
25 %ai = load i24, i24* %aiptr
28 ; CHECK: %[[ext2:.*]] = zext i8 0 to i24
29 ; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, -256
30 ; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[ext2]]
31 ; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
32 ; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
33 ; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
34 ; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
35 ; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
36 ; CHECK-NEXT: %[[shift0:.*]] = shl i24 %[[ext0]], 16
37 ; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], 65535
38 ; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[shift0]]
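
The sequence above is the generic byte-insertion idiom SROA emits: widen the byte, shift it to its position, mask the old bits out of the accumulator, and OR the two together. As a standalone illustration (a minimal sketch, not taken from the test; @insert_middle_byte, %acc, and %byte are hypothetical names):

define i24 @insert_middle_byte(i24 %acc, i8 %byte) {
  %ext   = zext i8 %byte to i24
  %shift = shl i24 %ext, 8        ; position the byte at bits [8,16)
  %mask  = and i24 %acc, -65281   ; -65281 = 0xFF00FF, clears bits [8,16)
  %ins   = or i24 %mask, %shift
  ret i24 %ins
}

The three mask constants in the CHECK lines follow the same scheme: -256 = 0xFFFF00, -65281 = 0xFF00FF, and 65535 = 0x00FFFF each clear exactly the byte being inserted.
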
40 %biptr = bitcast [3 x i8]* %b to i24*
41 store i24 %ai, i24* %biptr
50 ; CHECK: %[[shift0:.*]] = lshr i24 %[[insert0]], 16
51 ; CHECK-NEXT: %[[trunc0:.*]] = trunc i24 %[[shift0]] to i8
52 ; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
53 ; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
54 ; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[insert0]] to i8
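
Extraction runs the other way: each byte comes back out of the i24 with a right shift and a truncate. In isolation (again a hypothetical sketch, not CHECK output):

define i8 @extract_high_byte(i24 %v) {
  %shift = lshr i24 %v, 16        ; the first byte of the [3 x i8] sits in bits [16,24)
  %b     = trunc i24 %shift to i8
  ret i8 %b
}

The lshr-by-8 and the bare trunc in the CHECK lines recover the remaining two bytes the same way.
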
89 %a3i24ptr = bitcast i8* %a3ptr to i24*
90 store i24 1, i24* %a3i24ptr
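
The shift pattern above (element 0 landing in the high bits) indicates a big-endian layout, so the constant 1 stored as an i24 puts its only nonzero byte at the highest address. If this store were split back into byte slices it would look roughly like this (a sketch; %a4ptr and %a5ptr are hypothetical pointers to the two following bytes):

store i8 0, i8* %a3ptr   ; bits [16,24) of the i24 constant, the most significant byte
store i8 0, i8* %a4ptr   ; bits [8,16)
store i8 1, i8* %a5ptr   ; bits [0,8), the only nonzero byte
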
161 ; of tricky aspects (the i24 type) that make that hard. Historically, SROA
163 ; significant byte due to shrinking the [4,8) slice to an i24, or by failing to
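
To make the fragment concrete: in the { i32, i24 } alloca below, the [4,8) slice holds the i24 plus one byte of padding, and on a big-endian target the live bytes are the slice's high bits. A hypothetical sketch of the correct shrink versus the historical miscompile (%slice names the [4,8) bytes loaded as an i32):

define i24 @shrink_slice(i32 %slice) {
  %hi = lshr i32 %slice, 8        ; skip the low padding byte
  %v  = trunc i32 %hi to i24      ; keeps the most significant byte
  ; a bare 'trunc i32 %slice to i24' would drop that byte instead
  ret i24 %v
}
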
175 %a = alloca { i32, i24 }, align 4
178 %tmp0 = bitcast { i32, i24 }* %a to i64*
181 %tmp2 = bitcast { i32, i24 }* %a to i32*
206 %a = alloca { i32, i24 }, align 4
211 %tmp0 = bitcast { i32, i24 }* %a to i8*
218 %tmp2 = bitcast { i32, i24 }* %a to i64*
220 %tmp4 = bitcast { i32, i24 }* %a to i32*
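
For orientation on those bitcasts: { i32, i24 } at align 4 spans 8 bytes, with the i32 in [0,4), the i24 in [4,7), and padding in [7,8), so the i64 view covers both fields at once while the i32 view aliases only the first. On a big-endian target, recombining the two promoted fields into the i64 would look roughly like this (hypothetical names, a sketch rather than expected CHECK output):

define i64 @combine_fields(i32 %f0, i24 %f1) {
  %w0  = zext i32 %f0 to i64
  %hi  = shl i64 %w0, 32          ; the i32 field fills bits [32,64)
  %w1  = zext i24 %f1 to i64
  %mid = shl i64 %w1, 8           ; the i24 fills bits [8,32); [0,8) is padding
  %v   = or i64 %hi, %mid
  ret i64 %v
}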