// RUN: %clang_cc1 < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s
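// Check that on a target without native atomic instructions (ARMv5E), the C11
// and GNU atomic builtins lower to calls to the __atomic_* runtime library
// functions, with the memory order encoded in the final i32 argument.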

enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};
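// These enumerators mirror C11's memory_order values; in the libcalls below
// the order appears as the last argument (memory_order_relaxed == 0,
// memory_order_seq_cst == 5).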

int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_add_int_ptr
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 12, i32 5)
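  // Note: the C11 builtins scale pointer arithmetic by the pointee size, so
  // adding 3 to an _Atomic(int *) passes 3 * sizeof(int) == 12 (and the
  // subtraction of 5 below passes 20).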
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int_ptr
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 20, i32 5)
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_add_int
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 3, i32 5)
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 5, i32 5)
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}
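// The GNU __atomic builtins operate on plain (non-_Atomic) objects but lower
// to the same runtime library calls.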
int *fp2a(int **p) {
  // CHECK: @fp2a
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 4, i32 0)
  // Note: the GNU builtins do not scale the operand by sizeof(T), so this
  // subtracts 4 bytes, not 4 * sizeof(int *).
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

int test_atomic_fetch_add(int *p) {
  // CHECK: test_atomic_fetch_add
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_add(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_sub(int *p) {
  // CHECK: test_atomic_fetch_sub
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_and(int *p) {
  // CHECK: test_atomic_fetch_and
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_and(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_or(int *p) {
  // CHECK: test_atomic_fetch_or
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_or(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_xor(int *p) {
  // CHECK: test_atomic_fetch_xor
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_nand(int *p) {
  // CHECK: test_atomic_fetch_nand
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
}

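// The op-then-fetch forms have no dedicated libcalls: each one calls the
// corresponding fetch_* libcall and then re-applies the operation to the
// returned old value.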
int test_atomic_add_fetch(int *p) {
  // CHECK: test_atomic_add_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = add i32 [[CALL]], 55
  return __atomic_add_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_sub_fetch(int *p) {
  // CHECK: test_atomic_sub_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = add i32 [[CALL]], -55
  return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_and_fetch(int *p) {
  // CHECK: test_atomic_and_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = and i32 [[CALL]], 55
  return __atomic_and_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_or_fetch(int *p) {
  // CHECK: test_atomic_or_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = or i32 [[CALL]], 55
  return __atomic_or_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_xor_fetch(int *p) {
  // CHECK: test_atomic_xor_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = xor i32 [[CALL]], 55
  return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_nand_fetch(int *p) {
  // CHECK: test_atomic_nand_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: [[OR:%[^ ]*]] = or i32 [[CALL]], -56
  // CHECK: {{%[^ ]*}} = xor i32 [[OR]], 55
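  // The nand result ~(old & 55) is recomputed from the returned old value as
  // (old | ~55) ^ 55; note that -56 == ~55.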
  return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
}