; RUN: opt < %s -tsan -S | FileCheck %s
; Check that atomic memory operations are converted to calls into ThreadSanitizer runtime.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
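
; The final i32 argument of each runtime call below encodes the memory order;
; the values follow the C11/C++11 memory_order numbering: 0 = relaxed (used
; for both unordered and monotonic), 2 = acquire, 3 = release, 4 = acq_rel,
; 5 = seq_cst.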

define i8 @atomic8_load_unordered(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8, i8* %a unordered, align 1
  ret i8 %0
}
; CHECK: atomic8_load_unordered
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 0)

define i8 @atomic8_load_monotonic(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8, i8* %a monotonic, align 1
  ret i8 %0
}
; CHECK: atomic8_load_monotonic
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 0)

define i8 @atomic8_load_acquire(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8, i8* %a acquire, align 1
  ret i8 %0
}
; CHECK: atomic8_load_acquire
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 2)

define i8 @atomic8_load_seq_cst(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8, i8* %a seq_cst, align 1
  ret i8 %0
}
; CHECK: atomic8_load_seq_cst
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 5)

define void @atomic8_store_unordered(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a unordered, align 1
  ret void
}
; CHECK: atomic8_store_unordered
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 0)

define void @atomic8_store_monotonic(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a monotonic, align 1
  ret void
}
; CHECK: atomic8_store_monotonic
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 0)

define void @atomic8_store_release(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a release, align 1
  ret void
}
; CHECK: atomic8_store_release
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 3)

define void @atomic8_store_seq_cst(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a seq_cst, align 1
  ret void
}
; CHECK: atomic8_store_seq_cst
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 5)

define void @atomic8_xchg_monotonic(i8* %a) nounwind uwtable {
entry:
  atomicrmw xchg i8* %a, i8 0 monotonic
  ret void
}
; CHECK: atomic8_xchg_monotonic
; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 0)

define void @atomic8_add_monotonic(i8* %a) nounwind uwtable {
entry:
  atomicrmw add i8* %a, i8 0 monotonic
  ret void
}
; CHECK: atomic8_add_monotonic
; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 0)

define void @atomic8_sub_monotonic(i8* %a) nounwind uwtable {
entry:
  atomicrmw sub i8* %a, i8 0 monotonic
  ret void
}
; CHECK: atomic8_sub_monotonic
; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 0)

define void @atomic8_and_monotonic(i8* %a) nounwind uwtable {
entry:
  atomicrmw and i8* %a, i8 0 monotonic
  ret void
}
; CHECK: atomic8_and_monotonic
; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 0)

define void @atomic8_or_monotonic(i8* %a) nounwind uwtable {
entry:
  atomicrmw or i8* %a, i8 0 monotonic
  ret void
}
; CHECK: atomic8_or_monotonic
; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 0)

define void @atomic8_xor_monotonic(i8* %a) nounwind uwtable {
entry:
  atomicrmw xor i8* %a, i8 0 monotonic
  ret void
}
; CHECK: atomic8_xor_monotonic
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 0)

define void @atomic8_nand_monotonic(i8* %a) nounwind uwtable {
entry:
  atomicrmw nand i8* %a, i8 0 monotonic
  ret void
}
; CHECK: atomic8_nand_monotonic
; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 0)

define void @atomic8_xchg_acquire(i8* %a) nounwind uwtable {
entry:
  atomicrmw xchg i8* %a, i8 0 acquire
  ret void
}
; CHECK: atomic8_xchg_acquire
; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 2)

define void @atomic8_add_acquire(i8* %a) nounwind uwtable {
entry:
  atomicrmw add i8* %a, i8 0 acquire
  ret void
}
; CHECK: atomic8_add_acquire
; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 2)

define void @atomic8_sub_acquire(i8* %a) nounwind uwtable {
entry:
  atomicrmw sub i8* %a, i8 0 acquire
  ret void
}
; CHECK: atomic8_sub_acquire
; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 2)

define void @atomic8_and_acquire(i8* %a) nounwind uwtable {
entry:
  atomicrmw and i8* %a, i8 0 acquire
  ret void
}
; CHECK: atomic8_and_acquire
; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 2)

define void @atomic8_or_acquire(i8* %a) nounwind uwtable {
entry:
  atomicrmw or i8* %a, i8 0 acquire
  ret void
}
; CHECK: atomic8_or_acquire
; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 2)

define void @atomic8_xor_acquire(i8* %a) nounwind uwtable {
entry:
  atomicrmw xor i8* %a, i8 0 acquire
  ret void
}
; CHECK: atomic8_xor_acquire
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 2)

define void @atomic8_nand_acquire(i8* %a) nounwind uwtable {
entry:
  atomicrmw nand i8* %a, i8 0 acquire
  ret void
}
; CHECK: atomic8_nand_acquire
; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 2)

define void @atomic8_xchg_release(i8* %a) nounwind uwtable {
entry:
  atomicrmw xchg i8* %a, i8 0 release
  ret void
}
; CHECK: atomic8_xchg_release
; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 3)

define void @atomic8_add_release(i8* %a) nounwind uwtable {
entry:
  atomicrmw add i8* %a, i8 0 release
  ret void
}
; CHECK: atomic8_add_release
; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 3)

define void @atomic8_sub_release(i8* %a) nounwind uwtable {
entry:
  atomicrmw sub i8* %a, i8 0 release
  ret void
}
; CHECK: atomic8_sub_release
; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 3)

define void @atomic8_and_release(i8* %a) nounwind uwtable {
entry:
  atomicrmw and i8* %a, i8 0 release
  ret void
}
; CHECK: atomic8_and_release
; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 3)

define void @atomic8_or_release(i8* %a) nounwind uwtable {
entry:
  atomicrmw or i8* %a, i8 0 release
  ret void
}
; CHECK: atomic8_or_release
; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 3)

define void @atomic8_xor_release(i8* %a) nounwind uwtable {
entry:
  atomicrmw xor i8* %a, i8 0 release
  ret void
}
; CHECK: atomic8_xor_release
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 3)

define void @atomic8_nand_release(i8* %a) nounwind uwtable {
entry:
  atomicrmw nand i8* %a, i8 0 release
  ret void
}
; CHECK: atomic8_nand_release
; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 3)

define void @atomic8_xchg_acq_rel(i8* %a) nounwind uwtable {
entry:
  atomicrmw xchg i8* %a, i8 0 acq_rel
  ret void
}
; CHECK: atomic8_xchg_acq_rel
; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 4)

define void @atomic8_add_acq_rel(i8* %a) nounwind uwtable {
entry:
  atomicrmw add i8* %a, i8 0 acq_rel
  ret void
}
; CHECK: atomic8_add_acq_rel
; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 4)

define void @atomic8_sub_acq_rel(i8* %a) nounwind uwtable {
entry:
  atomicrmw sub i8* %a, i8 0 acq_rel
  ret void
}
; CHECK: atomic8_sub_acq_rel
; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 4)

define void @atomic8_and_acq_rel(i8* %a) nounwind uwtable {
entry:
  atomicrmw and i8* %a, i8 0 acq_rel
  ret void
}
; CHECK: atomic8_and_acq_rel
; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 4)

define void @atomic8_or_acq_rel(i8* %a) nounwind uwtable {
entry:
  atomicrmw or i8* %a, i8 0 acq_rel
  ret void
}
; CHECK: atomic8_or_acq_rel
; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 4)

define void @atomic8_xor_acq_rel(i8* %a) nounwind uwtable {
entry:
  atomicrmw xor i8* %a, i8 0 acq_rel
  ret void
}
; CHECK: atomic8_xor_acq_rel
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 4)

define void @atomic8_nand_acq_rel(i8* %a) nounwind uwtable {
entry:
  atomicrmw nand i8* %a, i8 0 acq_rel
  ret void
}
; CHECK: atomic8_nand_acq_rel
; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 4)

define void @atomic8_xchg_seq_cst(i8* %a) nounwind uwtable {
entry:
  atomicrmw xchg i8* %a, i8 0 seq_cst
  ret void
}
; CHECK: atomic8_xchg_seq_cst
; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 5)

define void @atomic8_add_seq_cst(i8* %a) nounwind uwtable {
entry:
  atomicrmw add i8* %a, i8 0 seq_cst
  ret void
}
; CHECK: atomic8_add_seq_cst
; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 5)

define void @atomic8_sub_seq_cst(i8* %a) nounwind uwtable {
entry:
  atomicrmw sub i8* %a, i8 0 seq_cst
  ret void
}
; CHECK: atomic8_sub_seq_cst
; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 5)

define void @atomic8_and_seq_cst(i8* %a) nounwind uwtable {
entry:
  atomicrmw and i8* %a, i8 0 seq_cst
  ret void
}
; CHECK: atomic8_and_seq_cst
; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 5)

define void @atomic8_or_seq_cst(i8* %a) nounwind uwtable {
entry:
  atomicrmw or i8* %a, i8 0 seq_cst
  ret void
}
; CHECK: atomic8_or_seq_cst
; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 5)

define void @atomic8_xor_seq_cst(i8* %a) nounwind uwtable {
entry:
  atomicrmw xor i8* %a, i8 0 seq_cst
  ret void
}
; CHECK: atomic8_xor_seq_cst
; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 5)

define void @atomic8_nand_seq_cst(i8* %a) nounwind uwtable {
entry:
  atomicrmw nand i8* %a, i8 0 seq_cst
  ret void
}
; CHECK: atomic8_nand_seq_cst
; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 5)
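
; For cmpxchg, the runtime call receives both the success and the failure
; memory order as its last two i32 arguments.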

define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable {
entry:
  cmpxchg i8* %a, i8 0, i8 1 monotonic monotonic
  ret void
}
; CHECK: atomic8_cas_monotonic
; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0, i32 0)

define void @atomic8_cas_acquire(i8* %a) nounwind uwtable {
entry:
  cmpxchg i8* %a, i8 0, i8 1 acquire acquire
  ret void
}
; CHECK: atomic8_cas_acquire
; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2, i32 2)

define void @atomic8_cas_release(i8* %a) nounwind uwtable {
entry:
  cmpxchg i8* %a, i8 0, i8 1 release monotonic
  ret void
}
; CHECK: atomic8_cas_release
; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3, i32 0)

define void @atomic8_cas_acq_rel(i8* %a) nounwind uwtable {
entry:
  cmpxchg i8* %a, i8 0, i8 1 acq_rel acquire
  ret void
}
; CHECK: atomic8_cas_acq_rel
; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4, i32 2)

define void @atomic8_cas_seq_cst(i8* %a) nounwind uwtable {
entry:
  cmpxchg i8* %a, i8 0, i8 1 seq_cst seq_cst
  ret void
}
; CHECK: atomic8_cas_seq_cst
; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5, i32 5)
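
; The same patterns repeat below for each operand size: i16, i32, i64, and
; i128.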

define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16, i16* %a unordered, align 2
  ret i16 %0
}
; CHECK: atomic16_load_unordered
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 0)

define i16 @atomic16_load_monotonic(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16, i16* %a monotonic, align 2
  ret i16 %0
}
; CHECK: atomic16_load_monotonic
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 0)

define i16 @atomic16_load_acquire(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16, i16* %a acquire, align 2
  ret i16 %0
}
; CHECK: atomic16_load_acquire
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 2)

define i16 @atomic16_load_seq_cst(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16, i16* %a seq_cst, align 2
  ret i16 %0
}
; CHECK: atomic16_load_seq_cst
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 5)

define void @atomic16_store_unordered(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a unordered, align 2
  ret void
}
; CHECK: atomic16_store_unordered
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 0)

define void @atomic16_store_monotonic(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a monotonic, align 2
  ret void
}
; CHECK: atomic16_store_monotonic
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 0)

define void @atomic16_store_release(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a release, align 2
  ret void
}
; CHECK: atomic16_store_release
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 3)

define void @atomic16_store_seq_cst(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a seq_cst, align 2
  ret void
}
; CHECK: atomic16_store_seq_cst
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 5)

define void @atomic16_xchg_monotonic(i16* %a) nounwind uwtable {
entry:
  atomicrmw xchg i16* %a, i16 0 monotonic
  ret void
}
; CHECK: atomic16_xchg_monotonic
; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 0)

define void @atomic16_add_monotonic(i16* %a) nounwind uwtable {
entry:
  atomicrmw add i16* %a, i16 0 monotonic
  ret void
}
; CHECK: atomic16_add_monotonic
; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 0)

define void @atomic16_sub_monotonic(i16* %a) nounwind uwtable {
entry:
  atomicrmw sub i16* %a, i16 0 monotonic
  ret void
}
; CHECK: atomic16_sub_monotonic
; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 0)

define void @atomic16_and_monotonic(i16* %a) nounwind uwtable {
entry:
  atomicrmw and i16* %a, i16 0 monotonic
  ret void
}
; CHECK: atomic16_and_monotonic
; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 0)

define void @atomic16_or_monotonic(i16* %a) nounwind uwtable {
entry:
  atomicrmw or i16* %a, i16 0 monotonic
  ret void
}
; CHECK: atomic16_or_monotonic
; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 0)

define void @atomic16_xor_monotonic(i16* %a) nounwind uwtable {
entry:
  atomicrmw xor i16* %a, i16 0 monotonic
  ret void
}
; CHECK: atomic16_xor_monotonic
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 0)

define void @atomic16_nand_monotonic(i16* %a) nounwind uwtable {
entry:
  atomicrmw nand i16* %a, i16 0 monotonic
  ret void
}
; CHECK: atomic16_nand_monotonic
; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 0)

define void @atomic16_xchg_acquire(i16* %a) nounwind uwtable {
entry:
  atomicrmw xchg i16* %a, i16 0 acquire
  ret void
}
; CHECK: atomic16_xchg_acquire
; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 2)

define void @atomic16_add_acquire(i16* %a) nounwind uwtable {
entry:
  atomicrmw add i16* %a, i16 0 acquire
  ret void
}
; CHECK: atomic16_add_acquire
; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 2)

define void @atomic16_sub_acquire(i16* %a) nounwind uwtable {
entry:
  atomicrmw sub i16* %a, i16 0 acquire
  ret void
}
; CHECK: atomic16_sub_acquire
; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 2)

define void @atomic16_and_acquire(i16* %a) nounwind uwtable {
entry:
  atomicrmw and i16* %a, i16 0 acquire
  ret void
}
; CHECK: atomic16_and_acquire
; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 2)

define void @atomic16_or_acquire(i16* %a) nounwind uwtable {
entry:
  atomicrmw or i16* %a, i16 0 acquire
  ret void
}
; CHECK: atomic16_or_acquire
; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 2)

define void @atomic16_xor_acquire(i16* %a) nounwind uwtable {
entry:
  atomicrmw xor i16* %a, i16 0 acquire
  ret void
}
; CHECK: atomic16_xor_acquire
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 2)

define void @atomic16_nand_acquire(i16* %a) nounwind uwtable {
entry:
  atomicrmw nand i16* %a, i16 0 acquire
  ret void
}
; CHECK: atomic16_nand_acquire
; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 2)

define void @atomic16_xchg_release(i16* %a) nounwind uwtable {
entry:
  atomicrmw xchg i16* %a, i16 0 release
  ret void
}
; CHECK: atomic16_xchg_release
; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 3)

define void @atomic16_add_release(i16* %a) nounwind uwtable {
entry:
  atomicrmw add i16* %a, i16 0 release
  ret void
}
; CHECK: atomic16_add_release
; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 3)

define void @atomic16_sub_release(i16* %a) nounwind uwtable {
entry:
  atomicrmw sub i16* %a, i16 0 release
  ret void
}
; CHECK: atomic16_sub_release
; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 3)

define void @atomic16_and_release(i16* %a) nounwind uwtable {
entry:
  atomicrmw and i16* %a, i16 0 release
  ret void
}
; CHECK: atomic16_and_release
; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 3)

define void @atomic16_or_release(i16* %a) nounwind uwtable {
entry:
  atomicrmw or i16* %a, i16 0 release
  ret void
}
; CHECK: atomic16_or_release
; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 3)

define void @atomic16_xor_release(i16* %a) nounwind uwtable {
entry:
  atomicrmw xor i16* %a, i16 0 release
  ret void
}
; CHECK: atomic16_xor_release
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 3)

define void @atomic16_nand_release(i16* %a) nounwind uwtable {
entry:
  atomicrmw nand i16* %a, i16 0 release
  ret void
}
; CHECK: atomic16_nand_release
; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 3)

define void @atomic16_xchg_acq_rel(i16* %a) nounwind uwtable {
entry:
  atomicrmw xchg i16* %a, i16 0 acq_rel
  ret void
}
; CHECK: atomic16_xchg_acq_rel
; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 4)

define void @atomic16_add_acq_rel(i16* %a) nounwind uwtable {
entry:
  atomicrmw add i16* %a, i16 0 acq_rel
  ret void
}
; CHECK: atomic16_add_acq_rel
; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 4)

define void @atomic16_sub_acq_rel(i16* %a) nounwind uwtable {
entry:
  atomicrmw sub i16* %a, i16 0 acq_rel
  ret void
}
; CHECK: atomic16_sub_acq_rel
; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 4)

define void @atomic16_and_acq_rel(i16* %a) nounwind uwtable {
entry:
  atomicrmw and i16* %a, i16 0 acq_rel
  ret void
}
; CHECK: atomic16_and_acq_rel
; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 4)

define void @atomic16_or_acq_rel(i16* %a) nounwind uwtable {
entry:
  atomicrmw or i16* %a, i16 0 acq_rel
  ret void
}
; CHECK: atomic16_or_acq_rel
; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 4)

define void @atomic16_xor_acq_rel(i16* %a) nounwind uwtable {
entry:
  atomicrmw xor i16* %a, i16 0 acq_rel
  ret void
}
; CHECK: atomic16_xor_acq_rel
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 4)

define void @atomic16_nand_acq_rel(i16* %a) nounwind uwtable {
entry:
  atomicrmw nand i16* %a, i16 0 acq_rel
  ret void
}
; CHECK: atomic16_nand_acq_rel
; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 4)

define void @atomic16_xchg_seq_cst(i16* %a) nounwind uwtable {
entry:
  atomicrmw xchg i16* %a, i16 0 seq_cst
  ret void
}
; CHECK: atomic16_xchg_seq_cst
; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 5)

define void @atomic16_add_seq_cst(i16* %a) nounwind uwtable {
entry:
  atomicrmw add i16* %a, i16 0 seq_cst
  ret void
}
; CHECK: atomic16_add_seq_cst
; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 5)

define void @atomic16_sub_seq_cst(i16* %a) nounwind uwtable {
entry:
  atomicrmw sub i16* %a, i16 0 seq_cst
  ret void
}
; CHECK: atomic16_sub_seq_cst
; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 5)

define void @atomic16_and_seq_cst(i16* %a) nounwind uwtable {
entry:
  atomicrmw and i16* %a, i16 0 seq_cst
  ret void
}
; CHECK: atomic16_and_seq_cst
; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 5)

define void @atomic16_or_seq_cst(i16* %a) nounwind uwtable {
entry:
  atomicrmw or i16* %a, i16 0 seq_cst
  ret void
}
; CHECK: atomic16_or_seq_cst
; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 5)

define void @atomic16_xor_seq_cst(i16* %a) nounwind uwtable {
entry:
  atomicrmw xor i16* %a, i16 0 seq_cst
  ret void
}
; CHECK: atomic16_xor_seq_cst
; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 5)

define void @atomic16_nand_seq_cst(i16* %a) nounwind uwtable {
entry:
  atomicrmw nand i16* %a, i16 0 seq_cst
  ret void
}
; CHECK: atomic16_nand_seq_cst
; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 5)

define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable {
entry:
  cmpxchg i16* %a, i16 0, i16 1 monotonic monotonic
  ret void
}
; CHECK: atomic16_cas_monotonic
; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0, i32 0)

define void @atomic16_cas_acquire(i16* %a) nounwind uwtable {
entry:
  cmpxchg i16* %a, i16 0, i16 1 acquire acquire
  ret void
}
; CHECK: atomic16_cas_acquire
; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2, i32 2)

define void @atomic16_cas_release(i16* %a) nounwind uwtable {
entry:
  cmpxchg i16* %a, i16 0, i16 1 release monotonic
  ret void
}
; CHECK: atomic16_cas_release
; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3, i32 0)

define void @atomic16_cas_acq_rel(i16* %a) nounwind uwtable {
entry:
  cmpxchg i16* %a, i16 0, i16 1 acq_rel acquire
  ret void
}
; CHECK: atomic16_cas_acq_rel
; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4, i32 2)

define void @atomic16_cas_seq_cst(i16* %a) nounwind uwtable {
entry:
  cmpxchg i16* %a, i16 0, i16 1 seq_cst seq_cst
  ret void
}
; CHECK: atomic16_cas_seq_cst
; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5, i32 5)

define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32, i32* %a unordered, align 4
  ret i32 %0
}
; CHECK: atomic32_load_unordered
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 0)

define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32, i32* %a monotonic, align 4
  ret i32 %0
}
; CHECK: atomic32_load_monotonic
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 0)

define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32, i32* %a acquire, align 4
  ret i32 %0
}
; CHECK: atomic32_load_acquire
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 2)

define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32, i32* %a seq_cst, align 4
  ret i32 %0
}
; CHECK: atomic32_load_seq_cst
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 5)

define void @atomic32_store_unordered(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a unordered, align 4
  ret void
}
; CHECK: atomic32_store_unordered
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 0)

define void @atomic32_store_monotonic(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a monotonic, align 4
  ret void
}
; CHECK: atomic32_store_monotonic
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 0)

define void @atomic32_store_release(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a release, align 4
  ret void
}
; CHECK: atomic32_store_release
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 3)

define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a seq_cst, align 4
  ret void
}
; CHECK: atomic32_store_seq_cst
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 5)

define void @atomic32_xchg_monotonic(i32* %a) nounwind uwtable {
entry:
  atomicrmw xchg i32* %a, i32 0 monotonic
  ret void
}
; CHECK: atomic32_xchg_monotonic
; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 0)

define void @atomic32_add_monotonic(i32* %a) nounwind uwtable {
entry:
  atomicrmw add i32* %a, i32 0 monotonic
  ret void
}
; CHECK: atomic32_add_monotonic
; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 0)

define void @atomic32_sub_monotonic(i32* %a) nounwind uwtable {
entry:
  atomicrmw sub i32* %a, i32 0 monotonic
  ret void
}
; CHECK: atomic32_sub_monotonic
; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 0)

define void @atomic32_and_monotonic(i32* %a) nounwind uwtable {
entry:
  atomicrmw and i32* %a, i32 0 monotonic
  ret void
}
; CHECK: atomic32_and_monotonic
; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 0)

define void @atomic32_or_monotonic(i32* %a) nounwind uwtable {
entry:
  atomicrmw or i32* %a, i32 0 monotonic
  ret void
}
; CHECK: atomic32_or_monotonic
; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 0)

define void @atomic32_xor_monotonic(i32* %a) nounwind uwtable {
entry:
  atomicrmw xor i32* %a, i32 0 monotonic
  ret void
}
; CHECK: atomic32_xor_monotonic
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 0)

define void @atomic32_nand_monotonic(i32* %a) nounwind uwtable {
entry:
  atomicrmw nand i32* %a, i32 0 monotonic
  ret void
}
; CHECK: atomic32_nand_monotonic
; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 0)

define void @atomic32_xchg_acquire(i32* %a) nounwind uwtable {
entry:
  atomicrmw xchg i32* %a, i32 0 acquire
  ret void
}
; CHECK: atomic32_xchg_acquire
; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 2)

define void @atomic32_add_acquire(i32* %a) nounwind uwtable {
entry:
  atomicrmw add i32* %a, i32 0 acquire
  ret void
}
; CHECK: atomic32_add_acquire
; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 2)

define void @atomic32_sub_acquire(i32* %a) nounwind uwtable {
entry:
  atomicrmw sub i32* %a, i32 0 acquire
  ret void
}
; CHECK: atomic32_sub_acquire
; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 2)

define void @atomic32_and_acquire(i32* %a) nounwind uwtable {
entry:
  atomicrmw and i32* %a, i32 0 acquire
  ret void
}
; CHECK: atomic32_and_acquire
; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 2)

define void @atomic32_or_acquire(i32* %a) nounwind uwtable {
entry:
  atomicrmw or i32* %a, i32 0 acquire
  ret void
}
; CHECK: atomic32_or_acquire
; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 2)

define void @atomic32_xor_acquire(i32* %a) nounwind uwtable {
entry:
  atomicrmw xor i32* %a, i32 0 acquire
  ret void
}
; CHECK: atomic32_xor_acquire
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 2)

define void @atomic32_nand_acquire(i32* %a) nounwind uwtable {
entry:
  atomicrmw nand i32* %a, i32 0 acquire
  ret void
}
; CHECK: atomic32_nand_acquire
; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 2)

define void @atomic32_xchg_release(i32* %a) nounwind uwtable {
entry:
  atomicrmw xchg i32* %a, i32 0 release
  ret void
}
; CHECK: atomic32_xchg_release
; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 3)

define void @atomic32_add_release(i32* %a) nounwind uwtable {
entry:
  atomicrmw add i32* %a, i32 0 release
  ret void
}
; CHECK: atomic32_add_release
; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 3)

define void @atomic32_sub_release(i32* %a) nounwind uwtable {
entry:
  atomicrmw sub i32* %a, i32 0 release
  ret void
}
; CHECK: atomic32_sub_release
; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 3)

define void @atomic32_and_release(i32* %a) nounwind uwtable {
entry:
  atomicrmw and i32* %a, i32 0 release
  ret void
}
; CHECK: atomic32_and_release
; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 3)

define void @atomic32_or_release(i32* %a) nounwind uwtable {
entry:
  atomicrmw or i32* %a, i32 0 release
  ret void
}
; CHECK: atomic32_or_release
; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 3)

define void @atomic32_xor_release(i32* %a) nounwind uwtable {
entry:
  atomicrmw xor i32* %a, i32 0 release
  ret void
}
; CHECK: atomic32_xor_release
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 3)

define void @atomic32_nand_release(i32* %a) nounwind uwtable {
entry:
  atomicrmw nand i32* %a, i32 0 release
  ret void
}
; CHECK: atomic32_nand_release
; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 3)

define void @atomic32_xchg_acq_rel(i32* %a) nounwind uwtable {
entry:
  atomicrmw xchg i32* %a, i32 0 acq_rel
  ret void
}
; CHECK: atomic32_xchg_acq_rel
; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 4)

define void @atomic32_add_acq_rel(i32* %a) nounwind uwtable {
entry:
  atomicrmw add i32* %a, i32 0 acq_rel
  ret void
}
; CHECK: atomic32_add_acq_rel
; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 4)

define void @atomic32_sub_acq_rel(i32* %a) nounwind uwtable {
entry:
  atomicrmw sub i32* %a, i32 0 acq_rel
  ret void
}
; CHECK: atomic32_sub_acq_rel
; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 4)

define void @atomic32_and_acq_rel(i32* %a) nounwind uwtable {
entry:
  atomicrmw and i32* %a, i32 0 acq_rel
  ret void
}
; CHECK: atomic32_and_acq_rel
; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 4)

define void @atomic32_or_acq_rel(i32* %a) nounwind uwtable {
entry:
  atomicrmw or i32* %a, i32 0 acq_rel
  ret void
}
; CHECK: atomic32_or_acq_rel
; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 4)

define void @atomic32_xor_acq_rel(i32* %a) nounwind uwtable {
entry:
  atomicrmw xor i32* %a, i32 0 acq_rel
  ret void
}
; CHECK: atomic32_xor_acq_rel
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 4)

define void @atomic32_nand_acq_rel(i32* %a) nounwind uwtable {
entry:
  atomicrmw nand i32* %a, i32 0 acq_rel
  ret void
}
; CHECK: atomic32_nand_acq_rel
; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 4)

define void @atomic32_xchg_seq_cst(i32* %a) nounwind uwtable {
entry:
  atomicrmw xchg i32* %a, i32 0 seq_cst
  ret void
}
; CHECK: atomic32_xchg_seq_cst
; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 5)

define void @atomic32_add_seq_cst(i32* %a) nounwind uwtable {
entry:
  atomicrmw add i32* %a, i32 0 seq_cst
  ret void
}
; CHECK: atomic32_add_seq_cst
; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 5)

define void @atomic32_sub_seq_cst(i32* %a) nounwind uwtable {
entry:
  atomicrmw sub i32* %a, i32 0 seq_cst
  ret void
}
; CHECK: atomic32_sub_seq_cst
; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 5)

define void @atomic32_and_seq_cst(i32* %a) nounwind uwtable {
entry:
  atomicrmw and i32* %a, i32 0 seq_cst
  ret void
}
; CHECK: atomic32_and_seq_cst
; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 5)

define void @atomic32_or_seq_cst(i32* %a) nounwind uwtable {
entry:
  atomicrmw or i32* %a, i32 0 seq_cst
  ret void
}
; CHECK: atomic32_or_seq_cst
; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 5)

define void @atomic32_xor_seq_cst(i32* %a) nounwind uwtable {
entry:
  atomicrmw xor i32* %a, i32 0 seq_cst
  ret void
}
; CHECK: atomic32_xor_seq_cst
; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 5)

define void @atomic32_nand_seq_cst(i32* %a) nounwind uwtable {
entry:
  atomicrmw nand i32* %a, i32 0 seq_cst
  ret void
}
; CHECK: atomic32_nand_seq_cst
; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 5)

define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable {
entry:
  cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic
  ret void
}
; CHECK: atomic32_cas_monotonic
; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0, i32 0)

define void @atomic32_cas_acquire(i32* %a) nounwind uwtable {
entry:
  cmpxchg i32* %a, i32 0, i32 1 acquire acquire
  ret void
}
; CHECK: atomic32_cas_acquire
; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2, i32 2)

define void @atomic32_cas_release(i32* %a) nounwind uwtable {
entry:
  cmpxchg i32* %a, i32 0, i32 1 release monotonic
  ret void
}
; CHECK: atomic32_cas_release
; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3, i32 0)

define void @atomic32_cas_acq_rel(i32* %a) nounwind uwtable {
entry:
  cmpxchg i32* %a, i32 0, i32 1 acq_rel acquire
  ret void
}
; CHECK: atomic32_cas_acq_rel
; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4, i32 2)

define void @atomic32_cas_seq_cst(i32* %a) nounwind uwtable {
entry:
  cmpxchg i32* %a, i32 0, i32 1 seq_cst seq_cst
  ret void
}
; CHECK: atomic32_cas_seq_cst
; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5, i32 5)

define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64, i64* %a unordered, align 8
  ret i64 %0
}
; CHECK: atomic64_load_unordered
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 0)

define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64, i64* %a monotonic, align 8
  ret i64 %0
}
; CHECK: atomic64_load_monotonic
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 0)

define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64, i64* %a acquire, align 8
  ret i64 %0
}
; CHECK: atomic64_load_acquire
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 2)

define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64, i64* %a seq_cst, align 8
  ret i64 %0
}
; CHECK: atomic64_load_seq_cst
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 5)

define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a unordered, align 8
  ret void
}
; CHECK: atomic64_store_unordered
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 0)

define void @atomic64_store_monotonic(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a monotonic, align 8
  ret void
}
; CHECK: atomic64_store_monotonic
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 0)

define void @atomic64_store_release(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a release, align 8
  ret void
}
; CHECK: atomic64_store_release
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 3)

define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a seq_cst, align 8
  ret void
}
; CHECK: atomic64_store_seq_cst
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 5)

define void @atomic64_xchg_monotonic(i64* %a) nounwind uwtable {
entry:
  atomicrmw xchg i64* %a, i64 0 monotonic
  ret void
}
; CHECK: atomic64_xchg_monotonic
; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 0)

define void @atomic64_add_monotonic(i64* %a) nounwind uwtable {
entry:
  atomicrmw add i64* %a, i64 0 monotonic
  ret void
}
; CHECK: atomic64_add_monotonic
; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 0)

define void @atomic64_sub_monotonic(i64* %a) nounwind uwtable {
entry:
  atomicrmw sub i64* %a, i64 0 monotonic
  ret void
}
; CHECK: atomic64_sub_monotonic
; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 0)

define void @atomic64_and_monotonic(i64* %a) nounwind uwtable {
entry:
  atomicrmw and i64* %a, i64 0 monotonic
  ret void
}
; CHECK: atomic64_and_monotonic
; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 0)

define void @atomic64_or_monotonic(i64* %a) nounwind uwtable {
entry:
  atomicrmw or i64* %a, i64 0 monotonic
  ret void
}
; CHECK: atomic64_or_monotonic
; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 0)

define void @atomic64_xor_monotonic(i64* %a) nounwind uwtable {
entry:
  atomicrmw xor i64* %a, i64 0 monotonic
  ret void
}
; CHECK: atomic64_xor_monotonic
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 0)

define void @atomic64_nand_monotonic(i64* %a) nounwind uwtable {
entry:
  atomicrmw nand i64* %a, i64 0 monotonic
  ret void
}
; CHECK: atomic64_nand_monotonic
; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 0)

define void @atomic64_xchg_acquire(i64* %a) nounwind uwtable {
entry:
  atomicrmw xchg i64* %a, i64 0 acquire
  ret void
}
; CHECK: atomic64_xchg_acquire
; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 2)

define void @atomic64_add_acquire(i64* %a) nounwind uwtable {
entry:
  atomicrmw add i64* %a, i64 0 acquire
  ret void
}
; CHECK: atomic64_add_acquire
; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 2)

define void @atomic64_sub_acquire(i64* %a) nounwind uwtable {
entry:
  atomicrmw sub i64* %a, i64 0 acquire
  ret void
}
; CHECK: atomic64_sub_acquire
; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 2)

define void @atomic64_and_acquire(i64* %a) nounwind uwtable {
entry:
  atomicrmw and i64* %a, i64 0 acquire
  ret void
}
; CHECK: atomic64_and_acquire
; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 2)

define void @atomic64_or_acquire(i64* %a) nounwind uwtable {
entry:
  atomicrmw or i64* %a, i64 0 acquire
  ret void
}
; CHECK: atomic64_or_acquire
; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 2)

define void @atomic64_xor_acquire(i64* %a) nounwind uwtable {
entry:
  atomicrmw xor i64* %a, i64 0 acquire
  ret void
}
; CHECK: atomic64_xor_acquire
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 2)

define void @atomic64_nand_acquire(i64* %a) nounwind uwtable {
entry:
  atomicrmw nand i64* %a, i64 0 acquire
  ret void
}
; CHECK: atomic64_nand_acquire
; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 2)

define void @atomic64_xchg_release(i64* %a) nounwind uwtable {
entry:
  atomicrmw xchg i64* %a, i64 0 release
  ret void
}
; CHECK: atomic64_xchg_release
; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 3)

define void @atomic64_add_release(i64* %a) nounwind uwtable {
entry:
  atomicrmw add i64* %a, i64 0 release
  ret void
}
; CHECK: atomic64_add_release
; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 3)

define void @atomic64_sub_release(i64* %a) nounwind uwtable {
entry:
  atomicrmw sub i64* %a, i64 0 release
  ret void
}
; CHECK: atomic64_sub_release
; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 3)

define void @atomic64_and_release(i64* %a) nounwind uwtable {
entry:
  atomicrmw and i64* %a, i64 0 release
  ret void
}
; CHECK: atomic64_and_release
; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 3)

define void @atomic64_or_release(i64* %a) nounwind uwtable {
entry:
  atomicrmw or i64* %a, i64 0 release
  ret void
}
; CHECK: atomic64_or_release
; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 3)

define void @atomic64_xor_release(i64* %a) nounwind uwtable {
entry:
  atomicrmw xor i64* %a, i64 0 release
  ret void
}
; CHECK: atomic64_xor_release
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 3)

define void @atomic64_nand_release(i64* %a) nounwind uwtable {
entry:
  atomicrmw nand i64* %a, i64 0 release
  ret void
}
; CHECK: atomic64_nand_release
; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 3)

define void @atomic64_xchg_acq_rel(i64* %a) nounwind uwtable {
entry:
  atomicrmw xchg i64* %a, i64 0 acq_rel
  ret void
}
; CHECK: atomic64_xchg_acq_rel
; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 4)

define void @atomic64_add_acq_rel(i64* %a) nounwind uwtable {
entry:
  atomicrmw add i64* %a, i64 0 acq_rel
  ret void
}
; CHECK: atomic64_add_acq_rel
; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 4)

define void @atomic64_sub_acq_rel(i64* %a) nounwind uwtable {
entry:
  atomicrmw sub i64* %a, i64 0 acq_rel
  ret void
}
; CHECK: atomic64_sub_acq_rel
; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 4)

define void @atomic64_and_acq_rel(i64* %a) nounwind uwtable {
entry:
  atomicrmw and i64* %a, i64 0 acq_rel
  ret void
}
; CHECK: atomic64_and_acq_rel
; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 4)

define void @atomic64_or_acq_rel(i64* %a) nounwind uwtable {
entry:
  atomicrmw or i64* %a, i64 0 acq_rel
  ret void
}
; CHECK: atomic64_or_acq_rel
; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 4)

define void @atomic64_xor_acq_rel(i64* %a) nounwind uwtable {
entry:
  atomicrmw xor i64* %a, i64 0 acq_rel
  ret void
}
; CHECK: atomic64_xor_acq_rel
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 4)

define void @atomic64_nand_acq_rel(i64* %a) nounwind uwtable {
entry:
  atomicrmw nand i64* %a, i64 0 acq_rel
  ret void
}
; CHECK: atomic64_nand_acq_rel
; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 4)

define void @atomic64_xchg_seq_cst(i64* %a) nounwind uwtable {
entry:
  atomicrmw xchg i64* %a, i64 0 seq_cst
  ret void
}
; CHECK: atomic64_xchg_seq_cst
; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 5)

define void @atomic64_add_seq_cst(i64* %a) nounwind uwtable {
entry:
  atomicrmw add i64* %a, i64 0 seq_cst
  ret void
}
; CHECK: atomic64_add_seq_cst
; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 5)

define void @atomic64_sub_seq_cst(i64* %a) nounwind uwtable {
entry:
  atomicrmw sub i64* %a, i64 0 seq_cst
  ret void
}
; CHECK: atomic64_sub_seq_cst
; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 5)

define void @atomic64_and_seq_cst(i64* %a) nounwind uwtable {
entry:
  atomicrmw and i64* %a, i64 0 seq_cst
  ret void
}
; CHECK: atomic64_and_seq_cst
; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 5)

define void @atomic64_or_seq_cst(i64* %a) nounwind uwtable {
entry:
  atomicrmw or i64* %a, i64 0 seq_cst
  ret void
}
; CHECK: atomic64_or_seq_cst
; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 5)

define void @atomic64_xor_seq_cst(i64* %a) nounwind uwtable {
entry:
  atomicrmw xor i64* %a, i64 0 seq_cst
  ret void
}
; CHECK: atomic64_xor_seq_cst
; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 5)

define void @atomic64_nand_seq_cst(i64* %a) nounwind uwtable {
entry:
  atomicrmw nand i64* %a, i64 0 seq_cst
  ret void
}
; CHECK: atomic64_nand_seq_cst
; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 5)

define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable {
entry:
  cmpxchg i64* %a, i64 0, i64 1 monotonic monotonic
  ret void
}
; CHECK: atomic64_cas_monotonic
; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0, i32 0)

define void @atomic64_cas_acquire(i64* %a) nounwind uwtable {
entry:
  cmpxchg i64* %a, i64 0, i64 1 acquire acquire
  ret void
}
; CHECK: atomic64_cas_acquire
; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2, i32 2)

define void @atomic64_cas_release(i64* %a) nounwind uwtable {
entry:
  cmpxchg i64* %a, i64 0, i64 1 release monotonic
  ret void
}
; CHECK: atomic64_cas_release
; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3, i32 0)

define void @atomic64_cas_acq_rel(i64* %a) nounwind uwtable {
entry:
  cmpxchg i64* %a, i64 0, i64 1 acq_rel acquire
  ret void
}
; CHECK: atomic64_cas_acq_rel
; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4, i32 2)

define void @atomic64_cas_seq_cst(i64* %a) nounwind uwtable {
entry:
  cmpxchg i64* %a, i64 0, i64 1 seq_cst seq_cst
  ret void
}
; CHECK: atomic64_cas_seq_cst
; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 5)

define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128, i128* %a unordered, align 16
  ret i128 %0
}
; CHECK: atomic128_load_unordered
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 0)

define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128, i128* %a monotonic, align 16
  ret i128 %0
}
; CHECK: atomic128_load_monotonic
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 0)

define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128, i128* %a acquire, align 16
  ret i128 %0
}
; CHECK: atomic128_load_acquire
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 2)

define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128, i128* %a seq_cst, align 16
  ret i128 %0
}
; CHECK: atomic128_load_seq_cst
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 5)

define void @atomic128_store_unordered(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a unordered, align 16
  ret void
}
; CHECK: atomic128_store_unordered
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 0)

define void @atomic128_store_monotonic(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a monotonic, align 16
  ret void
}
; CHECK: atomic128_store_monotonic
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 0)

define void @atomic128_store_release(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a release, align 16
  ret void
}
; CHECK: atomic128_store_release
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 3)

define void @atomic128_store_seq_cst(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a seq_cst, align 16
  ret void
}
; CHECK: atomic128_store_seq_cst
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 5)

define void @atomic128_xchg_monotonic(i128* %a) nounwind uwtable {
entry:
  atomicrmw xchg i128* %a, i128 0 monotonic
  ret void
}
; CHECK: atomic128_xchg_monotonic
; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 0)

define void @atomic128_add_monotonic(i128* %a) nounwind uwtable {
entry:
  atomicrmw add i128* %a, i128 0 monotonic
  ret void
}
; CHECK: atomic128_add_monotonic
; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 0)

define void @atomic128_sub_monotonic(i128* %a) nounwind uwtable {
entry:
  atomicrmw sub i128* %a, i128 0 monotonic
  ret void
}
; CHECK: atomic128_sub_monotonic
; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 0)

define void @atomic128_and_monotonic(i128* %a) nounwind uwtable {
entry:
  atomicrmw and i128* %a, i128 0 monotonic
  ret void
}
; CHECK: atomic128_and_monotonic
; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 0)

define void @atomic128_or_monotonic(i128* %a) nounwind uwtable {
entry:
  atomicrmw or i128* %a, i128 0 monotonic
  ret void
}
; CHECK: atomic128_or_monotonic
; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 0)

define void @atomic128_xor_monotonic(i128* %a) nounwind uwtable {
entry:
  atomicrmw xor i128* %a, i128 0 monotonic
  ret void
}
; CHECK: atomic128_xor_monotonic
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 0)

define void @atomic128_nand_monotonic(i128* %a) nounwind uwtable {
entry:
  atomicrmw nand i128* %a, i128 0 monotonic
  ret void
}
; CHECK: atomic128_nand_monotonic
; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 0)

define void @atomic128_xchg_acquire(i128* %a) nounwind uwtable {
entry:
  atomicrmw xchg i128* %a, i128 0 acquire
  ret void
}
; CHECK: atomic128_xchg_acquire
; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 2)

define void @atomic128_add_acquire(i128* %a) nounwind uwtable {
entry:
  atomicrmw add i128* %a, i128 0 acquire
  ret void
}
; CHECK: atomic128_add_acquire
; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 2)

define void @atomic128_sub_acquire(i128* %a) nounwind uwtable {
entry:
  atomicrmw sub i128* %a, i128 0 acquire
  ret void
}
; CHECK: atomic128_sub_acquire
; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 2)

define void @atomic128_and_acquire(i128* %a) nounwind uwtable {
entry:
  atomicrmw and i128* %a, i128 0 acquire
  ret void
}
; CHECK: atomic128_and_acquire
; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 2)

define void @atomic128_or_acquire(i128* %a) nounwind uwtable {
entry:
  atomicrmw or i128* %a, i128 0 acquire
  ret void
}
; CHECK: atomic128_or_acquire
; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 2)

define void @atomic128_xor_acquire(i128* %a) nounwind uwtable {
entry:
  atomicrmw xor i128* %a, i128 0 acquire
  ret void
}
; CHECK: atomic128_xor_acquire
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 2)

define void @atomic128_nand_acquire(i128* %a) nounwind uwtable {
entry:
  atomicrmw nand i128* %a, i128 0 acquire
  ret void
}
; CHECK: atomic128_nand_acquire
; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 2)

define void @atomic128_xchg_release(i128* %a) nounwind uwtable {
entry:
  atomicrmw xchg i128* %a, i128 0 release
  ret void
}
; CHECK: atomic128_xchg_release
; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 3)

define void @atomic128_add_release(i128* %a) nounwind uwtable {
entry:
  atomicrmw add i128* %a, i128 0 release
  ret void
}
; CHECK: atomic128_add_release
; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 3)

define void @atomic128_sub_release(i128* %a) nounwind uwtable {
entry:
  atomicrmw sub i128* %a, i128 0 release
  ret void
}
; CHECK: atomic128_sub_release
; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 3)

define void @atomic128_and_release(i128* %a) nounwind uwtable {
entry:
  atomicrmw and i128* %a, i128 0 release
  ret void
}
; CHECK: atomic128_and_release
; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 3)

define void @atomic128_or_release(i128* %a) nounwind uwtable {
entry:
  atomicrmw or i128* %a, i128 0 release
  ret void
}
; CHECK: atomic128_or_release
; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 3)

define void @atomic128_xor_release(i128* %a) nounwind uwtable {
entry:
  atomicrmw xor i128* %a, i128 0 release
  ret void
}
; CHECK: atomic128_xor_release
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 3)

define void @atomic128_nand_release(i128* %a) nounwind uwtable {
entry:
  atomicrmw nand i128* %a, i128 0 release
  ret void
}
; CHECK: atomic128_nand_release
; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 3)

define void @atomic128_xchg_acq_rel(i128* %a) nounwind uwtable {
entry:
  atomicrmw xchg i128* %a, i128 0 acq_rel
  ret void
}
; CHECK: atomic128_xchg_acq_rel
; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 4)

define void @atomic128_add_acq_rel(i128* %a) nounwind uwtable {
entry:
  atomicrmw add i128* %a, i128 0 acq_rel
  ret void
}
; CHECK: atomic128_add_acq_rel
; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 4)

define void @atomic128_sub_acq_rel(i128* %a) nounwind uwtable {
entry:
  atomicrmw sub i128* %a, i128 0 acq_rel
  ret void
}
; CHECK: atomic128_sub_acq_rel
; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 4)

define void @atomic128_and_acq_rel(i128* %a) nounwind uwtable {
entry:
  atomicrmw and i128* %a, i128 0 acq_rel
  ret void
}
; CHECK: atomic128_and_acq_rel
; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 4)

define void @atomic128_or_acq_rel(i128* %a) nounwind uwtable {
entry:
  atomicrmw or i128* %a, i128 0 acq_rel
  ret void
}
; CHECK: atomic128_or_acq_rel
; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 4)

define void @atomic128_xor_acq_rel(i128* %a) nounwind uwtable {
entry:
  atomicrmw xor i128* %a, i128 0 acq_rel
  ret void
}
; CHECK: atomic128_xor_acq_rel
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 4)

define void @atomic128_nand_acq_rel(i128* %a) nounwind uwtable {
entry:
  atomicrmw nand i128* %a, i128 0 acq_rel
  ret void
}
; CHECK: atomic128_nand_acq_rel
; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 4)

define void @atomic128_xchg_seq_cst(i128* %a) nounwind uwtable {
entry:
  atomicrmw xchg i128* %a, i128 0 seq_cst
  ret void
}
; CHECK: atomic128_xchg_seq_cst
; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 5)

define void @atomic128_add_seq_cst(i128* %a) nounwind uwtable {
entry:
  atomicrmw add i128* %a, i128 0 seq_cst
  ret void
}
; CHECK: atomic128_add_seq_cst
; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 5)

define void @atomic128_sub_seq_cst(i128* %a) nounwind uwtable {
entry:
  atomicrmw sub i128* %a, i128 0 seq_cst
  ret void
}
; CHECK: atomic128_sub_seq_cst
; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 5)

define void @atomic128_and_seq_cst(i128* %a) nounwind uwtable {
entry:
  atomicrmw and i128* %a, i128 0 seq_cst
  ret void
}
; CHECK: atomic128_and_seq_cst
; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 5)

define void @atomic128_or_seq_cst(i128* %a) nounwind uwtable {
entry:
  atomicrmw or i128* %a, i128 0 seq_cst
  ret void
}
; CHECK: atomic128_or_seq_cst
; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 5)

define void @atomic128_xor_seq_cst(i128* %a) nounwind uwtable {
entry:
  atomicrmw xor i128* %a, i128 0 seq_cst
  ret void
}
; CHECK: atomic128_xor_seq_cst
; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 5)

define void @atomic128_nand_seq_cst(i128* %a) nounwind uwtable {
entry:
  atomicrmw nand i128* %a, i128 0 seq_cst
  ret void
}
; CHECK: atomic128_nand_seq_cst
; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 5)

define void @atomic128_cas_monotonic(i128* %a) nounwind uwtable {
entry:
  cmpxchg i128* %a, i128 0, i128 1 monotonic monotonic
  ret void
}
; CHECK: atomic128_cas_monotonic
; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 0, i32 0)

define void @atomic128_cas_acquire(i128* %a) nounwind uwtable {
entry:
  cmpxchg i128* %a, i128 0, i128 1 acquire acquire
  ret void
}
; CHECK: atomic128_cas_acquire
; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 2, i32 2)

define void @atomic128_cas_release(i128* %a) nounwind uwtable {
entry:
  cmpxchg i128* %a, i128 0, i128 1 release monotonic
  ret void
}
; CHECK: atomic128_cas_release
; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 3, i32 0)

define void @atomic128_cas_acq_rel(i128* %a) nounwind uwtable {
entry:
  cmpxchg i128* %a, i128 0, i128 1 acq_rel acquire
  ret void
}
; CHECK: atomic128_cas_acq_rel
; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 4, i32 2)

define void @atomic128_cas_seq_cst(i128* %a) nounwind uwtable {
entry:
  cmpxchg i128* %a, i128 0, i128 1 seq_cst seq_cst
  ret void
}
; CHECK: atomic128_cas_seq_cst
; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 5, i32 5)
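
; A "fence singlethread" is instrumented as __tsan_atomic_signal_fence, and a
; plain "fence" as __tsan_atomic_thread_fence; the argument uses the same
; memory-order encoding as above.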

define void @atomic_signal_fence_acquire() nounwind uwtable {
entry:
  fence singlethread acquire
  ret void
}
; CHECK: atomic_signal_fence_acquire
; CHECK: call void @__tsan_atomic_signal_fence(i32 2)

define void @atomic_thread_fence_acquire() nounwind uwtable {
entry:
  fence acquire
  ret void
}
; CHECK: atomic_thread_fence_acquire
; CHECK: call void @__tsan_atomic_thread_fence(i32 2)

define void @atomic_signal_fence_release() nounwind uwtable {
entry:
  fence singlethread release
  ret void
}
; CHECK: atomic_signal_fence_release
; CHECK: call void @__tsan_atomic_signal_fence(i32 3)

define void @atomic_thread_fence_release() nounwind uwtable {
entry:
  fence release
  ret void
}
; CHECK: atomic_thread_fence_release
; CHECK: call void @__tsan_atomic_thread_fence(i32 3)

define void @atomic_signal_fence_acq_rel() nounwind uwtable {
entry:
  fence singlethread acq_rel
  ret void
}
; CHECK: atomic_signal_fence_acq_rel
; CHECK: call void @__tsan_atomic_signal_fence(i32 4)

define void @atomic_thread_fence_acq_rel() nounwind uwtable {
entry:
  fence acq_rel
  ret void
}
; CHECK: atomic_thread_fence_acq_rel
; CHECK: call void @__tsan_atomic_thread_fence(i32 4)

define void @atomic_signal_fence_seq_cst() nounwind uwtable {
entry:
  fence singlethread seq_cst
  ret void
}
; CHECK: atomic_signal_fence_seq_cst
; CHECK: call void @__tsan_atomic_signal_fence(i32 5)

define void @atomic_thread_fence_seq_cst() nounwind uwtable {
entry:
  fence seq_cst
  ret void
}
; CHECK: atomic_thread_fence_seq_cst
; CHECK: call void @__tsan_atomic_thread_fence(i32 5)