; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mcpu=corei7 -verify-machineinstrs -show-mc-encoding | FileCheck %s --check-prefix X64
; RUN: llc < %s -O0 -mtriple=i386-unknown-unknown -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32

@sc16 = external global i16

define void @atomic_fetch_add16() nounwind {
; X64-LABEL: atomic_fetch_add16
; X32-LABEL: atomic_fetch_add16
entry:
; 32-bit
  %t1 = atomicrmw add i16* @sc16, i16 1 acquire
; X64: lock
; X64: incw
; X32: lock
; X32: incw
  %t2 = atomicrmw add i16* @sc16, i16 3 acquire
; X64: lock
; X64: addw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: addw $3
  %t3 = atomicrmw add i16* @sc16, i16 5 acquire
; X64: lock
; X64: xaddw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: xaddw
  %t4 = atomicrmw add i16* @sc16, i16 %t3 acquire
; X64: lock
; X64: addw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: addw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_sub16() nounwind {
; X64-LABEL: atomic_fetch_sub16
; X32-LABEL: atomic_fetch_sub16
  %t1 = atomicrmw sub i16* @sc16, i16 1 acquire
; X64: lock
; X64: decw
; X32: lock
; X32: decw
  %t2 = atomicrmw sub i16* @sc16, i16 3 acquire
; X64: lock
; X64: subw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: subw $3
  %t3 = atomicrmw sub i16* @sc16, i16 5 acquire
; X64: lock
; X64: xaddw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: xaddw
  %t4 = atomicrmw sub i16* @sc16, i16 %t3 acquire
; X64: lock
; X64: subw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: subw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_and16() nounwind {
; X64-LABEL: atomic_fetch_and16
; X32-LABEL: atomic_fetch_and16
  %t1 = atomicrmw and i16* @sc16, i16 3 acquire
; X64: lock
; X64: andw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: andw $3
  %t2 = atomicrmw and i16* @sc16, i16 5 acquire
; X64: andl
; X64: lock
; X64: cmpxchgw
; X32: andl
; X32: lock
; X32: cmpxchgw
  %t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
; X64: lock
; X64: andw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: andw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_or16() nounwind {
; X64-LABEL: atomic_fetch_or16
; X32-LABEL: atomic_fetch_or16
  %t1 = atomicrmw or i16* @sc16, i16 3 acquire
; X64: lock
; X64: orw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: orw $3
  %t2 = atomicrmw or i16* @sc16, i16 5 acquire
; X64: orl
; X64: lock
; X64: cmpxchgw
; X32: orl
; X32: lock
; X32: cmpxchgw
  %t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
; X64: lock
; X64: orw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: orw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_xor16() nounwind {
; X64-LABEL: atomic_fetch_xor16
; X32-LABEL: atomic_fetch_xor16
  %t1 = atomicrmw xor i16* @sc16, i16 3 acquire
; X64: lock
; X64: xorw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: xorw $3
  %t2 = atomicrmw xor i16* @sc16, i16 5 acquire
; X64: xorl
; X64: lock
; X64: cmpxchgw
; X32: xorl
; X32: lock
; X32: cmpxchgw
  %t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
; X64: lock
; X64: xorw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: xorw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_nand16(i16 %x) nounwind {
; X64-LABEL: atomic_fetch_nand16
; X32-LABEL: atomic_fetch_nand16
  %t1 = atomicrmw nand i16* @sc16, i16 %x acquire
; X64: andl
; X64: notl
; X64: lock
; X64: cmpxchgw
; X32: andl
; X32: notl
; X32: lock
; X32: cmpxchgw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_max16(i16 %x) nounwind {
  %t1 = atomicrmw max i16* @sc16, i16 %x acquire
; X64: movswl
; X64: movswl
; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw

; X32: movswl
; X32: movswl
; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_min16(i16 %x) nounwind {
  %t1 = atomicrmw min i16* @sc16, i16 %x acquire
; X64: movswl
; X64: movswl
; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw

; X32: movswl
; X32: movswl
; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_umax16(i16 %x) nounwind {
  %t1 = atomicrmw umax i16* @sc16, i16 %x acquire
; X64: movzwl
; X64: movzwl
; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw

; X32: movzwl
; X32: movzwl
; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_umin16(i16 %x) nounwind {
  %t1 = atomicrmw umin i16* @sc16, i16 %x acquire
; X64: movzwl
; X64: movzwl
; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw

; X32: movzwl
; X32: movzwl
; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_cmpxchg16() nounwind {
  %t1 = cmpxchg i16* @sc16, i16 0, i16 1 acquire acquire
; X64: lock
; X64: cmpxchgw
; X32: lock
; X32: cmpxchgw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_store16(i16 %x) nounwind {
  store atomic i16 %x, i16* @sc16 release, align 4
; X64-NOT: lock
; X64: movw
; X32-NOT: lock
; X32: movw
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_swap16(i16 %x) nounwind {
  %t1 = atomicrmw xchg i16* @sc16, i16 %x acquire
; X64-NOT: lock
; X64: xchgw
; X32-NOT: lock
; X32: xchgw
  ret void
; X64: ret
; X32: ret
}