; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -O3 | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=X64

define x86_fp80 @fma(x86_fp80 %x, x86_fp80 %y, x86_fp80 %z) nounwind strictfp {
; X86-LABEL: fma:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $36, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll fmal
; X86-NEXT:    addl $36, %esp
; X86-NEXT:    retl
;
; X64-LABEL: fma:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $56, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq fmal
; X64-NEXT:    addq $56, %rsp
; X64-NEXT:    retq
entry:
  %fma = call x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80 %x, x86_fp80 %y, x86_fp80 %z, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %fma
}

define x86_fp80 @frem(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
; X86-LABEL: frem:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $24, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll fmodl
; X86-NEXT:    addl $24, %esp
; X86-NEXT:    retl
;
; X64-LABEL: frem:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $40, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq fmodl
; X64-NEXT:    addq $40, %rsp
; X64-NEXT:    retq
entry:
  %div = call x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80 %x, x86_fp80 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %div
}

define x86_fp80 @ceil(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: ceil:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll ceill
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: ceil:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq ceill
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %ceil = call x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %ceil
}

define x86_fp80 @cos(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: cos:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll cosl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: cos:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq cosl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %cos = call x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %cos
}

define x86_fp80 @exp(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: exp:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll expl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: exp:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq expl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %exp = call x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %exp
}

define x86_fp80 @exp2(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: exp2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll exp2l
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: exp2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq exp2l
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %exp2 = call x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %exp2
}

define x86_fp80 @floor(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: floor:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll floorl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: floor:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq floorl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %floor = call x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %floor
}

define x86_fp80 @log(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: log:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll logl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: log:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq logl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %log = call x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %log
}

define x86_fp80 @log10(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: log10:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll log10l
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: log10:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq log10l
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %log10 = call x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %log10
}

define x86_fp80 @log2(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: log2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll log2l
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: log2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq log2l
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %log2 = call x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %log2
}

define x86_fp80 @maxnum(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
; X86-LABEL: maxnum:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $24, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll fmaxl
; X86-NEXT:    addl $24, %esp
; X86-NEXT:    retl
;
; X64-LABEL: maxnum:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $40, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq fmaxl
; X64-NEXT:    addq $40, %rsp
; X64-NEXT:    retq
entry:
  %maxnum = call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %x, x86_fp80 %y, metadata !"fpexcept.strict") #0
  ret x86_fp80 %maxnum
}

define x86_fp80 @minnum(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
; X86-LABEL: minnum:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $24, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll fminl
; X86-NEXT:    addl $24, %esp
; X86-NEXT:    retl
;
; X64-LABEL: minnum:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $40, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq fminl
; X64-NEXT:    addq $40, %rsp
; X64-NEXT:    retq
entry:
  %minnum = call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %x, x86_fp80 %y, metadata !"fpexcept.strict") #0
  ret x86_fp80 %minnum
}

define x86_fp80 @nearbyint(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: nearbyint:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll nearbyintl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: nearbyint:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq nearbyintl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %nearbyint = call x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %nearbyint
}

define x86_fp80 @pow(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
; X86-LABEL: pow:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $24, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll powl
; X86-NEXT:    addl $24, %esp
; X86-NEXT:    retl
;
; X64-LABEL: pow:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $40, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq powl
; X64-NEXT:    addq $40, %rsp
; X64-NEXT:    retq
entry:
  %pow = call x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80 %x, x86_fp80 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %pow
}

define x86_fp80 @powi(x86_fp80 %x, i32 %y) nounwind strictfp {
; X86-LABEL: powi:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    wait
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll __powixf2
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    retl
;
; X64-LABEL: powi:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq __powixf2
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %powi = call x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80 %x, i32 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %powi
}

define x86_fp80 @rint(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: rint:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll rintl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: rint:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq rintl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %rint = call x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %rint
}

define x86_fp80 @round(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: round:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll roundl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: round:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq roundl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %round = call x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %round
}

define x86_fp80 @roundeven(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: roundeven:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll roundevenl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: roundeven:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq roundevenl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %roundeven = call x86_fp80 @llvm.experimental.constrained.roundeven.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %roundeven
}

define x86_fp80 @sin(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: sin:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll sinl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: sin:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq sinl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %sin = call x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret x86_fp80 %sin
}

define x86_fp80 @trunc(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: trunc:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll truncl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: trunc:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq truncl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %trunc = call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret x86_fp80 %trunc
}

define i32 @lrint(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: lrint:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll lrintl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: lrint:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq lrintl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %rint = call i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret i32 %rint
}

define i64 @llrint(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: llrint:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll llrintl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: llrint:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq llrintl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %rint = call i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret i64 %rint
}

define i32 @lround(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: lround:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll lroundl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: lround:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq lroundl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %round = call i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret i32 %round
}

define i64 @llround(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: llround:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    wait
; X86-NEXT:    calll llroundl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
;
; X64-LABEL: llround:
; X64:       # %bb.0: # %entry
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
; X64-NEXT:    fstpt (%rsp)
; X64-NEXT:    wait
; X64-NEXT:    callq llroundl
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    retq
entry:
  %round = call i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
  ret i64 %round
}

attributes #0 = { strictfp }

declare x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80, x86_fp80, x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80, x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80, metadata)
declare x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80, metadata)
declare x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80, x86_fp80, metadata)
declare x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80, x86_fp80, metadata)
declare x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80, x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80, i32, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80, metadata)
declare x86_fp80 @llvm.experimental.constrained.roundeven.f80(x86_fp80, metadata)
declare x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
declare i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80, metadata, metadata)
declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
declare i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80, metadata)
declare i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80, metadata)