; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=X86

; Lowering tests for the unsigned saturating fixed-point division intrinsic
; @llvm.udiv.fix.sat.* at several (including non-power-of-two) bit widths and
; scales, on x86-64 and on i686 (cmov required for the saturation selects).
; The CHECK bodies below are generated output — do not edit them by hand;
; rerun utils/update_llc_test_checks.py instead.

declare i4 @llvm.udiv.fix.sat.i4 (i4, i4, i32)
declare i15 @llvm.udiv.fix.sat.i15 (i15, i15, i32)
declare i16 @llvm.udiv.fix.sat.i16 (i16, i16, i32)
declare i18 @llvm.udiv.fix.sat.i18 (i18, i18, i32)
declare i64 @llvm.udiv.fix.sat.i64 (i64, i64, i32)
declare <4 x i32> @llvm.udiv.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32)

; i16 operands, scale 7: fits in a single 32-bit divl after widening.
define i16 @func(i16 %x, i16 %y) nounwind {
; X64-LABEL: func:
; X64:       # %bb.0:
; X64-NEXT:    movzwl %si, %ecx
; X64-NEXT:    movzwl %di, %eax
; X64-NEXT:    shll $8, %eax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %ecx
; X64-NEXT:    cmpl $131071, %eax # imm = 0x1FFFF
; X64-NEXT:    movl $131071, %ecx # imm = 0x1FFFF
; X64-NEXT:    cmovael %ecx, %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: func:
; X86:       # %bb.0:
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzwl %ax, %eax
; X86-NEXT:    shll $8, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    divl %ecx
; X86-NEXT:    cmpl $131071, %eax # imm = 0x1FFFF
; X86-NEXT:    movl $131071, %ecx # imm = 0x1FFFF
; X86-NEXT:    cmovael %ecx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
  %tmp = call i16 @llvm.udiv.fix.sat.i16(i16 %x, i16 %y, i32 7)
  ret i16 %tmp
}

; Odd width: i15 operands (sign-extended from i8) with scale 14.
define i16 @func2(i8 %x, i8 %y) nounwind {
; X64-LABEL: func2:
; X64:       # %bb.0:
; X64-NEXT:    movsbl %dil, %eax
; X64-NEXT:    andl $32767, %eax # imm = 0x7FFF
; X64-NEXT:    movsbl %sil, %ecx
; X64-NEXT:    andl $32767, %ecx # imm = 0x7FFF
; X64-NEXT:    shll $14, %eax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %ecx
; X64-NEXT:    cmpl $32767, %eax # imm = 0x7FFF
; X64-NEXT:    movl $32767, %ecx # imm = 0x7FFF
; X64-NEXT:    cmovbl %eax, %ecx
; X64-NEXT:    addl %ecx, %ecx
; X64-NEXT:    movswl %cx, %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: func2:
; X86:       # %bb.0:
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl $32767, %ecx # imm = 0x7FFF
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl $32767, %eax # imm = 0x7FFF
; X86-NEXT:    shll $14, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    divl %ecx
; X86-NEXT:    cmpl $32767, %eax # imm = 0x7FFF
; X86-NEXT:    movl $32767, %ecx # imm = 0x7FFF
; X86-NEXT:    cmovbl %eax, %ecx
; X86-NEXT:    addl %ecx, %ecx
; X86-NEXT:    movswl %cx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
  %x2 = sext i8 %x to i15
  %y2 = sext i8 %y to i15
  %tmp = call i15 @llvm.udiv.fix.sat.i15(i15 %x2, i15 %y2, i32 14)
  %tmp2 = sext i15 %tmp to i16
  ret i16 %tmp2
}

; i15 numerator against a pre-shifted i15 denominator, small scale (4);
; narrow enough that a 16-bit divw suffices.
define i16 @func3(i15 %x, i8 %y) nounwind {
; X64-LABEL: func3:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal (%rdi,%rdi), %eax
; X64-NEXT:    movzbl %sil, %ecx
; X64-NEXT:    shll $4, %ecx
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divw %cx
; X64-NEXT:    # kill: def $ax killed $ax def $eax
; X64-NEXT:    movzwl %ax, %ecx
; X64-NEXT:    cmpl $32767, %ecx # imm = 0x7FFF
; X64-NEXT:    movl $32767, %ecx # imm = 0x7FFF
; X64-NEXT:    cmovbl %eax, %ecx
; X64-NEXT:    addl %ecx, %ecx
; X64-NEXT:    movswl %cx, %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: func3:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    movzbl %cl, %ecx
; X86-NEXT:    shll $4, %ecx
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    divw %cx
; X86-NEXT:    # kill: def $ax killed $ax def $eax
; X86-NEXT:    movzwl %ax, %ecx
; X86-NEXT:    cmpl $32767, %ecx # imm = 0x7FFF
; X86-NEXT:    movl $32767, %ecx # imm = 0x7FFF
; X86-NEXT:    cmovbl %eax, %ecx
; X86-NEXT:    addl %ecx, %ecx
; X86-NEXT:    movswl %cx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
  %y2 = sext i8 %y to i15
  %y3 = shl i15 %y2, 7
  %tmp = call i15 @llvm.udiv.fix.sat.i15(i15 %x, i15 %y3, i32 4)
  %tmp2 = sext i15 %tmp to i16
  ret i16 %tmp2
}

; Tiny width: i4 operands, scale 2, lowered via an 8-bit divb.
define i4 @func4(i4 %x, i4 %y) nounwind {
; X64-LABEL: func4:
; X64:       # %bb.0:
; X64-NEXT:    andb $15, %sil
; X64-NEXT:    andb $15, %dil
; X64-NEXT:    shlb $2, %dil
; X64-NEXT:    movzbl %dil, %eax
; X64-NEXT:    divb %sil
; X64-NEXT:    movzbl %al, %ecx
; X64-NEXT:    cmpb $15, %cl
; X64-NEXT:    movl $15, %eax
; X64-NEXT:    cmovbl %ecx, %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: func4:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    andb $15, %cl
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    andb $15, %al
; X86-NEXT:    shlb $2, %al
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    divb %cl
; X86-NEXT:    movzbl %al, %ecx
; X86-NEXT:    cmpb $15, %al
; X86-NEXT:    movl $15, %eax
; X86-NEXT:    cmovbl %ecx, %eax
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    retl
  %tmp = call i4 @llvm.udiv.fix.sat.i4(i4 %x, i4 %y, i32 2)
  ret i4 %tmp
}

; i64 operands, scale 31: the scaled numerator needs 128 bits, so both
; targets fall back to the __udivti3 libcall and saturate the wide result.
define i64 @func5(i64 %x, i64 %y) nounwind {
; X64-LABEL: func5:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rsi, %rdx
; X64-NEXT:    leaq (%rdi,%rdi), %rsi
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    shrq $63, %rax
; X64-NEXT:    shrdq $33, %rax, %rsi
; X64-NEXT:    shlq $32, %rdi
; X64-NEXT:    xorl %ebx, %ebx
; X64-NEXT:    xorl %ecx, %ecx
; X64-NEXT:    callq __udivti3
; X64-NEXT:    cmpq $-1, %rax
; X64-NEXT:    movq $-1, %rcx
; X64-NEXT:    cmovbq %rax, %rcx
; X64-NEXT:    cmpq $1, %rdx
; X64-NEXT:    movl $1, %esi
; X64-NEXT:    cmovbq %rdx, %rsi
; X64-NEXT:    sbbq %rbx, %rbx
; X64-NEXT:    notq %rbx
; X64-NEXT:    orq %rax, %rbx
; X64-NEXT:    cmpq $1, %rdx
; X64-NEXT:    cmoveq %rcx, %rbx
; X64-NEXT:    shrdq $1, %rsi, %rbx
; X64-NEXT:    movq %rbx, %rax
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: func5:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    pushl %esi
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $24, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    movl 12(%ebp), %ecx
; X86-NEXT:    movl %ecx, %edx
; X86-NEXT:    shrl %edx
; X86-NEXT:    shldl $31, %eax, %ecx
; X86-NEXT:    shll $31, %eax
; X86-NEXT:    movl %esp, %esi
; X86-NEXT:    pushl $0
; X86-NEXT:    pushl $0
; X86-NEXT:    pushl 20(%ebp)
; X86-NEXT:    pushl 16(%ebp)
; X86-NEXT:    pushl $0
; X86-NEXT:    pushl %edx
; X86-NEXT:    pushl %ecx
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %esi
; X86-NEXT:    calll __udivti3
; X86-NEXT:    addl $32, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    cmpl $-1, %eax
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    movl $-1, %esi
; X86-NEXT:    cmovbl %eax, %esi
; X86-NEXT:    cmpl $-1, %edx
; X86-NEXT:    cmovel %edx, %eax
; X86-NEXT:    cmovel %esi, %eax
; X86-NEXT:    cmovael %ecx, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    orl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    cmovnel %ecx, %edx
; X86-NEXT:    cmovnel %ecx, %eax
; X86-NEXT:    leal -4(%ebp), %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
  %tmp = call i64 @llvm.udiv.fix.sat.i64(i64 %x, i64 %y, i32 31)
  ret i64 %tmp
}

; Non-power-of-two width: i18 operands (sign-extended from i16), scale 7.
define i18 @func6(i16 %x, i16 %y) nounwind {
; X64-LABEL: func6:
; X64:       # %bb.0:
; X64-NEXT:    movswl %di, %eax
; X64-NEXT:    andl $262143, %eax # imm = 0x3FFFF
; X64-NEXT:    movswl %si, %ecx
; X64-NEXT:    andl $262143, %ecx # imm = 0x3FFFF
; X64-NEXT:    shll $7, %eax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %ecx
; X64-NEXT:    cmpl $262143, %eax # imm = 0x3FFFF
; X64-NEXT:    movl $262143, %ecx # imm = 0x3FFFF
; X64-NEXT:    cmovael %ecx, %eax
; X64-NEXT:    retq
;
; X86-LABEL: func6:
; X86:       # %bb.0:
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl $262143, %ecx # imm = 0x3FFFF
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    andl $262143, %eax # imm = 0x3FFFF
; X86-NEXT:    shll $7, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    divl %ecx
; X86-NEXT:    cmpl $262143, %eax # imm = 0x3FFFF
; X86-NEXT:    movl $262143, %ecx # imm = 0x3FFFF
; X86-NEXT:    cmovael %ecx, %eax
; X86-NEXT:    retl
  %x2 = sext i16 %x to i18
  %y2 = sext i16 %y to i18
  %tmp = call i18 @llvm.udiv.fix.sat.i18(i18 %x2, i18 %y2, i32 7)
  ret i18 %tmp
}

; i16 operands with scale equal to the bit width (16): the scaled numerator
; is 33 bits, so it widens to a 64-bit divide (divq on x86-64, __udivdi3
; libcall on i686).
define i16 @func7(i16 %x, i16 %y) nounwind {
; X64-LABEL: func7:
; X64:       # %bb.0:
; X64-NEXT:    movzwl %si, %ecx
; X64-NEXT:    movzwl %di, %eax
; X64-NEXT:    addl %eax, %eax
; X64-NEXT:    shlq $16, %rax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divq %rcx
; X64-NEXT:    cmpq $131071, %rax # imm = 0x1FFFF
; X64-NEXT:    movl $131071, %ecx # imm = 0x1FFFF
; X64-NEXT:    cmovaeq %rcx, %rax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $rax
; X64-NEXT:    retq
;
; X86-LABEL: func7:
; X86:       # %bb.0:
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzwl %cx, %ecx
; X86-NEXT:    addl %ecx, %ecx
; X86-NEXT:    movl %ecx, %edx
; X86-NEXT:    shrl $16, %edx
; X86-NEXT:    shll $16, %ecx
; X86-NEXT:    pushl $0
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %edx
; X86-NEXT:    pushl %ecx
; X86-NEXT:    calll __udivdi3
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    cmpl $131071, %eax # imm = 0x1FFFF
; X86-NEXT:    movl $131071, %ecx # imm = 0x1FFFF
; X86-NEXT:    cmovael %ecx, %eax
; X86-NEXT:    testl %edx, %edx
; X86-NEXT:    cmovnel %ecx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
  %tmp = call i16 @llvm.udiv.fix.sat.i16(i16 %x, i16 %y, i32 16)
  ret i16 %tmp
}

; Vector case: v4i32 with scale 31. Both targets scalarize — x86-64 into
; four 64-bit divq ops with SSE2 saturation selects, i686 into four
; __udivdi3 libcalls.
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; X64-LABEL: vec:
; X64:       # %bb.0:
; X64-NEXT:    pxor %xmm3, %xmm3
; X64-NEXT:    movdqa %xmm1, %xmm2
; X64-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; X64-NEXT:    movq %xmm2, %rcx
; X64-NEXT:    movdqa %xmm0, %xmm2
; X64-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; X64-NEXT:    paddq %xmm2, %xmm2
; X64-NEXT:    psllq $31, %xmm2
; X64-NEXT:    movq %xmm2, %rax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divq %rcx
; X64-NEXT:    movq %rax, %xmm7
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; X64-NEXT:    movq %xmm2, %rax
; X64-NEXT:    movdqa %xmm1, %xmm2
; X64-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X64-NEXT:    movq %xmm2, %rcx
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divq %rcx
; X64-NEXT:    movq %rax, %xmm2
; X64-NEXT:    punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm2[0]
; X64-NEXT:    movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
; X64-NEXT:    movdqa %xmm7, %xmm2
; X64-NEXT:    pxor %xmm4, %xmm2
; X64-NEXT:    movdqa {{.*#+}} xmm8 = [9223372043297226751,9223372043297226751]
; X64-NEXT:    movdqa %xmm8, %xmm6
; X64-NEXT:    pcmpgtd %xmm2, %xmm6
; X64-NEXT:    pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
; X64-NEXT:    pcmpeqd %xmm8, %xmm2
; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
; X64-NEXT:    pand %xmm9, %xmm5
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
; X64-NEXT:    por %xmm5, %xmm2
; X64-NEXT:    movdqa {{.*#+}} xmm6 = [8589934591,8589934591]
; X64-NEXT:    pand %xmm2, %xmm7
; X64-NEXT:    pandn %xmm6, %xmm2
; X64-NEXT:    por %xmm7, %xmm2
; X64-NEXT:    psrlq $1, %xmm2
; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X64-NEXT:    movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; X64-NEXT:    movq %xmm3, %rcx
; X64-NEXT:    paddq %xmm0, %xmm0
; X64-NEXT:    psllq $31, %xmm0
; X64-NEXT:    movq %xmm0, %rax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divq %rcx
; X64-NEXT:    movq %rax, %xmm3
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X64-NEXT:    movq %xmm0, %rax
; X64-NEXT:    psrlq $32, %xmm1
; X64-NEXT:    movq %xmm1, %rcx
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divq %rcx
; X64-NEXT:    movq %rax, %xmm0
; X64-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; X64-NEXT:    pxor %xmm3, %xmm4
; X64-NEXT:    movdqa %xmm8, %xmm0
; X64-NEXT:    pcmpgtd %xmm4, %xmm0
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
; X64-NEXT:    pcmpeqd %xmm8, %xmm4
; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; X64-NEXT:    pand %xmm1, %xmm4
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-NEXT:    por %xmm4, %xmm0
; X64-NEXT:    pand %xmm0, %xmm3
; X64-NEXT:    pandn %xmm6, %xmm0
; X64-NEXT:    por %xmm3, %xmm0
; X64-NEXT:    psrlq $1, %xmm0
; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; X64-NEXT:    retq
;
; X86-LABEL: vec:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    addl %ecx, %ecx
; X86-NEXT:    setb %al
; X86-NEXT:    shldl $31, %ecx, %eax
; X86-NEXT:    shll $31, %ecx
; X86-NEXT:    pushl $0
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %ecx
; X86-NEXT:    calll __udivdi3
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    cmpl $-1, %eax
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmovbl %eax, %ecx
; X86-NEXT:    cmpl $1, %edx
; X86-NEXT:    movl $0, %edi
; X86-NEXT:    sbbl %edi, %edi
; X86-NEXT:    notl %edi
; X86-NEXT:    orl %eax, %edi
; X86-NEXT:    movl %edi, %ebx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    addl %esi, %esi
; X86-NEXT:    setb %al
; X86-NEXT:    cmpl $1, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    cmovel %ecx, %ebx
; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl $1, %ecx
; X86-NEXT:    cmovael %ecx, %edx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    shldl $31, %esi, %eax
; X86-NEXT:    shll $31, %esi
; X86-NEXT:    pushl $0
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %esi
; X86-NEXT:    calll __udivdi3
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    cmpl $-1, %eax
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmovbl %eax, %ecx
; X86-NEXT:    cmpl $1, %edx
; X86-NEXT:    movl $1, %esi
; X86-NEXT:    cmovbl %edx, %esi
; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl $0, %esi
; X86-NEXT:    sbbl %esi, %esi
; X86-NEXT:    notl %esi
; X86-NEXT:    orl %eax, %esi
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    addl %edi, %edi
; X86-NEXT:    setb %al
; X86-NEXT:    cmpl $1, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    cmovel %ecx, %esi
; X86-NEXT:    shldl $31, %edi, %eax
; X86-NEXT:    shll $31, %edi
; X86-NEXT:    pushl $0
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %edi
; X86-NEXT:    calll __udivdi3
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    cmpl $-1, %eax
; X86-NEXT:    movl $-1, %ebx
; X86-NEXT:    cmovbl %eax, %ebx
; X86-NEXT:    cmpl $1, %edx
; X86-NEXT:    movl $0, %edi
; X86-NEXT:    sbbl %edi, %edi
; X86-NEXT:    notl %edi
; X86-NEXT:    orl %eax, %edi
; X86-NEXT:    xorl %ecx, %ecx
; X86-NEXT:    addl %ebp, %ebp
; X86-NEXT:    setb %cl
; X86-NEXT:    cmpl $1, %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    movl $1, %edx
; X86-NEXT:    cmovael %edx, %eax
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    cmovel %ebx, %edi
; X86-NEXT:    shldl $31, %ebp, %ecx
; X86-NEXT:    shll $31, %ebp
; X86-NEXT:    pushl $0
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %ecx
; X86-NEXT:    pushl %ebp
; X86-NEXT:    calll __udivdi3
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    cmpl $-1, %eax
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmovbl %eax, %ecx
; X86-NEXT:    cmpl $1, %edx
; X86-NEXT:    movl $1, %ebx
; X86-NEXT:    cmovbl %edx, %ebx
; X86-NEXT:    movl $0, %ebp
; X86-NEXT:    sbbl %ebp, %ebp
; X86-NEXT:    notl %ebp
; X86-NEXT:    orl %eax, %ebp
; X86-NEXT:    cmpl $1, %edx
; X86-NEXT:    cmovel %ecx, %ebp
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    shrdl $1, %eax, %ecx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    shrdl $1, %eax, %esi
; X86-NEXT:    movl (%esp), %eax # 4-byte Reload
; X86-NEXT:    shrdl $1, %eax, %edi
; X86-NEXT:    shrdl $1, %ebx, %ebp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %ebp, 12(%eax)
; X86-NEXT:    movl %edi, 8(%eax)
; X86-NEXT:    movl %esi, 4(%eax)
; X86-NEXT:    movl %ecx, (%eax)
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
  %tmp = call <4 x i32> @llvm.udiv.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 31)
  ret <4 x i32> %tmp
}