1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
3; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
4
5; Test coverage for matchAddressRecursively's MUL handling
6
7; Based off:
8; struct A {
9;   int m_ints[5];
10;   int m_bar();
11; };
12; struct {
13;   A* m_data;
14; } c;
15; void foo(bool b, int i) {
16;   if (b)
17;     return;
18;   int j = c.m_data[i + 1].m_bar();
19;   foo(false, j);
20; }
21
; A is a struct of five i32s, so sizeof(A) = 20 bytes. The non-power-of-two
; element size is the point of the test: indexing produces a multiply by 20
; (= 5 * 4) that the X86 address matcher must decompose into scaled LEAs.
%struct.A = type { [5 x i32] }
23
define void @foo(i1 zeroext, i32) nounwind {
; X86-LABEL: foo:
; X86:       # %bb.0:
; X86-NEXT:    cmpb $0, {{[0-9]+}}(%esp)
; X86-NEXT:    je .LBB0_1
; X86-NEXT:  # %bb.3:
; X86-NEXT:    retl
; X86-NEXT:  .LBB0_1: # %.preheader
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    .p2align 4, 0x90
; X86-NEXT:  .LBB0_2: # =>This Inner Loop Header: Depth=1
; X86-NEXT:    leal (%eax,%eax,4), %eax
; X86-NEXT:    leal 20(,%eax,4), %eax
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll bar
; X86-NEXT:    addl $4, %esp
; X86-NEXT:    jmp .LBB0_2
;
; X64-LABEL: foo:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rax
; X64-NEXT:    testl %edi, %edi
; X64-NEXT:    je .LBB0_1
; X64-NEXT:  # %bb.3:
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
; X64-NEXT:  .LBB0_1: # %.preheader
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    .p2align 4, 0x90
; X64-NEXT:  .LBB0_2: # =>This Inner Loop Header: Depth=1
; X64-NEXT:    incl %eax
; X64-NEXT:    cltq
; X64-NEXT:    shlq $2, %rax
; X64-NEXT:    leaq (%rax,%rax,4), %rdi
; X64-NEXT:    callq bar
; X64-NEXT:    jmp .LBB0_2
; Entry block (%2): if %0 (the 'b' flag) is set, branch straight to the
; return at block %9; otherwise fall into the loop at block %3.
; Blocks here are unnamed, so they take implicit numbers %2, %3, %9.
  br i1 %0, label %9, label %3

; Block %3 (infinite loop, from the tail recursion in the C original):
; %4 carries bar()'s previous result (%8) around the back edge, seeded
; with argument %1 (the 'i' parameter) on entry from %2.
  %4 = phi i32 [ %8, %3 ], [ %1, %2 ]
  %5 = add nsw i32 %4, 1                ; i + 1
  %6 = sext i32 %5 to i64               ; index widened for the GEP
; Null-based GEP: address = %6 * sizeof(%struct.A) = %6 * 20. This is the
; MUL that matchAddressRecursively must fold into the scaled address modes
; checked above (two LEAs on X86; shl + LEA on X64).
  %7 = getelementptr inbounds %struct.A, %struct.A* null, i64 %6
  %8 = tail call i32 @bar(%struct.A* %7)
  br label %3                           ; back edge; loop never exits

; Block %9: early-return path taken when %0 is true.
  ret void
}
71
; External callee; receives the computed element pointer (pushed on the
; stack for X86, passed in %rdi for X64 per the CHECK lines above).
declare i32 @bar(%struct.A*)
73