; The inliner should never inline recursive functions into other functions.
; This effectively is just peeling off the first iteration of a loop, and the
; inliner heuristics are not set up for this.

; RUN: opt -inline %s -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.3"

@g = common global i32 0                          ; <i32*> [#uses=1]

;; Directly self-recursive function: @foo calls itself, so inlining it into a
;; caller would only peel one iteration of the recursion.
define internal void @foo(i32 %x) nounwind ssp {
entry:
  %0 = icmp slt i32 %x, 0                         ; <i1> [#uses=1]
  br i1 %0, label %return, label %bb

bb:                                               ; preds = %entry
  %1 = sub nsw i32 %x, 1                          ; <i32> [#uses=1]
  call void @foo(i32 %1) nounwind ssp
  volatile store i32 1, i32* @g, align 4
  ret void

return:                                           ; preds = %entry
  ret void
}


;; The call to @foo must survive in @bonk: the recursive callee is not inlined.
;; CHECK: @bonk
;; CHECK: call void @foo(i32 42)
define void @bonk() nounwind ssp {
entry:
  call void @foo(i32 42) nounwind ssp
  ret void
}



;; Here is an indirect case that should not be infinitely inlined.
;; @f1 and @f2 call each other through function pointers passed as arguments,
;; forming a recursion cycle the inliner must not unroll forever.

define internal void @f1(i32 %x, i8* %Foo, i8* %Bar) nounwind ssp {
entry:
  %0 = bitcast i8* %Bar to void (i32, i8*, i8*)*
  %1 = sub nsw i32 %x, 1
  call void %0(i32 %1, i8* %Foo, i8* %Bar) nounwind
  volatile store i32 42, i32* @g, align 4
  ret void
}

define internal void @f2(i32 %x, i8* %Foo, i8* %Bar) nounwind ssp {
entry:
  %0 = icmp slt i32 %x, 0                         ; <i1> [#uses=1]
  br i1 %0, label %return, label %bb

bb:                                               ; preds = %entry
  %1 = bitcast i8* %Foo to void (i32, i8*, i8*)*  ; <void (i32, i8*, i8*)*> [#uses=1]
  call void %1(i32 %x, i8* %Foo, i8* %Bar) nounwind
  volatile store i32 13, i32* @g, align 4
  ret void

return:                                           ; preds = %entry
  ret void
}


; CHECK: @top_level
; CHECK: call void @f2(i32 122
; Here we inline one instance of the cycle, but we don't want to completely
; unroll it.
;; Entry point for the indirect-recursion case: kicks off the @f1/@f2 cycle by
;; passing both functions' addresses as opaque i8* arguments.
define void @top_level() nounwind ssp {
entry:
  call void @f2(i32 123, i8* bitcast (void (i32, i8*, i8*)* @f1 to i8*), i8* bitcast (void (i32, i8*, i8*)* @f2 to i8*)) nounwind ssp
  ret void
}