; RUN: opt %loadPolly -polly-parallel -polly-ast -analyze < %s | FileCheck %s -check-prefix=AST
; RUN: opt %loadPolly -polly-parallel -polly-codegen -S < %s | FileCheck %s -check-prefix=IR
;
; Reference C source for the IR below:
;
; float A[100];
;
; void loop_references_outer_ids(long n) {
;   for (long i = 0; i < 100; i++)
;     for (long j = 0; j < 100; j++)
;       for (long k = 0; k < n + i; k++)
;         A[j] += i + j + k;
; }

; In this test case we verify that the j-loop is generated as OpenMP parallel
; loop and that the values of 'i' and 'n', needed in the loop bounds of the
; k-loop, are correctly passed to the subfunction.

; AST: #pragma minimal dependence distance: 1
; AST: for (int c0 = max(0, -n + 1); c0 <= 99; c0 += 1)
; AST:   #pragma omp parallel for
; AST:   for (int c1 = 0; c1 <= 99; c1 += 1)
; AST:     #pragma minimal dependence distance: 1
; AST:     for (int c2 = 0; c2 < n + c0; c2 += 1)
; AST:       Stmt_for_body6(c0, c1, c2);

; IR: %polly.par.userContext = alloca { i64, i64 }
; IR: %[[R1:[0-9a-z.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %polly.par.userContext, i32 0, i32 0
; IR-NEXT: store i64 %n, i64* %[[R1]]
; IR-NEXT: %[[R2:[0-9a-z.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %polly.par.userContext, i32 0, i32 1
; IR-NEXT: store i64 %polly.indvar, i64* %[[R2]]
; IR-NEXT: %polly.par.userContext1 = bitcast { i64, i64 }* %polly.par.userContext to i8*

; IR-LABEL: @loop_references_outer_ids_polly_subfn(i8* %polly.par.userContext)
; IR: %polly.par.userContext1 = bitcast i8* %polly.par.userContext to { i64, i64 }*
; IR-NEXT: %[[R3:[0-9a-z.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %polly.par.userContext1, i32 0, i32 0
; IR-NEXT: %[[R4:[0-9a-z.]+]] = load i64, i64* %[[R3]]
; IR-NEXT: %[[R5:[0-9a-z.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %polly.par.userContext1, i32 0, i32 1
; IR-NEXT: %[[R6:[0-9a-z.]+]] = load i64, i64* %[[R5]]

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@A = common global [100 x float] zeroinitializer, align 16

; Triple-nested loop: i and j run to 100; the innermost k-loop's upper
; bound (n + i) references both the function parameter and the outer
; induction variable, which is what the CHECK lines above verify gets
; passed into the OpenMP subfunction.
define void @loop_references_outer_ids(i64 %n) {
entry:
  br label %for.cond

for.cond:                                         ; preds = %for.inc03, %entry
  %i.0 = phi i64 [ 0, %entry ], [ %inc04, %for.inc03 ]
  %exitcond1 = icmp ne i64 %i.0, 100
  br i1 %exitcond1, label %for.body, label %for.end15

for.body:                                         ; preds = %for.cond
  br label %for.cond1

for.cond1:                                        ; preds = %for.inc00, %for.body
  %j.0 = phi i64 [ 0, %for.body ], [ %inc01, %for.inc00 ]
  %exitcond = icmp ne i64 %j.0, 100
  br i1 %exitcond, label %for.body3, label %for.end12

for.body3:                                        ; preds = %for.cond1
  br label %for.cond4

for.cond4:                                        ; preds = %for.inc, %for.body3
  %k.0 = phi i64 [ 0, %for.body3 ], [ %inc, %for.inc ]
  %add = add nsw i64 %i.0, %n
  %cmp5 = icmp slt i64 %k.0, %add
  br i1 %cmp5, label %for.body6, label %for.end

for.body6:                                        ; preds = %for.cond4
  %add7 = add nsw i64 %i.0, %j.0
  %add8 = add nsw i64 %add7, %k.0
  %conv = sitofp i64 %add8 to float
  %arrayidx = getelementptr inbounds [100 x float], [100 x float]* @A, i64 0, i64 %j.0
  %tmp = load float, float* %arrayidx, align 4
  %add9 = fadd float %tmp, %conv
  store float %add9, float* %arrayidx, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body6
  %inc = add nsw i64 %k.0, 1
  br label %for.cond4

for.end:                                          ; preds = %for.cond4
  br label %for.inc00

for.inc00:                                        ; preds = %for.end
  %inc01 = add nsw i64 %j.0, 1
  br label %for.cond1

for.end12:                                        ; preds = %for.cond1
  br label %for.inc03

for.inc03:                                        ; preds = %for.end12
  %inc04 = add nsw i64 %i.0, 1
  br label %for.cond

for.end15:                                        ; preds = %for.cond
  ret void
}