; RUN: opt %loadPolly -polly-scops -polly-invariant-load-hoisting=true -analyze < %s | FileCheck %s
;
; Verify we do hoist the invariant access to I with a execution context
; as the address computation might wrap in the original but not in our
; optimized version. For an input of c = 127 the original accessed address
; would be &I[-1] = &GI[128 -1] = &GI[127] but in our optimized version
; (due to the usage of i64 types) we would access
; &I[127 + 1] = &I[128] = &GI[256] which would here also be out-of-bounds.
;
; CHECK:      Invariant Accesses: {
; CHECK-NEXT:   ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT:     [c] -> { Stmt_for_body[i0] -> MemRef_GI[129 + c] };
; CHECK-NEXT:     Execution Context: [c] -> { : c <= 126 }
; CHECK-NEXT: }
;
; C source this IR was generated from:
;
;    int GI[256];
;    void f(int *A, unsigned char c) {
;      int *I = &GI[128];
;      for (int i = 0; i < 10; i++)
;        A[i] += I[(signed char)(c + (unsigned char)1)];
;    }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

@GI = common global [256 x i32] zeroinitializer, align 16

define void @f(i32* %A, i8 zeroext %c) {
entry:
  br label %for.cond

for.cond:                                         ; preds = %for.inc, %entry
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
  %exitcond = icmp ne i64 %indvars.iv, 10
  br i1 %exitcond, label %for.body, label %for.end

for.body:                                         ; preds = %for.cond
  ; The invariant load: I[(signed char)(c + 1)] where I = &GI[128].
  ; The i8 add may wrap, the sext makes the index signed; Polly must
  ; restrict the hoisted access's execution context to c <= 126.
  %add = add i8 %c, 1
  %idxprom = sext i8 %add to i64
  %arrayidx = getelementptr inbounds i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @GI, i64 0, i64 128), i64 %idxprom
  %tmp = load i32, i32* %arrayidx, align 4
  %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %tmp1 = load i32, i32* %arrayidx3, align 4
  %add4 = add nsw i32 %tmp1, %tmp
  store i32 %add4, i32* %arrayidx3, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  br label %for.cond

for.end:                                          ; preds = %for.cond
  ret void
}