; RUN: opt %loadPolly -basic-aa -polly-stmt-granularity=bb -polly-scops -analyze -polly-allow-modref-calls \
; RUN: < %s | FileCheck %s
; RUN: opt %loadPolly -basic-aa -polly-stmt-granularity=bb -polly-codegen -polly-allow-modref-calls \
; RUN: -disable-output < %s
;
; Verify that we model the may-write access of the prefetch intrinsic
; correctly, thus that A is accessed by it but B is not.
;
; CHECK:      Stmt_for_body
; CHECK-NEXT:   Domain :=
; CHECK-NEXT:     { Stmt_for_body[i0] : 0 <= i0 <= 1023 };
; CHECK-NEXT:   Schedule :=
; CHECK-NEXT:     { Stmt_for_body[i0] -> [i0] };
; CHECK-NEXT:   MayWriteAccess := [Reduction Type: NONE]
; CHECK-NEXT:     { Stmt_for_body[i0] -> MemRef_A[o0] };
; CHECK-NEXT:   ReadAccess := [Reduction Type: NONE]
; CHECK-NEXT:     { Stmt_for_body[i0] -> MemRef_B[i0] };
; CHECK-NEXT:   MustWriteAccess := [Reduction Type: NONE]
; CHECK-NEXT:     { Stmt_for_body[i0] -> MemRef_A[i0] };
;
; C equivalent of the IR below:
;
;    void jd(int *restrict A, int *restrict B) {
;      for (int i = 0; i < 1024; i++) {
;        @llvm.prefetch(A);
;        A[i] = B[i];
;      }
;    }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @jd(i32* noalias %A, i32* noalias %B) {
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.inc
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.inc ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i
  %arrayidx1 = getelementptr inbounds i32, i32* %B, i64 %i
  %bc = bitcast i32* %arrayidx to i8*
  ; @f stands in for the prefetch intrinsic; its argmemonly nounwind
  ; attributes (#0) let -polly-allow-modref-calls model it as a may-write
  ; to the pointer argument %bc (i.e. into A) only.
  call void @f(i8* %bc, i32 1, i32 1, i32 1)
  %tmp = load i32, i32* %arrayidx1
  store i32 %tmp, i32* %arrayidx, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %i.next = add nuw nsw i64 %i, 1
  %exitcond = icmp ne i64 %i.next, 1024
  br i1 %exitcond, label %for.body, label %for.end

for.end:                                          ; preds = %for.inc
  ret void
}

declare void @f(i8*, i32, i32, i32) #0

attributes #0 = { argmemonly nounwind }