; REQUIRES: asserts
; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -arm-parallel-dsp -S -stats 2>&1 | FileCheck %s
;
; A more complicated chain: 4 mul operations, so we expect 2 smlad calls.
;
; CHECK: %mac1{{\.}}054 = phi i32 [ [[V17:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
; CHECK: [[V8:%[0-9]+]] = bitcast i16* %arrayidx8 to i32*
; CHECK: [[V9:%[0-9]+]] = load i32, i32* [[V8]], align 2
; CHECK: [[V10:%[0-9]+]] = bitcast i16* %arrayidx to i32*
; CHECK: [[V11:%[0-9]+]] = load i32, i32* [[V10]], align 2
; CHECK: [[V12:%[0-9]+]] = call i32 @llvm.arm.smlad(i32 [[V9]], i32 [[V11]], i32 %mac1{{\.}}054)
; CHECK: [[V13:%[0-9]+]] = bitcast i16* %arrayidx17 to i32*
; CHECK: [[V14:%[0-9]+]] = load i32, i32* [[V13]], align 2
; CHECK: [[V15:%[0-9]+]] = bitcast i16* %arrayidx4 to i32*
; CHECK: [[V16:%[0-9]+]] = load i32, i32* [[V15]], align 2
; CHECK: [[V17:%[0-9]+]] = call i32 @llvm.arm.smlad(i32 [[V14]], i32 [[V16]], i32 [[V12]])
;
; And we don't want to see a 3rd smlad:
; CHECK-NOT: call i32 @llvm.arm.smlad
;
; CHECK: 2 arm-parallel-dsp - Number of smlad instructions generated
;
; The loop below is the IR equivalent of:
;   for (i = 0; i + 4 < arg; i += 4)
;     mac += (i32)arg2[i]   * (i32)arg3[i]
;          + (i32)arg2[i+1] * (i32)arg3[i+1]
;          + (i32)arg2[i+2] * (i32)arg3[i+2]
;          + (i32)arg2[i+3] * (i32)arg3[i+3];
; i.e. four sext-i16 multiply-accumulates per iteration, which the pass
; should pair into two i32 loads + smlad per operand array.
; NOTE(review): the i+1/i+2/i+3 offsets are 'or' (not 'add') — valid because
; i.053 steps by 4 and so has its low two bits clear; presumably this is the
; shape the pass's offset matching expects — confirm against the pass source.
define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
entry:
  ; Skip the loop entirely when the trip count %arg is not positive.
  %cmp52 = icmp sgt i32 %arg, 0
  br i1 %cmp52, label %for.body.preheader, label %for.cond.cleanup

for.cond.cleanup:
  ; Result is 0 if the loop never ran, otherwise the final accumulator.
  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add28, %for.body ]
  ret i32 %mac1.0.lcssa

for.body.preheader:
  br label %for.body

for.body:
  ; %mac1.054 = running accumulator; %i.053 = induction variable (step 4).
  %mac1.054 = phi i32 [ %add28, %for.body ], [ 0, %for.body.preheader ]
  %i.053 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
  ; Four consecutive i16 loads from %arg3 at i, i|1, i|2, i|3.
  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.053
  %0 = load i16, i16* %arrayidx, align 2
  %add1 = or i32 %i.053, 1
  %arrayidx2 = getelementptr inbounds i16, i16* %arg3, i32 %add1
  %1 = load i16, i16* %arrayidx2, align 2
  %add3 = or i32 %i.053, 2
  %arrayidx4 = getelementptr inbounds i16, i16* %arg3, i32 %add3
  %2 = load i16, i16* %arrayidx4, align 2
  %add5 = or i32 %i.053, 3
  %arrayidx6 = getelementptr inbounds i16, i16* %arg3, i32 %add5
  %3 = load i16, i16* %arrayidx6, align 2
  ; Matching i16 loads from %arg2, each sign-extended and multiplied
  ; against its %arg3 partner: %mul, %mul14, %mul20, %mul26.
  %arrayidx8 = getelementptr inbounds i16, i16* %arg2, i32 %i.053
  %4 = load i16, i16* %arrayidx8, align 2
  %conv = sext i16 %4 to i32
  %conv9 = sext i16 %0 to i32
  %mul = mul nsw i32 %conv, %conv9
  %arrayidx11 = getelementptr inbounds i16, i16* %arg2, i32 %add1
  %5 = load i16, i16* %arrayidx11, align 2
  %conv12 = sext i16 %5 to i32
  %conv13 = sext i16 %1 to i32
  %mul14 = mul nsw i32 %conv12, %conv13
  %arrayidx17 = getelementptr inbounds i16, i16* %arg2, i32 %add3
  %6 = load i16, i16* %arrayidx17, align 2
  %conv18 = sext i16 %6 to i32
  %conv19 = sext i16 %2 to i32
  %mul20 = mul nsw i32 %conv18, %conv19
  %arrayidx23 = getelementptr inbounds i16, i16* %arg2, i32 %add5
  %7 = load i16, i16* %arrayidx23, align 2
  %conv24 = sext i16 %7 to i32
  %conv25 = sext i16 %3 to i32
  %mul26 = mul nsw i32 %conv24, %conv25
  ; Reduction chain: mac += mul + mul14 + mul20 + mul26.
  %add15 = add i32 %mul, %mac1.054
  %add21 = add i32 %add15, %mul14
  %add27 = add i32 %add21, %mul20
  %add28 = add i32 %add27, %mul26
  ; Advance by 4 elements per iteration; loop while i + 4 < arg (signed).
  %add29 = add nuw nsw i32 %i.053, 4
  %cmp = icmp slt i32 %add29, %arg
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}