; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-darwin8 -mcpu=yonah | FileCheck %s

; Verify i686 (SSE-era, -mcpu=yonah) lowering of the MMX maskmovq intrinsic:
; the two <1 x i64> arguments arrive on the stack, are copied 32 bits at a
; time into local slots, reloaded into MMX registers (%mm0/%mm1), the
; destination pointer is placed in %edi, and a single `maskmovq` performs the
; byte-masked store. The CHECK lines below are autogenerated — regenerate
; with update_llc_test_checks.py rather than editing them by hand.
define void @test(<1 x i64> %c64, <1 x i64> %mask1, i8* %P) {
; CHECK-LABEL: test:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    pushl %edi
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    subl $16, %esp
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    .cfi_offset %edi, -8
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; CHECK-NEXT:    movl %eax, (%esp)
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; CHECK-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edi
; CHECK-NEXT:    movq (%esp), %mm0
; CHECK-NEXT:    movq {{[0-9]+}}(%esp), %mm1
; CHECK-NEXT:    maskmovq %mm0, %mm1
; CHECK-NEXT:    addl $16, %esp
; CHECK-NEXT:    popl %edi
; CHECK-NEXT:    retl
entry:
	; Bitcast both <1 x i64> arguments to the opaque x86_mmx type the
	; intrinsic requires; the lowering above realizes these as stack
	; round-trips into MMX registers.
	%tmp4 = bitcast <1 x i64> %mask1 to x86_mmx		; <x86_mmx> [#uses=1]
	%tmp6 = bitcast <1 x i64> %c64 to x86_mmx		; <x86_mmx> [#uses=1]
	tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp4, x86_mmx %tmp6, i8* %P )
	ret void
}
33
; MMX byte-masked store intrinsic; lowers to the x86 MASKMOVQ instruction,
; which conditionally stores bytes of one MMX operand (selected by the high
; bit of each byte of the other operand) to the pointed-to address.
; NOTE(review): which of the two x86_mmx operands is data vs. mask is not
; evident from this file — confirm against the LangRef/Intel SDM if needed.
declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)
35