; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=1 -mtriple=x86_64-- < %s | FileCheck --check-prefix=X64 --check-prefix=BWON64 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=0 -mtriple=x86_64-- < %s | FileCheck --check-prefix=X64 --check-prefix=BWOFF64 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=1 -mtriple=i386-- < %s | FileCheck --check-prefix=X32 --check-prefix=BWON32 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=0 -mtriple=i386-- < %s | FileCheck --check-prefix=X32 --check-prefix=BWOFF32 %s

target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"

define i8 @test_movb(i8 %a0) {
; BWON64-LABEL: test_movb:
; BWON64:       # BB#0:
; BWON64-NEXT:    movl %edi, %eax
; BWON64-NEXT:    retq
;
; BWOFF64-LABEL: test_movb:
; BWOFF64:       # BB#0:
; BWOFF64-NEXT:    movb %dil, %al
; BWOFF64-NEXT:    retq
;
; X32-LABEL: test_movb:
; X32:       # BB#0:
; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
; X32-NEXT:    retl
  ret i8 %a0
}

define i16 @test_movw(i16 %a0) {
; BWON64-LABEL: test_movw:
; BWON64:       # BB#0:
; BWON64-NEXT:    movl %edi, %eax
; BWON64-NEXT:    retq
;
; BWOFF64-LABEL: test_movw:
; BWOFF64:       # BB#0:
; BWOFF64-NEXT:    movw %di, %ax
; BWOFF64-NEXT:    retq
;
; BWON32-LABEL: test_movw:
; BWON32:       # BB#0:
; BWON32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; BWON32-NEXT:    retl
;
; BWOFF32-LABEL: test_movw:
; BWOFF32:       # BB#0:
; BWOFF32-NEXT:    movw {{[0-9]+}}(%esp), %ax
; BWOFF32-NEXT:    retl
  ret i16 %a0
}

; Verify we don't mess with H-reg copies (only generated in 32-bit mode).
define i8 @test_movb_hreg(i16 %a0) {
; X64-LABEL: test_movb_hreg:
; X64:       # BB#0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    shrl $8, %eax
; X64-NEXT:    addb %dil, %al
; X64-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
; X64-NEXT:    retq
;
; X32-LABEL: test_movb_hreg:
; X32:       # BB#0:
; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    addb %al, %ah
; X32-NEXT:    movb %ah, %al
; X32-NEXT:    retl
  %tmp0 = trunc i16 %a0 to i8
  %tmp1 = lshr i16 %a0, 8
  %tmp2 = trunc i16 %tmp1 to i8
  %tmp3 = add i8 %tmp0, %tmp2
  ret i8 %tmp3
}