; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mattr=-sse | FileCheck %s -check-prefix=CHECK -check-prefix=NOSSE
;
; Verifies that x32 va_start lowering is sane. To regenerate this test, use
; cat <<EOF |
; #include <stdarg.h>
;
; int foo(float a, const char* fmt, ...) {
;   va_list ap;
;   va_start(ap, fmt);
;   int value = va_arg(ap, int);
;   va_end(ap);
;   return value;
; }
; EOF
; build/bin/clang -mx32 -O3 -o- -S -emit-llvm -xc -
;
target datalayout = "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnux32"

%struct.__va_list_tag = type { i32, i32, i8*, i8* }

define i32 @foo(float %a, i8* nocapture readnone %fmt, ...) nounwind {
entry:
  %ap = alloca [1 x %struct.__va_list_tag], align 16
  %0 = bitcast [1 x %struct.__va_list_tag]* %ap to i8*
  call void @llvm.lifetime.start(i64 16, i8* %0) #2
  call void @llvm.va_start(i8* %0)
; SSE: subl $72, %esp
; SSE: testb %al, %al
; SSE: je .[[NOFP:.*]]
; SSE-DAG: movaps %xmm1
; SSE-DAG: movaps %xmm2
; SSE-DAG: movaps %xmm3
; SSE-DAG: movaps %xmm4
; SSE-DAG: movaps %xmm5
; SSE-DAG: movaps %xmm6
; SSE-DAG: movaps %xmm7
; NOSSE-NOT: xmm
; SSE: .[[NOFP]]:
; CHECK-DAG: movq %r9
; CHECK-DAG: movq %r8
; CHECK-DAG: movq %rcx
; CHECK-DAG: movq %rdx
; CHECK-DAG: movq %rsi
  %gp_offset_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 0
  %gp_offset = load i32, i32* %gp_offset_p, align 16
  %fits_in_gp = icmp ult i32 %gp_offset, 41
  br i1 %fits_in_gp, label %vaarg.in_reg, label %vaarg.in_mem
; CHECK: cmpl $40, [[COUNT:.*]]
; CHECK: ja .[[IN_MEM:.*]]

vaarg.in_reg:                                     ; preds = %entry
  %1 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 3
  %reg_save_area = load i8*, i8** %1, align 4
  %2 = getelementptr i8, i8* %reg_save_area, i32 %gp_offset
  %3 = add i32 %gp_offset, 8
  store i32 %3, i32* %gp_offset_p, align 16
  br label %vaarg.end
; CHECK: movl {{[^,]*}}, [[ADDR:.*]]
; CHECK: addl [[COUNT]], [[ADDR]]
; SSE: jmp .[[END:.*]]
; NOSSE: movl ([[ADDR]]), %eax
; NOSSE: retq
; CHECK: .[[IN_MEM]]:
vaarg.in_mem:                                     ; preds = %entry
  %overflow_arg_area_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 2
  %overflow_arg_area = load i8*, i8** %overflow_arg_area_p, align 8
  %overflow_arg_area.next = getelementptr i8, i8* %overflow_arg_area, i32 8
  store i8* %overflow_arg_area.next, i8** %overflow_arg_area_p, align 8
  br label %vaarg.end
; CHECK: movl {{[^,]*}}, [[ADDR]]
; NOSSE: movl ([[ADDR]]), %eax
; NOSSE: retq
; SSE: .[[END]]:

vaarg.end:                                        ; preds = %vaarg.in_mem, %vaarg.in_reg
  %vaarg.addr.in = phi i8* [ %2, %vaarg.in_reg ], [ %overflow_arg_area, %vaarg.in_mem ]
  %vaarg.addr = bitcast i8* %vaarg.addr.in to i32*
  %4 = load i32, i32* %vaarg.addr, align 4
  call void @llvm.va_end(i8* %0)
  call void @llvm.lifetime.end(i64 16, i8* %0) #2
  ret i32 %4
; SSE: movl ([[ADDR]]), %eax
; SSE: retq
}

; Function Attrs: nounwind argmemonly
declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind

; Function Attrs: nounwind
declare void @llvm.va_start(i8*) nounwind

; Function Attrs: nounwind
declare void @llvm.va_end(i8*) nounwind

; Function Attrs: nounwind argmemonly
declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind