; (file viewer header removed — this is an LLVM regression test from test/CodeGen/X86)
      1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
      2 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X86
      3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X64
      4 
      5 ; Check for assert in foldMaskAndShiftToScale due to out of range mask scaling.
      6 
      7 @b = common global i8 zeroinitializer, align 1
      8 @c = common global i8 zeroinitializer, align 1
      9 @d = common global i64 zeroinitializer, align 8
     10 @e = common global i64 zeroinitializer, align 8
     11 
     12 define void @foo() {
     13 ; X86-LABEL: foo:
     14 ; X86:       # %bb.0:
     15 ; X86-NEXT:    pushl %eax
     16 ; X86-NEXT:    .cfi_def_cfa_offset 8
     17 ; X86-NEXT:    movl d, %eax
     18 ; X86-NEXT:    notl %eax
     19 ; X86-NEXT:    movl d+4, %ecx
     20 ; X86-NEXT:    notl %ecx
     21 ; X86-NEXT:    andl $701685459, %ecx # imm = 0x29D2DED3
     22 ; X86-NEXT:    andl $-564453154, %eax # imm = 0xDE5B20DE
     23 ; X86-NEXT:    shrdl $21, %ecx, %eax
     24 ; X86-NEXT:    shrl $21, %ecx
     25 ; X86-NEXT:    andl $-2, %eax
     26 ; X86-NEXT:    xorl %edx, %edx
     27 ; X86-NEXT:    addl $7, %eax
     28 ; X86-NEXT:    adcxl %edx, %ecx
     29 ; X86-NEXT:    pushl %ecx
     30 ; X86-NEXT:    .cfi_adjust_cfa_offset 4
     31 ; X86-NEXT:    pushl %eax
     32 ; X86-NEXT:    .cfi_adjust_cfa_offset 4
     33 ; X86-NEXT:    pushl $0
     34 ; X86-NEXT:    .cfi_adjust_cfa_offset 4
     35 ; X86-NEXT:    pushl $0
     36 ; X86-NEXT:    .cfi_adjust_cfa_offset 4
     37 ; X86-NEXT:    calll __divdi3
     38 ; X86-NEXT:    addl $16, %esp
     39 ; X86-NEXT:    .cfi_adjust_cfa_offset -16
     40 ; X86-NEXT:    orl %eax, %edx
     41 ; X86-NEXT:    setne {{[0-9]+}}(%esp)
     42 ; X86-NEXT:    popl %eax
     43 ; X86-NEXT:    .cfi_def_cfa_offset 4
     44 ; X86-NEXT:    retl
     45 ;
     46 ; X64-LABEL: foo:
     47 ; X64:       # %bb.0:
     48 ; X64-NEXT:    movq {{.*}}(%rip), %rax
     49 ; X64-NEXT:    movabsq $3013716102212485120, %rcx # imm = 0x29D2DED3DE400000
     50 ; X64-NEXT:    andnq %rcx, %rax, %rcx
     51 ; X64-NEXT:    shrq $21, %rcx
     52 ; X64-NEXT:    addq $7, %rcx
     53 ; X64-NEXT:    movabsq $4393751543808, %rax # imm = 0x3FF00000000
     54 ; X64-NEXT:    testq %rax, %rcx
     55 ; X64-NEXT:    je .LBB0_1
     56 ; X64-NEXT:  # %bb.2:
     57 ; X64-NEXT:    xorl %eax, %eax
     58 ; X64-NEXT:    xorl %edx, %edx
     59 ; X64-NEXT:    divq %rcx
     60 ; X64-NEXT:    jmp .LBB0_3
     61 ; X64-NEXT:  .LBB0_1:
     62 ; X64-NEXT:    xorl %eax, %eax
     63 ; X64-NEXT:    xorl %edx, %edx
     64 ; X64-NEXT:    divl %ecx
     65 ; X64-NEXT:    # kill: def $eax killed $eax def $rax
     66 ; X64-NEXT:  .LBB0_3:
     67 ; X64-NEXT:    testq %rax, %rax
     68 ; X64-NEXT:    setne -{{[0-9]+}}(%rsp)
     69 ; X64-NEXT:    retq
     70   %1 = alloca i8, align 1
     71   %2 = load i64, i64* @d, align 8
     72   %3 = or i64 -3013716102214263007, %2
     73   %4 = xor i64 %3, -1
     74   %5 = load i64, i64* @e, align 8
     75   %6 = load i8, i8* @b, align 1
     76   %7 = trunc i8 %6 to i1
     77   %8 = zext i1 %7 to i64
     78   %9 = xor i64 %5, %8
     79   %10 = load i8, i8* @c, align 1
     80   %11 = trunc i8 %10 to i1
     81   %12 = zext i1 %11 to i32
     82   %13 = or i32 551409149, %12
     83   %14 = sub nsw i32 %13, 551409131
     84   %15 = zext i32 %14 to i64
     85   %16 = shl i64 %9, %15
     86   %17 = sub nsw i64 %16, 223084523
     87   %18 = ashr i64 %4, %17
     88   %19 = and i64 %18, 9223372036854775806
     89   %20 = add nsw i64 7, %19
     90   %21 = sdiv i64 0, %20
     91   %22 = icmp ne i64 %21, 0
     92   %23 = zext i1 %22 to i8
     93   store i8 %23, i8* %1, align 1
     94   ret void
     95 }