; Test VSX load/store instruction selection (lxvd2x/stxvd2x) for
; little-endian PowerPC (pwr8).
; RUN: llc -mcpu=pwr8 -mattr=+vsx -O2 -mtriple=powerpc64le-unknown-linux-gnu < %s > %t
; RUN: grep lxvd2x < %t | count 18
; RUN: grep stxvd2x < %t | count 18

      5 @vf = global <4 x float> <float -1.500000e+00, float 2.500000e+00, float -3.500000e+00, float 4.500000e+00>, align 16
      6 @vd = global <2 x double> <double 3.500000e+00, double -7.500000e+00>, align 16
      7 @vsi = global <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, align 16
      8 @vui = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
      9 @vsll = global <2 x i64> <i64 255, i64 -937>, align 16
     10 @vull = global <2 x i64> <i64 1447, i64 2894>, align 16
     11 @res_vsi = common global <4 x i32> zeroinitializer, align 16
     12 @res_vui = common global <4 x i32> zeroinitializer, align 16
     13 @res_vf = common global <4 x float> zeroinitializer, align 16
     14 @res_vsll = common global <2 x i64> zeroinitializer, align 16
     15 @res_vull = common global <2 x i64> zeroinitializer, align 16
     16 @res_vd = common global <2 x double> zeroinitializer, align 16
     18 define void @test1() {
     19 entry:
     20 ; CHECK-LABEL: test1
     21   %__a.addr.i31 = alloca i32, align 4
     22   %__b.addr.i32 = alloca <4 x i32>*, align 8
     23   %__a.addr.i29 = alloca i32, align 4
     24   %__b.addr.i30 = alloca <4 x float>*, align 8
     25   %__a.addr.i27 = alloca i32, align 4
     26   %__b.addr.i28 = alloca <2 x i64>*, align 8
     27   %__a.addr.i25 = alloca i32, align 4
     28   %__b.addr.i26 = alloca <2 x i64>*, align 8
     29   %__a.addr.i23 = alloca i32, align 4
     30   %__b.addr.i24 = alloca <2 x double>*, align 8
     31   %__a.addr.i20 = alloca <4 x i32>, align 16
     32   %__b.addr.i21 = alloca i32, align 4
     33   %__c.addr.i22 = alloca <4 x i32>*, align 8
     34   %__a.addr.i17 = alloca <4 x i32>, align 16
     35   %__b.addr.i18 = alloca i32, align 4
     36   %__c.addr.i19 = alloca <4 x i32>*, align 8
     37   %__a.addr.i14 = alloca <4 x float>, align 16
     38   %__b.addr.i15 = alloca i32, align 4
     39   %__c.addr.i16 = alloca <4 x float>*, align 8
     40   %__a.addr.i11 = alloca <2 x i64>, align 16
     41   %__b.addr.i12 = alloca i32, align 4
     42   %__c.addr.i13 = alloca <2 x i64>*, align 8
     43   %__a.addr.i8 = alloca <2 x i64>, align 16
     44   %__b.addr.i9 = alloca i32, align 4
     45   %__c.addr.i10 = alloca <2 x i64>*, align 8
     46   %__a.addr.i6 = alloca <2 x double>, align 16
     47   %__b.addr.i7 = alloca i32, align 4
     48   %__c.addr.i = alloca <2 x double>*, align 8
     49   %__a.addr.i = alloca i32, align 4
     50   %__b.addr.i = alloca <4 x i32>*, align 8
     51   store i32 0, i32* %__a.addr.i, align 4
     52   store <4 x i32>* @vsi, <4 x i32>** %__b.addr.i, align 8
     53   %0 = load i32, i32* %__a.addr.i, align 4
     54   %1 = load <4 x i32>*, <4 x i32>** %__b.addr.i, align 8
     55   %2 = bitcast <4 x i32>* %1 to i8*
     56   %3 = getelementptr i8, i8* %2, i32 %0
     57   %4 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %3)
     58   store <4 x i32> %4, <4 x i32>* @res_vsi, align 16
     59   store i32 0, i32* %__a.addr.i31, align 4
     60   store <4 x i32>* @vui, <4 x i32>** %__b.addr.i32, align 8
     61   %5 = load i32, i32* %__a.addr.i31, align 4
     62   %6 = load <4 x i32>*, <4 x i32>** %__b.addr.i32, align 8
     63   %7 = bitcast <4 x i32>* %6 to i8*
     64   %8 = getelementptr i8, i8* %7, i32 %5
     65   %9 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %8)
     66   store <4 x i32> %9, <4 x i32>* @res_vui, align 16
     67   store i32 0, i32* %__a.addr.i29, align 4
     68   store <4 x float>* @vf, <4 x float>** %__b.addr.i30, align 8
     69   %10 = load i32, i32* %__a.addr.i29, align 4
     70   %11 = load <4 x float>*, <4 x float>** %__b.addr.i30, align 8
     71   %12 = bitcast <4 x float>* %11 to i8*
     72   %13 = getelementptr i8, i8* %12, i32 %10
     73   %14 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %13)
     74   %15 = bitcast <4 x i32> %14 to <4 x float>
     75   store <4 x float> %15, <4 x float>* @res_vf, align 16
     76   store i32 0, i32* %__a.addr.i27, align 4
     77   store <2 x i64>* @vsll, <2 x i64>** %__b.addr.i28, align 8
     78   %16 = load i32, i32* %__a.addr.i27, align 4
     79   %17 = load <2 x i64>*, <2 x i64>** %__b.addr.i28, align 8
     80   %18 = bitcast <2 x i64>* %17 to i8*
     81   %19 = getelementptr i8, i8* %18, i32 %16
     82   %20 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %19)
     83   %21 = bitcast <2 x double> %20 to <2 x i64>
     84   store <2 x i64> %21, <2 x i64>* @res_vsll, align 16
     85   store i32 0, i32* %__a.addr.i25, align 4
     86   store <2 x i64>* @vull, <2 x i64>** %__b.addr.i26, align 8
     87   %22 = load i32, i32* %__a.addr.i25, align 4
     88   %23 = load <2 x i64>*, <2 x i64>** %__b.addr.i26, align 8
     89   %24 = bitcast <2 x i64>* %23 to i8*
     90   %25 = getelementptr i8, i8* %24, i32 %22
     91   %26 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %25)
     92   %27 = bitcast <2 x double> %26 to <2 x i64>
     93   store <2 x i64> %27, <2 x i64>* @res_vull, align 16
     94   store i32 0, i32* %__a.addr.i23, align 4
     95   store <2 x double>* @vd, <2 x double>** %__b.addr.i24, align 8
     96   %28 = load i32, i32* %__a.addr.i23, align 4
     97   %29 = load <2 x double>*, <2 x double>** %__b.addr.i24, align 8
     98   %30 = bitcast <2 x double>* %29 to i8*
     99   %31 = getelementptr i8, i8* %30, i32 %28
    100   %32 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %31)
    101   store <2 x double> %32, <2 x double>* @res_vd, align 16
    102   %33 = load <4 x i32>, <4 x i32>* @vsi, align 16
    103   store <4 x i32> %33, <4 x i32>* %__a.addr.i20, align 16
    104   store i32 0, i32* %__b.addr.i21, align 4
    105   store <4 x i32>* @res_vsi, <4 x i32>** %__c.addr.i22, align 8
    106   %34 = load <4 x i32>, <4 x i32>* %__a.addr.i20, align 16
    107   %35 = load i32, i32* %__b.addr.i21, align 4
    108   %36 = load <4 x i32>*, <4 x i32>** %__c.addr.i22, align 8
    109   %37 = bitcast <4 x i32>* %36 to i8*
    110   %38 = getelementptr i8, i8* %37, i32 %35
    111   call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %34, i8* %38)
    112   %39 = load <4 x i32>, <4 x i32>* @vui, align 16
    113   store <4 x i32> %39, <4 x i32>* %__a.addr.i17, align 16
    114   store i32 0, i32* %__b.addr.i18, align 4
    115   store <4 x i32>* @res_vui, <4 x i32>** %__c.addr.i19, align 8
    116   %40 = load <4 x i32>, <4 x i32>* %__a.addr.i17, align 16
    117   %41 = load i32, i32* %__b.addr.i18, align 4
    118   %42 = load <4 x i32>*, <4 x i32>** %__c.addr.i19, align 8
    119   %43 = bitcast <4 x i32>* %42 to i8*
    120   %44 = getelementptr i8, i8* %43, i32 %41
    121   call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %40, i8* %44)
    122   %45 = load <4 x float>, <4 x float>* @vf, align 16
    123   store <4 x float> %45, <4 x float>* %__a.addr.i14, align 16
    124   store i32 0, i32* %__b.addr.i15, align 4
    125   store <4 x float>* @res_vf, <4 x float>** %__c.addr.i16, align 8
    126   %46 = load <4 x float>, <4 x float>* %__a.addr.i14, align 16
    127   %47 = bitcast <4 x float> %46 to <4 x i32>
    128   %48 = load i32, i32* %__b.addr.i15, align 4
    129   %49 = load <4 x float>*, <4 x float>** %__c.addr.i16, align 8
    130   %50 = bitcast <4 x float>* %49 to i8*
    131   %51 = getelementptr i8, i8* %50, i32 %48
    132   call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %47, i8* %51) #1
    133   %52 = load <2 x i64>, <2 x i64>* @vsll, align 16
    134   store <2 x i64> %52, <2 x i64>* %__a.addr.i11, align 16
    135   store i32 0, i32* %__b.addr.i12, align 4
    136   store <2 x i64>* @res_vsll, <2 x i64>** %__c.addr.i13, align 8
    137   %53 = load <2 x i64>, <2 x i64>* %__a.addr.i11, align 16
    138   %54 = bitcast <2 x i64> %53 to <2 x double>
    139   %55 = load i32, i32* %__b.addr.i12, align 4
    140   %56 = load <2 x i64>*, <2 x i64>** %__c.addr.i13, align 8
    141   %57 = bitcast <2 x i64>* %56 to i8*
    142   %58 = getelementptr i8, i8* %57, i32 %55
    143   call void @llvm.ppc.vsx.stxvd2x(<2 x double> %54, i8* %58)
    144   %59 = load <2 x i64>, <2 x i64>* @vull, align 16
    145   store <2 x i64> %59, <2 x i64>* %__a.addr.i8, align 16
    146   store i32 0, i32* %__b.addr.i9, align 4
    147   store <2 x i64>* @res_vull, <2 x i64>** %__c.addr.i10, align 8
    148   %60 = load <2 x i64>, <2 x i64>* %__a.addr.i8, align 16
    149   %61 = bitcast <2 x i64> %60 to <2 x double>
    150   %62 = load i32, i32* %__b.addr.i9, align 4
    151   %63 = load <2 x i64>*, <2 x i64>** %__c.addr.i10, align 8
    152   %64 = bitcast <2 x i64>* %63 to i8*
    153   %65 = getelementptr i8, i8* %64, i32 %62
    154   call void @llvm.ppc.vsx.stxvd2x(<2 x double> %61, i8* %65)
    155   %66 = load <2 x double>, <2 x double>* @vd, align 16
    156   store <2 x double> %66, <2 x double>* %__a.addr.i6, align 16
    157   store i32 0, i32* %__b.addr.i7, align 4
    158   store <2 x double>* @res_vd, <2 x double>** %__c.addr.i, align 8
    159   %67 = load <2 x double>, <2 x double>* %__a.addr.i6, align 16
    160   %68 = load i32, i32* %__b.addr.i7, align 4
    161   %69 = load <2 x double>*, <2 x double>** %__c.addr.i, align 8
    162   %70 = bitcast <2 x double>* %69 to i8*
    163   %71 = getelementptr i8, i8* %70, i32 %68
    164   call void @llvm.ppc.vsx.stxvd2x(<2 x double> %67, i8* %71)
    165   ret void
    166 }
    168 declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
    169 declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
    170 declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
    171 declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)