; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB

; Very basic fast-isel functionality.
define i32 @add(i32 %a, i32 %b) nounwind {
entry:
  %a.addr = alloca i32, align 4
  %b.addr = alloca i32, align 4
  store i32 %a, i32* %a.addr
  store i32 %b, i32* %b.addr
  %tmp = load i32* %a.addr
  %tmp1 = load i32* %b.addr
  %add = add nsw i32 %tmp, %tmp1
  ret i32 %add
}

; Check truncate to bool
define void @test1(i32 %tmp) nounwind {
entry:
  %tobool = trunc i32 %tmp to i1
  br i1 %tobool, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  call void @test1(i32 0)
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  ret void
; ARM: test1:
; ARM: tst r0, #1
; THUMB: test1:
; THUMB: tst.w r0, #1
}

; Check some simple operations with immediates
define void @test2(i32 %tmp, i32* %ptr) nounwind {
; THUMB: test2:
; ARM: test2:

b1:
  %a = add i32 %tmp, 4096
  store i32 %a, i32* %ptr
  br label %b2

; THUMB: add.w {{.*}} #4096
; ARM: add {{.*}} #4096

b2:
  %b = add i32 %tmp, 4095
  store i32 %b, i32* %ptr
  br label %b3
; THUMB: addw {{.*}} #4095
; ARM: movw {{.*}} #4095
; ARM: add

b3:
  %c = or i32 %tmp, 4
  store i32 %c, i32* %ptr
  ret void

; THUMB: orr {{.*}} #4
; ARM: orr {{.*}} #4
}

define void @test3(i32 %tmp, i32* %ptr1, i16* %ptr2, i8* %ptr3) nounwind {
; THUMB: test3:
; ARM: test3:

bb1:
  %a1 = trunc i32 %tmp to i16
  %a2 = trunc i16 %a1 to i8
  %a3 = trunc i8 %a2 to i1
  %a4 = zext i1 %a3 to i8
  store i8 %a4, i8* %ptr3
  %a5 = zext i8 %a4 to i16
  store i16 %a5, i16* %ptr2
  %a6 = zext i16 %a5 to i32
  store i32 %a6, i32* %ptr1
  br label %bb2

; THUMB: and
; THUMB: strb
; THUMB: uxtb
; THUMB: strh
; THUMB: uxth
; ARM: and
; ARM: strb
; ARM: uxtb
; ARM: strh
; ARM: uxth

bb2:
  %b1 = trunc i32 %tmp to i16
  %b2 = trunc i16 %b1 to i8
  store i8 %b2, i8* %ptr3
  %b3 = sext i8 %b2 to i16
  store i16 %b3, i16* %ptr2
  %b4 = sext i16 %b3 to i32
  store i32 %b4, i32* %ptr1
  br label %bb3

; THUMB: strb
; THUMB: sxtb
; THUMB: strh
; THUMB: sxth
; ARM: strb
; ARM: sxtb
; ARM: strh
; ARM: sxth

bb3:
  %c1 = load i8* %ptr3
  %c2 = load i16* %ptr2
  %c3 = load i32* %ptr1
  %c4 = zext i8 %c1 to i32
  %c5 = sext i16 %c2 to i32
  %c6 = add i32 %c4, %c5
  %c7 = sub i32 %c3, %c6
  store i32 %c7, i32* %ptr1
  ret void

; THUMB: ldrb
; THUMB: ldrh
; THUMB: uxtb
; THUMB: sxth
; THUMB: add
; THUMB: sub
; ARM: ldrb
; ARM: ldrh
; ARM: uxtb
; ARM: sxth
; ARM: add
; ARM: sub
}

; Check loads/stores with globals
@test4g = external global i32

define void @test4() {
  %a = load i32* @test4g
  %b = add i32 %a, 1
  store i32 %b, i32* @test4g
  ret void

; THUMB: movw r0, :lower16:L_test4g$non_lazy_ptr
; THUMB: movt r0, :upper16:L_test4g$non_lazy_ptr
; THUMB: ldr r0, [r0]
; THUMB: ldr r1, [r0]
; THUMB: adds r1, #1
; THUMB: str r1, [r0]

; ARM: movw r0, :lower16:L_test4g$non_lazy_ptr
; ARM: movt r0, :upper16:L_test4g$non_lazy_ptr
; ARM: ldr r0, [r0]
; ARM: ldr r1, [r0]
; ARM: add r1, r1, #1
; ARM: str r1, [r0]
}

; Check unaligned stores
%struct.anon = type <{ float }>

@a = common global %struct.anon* null, align 4

define void @unaligned_store(float %x, float %y) nounwind {
entry:
; ARM: @unaligned_store
; ARM: vmov r1, s0
; ARM: str r1, [r0]

; THUMB: @unaligned_store
; THUMB: vmov r1, s0
; THUMB: str r1, [r0]

  %add = fadd float %x, %y
  %0 = load %struct.anon** @a, align 4
  %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
  store float %add, float* %x1, align 1
  ret void
}

; Doublewords require only word-alignment.
; rdar://10528060
%struct.anon.0 = type { double }

@foo_unpacked = common global %struct.anon.0 zeroinitializer, align 4

define void @test5(double %a, double %b) nounwind {
entry:
; ARM: @test5
; THUMB: @test5
  %add = fadd double %a, %b
  store double %add, double* getelementptr inbounds (%struct.anon.0* @foo_unpacked, i32 0, i32 0), align 4
; ARM: vstr d16, [r0]
; THUMB: vstr d16, [r0]
  ret void
}

; Check unaligned loads of floats
%class.TAlignTest = type <{ i16, float }>

define zeroext i1 @test6(%class.TAlignTest* %this) nounwind align 2 {
entry:
; ARM: @test6
; THUMB: @test6
  %0 = alloca %class.TAlignTest*, align 4
  store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
  %1 = load %class.TAlignTest** %0
  %2 = getelementptr inbounds %class.TAlignTest* %1, i32 0, i32 1
  %3 = load float* %2, align 1
  %4 = fcmp une float %3, 0.000000e+00
; ARM: ldr r0, [r0, #2]
; ARM: vmov s0, r0
; ARM: vcmpe.f32 s0, #0
; THUMB: ldr.w r0, [r0, #2]
; THUMB: vmov s0, r0
; THUMB: vcmpe.f32 s0, #0
  ret i1 %4
}

; Check that urem by a power of two is folded to an and with the mask (32 -> #31).
; ARM: @urem_fold
; THUMB: @urem_fold
; ARM: and r0, r0, #31
; THUMB: and r0, r0, #31
define i32 @urem_fold(i32 %a) nounwind {
  %rem = urem i32 %a, 32
  ret i32 %rem
}