# RUN: llc %s -start-after=shrink-wrap -march=mips64 -mcpu=mips64r6 -mattr=+fp64,+msa -o /dev/null

# Test that the estimated size of the stack leads to the creation of an
# emergency spill slot when MSA is in use. Previously, this test case would
# fail during register scavenging due to the lack of a spill slot.
--- |
  define inreg { i64, i64 } @test(i64 inreg %a.coerce0, i64 inreg %a.coerce1, i64 inreg %b.coerce0, i64 inreg %b.coerce1, i32 signext %c) {
  entry:
    %retval = alloca <16 x i8>, align 16
    %a = alloca <16 x i8>, align 16
    %b = alloca <16 x i8>, align 16
    %a.addr = alloca <16 x i8>, align 16
    %b.addr = alloca <16 x i8>, align 16
    %c.addr = alloca i32, align 4
    %g = alloca <16 x i8>*, align 8
    %d = alloca i8*, align 8
    %0 = bitcast <16 x i8>* %a to { i64, i64 }*
    %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0
    store i64 %a.coerce0, i64* %1, align 16
    %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1
    store i64 %a.coerce1, i64* %2, align 8
    %a1 = load <16 x i8>, <16 x i8>* %a, align 16
    %3 = bitcast <16 x i8>* %b to { i64, i64 }*
    %4 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %3, i32 0, i32 0
    store i64 %b.coerce0, i64* %4, align 16
    %5 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %3, i32 0, i32 1
    store i64 %b.coerce1, i64* %5, align 8
    %b2 = load <16 x i8>, <16 x i8>* %b, align 16
    store <16 x i8> %a1, <16 x i8>* %a.addr, align 16
    store <16 x i8> %b2, <16 x i8>* %b.addr, align 16
    store i32 %c, i32* %c.addr, align 4
    %6 = alloca i8, i64 6400, align 16
    %7 = bitcast i8* %6 to <16 x i8>*
    store <16 x i8>* %7, <16 x i8>** %g, align 8
    %8 = load <16 x i8>*, <16 x i8>** %g, align 8
    call void @h(<16 x i8>* %b.addr, <16 x i8>* %8)
    %9 = load <16 x i8>*, <16 x i8>** %g, align 8
    %10 = bitcast <16 x i8>* %9 to i8*
    store i8* %10, i8** %d, align 8
    %11 = load <16 x i8>, <16 x i8>* %a.addr, align 16
    %12 = load i8*, i8** %d, align 8
    %arrayidx = getelementptr inbounds i8, i8* %12, i64 0
    %13 = load i8, i8* %arrayidx, align 1
    %conv = sext i8 %13 to i32
    %14 = call <16 x i8> @llvm.mips.fill.b(i32 %conv)
    %add = add <16 x i8> %11, %14
    %15 = load i8*, i8** %d, align 8
    %arrayidx3 = getelementptr inbounds i8, i8* %15, i64 1
    %16 = load i8, i8* %arrayidx3, align 1
    %conv4 = sext i8 %16 to i32
    %17 = call <16 x i8> @llvm.mips.fill.b(i32 %conv4)
    %add5 = add <16 x i8> %add, %17
    %18 = load <16 x i8>, <16 x i8>* %b.addr, align 16
    %add6 = add <16 x i8> %18, %add5
    store <16 x i8> %add6, <16 x i8>* %b.addr, align 16
    %19 = load <16 x i8>, <16 x i8>* %b.addr, align 16
    store <16 x i8> %19, <16 x i8>* %retval, align 16
    %20 = bitcast <16 x i8>* %retval to { i64, i64 }*
    %21 = load { i64, i64 }, { i64, i64 }* %20, align 16
    ret { i64, i64 } %21
  }

  declare void @h(<16 x i8>*, <16 x i8>*)

  declare <16 x i8> @llvm.mips.fill.b(i32)

  declare void @llvm.stackprotector(i8*, i8**)

...
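# The MIR below keeps every general-purpose register live across the stores to
# %stack.7.d. Together with the 6400-byte object (%stack.8), the frame is large
# enough that some frame-index offsets no longer fit in the signed 10-bit offset
# field of the MSA ld.b/st.b instructions, so eliminating those frame indices
# needs a scratch register, and the scavenger can only obtain one if an
# emergency spill slot has been reserved.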
---
name: test
alignment: 3
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
liveins:
  - { reg: '$a0_64', virtual-reg: '' }
  - { reg: '$a1_64', virtual-reg: '' }
  - { reg: '$a2_64', virtual-reg: '' }
  - { reg: '$a3_64', virtual-reg: '' }
  - { reg: '$t0_64', virtual-reg: '' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 16
  adjustsStack: false
  hasCalls: true
  stackProtector: ''
  maxCallFrameSize: 4294967295
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
  savePoint: ''
  restorePoint: ''
fixedStack:
stack:
  - { id: 0, name: retval, type: default, offset: 0, size: 16, alignment: 16,
      callee-saved-register: '', debug-info-variable: '',
      debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: a, type: default, offset: 0, size: 16, alignment: 16,
      callee-saved-register: '', debug-info-variable: '',
      debug-info-expression: '', debug-info-location: '' }
  - { id: 2, name: b, type: default, offset: 0, size: 16, alignment: 16,
      callee-saved-register: '', debug-info-variable: '',
      debug-info-expression: '', debug-info-location: '' }
  - { id: 3, name: a.addr, type: default, offset: 0, size: 16, alignment: 16,
      callee-saved-register: '', debug-info-variable: '',
      debug-info-expression: '', debug-info-location: '' }
  - { id: 4, name: b.addr, type: default, offset: 0, size: 16, alignment: 16,
      callee-saved-register: '', debug-info-variable: '',
      debug-info-expression: '', debug-info-location: '' }
  - { id: 5, name: c.addr, type: default, offset: 0, size: 4, alignment: 4,
      callee-saved-register: '', debug-info-variable: '',
      debug-info-expression: '', debug-info-location: '' }
  - { id: 6, name: g, type: default, offset: 0, size: 8, alignment: 8,
      callee-saved-register: '', debug-info-variable: '',
      debug-info-expression: '', debug-info-location: '' }
  - { id: 7, name: d, type: default, offset: 0, size: 8, alignment: 8,
      callee-saved-register: '', debug-info-variable: '',
      debug-info-expression: '', debug-info-location: '' }
  - { id: 8, name: '', type: default, offset: 0, size: 6400,
      alignment: 16, callee-saved-register: '', debug-info-variable: '',
      debug-info-expression: '', debug-info-location: '' }
constants:
body: |
  bb.0.entry:
    liveins: $a0_64, $a1_64, $a2_64, $a3_64, $t0_64

    SD killed $a0_64, %stack.1.a, 0 :: (store 8 into %ir.1, align 16)
    SD killed $a1_64, %stack.1.a, 8 :: (store 8 into %ir.2)
    $w0 = LD_B %stack.1.a, 0 :: (dereferenceable load 16 from %ir.a)
    SD killed $a2_64, %stack.2.b, 0 :: (store 8 into %ir.4, align 16)
    SD killed $a3_64, %stack.2.b, 8 :: (store 8 into %ir.5)
    $w1 = LD_B %stack.2.b, 0 :: (dereferenceable load 16 from %ir.b)
    ST_B killed $w0, %stack.3.a.addr, 0 :: (store 16 into %ir.a.addr)
    ST_B killed $w1, %stack.4.b.addr, 0 :: (store 16 into %ir.b.addr)
    SW $t0, %stack.5.c.addr, 0, implicit killed $t0_64 :: (store 4 into %ir.c.addr)
    $at_64 = LEA_ADDiu64 %stack.8, 0
    SD killed $at_64, %stack.6.g, 0 :: (store 8 into %ir.g)
    $a1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
    $a0_64 = LEA_ADDiu64 %stack.4.b.addr, 0
    JAL @h, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $a1_64, implicit-def $sp
    ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
    $at_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $v0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $v1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $a0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $a1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $a2_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $a3_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t2_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t3_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t4_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t5_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t6_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t7_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $s0_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $s1_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $s2_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $s3_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $s4_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $s5_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $s6_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $s7_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t8_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $t9_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $ra_64 = LD %stack.6.g, 0 :: (dereferenceable load 8 from %ir.g)
    $w0 = LD_B %stack.3.a.addr, 0 :: (dereferenceable load 16 from %ir.a.addr)
    SD $at_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $v0_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $v1_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $a0_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $a1_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $a2_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $a3_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t0_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t1_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t2_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t3_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t4_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t5_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t6_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t7_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $s0_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $s1_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $s2_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $s3_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $s4_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $s5_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $s6_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $s7_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t8_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $t9_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    SD $ra_64, %stack.7.d, 0 :: (store 8 into %ir.d)
    $at_64 = LD %stack.7.d, 0 :: (dereferenceable load 8 from %ir.d)
    $v0 = LB $at_64, 0 :: (load 1 from %ir.arrayidx)
    $w1 = FILL_B killed $v0
    $w0 = ADDV_B killed $w0, killed $w1
    $at = LB killed $at_64, 1 :: (load 1 from %ir.arrayidx3)
    $w1 = FILL_B killed $at
    $w0 = ADDV_B killed $w0, killed $w1
    $w1 = LD_B %stack.4.b.addr, 0 :: (dereferenceable load 16 from %ir.b.addr)
    $w0 = ADDV_B killed $w1, killed $w0
    ST_B killed $w0, %stack.4.b.addr, 0 :: (store 16 into %ir.b.addr)
    $w0 = LD_B %stack.4.b.addr, 0 :: (dereferenceable load 16 from %ir.b.addr)
    ST_B killed $w0, %stack.0.retval, 0 :: (store 16 into %ir.retval)
    $v0_64 = LD %stack.0.retval, 0 :: (dereferenceable load 8 from %ir.20, align 16)
    $v1_64 = LD %stack.0.retval, 8 :: (dereferenceable load 8 from %ir.20 + 8, align 16)
    RetRA implicit $v0_64, implicit $v1_64

...