; Test the MSA intrinsics that are encoded with the BIT instruction format.

; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s

@llvm_mips_sat_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_sat_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_sat_s_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_sat_s_b_ARG1
  %1 = tail call <16 x i8> @llvm.mips.sat.s.b(<16 x i8> %0, i32 7)
  store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_s_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.sat.s.b(<16 x i8>, i32) nounwind

; CHECK: llvm_mips_sat_s_b_test:
; CHECK: ld.b
; CHECK: sat_s.b
; CHECK: st.b
; CHECK: .size llvm_mips_sat_s_b_test
;
@llvm_mips_sat_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_sat_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_sat_s_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_sat_s_h_ARG1
  %1 = tail call <8 x i16> @llvm.mips.sat.s.h(<8 x i16> %0, i32 7)
  store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_s_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.sat.s.h(<8 x i16>, i32) nounwind

; CHECK: llvm_mips_sat_s_h_test:
; CHECK: ld.h
; CHECK: sat_s.h
; CHECK: st.h
; CHECK: .size llvm_mips_sat_s_h_test
;
@llvm_mips_sat_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_sat_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_sat_s_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_sat_s_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.sat.s.w(<4 x i32> %0, i32 7)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_s_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.sat.s.w(<4 x i32>, i32) nounwind

; CHECK: llvm_mips_sat_s_w_test:
; CHECK: ld.w
; CHECK: sat_s.w
; CHECK: st.w
; CHECK: .size llvm_mips_sat_s_w_test
;
@llvm_mips_sat_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_sat_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_sat_s_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_sat_s_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.sat.s.d(<2 x i64> %0, i32 7)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_s_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.sat.s.d(<2 x i64>, i32) nounwind

; CHECK: llvm_mips_sat_s_d_test:
; CHECK: ld.d
; CHECK: sat_s.d
; CHECK: st.d
; CHECK: .size llvm_mips_sat_s_d_test
;
@llvm_mips_sat_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_sat_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_sat_u_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_sat_u_b_ARG1
  %1 = tail call <16 x i8> @llvm.mips.sat.u.b(<16 x i8> %0, i32 7)
  store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_u_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.sat.u.b(<16 x i8>, i32) nounwind

; CHECK: llvm_mips_sat_u_b_test:
; CHECK: ld.b
; CHECK: sat_u.b
; CHECK: st.b
; CHECK: .size llvm_mips_sat_u_b_test
;
@llvm_mips_sat_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_sat_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_sat_u_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_sat_u_h_ARG1
  %1 = tail call <8 x i16> @llvm.mips.sat.u.h(<8 x i16> %0, i32 7)
  store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_u_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.sat.u.h(<8 x i16>, i32) nounwind

; CHECK: llvm_mips_sat_u_h_test:
; CHECK: ld.h
; CHECK: sat_u.h
; CHECK: st.h
; CHECK: .size llvm_mips_sat_u_h_test
;
@llvm_mips_sat_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_sat_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_sat_u_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_sat_u_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.sat.u.w(<4 x i32> %0, i32 7)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_u_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.sat.u.w(<4 x i32>, i32) nounwind

; CHECK: llvm_mips_sat_u_w_test:
; CHECK: ld.w
; CHECK: sat_u.w
; CHECK: st.w
; CHECK: .size llvm_mips_sat_u_w_test
;
@llvm_mips_sat_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_sat_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_sat_u_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_sat_u_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.sat.u.d(<2 x i64> %0, i32 7)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_u_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.sat.u.d(<2 x i64>, i32) nounwind

; CHECK: llvm_mips_sat_u_d_test:
; CHECK: ld.d
; CHECK: sat_u.d
; CHECK: st.d
; CHECK: .size llvm_mips_sat_u_d_test
;
@llvm_mips_slli_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_slli_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_slli_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_slli_b_ARG1
  %1 = tail call <16 x i8> @llvm.mips.slli.b(<16 x i8> %0, i32 7)
  store <16 x i8> %1, <16 x i8>* @llvm_mips_slli_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32) nounwind

; CHECK: llvm_mips_slli_b_test:
; CHECK: ld.b
; CHECK: slli.b
; CHECK: st.b
; CHECK: .size llvm_mips_slli_b_test
;
@llvm_mips_slli_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_slli_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_slli_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_slli_h_ARG1
  %1 = tail call <8 x i16> @llvm.mips.slli.h(<8 x i16> %0, i32 7)
  store <8 x i16> %1, <8 x i16>* @llvm_mips_slli_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32) nounwind

; CHECK: llvm_mips_slli_h_test:
; CHECK: ld.h
; CHECK: slli.h
; CHECK: st.h
; CHECK: .size llvm_mips_slli_h_test
;
@llvm_mips_slli_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_slli_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_slli_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_slli_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %0, i32 7)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_slli_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32) nounwind

; CHECK: llvm_mips_slli_w_test:
; CHECK: ld.w
; CHECK: slli.w
; CHECK: st.w
; CHECK: .size llvm_mips_slli_w_test
;
@llvm_mips_slli_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_slli_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_slli_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_slli_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %0, i32 7)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_slli_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32) nounwind

; CHECK: llvm_mips_slli_d_test:
; CHECK: ld.d
; CHECK: slli.d
; CHECK: st.d
; CHECK: .size llvm_mips_slli_d_test
;
@llvm_mips_srai_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_srai_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_srai_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_srai_b_ARG1
  %1 = tail call <16 x i8> @llvm.mips.srai.b(<16 x i8> %0, i32 7)
  store <16 x i8> %1, <16 x i8>* @llvm_mips_srai_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32) nounwind

; CHECK: llvm_mips_srai_b_test:
; CHECK: ld.b
; CHECK: srai.b
; CHECK: st.b
; CHECK: .size llvm_mips_srai_b_test
;
@llvm_mips_srai_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_srai_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_srai_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_srai_h_ARG1
  %1 = tail call <8 x i16> @llvm.mips.srai.h(<8 x i16> %0, i32 7)
  store <8 x i16> %1, <8 x i16>* @llvm_mips_srai_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32) nounwind

; CHECK: llvm_mips_srai_h_test:
; CHECK: ld.h
; CHECK: srai.h
; CHECK: st.h
; CHECK: .size llvm_mips_srai_h_test
;
@llvm_mips_srai_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_srai_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_srai_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_srai_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.srai.w(<4 x i32> %0, i32 7)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_srai_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32) nounwind

; CHECK: llvm_mips_srai_w_test:
; CHECK: ld.w
; CHECK: srai.w
; CHECK: st.w
; CHECK: .size llvm_mips_srai_w_test
;
@llvm_mips_srai_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_srai_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_srai_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_srai_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.srai.d(<2 x i64> %0, i32 7)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_srai_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32) nounwind

; CHECK: llvm_mips_srai_d_test:
; CHECK: ld.d
; CHECK: srai.d
; CHECK: st.d
; CHECK: .size llvm_mips_srai_d_test
;
@llvm_mips_srari_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_srari_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_srari_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_srari_b_ARG1
  %1 = tail call <16 x i8> @llvm.mips.srari.b(<16 x i8> %0, i32 7)
  store <16 x i8> %1, <16 x i8>* @llvm_mips_srari_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32) nounwind

; CHECK: llvm_mips_srari_b_test:
; CHECK: ld.b
; CHECK: srari.b
; CHECK: st.b
; CHECK: .size llvm_mips_srari_b_test
;
@llvm_mips_srari_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_srari_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_srari_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_srari_h_ARG1
  %1 = tail call <8 x i16> @llvm.mips.srari.h(<8 x i16> %0, i32 7)
  store <8 x i16> %1, <8 x i16>* @llvm_mips_srari_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32) nounwind

; CHECK: llvm_mips_srari_h_test:
; CHECK: ld.h
; CHECK: srari.h
; CHECK: st.h
; CHECK: .size llvm_mips_srari_h_test
;
@llvm_mips_srari_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_srari_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_srari_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_srari_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.srari.w(<4 x i32> %0, i32 7)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_srari_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32) nounwind

; CHECK: llvm_mips_srari_w_test:
; CHECK: ld.w
; CHECK: srari.w
; CHECK: st.w
; CHECK: .size llvm_mips_srari_w_test
;
@llvm_mips_srari_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_srari_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_srari_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_srari_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.srari.d(<2 x i64> %0, i32 7)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_srari_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32) nounwind

; CHECK: llvm_mips_srari_d_test:
; CHECK: ld.d
; CHECK: srari.d
; CHECK: st.d
; CHECK: .size llvm_mips_srari_d_test
;
@llvm_mips_srli_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_srli_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_srli_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_srli_b_ARG1
  %1 = tail call <16 x i8> @llvm.mips.srli.b(<16 x i8> %0, i32 7)
  store <16 x i8> %1, <16 x i8>* @llvm_mips_srli_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32) nounwind

; CHECK: llvm_mips_srli_b_test:
; CHECK: ld.b
; CHECK: srli.b
; CHECK: st.b
; CHECK: .size llvm_mips_srli_b_test
;
@llvm_mips_srli_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_srli_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_srli_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_srli_h_ARG1
  %1 = tail call <8 x i16> @llvm.mips.srli.h(<8 x i16> %0, i32 7)
  store <8 x i16> %1, <8 x i16>* @llvm_mips_srli_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32) nounwind

; CHECK: llvm_mips_srli_h_test:
; CHECK: ld.h
; CHECK: srli.h
; CHECK: st.h
; CHECK: .size llvm_mips_srli_h_test
;
@llvm_mips_srli_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_srli_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_srli_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_srli_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_srli_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32) nounwind

; CHECK: llvm_mips_srli_w_test:
; CHECK: ld.w
; CHECK: srli.w
; CHECK: st.w
; CHECK: .size llvm_mips_srli_w_test
;
@llvm_mips_srli_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_srli_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_srli_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_srli_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 7)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_srli_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32) nounwind

; CHECK: llvm_mips_srli_d_test:
; CHECK: ld.d
; CHECK: srli.d
; CHECK: st.d
; CHECK: .size llvm_mips_srli_d_test
;
@llvm_mips_srlri_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_srlri_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_srlri_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_srlri_b_ARG1
  %1 = tail call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %0, i32 7)
  store <16 x i8> %1, <16 x i8>* @llvm_mips_srlri_b_RES
  ret void
}

declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32) nounwind

; CHECK: llvm_mips_srlri_b_test:
; CHECK: ld.b
; CHECK: srlri.b
; CHECK: st.b
; CHECK: .size llvm_mips_srlri_b_test
;
@llvm_mips_srlri_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_srlri_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_srlri_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_srlri_h_ARG1
  %1 = tail call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %0, i32 7)
  store <8 x i16> %1, <8 x i16>* @llvm_mips_srlri_h_RES
  ret void
}

declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32) nounwind

; CHECK: llvm_mips_srlri_h_test:
; CHECK: ld.h
; CHECK: srlri.h
; CHECK: st.h
; CHECK: .size llvm_mips_srlri_h_test
;
@llvm_mips_srlri_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_srlri_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_srlri_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_srlri_w_ARG1
  %1 = tail call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %0, i32 7)
  store <4 x i32> %1, <4 x i32>* @llvm_mips_srlri_w_RES
  ret void
}

declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32) nounwind

; CHECK: llvm_mips_srlri_w_test:
; CHECK: ld.w
; CHECK: srlri.w
; CHECK: st.w
; CHECK: .size llvm_mips_srlri_w_test
;
@llvm_mips_srlri_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_srlri_d_RES = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_srlri_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_srlri_d_ARG1
  %1 = tail call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %0, i32 7)
  store <2 x i64> %1, <2 x i64>* @llvm_mips_srlri_d_RES
  ret void
}

declare <2 x i64> @llvm.mips.srlri.d(<2 x i64>, i32) nounwind

; CHECK: llvm_mips_srlri_d_test:
; CHECK: ld.d
; CHECK: srlri.d
; CHECK: st.d
; CHECK: .size llvm_mips_srlri_d_test
;