; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instsimplify -S | FileCheck %s
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s

declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.ssub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.smul.with.overflow.i8(i8 %a, i8 %b)

define i1 @test_uadd1() {
; CHECK-LABEL: @test_uadd1(
; CHECK-NEXT:    ret i1 true
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_uadd2() {
; CHECK-LABEL: @test_uadd2(
; CHECK-NEXT:    ret i8 42
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_uadd3(i8 %v) {
; CHECK-LABEL: @test_uadd3(
; CHECK-NEXT:    ret { i8, i1 } undef
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd4(i8 %v) {
; CHECK-LABEL: @test_uadd4(
; CHECK-NEXT:    ret { i8, i1 } undef
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define i1 @test_sadd1() {
; CHECK-LABEL: @test_sadd1(
; CHECK-NEXT:    ret i1 true
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_sadd2() {
; CHECK-LABEL: @test_sadd2(
; CHECK-NEXT:    ret i8 -86
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_sadd3(i8 %v) {
; CHECK-LABEL: @test_sadd3(
; CHECK-NEXT:    ret { i8, i1 } undef
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd4(i8 %v) {
; CHECK-LABEL: @test_sadd4(
; CHECK-NEXT:    ret { i8, i1 } undef
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_usub1(i8 %V) {
; CHECK-LABEL: @test_usub1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub2(i8 %V) {
; CHECK-LABEL: @test_usub2(
; CHECK-NEXT:    ret { i8, i1 } undef
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub3(i8 %V) {
; CHECK-LABEL: @test_usub3(
; CHECK-NEXT:    ret { i8, i1 } undef
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub1(i8 %V) {
; CHECK-LABEL: @test_ssub1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub2(i8 %V) {
; CHECK-LABEL: @test_ssub2(
; CHECK-NEXT:    ret { i8, i1 } undef
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub3(i8 %V) {
; CHECK-LABEL: @test_ssub3(
; CHECK-NEXT:    ret { i8, i1 } undef
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

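; For the multiply overflow tests below: a zero operand means the product is
; zero and cannot overflow, and an undef operand may be chosen to be zero, so
; each call is expected to fold to { i8 0, i1 false } (zeroinitializer).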
define {i8, i1} @test_umul1(i8 %V) {
; CHECK-LABEL: @test_umul1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul2(i8 %V) {
; CHECK-LABEL: @test_umul2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul3(i8 %V) {
; CHECK-LABEL: @test_umul3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul4(i8 %V) {
; CHECK-LABEL: @test_umul4(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul1(i8 %V) {
; CHECK-LABEL: @test_smul1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul2(i8 %V) {
; CHECK-LABEL: @test_smul2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul3(i8 %V) {
; CHECK-LABEL: @test_smul3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul4(i8 %V) {
; CHECK-LABEL: @test_smul4(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

declare i256 @llvm.cttz.i256(i256 %src, i1 %is_zero_undef)

define i256 @test_cttz() {
; CHECK-LABEL: @test_cttz(
; CHECK-NEXT:    ret i256 1
;
  %x = call i256 @llvm.cttz.i256(i256 10, i1 false)
  ret i256 %x
}

declare <2 x i256> @llvm.cttz.v2i256(<2 x i256> %src, i1 %is_zero_undef)

define <2 x i256> @test_cttz_vec() {
; CHECK-LABEL: @test_cttz_vec(
; CHECK-NEXT:    ret <2 x i256> <i256 1, i256 1>
;
  %x = call <2 x i256> @llvm.cttz.v2i256(<2 x i256> <i256 10, i256 10>, i1 false)
  ret <2 x i256> %x
}

declare i256 @llvm.ctpop.i256(i256 %src)

define i256 @test_ctpop() {
; CHECK-LABEL: @test_ctpop(
; CHECK-NEXT:    ret i256 2
;
  %x = call i256 @llvm.ctpop.i256(i256 10)
  ret i256 %x
}

; Test a non-intrinsic that we know about as a library call.
declare float @fabs(float %x)

define float @test_fabs_libcall() {
; CHECK-LABEL: @test_fabs_libcall(
; CHECK-NEXT:    [[X:%.*]] = call float @fabs(float -4.200000e+01)
; CHECK-NEXT:    ret float 4.200000e+01
;

  %x = call float @fabs(float -42.0)
  ; This is still a real function call, so instsimplify won't nuke it -- other
  ; passes have to do that.

  ret float %x
}

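; The intrinsics declared below are idempotent: applying one of them to its own
; result returns the same value, so the second call in each pair simplifies
; away and its uses are rewritten to the first call.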
declare float @llvm.fabs.f32(float) nounwind readnone
declare float @llvm.floor.f32(float) nounwind readnone
declare float @llvm.ceil.f32(float) nounwind readnone
declare float @llvm.trunc.f32(float) nounwind readnone
declare float @llvm.rint.f32(float) nounwind readnone
declare float @llvm.nearbyint.f32(float) nounwind readnone
declare float @llvm.canonicalize.f32(float) nounwind readnone

; Test idempotent intrinsics
define float @test_idempotence(float %a) {
; CHECK-LABEL: @test_idempotence(
; CHECK-NEXT:    [[A0:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]])
; CHECK-NEXT:    [[B0:%.*]] = call float @llvm.floor.f32(float [[A]])
; CHECK-NEXT:    [[C0:%.*]] = call float @llvm.ceil.f32(float [[A]])
; CHECK-NEXT:    [[D0:%.*]] = call float @llvm.trunc.f32(float [[A]])
; CHECK-NEXT:    [[E0:%.*]] = call float @llvm.rint.f32(float [[A]])
; CHECK-NEXT:    [[F0:%.*]] = call float @llvm.nearbyint.f32(float [[A]])
; CHECK-NEXT:    [[G0:%.*]] = call float @llvm.canonicalize.f32(float [[A]])
; CHECK-NEXT:    [[R0:%.*]] = fadd float [[A0]], [[B0]]
; CHECK-NEXT:    [[R1:%.*]] = fadd float [[R0]], [[C0]]
; CHECK-NEXT:    [[R2:%.*]] = fadd float [[R1]], [[D0]]
; CHECK-NEXT:    [[R3:%.*]] = fadd float [[R2]], [[E0]]
; CHECK-NEXT:    [[R4:%.*]] = fadd float [[R3]], [[F0]]
; CHECK-NEXT:    [[R5:%.*]] = fadd float [[R4]], [[G0]]
; CHECK-NEXT:    ret float [[R5]]
;

  %a0 = call float @llvm.fabs.f32(float %a)
  %a1 = call float @llvm.fabs.f32(float %a0)

  %b0 = call float @llvm.floor.f32(float %a)
  %b1 = call float @llvm.floor.f32(float %b0)

  %c0 = call float @llvm.ceil.f32(float %a)
  %c1 = call float @llvm.ceil.f32(float %c0)

  %d0 = call float @llvm.trunc.f32(float %a)
  %d1 = call float @llvm.trunc.f32(float %d0)

  %e0 = call float @llvm.rint.f32(float %a)
  %e1 = call float @llvm.rint.f32(float %e0)

  %f0 = call float @llvm.nearbyint.f32(float %a)
  %f1 = call float @llvm.nearbyint.f32(float %f0)

  %g0 = call float @llvm.canonicalize.f32(float %a)
  %g1 = call float @llvm.canonicalize.f32(float %g0)

  %r0 = fadd float %a1, %b1
  %r1 = fadd float %r0, %c1
  %r2 = fadd float %r1, %d1
  %r3 = fadd float %r2, %e1
  %r4 = fadd float %r3, %f1
  %r5 = fadd float %r4, %g1

  ret float %r5
}

define i8* @operator_new() {
; CHECK-LABEL: @operator_new(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias i8* @_Znwm(i64 8)
; CHECK-NEXT:    br i1 false, label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK:       cast.notnull:
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 4
; CHECK-NEXT:    br label [[CAST_END]]
; CHECK:       cast.end:
; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi i8* [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret i8* [[CAST_RESULT]]
;
entry:
  %call = tail call noalias i8* @_Znwm(i64 8)
  %cmp = icmp eq i8* %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi i8* [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret i8* %cast.result

}

declare nonnull noalias i8* @_Znwm(i64)

%"struct.std::nothrow_t" = type { i8 }
@_ZSt7nothrow = external global %"struct.std::nothrow_t"

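; @_Znwm above is declared nonnull, so the null check in @operator_new folds to
; "br i1 false". The nothrow operator new and malloc below carry no such
; guarantee, so their null checks must survive instsimplify.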
define i8* @operator_new_nothrow_t() {
; CHECK-LABEL: @operator_new_nothrow_t(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias i8* @_ZnamRKSt9nothrow_t(i64 8, %"struct.std::nothrow_t"* @_ZSt7nothrow)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[CALL]], null
; CHECK-NEXT:    br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK:       cast.notnull:
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 4
; CHECK-NEXT:    br label [[CAST_END]]
; CHECK:       cast.end:
; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi i8* [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret i8* [[CAST_RESULT]]
;
entry:
  %call = tail call noalias i8* @_ZnamRKSt9nothrow_t(i64 8, %"struct.std::nothrow_t"* @_ZSt7nothrow)
  %cmp = icmp eq i8* %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi i8* [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret i8* %cast.result

}

declare i8* @_ZnamRKSt9nothrow_t(i64, %"struct.std::nothrow_t"*) nounwind

define i8* @malloc_can_return_null() {
; CHECK-LABEL: @malloc_can_return_null(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias i8* @malloc(i64 8)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[CALL]], null
; CHECK-NEXT:    br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK:       cast.notnull:
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 4
; CHECK-NEXT:    br label [[CAST_END]]
; CHECK:       cast.end:
; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi i8* [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret i8* [[CAST_RESULT]]
;
entry:
  %call = tail call noalias i8* @malloc(i64 8)
  %cmp = icmp eq i8* %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi i8* [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret i8* %cast.result

}

define i32 @call_null() {
; CHECK-LABEL: @call_null(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 null()
; CHECK-NEXT:    ret i32 undef
;
entry:
  %call = call i32 null()
  ret i32 %call
}

define i32 @call_undef() {
; CHECK-LABEL: @call_undef(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 undef()
; CHECK-NEXT:    ret i32 undef
;
entry:
  %call = call i32 undef()
  ret i32 %call
}

@GV = private constant [8 x i32] [i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49]

define <8 x i32> @partial_masked_load() {
; CHECK-LABEL: @partial_masked_load(
; CHECK-NEXT:    ret <8 x i32> <i32 undef, i32 undef, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -2) to <8 x i32>*), i32 4, <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
  ret <8 x i32> %masked.load
}

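; With an undef mask, every lane may be treated as disabled, so the load below
; is expected to fold to its passthru operand.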
define <8 x i32> @masked_load_undef_mask(<8 x i32>* %V) {
; CHECK-LABEL: @masked_load_undef_mask(
; CHECK-NEXT:    ret <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %V, i32 4, <8 x i1> undef, <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>)
  ret <8 x i32> %masked.load
}

declare noalias i8* @malloc(i64)

declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>)

declare double @llvm.powi.f64(double, i32)
declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32)

define double @constant_fold_powi() {
; CHECK-LABEL: @constant_fold_powi(
; CHECK-NEXT:    ret double 9.000000e+00
;
  %t0 = call double @llvm.powi.f64(double 3.00000e+00, i32 2)
  ret double %t0
}

define <2 x double> @constant_fold_powi_vec() {
; CHECK-LABEL: @constant_fold_powi_vec(
; CHECK-NEXT:    ret <2 x double> <double 9.000000e+00, double 2.500000e+01>
;
  %t0 = call <2 x double> @llvm.powi.v2f64(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i32 2)
  ret <2 x double> %t0
}

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i9 @llvm.fshr.i9(i9, i9, i9)
declare <2 x i7> @llvm.fshl.v2i7(<2 x i7>, <2 x i7>, <2 x i7>)
declare <2 x i8> @llvm.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>)

define i8 @fshl_no_shift(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 0)
  ret i8 %z
}

define i9 @fshr_no_shift(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 0)
  ret i9 %z
}

define i8 @fshl_no_shift_modulo_bitwidth(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 40)
  ret i8 %z
}

define i9 @fshr_no_shift_modulo_bitwidth(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 189)
  ret i9 %z
}

define <2 x i7> @fshl_no_shift_modulo_bitwidth_splat(<2 x i7> %x, <2 x i7> %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT:    ret <2 x i7> [[X:%.*]]
;
  %z = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> %x, <2 x i7> %y, <2 x i7> <i7 21, i7 21>)
  ret <2 x i7> %z
}

define <2 x i8> @fshr_no_shift_modulo_bitwidth_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT:    ret <2 x i8> [[Y:%.*]]
;
  %z = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> <i8 72, i8 72>)
  ret <2 x i8> %z
}