; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK-ORIGINS %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Check the presence of __msan_init
; CHECK: @llvm.global_ctors {{.*}} @__msan_init

; Check the presence and the linkage type of __msan_track_origins
; CHECK: @__msan_track_origins = weak_odr constant i32 0


; Check instrumentation of stores

define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 4
  ret void
}

; CHECK: @Store
; CHECK: load {{.*}} @__msan_param_tls
; CHECK: store
; CHECK: store
; CHECK: ret void
; CHECK-ORIGINS: @Store
; CHECK-ORIGINS: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: store
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store
; CHECK-ORIGINS: ret void


; Check instrumentation of aligned stores
; Shadow store has the same alignment as the original store; origin store
; does not specify explicit alignment.

define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 32
  ret void
}

; CHECK: @AlignedStore
; CHECK: load {{.*}} @__msan_param_tls
; CHECK: store {{.*}} align 32
; CHECK: store {{.*}} align 32
; CHECK: ret void
; CHECK-ORIGINS: @AlignedStore
; CHECK-ORIGINS: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: ret void


; load followed by cmp: check that we load the shadow and call __msan_warning.
define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32* %a, align 4
  %tobool = icmp eq i32 %0, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void (...)* @foo() nounwind
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret void
}

declare void @foo(...)

; CHECK: @LoadAndCmp
; CHECK: = load
; CHECK: = load
; CHECK: call void @__msan_warning_noreturn()
; CHECK-NEXT: call void asm sideeffect
; CHECK-NEXT: unreachable
; CHECK: ret void

; Check that we store the shadow for the retval.
define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
entry:
  ret i32 123
}

; CHECK: @ReturnInt
; CHECK: store i32 0,{{.*}}__msan_retval_tls
; CHECK: ret i32

; Check that we get the shadow for the retval.
define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %call = tail call i32 @ReturnInt() nounwind
  store i32 %call, i32* %a, align 4
  ret void
}

; CHECK: @CopyRetVal
; CHECK: load{{.*}}__msan_retval_tls
; CHECK: store
; CHECK: store
; CHECK: ret void


; Check that we generate PHIs for shadow.
define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
entry:
  %tobool = icmp eq i32* %b, null
  br i1 %tobool, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  %0 = load i32* %b, align 4
  br label %if.end

if.else:                                          ; preds = %entry
  %1 = load i32* %c, align 4
  br label %if.end

if.end:                                           ; preds = %if.else, %if.then
  %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
  store i32 %t.0, i32* %a, align 4
  ret void
}

; CHECK: @FuncWithPhi
; CHECK: = phi
; CHECK-NEXT: = phi
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "x << 10"
define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32* %x, align 4
  %1 = shl i32 %0, 10
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK: @ShlConst
; CHECK: = load
; CHECK: = load
; CHECK: shl
; CHECK: shl
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "10 << x": it should have 'sext i1'.
define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32* %x, align 4
  %1 = shl i32 10, %0
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK: @ShlNonConst
; CHECK: = load
; CHECK: = load
; CHECK: = sext i1
; CHECK: store
; CHECK: store
; CHECK: ret void

; SExt
define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
entry:
  %0 = load i16* %b, align 2
  %1 = sext i16 %0 to i32
  store i32 %1, i32* %a, align 4
  ret void
}

; CHECK: @SExt
; CHECK: = load
; CHECK: = load
; CHECK: = sext
; CHECK: = sext
; CHECK: store
; CHECK: store
; CHECK: ret void


; memset
define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind

; CHECK: @MemSet
; CHECK: call i8* @__msan_memset
; CHECK: ret void


; memcpy
define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK: @MemCpy
; CHECK: call i8* @__msan_memcpy
; CHECK: ret void


; memmove is lowered to a call
define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK: @MemMove
; CHECK: call i8* @__msan_memmove
; CHECK: ret void


; Check that we propagate shadow for "select"

define i32 @Select(i32 %a, i32 %b, i32 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %tobool = icmp ne i32 %c, 0
  %cond = select i1 %tobool, i32 %a, i32 %b
  ret i32 %cond
}

; CHECK: @Select
; CHECK: select
; CHECK-NEXT: select
; CHECK: ret i32


; Check that we propagate origin for "select" with vector condition.
; Select condition is flattened to i1, which is then used to select one of the
; argument origins.

define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK-ORIGINS: @SelectVector
; CHECK-ORIGINS: bitcast <8 x i1> {{.*}} to i8
; CHECK-ORIGINS: icmp ne i8
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: ret <8 x i16>


define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i64 %x to i8*
  ret i8* %0
}

; CHECK: @IntToPtr
; CHECK: load i64*{{.*}}__msan_param_tls
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8


define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i16 %x to i8*
  ret i8* %0
}

; CHECK: @IntToPtr_ZExt
; CHECK: zext
; CHECK-NEXT: inttoptr
; CHECK: ret i8


; Check that we insert exactly one check on udiv
; (2nd arg shadow is checked, 1st arg shadow is propagated)

define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
entry:
  %div = udiv i32 %a, %b
  ret i32 %div
}

; CHECK: @Div
; CHECK: icmp
; CHECK: call void @__msan_warning
; CHECK-NOT: icmp
; CHECK: udiv
; CHECK-NOT: icmp
; CHECK: ret i32


; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)

define zeroext i1 @ICmpSLT(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt i32 %x, 0
  ret i1 %1
}

; CHECK: @ICmpSLT
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGE(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sge i32 %x, 0
  ret i1 %1
}

; CHECK: @ICmpSGE
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGT(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sgt i32 0, %x
  ret i1 %1
}

; CHECK: @ICmpSGT
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSLE(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sle i32 0, %x
  ret i1 %1
}

; CHECK: @ICmpSLE
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1


; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
; of the vector arguments.

define <2 x i1> @ICmpSLT_vector(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt <2 x i32*> %x, zeroinitializer
  ret <2 x i1> %1
}

; CHECK: @ICmpSLT_vector
; CHECK: icmp slt <2 x i64>
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt <2 x i32*>
; CHECK-NOT: call void @__msan_warning
; CHECK: ret <2 x i1>


; Check that we propagate shadow for unsigned relational comparisons with
; constants

define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %cmp = icmp ugt i32 %x, 7
  ret i1 %cmp
}

; CHECK: @ICmpUGTConst
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1


; Check that loads of shadow have the same alignment as the original loads.
; Check that loads of origin have the alignment of max(4, original alignment).

define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 64
  %1 = load volatile i32* %y, align 64
  ret i32 %1
}

; CHECK: @ShadowLoadAlignmentLarge
; CHECK: load i32* {{.*}} align 64
; CHECK: load volatile i32* {{.*}} align 64
; CHECK: ret i32

define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 2
  %1 = load volatile i32* %y, align 2
  ret i32 %1
}

; CHECK: @ShadowLoadAlignmentSmall
; CHECK: load i32* {{.*}} align 2
; CHECK: load volatile i32* {{.*}} align 2
; CHECK: ret i32

; CHECK-ORIGINS: @ShadowLoadAlignmentSmall
; CHECK-ORIGINS: load i32* {{.*}} align 2
; CHECK-ORIGINS: load i32* {{.*}} align 4
; CHECK-ORIGINS: load volatile i32* {{.*}} align 2
; CHECK-ORIGINS: ret i32


; Test vector manipulation instructions.
; Check that the same bit manipulation is applied to the shadow values.
; Check that there is a zero test of the shadow of %idx argument, where present.

define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
  %x = extractelement <4 x i32> %vec, i32 %idx
  ret i32 %x
}

; CHECK: @ExtractElement
; CHECK: extractelement
; CHECK: call void @__msan_warning
; CHECK: extractelement
; CHECK: ret i32

define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
  %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
  ret <4 x i32> %vec1
}

; CHECK: @InsertElement
; CHECK: insertelement
; CHECK: call void @__msan_warning
; CHECK: insertelement
; CHECK: ret <4 x i32>

define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
  %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
                        <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  ret <4 x i32> %vec2
}

; CHECK: @ShuffleVector
; CHECK: shufflevector
; CHECK-NOT: call void @__msan_warning
; CHECK: shufflevector
; CHECK: ret <4 x i32>


; Test bswap intrinsic instrumentation
define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
  %y = tail call i32 @llvm.bswap.i32(i32 %x)
  ret i32 %y
}

declare i32 @llvm.bswap.i32(i32) nounwind readnone

; CHECK: @BSwap
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i32


; Store intrinsic.

define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
  call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
  ret void
}

declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind

; CHECK: @StoreIntrinsic
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: store <4 x i32> {{.*}} align 1
; CHECK: call void @llvm.x86.sse.storeu.ps
; CHECK: ret void


; Load intrinsic.

define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
  %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
  ret <16 x i8> %call
}

declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind

; CHECK: @LoadIntrinsic
; CHECK: load <16 x i8>* {{.*}} align 1
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
; CHECK: ret <16 x i8>

; CHECK-ORIGINS: @LoadIntrinsic
; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32* {{.*}}
; CHECK-ORIGINS: call <16 x i8> @llvm.x86.sse3.ldu.dq
; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
; CHECK-ORIGINS: ret <16 x i8>


; Simple NoMem intrinsic
; Check that shadow is OR'ed, and origin is Select'ed
; And no shadow checks!

define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
  %call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %call
}

declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind

; CHECK: @Paddsw128
; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-NEXT: = or <8 x i16>
; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
; CHECK-NEXT: ret <8 x i16>

; CHECK-ORIGINS: @Paddsw128
; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
; CHECK-ORIGINS: call <8 x i16> @llvm.x86.sse2.padds.w
; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
; CHECK-ORIGINS: ret <8 x i16>


; Test handling of vectors of pointers.
; Check that shadow of such vector is a vector of integers.

define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
  %x = load <8 x i8*>* %p
  ret <8 x i8*> %x
}

; CHECK: @VectorOfPointers
; CHECK: load <8 x i64>*
; CHECK: load <8 x i8*>*
; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
; CHECK: ret <8 x i8*>

; Test handling of va_copy.

declare void @llvm.va_copy(i8*, i8*) nounwind

define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
  call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
  ret void
}

; CHECK: @VACopy
; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i32 8, i1 false)
; CHECK: ret void


; Test handling of volatile stores.
; Check that MemorySanitizer does not add a check of the value being stored.

define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store volatile i32 %x, i32* %p, align 4
  ret void
}

; CHECK: @VolatileStore
; CHECK-NOT: @__msan_warning
; CHECK: ret void


; Test that checks are omitted but shadow propagation is kept if
; sanitize_memory attribute is missing.

define i32 @NoSanitizeMemory(i32 %x) uwtable {
entry:
  %tobool = icmp eq i32 %x, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void @bar()
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret i32 %x
}

declare void @bar()

; CHECK: @NoSanitizeMemory
; CHECK-NOT: @__msan_warning
; CHECK: load i32* {{.*}} @__msan_param_tls
; CHECK-NOT: @__msan_warning
; CHECK: store {{.*}} @__msan_retval_tls
; CHECK-NOT: @__msan_warning
; CHECK: ret i32