; MemorySanitizer instrumentation test: basic stores, loads, and retval shadow.
; Run twice: once plain, once with origin tracking (CHECK-ORIGINS prefix).
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; Check the presence of __msan_init
; CHECK: @llvm.global_ctors {{.*}} @__msan_init

; Check the presence and the linkage type of __msan_track_origins and
; other interface symbols.
; CHECK-NOT: @__msan_track_origins
; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1
; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0
; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32
; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64
; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32


; Check instrumentation of stores

define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 4
  ret void
}

; CHECK: @Store
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK: store
; CHECK: ret void


; Check instrumentation of aligned stores
; Shadow store has the same alignment as the original store; origin store
; does not specify explicit alignment.

define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 32
  ret void
}

; CHECK: @AlignedStore
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store {{.*}} align 32
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK: store {{.*}} align 32
; CHECK: ret void


; load followed by cmp: check that we load the shadow and call __msan_warning.
define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32* %a, align 4
  %tobool = icmp eq i32 %0, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void (...)* @foo() nounwind
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret void
}

declare void @foo(...)

; CHECK: @LoadAndCmp
; CHECK: = load
; CHECK: = load
; CHECK: call void @__msan_warning_noreturn()
; CHECK-NEXT: call void asm sideeffect
; CHECK-NEXT: unreachable
; CHECK: ret void

; Check that we store the shadow for the retval.
define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
entry:
  ret i32 123
}

; CHECK: @ReturnInt
; CHECK: store i32 0,{{.*}}__msan_retval_tls
; CHECK: ret i32

; Check that we get the shadow for the retval.
define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %call = tail call i32 @ReturnInt() nounwind
  store i32 %call, i32* %a, align 4
  ret void
}

; CHECK: @CopyRetVal
; CHECK: load{{.*}}__msan_retval_tls
; CHECK: store
; CHECK: store
; CHECK: ret void


; Check that we generate PHIs for shadow.
define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
entry:
  %tobool = icmp eq i32* %b, null
  br i1 %tobool, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  %0 = load i32* %b, align 4
  br label %if.end

if.else:                                          ; preds = %entry
  %1 = load i32* %c, align 4
  br label %if.end

if.end:                                           ; preds = %if.else, %if.then
  %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
  store i32 %t.0, i32* %a, align 4
  ret void
}

; CHECK: @FuncWithPhi
; CHECK: = phi
; CHECK-NEXT: = phi
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "x << 10"
define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32* %x, align 4
  %1 = shl i32 %0, 10
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK: @ShlConst
; CHECK: = load
; CHECK: = load
; CHECK: shl
; CHECK: shl
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "10 << x": it should have 'sext i1'.
define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32* %x, align 4
  %1 = shl i32 10, %0
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK: @ShlNonConst
; CHECK: = load
; CHECK: = load
; CHECK: = sext i1
; CHECK: store
; CHECK: store
; CHECK: ret void

; SExt
define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
entry:
  %0 = load i16* %b, align 2
  %1 = sext i16 %0 to i32
  store i32 %1, i32* %a, align 4
  ret void
}

; CHECK: @SExt
; CHECK: = load
; CHECK: = load
; CHECK: = sext
; CHECK: = sext
; CHECK: store
; CHECK: store
; CHECK: ret void


; memset
define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind

; CHECK: @MemSet
; CHECK: call i8* @__msan_memset
; CHECK: ret void


; memcpy
define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK: @MemCpy
; CHECK: call i8* @__msan_memcpy
; CHECK: ret void


; memmove is lowered to a call
define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK: @MemMove
; CHECK: call i8* @__msan_memmove
; CHECK: ret void


; Check that we propagate shadow for "select"

define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select i1 %c, i32 %a, i32 %b
  ret i32 %cond
}

; CHECK: @Select
; CHECK: select i1
; CHECK-DAG: or i32
; CHECK-DAG: xor i32
; CHECK: or i32
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select i1
; CHECK: store i32{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret i32


; Check that we propagate origin for "select" with vector condition.
; Select condition is flattened to i1, which is then used to select one of the
; argument origins.

define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK: @SelectVector
; CHECK: select <8 x i1>
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select <8 x i1>
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select <8 x i1>
; CHECK: store <8 x i16>{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret <8 x i16>


; Check that we propagate origin for "select" with scalar condition and vector
; arguments. Select condition shadow is sign-extended to the vector type and
; mixed into the result shadow.
define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select i1 %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK: @SelectVector2
; CHECK: select i1
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-DAG: select i1
; CHECK: ret <8 x i16>


define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory {
entry:
  %c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b
  ret { i64, i64 } %c
}

; CHECK: @SelectStruct
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 }
; CHECK: ret { i64, i64 }


define { i64*, double } @SelectStruct2(i1 zeroext %x, { i64*, double } %a, { i64*, double } %b) readnone sanitize_memory {
entry:
  %c = select i1 %x, { i64*, double } %a, { i64*, double } %b
  ret { i64*, double } %c
}

; CHECK: @SelectStruct2
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64*, double }
; CHECK: ret { i64*, double }


define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i64 %x to i8*
  ret i8* %0
}

; CHECK: @IntToPtr
; CHECK: load i64*{{.*}}__msan_param_tls
; CHECK-ORIGINS-NEXT: load i32*{{.*}}__msan_param_origin_tls
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*


define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i16 %x to i8*
  ret i8* %0
}

; CHECK: @IntToPtr_ZExt
; CHECK: load i16*{{.*}}__msan_param_tls
; CHECK: zext
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*


; Check that we insert exactly one check on udiv
; (2nd arg shadow is checked, 1st arg shadow is propagated)

define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
entry:
  %div = udiv i32 %a, %b
  ret i32 %div
}

; CHECK: @Div
; CHECK: icmp
; CHECK: call void @__msan_warning
; CHECK-NOT: icmp
; CHECK: udiv
; CHECK-NOT: icmp
; CHECK: ret i32


; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)

define zeroext i1 @ICmpSLT(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt i32 %x, 0
  ret i1 %1
}

; CHECK: @ICmpSLT
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGE(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sge i32 %x, 0
  ret i1 %1
}

; CHECK: @ICmpSGE
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGT(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sgt i32 0, %x
  ret i1 %1
}

; CHECK: @ICmpSGT
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSLE(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sle i32 0, %x
  ret i1 %1
}

; CHECK: @ICmpSLE
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1


; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
; of the vector arguments.

define <2 x i1> @ICmpSLT_vector(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt <2 x i32*> %x, zeroinitializer
  ret <2 x i1> %1
}

; CHECK: @ICmpSLT_vector
; CHECK: icmp slt <2 x i64>
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt <2 x i32*>
; CHECK-NOT: call void @__msan_warning
; CHECK: ret <2 x i1>


; Check that we propagate shadow for unsigned relational comparisons with
; constants

define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %cmp = icmp ugt i32 %x, 7
  ret i1 %cmp
}

; CHECK: @ICmpUGTConst
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1


; Check that loads of shadow have the same alignment as the original loads.
; Check that loads of origin have the alignment of max(4, original alignment).
define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 64
  %1 = load volatile i32* %y, align 64
  ret i32 %1
}

; CHECK: @ShadowLoadAlignmentLarge
; CHECK: load volatile i32* {{.*}} align 64
; CHECK: load i32* {{.*}} align 64
; CHECK: ret i32

define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 2
  %1 = load volatile i32* %y, align 2
  ret i32 %1
}

; CHECK: @ShadowLoadAlignmentSmall
; CHECK: load volatile i32* {{.*}} align 2
; CHECK: load i32* {{.*}} align 2
; CHECK-ORIGINS: load i32* {{.*}} align 4
; CHECK: ret i32


; Test vector manipulation instructions.
; Check that the same bit manipulation is applied to the shadow values.
; Check that there is a zero test of the shadow of %idx argument, where present.

define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
  %x = extractelement <4 x i32> %vec, i32 %idx
  ret i32 %x
}

; CHECK: @ExtractElement
; CHECK: extractelement
; CHECK: call void @__msan_warning
; CHECK: extractelement
; CHECK: ret i32

define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
  %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
  ret <4 x i32> %vec1
}

; CHECK: @InsertElement
; CHECK: insertelement
; CHECK: call void @__msan_warning
; CHECK: insertelement
; CHECK: ret <4 x i32>

define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
  %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
                        <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  ret <4 x i32> %vec2
}

; CHECK: @ShuffleVector
; CHECK: shufflevector
; CHECK-NOT: call void @__msan_warning
; CHECK: shufflevector
; CHECK: ret <4 x i32>


; Test bswap intrinsic instrumentation
define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
  %y = tail call i32 @llvm.bswap.i32(i32 %x)
  ret i32 %y
}

declare i32 @llvm.bswap.i32(i32) nounwind readnone

; CHECK: @BSwap
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i32


; Store intrinsic.

define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
  call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
  ret void
}

declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind

; CHECK: @StoreIntrinsic
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: store <4 x i32> {{.*}} align 1
; CHECK: call void @llvm.x86.sse.storeu.ps
; CHECK: ret void


; Load intrinsic.

define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
  %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
  ret <16 x i8> %call
}

declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind

; CHECK: @LoadIntrinsic
; CHECK: load <16 x i8>* {{.*}} align 1
; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32* {{.*}}
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
; CHECK: ret <16 x i8>


; Simple NoMem intrinsic
; Check that shadow is OR'ed, and origin is Select'ed
; And no shadow checks!
define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
  %call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %call
}

declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind

; CHECK: @Paddsw128
; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
; CHECK-NEXT: = or <8 x i16>
; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
; CHECK-NEXT: ret <8 x i16>


; Test handling of vectors of pointers.
; Check that shadow of such vector is a vector of integers.

define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
  %x = load <8 x i8*>* %p
  ret <8 x i8*> %x
}

; CHECK: @VectorOfPointers
; CHECK: load <8 x i8*>*
; CHECK: load <8 x i64>*
; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
; CHECK: ret <8 x i8*>

; Test handling of va_copy.

declare void @llvm.va_copy(i8*, i8*) nounwind

define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
  call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
  ret void
}

; CHECK: @VACopy
; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i32 8, i1 false)
; CHECK: ret void


; Test that va_start instrumentation does not use va_arg_tls*.
; It should work with a local stack copy instead.

%struct.__va_list_tag = type { i32, i32, i8*, i8* }
declare void @llvm.va_start(i8*) nounwind

; Function Attrs: nounwind uwtable
define void @VAStart(i32 %x, ...) sanitize_memory {
entry:
  %x.addr = alloca i32, align 4
  %va = alloca [1 x %struct.__va_list_tag], align 16
  store i32 %x, i32* %x.addr, align 4
  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
  call void @llvm.va_start(i8* %arraydecay1)
  ret void
}

; CHECK: @VAStart
; CHECK: call void @llvm.va_start
; CHECK-NOT: @__msan_va_arg_tls
; CHECK-NOT: @__msan_va_arg_overflow_size_tls
; CHECK: ret void


; Test handling of volatile stores.
; Check that MemorySanitizer does not add a check of the value being stored.

define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store volatile i32 %x, i32* %p, align 4
  ret void
}

; CHECK: @VolatileStore
; CHECK-NOT: @__msan_warning
; CHECK: ret void


; Test that checks are omitted and returned value is always initialized if
; sanitize_memory attribute is missing.
define i32 @NoSanitizeMemory(i32 %x) uwtable {
entry:
  %tobool = icmp eq i32 %x, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void @bar()
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret i32 %x
}

declare void @bar()

; CHECK: @NoSanitizeMemory
; CHECK-NOT: @__msan_warning
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK-NOT: @__msan_warning
; CHECK: ret i32


; Test that stack allocations are unpoisoned in functions missing
; sanitize_memory attribute

define i32 @NoSanitizeMemoryAlloca() {
entry:
  %p = alloca i32, align 4
  %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
  ret i32 %x
}

declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)

; CHECK: @NoSanitizeMemoryAlloca
; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 4, i32 4, i1 false)
; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
; CHECK: ret i32


; Test that undef is unpoisoned in functions missing
; sanitize_memory attribute

define i32 @NoSanitizeMemoryUndef() {
entry:
  %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
  ret i32 %x
}

declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)

; The anchor below previously repeated @NoSanitizeMemoryAlloca (copy-paste
; typo from the previous test), so this test never actually anchored on the
; function under test, @NoSanitizeMemoryUndef.
; CHECK: @NoSanitizeMemoryUndef
; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
; CHECK: ret i32


; Test PHINode instrumentation in blacklisted functions

define i32 @NoSanitizeMemoryPHI(i32 %x) {
entry:
  %tobool = icmp ne i32 %x, 0
  br i1 %tobool, label %cond.true, label %cond.false

cond.true:                                        ; preds = %entry
  br label %cond.end

cond.false:                                       ; preds = %entry
  br label %cond.end

cond.end:                                         ; preds = %cond.false, %cond.true
  %cond = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
  ret i32 %cond
}

; CHECK: [[A:%.*]] = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
; CHECK: store i32 0, i32* bitcast {{.*}} @__msan_retval_tls
; CHECK: ret i32 [[A]]


; Test argument shadow alignment

define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
entry:
  ret <2 x i64> %b
}

; CHECK: @ArgumentShadowAlignment
; CHECK: load <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
; CHECK: ret <2 x i64>


; Test origin propagation for insertvalue

define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
entry:
  %a = insertvalue { i64, i32 } undef, i64 %x, 0
  %b = insertvalue { i64, i32 } %a, i32 %y, 1
  ret { i64, i32 } %b
}

; CHECK-ORIGINS: @make_pair_64_32
; First element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 {{.*}}, 0
; First element origin
; CHECK-ORIGINS: icmp ne i64
; CHECK-ORIGINS: select i1
; First element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } undef, i64 {{.*}}, 0
; Second element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; Second element origin
; CHECK-ORIGINS: icmp ne i32
; CHECK-ORIGINS: select i1
; Second element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; CHECK-ORIGINS: ret { i64, i32 }


; Test shadow propagation for aggregates passed through ellipsis.

%struct.StructByVal = type { i32, i32, i32, i32 }

declare void @VAArgStructFn(i32 %guard, ...)
define void @VAArgStruct(%struct.StructByVal* nocapture %s) sanitize_memory {
entry:
  %agg.tmp2 = alloca %struct.StructByVal, align 8
  %0 = bitcast %struct.StructByVal* %s to i8*
  %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
  %agg.tmp.sroa.0.0.copyload = load i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal* %s, i64 0, i32 2
  %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
  %agg.tmp.sroa.2.0.copyload = load i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
  %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %0, i64 16, i32 4, i1 false)
  call void (i32, ...)* @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
  ret void
}

; "undef" and the first 2 structs go to general purpose registers;
; the third struct goes to the overflow area byval

; CHECK: @VAArgStruct
; undef
; CHECK: store i32 -1, i32* {{.*}}@__msan_va_arg_tls {{.*}}, align 8
; first struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
; second struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
; third struct through the overflow area byval
; CHECK: ptrtoint %struct.StructByVal* {{.*}} to i64
; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 176
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
; CHECK: store i64 16, i64* @__msan_va_arg_overflow_size_tls
; CHECK: call void (i32, ...)* @VAArgStructFn
; CHECK: ret void

declare i32 @InnerTailCall(i32 %a)

define void @MismatchedReturnTypeTailCall(i32 %a) sanitize_memory {
  %b = tail call i32 @InnerTailCall(i32 %a)
  ret void
}

; We used to strip off the 'tail' modifier, but now that we unpoison return slot
; shadow before the call, we don't need to anymore.

; CHECK-LABEL: define void @MismatchedReturnTypeTailCall
; CHECK: tail call i32 @InnerTailCall
; CHECK: ret void