; MemorySanitizer instrumentation test (LLVM IR + FileCheck directives).
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK-ORIGINS %s
; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s -check-prefix=CHECK-AA

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; Check the presence of __msan_init
; CHECK: @llvm.global_ctors {{.*}} @__msan_init

; Check the presence and the linkage type of __msan_track_origins and
; other interface symbols.
; The *_tls globals below are the thread-local slots MSan uses to pass
; shadow (and, with origins enabled, origin) values across function calls.
; CHECK-NOT: @__msan_track_origins
; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1
; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0
; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32
; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64
; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32
     23 
     24 
; Check instrumentation of stores

define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 4
  ret void
}

; Two stores are expected: the shadow store plus the original store.
; CHECK: @Store
; CHECK: load {{.*}} @__msan_param_tls
; CHECK: store
; CHECK: store
; CHECK: ret void
; With origin tracking the origin store is guarded by a shadow-is-nonzero
; test, hence the icmp/br diamond below.
; CHECK-ORIGINS: @Store
; CHECK-ORIGINS: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: store
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store
; CHECK-ORIGINS: ret void
     49 
     50 
; Check instrumentation of aligned stores
; Shadow store has the same alignment as the original store; origin store
; does not specify explicit alignment.

define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 32
  ret void
}

; CHECK: @AlignedStore
; CHECK: load {{.*}} @__msan_param_tls
; CHECK: store {{.*}} align 32
; CHECK: store {{.*}} align 32
; CHECK: ret void
; CHECK-ORIGINS: @AlignedStore
; CHECK-ORIGINS: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: ret void
     77 
     78 
     79 ; load followed by cmp: check that we load the shadow and call __msan_warning.
     80 define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
     81 entry:
     82   %0 = load i32* %a, align 4
     83   %tobool = icmp eq i32 %0, 0
     84   br i1 %tobool, label %if.end, label %if.then
     85 
     86 if.then:                                          ; preds = %entry
     87   tail call void (...)* @foo() nounwind
     88   br label %if.end
     89 
     90 if.end:                                           ; preds = %entry, %if.then
     91   ret void
     92 }
     93 
     94 declare void @foo(...)
     95 
     96 ; CHECK: @LoadAndCmp
     97 ; CHECK: = load
     98 ; CHECK: = load
     99 ; CHECK: call void @__msan_warning_noreturn()
    100 ; CHECK-NEXT: call void asm sideeffect
    101 ; CHECK-NEXT: unreachable
    102 ; CHECK: ret void
    103 
; Check that we store the shadow for the retval.
define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
entry:
  ret i32 123
}

; Returning a constant: the retval shadow written to TLS is fully clean (0).
; CHECK: @ReturnInt
; CHECK: store i32 0,{{.*}}__msan_retval_tls
; CHECK: ret i32
    113 
; Check that we get the shadow for the retval.
define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %call = tail call i32 @ReturnInt() nounwind
  store i32 %call, i32* %a, align 4
  ret void
}

; The callee's retval shadow is read back from the retval TLS slot.
; CHECK: @CopyRetVal
; CHECK: load{{.*}}__msan_retval_tls
; CHECK: store
; CHECK: store
; CHECK: ret void
    127 
    128 
; Check that we generate PHIs for shadow.
define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
entry:
  %tobool = icmp eq i32* %b, null
  br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
  %0 = load i32* %b, align 4
  br label %if.end

  if.else:                                          ; preds = %entry
  %1 = load i32* %c, align 4
  br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
  %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
  store i32 %t.0, i32* %a, align 4
  ret void
}

; The shadow phi is placed right next to the original phi.
; CHECK: @FuncWithPhi
; CHECK: = phi
; CHECK-NEXT: = phi
; CHECK: store
; CHECK: store
; CHECK: ret void
    155 
; Compute shadow for "x << 10"
define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32* %x, align 4
  %1 = shl i32 %0, 10
  store i32 %1, i32* %x, align 4
  ret void
}

; Shift by a constant amount: the shadow is shifted by the same constant.
; CHECK: @ShlConst
; CHECK: = load
; CHECK: = load
; CHECK: shl
; CHECK: shl
; CHECK: store
; CHECK: store
; CHECK: ret void
    173 
; Compute shadow for "10 << x": it should have 'sext i1'.
define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32* %x, align 4
  %1 = shl i32 10, %0
  store i32 %1, i32* %x, align 4
  ret void
}

; A poisoned shift amount poisons the whole result, via sext of an i1 test.
; CHECK: @ShlNonConst
; CHECK: = load
; CHECK: = load
; CHECK: = sext i1
; CHECK: store
; CHECK: store
; CHECK: ret void
    190 
; SExt: the shadow is sign-extended alongside the value.
define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
entry:
  %0 = load i16* %b, align 2
  %1 = sext i16 %0 to i32
  store i32 %1, i32* %a, align 4
  ret void
}

; CHECK: @SExt
; CHECK: = load
; CHECK: = load
; CHECK: = sext
; CHECK: = sext
; CHECK: store
; CHECK: store
; CHECK: ret void
    208 
    209 
; memset: lowered to the MSan runtime wrapper __msan_memset.
define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind

; CHECK: @MemSet
; CHECK: call i8* @__msan_memset
; CHECK: ret void
    222 
    223 
; memcpy: lowered to the MSan runtime wrapper __msan_memcpy.
define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK: @MemCpy
; CHECK: call i8* @__msan_memcpy
; CHECK: ret void
    236 
    237 
; memmove is lowered to a call
define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK: @MemMove
; CHECK: call i8* @__msan_memmove
; CHECK: ret void
    250 
    251 
; Check that we propagate shadow for "select"

define i32 @Select(i32 %a, i32 %b, i32 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %tobool = icmp ne i32 %c, 0
  %cond = select i1 %tobool, i32 %a, i32 %b
  ret i32 %cond
}

; The shadow select is emitted immediately before/after the original select.
; CHECK: @Select
; CHECK: select
; CHECK-NEXT: select
; CHECK: ret i32
    265 
    266 
; Check that we propagate origin for "select" with vector condition.
; Select condition is flattened to i1, which is then used to select one of the
; argument origins.

define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK-ORIGINS: @SelectVector
; CHECK-ORIGINS: bitcast <8 x i1> {{.*}} to i8
; CHECK-ORIGINS: icmp ne i8
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: ret <8 x i16>
    282 
    283 
; inttoptr with matching width: shadow is passed through unchanged.
define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i64 %x to i8*
  ret i8* %0
}

; CHECK: @IntToPtr
; CHECK: load i64*{{.*}}__msan_param_tls
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8
    295 
    296 
; inttoptr from a narrower integer: shadow must be zero-extended first.
define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i16 %x to i8*
  ret i8* %0
}

; CHECK: @IntToPtr_ZExt
; CHECK: zext
; CHECK-NEXT: inttoptr
; CHECK: ret i8
    307 
    308 
; Check that we insert exactly one check on udiv
; (2nd arg shadow is checked, 1st arg shadow is propagated)

define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
entry:
  %div = udiv i32 %a, %b
  ret i32 %div
}

; CHECK: @Div
; CHECK: icmp
; CHECK: call void @__msan_warning
; CHECK-NOT: icmp
; CHECK: udiv
; CHECK-NOT: icmp
; CHECK: ret i32
    325 
    326 
; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)

define zeroext i1 @ICmpSLT(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt i32 %x, 0
  ret i1 %1
}

; Sign-bit test: shadow is computed with a second icmp, no warning call.
; CHECK: @ICmpSLT
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
    340 
; x >= 0 is also a sign-bit test; its shadow uses an slt on the shadow bits.
define zeroext i1 @ICmpSGE(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sge i32 %x, 0
  ret i1 %1
}

; CHECK: @ICmpSGE
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
    352 
; 0 > x (constant on the left) is still recognized as a sign-bit test.
define zeroext i1 @ICmpSGT(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sgt i32 0, %x
  ret i1 %1
}

; CHECK: @ICmpSGT
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
    364 
; 0 <= x (constant on the left) is still recognized as a sign-bit test.
define zeroext i1 @ICmpSLE(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sle i32 0, %x
  ret i1 %1
}

; CHECK: @ICmpSLE
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
    376 
    377 
; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
; of the vector arguments.

define <2 x i1> @ICmpSLT_vector(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt <2 x i32*> %x, zeroinitializer
  ret <2 x i1> %1
}

; The shadow of a vector of pointers is a vector of i64; the shadow compare
; therefore operates on <2 x i64>.
; CHECK: @ICmpSLT_vector
; CHECK: icmp slt <2 x i64>
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt <2 x i32*>
; CHECK-NOT: call void @__msan_warning
; CHECK: ret <2 x i1>
    392 
    393 
; Check that we propagate shadow for unsigned relational comparisons with
; constants

define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %cmp = icmp ugt i32 %x, 7
  ret i1 %cmp
}

; Three ugt compares are expected: two on shadow-derived values plus the
; original compare, with no warning calls in between.
; CHECK: @ICmpUGTConst
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
    411 
    412 
    413 ; Check that loads of shadow have the same aligment as the original loads.
    414 ; Check that loads of origin have the aligment of max(4, original alignment).
    415 
; Alignment larger than 4: shadow load keeps the original's align 64.
define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 64
  %1 = load volatile i32* %y, align 64
  ret i32 %1
}

; CHECK: @ShadowLoadAlignmentLarge
; CHECK: load i32* {{.*}} align 64
; CHECK: load volatile i32* {{.*}} align 64
; CHECK: ret i32
    426 
; Alignment smaller than 4: shadow load keeps align 2, but the origin load
; (origins build only) is bumped to align 4.
define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 2
  %1 = load volatile i32* %y, align 2
  ret i32 %1
}

; CHECK: @ShadowLoadAlignmentSmall
; CHECK: load i32* {{.*}} align 2
; CHECK: load volatile i32* {{.*}} align 2
; CHECK: ret i32

; CHECK-ORIGINS: @ShadowLoadAlignmentSmall
; CHECK-ORIGINS: load i32* {{.*}} align 2
; CHECK-ORIGINS: load i32* {{.*}} align 4
; CHECK-ORIGINS: load volatile i32* {{.*}} align 2
; CHECK-ORIGINS: ret i32
    443 
    444 
; Test vector manipulation instructions.
; Check that the same bit manipulation is applied to the shadow values.
; Check that there is a zero test of the shadow of %idx argument, where present.

define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
  %x = extractelement <4 x i32> %vec, i32 %idx
  ret i32 %x
}

; CHECK: @ExtractElement
; CHECK: extractelement
; CHECK: call void @__msan_warning
; CHECK: extractelement
; CHECK: ret i32
    459 
; Like ExtractElement: the %idx shadow is checked (warning call), and the
; same insertelement is applied to the shadow vector.
define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
  %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
  ret <4 x i32> %vec1
}

; CHECK: @InsertElement
; CHECK: insertelement
; CHECK: call void @__msan_warning
; CHECK: insertelement
; CHECK: ret <4 x i32>
    470 
; shufflevector has a constant mask, so no shadow check (no warning call);
; the same shuffle is applied to the shadow vectors.
define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
  %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
                        <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  ret <4 x i32> %vec2
}

; CHECK: @ShuffleVector
; CHECK: shufflevector
; CHECK-NOT: call void @__msan_warning
; CHECK: shufflevector
; CHECK: ret <4 x i32>
    482 
    483 
; Test bswap intrinsic instrumentation
define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
  %y = tail call i32 @llvm.bswap.i32(i32 %x)
  ret i32 %y
}

declare i32 @llvm.bswap.i32(i32) nounwind readnone

; The shadow is byte-swapped with a second llvm.bswap.i32 call; no checks.
; CHECK: @BSwap
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i32
    499 
    500 
; Store intrinsic.

define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
  call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
  ret void
}

declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind

; The shadow is stored with a plain (align 1, unconditional) vector store
; before the original intrinsic.
; CHECK: @StoreIntrinsic
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: store <4 x i32> {{.*}} align 1
; CHECK: call void @llvm.x86.sse.storeu.ps
; CHECK: ret void
    516 
    517 
; Load intrinsic.

define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
  %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
  ret <16 x i8> %call
}

declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind

; Shadow is loaded (align 1, unconditional) and stored to the retval TLS slot.
; CHECK: @LoadIntrinsic
; CHECK: load <16 x i8>* {{.*}} align 1
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls

; CHECK: ret <16 x i8>

; With origins, the loaded origin is forwarded to the retval origin TLS slot.
; CHECK-ORIGINS: @LoadIntrinsic
; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32* {{.*}}
; CHECK-ORIGINS: call <16 x i8> @llvm.x86.sse3.ldu.dq
; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
; CHECK-ORIGINS: ret <16 x i8>
    540 
    541 
; Simple NoMem intrinsic
; Check that shadow is OR'ed, and origin is Select'ed
; And no shadow checks!

define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
  %call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %call
}

declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind

; CHECK: @Paddsw128
; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-NEXT: = or <8 x i16>
; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
; CHECK-NEXT: ret <8 x i16>

; The combined origin is chosen by testing whether the second argument's
; shadow (flattened to i128) is non-zero.
; CHECK-ORIGINS: @Paddsw128
; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
; CHECK-ORIGINS: call <8 x i16> @llvm.x86.sse2.padds.w
; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
; CHECK-ORIGINS: ret <8 x i16>
    570 
    571 
; Test handling of vectors of pointers.
; Check that shadow of such vector is a vector of integers.

define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
  %x = load <8 x i8*>* %p
  ret <8 x i8*> %x
}

; CHECK: @VectorOfPointers
; CHECK: load <8 x i64>*
; CHECK: load <8 x i8*>*
; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
; CHECK: ret <8 x i8*>
    585 
; Test handling of va_copy.

declare void @llvm.va_copy(i8*, i8*) nounwind

define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
  call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
  ret void
}

; va_copy unpoisons the destination va_list (24 bytes on x86_64).
; CHECK: @VACopy
; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i32 8, i1 false)
; CHECK: ret void
    598 
    599 
; Test handling of volatile stores.
; Check that MemorySanitizer does not add a check of the value being stored.

define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store volatile i32 %x, i32* %p, align 4
  ret void
}

; CHECK: @VolatileStore
; CHECK-NOT: @__msan_warning
; CHECK: ret void
    612 
    613 
; Test that checks are omitted but shadow propagation is kept if
; sanitize_memory attribute is missing.

define i32 @NoSanitizeMemory(i32 %x) uwtable {
entry:
  %tobool = icmp eq i32 %x, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void @bar()
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret i32 %x
}

declare void @bar()

; Shadow still flows through the param/retval TLS slots, but no warning
; calls are inserted anywhere in the function.
; CHECK: @NoSanitizeMemory
; CHECK-NOT: @__msan_warning
; CHECK: load i32* {{.*}} @__msan_param_tls
; CHECK-NOT: @__msan_warning
; CHECK: store {{.*}} @__msan_retval_tls
; CHECK-NOT: @__msan_warning
; CHECK: ret i32
    639 
    640 
; Test that stack allocations are unpoisoned in functions missing
; sanitize_memory attribute

define i32 @NoSanitizeMemoryAlloca() {
entry:
  %p = alloca i32, align 4
  %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
  ret i32 %x
}

declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)

; The alloca's shadow is cleared with a memset before the call.
; CHECK: @NoSanitizeMemoryAlloca
; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 4, i32 4, i1 false)
; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
; CHECK: ret i32
    657 
    658 
    659 ; Test that undef is unpoisoned in functions missing
    660 ; sanitize_memory attribute
    661 
    662 define i32 @NoSanitizeMemoryUndef() {
    663 entry:
    664   %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
    665   ret i32 %x
    666 }
    667 
    668 declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
    669 
    670 ; CHECK: @NoSanitizeMemoryAlloca
    671 ; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
    672 ; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
    673 ; CHECK: ret i32
    674 
    675 
; Test argument shadow alignment

define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
entry:
  ret <2 x i64> %b
}

; Param/retval TLS accesses use align 8 regardless of the value's natural
; alignment.
; CHECK: @ArgumentShadowAlignment
; CHECK: load <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
; CHECK: ret <2 x i64>
    687 
    688 
; Test byval argument shadow alignment

define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval %p) sanitize_memory {
entry:
  %x = load <2 x i64>* %p
  ret <2 x i64> %x
}

; The byval shadow copy uses alignment 8 (TLS slot alignment cap).
; CHECK-AA: @ByValArgumentShadowLargeAlignment
; CHECK-AA: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 16, i32 8, i1 false)
; CHECK-AA: ret <2 x i64>
    700 
    701 
; Small byval argument: the shadow copy keeps the argument's natural
; alignment (2) since it is below the cap.
define i16 @ByValArgumentShadowSmallAlignment(i16* byval %p) sanitize_memory {
entry:
  %x = load i16* %p
  ret i16 %x
}

; CHECK-AA: @ByValArgumentShadowSmallAlignment
; CHECK-AA: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 2, i32 2, i1 false)
; CHECK-AA: ret i16
    711 
    712