// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG
// REQUIRES: x86-registered-target
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

long long get_val() { return 0; }
double *g_ptr;

// CHECK-LABEL: define {{.*void}} @{{.*}}simple{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void simple(float *a, float *b, float *c, float *d) {
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: [[K0:%.+]] = call {{.*}}i64 @{{.*}}get_val
// CHECK-NEXT: store i64 [[K0]], i64* [[K_VAR:%[^,]+]]
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: store i32 12, i32* [[LIN_VAR:%[^,]+]]
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: store i32 -1, i32* [[A:%.+]],
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: store i32 -1, i32* [[R:%[^,]+]],
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
  #pragma omp parallel for simd
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 5
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 5, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]]
  for (int i = 3; i < 32; i += 5) {
// CHECK: [[SIMPLE_LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
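// (Illustrative note: this loop runs 6 iterations, i = 3, 8, ..., 28, so the
// normalized IV runs 0..5 -- hence the clamp of UB to 5 above -- and the
// original counter is rebuilt here as i = 3 + IV * 5.)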
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float*
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// br label %{{.+}}, !llvm.loop !{{.+}}
  }
// CHECK: [[SIMPLE_LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})

  long long k = get_val();

  #pragma omp parallel for simd linear(k : 3) schedule(dynamic)
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_VAR:%[^,]+]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]

// CHECK: call void @__kmpc_dispatch_init_4(%ident_t* {{.+}}, i32 %{{.+}}, i32 35, i32 0, i32 8, i32 1, i32 1)
// CHECK: [[NEXT:%.+]] = call i32 @__kmpc_dispatch_next_4(%ident_t* {{.+}}, i32 %{{.+}}, i32* %{{.+}}, i32* [[LB:%.+]], i32* [[UB:%.+]], i32* %{{.+}})
// CHECK: [[COND:%.+]] = icmp ne i32 [[NEXT]], 0
// CHECK: br i1 [[COND]], label %[[CONT:.+]], label %[[END:.+]]
// CHECK: [[CONT]]
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV2:%[^,]+]],
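// (Illustrative note: with schedule(dynamic) the runtime hands out chunks via
// __kmpc_dispatch_init_4/__kmpc_dispatch_next_4; the bounds 0 and 8 passed to the
// init call cover the 9 logical iterations of the i = 10..2 loop below.)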

// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[CMP2:%.+]] = icmp sle i32 [[IV2]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP2_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// FIXME: Why is the following "mul 1" not constant folded?
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update of the privatized version of the linear variable.
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]]
  }
// CHECK: [[SIMPLE_LOOP2_END]]
//
// Update linear vars after loop, as the loop was operating on a private version.
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* %{{.+}}
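// (Illustrative note: linear(k : 3) over the 9 iterations above gives a final value
// of k_start + 9 * 3, which is why the constant 27 is added back to the original k.)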

  int lin = 12;
  #pragma omp parallel for simd linear(lin : get_val()), linear(g_ptr)

// Init linear private var.
// CHECK: [[LIN_VAR:%.+]] = load i32*, i32** %
// CHECK: [[LIN_LOAD:%.+]] = load i32, i32* [[LIN_VAR]]
// CHECK-NEXT: store i32 [[LIN_LOAD]], i32* [[LIN_START:%[^,]+]]
// Remember linear step.
// CHECK: [[CALL_VAL:%.+]] = invoke
// CHECK: store i64 [[CALL_VAL]], i64* [[LIN_STEP:%[^,]+]]

// CHECK: [[GLIN_LOAD:%.+]] = load double*, double** [[GLIN_VAR:%.+]],
// CHECK-NEXT: store double* [[GLIN_LOAD]], double** [[GLIN_START:%[^,]+]]

// CHECK: call void @__kmpc_for_static_init_8u(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i64 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV3:%[^,]+]],

// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP3:%.+]] = icmp ule i64 [[IV3]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]]
  for (unsigned long long it = 2000; it >= 600; it-=400) {
// CHECK: [[SIMPLE_LOOP3_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400
// CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}
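// (Illustrative note: this loop has 4 iterations, it = 2000, 1600, 1200, 800, so the
// unsigned IV runs 0..3 -- matching the clamp of UB to 3 above -- and the counter is
// rebuilt as it = 2000 - IV * 400.)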
//
// Linear start and step are used to calculate current value of the linear variable.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]
// CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]]
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]
// CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK-NEXT: [[MUL:%.+]] = mul i64 [[IV3_1]], 1
// CHECK: [[GEP:%.+]] = getelementptr{{.*}}[[GLINSTART]]
// CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]]
    *g_ptr++ = 0.0;
// CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]]
// CHECK: store double{{.*}}[[GEP_VAL]]
    a[it + lin]++;
// CHECK: [[FLT_INC:%.+]] = fadd float
// CHECK-NEXT: store float [[FLT_INC]],
// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1
// CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]
  }
// CHECK: [[SIMPLE_LOOP3_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
//
// Linear start and step are used to calculate final value of the linear variables.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]
// CHECK: store i32 {{.+}}, i32* [[LIN_VAR]],
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]
// CHECK: store double* {{.*}}[[GLIN_VAR]]
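// (Illustrative note: after the 4 iterations the linear variables receive their final
// values -- lin becomes lin_start + 4 * step and g_ptr advances by 4 elements -- and
// those values are stored back to the original variables here.)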

  #pragma omp parallel for simd
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV4:%[^,]+]],

// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP4:%.+]] = icmp sle i32 [[IV4]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]]
  for (short it = 6; it <= 20; it-=-4) {
// CHECK: [[SIMPLE_LOOP4_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]]
// CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16
// CHECK-NEXT: store i16 [[LC_IT_3]], i16*
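// (Illustrative note: "it -= -4" steps by +4, giving 4 iterations (it = 6, 10, 14, 18);
// the IV runs 0..3, it is rebuilt as 6 + IV * 4, then truncated back to short.)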

// CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]]
// CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1
// CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]]
  }
// CHECK: [[SIMPLE_LOOP4_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})

  #pragma omp parallel for simd
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 25
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 25, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV5:%[^,]+]],

// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP5:%.+]] = icmp sle i32 [[IV5]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]]
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
// CHECK: [[SIMPLE_LOOP5_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]
// CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1
// CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]]
// CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8
// CHECK-NEXT: store i8 [[LC_IT_2]], i8* {{.+}},
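// (Illustrative note: the counter walks from 'z' (122) down to 'a' (97), i.e. 26
// iterations with IV 0..25 -- matching the clamp of UB to 25 above -- and it is
// rebuilt as 122 - IV before being truncated back to i8.)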

// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]
// CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1
// CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]
  }
// CHECK: [[SIMPLE_LOOP5_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})

// CHECK-NOT: mul i32 %{{.+}}, 10
  #pragma omp parallel for simd
  for (unsigned i=100; i<10; i+=10) {
  }

  int A;
  {
  A = -1;
  #pragma omp parallel for simd lastprivate(A)
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV7:%[^,]+]],

// CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]]
// CHECK: [[SIMD_LOOP7_COND]]
// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP7:%.+]] = icmp sle i64 [[IV7]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]]
  for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP7_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]
// CHECK-NEXT: [[CONV:%.+]] = trunc i64 [[LC_VAL]] to i32
// CHECK-NEXT: store i32 [[CONV]], i32* [[A_PRIV:%[^,]+]],
    A = i;
// CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1
// CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]
  }
// CHECK: [[SIMPLE_LOOP7_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: load i32, i32*
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: br i1 %{{.+}}, label
// CHECK: [[A_PRIV_VAL:%.+]] = load i32, i32* [[A_PRIV]],
// CHECK-NEXT: store i32 [[A_PRIV_VAL]], i32* %{{.+}},
// CHECK-NEXT: br label
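// (Illustrative note: the loop has 7 iterations (i = -10, -7, ..., 8), so UB is clamped
// to 6 above. For lastprivate(A), the "is last iteration" flag filled in by
// __kmpc_for_static_init_8 is tested after the loop and, if set, the private copy of A
// (holding the value from the final iteration, i = 8) is copied back to the original A.)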
  }
  int R;
  {
  R = -1;
// CHECK: store i32 1, i32* [[R_PRIV:%[^,]+]],
  #pragma omp parallel for simd reduction(*:R)
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV8:%[^,]+]],

// CHECK: br label %[[SIMD_LOOP8_COND:[^,]+]]
// CHECK: [[SIMD_LOOP8_COND]]
// CHECK-NEXT: [[IV8:%.+]] = load i64, i64* [[OMP_IV8]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP8:%.+]] = icmp sle i64 [[IV8]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP8]], label %[[SIMPLE_LOOP8_BODY:.+]], label %[[SIMPLE_LOOP8_END:[^,]+]]
  for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP8_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV8_0:%.+]] = load i64, i64* [[OMP_IV8]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV8_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]
// CHECK: store i32 %{{.+}}, i32* [[R_PRIV]],
    R *= i;
// CHECK: [[IV8_2:%.+]] = load i64, i64* [[OMP_IV8]]
// CHECK-NEXT: [[ADD8_2:%.+]] = add nsw i64 [[IV8_2]], 1
// CHECK-NEXT: store i64 [[ADD8_2]], i64* [[OMP_IV8]]
  }
// CHECK: [[SIMPLE_LOOP8_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call i32 @__kmpc_reduce_nowait(
// CHECK: [[R_PRIV_VAL:%.+]] = load i32, i32* [[R_PRIV]],
// CHECK: [[RED:%.+]] = mul nsw i32 %{{.+}}, [[R_PRIV_VAL]]
// CHECK-NEXT: store i32 [[RED]], i32* %{{.+}},
// CHECK-NEXT: call void @__kmpc_end_reduce_nowait(
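// (Illustrative note: for reduction(*:R) each thread's private R is initialized to 1,
// the identity of multiplication (the "store i32 1" checked above); after the loop the
// partial products are combined into the original R via the "mul nsw" inside the
// __kmpc_reduce_nowait/__kmpc_end_reduce_nowait region.)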
  }
}

template <class T, unsigned K> T tfoo(T a) { return a + K; }

template <typename T, unsigned N>
int templ1(T a, T *z) {
  #pragma omp parallel for simd collapse(N)
  for (int i = 0; i < N * 2; i++) {
    for (long long j = 0; j < (N + N + N + N); j += 2) {
      z[i + j] = a + tfoo<T, N>(i + j);
    }
  }
  return 0;
}

// Instantiation of templ1<float,2>
// CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float {{.+}}, float* {{.+}})
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
void inst_templ1() {
  float a;
  float z[100];
  templ1<float,2> (a, z);
}


typedef int MyIdx;

class IterDouble {
  double *Ptr;
public:
  IterDouble operator++ () const {
    IterDouble n;
    n.Ptr = Ptr + 1;
    return n;
  }
  bool operator < (const IterDouble &that) const {
    return Ptr < that.Ptr;
  }
  double & operator *() const {
    return *Ptr;
  }
  MyIdx operator - (const IterDouble &that) const {
    return (MyIdx) (Ptr - that.Ptr);
  }
  IterDouble operator + (int Delta) {
    IterDouble re;
    re.Ptr = Ptr + Delta;
    return re;
  }

  ///~IterDouble() {}
};

// CHECK-LABEL: define {{.*void}} @{{.*}}iter_simple{{.*}}
void iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) {
//
// Calculate number of iterations before the loop body.
// CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
// CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
// CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
// CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
// CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
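// (Illustrative note: the trip count comes from the iterator difference ib - ia
// (operator-), run through the generic ((distance - 1) + step) / step formula with
// step 1; what is stored here is that count minus 1, the index of the last iteration.)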
  #pragma omp parallel for simd

// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[IT_OMP_IV:%[^,]+]],

// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]]
  for (IterDouble i = ia; i < ib; ++i) {
// CHECK: [[IT_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]
// Call of operator+ (i, IV).
// CHECK: {{%.+}} = invoke {{.+}} @{{.*}}IterDouble{{.*}}
// ... loop body ...
   *i = *ic * 0.5;
// Float multiply and save result.
// CHECK: [[MULR:%.+]] = fmul double {{%.+}}, 5.000000e-01
// CHECK-NEXT: invoke {{.+}} @{{.*}}IterDouble{{.*}}
// CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]]
   ++ic;
//
// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]
// br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]]
  }
// CHECK: [[IT_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
}


// CHECK-LABEL: define {{.*void}} @{{.*}}collapsed{{.*}}
void collapsed(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  unsigned j; // middle loop counter, leads to unsigned icmp in loop header.
  // k declared in the loop init below
  short l; // inner loop counter
// CHECK: call void @__kmpc_for_static_init_4u(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i32 [[UB_VAL]], 119
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 119, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],
//
  #pragma omp parallel for simd collapse(4)

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 2u; j < 5u; j++) //3 iterations
      for (int k = 3; k <= 6; k++) // 4 iterations
        for (l = 4; l < 9; ++l) // 5 iterations
        {
// CHECK: [[COLL1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]
// Calculation of the loop counter values.
// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
// CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20
// CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3
// CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
// CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]
// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5
// CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4
// CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1
// CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_K_3]], i32* [[LC_K:.+]]
// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_L_1:%.+]] = urem i32 [[IV1_4]], 5
// CHECK-NEXT: [[CALC_L_1_MUL1:%.+]] = mul i32 [[CALC_L_1]], 1
// CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[CALC_L_1_MUL1]]
// CHECK-NEXT: [[CALC_L_3:%.+]] = trunc i32 [[CALC_L_2]] to i16
// CHECK-NEXT: store i16 [[CALC_L_3]], i16* [[LC_L:.+]]
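// (Illustrative note: the collapse(4) nest has 2*3*4*5 = 120 iterations, hence the
// clamp of UB to 119 above. The counters are recovered from the flattened IV as
// i = 1 + IV/60, j = 2 + (IV/20)%3, k = 3 + (IV/5)%4 and l = 4 + IV%5, where
// 60 = 3*4*5 and 20 = 4*5 are the sizes of the inner subspaces.)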
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]
    float res = b[j] * c[k];
    a[i] = res * d[l];
// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]
// br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]]
// CHECK: [[COLL1_END]]
  }
// i,j,l are updated; k is not updated.
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i32 5, i32* [[I:%[^,]+]]
// CHECK: store i16 9, i16* [[I:%[^,]+]]
// CHECK: ret void
}

extern char foo();
extern double globalfloat;

// CHECK-LABEL: define {{.*void}} @{{.*}}widened{{.*}}
void widened(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  short j; // inner loop counter
  globalfloat = 1.0;
  int localint = 1;
// CHECK: store double {{.+}}, double* [[GLOBALFLOAT:@.+]]
// Counter is widened to 64 bits.
// CHECK:     [[MUL:%.+]] = mul nsw i64 2, %{{.+}}
// CHECK-NEXT: [[SUB:%.+]] = sub nsw i64 [[MUL]], 1
// CHECK-NEXT: store i64 [[SUB]], i64* [[OMP_LAST_IT:%[^,]+]],
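// (Illustrative note: the collapsed trip count is 2 * foo() and is only known at run
// time, so the combined IV is computed in 64 bits; the value stored here is the index
// of the last iteration, 2 * foo() - 1.)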
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV:%[^,]+]],
//
  #pragma omp parallel for simd collapse(2) private(globalfloat, localint)

// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 0; j < foo(); j++) // foo() iterations
  {
// CHECK: [[WIDE1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]
// Calculation of the loop counter values...
// CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]]
// ... loop body ...
//
// Here we expect store into private double var, not global
// CHECK-NOT: store double {{.+}}, double* [[GLOBALFLOAT]]
    globalfloat = (float)j/i;
    float res = b[j] * c[j];
// Store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]
    a[i] = res * d[i];
// Then there's a store into private var localint:
// CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]]
    localint = (int)j;
// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1
// CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]
//
// br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]]
// CHECK: [[WIDE1_END]]
  }
// i,j are updated.
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i16
//
// Here we expect store into original localint, not its privatized version.
// CHECK-NOT: store i32 {{.+}}, i32* [[LOCALINT]]
  localint = (int)j;
// CHECK: ret void
}

// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 15
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 15, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[T1_OMP_IV:%[^,]+]],
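// (Illustrative note: these checks appear to correspond to the templ1<float,2>
// instantiation above: with N = 2 the collapse(2) space is 4 * 4 = 16 iterations, so
// UB is clamped to 15, and below the counters are recovered as i = IV / 4 and
// j = (IV % 4) * 2.)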

// ...
// CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP1:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]]
// CHECK: [[T1_BODY]]
// Loop counters i and j updates:
// CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]
// CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
// CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
// CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
// CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
// CHECK-NEXT: store i32 [[I_2]], i32*
// CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]
// CHECK-NEXT: [[J_1:%.+]] = srem i64 [[IV2]], 4
// CHECK-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2
// CHECK-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]]
// CHECK-NEXT: store i64 [[J_2_ADD0]], i64*
// simd.for.inc:
// CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]
// CHECK-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1
// CHECK-NEXT: store i64 [[INC]], i64*
// CHECK-NEXT: br label {{%.+}}
// CHECK: [[T1_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
//
// TERM_DEBUG-LABEL: bar
int bar() {return 0;};

// TERM_DEBUG-LABEL: parallel_simd
void parallel_simd(float *a) {
#pragma omp parallel for simd
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     invoke i32 {{.*}}bar{{.*}}()
  // TERM_DEBUG:     unwind label %[[TERM_LPAD:.+]],
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     [[TERM_LPAD]]
  // TERM_DEBUG:     call void @__clang_call_terminate
  // TERM_DEBUG:     unreachable
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += bar();
}
// TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]],
#endif // HEADER