// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG
// REQUIRES: x86-registered-target
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// CHECK: [[SS_TY:%.+]] = type { i32 }

long long get_val() { return 0; }
double *g_ptr;

// CHECK-LABEL: define {{.*void}} @{{.*}}simple{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void simple(float *a, float *b, float *c, float *d) {
  #pragma omp simd
// CHECK: store i32 0, i32* [[OMP_IV:%[^,]+]]

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID:[0-9]+]]
// CHECK-NEXT: [[CMP:%.+]] = icmp slt i32 [[IV]], 6
// CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]]
  for (int i = 3; i < 32; i += 5) {
// CHECK: [[SIMPLE_LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// br label %{{.+}}, !llvm.loop !{{.+}}
  }
// CHECK: [[SIMPLE_LOOP1_END]]
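//
// Worked numbers for the loop above: i takes the values 3, 8, 13, 18, 23, 28,
// i.e. (32 - 3 + 5 - 1) / 5 = 6 logical iterations, hence the comparison of the
// IV against 6; in the body the original counter is rebuilt as i = 3 + 5 * IV.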

  long long k = get_val();

  #pragma omp simd linear(k : 3)
// CHECK: [[K0:%.+]] = call {{.*}}i64 @{{.*}}get_val
// CHECK-NEXT: store i64 [[K0]], i64* [[K_VAR:%[^,]+]]
// CHECK: store i32 0, i32* [[OMP_IV2:%[^,]+]]
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_VAR]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]

// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]]
// CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV2]], 9
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP2_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// FIXME: Why is the following "mul 1" not constant folded?
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update of the privatized version of the linear variable.
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]]
  }
// CHECK: [[SIMPLE_LOOP2_END]]
//
// Update linear vars after the loop, as the loop was operating on a private version.
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[K_VAR]]
//
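// Worked numbers for the linear(k : 3) loop above: i runs from 10 down to 2,
// i.e. 9 iterations, hence the comparison of the IV against 9. Inside the loop
// the private k is computed as k_start + 3 * IV, and after the loop the original
// k is updated once with 9 * 3 = 27, the constant in the add checked above.
//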

  int lin = 12;
  #pragma omp simd linear(lin : get_val()), linear(g_ptr)

// Init linear private var.
// CHECK: store i32 12, i32* [[LIN_VAR:%[^,]+]]
// CHECK: store i64 0, i64* [[OMP_IV3:%[^,]+]]

// CHECK: [[LIN_LOAD:%.+]] = load i32, i32* [[LIN_VAR]]
// CHECK-NEXT: store i32 [[LIN_LOAD]], i32* [[LIN_START:%[^,]+]]
// Remember linear step.
// CHECK: [[CALL_VAL:%.+]] = invoke
// CHECK: store i64 [[CALL_VAL]], i64* [[LIN_STEP:%[^,]+]]

// CHECK: [[GLIN_LOAD:%.+]] = load double*, double** [[GLIN_VAR:@[^,]+]]
// CHECK-NEXT: store double* [[GLIN_LOAD]], double** [[GLIN_START:%[^,]+]]

// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID:[0-9]+]]
// CHECK-NEXT: [[CMP3:%.+]] = icmp ult i64 [[IV3]], 4
// CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]]
  for (unsigned long long it = 2000; it >= 600; it-=400) {
// CHECK: [[SIMPLE_LOOP3_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400
// CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
//
// Linear start and step are used to calculate the current value of the linear variable.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[MUL:%.+]] = mul i64 [[IV3_1]], 1
// CHECK: [[GEP:%.+]] = getelementptr{{.*}}[[GLINSTART]]
// CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
    *g_ptr++ = 0.0;
// CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: store double{{.*}}[[GEP_VAL]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
    a[it + lin]++;
// CHECK: [[FLT_INC:%.+]] = fadd float
// CHECK-NEXT: store float [[FLT_INC]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1
// CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
  }
// CHECK: [[SIMPLE_LOOP3_END]]
//
// Linear start and step are used to calculate the final value of the linear variables.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]
// CHECK: store i32 {{.+}}, i32* [[LIN_VAR]],
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]
// CHECK: store double* {{.*}}[[GLIN_VAR]]
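//
// Worked numbers for the loop above: it takes 2000, 1600, 1200, 800, i.e.
// (2000 - 600) / 400 + 1 = 4 iterations, hence the unsigned comparison of the IV
// against 4, and it is rebuilt as 2000 - 400 * IV. The final values follow
// start + 4 * step: lin becomes 12 + 4 * get_val(), and g_ptr (default step 1)
// is advanced by 4 elements.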

  #pragma omp simd
// CHECK: store i32 0, i32* [[OMP_IV4:%[^,]+]]

// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID:[0-9]+]]
// CHECK-NEXT: [[CMP4:%.+]] = icmp slt i32 [[IV4]], 4
// CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]]
  for (short it = 6; it <= 20; it-=-4) {
// CHECK: [[SIMPLE_LOOP4_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]]
// CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16
// CHECK-NEXT: store i16 [[LC_IT_3]], i16* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]

// CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1
// CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
  }
// CHECK: [[SIMPLE_LOOP4_END]]
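//
// Worked numbers: it takes 6, 10, 14, 18, i.e. (20 - 6) / 4 + 1 = 4 iterations,
// hence the comparison against 4; it is rebuilt as 6 + 4 * IV and truncated
// back to i16.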

  #pragma omp simd
// CHECK: store i32 0, i32* [[OMP_IV5:%[^,]+]]

// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID:[0-9]+]]
// CHECK-NEXT: [[CMP5:%.+]] = icmp slt i32 [[IV5]], 26
// CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]]
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
// CHECK: [[SIMPLE_LOOP5_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1
// CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]]
// CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8
// CHECK-NEXT: store i8 [[LC_IT_2]], i8* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]

// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1
// CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
  }
// CHECK: [[SIMPLE_LOOP5_END]]
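//
// Worked numbers: it runs from 'z' (122) down to 'a' (97), i.e. 122 - 97 + 1 = 26
// iterations, hence the comparison against 26; it is rebuilt as 122 - IV and
// truncated back to i8.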

// CHECK-NOT: mul i32 %{{.+}}, 10
  #pragma omp simd
  for (unsigned i=100; i<10; i+=10) {
  }

  int A;
  // CHECK: store i32 -1, i32* [[A:%.+]],
  A = -1;
  #pragma omp simd lastprivate(A)
// CHECK: store i64 0, i64* [[OMP_IV7:%[^,]+]]
// CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]]
// CHECK: [[SIMD_LOOP7_COND]]
// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID:[0-9]+]]
// CHECK-NEXT: [[CMP7:%.+]] = icmp slt i64 [[IV7]], 7
// CHECK-NEXT: br i1 [[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]]
  for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP7_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[CONV:%.+]] = trunc i64 [[LC_VAL]] to i32
// CHECK-NEXT: store i32 [[CONV]], i32* [[A_PRIV:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
    A = i;
// CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1
// CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
  }
// CHECK: [[SIMPLE_LOOP7_END]]
// CHECK-NEXT: store i64 11, i64*
// CHECK-NEXT: [[A_PRIV_VAL:%.+]] = load i32, i32* [[A_PRIV]],
// CHECK-NEXT: store i32 [[A_PRIV_VAL]], i32* [[A]],
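//
// Worked numbers for the lastprivate loop above: i takes -10, -7, -4, -1, 2, 5, 8,
// i.e. 7 iterations, hence the comparison of the IV against 7. After the loop the
// counter is finalized to -10 + 7 * 3 = 11 (the store of 11 checked above), and A
// receives its value from the logically last iteration via the copy-out from A_PRIV.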
  int R;
  // CHECK: store i32 -1, i32* [[R:%[^,]+]],
  R = -1;
// CHECK: store i64 0, i64* [[OMP_IV8:%[^,]+]],
// CHECK: store i32 1, i32* [[R_PRIV:%[^,]+]],
  #pragma omp simd reduction(*:R)
// CHECK: br label %[[SIMD_LOOP8_COND:[^,]+]]
// CHECK: [[SIMD_LOOP8_COND]]
// CHECK-NEXT: [[IV8:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID:[0-9]+]]
// CHECK-NEXT: [[CMP8:%.+]] = icmp slt i64 [[IV8]], 7
// CHECK-NEXT: br i1 [[CMP8]], label %[[SIMPLE_LOOP8_BODY:.+]], label %[[SIMPLE_LOOP8_END:[^,]+]]
  for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP8_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV8_0:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV8_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK: store i32 %{{.+}}, i32* [[R_PRIV]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
    R *= i;
// CHECK: [[IV8_2:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[ADD8_2:%.+]] = add nsw i64 [[IV8_2]], 1
// CHECK-NEXT: store i64 [[ADD8_2]], i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
  }
// CHECK: [[SIMPLE_LOOP8_END]]
// CHECK-DAG: [[R_VAL:%.+]] = load i32, i32* [[R]],
// CHECK-DAG: [[R_PRIV_VAL:%.+]] = load i32, i32* [[R_PRIV]],
// CHECK: [[RED:%.+]] = mul nsw i32 [[R_VAL]], [[R_PRIV_VAL]]
// CHECK-NEXT: store i32 [[RED]], i32* [[R]],
// CHECK-NEXT: ret void
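//
// For the reduction(*:R) loop, the private copy R_PRIV is initialized to 1 (the
// identity element for '*') before the loop, and the checks above verify the
// combine step after it: R = R * R_PRIV.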
}

template <class T, unsigned K> T tfoo(T a) { return a + K; }

template <typename T, unsigned N>
int templ1(T a, T *z) {
  #pragma omp simd collapse(N)
  for (int i = 0; i < N * 2; i++) {
    for (long long j = 0; j < (N + N + N + N); j += 2) {
      z[i + j] = a + tfoo<T, N>(i + j);
    }
  }
  return 0;
}

// Instantiation of templ1<float,2>
// CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float {{.+}}, float* {{.+}})
// CHECK: store i64 0, i64* [[T1_OMP_IV:[^,]+]]
// ...
// CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID:[0-9]+]]
// CHECK-NEXT: [[CMP1:%.+]] = icmp slt i64 [[IV]], 16
// CHECK-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]]
// CHECK: [[T1_BODY]]
// Updates of the loop counters i and j:
// CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
// CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
// CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
// CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
// CHECK-NEXT: store i32 [[I_2]], i32* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[J_1:%.+]] = srem i64 [[IV2]], 4
// CHECK-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2
// CHECK-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]]
// CHECK-NEXT: store i64 [[J_2_ADD0]], i64* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// simd.for.inc:
// CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1
// CHECK-NEXT: store i64 [[INC]], i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: br label {{%.+}}
// CHECK: [[T1_END]]
// CHECK: ret i32 0
//
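// Worked numbers for templ1<float,2>: with N = 2 the outer loop runs 4 times
// (i in [0, 4)) and the inner loop 4 times (j in [0, 8) with step 2), so the
// collapsed space has 4 * 4 = 16 iterations; i is rebuilt as IV / 4 and j as
// (IV % 4) * 2, matching the sdiv/srem checks above.
//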
void inst_templ1() {
  float a;
  float z[100];
  templ1<float,2> (a, z);
}


typedef int MyIdx;

class IterDouble {
  double *Ptr;
public:
  IterDouble operator++ () const {
    IterDouble n;
    n.Ptr = Ptr + 1;
    return n;
  }
  bool operator < (const IterDouble &that) const {
    return Ptr < that.Ptr;
  }
  double & operator *() const {
    return *Ptr;
  }
  MyIdx operator - (const IterDouble &that) const {
    return (MyIdx) (Ptr - that.Ptr);
  }
  IterDouble operator + (int Delta) {
    IterDouble re;
    re.Ptr = Ptr + Delta;
    return re;
  }

  ///~IterDouble() {}
};

// CHECK-LABEL: define {{.*void}} @{{.*}}iter_simple{{.*}}
void iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) {
//
// Calculate the number of iterations before the loop body.
// CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
// CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
// CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
// CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
// CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
// CHECK: store i32 0, i32* [[IT_OMP_IV:%[^,]+]]
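//
// With an iterator distance d = ib - ia and a step of 1, the sequence above
// computes ((d - 1) + 1) / 1 - 1 = d - 1, i.e. the index of the last logical
// iteration; the loop header below adds 1 back to recover the trip count d.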
  #pragma omp simd

// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}} !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[LAST_IT:%.+]] = load i32, i32* [[OMP_LAST_IT]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// CHECK-NEXT: [[NUM_IT:%.+]] = add nsw i32 [[LAST_IT]], 1
// CHECK-NEXT: [[CMP:%.+]] = icmp slt i32 [[IV]], [[NUM_IT]]
// CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]]
  for (IterDouble i = ia; i < ib; ++i) {
// CHECK: [[IT_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// Call of operator+ (i, IV).
// CHECK: {{%.+}} = invoke {{.+}} @{{.*}}IterDouble{{.*}}
// ... loop body ...
   *i = *ic * 0.5;
// Float multiply and save result.
// CHECK: [[MULR:%.+]] = fmul double {{%.+}}, 5.000000e-01
// CHECK-NEXT: invoke {{.+}} @{{.*}}IterDouble{{.*}}
// CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]], !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
   ++ic;
//
// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]]
  }
// CHECK: [[IT_END]]
// CHECK: ret void
}


// CHECK-LABEL: define {{.*void}} @{{.*}}collapsed{{.*}}
void collapsed(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  unsigned j; // middle loop counter, leads to unsigned icmp in loop header.
  // k declared in the loop init below
  short l; // inner loop counter
// CHECK: store i32 0, i32* [[OMP_IV:[^,]+]]
//
  #pragma omp simd collapse(4)

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ult i32 [[IV]], 120
// CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 2u; j < 5u; j++) // 3 iterations
      for (int k = 3; k <= 6; k++) // 4 iterations
        for (l = 4; l < 9; ++l) // 5 iterations
        {
// CHECK: [[COLL1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// Calculation of the loop counters' values.
// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
// CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20
// CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3
// CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
// CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]
// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5
// CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4
// CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1
// CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_K_3]], i32* [[LC_K:.+]]
// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_L_1:%.+]] = urem i32 [[IV1_4]], 5
// CHECK-NEXT: [[CALC_L_1_MUL1:%.+]] = mul i32 [[CALC_L_1]], 1
// CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[CALC_L_1_MUL1]]
// CHECK-NEXT: [[CALC_L_3:%.+]] = trunc i32 [[CALC_L_2]] to i16
// CHECK-NEXT: store i16 [[CALC_L_3]], i16* [[LC_L:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
    float res = b[j] * c[k];
    a[i] = res * d[l];
// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]]
// CHECK: [[COLL1_END]]
  }
// i,j,l are updated; k is not updated.
// CHECK: store i32 3, i32*
// CHECK-NEXT: store i32 5, i32*
// CHECK-NEXT: store i32 7, i32*
// CHECK-NEXT: store i16 9, i16*
// CHECK: ret void
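//
// Worked decomposition for collapse(4): the collapsed space has 2 * 3 * 4 * 5 = 120
// iterations, and the counters are rebuilt from the single IV as
//   i = 1 + IV / 60, j = 2 + (IV / 20) % 3, k = 3 + (IV / 5) % 4, l = 4 + IV % 5,
// which is exactly the udiv/urem chain checked above. The post-loop stores follow
// the same start + count * step pattern, e.g. i = 1 + 2 = 3, j = 2 + 3 = 5 and
// l = 4 + 5 = 9.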
}

extern char foo();
extern double globalfloat;

// CHECK-LABEL: define {{.*void}} @{{.*}}widened{{.*}}
void widened(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  short j; // inner loop counter
  globalfloat = 1.0;
  int localint = 1;
// CHECK: store double {{.+}}, double* [[GLOBALFLOAT:@.+]]
// Counter is widened to 64 bits.
// CHECK: store i64 0, i64* [[OMP_IV:[^,]+]]
//
  #pragma omp simd collapse(2) private(globalfloat, localint)

// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[LI:%.+]] = load i64, i64* [[OMP_LI:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK-NEXT: [[NUMIT:%.+]] = add nsw i64 [[LI]], 1
// CHECK-NEXT: [[CMP:%.+]] = icmp slt i64 [[IV]], [[NUMIT]]
// CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]]
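//
// Because the inner trip count depends on the run-time value of foo(), the
// collapsed iteration count 2 * foo() is not a compile-time constant: it is
// precomputed into the 64-bit "last iteration" temporary checked above, and the
// loop header compares the IV against that value plus one.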
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 0; j < foo(); j++) // foo() iterations
  {
// CHECK: [[WIDE1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// Calculation of the loop counters' values...
// CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]]
// ... loop body ...
//
// Here we expect a store into the private double var, not the global one.
// CHECK-NOT: store double {{.+}}, double* [[GLOBALFLOAT]]
    globalfloat = (float)j/i;
    float res = b[j] * c[j];
// Store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
    a[i] = res * d[i];
// Then there's a store into the private var localint:
// CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
    localint = (int)j;
// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1
// CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
//
// br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]]
// CHECK: [[WIDE1_END]]
  }
// i,j are updated.
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i16
//
// Here we expect a store into the original localint, not its privatized version.
// CHECK-NOT: store i32 {{.+}}, i32* [[LOCALINT]]
  localint = (int)j;
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}linear{{.*}}(float* {{.+}})
void linear(float *a) {
  // CHECK: [[VAL_ADDR:%.+]] = alloca i64,
  // CHECK: [[K_ADDR:%.+]] = alloca i64*,
  long long val = 0;
  long long &k = val;

  #pragma omp simd linear(k : 3)
// CHECK: store i64* [[VAL_ADDR]], i64** [[K_ADDR]],
// CHECK: [[VAL_REF:%.+]] = load i64*, i64** [[K_ADDR]],
// CHECK: store i64* [[VAL_REF]], i64** [[K_ADDR_REF:%.+]],
// CHECK: store i32 0, i32* [[OMP_IV:%[^,]+]]
// CHECK: [[K_REF:%.+]] = load i64*, i64** [[K_ADDR_REF]],
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_REF]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV]], 9
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP_BODY:.+]], label %[[SIMPLE_LOOP_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV_0:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// FIXME: Why is the following "mul 1" not constant folded?
// CHECK-NEXT: [[IV_1:%.+]] = mul nsw i32 [[IV_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// CHECK-NEXT: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update of the privatized version of the linear variable.
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP_ID]]
  }
// CHECK: [[SIMPLE_LOOP_END]]
//
// Update linear vars after the loop, as the loop was operating on a private version.
// CHECK: [[K_REF:%.+]] = load i64*, i64** [[K_ADDR_REF]],
// CHECK: store i64* [[K_REF]], i64** [[K_PRIV_REF:%.+]],
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
// CHECK-NEXT: [[K_REF:%.+]] = load i64*, i64** [[K_PRIV_REF]],
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[K_REF]]
//

  #pragma omp simd linear(val(k) : 3)
// CHECK: [[VAL_REF:%.+]] = load i64*, i64** [[K_ADDR]],
// CHECK: store i64* [[VAL_REF]], i64** [[K_ADDR_REF:%.+]],
// CHECK: store i32 0, i32* [[OMP_IV:%[^,]+]]
// CHECK: [[K_REF:%.+]] = load i64*, i64** [[K_ADDR_REF]],
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_REF]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV]], 9
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP_BODY:.+]], label %[[SIMPLE_LOOP_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV_0:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// FIXME: Why is the following "mul 1" not constant folded?
// CHECK-NEXT: [[IV_1:%.+]] = mul nsw i32 [[IV_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// CHECK-NEXT: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update of the privatized version of the linear variable.
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP_ID]]
  }
// CHECK: [[SIMPLE_LOOP_END]]
//
// Update linear vars after the loop, as the loop was operating on a private version.
// CHECK: [[K_REF:%.+]] = load i64*, i64** [[K_ADDR_REF]],
// CHECK: store i64* [[K_REF]], i64** [[K_PRIV_REF:%.+]],
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
// CHECK-NEXT: [[K_REF:%.+]] = load i64*, i64** [[K_PRIV_REF]],
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[K_REF]]
//
  #pragma omp simd linear(uval(k) : 3)
// CHECK: store i32 0, i32* [[OMP_IV:%[^,]+]]
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[VAL_ADDR]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV]], 9
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP_BODY:.+]], label %[[SIMPLE_LOOP_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV_0:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// FIXME: Why is the following "mul 1" not constant folded?
// CHECK-NEXT: [[IV_1:%.+]] = mul nsw i32 [[IV_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// CHECK-NEXT: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update of the privatized version of the linear variable.
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP_ID]]
  }
// CHECK: [[SIMPLE_LOOP_END]]
//
// Update linear vars after the loop, as the loop was operating on a private version.
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[VAL_ADDR]]
//
}

// TERM_DEBUG-LABEL: bar
int bar() {return 0;};

// TERM_DEBUG-LABEL: parallel_simd
void parallel_simd(float *a) {
#pragma omp parallel
#pragma omp simd
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     invoke i32 {{.*}}bar{{.*}}()
  // TERM_DEBUG:     unwind label %[[TERM_LPAD:.+]],
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     [[TERM_LPAD]]
  // TERM_DEBUG:     call void @__clang_call_terminate
  // TERM_DEBUG:     unreachable
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += bar();
}
// TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]],

// CHECK-LABEL: S8
// CHECK: ptrtoint [[SS_TY]]* %{{.+}} to i64
// CHECK-NEXT: and i64 %{{.+}}, 15
// CHECK-NEXT: icmp eq i64 %{{.+}}, 0
// CHECK-NEXT: call void @llvm.assume(i1

// CHECK: ptrtoint [[SS_TY]]* %{{.+}} to i64
// CHECK-NEXT: and i64 %{{.+}}, 7
// CHECK-NEXT: icmp eq i64 %{{.+}}, 0
// CHECK-NEXT: call void @llvm.assume(i1

// CHECK: ptrtoint [[SS_TY]]* %{{.+}} to i64
// CHECK-NEXT: and i64 %{{.+}}, 15
// CHECK-NEXT: icmp eq i64 %{{.+}}, 0
// CHECK-NEXT: call void @llvm.assume(i1

// CHECK: ptrtoint [[SS_TY]]* %{{.+}} to i64
// CHECK-NEXT: and i64 %{{.+}}, 3
// CHECK-NEXT: icmp eq i64 %{{.+}}, 0
// CHECK-NEXT: call void @llvm.assume(i1
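//
// Each assume above checks ptr & (alignment - 1) == 0. The masks 15, 7 and 3
// correspond to the 16-, 8- and 4-byte alignments requested by the aligned
// clauses in the S7 and S8 constructors below, 16 presumably being the target's
// default SIMD alignment when the clause gives no explicit value.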
struct SS {
  SS(): a(0) {}
  SS(int v) : a(v) {}
  int a;
  typedef int type;
};

template <typename T>
class S7 : public T {
protected:
  T *a;
  T b[2];
  S7() : a(0) {}

public:
  S7(typename T::type &v) : a((T*)&v) {
#pragma omp simd aligned(a)
    for (int k = 0; k < a->a; ++k)
      ++this->a->a;
#pragma omp simd aligned(this->b : 8)
    for (int k = 0; k < a->a; ++k)
      ++a->a;
  }
};

class S8 : private IterDouble, public S7<SS> {
  S8() {}

public:
  S8(int v) : S7<SS>(v){
#pragma omp parallel private(a)
#pragma omp simd aligned(S7<SS>::a)
    for (int k = 0; k < a->a; ++k)
      ++this->a->a;
#pragma omp parallel shared(b)
#pragma omp simd aligned(this->b: 4)
    for (int k = 0; k < a->a; ++k)
      ++a->a;
  }
};
S8 s8(0);

#endif // HEADER