// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER

volatile double g;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};
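
// S<T> supplies exactly the hooks the reduction clauses in this file need:
// operator& backs reduction(&:var), the conversion operator T() gives
// reduction(&&:var1) something to test for truth, and the non-trivial
// constructor/destructor let the CHECK lines verify that the privatized
// copies are constructed and destroyed inside the outlined region.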

// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[ATOMIC_REDUCE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOCK:@.+]] = common global [8 x i32] zeroinitializer

template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var = T(), t_var1;
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}
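
// Conceptually, the outlined parallel region that the CHECK lines below
// verify behaves like this hand-written sketch (names and spellings are
// illustrative, not the real ABI):
//
//   T t_var_priv = T();                  // identity of +
//   S<T> var_priv, var1_priv;            // default-constructed privates
//   T t_var1_priv = /* largest T */;     // identity of min
//   /* ...run the section bodies on the private copies... */
//   switch (__kmpc_reduce_nowait(/* loc, gtid, n, size, list, func, lock */)) {
//   case 1: /* combine privates into the originals */
//           __kmpc_end_reduce_nowait(/* loc, gtid, lock */); break;
//   case 2: /* combine atomically */ break;
//   }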

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global double
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections reduction(+:g)
    {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
    // LAMBDA: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // LAMBDA: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: call void @__kmpc_for_static_init_4(
    g = 1;
    // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(

    // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // LAMBDA: call i32 @__kmpc_reduce(
    // LAMBDA: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // LAMBDA: i32 1, label %[[CASE1:.+]]
    // LAMBDA: i32 2, label %[[CASE2:.+]]
    // LAMBDA: [[CASE1]]
    // LAMBDA: [[G_VAL:%.+]] = load double, double* [[G]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // LAMBDA: store double [[ADD]], double* [[G]]
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[CASE2]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: fadd double
    // LAMBDA: cmpxchg i64*
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[REDUCTION_DONE]]
    // LAMBDA: ret void
#pragma omp section
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
      // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global double
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections reduction(-:g)
    {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
    // BLOCKS: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // BLOCKS: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    g = 1;
    // BLOCKS: call void @__kmpc_for_static_init_4(
    // BLOCKS: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: double* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(

    // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // BLOCKS: call i32 @__kmpc_reduce(
    // BLOCKS: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // BLOCKS: i32 1, label %[[CASE1:.+]]
    // BLOCKS: i32 2, label %[[CASE2:.+]]
    // BLOCKS: [[CASE1]]
    // BLOCKS: [[G_VAL:%.+]] = load double, double* [[G]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // BLOCKS: store double [[ADD]], double* [[G]]
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[CASE2]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: fadd double
    // BLOCKS: cmpxchg i64*
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[REDUCTION_DONE]]
    // BLOCKS: ret void
#pragma omp section
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store double 2.0{{.+}}, double*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    {
    vec[0] = t_var;
    s_arr[0] = var;
    vec[1] = t_var1;
    s_arr[1] = var1;
    }
  }
  return tmain<int>();
#endif
}

// CHECK: define {{.*}}i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, float*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca float,
// CHECK: alloca [[S_FLOAT_TY]],
// CHECK: alloca [[S_FLOAT_TY]],
// CHECK: alloca float,

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]

// CHECK-NOT: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-NOT: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*

// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: call void @__kmpc_for_static_fini(

// CHECK: call void @__kmpc_barrier(

// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [[S_INT_TY]]*, [[S_INT_TY]]*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca i{{[0-9]+}},

// Reduction list for runtime.
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// For the + reduction operation, the initial value of the private variable is 0.
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[T_VAR_PRIV]],
// For the & reduction operation, the initial value of the private variable has all bits set.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR_PRIV]])
// For the && reduction operation, the initial value of the private variable is 1.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR1_PRIV]])
// For the min reduction operation, the initial value of the private variable is the largest representable value.
// CHECK: store i{{[0-9]+}} 2147483647, i{{[0-9]+}}* [[T_VAR1_PRIV]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(
// Skip checks for internal operations.
// CHECK: call void @__kmpc_for_static_fini(

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
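// For this directive n == 4; a sketch using the private names bound in the
// checks below (purely illustrative):
//   void *RedList[4] = {&t_var_priv, &var_priv, &var1_priv, &t_var1_priv};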

// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],

// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
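// The runtime's return value selects the combine strategy: 1 means this
// thread performs the reduction itself and must then call
// __kmpc_end_reduce_nowait, 2 means it must take the atomic path, and 0
// means there is nothing left for it to do. The switch below covers the
// two non-trivial cases.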

// CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
// CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])

// switch(res)
// CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
// CHECK: i32 1, label %[[CASE1:.+]]
// CHECK: i32 2, label %[[CASE2:.+]]
// CHECK: ]

// case 1:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]],
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_REF]],

// var = var.operator &(var_reduction);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1 = var1.operator &&(var1_reduction);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1 = min(t_var1, t_var1_reduction);
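// The min is lowered to a compare and a phi over the two loads, i.e. the
// equivalent of: t_var1 = t_var1 < t_var1_priv ? t_var1 : t_var1_priv;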
// CHECK: [[T_VAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_REF]],
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_REF]],

// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]

// case 2:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]]
// CHECK: atomicrmw add i32* [[T_VAR_REF]], i32 [[T_VAR_PRIV_VAL]] monotonic
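// On the atomic path a plain integer + collapses to a single atomicrmw;
// the class-type reductions below have no atomic equivalent, so they are
// serialized with __kmpc_critical/__kmpc_end_critical instead.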

// var = var.operator &(var_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// var1 = var1.operator &&(var1_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]]
// CHECK: atomicrmw min i32* [[T_VAR1_REF]], i32 [[T_VAR1_PRIV_VAL]] monotonic

// break;
// CHECK: br label %[[RED_DONE]]
// CHECK: [[RED_DONE]]
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: ret void

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
//  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
//  ...
//  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
//  *(Type<n>-1*)rhs[<n>-1]);
// }
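// For this instantiation (n == 4, T == int) the generated function is
// morally equivalent to the following sketch (names and casting style are
// illustrative, not the emitted code):
//
//   static void reduce_func(void *lhs_p, void *rhs_p) {
//     void **lhs = (void **)lhs_p, **rhs = (void **)rhs_p;
//     *(int *)lhs[0] += *(int *)rhs[0];                                    // +
//     *(S<int> *)lhs[1] = *(S<int> *)lhs[1] & *(S<int> *)rhs[1];           // &
//     *(S<int> *)lhs[2] = S<int>(*(S<int> *)lhs[2] && *(S<int> *)rhs[2]);  // &&
//     if (*(int *)rhs[3] < *(int *)lhs[3])                                 // min
//       *(int *)lhs[3] = *(int *)rhs[3];
//   }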
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*

// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*

// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*

// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*

// t_var_lhs += t_var_rhs;
// CHECK: [[T_VAR_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_LHS]],
// CHECK: [[T_VAR_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_RHS]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_LHS]],

// var_lhs = var_lhs.operator &(var_rhs);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_LHS]], [[S_INT_TY]]* dereferenceable(4) [[VAR_RHS]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1_lhs = var1_lhs.operator &&(var1_rhs);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_LHS]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_RHS]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// CHECK: [[T_VAR1_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: [[T_VAR1_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_RHS]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: ret void

#endif