/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/combiner.h"

#include <assert.h>
#include <inttypes.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"

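// A combiner is a lock-free mechanism for serializing work: closures
// scheduled against it are pushed onto a multi-producer single-consumer
// queue and executed one at a time by whichever exec_ctx currently "owns"
// the combiner, so callbacks never run concurrently and no thread ever
// blocks waiting for the lock.
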
grpc_core::DebugOnlyTraceFlag grpc_combiner_trace(false, "combiner");

#define GRPC_COMBINER_TRACE(fn)          \
  do {                                   \
    if (grpc_combiner_trace.enabled()) { \
      fn;                                \
    }                                    \
  } while (0)

#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2

struct grpc_combiner {
  grpc_combiner* next_combiner_on_this_exec_ctx;
  grpc_closure_scheduler scheduler;
  grpc_closure_scheduler finally_scheduler;
  gpr_mpscq queue;
  // Either a pointer to the initiating exec_ctx, if that is the only
  // exec_ctx that has ever queued to this combiner, or NULL. When non-null
  // it must not be dereferenced (the initiating exec_ctx may have gone out
  // of scope); it is only ever compared against for identity.
  gpr_atm initiating_exec_ctx_or_null;
  // State is:
  // low bit - set (STATE_UNORPHANED) until the combiner is orphaned
  // other bits - number of items queued on the lock, counted in units of
  //              STATE_ELEM_COUNT_LOW_BIT
  gpr_atm state;
  bool time_to_execute_final_list;
  grpc_closure_list final_list;
  grpc_closure offload;
  gpr_refcount refs;
};

static void combiner_run(grpc_closure* closure, grpc_error* error);
static void combiner_exec(grpc_closure* closure, grpc_error* error);
static void combiner_finally_exec(grpc_closure* closure, grpc_error* error);

static const grpc_closure_scheduler_vtable scheduler = {
    combiner_run, combiner_exec, "combiner:immediately"};
static const grpc_closure_scheduler_vtable finally_scheduler = {
    combiner_finally_exec, combiner_finally_exec, "combiner:finally"};

static void offload(void* arg, grpc_error* error);

grpc_combiner* grpc_combiner_create(void) {
  grpc_combiner* lock = static_cast<grpc_combiner*>(gpr_zalloc(sizeof(*lock)));
  gpr_ref_init(&lock->refs, 1);
  lock->scheduler.vtable = &scheduler;
  lock->finally_scheduler.vtable = &finally_scheduler;
  gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
  gpr_mpscq_init(&lock->queue);
  grpc_closure_list_init(&lock->final_list);
  GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p create", lock));
  return lock;
}
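
// Usage sketch (illustrative only; my_cb and my_arg are hypothetical
// placeholders, and the caller is assumed to already be running under a
// grpc_core::ExecCtx):
//
//   grpc_combiner* lock = grpc_combiner_create();
//   grpc_closure done;
//   GRPC_CLOSURE_INIT(&done, my_cb, my_arg, grpc_combiner_scheduler(lock));
//   GRPC_CLOSURE_SCHED(&done, GRPC_ERROR_NONE);  // runs serialized on lock
//   GRPC_COMBINER_UNREF(lock, "example");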

static void really_destroy(grpc_combiner* lock) {
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p really_destroy", lock));
  GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
  gpr_mpscq_destroy(&lock->queue);
  gpr_free(lock);
}

static void start_destroy(grpc_combiner* lock) {
  gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_INFO, "C:%p start_destroy old_state=%" PRIdPTR, lock, old_state));
  if (old_state == 1) {
    really_destroy(lock);
  }
}

#ifndef NDEBUG
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)                                \
  if (grpc_combiner_trace.enabled()) {                                     \
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,                            \
            "C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op),        \
            gpr_atm_no_barrier_load(&lock->refs.count),                    \
            gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason); \
  }
#else
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)
#endif

void grpc_combiner_unref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
  GRPC_COMBINER_DEBUG_SPAM("UNREF", -1);
  if (gpr_unref(&lock->refs)) {
    start_destroy(lock);
  }
}

grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
  GRPC_COMBINER_DEBUG_SPAM("  REF", 1);
  gpr_ref(&lock->refs);
  return lock;
}

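// Each exec_ctx keeps a singly-linked FIFO of combiners that have work
// pending on it (active_combiner .. last_combiner). push_last_on_exec_ctx
// appends a combiner to that list; push_first_on_exec_ctx prepends one,
// which is used to keep draining the combiner we are currently working on.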
static void push_last_on_exec_ctx(grpc_combiner* lock) {
  lock->next_combiner_on_this_exec_ctx = nullptr;
  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
    grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
        grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
  } else {
    grpc_core::ExecCtx::Get()
        ->combiner_data()
        ->last_combiner->next_combiner_on_this_exec_ctx = lock;
    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
  }
}

static void push_first_on_exec_ctx(grpc_combiner* lock) {
  lock->next_combiner_on_this_exec_ctx =
      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
  grpc_core::ExecCtx::Get()->combiner_data()->active_combiner = lock;
  if (lock->next_combiner_on_this_exec_ctx == nullptr) {
    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
  }
}

#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
  ((grpc_combiner*)(((char*)((closure)->scheduler)) -            \
                    offsetof(grpc_combiner, scheduler_name)))

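// Schedule a closure on the combiner. The state counter is bumped by
// STATE_ELEM_COUNT_LOW_BIT before the push; if the previous value was
// exactly STATE_UNORPHANED (1), the queue was empty, so this exec_ctx takes
// ownership of the lock and adds it to its list of active combiners.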
static void combiner_exec(grpc_closure* cl, grpc_error* error) {
  GPR_TIMER_SCOPE("combiner.execute", 0);
  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS();
  grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
  gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
                              "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
                              lock, cl, last));
  if (last == 1) {
    GRPC_STATS_INC_COMBINER_LOCKS_INITIATED();
    GPR_TIMER_MARK("combiner.initiated", 0);
    gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
                             (gpr_atm)grpc_core::ExecCtx::Get());
    // first element on this list: add it to the list of combiner locks
    // executing within this exec_ctx
    push_last_on_exec_ctx(lock);
  } else {
    // there may be a race writing initiating_exec_ctx_or_null here: if that
    // happens we may delay an offload by an action or two, which is harmless
    gpr_atm initiator =
        gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null);
    if (initiator != 0 && initiator != (gpr_atm)grpc_core::ExecCtx::Get()) {
      gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0);
    }
  }
  GPR_ASSERT(last & STATE_UNORPHANED);  // ensure lock has not been destroyed
  assert(cl->cb);
  cl->error_data.error = error;
  gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
}

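// Advance the exec_ctx's combiner list past the current active combiner.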
static void move_next() {
  grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
      grpc_core::ExecCtx::Get()
          ->combiner_data()
          ->active_combiner->next_combiner_on_this_exec_ctx;
  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = nullptr;
  }
}

static void offload(void* arg, grpc_error* error) {
  grpc_combiner* lock = static_cast<grpc_combiner*>(arg);
  push_last_on_exec_ctx(lock);
}

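// Hand the combiner's remaining work to the executor: drop it from this
// exec_ctx's list and schedule the offload closure, which re-queues the
// combiner on whichever executor thread picks it up.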
static void queue_offload(grpc_combiner* lock) {
  GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED();
  move_next();
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p queue_offload", lock));
  GRPC_CLOSURE_SCHED(&lock->offload, GRPC_ERROR_NONE);
}

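// Called from grpc_core::ExecCtx::Flush. Executes at most one step of the
// currently active combiner - either a single queued closure or the whole
// final list - then updates the state word. Returns false when this
// exec_ctx has no active combiner left to work on.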
bool grpc_combiner_continue_exec_ctx() {
  GPR_TIMER_SCOPE("combiner.continue_exec_ctx", 0);
  grpc_combiner* lock =
      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
  if (lock == nullptr) {
    return false;
  }

  bool contended =
      gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0;

  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
                              "C:%p grpc_combiner_continue_exec_ctx "
                              "contended=%d "
                              "exec_ctx_ready_to_finish=%d "
                              "time_to_execute_final_list=%d",
                              lock, contended,
                              grpc_core::ExecCtx::Get()->IsReadyToFinish(),
                              lock->time_to_execute_final_list));

  if (contended && grpc_core::ExecCtx::Get()->IsReadyToFinish() &&
      grpc_executor_is_threaded()) {
    GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
    // this execution context wants to move on: schedule remaining work to be
    // picked up on the executor
    queue_offload(lock);
    return true;
  }

  if (!lock->time_to_execute_final_list ||
      // peek to see if something new has shown up, and execute that with
      // priority
      (gpr_atm_acq_load(&lock->state) >> 1) > 1) {
    gpr_mpscq_node* n = gpr_mpscq_pop(&lock->queue);
    GRPC_COMBINER_TRACE(
        gpr_log(GPR_INFO, "C:%p maybe_finish_one n=%p", lock, n));
    if (n == nullptr) {
      // the queue is transiently inconsistent (a producer has bumped the
      // state counter but not yet finished its push): use this as a cue to
      // go off and do something else for a while (and come back later)
      GPR_TIMER_MARK("delay_busy", 0);
      queue_offload(lock);
      return true;
    }
    GPR_TIMER_SCOPE("combiner.exec1", 0);
    grpc_closure* cl = reinterpret_cast<grpc_closure*>(n);
    grpc_error* cl_err = cl->error_data.error;
#ifndef NDEBUG
    cl->scheduled = false;
#endif
    cl->cb(cl->cb_arg, cl_err);
    GRPC_ERROR_UNREF(cl_err);
  } else {
    grpc_closure* c = lock->final_list.head;
    GPR_ASSERT(c != nullptr);
    grpc_closure_list_init(&lock->final_list);
    int loops = 0;
    while (c != nullptr) {
      GPR_TIMER_SCOPE("combiner.exec_1final", 0);
      GRPC_COMBINER_TRACE(
          gpr_log(GPR_INFO, "C:%p execute_final[%d] c=%p", lock, loops, c));
      grpc_closure* next = c->next_data.next;
      grpc_error* error = c->error_data.error;
#ifndef NDEBUG
      c->scheduled = false;
#endif
      c->cb(c->cb_arg, error);
      GRPC_ERROR_UNREF(error);
      c = next;
      ++loops;  // count executed final closures so the trace index is accurate
    }
  }

  GPR_TIMER_MARK("unref", 0);
  move_next();
  lock->time_to_execute_final_list = false;
  gpr_atm old_state =
      gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
  GRPC_COMBINER_TRACE(
      gpr_log(GPR_INFO, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \
  (((orphaned) ? 0 : STATE_UNORPHANED) |    \
   ((elem_count)*STATE_ELEM_COUNT_LOW_BIT))
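  // For example, OLD_STATE_WAS(false, 2) ==
  // STATE_UNORPHANED | 2 * STATE_ELEM_COUNT_LOW_BIT == 5: unorphaned with
  // two queued items before the decrement above.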
  // Depending on what the previous state was, we need to perform different
  // actions.
  switch (old_state) {
    default:
      // we have multiple queued work items: just continue executing them
      break;
    case OLD_STATE_WAS(false, 2):
    case OLD_STATE_WAS(true, 2):
      // we're down to one queued item: if it's the final list we should do that
      if (!grpc_closure_list_empty(lock->final_list)) {
        lock->time_to_execute_final_list = true;
      }
      break;
    case OLD_STATE_WAS(false, 1):
      // item count hit zero and the lock is unorphaned --> now unlocked;
      // leave it alive
      return true;
    case OLD_STATE_WAS(true, 1):
      // item count hit zero and the lock is orphaned --> unlocked and
      // orphaned; free it
      really_destroy(lock);
      return true;
    case OLD_STATE_WAS(false, 0):
    case OLD_STATE_WAS(true, 0):
      // these values are illegal - representing an already unlocked or
      // deleted lock
      GPR_UNREACHABLE_CODE(return true);
  }
  push_first_on_exec_ctx(lock);
  return true;
}

static void enqueue_finally(void* closure, grpc_error* error);

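// "Finally" closures run only while the combiner lock is held, after all
// regular queued items have drained. If the caller is not currently running
// inside this combiner, the closure is bounced through the regular combiner
// scheduler (via enqueue_finally) so it is added to final_list under the
// lock.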
static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
  GPR_TIMER_SCOPE("combiner.execute_finally", 0);
  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS();
  grpc_combiner* lock =
      COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_INFO, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock, closure,
      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
    GPR_TIMER_MARK("slowpath", 0);
    GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure,
                                           grpc_combiner_scheduler(lock)),
                       error);
    return;
  }

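  // The whole final_list is accounted for as a single item in the state
  // counter, so only bump it on the empty -> non-empty transition.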
  if (grpc_closure_list_empty(lock->final_list)) {
    gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
  }
  grpc_closure_list_append(&lock->final_list, closure, error);
}

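// combiner_run is the scheduler's run-immediately entry point: it may only
// be invoked while this exec_ctx already holds the combiner lock (it asserts
// that the lock is the active combiner).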
static void combiner_run(grpc_closure* closure, grpc_error* error) {
  grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler);
#ifndef NDEBUG
  closure->scheduled = false;
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_DEBUG,
      "Combiner:%p grpc_combiner_run closure:%p created [%s:%d] run [%s:%d]",
      lock, closure, closure->file_created, closure->line_created,
      closure->file_initiated, closure->line_initiated));
#endif
  GPR_ASSERT(grpc_core::ExecCtx::Get()->combiner_data()->active_combiner ==
             lock);
  closure->cb(closure->cb_arg, error);
  GRPC_ERROR_UNREF(error);
}

static void enqueue_finally(void* closure, grpc_error* error) {
  combiner_finally_exec(static_cast<grpc_closure*>(closure),
                        GRPC_ERROR_REF(error));
}

grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* combiner) {
  return &combiner->scheduler;
}

grpc_closure_scheduler* grpc_combiner_finally_scheduler(
    grpc_combiner* combiner) {
  return &combiner->finally_scheduler;
}
    376