// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_loop.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "base/debug/trace_event.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_pump_default.h"
#include "base/metrics/histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_local.h"
#include "base/time/time.h"
#include "base/tracked_objects.h"

#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
#endif
#if defined(OS_POSIX) && !defined(OS_IOS)
#include "base/message_loop/message_pump_libevent.h"
#endif
#if defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif
#if defined(USE_GLIB)
#include "base/message_loop/message_pump_glib.h"
#endif

namespace base {

namespace {

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists.  This should be safe and free of static constructors.
LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
    LAZY_INSTANCE_INITIALIZER;

// Logical events for Histogram profiling. Run with -message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
#if !defined(OS_NACL)
const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
const int kLeastNonZeroMessageId = 1;
const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;

// Provide a macro that takes an expression (such as a constant, or macro
// constant) and creates a pair to initialize an array of pairs.  In this case,
// our pair consists of the expression's value and the "stringized" version
// of the expression (i.e., the expression put in quotes).  For example, if
// we have:
//    #define FOO 2
//    #define BAR 5
// then the following:
//    VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
// will expand to:
//   {7, "FOO + BAR"}
// We use the resulting array as an argument to our histogram, which reads the
// number as a bucket identifier, and proceeds to use the corresponding name
// in the pair (i.e., the quoted string) when printing out a histogram.
#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},

const LinearHistogram::DescriptionPair event_descriptions_[] = {
  // Provide some pretty print capability in our histogram for our internal
  // messages.

  // A few events we handle (kindred to messages), and used to profile actions.
  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)

  {-1, NULL}  // The list must be null terminated, per API to histogram.
};
#endif  // !defined(OS_NACL)

bool enable_histogrammer_ = false;

MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;

// Returns true if MessagePump::ScheduleWork() must be called one
// time for every task that is added to the MessageLoop incoming queue.
bool AlwaysNotifyPump(MessageLoop::Type type) {
#if defined(OS_ANDROID)
  return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
#else
  return false;
#endif
}

#if defined(OS_IOS)
typedef MessagePumpIOSForIO MessagePumpForIO;
#elif defined(OS_NACL)
typedef MessagePumpDefault MessagePumpForIO;
#elif defined(OS_POSIX)
typedef MessagePumpLibevent MessagePumpForIO;
#endif

MessagePumpForIO* ToPumpIO(MessagePump* pump) {
  return static_cast<MessagePumpForIO*>(pump);
}

}  // namespace

//------------------------------------------------------------------------------

MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}

//------------------------------------------------------------------------------

MessageLoop::MessageLoop(Type type)
    : type_(type),
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  Init();

  pump_ = CreateMessagePumpForType(type).Pass();
}

MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
    : pump_(pump.Pass()),
      type_(TYPE_CUSTOM),
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  DCHECK(pump_.get());
  Init();
}
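
// Illustrative sketch (not part of the original source): a thread that needs a
// bespoke pump can hand one to the TYPE_CUSTOM constructor above.  |MyPump| is
// a hypothetical MessagePump subclass used only for this example:
//
//   scoped_ptr<MessagePump> pump(new MyPump());
//   MessageLoop loop(pump.Pass());
//
// The loop takes ownership of the pump and reports IsType(TYPE_CUSTOM).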

MessageLoop::~MessageLoop() {
  DCHECK_EQ(this, current());

  DCHECK(!run_loop_);

  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon).  We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks.  Normally, we should only pass through this loop once or twice.  If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn.  Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  thread_task_runner_handle_.reset();

  // Tell the incoming queue that we are dying.
  incoming_task_queue_->WillDestroyCurrentMessageLoop();
  incoming_task_queue_ = NULL;
  message_loop_proxy_ = NULL;

  // OK, now make it so that no one can find us.
  lazy_tls_ptr.Pointer()->Set(NULL);
}

// static
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

// static
void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}

// static
bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
  if (message_pump_for_ui_factory_)
    return false;

  message_pump_for_ui_factory_ = factory;
  return true;
}
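
// Illustrative sketch (not part of the original source): an embedder that
// needs a platform-specific TYPE_UI pump registers a factory once, before the
// first TYPE_UI MessageLoop is created.  |CreateEmbedderUIPump| is a
// hypothetical function matching the MessagePumpFactory signature:
//
//   bool registered =
//       MessageLoop::InitMessagePumpForUIFactory(&CreateEmbedderUIPump);
//   DCHECK(registered);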

// static
scoped_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
// TODO(rvargas): Get rid of the OS guards.
#if defined(USE_GLIB) && !defined(OS_NACL)
  typedef MessagePumpGlib MessagePumpForUI;
#elif defined(OS_LINUX) && !defined(OS_NACL)
  typedef MessagePumpLibevent MessagePumpForUI;
#endif

#if defined(OS_IOS) || defined(OS_MACOSX)
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(MessagePumpMac::Create())
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>()
#else
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(new MessagePumpForUI())
#endif

  if (type == MessageLoop::TYPE_UI) {
    if (message_pump_for_ui_factory_)
      return message_pump_for_ui_factory_();
    return MESSAGE_PUMP_UI;
  }
  if (type == MessageLoop::TYPE_IO)
    return scoped_ptr<MessagePump>(new MessagePumpForIO());

#if defined(OS_ANDROID)
  if (type == MessageLoop::TYPE_JAVA)
    return scoped_ptr<MessagePump>(new MessagePumpForUI());
#endif

  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
  return scoped_ptr<MessagePump>(new MessagePumpDefault());
}

void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}
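
// Illustrative sketch (not part of the original source): a typical observer
// drops per-thread references just before the loop goes away.  |Watcher| is a
// hypothetical class:
//
//   class Watcher : public MessageLoop::DestructionObserver {
//    public:
//     virtual void WillDestroyCurrentMessageLoop() OVERRIDE {
//       // Drop references to the dying loop here.
//     }
//   };
//
//   Watcher watcher;
//   MessageLoop::current()->AddDestructionObserver(&watcher);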

void MessageLoop::PostTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), true);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, true);
}

void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), false);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, false);
}
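
// Illustrative sketch (not part of the original source): callers usually reach
// the posting methods above through MessageLoop::current() or a task runner.
// |DoWork| is a hypothetical free function:
//
//   MessageLoop::current()->PostTask(FROM_HERE, Bind(&DoWork));
//   MessageLoop::current()->PostDelayedTask(
//       FROM_HERE, Bind(&DoWork), TimeDelta::FromSeconds(5));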

void MessageLoop::Run() {
  RunLoop run_loop;
  run_loop.Run();
}

void MessageLoop::RunUntilIdle() {
  RunLoop run_loop;
  run_loop.RunUntilIdle();
}

void MessageLoop::QuitWhenIdle() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    run_loop_->quit_when_idle_received_ = true;
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

void MessageLoop::QuitNow() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    pump_->Quit();
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

// static
Closure MessageLoop::QuitWhenIdleClosure() {
  return Bind(&QuitCurrentWhenIdle);
}
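
// Illustrative sketch (not part of the original source): the closure returned
// above is typically posted to a loop that is inside Run(), so the loop quits
// once all pending work is done:
//
//   loop->PostTask(FROM_HERE, MessageLoop::QuitWhenIdleClosure());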

void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (allowed) {
    // Kick the native pump just in case we enter an OS-driven nested message
    // loop.
    pump_->ScheduleWork();
  }
  nestable_tasks_allowed_ = allowed;
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.RemoveObserver(task_observer);
}

bool MessageLoop::is_running() const {
  DCHECK_EQ(this, current());
  return run_loop_ != NULL;
}

bool MessageLoop::IsHighResolutionTimerEnabledForTesting() {
  return incoming_task_queue_->IsHighResolutionTimerEnabledForTesting();
}

bool MessageLoop::IsIdleForTesting() {
  // We only check the incoming queue, since we don't want to lock the work
  // queue.
  return incoming_task_queue_->IsIdleForTesting();
}

//------------------------------------------------------------------------------

void MessageLoop::Init() {
  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

  incoming_task_queue_ = new internal::IncomingTaskQueue(this);
  message_loop_proxy_ =
      new internal::MessageLoopProxyImpl(incoming_task_queue_);
  thread_task_runner_handle_.reset(
      new ThreadTaskRunnerHandle(message_loop_proxy_));
}

void MessageLoop::RunHandler() {
  DCHECK_EQ(this, current());

  StartHistogrammer();

#if defined(OS_WIN)
  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
    static_cast<MessagePumpForUI*>(pump_.get())->
        RunWithDispatcher(this, run_loop_->dispatcher_);
    return;
  }
#endif

  pump_->Run(this);
}

bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (run_loop_->run_depth_ != 1)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  PendingTask pending_task = deferred_non_nestable_work_queue_.front();
  deferred_non_nestable_work_queue_.pop();

  RunTask(pending_task);
  return true;
}

void MessageLoop::RunTask(const PendingTask& pending_task) {
  tracked_objects::TrackedTime start_time =
      tracked_objects::ThreadData::NowForStartOfRun(pending_task.birth_tally);

  TRACE_EVENT_FLOW_END1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
      "MessageLoop::PostTask", TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
      "queue_duration",
      (start_time - pending_task.EffectiveTimePosted()).InMilliseconds());
  // When tracing memory for posted tasks it's more valuable to attribute the
  // memory allocations to the source function than generically to "RunTask".
  TRACE_EVENT_WITH_MEMORY_TAG2(
      "toplevel", "MessageLoop::RunTask",
      pending_task.posted_from.function_name(),  // Name for memory tracking.
      "src_file", pending_task.posted_from.file_name(),
      "src_func", pending_task.posted_from.function_name());

  DCHECK(nestable_tasks_allowed_);
  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  // Before running the task, store the program counter where it was posted
  // and deliberately alias it to ensure it is on the stack if the task
  // crashes. Be careful not to assume that the variable itself will have the
  // expected value when displayed by the optimizer in an optimized build.
  // Look at a memory dump of the stack.
  const void* program_counter =
      pending_task.posted_from.program_counter();
  debug::Alias(&program_counter);

  HistogramEvent(kTaskRunEvent);

  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(pending_task));
  pending_task.task.Run();
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    DidProcessTask(pending_task));

  tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
      start_time, tracked_objects::ThreadData::NowForEndOfRun());

  nestable_tasks_allowed_ = true;
}

bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
    RunTask(pending_task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(pending_task);
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(pending_task);
}

bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = work_queue_.front();
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(pending_task);
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete the task regardless of valgrind status. It's
  // not completely clear why we want to leak them in the loops above.  This
  // code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior.  See TODO above about deleting all tasks
  // when it's safe.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}

uint64 MessageLoop::GetTaskTraceID(const PendingTask& task) {
  return (static_cast<uint64>(task.sequence_num) << 32) |
         ((static_cast<uint64>(reinterpret_cast<intptr_t>(this)) << 32) >> 32);
}
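
// The ID above packs the task's sequence number into the high 32 bits and the
// low 32 bits of this loop's address into the low 32 bits.  For example
// (hypothetical values): sequence_num 0x2a on a loop at 0x00007f5a1234abcd
// yields 0x0000002a1234abcd.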

void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from the incoming queue to
  // |*work_queue| by waiting until the last minute (|*work_queue| is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (work_queue_.empty())
    incoming_task_queue_->ReloadWorkQueue(&work_queue_);
}

void MessageLoop::ScheduleWork(bool was_empty) {
  // The Android UI message loop needs to get notified each time
  // a task is added to the incoming queue.
  if (was_empty || AlwaysNotifyPump(type_))
    pump_->ScheduleWork();
}

//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.

void MessageLoop::StartHistogrammer() {
#if !defined(OS_NACL)  // NaCl build has no metrics code.
  if (enable_histogrammer_ && !message_histogram_
      && StatisticsRecorder::IsActive()) {
    DCHECK(!thread_name_.empty());
    message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
        "MsgLoop:" + thread_name_,
        kLeastNonZeroMessageId, kMaxMessageId,
        kNumberOfDistinctMessagesDisplayed,
        message_histogram_->kHexRangePrintingFlag,
        event_descriptions_);
  }
#endif
}

void MessageLoop::HistogramEvent(int event) {
#if !defined(OS_NACL)
  if (message_histogram_)
    message_histogram_->Add(event);
#endif
}

bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().task.Equals(pending_task.task))
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}

bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind," there will be a lot of tasks in the delayed work
  // queue that are ready to run.  To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again.  As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}

bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  return false;
}

void MessageLoop::GetQueueingInformation(size_t* queue_size,
                                         TimeDelta* queueing_delay) {
  *queue_size = work_queue_.size();
  if (*queue_size == 0) {
    *queueing_delay = TimeDelta();
    return;
  }

  const PendingTask& next_to_run = work_queue_.front();
  tracked_objects::Duration duration =
      tracked_objects::TrackedTime::Now() - next_to_run.EffectiveTimePosted();
  *queueing_delay = TimeDelta::FromMilliseconds(duration.InMilliseconds());
}

void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                     void(*deleter)(const void*),
                                     const void* object) {
  PostNonNestableTask(from_here, Bind(deleter, object));
}

void MessageLoop::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  PostNonNestableTask(from_here, Bind(releaser, object));
}
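
// Illustrative sketch (not part of the original source): the two internal
// helpers above back the DeleteSoon()/ReleaseSoon() templates declared in the
// header, which callers use directly, e.g.:
//
//   loop->DeleteSoon(FROM_HERE, owned_object);   // owned_object is a T*
//   loop->ReleaseSoon(FROM_HERE, ref_counted);   // ref_counted is ref-counted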

#if !defined(OS_NACL)
//------------------------------------------------------------------------------
// MessageLoopForUI

#if defined(OS_ANDROID)
void MessageLoopForUI::Start() {
  // No histogram support for the UI message loop, as it is managed by the
  // Java side.
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if defined(OS_WIN)
void MessageLoopForUI::AddObserver(Observer* observer) {
  static_cast<MessagePumpWin*>(pump_.get())->AddObserver(observer);
}

void MessageLoopForUI::RemoveObserver(Observer* observer) {
  static_cast<MessagePumpWin*>(pump_.get())->RemoveObserver(observer);
}
#endif  // defined(OS_WIN)

#if defined(USE_OZONE) || (defined(OS_CHROMEOS) && !defined(USE_GLIB))
bool MessageLoopForUI::WatchFileDescriptor(
    int fd,
    bool persistent,
    MessagePumpLibevent::Mode mode,
    MessagePumpLibevent::FileDescriptorWatcher *controller,
    MessagePumpLibevent::Watcher *delegate) {
  return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL)

//------------------------------------------------------------------------------
// MessageLoopForIO

#if !defined(OS_NACL)
void MessageLoopForIO::AddIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->AddIOObserver(io_observer);
}

void MessageLoopForIO::RemoveIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->RemoveIOObserver(io_observer);
}

#if defined(OS_WIN)
void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
}
#elif defined(OS_POSIX)
bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher *controller,
                                           Watcher *delegate) {
  return ToPumpIO(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
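
// Illustrative sketch (not part of the original source): watching a socket for
// readability on the thread that owns this MessageLoopForIO.  |my_fd| and
// |my_watcher| (a Watcher implementation) are hypothetical:
//
//   MessageLoopForIO::FileDescriptorWatcher controller;
//   MessageLoopForIO::current()->WatchFileDescriptor(
//       my_fd, true /* persistent */, MessageLoopForIO::WATCH_READ,
//       &controller, &my_watcher);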
#endif

#endif  // !defined(OS_NACL)

}  // namespace base