// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_loop.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "base/debug/trace_event.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_pump_default.h"
#include "base/metrics/histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_local.h"
#include "base/time/time.h"
#include "base/tracked_objects.h"

#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
#endif
#if defined(OS_POSIX) && !defined(OS_IOS)
#include "base/message_loop/message_pump_libevent.h"
#endif
#if defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif

#if defined(TOOLKIT_GTK)
#include <gdk/gdk.h>
#include <gdk/gdkx.h>
#endif

namespace base {

namespace {

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists.  This should be safe and free of static constructors.
LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
    LAZY_INSTANCE_INITIALIZER;

// Logical events for Histogram profiling. Run with -message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
const int kLeastNonZeroMessageId = 1;
const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;

// Provide a macro that takes an expression (such as a constant, or macro
// constant) and creates a pair to initialize an array of pairs.  In this case,
// our pair consists of the expression's value, and the "stringized" version
// of the expression (i.e., the expression put in quotes).  For example, if
// we have:
//    #define FOO 2
//    #define BAR 5
// then the following:
//    VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
// will expand to:
//   {7, "FOO + BAR"}
// We use the resulting array as an argument to our histogram, which reads the
// number as a bucket identifier, and proceeds to use the corresponding name
// in the pair (i.e., the quoted string) when printing out a histogram.
#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},

const LinearHistogram::DescriptionPair event_descriptions_[] = {
  // Provide some pretty print capability in our histogram for our internal
  // messages.

  // A few events we handle (kindred to messages), and used to profile actions.
  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)

  {-1, NULL}  // The list must be null terminated, per API to histogram.
};

bool enable_histogrammer_ = false;

MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;

// Returns true if MessagePump::ScheduleWork() must be called one
// time for every task that is added to the MessageLoop incoming queue.
bool AlwaysNotifyPump(MessageLoop::Type type) {
#if defined(OS_ANDROID)
  return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
#else
  return false;
#endif
}

}  // namespace

//------------------------------------------------------------------------------

#if defined(OS_WIN)

// Upon a SEH exception in this thread, it restores the original unhandled
// exception filter.
static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) {
  ::SetUnhandledExceptionFilter(old_filter);
  return EXCEPTION_CONTINUE_SEARCH;
}

// Retrieves a pointer to the current unhandled exception filter. There
// is no standalone getter method.
static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() {
  LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL;
  top_filter = ::SetUnhandledExceptionFilter(0);
  ::SetUnhandledExceptionFilter(top_filter);
  return top_filter;
}

#endif  // defined(OS_WIN)

//------------------------------------------------------------------------------

MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}

//------------------------------------------------------------------------------

MessageLoop::MessageLoop(Type type)
    : type_(type),
      exception_restoration_(false),
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  Init();

  pump_.reset(CreateMessagePumpForType(type));
}

MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
    : pump_(pump.Pass()),
      type_(TYPE_CUSTOM),
      exception_restoration_(false),
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  DCHECK(pump_.get());
  Init();
}

MessageLoop::~MessageLoop() {
  DCHECK_EQ(this, current());

  DCHECK(!run_loop_);

  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon).  We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks.  Normally, we should only pass through this loop once or twice.  If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn.  Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  thread_task_runner_handle_.reset();

  // Tell the incoming queue that we are dying.
  incoming_task_queue_->WillDestroyCurrentMessageLoop();
  incoming_task_queue_ = NULL;
  message_loop_proxy_ = NULL;

  // OK, now make it so that no one can find us.
  lazy_tls_ptr.Pointer()->Set(NULL);
}
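
// Illustrative note on the cleanup loop in the destructor above: deleting a
// queued DeleteSoon task runs the bound object's destructor, and that
// destructor may itself post further deletions, e.g. (hypothetical class,
// not part of this file):
//
//   class Parent {
//     ~Parent() { MessageLoop::current()->DeleteSoon(FROM_HERE, child_); }
//   };
//
// Each pass of the loop drains whatever such cascades added to the queues.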

// static
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

// static
void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}

// static
bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
  if (message_pump_for_ui_factory_)
    return false;

  message_pump_for_ui_factory_ = factory;
  return true;
}
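
// Illustrative sketch of how the factory hook above is typically used by an
// embedder (the function name below is hypothetical, not part of this file):
// install the factory before any TYPE_UI loop exists, and
// CreateMessagePumpForType() below will call it instead of building the
// default UI pump.
//
//   MessagePump* CreateEmbedderUIPump() { return new MessagePumpForUI(); }
//   ...
//   CHECK(MessageLoop::InitMessagePumpForUIFactory(&CreateEmbedderUIPump));
//   MessageLoop ui_loop(MessageLoop::TYPE_UI);  // Pump comes from the factory.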

// static
MessagePump* MessageLoop::CreateMessagePumpForType(Type type) {
// TODO(rvargas): Get rid of the OS guards.
#if defined(OS_WIN)
#define MESSAGE_PUMP_UI new MessagePumpForUI()
#define MESSAGE_PUMP_IO new MessagePumpForIO()
#elif defined(OS_IOS)
#define MESSAGE_PUMP_UI MessagePumpMac::Create()
#define MESSAGE_PUMP_IO new MessagePumpIOSForIO()
#elif defined(OS_MACOSX)
#define MESSAGE_PUMP_UI MessagePumpMac::Create()
#define MESSAGE_PUMP_IO new MessagePumpLibevent()
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI NULL
// ipc_channel_nacl.cc uses a worker thread to do socket reads currently, and
// doesn't require extra support for watching file descriptors.
#define MESSAGE_PUMP_IO new MessagePumpDefault()
#elif defined(OS_POSIX)  // POSIX but not MACOSX.
#define MESSAGE_PUMP_UI new MessagePumpForUI()
#define MESSAGE_PUMP_IO new MessagePumpLibevent()
#else
#error Not implemented
#endif

  if (type == MessageLoop::TYPE_UI) {
    if (message_pump_for_ui_factory_)
      return message_pump_for_ui_factory_();
    return MESSAGE_PUMP_UI;
  }
  if (type == MessageLoop::TYPE_IO)
    return MESSAGE_PUMP_IO;
#if defined(TOOLKIT_GTK)
  if (type == MessageLoop::TYPE_GPU)
    return new MessagePumpX11();
#endif
#if defined(OS_ANDROID)
  if (type == MessageLoop::TYPE_JAVA)
    return MESSAGE_PUMP_UI;
#endif
  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
  return new MessagePumpDefault();
}

void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}

void MessageLoop::PostTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), true);
}

bool MessageLoop::TryPostTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  return incoming_task_queue_->TryAddToIncomingQueue(from_here, task);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, true);
}

void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), false);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, false);
}

void MessageLoop::Run() {
  RunLoop run_loop;
  run_loop.Run();
}

void MessageLoop::RunUntilIdle() {
  RunLoop run_loop;
  run_loop.RunUntilIdle();
}

void MessageLoop::QuitWhenIdle() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    run_loop_->quit_when_idle_received_ = true;
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

void MessageLoop::QuitNow() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    pump_->Quit();
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

// static
Closure MessageLoop::QuitWhenIdleClosure() {
  return Bind(&QuitCurrentWhenIdle);
}
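
// Illustrative sketch of the typical posting/running pattern on a thread that
// owns a MessageLoop (DoSomething is a hypothetical function, not part of
// this file):
//
//   MessageLoop loop(MessageLoop::TYPE_DEFAULT);
//   loop.PostTask(FROM_HERE, Bind(&DoSomething));
//   loop.PostTask(FROM_HERE, MessageLoop::QuitWhenIdleClosure());
//   loop.Run();  // Returns once the queue drains and the quit closure fires.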

void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (allowed) {
    // Kick the native pump just in case we enter an OS-driven nested message
    // loop.
    pump_->ScheduleWork();
  }
  nestable_tasks_allowed_ = allowed;
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.RemoveObserver(task_observer);
}
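
// Illustrative sketch of a TaskObserver (a hypothetical class, not part of
// this file): the loop calls WillProcessTask()/DidProcessTask() around every
// task it runs (see RunTask() below), so an observer can time tasks:
//
//   class TimingObserver : public MessageLoop::TaskObserver {
//    public:
//     virtual void WillProcessTask(const PendingTask& pending_task) OVERRIDE {
//       start_ = TimeTicks::Now();
//     }
//     virtual void DidProcessTask(const PendingTask& pending_task) OVERRIDE {
//       VLOG(1) << "task ran for "
//               << (TimeTicks::Now() - start_).InMilliseconds() << " ms";
//     }
//    private:
//     TimeTicks start_;
//   };
//
//   MessageLoop::current()->AddTaskObserver(&timing_observer);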

bool MessageLoop::is_running() const {
  DCHECK_EQ(this, current());
  return run_loop_ != NULL;
}

bool MessageLoop::IsHighResolutionTimerEnabledForTesting() {
  return incoming_task_queue_->IsHighResolutionTimerEnabledForTesting();
}

bool MessageLoop::IsIdleForTesting() {
  // We only check the incoming queue, since we don't want to lock the work
  // queue.
  return incoming_task_queue_->IsIdleForTesting();
}

void MessageLoop::LockWaitUnLockForTesting(WaitableEvent* caller_wait,
                                           WaitableEvent* caller_signal) {
  incoming_task_queue_->LockWaitUnLockForTesting(caller_wait, caller_signal);
}

//------------------------------------------------------------------------------

void MessageLoop::Init() {
  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

  incoming_task_queue_ = new internal::IncomingTaskQueue(this);
  message_loop_proxy_ =
      new internal::MessageLoopProxyImpl(incoming_task_queue_);
  thread_task_runner_handle_.reset(
      new ThreadTaskRunnerHandle(message_loop_proxy_));
}

// Runs the loop in two different SEH modes:
// exception_restoration_ = false : any unhandled exception goes to the last
// filter that was installed via SetUnhandledExceptionFilter().
// exception_restoration_ = true : any unhandled exception goes to the filter
// that existed before the loop was run.
void MessageLoop::RunHandler() {
#if defined(OS_WIN)
  if (exception_restoration_) {
    RunInternalInSEHFrame();
    return;
  }
#endif

  RunInternal();
}

#if defined(OS_WIN)
__declspec(noinline) void MessageLoop::RunInternalInSEHFrame() {
  LPTOP_LEVEL_EXCEPTION_FILTER current_filter = GetTopSEHFilter();
  __try {
    RunInternal();
  } __except(SEHFilter(current_filter)) {
  }
  return;
}
#endif

void MessageLoop::RunInternal() {
  DCHECK_EQ(this, current());

  StartHistogrammer();

#if !defined(OS_MACOSX) && !defined(OS_ANDROID) && \
    !defined(USE_GTK_MESSAGE_PUMP)
  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
    static_cast<MessagePumpForUI*>(pump_.get())->
        RunWithDispatcher(this, run_loop_->dispatcher_);
    return;
  }
#endif

  pump_->Run(this);
}

bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (run_loop_->run_depth_ != 1)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  PendingTask pending_task = deferred_non_nestable_work_queue_.front();
  deferred_non_nestable_work_queue_.pop();

  RunTask(pending_task);
  return true;
}

void MessageLoop::RunTask(const PendingTask& pending_task) {
  tracked_objects::TrackedTime start_time =
      tracked_objects::ThreadData::NowForStartOfRun(pending_task.birth_tally);

  TRACE_EVENT_FLOW_END1("task", "MessageLoop::PostTask",
      TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
      "queue_duration",
      (start_time - pending_task.EffectiveTimePosted()).InMilliseconds());
  // When tracing memory for posted tasks it's more valuable to attribute the
  // memory allocations to the source function than generically to "RunTask".
  TRACE_EVENT_WITH_MEMORY_TAG2(
      "task", "MessageLoop::RunTask",
      pending_task.posted_from.function_name(),  // Name for memory tracking.
      "src_file", pending_task.posted_from.file_name(),
      "src_func", pending_task.posted_from.function_name());

  DCHECK(nestable_tasks_allowed_);
  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  // Before running the task, store the program counter where it was posted
  // and deliberately alias it to ensure it is on the stack if the task
  // crashes. Be careful not to assume that the variable itself will have the
  // expected value when displayed by the optimizer in an optimized build.
  // Look at a memory dump of the stack.
  const void* program_counter =
      pending_task.posted_from.program_counter();
  debug::Alias(&program_counter);

  HistogramEvent(kTaskRunEvent);

  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(pending_task));
  pending_task.task.Run();
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    DidProcessTask(pending_task));

  tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
      start_time, tracked_objects::ThreadData::NowForEndOfRun());

  nestable_tasks_allowed_ = true;
}

bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
    RunTask(pending_task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(pending_task);
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(pending_task);
}

bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = work_queue_.front();
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(pending_task);
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete the task regardless of valgrind status. It's
  // not completely clear why we want to leak them in the loops above.  This
  // code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior.  See TODO above about deleting all tasks
  // when it's safe.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}

uint64 MessageLoop::GetTaskTraceID(const PendingTask& task) {
  return (static_cast<uint64>(task.sequence_num) << 32) |
         ((static_cast<uint64>(reinterpret_cast<intptr_t>(this)) << 32) >> 32);
}
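
// Illustrative example of the packing in GetTaskTraceID() above: for a task
// with sequence_num 7 on a loop whose pointer truncates to 0x12345678, the ID
// is (7ULL << 32) | 0x12345678 == 0x0000000712345678, i.e. the sequence
// number in the high 32 bits and the low 32 bits of |this| in the low half.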

void MessageLoop::ReloadWorkQueue() {
  // We can improve the performance of loading tasks from the incoming queue
  // into |*work_queue| by waiting until the last minute (|*work_queue| is
  // empty) to load. That reduces the number of locks-per-task significantly
  // when our queues get large.
  if (work_queue_.empty())
    incoming_task_queue_->ReloadWorkQueue(&work_queue_);
}
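
// Illustrative example of the effect of the lazy reload above: if 1,000 tasks
// are posted while this loop is busy, they accumulate in
// |incoming_task_queue_| and are later swapped into |work_queue_| under a
// single lock acquisition once |work_queue_| runs dry, rather than taking the
// lock once per task.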

void MessageLoop::ScheduleWork(bool was_empty) {
  // The Android UI message loop needs to get notified each time
  // a task is added to the incoming queue.
  if (was_empty || AlwaysNotifyPump(type_))
    pump_->ScheduleWork();
}

//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.

void MessageLoop::StartHistogrammer() {
#if !defined(OS_NACL)  // NaCl build has no metrics code.
  if (enable_histogrammer_ && !message_histogram_
      && StatisticsRecorder::IsActive()) {
    DCHECK(!thread_name_.empty());
    message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
        "MsgLoop:" + thread_name_,
        kLeastNonZeroMessageId, kMaxMessageId,
        kNumberOfDistinctMessagesDisplayed,
        message_histogram_->kHexRangePrintingFlag,
        event_descriptions_);
  }
#endif
}

void MessageLoop::HistogramEvent(int event) {
#if !defined(OS_NACL)
  if (message_histogram_)
    message_histogram_->Add(event);
#endif
}

bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().task.Equals(pending_task.task))
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}

bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind," there will be a lot of tasks in the delayed work
  // queue that are ready to run.  To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again.  As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.
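  //
  // Illustrative example: if 50 delayed tasks became ready while the thread
  // was busy, the first call below refreshes |recent_time_| once via
  // TimeTicks::Now(); subsequent calls see next_run_time <= recent_time_ and
  // run their task without another clock read, amortizing the cost of Now()
  // across the backlog.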

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}

bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  return false;
}

void MessageLoop::GetQueueingInformation(size_t* queue_size,
                                         TimeDelta* queueing_delay) {
  *queue_size = work_queue_.size();
  if (*queue_size == 0) {
    *queueing_delay = TimeDelta();
    return;
  }

  const PendingTask& next_to_run = work_queue_.front();
  tracked_objects::Duration duration =
      tracked_objects::TrackedTime::Now() - next_to_run.EffectiveTimePosted();
  *queueing_delay = TimeDelta::FromMilliseconds(duration.InMilliseconds());
}

void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                     void(*deleter)(const void*),
                                     const void* object) {
  PostNonNestableTask(from_here, Bind(deleter, object));
}

void MessageLoop::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  PostNonNestableTask(from_here, Bind(releaser, object));
}

//------------------------------------------------------------------------------
// MessageLoopForUI

#if defined(OS_ANDROID)
void MessageLoopForUI::Start() {
  // No Histogram support for UI message loop as it is managed by Java side
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if !defined(OS_MACOSX) && !defined(OS_NACL) && !defined(OS_ANDROID)
void MessageLoopForUI::AddObserver(Observer* observer) {
  pump_ui()->AddObserver(observer);
}

void MessageLoopForUI::RemoveObserver(Observer* observer) {
  pump_ui()->RemoveObserver(observer);
}

#endif  //  !defined(OS_MACOSX) && !defined(OS_NACL) && !defined(OS_ANDROID)

//------------------------------------------------------------------------------
// MessageLoopForIO

#if defined(OS_WIN)

void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  pump_io()->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return pump_io()->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return pump_io()->WaitForIOCompletion(timeout, filter);
}

#elif defined(OS_IOS)

bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher *controller,
                                           Watcher *delegate) {
  return pump_io()->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}

#elif defined(OS_POSIX) && !defined(OS_NACL)

bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher *controller,
                                           Watcher *delegate) {
  return pump_libevent()->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
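
// Illustrative sketch of watching a socket from an IO thread (sock_fd and
// |reader| are hypothetical; |reader| implements MessageLoopForIO::Watcher).
// The |controller| must outlive the watch:
//
//   MessageLoopForIO::FileDescriptorWatcher controller;
//   MessageLoopForIO::current()->WatchFileDescriptor(
//       sock_fd, true /* persistent */, MessageLoopForIO::WATCH_READ,
//       &controller, &reader);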

#endif

}  // namespace base