// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/async_pixel_transfer_manager_egl.h"

#include <list>
#include <string>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
#include "gpu/command_buffer/service/safe_shared_memory_pool.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/scoped_binders.h"

namespace gpu {

namespace {

bool CheckErrors(const char* file, int line) {
  EGLint eglerror;
  GLenum glerror;
  bool success = true;
  while ((eglerror = eglGetError()) != EGL_SUCCESS) {
     LOG(ERROR) << "Async transfer EGL error at "
                << file << ":" << line << " " << eglerror;
     success = false;
  }
  while ((glerror = glGetError()) != GL_NO_ERROR) {
     LOG(ERROR) << "Async transfer OpenGL error at "
                << file << ":" << line << " " << glerror;
     success = false;
  }
  return success;
}
#define CHECK_GL() CheckErrors(__FILE__, __LINE__)

const char kAsyncTransferThreadName[] = "AsyncTransferThread";

// Regular glTexImage2D call.
void DoTexImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
  glTexImage2D(
      GL_TEXTURE_2D, tex_params.level, tex_params.internal_format,
      tex_params.width, tex_params.height,
      tex_params.border, tex_params.format, tex_params.type, data);
}

// Regular glTexSubImage2D call.
void DoTexSubImage2D(const AsyncTexSubImage2DParams& tex_params, void* data) {
  glTexSubImage2D(
      GL_TEXTURE_2D, tex_params.level,
      tex_params.xoffset, tex_params.yoffset,
      tex_params.width, tex_params.height,
      tex_params.format, tex_params.type, data);
}

// Full glTexSubImage2D call, from glTexImage2D params.
void DoFullTexSubImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
  glTexSubImage2D(
      GL_TEXTURE_2D, tex_params.level,
      0, 0, tex_params.width, tex_params.height,
      tex_params.format, tex_params.type, data);
}

void SetGlParametersForEglImageTexture() {
  // These params are needed for EGLImage creation to succeed on several
  // Android devices. I couldn't find this requirement in the EGLImage
  // extension spec, but several devices fail without it.
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}

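// Runs on the upload thread. Swaps the safely-duplicated shared memory into
// the memory params before notifying the observer of completion.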
void PerformNotifyCompletion(
    AsyncMemoryParams mem_params,
    ScopedSafeSharedMemory* safe_shared_memory,
    scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
  TRACE_EVENT0("gpu", "PerformNotifyCompletion");
  AsyncMemoryParams safe_mem_params = mem_params;
  safe_mem_params.shared_memory = safe_shared_memory->shared_memory();
  observer->DidComplete(safe_mem_params);
}

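// Dedicated thread on which all async uploads run. It owns a small pbuffer
// surface and its own GL context, created in Init() and torn down in
// CleanUp(), plus the shared-memory pool used to keep pixel data alive.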
class TransferThread : public base::Thread {
 public:
  TransferThread() : base::Thread(kAsyncTransferThreadName) {
    Start();
#if defined(OS_ANDROID) || defined(OS_LINUX)
    SetPriority(base::kThreadPriority_Background);
#endif
  }
  virtual ~TransferThread() {
    Stop();
  }

  virtual void Init() OVERRIDE {
    gfx::GLShareGroup* share_group = NULL;
    surface_ = new gfx::PbufferGLSurfaceEGL(gfx::Size(1, 1));
    surface_->Initialize();
    context_ = gfx::GLContext::CreateGLContext(
        share_group, surface_.get(), gfx::PreferDiscreteGpu);
    bool is_current = context_->MakeCurrent(surface_.get());
    DCHECK(is_current);
  }

  virtual void CleanUp() OVERRIDE {
    surface_ = NULL;
    context_->ReleaseCurrent(surface_.get());
    context_ = NULL;
  }

  SafeSharedMemoryPool* safe_shared_memory_pool() {
      return &safe_shared_memory_pool_;
  }

 private:
  scoped_refptr<gfx::GLContext> context_;
  scoped_refptr<gfx::GLSurface> surface_;

  SafeSharedMemoryPool safe_shared_memory_pool_;

  DISALLOW_COPY_AND_ASSIGN(TransferThread);
};

base::LazyInstance<TransferThread>
    g_transfer_thread = LAZY_INSTANCE_INITIALIZER;

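// Convenience accessors for the lazily-created transfer thread.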
base::MessageLoopProxy* transfer_message_loop_proxy() {
  return g_transfer_thread.Pointer()->message_loop_proxy().get();
}

SafeSharedMemoryPool* safe_shared_memory_pool() {
  return g_transfer_thread.Pointer()->safe_shared_memory_pool();
}

// Class which holds async pixel transfer state (EGLImage).
// The EGLImage is accessed by either thread, but everything
// else is accessed only on the main thread.
class TransferStateInternal
    : public base::RefCountedThreadSafe<TransferStateInternal> {
 public:
  TransferStateInternal(GLuint texture_id,
                        const AsyncTexImage2DParams& define_params,
                        bool wait_for_uploads,
                        bool wait_for_creation,
                        bool use_image_preserved)
      : texture_id_(texture_id),
        thread_texture_id_(0),
        transfer_completion_(true, true),
        egl_image_(EGL_NO_IMAGE_KHR),
        wait_for_uploads_(wait_for_uploads),
        wait_for_creation_(wait_for_creation),
        use_image_preserved_(use_image_preserved) {
    define_params_ = define_params;
  }

  bool TransferIsInProgress() {
    return !transfer_completion_.IsSignaled();
  }

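  // Binds the EGLImage to the 'real' texture on the main thread, then runs
  // the client's bind callback. Called from BindCompletedAsyncTransfers().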
  void BindTransfer() {
    TRACE_EVENT2("gpu", "BindAsyncTransfer glEGLImageTargetTexture2DOES",
                 "width", define_params_.width,
                 "height", define_params_.height);
    DCHECK(texture_id_);
    DCHECK_NE(EGL_NO_IMAGE_KHR, egl_image_);

    glBindTexture(GL_TEXTURE_2D, texture_id_);
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
    bind_callback_.Run();

    DCHECK(CHECK_GL());
  }

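  // Creates the EGLImage from |texture_id| on whichever context is current.
  // The resulting image is shared between the main and upload threads.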
  void CreateEglImage(GLuint texture_id) {
    TRACE_EVENT0("gpu", "eglCreateImageKHR");
    DCHECK(texture_id);
    DCHECK_EQ(egl_image_, EGL_NO_IMAGE_KHR);

    EGLDisplay egl_display = eglGetCurrentDisplay();
    EGLContext egl_context = eglGetCurrentContext();
    EGLenum egl_target = EGL_GL_TEXTURE_2D_KHR;
    EGLClientBuffer egl_buffer =
        reinterpret_cast<EGLClientBuffer>(texture_id);

    EGLint image_preserved = use_image_preserved_ ? EGL_TRUE : EGL_FALSE;
    EGLint egl_attrib_list[] = {
        EGL_GL_TEXTURE_LEVEL_KHR, 0, // mip-level.
        EGL_IMAGE_PRESERVED_KHR, image_preserved,
        EGL_NONE
    };
    egl_image_ = eglCreateImageKHR(
        egl_display,
        egl_context,
        egl_target,
        egl_buffer,
        egl_attrib_list);

    DCHECK_NE(EGL_NO_IMAGE_KHR, egl_image_);
  }

  void CreateEglImageOnUploadThread() {
    CreateEglImage(thread_texture_id_);
  }

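  // Lazily creates the EGLImage from the main-thread texture, optionally
  // waiting for creation to complete as a driver work-around.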
  void CreateEglImageOnMainThreadIfNeeded() {
    if (egl_image_ == EGL_NO_IMAGE_KHR) {
      CreateEglImage(texture_id_);
      if (wait_for_creation_) {
        TRACE_EVENT0("gpu", "glFinish creation");
        glFinish();
      }
    }
  }

  void WaitForLastUpload() {
    // This glFinish is just a safe-guard in case uploads require some
    // GPU action that has not yet completed. We could use fences and try
    // to do this less often. However, on older drivers fences are
    // not always reliable (e.g. Mali-400 just blocks forever).
    if (wait_for_uploads_) {
      TRACE_EVENT0("gpu", "glFinish");
      glFinish();
    }
  }

  void MarkAsTransferIsInProgress() {
    transfer_completion_.Reset();
  }

  void MarkAsCompleted() {
    transfer_completion_.Signal();
  }

  void WaitForTransferCompletion() {
    TRACE_EVENT0("gpu", "WaitForTransferCompletion");
    // TODO(backer): Deschedule the channel rather than blocking the main GPU
    // thread (crbug.com/240265).
    transfer_completion_.Wait();
  }

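  // Runs on the upload thread. Creates a sibling texture, defines its level 0
  // storage, creates the EGLImage from it, uploads the pixel data, and then
  // signals completion.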
  void PerformAsyncTexImage2D(
      AsyncTexImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      ScopedSafeSharedMemory* safe_shared_memory,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexImage",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK(!thread_texture_id_);
    DCHECK_EQ(0, tex_params.level);
    DCHECK_EQ(EGL_NO_IMAGE_KHR, egl_image_);

    void* data =
        AsyncPixelTransferDelegate::GetAddress(safe_shared_memory, mem_params);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    {
      TRACE_EVENT0("gpu", "glTexImage2D no data");
      glGenTextures(1, &thread_texture_id_);
      glActiveTexture(GL_TEXTURE0);
      glBindTexture(GL_TEXTURE_2D, thread_texture_id_);

      SetGlParametersForEglImageTexture();

      // If we need to use image_preserved, we pass the data with
      // the allocation. Otherwise we use a NULL allocation to
      // try to avoid any costs associated with creating the EGLImage.
      if (use_image_preserved_)
        DoTexImage2D(tex_params, data);
      else
        DoTexImage2D(tex_params, NULL);
    }

    CreateEglImageOnUploadThread();

    {
      TRACE_EVENT0("gpu", "glTexSubImage2D with data");

      // If we didn't use image_preserved, we haven't uploaded
      // the data yet, so we do this with a full texSubImage.
      if (!use_image_preserved_)
        DoFullTexSubImage2D(tex_params, data);
    }

    WaitForLastUpload();
    MarkAsCompleted();

    DCHECK(CHECK_GL());
    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

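  // Runs on the upload thread. Binds (creating if necessary) the EGLImage
  // sibling texture, uploads the new pixel data into the existing
  // allocation, and then signals completion.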
  void PerformAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      ScopedSafeSharedMemory* safe_shared_memory,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexSubImage2D",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);

    DCHECK_NE(EGL_NO_IMAGE_KHR, egl_image_);
    DCHECK_EQ(0, tex_params.level);

    void* data =
        AsyncPixelTransferDelegate::GetAddress(safe_shared_memory, mem_params);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    if (!thread_texture_id_) {
      TRACE_EVENT0("gpu", "glEGLImageTargetTexture2DOES");
      glGenTextures(1, &thread_texture_id_);
      glActiveTexture(GL_TEXTURE0);
      glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
      glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
    } else {
      glActiveTexture(GL_TEXTURE0);
      glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
    }
    {
      TRACE_EVENT0("gpu", "glTexSubImage2D");
      DoTexSubImage2D(tex_params, data);
    }
    WaitForLastUpload();
    MarkAsCompleted();

    DCHECK(CHECK_GL());
    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

 protected:
  friend class base::RefCountedThreadSafe<TransferStateInternal>;
  friend class gpu::AsyncPixelTransferDelegateEGL;

  static void DeleteTexture(GLuint id) {
    glDeleteTextures(1, &id);
  }

  virtual ~TransferStateInternal() {
    if (egl_image_ != EGL_NO_IMAGE_KHR) {
      EGLDisplay display = eglGetCurrentDisplay();
      eglDestroyImageKHR(display, egl_image_);
    }
    if (thread_texture_id_) {
      transfer_message_loop_proxy()->PostTask(FROM_HERE,
          base::Bind(&DeleteTexture, thread_texture_id_));
    }
  }

  // The 'real' texture.
  GLuint texture_id_;

  // The EGLImage sibling on the upload thread.
  GLuint thread_texture_id_;

  // Definition params for texture that needs binding.
  AsyncTexImage2DParams define_params_;

  // Indicates that an async transfer is in progress.
  base::WaitableEvent transfer_completion_;

  // It would be nice if we could just create a new EGLImage for
  // every upload, but I found that didn't work, so this stores
  // one for the lifetime of the texture.
  EGLImageKHR egl_image_;

  // Callback to invoke when AsyncTexImage2D is complete
  // and the client can safely use the texture. This occurs
  // during BindCompletedAsyncTransfers().
  base::Closure bind_callback_;

  // Customize when we block on fences (these are work-arounds).
  bool wait_for_uploads_;
  bool wait_for_creation_;
  bool use_image_preserved_;
};

}  // namespace

// Class which handles async pixel transfers using EGLImageKHR and a separate
// upload thread.
class AsyncPixelTransferDelegateEGL
    : public AsyncPixelTransferDelegate,
      public base::SupportsWeakPtr<AsyncPixelTransferDelegateEGL> {
 public:
  AsyncPixelTransferDelegateEGL(
      AsyncPixelTransferManagerEGL::SharedState* shared_state,
      GLuint texture_id,
      const AsyncTexImage2DParams& define_params);
  virtual ~AsyncPixelTransferDelegateEGL();

  void BindTransfer() { state_->BindTransfer(); }

  // Implement AsyncPixelTransferDelegate:
  virtual void AsyncTexImage2D(
      const AsyncTexImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params,
      const base::Closure& bind_callback) OVERRIDE;
  virtual void AsyncTexSubImage2D(
      const AsyncTexSubImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params) OVERRIDE;
  virtual bool TransferIsInProgress() OVERRIDE;
  virtual void WaitForTransferCompletion() OVERRIDE;

 private:
  // Returns true if a work-around was used.
  bool WorkAroundAsyncTexImage2D(
      const AsyncTexImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params,
      const base::Closure& bind_callback);
  bool WorkAroundAsyncTexSubImage2D(
      const AsyncTexSubImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params);

  // A raw pointer is safe because the SharedState is owned by the Manager,
  // which owns this Delegate.
  AsyncPixelTransferManagerEGL::SharedState* shared_state_;
  scoped_refptr<TransferStateInternal> state_;

  DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateEGL);
};

AsyncPixelTransferDelegateEGL::AsyncPixelTransferDelegateEGL(
    AsyncPixelTransferManagerEGL::SharedState* shared_state,
    GLuint texture_id,
    const AsyncTexImage2DParams& define_params)
    : shared_state_(shared_state) {
  // We can't wait on uploads on Imagination (it can take 200ms+).
  // In practice, they are complete when the CPU glTexSubImage2D completes.
  bool wait_for_uploads = !shared_state_->is_imagination;

  // Qualcomm runs into texture corruption problems if the same texture is
  // uploaded to with both async and normal uploads. Synchronize after EGLImage
  // creation on the main thread as a work-around.
  bool wait_for_creation = shared_state_->is_qualcomm;

  // Qualcomm has a race when using image_preserved=FALSE,
  // which can result in black textures even after the first upload.
  // Since FALSE is used mainly for performance (to avoid layout changes)
  // and Qualcomm doesn't seem to get any benefit from it, we just use
  // image_preserved=TRUE on Qualcomm as a work-around.
  bool use_image_preserved =
      shared_state_->is_qualcomm || shared_state_->is_imagination;

  state_ = new TransferStateInternal(texture_id,
                                     define_params,
                                     wait_for_uploads,
                                     wait_for_creation,
                                     use_image_preserved);
}

AsyncPixelTransferDelegateEGL::~AsyncPixelTransferDelegateEGL() {}

bool AsyncPixelTransferDelegateEGL::TransferIsInProgress() {
  return state_->TransferIsInProgress();
}

void AsyncPixelTransferDelegateEGL::WaitForTransferCompletion() {
  if (state_->TransferIsInProgress()) {
#if defined(OS_ANDROID) || defined(OS_LINUX)
    g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Display);
#endif

    state_->WaitForTransferCompletion();
    DCHECK(!state_->TransferIsInProgress());

#if defined(OS_ANDROID) || defined(OS_LINUX)
    g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Background);
#endif
  }
}

void AsyncPixelTransferDelegateEGL::AsyncTexImage2D(
    const AsyncTexImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params,
    const base::Closure& bind_callback) {
  if (WorkAroundAsyncTexImage2D(tex_params, mem_params, bind_callback))
    return;

  DCHECK(mem_params.shared_memory);
  DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
            mem_params.shm_size);
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(state_->egl_image_, EGL_NO_IMAGE_KHR);
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  // Save the late bind callback, so we can notify the client
  // when the texture is bound.
  shared_state_->pending_allocations.push_back(AsWeakPtr());
  state_->bind_callback_ = bind_callback;

  // Mark the transfer in progress.
  state_->MarkAsTransferIsInProgress();

  // Duplicate the shared memory so there is no way we can get
  // a use-after-free of the raw pixels.
  transfer_message_loop_proxy()->PostTask(FROM_HERE,
      base::Bind(
          &TransferStateInternal::PerformAsyncTexImage2D,
          state_,
          tex_params,
          mem_params,
          base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
                                                 mem_params.shared_memory,
                                                 mem_params.shm_size)),
          shared_state_->texture_upload_stats));

  DCHECK(CHECK_GL());
}

void AsyncPixelTransferDelegateEGL::AsyncTexSubImage2D(
    const AsyncTexSubImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params) {
  TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
               "width", tex_params.width,
               "height", tex_params.height);
  if (WorkAroundAsyncTexSubImage2D(tex_params, mem_params))
    return;
  DCHECK(!state_->TransferIsInProgress());
  DCHECK(mem_params.shared_memory);
  DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
            mem_params.shm_size);
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  // Mark the transfer in progress.
  state_->MarkAsTransferIsInProgress();

  // If this wasn't async allocated, we don't have an EGLImage yet.
  // Create the EGLImage if it hasn't already been created.
  state_->CreateEglImageOnMainThreadIfNeeded();

  // Duplicate the shared memory so there is no way we can get
  // a use-after-free of the raw pixels.
  transfer_message_loop_proxy()->PostTask(FROM_HERE,
      base::Bind(
          &TransferStateInternal::PerformAsyncTexSubImage2D,
          state_,
          tex_params,
          mem_params,
          base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
                                                 mem_params.shared_memory,
                                                 mem_params.shm_size)),
          shared_state_->texture_upload_stats));

  DCHECK(CHECK_GL());
}

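// Helpers for deciding whether texture dimensions can take the Imagination
// fast upload path (multiple-of-eight but not power-of-two).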
namespace {
bool IsPowerOfTwo(unsigned int x) {
  return ((x != 0) && !(x & (x - 1)));
}

bool IsMultipleOfEight(unsigned int x) {
  return (x & 7) == 0;
}

bool DimensionsSupportImgFastPath(int width, int height) {
  // Multiple of eight, but not a power of two.
  return IsMultipleOfEight(width) &&
         IsMultipleOfEight(height) &&
         !(IsPowerOfTwo(width) &&
           IsPowerOfTwo(height));
}
}  // namespace

// It is very difficult to stream uploads on Imagination GPUs:
// - glTexImage2D defers a swizzle/stall until draw-time
// - glTexSubImage2D will sleep for 16ms on a good day, and 100ms
//   or longer if OpenGL is in heavy use by another thread.
// The one combination that avoids these problems requires:
// a.) Allocations/Uploads must occur on different threads/contexts.
// b.) Texture size must be non-power-of-two.
// When using a+b, uploads will be incorrect/corrupt unless:
// c.) Texture size must be a multiple-of-eight.
//
// To achieve a.) we allocate synchronously on the main thread followed
// by uploading on the upload thread. When b/c are not true we fall back
// on purely synchronous allocation/upload on the main thread.

bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexImage2D(
    const AsyncTexImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params,
    const base::Closure& bind_callback) {
  if (!shared_state_->is_imagination)
    return false;

  // On Imagination we allocate synchronously all the time, even
  // if the dimensions support fast uploads. This is for part a.)
  // above, so that allocations occur on a different thread/context
  // than uploads.
  void* data = GetAddress(mem_params);
  SetGlParametersForEglImageTexture();

  {
    TRACE_EVENT0("gpu", "glTexImage2D with data");
    DoTexImage2D(tex_params, data);
  }

  // The allocation has already occurred, so mark it as finished
  // and ready for binding.
  CHECK(!state_->TransferIsInProgress());

  // If the dimensions support fast async uploads, create the
  // EGLImage for future uploads. The late bind should not
  // be needed since the EGLImage was created from the main thread
  // texture, but this is required to prevent an Imagination driver crash.
  if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height)) {
    state_->CreateEglImageOnMainThreadIfNeeded();
    shared_state_->pending_allocations.push_back(AsWeakPtr());
    state_->bind_callback_ = bind_callback;
  }

  DCHECK(CHECK_GL());
  return true;
}

bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexSubImage2D(
    const AsyncTexSubImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params) {
  if (!shared_state_->is_imagination)
    return false;

  // If the dimensions support fast async uploads, we can use the
  // normal async upload path for uploads.
  if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height))
    return false;

  // Fall back on a synchronous stub as we don't have a known fast path.
  // Also, older ICS drivers crash when we do any glTexSubImage2D on the
  // same thread. To work around this we do glTexImage2D instead. Since
  // we didn't create an EGLImage for this texture (see above), this is
  // okay, but it limits this API to full updates for now.
  DCHECK(!state_->egl_image_);
  DCHECK_EQ(tex_params.xoffset, 0);
  DCHECK_EQ(tex_params.yoffset, 0);
  DCHECK_EQ(state_->define_params_.width, tex_params.width);
  DCHECK_EQ(state_->define_params_.height, tex_params.height);
  DCHECK_EQ(state_->define_params_.level, tex_params.level);
  DCHECK_EQ(state_->define_params_.format, tex_params.format);
  DCHECK_EQ(state_->define_params_.type, tex_params.type);

  void* data = GetAddress(mem_params);
  base::TimeTicks begin_time;
  if (shared_state_->texture_upload_stats.get())
    begin_time = base::TimeTicks::HighResNow();
  {
    TRACE_EVENT0("gpu", "glTexSubImage2D");
    // Note we use define_params_ instead of tex_params.
    // The DCHECKs above verify this is always the same.
    DoTexImage2D(state_->define_params_, data);
  }
  if (shared_state_->texture_upload_stats.get()) {
    shared_state_->texture_upload_stats
        ->AddUpload(base::TimeTicks::HighResNow() - begin_time);
  }

  DCHECK(CHECK_GL());
  return true;
}

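// Detects the GL vendor so the delegates can enable driver-specific
// work-arounds (Imagination, Qualcomm).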
AsyncPixelTransferManagerEGL::SharedState::SharedState()
    // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
    : texture_upload_stats(new AsyncPixelTransferUploadStats) {
  std::string vendor;
  vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
  is_imagination = vendor.find("Imagination") != std::string::npos;
  is_qualcomm = vendor.find("Qualcomm") != std::string::npos;
}

AsyncPixelTransferManagerEGL::SharedState::~SharedState() {}

AsyncPixelTransferManagerEGL::AsyncPixelTransferManagerEGL() {}

AsyncPixelTransferManagerEGL::~AsyncPixelTransferManagerEGL() {}

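// Binds every pending allocation whose upload has finished, in order,
// stopping at the first transfer that is still in progress.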
void AsyncPixelTransferManagerEGL::BindCompletedAsyncTransfers() {
  scoped_ptr<gfx::ScopedTextureBinder> texture_binder;

  while (!shared_state_.pending_allocations.empty()) {
    if (!shared_state_.pending_allocations.front().get()) {
      shared_state_.pending_allocations.pop_front();
      continue;
    }
    AsyncPixelTransferDelegateEGL* delegate =
        shared_state_.pending_allocations.front().get();
    // Terminate early, as all transfers finish in order, currently.
    if (delegate->TransferIsInProgress())
      break;

    if (!texture_binder)
      texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));

    // If the transfer is finished, bind it to the texture
    // and remove it from the pending list.
    delegate->BindTransfer();
    shared_state_.pending_allocations.pop_front();
  }
}

void AsyncPixelTransferManagerEGL::AsyncNotifyCompletion(
    const AsyncMemoryParams& mem_params,
    AsyncPixelTransferCompletionObserver* observer) {
  DCHECK(mem_params.shared_memory);
  DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
            mem_params.shm_size);
  // Post a PerformNotifyCompletion task to the upload thread. This task
  // will run after all async transfers are complete.
  transfer_message_loop_proxy()->PostTask(
      FROM_HERE,
      base::Bind(&PerformNotifyCompletion,
                 mem_params,
                 base::Owned(
                     new ScopedSafeSharedMemory(safe_shared_memory_pool(),
                                                mem_params.shared_memory,
                                                mem_params.shm_size)),
                 make_scoped_refptr(observer)));
}

uint32 AsyncPixelTransferManagerEGL::GetTextureUploadCount() {
  return shared_state_.texture_upload_stats->GetStats(NULL);
}

base::TimeDelta AsyncPixelTransferManagerEGL::GetTotalTextureUploadTime() {
  base::TimeDelta total_texture_upload_time;
  shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
  return total_texture_upload_time;
}

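// This manager posts each transfer straight to the upload thread, so there
// is never incremental work left for the main thread to process here.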
void AsyncPixelTransferManagerEGL::ProcessMorePendingTransfers() {
}

bool AsyncPixelTransferManagerEGL::NeedsProcessMorePendingTransfers() {
  return false;
}

AsyncPixelTransferDelegate*
AsyncPixelTransferManagerEGL::CreatePixelTransferDelegateImpl(
    gles2::TextureRef* ref,
    const AsyncTexImage2DParams& define_params) {
  return new AsyncPixelTransferDelegateEGL(
      &shared_state_, ref->service_id(), define_params);
}

}  // namespace gpu