// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/gl/gl_context_cgl.h"

#include <OpenGL/CGLRenderers.h>
#include <OpenGL/CGLTypes.h>
#include <vector>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_switching_manager.h"

namespace gfx {

namespace {

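// True when the chosen pixel format allows switching between renderers
// (GPUs) at run time. Set as a side effect of GetPixelFormat().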
bool g_support_renderer_switching;

struct CGLRendererInfoObjDeleter {
  void operator()(CGLRendererInfoObj* x) {
    if (x)
      CGLDestroyRendererInfo(*x);
  }
};

}  // namespace

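// Returns a lazily initialized, process-wide pixel format shared by all
// GLContextCGL instances; it is never released.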
static CGLPixelFormatObj GetPixelFormat() {
  static CGLPixelFormatObj format;
  if (format)
    return format;
  std::vector<CGLPixelFormatAttribute> attribs;
  // If the system supports dual gpus then allow offline renderers for every
  // context, so that they can all be in the same share group.
  if (ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus()) {
    attribs.push_back(kCGLPFAAllowOfflineRenderers);
    g_support_renderer_switching = true;
  }
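  // The AppleGL implementation pins the context to the generic software
  // renderer, so renderer switching does not apply.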
  if (GetGLImplementation() == kGLImplementationAppleGL) {
    attribs.push_back(kCGLPFARendererID);
    attribs.push_back((CGLPixelFormatAttribute) kCGLRendererGenericFloatID);
    g_support_renderer_switching = false;
  }
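  // The attribute list passed to CGLChoosePixelFormat must be zero-terminated.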
  attribs.push_back((CGLPixelFormatAttribute) 0);

  GLint num_virtual_screens;
  if (CGLChoosePixelFormat(&attribs.front(),
                           &format,
                           &num_virtual_screens) != kCGLNoError) {
    LOG(ERROR) << "Error choosing pixel format.";
    return NULL;
  }
  if (!format) {
    LOG(ERROR) << "format == 0.";
    return NULL;
  }
  DCHECK_NE(num_virtual_screens, 0);
  return format;
}

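// screen_ and renderer_id_ start at -1, meaning no virtual screen or renderer
// has been recorded yet; MakeCurrent() updates them when the renderer changes.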
GLContextCGL::GLContextCGL(GLShareGroup* share_group)
  : GLContextReal(share_group),
    context_(NULL),
    gpu_preference_(PreferIntegratedGpu),
    discrete_pixelformat_(NULL),
    screen_(-1),
    renderer_id_(-1),
    safe_to_force_gpu_switch_(false) {
}

bool GLContextCGL::Initialize(GLSurface* compatible_surface,
                              GpuPreference gpu_preference) {
  DCHECK(compatible_surface);

  gpu_preference = ui::GpuSwitchingManager::GetInstance()->AdjustGpuPreference(
      gpu_preference);

  GLContextCGL* share_context = share_group() ?
      static_cast<GLContextCGL*>(share_group()->GetContext()) : NULL;

  CGLPixelFormatObj format = GetPixelFormat();
  if (!format)
    return false;

  // If using the discrete gpu, create a pixel format requiring it before we
  // create the context.
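  // On dual-GPU machines, holding a pixel format that does not allow offline
  // renderers is what keeps the discrete GPU powered on while this context is
  // alive.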
  if (!ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus() ||
      gpu_preference == PreferDiscreteGpu) {
    std::vector<CGLPixelFormatAttribute> discrete_attribs;
    discrete_attribs.push_back((CGLPixelFormatAttribute) 0);
    GLint num_pixel_formats;
    if (CGLChoosePixelFormat(&discrete_attribs.front(),
                             &discrete_pixelformat_,
                             &num_pixel_formats) != kCGLNoError) {
      LOG(ERROR) << "Error choosing pixel format.";
      return false;
    }
    // The renderer might be switched after this, so ignore the saved ID.
    share_group()->SetRendererID(-1);
  }

  CGLError res = CGLCreateContext(
      format,
      share_context ?
          static_cast<CGLContextObj>(share_context->GetHandle()) : NULL,
      reinterpret_cast<CGLContextObj*>(&context_));
  if (res != kCGLNoError) {
    LOG(ERROR) << "Error creating context.";
    Destroy();
    return false;
  }

  gpu_preference_ = gpu_preference;
  return true;
}

void GLContextCGL::Destroy() {
  if (discrete_pixelformat_) {
    // Delay releasing the pixel format for 10 seconds to reduce the number of
    // unnecessary GPU switches.
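    // Releasing it immediately could power the discrete GPU down and then
    // straight back up if another context is created shortly afterwards.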
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        base::Bind(&CGLReleasePixelFormat, discrete_pixelformat_),
        base::TimeDelta::FromSeconds(10));
    discrete_pixelformat_ = NULL;
  }
  if (context_) {
    CGLDestroyContext(static_cast<CGLContextObj>(context_));
    context_ = NULL;
  }
}

bool GLContextCGL::MakeCurrent(GLSurface* surface) {
  DCHECK(context_);

  // The call to CGLSetVirtualScreen can hang on some AMD drivers
  // http://crbug.com/227228
  if (safe_to_force_gpu_switch_) {
    int renderer_id = share_group()->GetRendererID();
    int screen;
    CGLGetVirtualScreen(static_cast<CGLContextObj>(context_), &screen);

    if (g_support_renderer_switching &&
        !discrete_pixelformat_ && renderer_id != -1 &&
        (screen != screen_ || renderer_id != renderer_id_)) {
      // Attempt to find a virtual screen that's using the requested renderer,
      // and switch the context to use that screen. Don't attempt to switch if
      // the context requires the discrete GPU.
      CGLPixelFormatObj format = GetPixelFormat();
      int virtual_screen_count;
      if (CGLDescribePixelFormat(format, 0, kCGLPFAVirtualScreenCount,
                                 &virtual_screen_count) != kCGLNoError)
        return false;

      for (int i = 0; i < virtual_screen_count; ++i) {
        int screen_renderer_id;
        if (CGLDescribePixelFormat(format, i, kCGLPFARendererID,
                                   &screen_renderer_id) != kCGLNoError)
          return false;

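        // Only the bits covered by kCGLRendererIDMatchingMask identify the
        // renderer, so compare only those bits.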
        screen_renderer_id &= kCGLRendererIDMatchingMask;
        if (screen_renderer_id == renderer_id) {
          CGLSetVirtualScreen(static_cast<CGLContextObj>(context_), i);
          screen_ = i;
          break;
        }
      }
      renderer_id_ = renderer_id;
    }
  }

  if (IsCurrent(surface))
    return true;

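  // If anything below fails, |release_current| clears the current context on
  // destruction so a half-initialized context is never left bound; Cancel() is
  // called on success.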
  ScopedReleaseCurrent release_current;
  TRACE_EVENT0("gpu", "GLContextCGL::MakeCurrent");

  if (CGLSetCurrentContext(
      static_cast<CGLContextObj>(context_)) != kCGLNoError) {
    LOG(ERROR) << "Unable to make gl context current.";
    return false;
  }

  // Set this as soon as the context is current, since we might call into GL.
  SetRealGLApi();

  SetCurrent(surface);
  if (!InitializeDynamicBindings()) {
    return false;
  }

  if (!surface->OnMakeCurrent(this)) {
    LOG(ERROR) << "Unable to make gl context current.";
    return false;
  }

  release_current.Cancel();
  return true;
}

void GLContextCGL::ReleaseCurrent(GLSurface* surface) {
  if (!IsCurrent(surface))
    return;

  SetCurrent(NULL);
  CGLSetCurrentContext(NULL);
}

bool GLContextCGL::IsCurrent(GLSurface* surface) {
  bool native_context_is_current = CGLGetCurrentContext() == context_;

  // If our context is current then our notion of which GLContext is
  // current must be correct. On the other hand, third-party code
  // using OpenGL might change the current context.
  DCHECK(!native_context_is_current || (GetRealCurrent() == this));

  if (!native_context_is_current)
    return false;

  return true;
}

void* GLContextCGL::GetHandle() {
  return context_;
}

void GLContextCGL::SetSwapInterval(int interval) {
  DCHECK(IsCurrent(NULL));
  LOG(WARNING) << "GLContext: GLContextCGL::SetSwapInterval is ignored.";
}

bool GLContextCGL::GetTotalGpuMemory(size_t* bytes) {
  DCHECK(bytes);
  *bytes = 0;

  CGLContextObj context = reinterpret_cast<CGLContextObj>(context_);
  if (!context)
    return false;

  // Retrieve the current renderer ID.
  GLint current_renderer_id = 0;
  if (CGLGetParameter(context,
                      kCGLCPCurrentRendererID,
                      &current_renderer_id) != kCGLNoError)
    return false;

  // Iterate through the list of all renderers.
  GLuint display_mask = static_cast<GLuint>(-1);
  CGLRendererInfoObj renderer_info = NULL;
  GLint num_renderers = 0;
  if (CGLQueryRendererInfo(display_mask,
                           &renderer_info,
                           &num_renderers) != kCGLNoError)
    return false;

  scoped_ptr<CGLRendererInfoObj,
      CGLRendererInfoObjDeleter> scoper(&renderer_info);

  for (GLint renderer_index = 0;
       renderer_index < num_renderers;
       ++renderer_index) {
    // Skip renderers other than the current one.
    GLint renderer_id = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPRendererID,
                            &renderer_id) != kCGLNoError)
      continue;
    if (renderer_id != current_renderer_id)
      continue;
    // Retrieve the video memory for the renderer.
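    // kCGLRPVideoMemory reports the renderer's video memory in bytes.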
    GLint video_memory = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPVideoMemory,
                            &video_memory) != kCGLNoError)
      continue;
    *bytes = video_memory;
    return true;
  }

  return false;
}

void GLContextCGL::SetSafeToForceGpuSwitch() {
  safe_to_force_gpu_switch_ = true;
}

GLContextCGL::~GLContextCGL() {
  Destroy();
}

GpuPreference GLContextCGL::GetGpuPreference() {
  return gpu_preference_;
}

}  // namespace gfx