      1 /**************************************************************************
      2  *
      3  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
      4  * All Rights Reserved.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the
      8  * "Software"), to deal in the Software without restriction, including
      9  * without limitation the rights to use, copy, modify, merge, publish,
     10  * distribute, sub license, and/or sell copies of the Software, and to
     11  * permit persons to whom the Software is furnished to do so, subject to
     12  * the following conditions:
     13  *
     14  * The above copyright notice and this permission notice (including the
     15  * next paragraph) shall be included in all copies or substantial portions
     16  * of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     21  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     25  *
     26  **************************************************************************/
     27 
     28 
     29 #include "main/glheader.h"
     30 #include "main/context.h"
     31 #include "main/extensions.h"
     32 #include "main/fbobject.h"
     33 #include "main/framebuffer.h"
     34 #include "main/imports.h"
     35 #include "main/points.h"
     36 #include "main/renderbuffer.h"
     37 
     38 #include "swrast/swrast.h"
     39 #include "swrast_setup/swrast_setup.h"
     40 #include "tnl/tnl.h"
     41 #include "drivers/common/driverfuncs.h"
     42 #include "drivers/common/meta.h"
     43 
     44 #include "intel_chipset.h"
     45 #include "intel_buffers.h"
     46 #include "intel_tex.h"
     47 #include "intel_batchbuffer.h"
     48 #include "intel_clear.h"
     49 #include "intel_extensions.h"
     50 #include "intel_pixel.h"
     51 #include "intel_regions.h"
     52 #include "intel_buffer_objects.h"
     53 #include "intel_fbo.h"
     54 #include "intel_bufmgr.h"
     55 #include "intel_screen.h"
     56 #include "intel_mipmap_tree.h"
     57 
     58 #include "utils.h"
     59 #include "../glsl/ralloc.h"
     60 
     61 #ifndef INTEL_DEBUG
     62 int INTEL_DEBUG = (0);
     63 #endif
     64 
     65 
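         /**
          * Driver hook for glGetString(): report the vendor string and a
          * renderer string derived from the PCI device ID.
          */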
     66 static const GLubyte *
     67 intelGetString(struct gl_context * ctx, GLenum name)
     68 {
     69    const struct intel_context *const intel = intel_context(ctx);
     70    const char *chipset;
     71    static char buffer[128];
     72 
     73    switch (name) {
     74    case GL_VENDOR:
     75       return (GLubyte *) "Intel Open Source Technology Center";
     76       break;
     77 
     78    case GL_RENDERER:
     79       switch (intel->intelScreen->deviceID) {
     80       case PCI_CHIP_845_G:
     81          chipset = "Intel(R) 845G";
     82          break;
     83       case PCI_CHIP_I830_M:
     84          chipset = "Intel(R) 830M";
     85          break;
     86       case PCI_CHIP_I855_GM:
     87          chipset = "Intel(R) 852GM/855GM";
     88          break;
     89       case PCI_CHIP_I865_G:
     90          chipset = "Intel(R) 865G";
     91          break;
     92       case PCI_CHIP_I915_G:
     93          chipset = "Intel(R) 915G";
     94          break;
     95       case PCI_CHIP_E7221_G:
      96 	 chipset = "Intel(R) E7221G (i915)";
     97 	 break;
     98       case PCI_CHIP_I915_GM:
     99          chipset = "Intel(R) 915GM";
    100          break;
    101       case PCI_CHIP_I945_G:
    102          chipset = "Intel(R) 945G";
    103          break;
    104       case PCI_CHIP_I945_GM:
    105          chipset = "Intel(R) 945GM";
    106          break;
    107       case PCI_CHIP_I945_GME:
    108          chipset = "Intel(R) 945GME";
    109          break;
    110       case PCI_CHIP_G33_G:
    111 	 chipset = "Intel(R) G33";
    112 	 break;
    113       case PCI_CHIP_Q35_G:
    114 	 chipset = "Intel(R) Q35";
    115 	 break;
    116       case PCI_CHIP_Q33_G:
    117 	 chipset = "Intel(R) Q33";
    118 	 break;
    119       case PCI_CHIP_IGD_GM:
    120       case PCI_CHIP_IGD_G:
    121 	 chipset = "Intel(R) IGD";
    122 	 break;
    123       case PCI_CHIP_I965_Q:
    124 	 chipset = "Intel(R) 965Q";
    125 	 break;
    126       case PCI_CHIP_I965_G:
    127       case PCI_CHIP_I965_G_1:
    128 	 chipset = "Intel(R) 965G";
    129 	 break;
    130       case PCI_CHIP_I946_GZ:
    131 	 chipset = "Intel(R) 946GZ";
    132 	 break;
    133       case PCI_CHIP_I965_GM:
    134 	 chipset = "Intel(R) 965GM";
    135 	 break;
    136       case PCI_CHIP_I965_GME:
    137 	 chipset = "Intel(R) 965GME/GLE";
    138 	 break;
    139       case PCI_CHIP_GM45_GM:
    140 	 chipset = "Mobile Intel GM45 Express Chipset";
    141 	 break;
    142       case PCI_CHIP_IGD_E_G:
    143 	 chipset = "Intel(R) Integrated Graphics Device";
    144 	 break;
    145       case PCI_CHIP_G45_G:
    146          chipset = "Intel(R) G45/G43";
    147          break;
    148       case PCI_CHIP_Q45_G:
    149          chipset = "Intel(R) Q45/Q43";
    150          break;
    151       case PCI_CHIP_G41_G:
    152          chipset = "Intel(R) G41";
    153          break;
    154       case PCI_CHIP_B43_G:
    155       case PCI_CHIP_B43_G1:
    156          chipset = "Intel(R) B43";
    157          break;
    158       case PCI_CHIP_ILD_G:
    159          chipset = "Intel(R) Ironlake Desktop";
    160          break;
    161       case PCI_CHIP_ILM_G:
    162          chipset = "Intel(R) Ironlake Mobile";
    163          break;
    164       case PCI_CHIP_SANDYBRIDGE_GT1:
    165       case PCI_CHIP_SANDYBRIDGE_GT2:
    166       case PCI_CHIP_SANDYBRIDGE_GT2_PLUS:
    167 	 chipset = "Intel(R) Sandybridge Desktop";
    168 	 break;
    169       case PCI_CHIP_SANDYBRIDGE_M_GT1:
    170       case PCI_CHIP_SANDYBRIDGE_M_GT2:
    171       case PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS:
    172 	 chipset = "Intel(R) Sandybridge Mobile";
    173 	 break;
    174       case PCI_CHIP_SANDYBRIDGE_S:
    175 	 chipset = "Intel(R) Sandybridge Server";
    176 	 break;
    177       case PCI_CHIP_IVYBRIDGE_GT1:
    178       case PCI_CHIP_IVYBRIDGE_GT2:
    179 	 chipset = "Intel(R) Ivybridge Desktop";
    180 	 break;
    181       case PCI_CHIP_IVYBRIDGE_M_GT1:
    182       case PCI_CHIP_IVYBRIDGE_M_GT2:
    183 	 chipset = "Intel(R) Ivybridge Mobile";
    184 	 break;
    185       case PCI_CHIP_IVYBRIDGE_S_GT1:
    186       case PCI_CHIP_IVYBRIDGE_S_GT2:
    187 	 chipset = "Intel(R) Ivybridge Server";
    188 	 break;
    189       case PCI_CHIP_HASWELL_GT1:
    190       case PCI_CHIP_HASWELL_GT2:
    191       case PCI_CHIP_HASWELL_GT2_PLUS:
    192       case PCI_CHIP_HASWELL_SDV_GT1:
    193       case PCI_CHIP_HASWELL_SDV_GT2:
    194       case PCI_CHIP_HASWELL_SDV_GT2_PLUS:
    195       case PCI_CHIP_HASWELL_ULT_GT1:
    196       case PCI_CHIP_HASWELL_ULT_GT2:
    197       case PCI_CHIP_HASWELL_ULT_GT2_PLUS:
    198       case PCI_CHIP_HASWELL_CRW_GT1:
    199       case PCI_CHIP_HASWELL_CRW_GT2:
    200       case PCI_CHIP_HASWELL_CRW_GT2_PLUS:
    201 	 chipset = "Intel(R) Haswell Desktop";
    202 	 break;
    203       case PCI_CHIP_HASWELL_M_GT1:
    204       case PCI_CHIP_HASWELL_M_GT2:
    205       case PCI_CHIP_HASWELL_M_GT2_PLUS:
    206       case PCI_CHIP_HASWELL_SDV_M_GT1:
    207       case PCI_CHIP_HASWELL_SDV_M_GT2:
    208       case PCI_CHIP_HASWELL_SDV_M_GT2_PLUS:
    209       case PCI_CHIP_HASWELL_ULT_M_GT1:
    210       case PCI_CHIP_HASWELL_ULT_M_GT2:
    211       case PCI_CHIP_HASWELL_ULT_M_GT2_PLUS:
    212       case PCI_CHIP_HASWELL_CRW_M_GT1:
    213       case PCI_CHIP_HASWELL_CRW_M_GT2:
    214       case PCI_CHIP_HASWELL_CRW_M_GT2_PLUS:
    215 	 chipset = "Intel(R) Haswell Mobile";
    216 	 break;
    217       case PCI_CHIP_HASWELL_S_GT1:
    218       case PCI_CHIP_HASWELL_S_GT2:
    219       case PCI_CHIP_HASWELL_S_GT2_PLUS:
    220       case PCI_CHIP_HASWELL_SDV_S_GT1:
    221       case PCI_CHIP_HASWELL_SDV_S_GT2:
    222       case PCI_CHIP_HASWELL_SDV_S_GT2_PLUS:
    223       case PCI_CHIP_HASWELL_ULT_S_GT1:
    224       case PCI_CHIP_HASWELL_ULT_S_GT2:
    225       case PCI_CHIP_HASWELL_ULT_S_GT2_PLUS:
    226       case PCI_CHIP_HASWELL_CRW_S_GT1:
    227       case PCI_CHIP_HASWELL_CRW_S_GT2:
    228       case PCI_CHIP_HASWELL_CRW_S_GT2_PLUS:
    229 	 chipset = "Intel(R) Haswell Server";
    230 	 break;
    231       default:
    232          chipset = "Unknown Intel Chipset";
    233          break;
    234       }
    235 
    236       (void) driGetRendererString(buffer, chipset, 0);
    237       return (GLubyte *) buffer;
    238 
    239    default:
    240       return NULL;
    241    }
    242 }
    243 
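         /**
          * Resolve a drawable's multisample renderbuffers so their
          * single-sampled contents are up to date before the buffers are
          * handed back to the DRI2 loader.  A no-op on pre-gen6 hardware,
          * which has no MSAA support.
          */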
    244 void
    245 intel_downsample_for_dri2_flush(struct intel_context *intel,
    246                                 __DRIdrawable *drawable)
    247 {
    248    if (intel->gen < 6) {
    249       /* MSAA is not supported, so don't waste time checking for
    250        * a multisample buffer.
    251        */
    252       return;
    253    }
    254 
    255    struct gl_framebuffer *fb = drawable->driverPrivate;
    256    struct intel_renderbuffer *rb;
    257 
    258    /* Usually, only the back buffer will need to be downsampled. However,
    259     * the front buffer will also need it if the user has rendered into it.
    260     */
    261    static const gl_buffer_index buffers[2] = {
    262          BUFFER_BACK_LEFT,
    263          BUFFER_FRONT_LEFT,
    264    };
    265 
    266    for (int i = 0; i < 2; ++i) {
    267       rb = intel_get_renderbuffer(fb, buffers[i]);
    268       if (rb == NULL || rb->mt == NULL)
    269          continue;
    270       intel_miptree_downsample(intel, rb->mt);
    271    }
    272 }
    273 
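         /**
          * If the application has rendered to the front buffer, ask the DRI2
          * loader to flush those contents to the real front buffer
          * (downsampling first if the buffers are multisampled).
          */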
    274 static void
    275 intel_flush_front(struct gl_context *ctx)
    276 {
    277    struct intel_context *intel = intel_context(ctx);
    278     __DRIcontext *driContext = intel->driContext;
    279     __DRIdrawable *driDrawable = driContext->driDrawablePriv;
    280     __DRIscreen *const screen = intel->intelScreen->driScrnPriv;
    281 
    282     if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && intel->front_buffer_dirty) {
    283       if (screen->dri2.loader->flushFrontBuffer != NULL &&
    284           driDrawable &&
    285           driDrawable->loaderPrivate) {
    286 
    287          /* Downsample before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
    288           *
     289           * This potentially downsamples both the front and back buffers.
     290           * Downsampling the back buffer is unnecessary, but it harms nothing
     291           * except performance, and no one cares about front-buffer render
     292           * performance.
    293           */
    294          intel_downsample_for_dri2_flush(intel, driDrawable);
    295 
    296          screen->dri2.loader->flushFrontBuffer(driDrawable,
    297                                                driDrawable->loaderPrivate);
    298 
     299 	 /* The dirty bit will be set again in intel_prepare_render() if
     300 	  * we're still doing front buffer rendering once we get there.
    301 	  */
    302 	 intel->front_buffer_dirty = false;
    303       }
    304    }
    305 }
    306 
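         /** Return the number of bits per pixel of the renderbuffer's format. */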
    307 static unsigned
    308 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
    309 {
    310    return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
    311 }
    312 
    313 static void
    314 intel_query_dri2_buffers(struct intel_context *intel,
    315 			 __DRIdrawable *drawable,
    316 			 __DRIbuffer **buffers,
    317 			 int *count);
    318 
    319 static void
    320 intel_process_dri2_buffer(struct intel_context *intel,
    321 			  __DRIdrawable *drawable,
    322 			  __DRIbuffer *buffer,
    323 			  struct intel_renderbuffer *rb,
    324 			  const char *buffer_name);
    325 
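         /**
          * Query DRI2 for the drawable's current buffers and attach the
          * resulting regions to the framebuffer's front/back renderbuffers,
          * then update the framebuffer size to match the drawable.
          */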
    326 void
    327 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
    328 {
    329    struct gl_framebuffer *fb = drawable->driverPrivate;
    330    struct intel_renderbuffer *rb;
    331    struct intel_context *intel = context->driverPrivate;
    332    __DRIbuffer *buffers = NULL;
    333    int i, count;
    334    const char *region_name;
    335 
    336    /* If we're rendering to the fake front buffer, make sure all the
    337     * pending drawing has landed on the real front buffer.  Otherwise
    338     * when we eventually get to DRI2GetBuffersWithFormat the stale
    339     * real front buffer contents will get copied to the new fake front
    340     * buffer.
    341     */
    342    if (intel->is_front_buffer_rendering) {
    343       intel_flush(&intel->ctx);
    344       intel_flush_front(&intel->ctx);
    345    }
    346 
    347    /* Set this up front, so that in case our buffers get invalidated
    348     * while we're getting new buffers, we don't clobber the stamp and
    349     * thus ignore the invalidate. */
    350    drawable->lastStamp = drawable->dri2.stamp;
    351 
    352    if (unlikely(INTEL_DEBUG & DEBUG_DRI))
    353       fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
    354 
    355    intel_query_dri2_buffers(intel, drawable, &buffers, &count);
    356 
    357    if (buffers == NULL)
    358       return;
    359 
    360    for (i = 0; i < count; i++) {
    361        switch (buffers[i].attachment) {
    362        case __DRI_BUFFER_FRONT_LEFT:
    363 	   rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
    364 	   region_name = "dri2 front buffer";
    365 	   break;
    366 
    367        case __DRI_BUFFER_FAKE_FRONT_LEFT:
    368 	   rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
    369 	   region_name = "dri2 fake front buffer";
    370 	   break;
    371 
    372        case __DRI_BUFFER_BACK_LEFT:
    373 	   rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
    374 	   region_name = "dri2 back buffer";
    375 	   break;
    376 
    377        case __DRI_BUFFER_DEPTH:
    378        case __DRI_BUFFER_HIZ:
    379        case __DRI_BUFFER_DEPTH_STENCIL:
    380        case __DRI_BUFFER_STENCIL:
    381        case __DRI_BUFFER_ACCUM:
    382        default:
    383 	   fprintf(stderr,
    384 		   "unhandled buffer attach event, attachment type %d\n",
    385 		   buffers[i].attachment);
    386 	   return;
    387        }
    388 
    389        intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
    390    }
    391 
    392    driUpdateFramebufferSize(&intel->ctx, drawable);
    393 }
    394 
    395 /**
     396  * intel_prepare_render() should be called anywhere that current read/draw
     397  * buffer state is required.
    398  */
    399 void
    400 intel_prepare_render(struct intel_context *intel)
    401 {
    402    __DRIcontext *driContext = intel->driContext;
    403    __DRIdrawable *drawable;
    404 
    405    drawable = driContext->driDrawablePriv;
    406    if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
    407       if (drawable->lastStamp != drawable->dri2.stamp)
    408 	 intel_update_renderbuffers(driContext, drawable);
    409       intel_draw_buffer(&intel->ctx);
    410       driContext->dri2.draw_stamp = drawable->dri2.stamp;
    411    }
    412 
    413    drawable = driContext->driReadablePriv;
    414    if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
    415       if (drawable->lastStamp != drawable->dri2.stamp)
    416 	 intel_update_renderbuffers(driContext, drawable);
    417       driContext->dri2.read_stamp = drawable->dri2.stamp;
    418    }
    419 
    420    /* If we're currently rendering to the front buffer, the rendering
    421     * that will happen next will probably dirty the front buffer.  So
    422     * mark it as dirty here.
    423     */
    424    if (intel->is_front_buffer_rendering)
    425       intel->front_buffer_dirty = true;
    426 
    427    /* Wait for the swapbuffers before the one we just emitted, so we
    428     * don't get too many swaps outstanding for apps that are GPU-heavy
    429     * but not CPU-heavy.
    430     *
    431     * We're using intelDRI2Flush (called from the loader before
    432     * swapbuffer) and glFlush (for front buffer rendering) as the
    433     * indicator that a frame is done and then throttle when we get
     434  * here as we prepare to render the next frame.  At this point the
     435  * round trips for swap/copy and getting new buffers are done and
    436     * we'll spend less time waiting on the GPU.
    437     *
    438     * Unfortunately, we don't have a handle to the batch containing
    439     * the swap, and getting our hands on that doesn't seem worth it,
     440  * so we just use the first batch we emitted after the last swap.
    441     */
    442    if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
    443       drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
    444       drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
    445       intel->first_post_swapbuffers_batch = NULL;
    446       intel->need_throttle = false;
    447    }
    448 }
    449 
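         /**
          * glViewport hook used when DRI2 invalidate events are unavailable:
          * treat a viewport change on a winsys framebuffer as a hint that the
          * drawable may have been resized, and invalidate it.
          */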
    450 static void
    451 intel_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
    452 {
    453     struct intel_context *intel = intel_context(ctx);
    454     __DRIcontext *driContext = intel->driContext;
    455 
    456     if (intel->saved_viewport)
    457 	intel->saved_viewport(ctx, x, y, w, h);
    458 
    459     if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
    460        dri2InvalidateDrawable(driContext->driDrawablePriv);
    461        dri2InvalidateDrawable(driContext->driReadablePriv);
    462     }
    463 }
    464 
    465 static const struct dri_debug_control debug_control[] = {
    466    { "tex",   DEBUG_TEXTURE},
    467    { "state", DEBUG_STATE},
    468    { "ioctl", DEBUG_IOCTL},
    469    { "blit",  DEBUG_BLIT},
    470    { "mip",   DEBUG_MIPTREE},
    471    { "fall",  DEBUG_PERF},
    472    { "perf",  DEBUG_PERF},
    473    { "verb",  DEBUG_VERBOSE},
    474    { "bat",   DEBUG_BATCH},
    475    { "pix",   DEBUG_PIXEL},
    476    { "buf",   DEBUG_BUFMGR},
    477    { "reg",   DEBUG_REGION},
    478    { "fbo",   DEBUG_FBO},
    479    { "gs",    DEBUG_GS},
    480    { "sync",  DEBUG_SYNC},
    481    { "prim",  DEBUG_PRIMS },
    482    { "vert",  DEBUG_VERTS },
    483    { "dri",   DEBUG_DRI },
    484    { "sf",    DEBUG_SF },
    485    { "san",   DEBUG_SANITY },
    486    { "sleep", DEBUG_SLEEP },
    487    { "stats", DEBUG_STATS },
    488    { "tile",  DEBUG_TILE },
    489    { "wm",    DEBUG_WM },
    490    { "urb",   DEBUG_URB },
    491    { "vs",    DEBUG_VS },
    492    { "clip",  DEBUG_CLIP },
    493    { "aub",   DEBUG_AUB },
    494    { NULL,    0 }
    495 };
    496 
    497 
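         /**
          * Called by Mesa on state changes; forwards the notification to the
          * swrast/vbo modules and to the hardware-specific vtbl hook.
          */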
    498 static void
    499 intelInvalidateState(struct gl_context * ctx, GLuint new_state)
    500 {
    501     struct intel_context *intel = intel_context(ctx);
    502 
    503     if (ctx->swrast_context)
    504        _swrast_InvalidateState(ctx, new_state);
    505    _vbo_InvalidateState(ctx, new_state);
    506 
    507    intel->NewGLState |= new_state;
    508 
    509    if (intel->vtbl.invalidate_state)
    510       intel->vtbl.invalidate_state( intel, new_state );
    511 }
    512 
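         /**
          * Flush any pending software-fallback rendering and, on gen2/gen3,
          * any buffered vertices into the current batchbuffer.
          */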
    513 void
    514 intel_flush_rendering_to_batch(struct gl_context *ctx)
    515 {
    516    struct intel_context *intel = intel_context(ctx);
    517 
    518    if (intel->Fallback)
    519       _swrast_flush(ctx);
    520 
    521    if (intel->gen < 4)
    522       INTEL_FIREVERTICES(intel);
    523 }
    524 
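         /**
          * Flush queued rendering into the batchbuffer and submit it to the
          * kernel if it contains any commands.  file/line identify the caller
          * for debug output.
          */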
    525 void
    526 _intel_flush(struct gl_context *ctx, const char *file, int line)
    527 {
    528    struct intel_context *intel = intel_context(ctx);
    529 
    530    intel_flush_rendering_to_batch(ctx);
    531 
    532    if (intel->batch.used)
    533       _intel_batchbuffer_flush(intel, file, line);
    534 }
    535 
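         /** Driver hook for glFlush(). */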
    536 static void
    537 intel_glFlush(struct gl_context *ctx)
    538 {
    539    struct intel_context *intel = intel_context(ctx);
    540 
    541    intel_flush(ctx);
    542    intel_flush_front(ctx);
    543    if (intel->is_front_buffer_rendering)
    544       intel->need_throttle = true;
    545 }
    546 
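         /**
          * Driver hook for glFinish(): flush everything and wait for the last
          * submitted batchbuffer to complete on the GPU.
          */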
    547 void
    548 intelFinish(struct gl_context * ctx)
    549 {
    550    struct intel_context *intel = intel_context(ctx);
    551 
    552    intel_flush(ctx);
    553    intel_flush_front(ctx);
    554 
    555    if (intel->batch.last_bo)
    556       drm_intel_bo_wait_rendering(intel->batch.last_bo);
    557 }
    558 
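         /**
          * Fill in the device-driver function table, starting from Mesa's
          * common defaults.
          */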
    559 void
    560 intelInitDriverFunctions(struct dd_function_table *functions)
    561 {
    562    _mesa_init_driver_functions(functions);
    563 
    564    functions->Flush = intel_glFlush;
    565    functions->Finish = intelFinish;
    566    functions->GetString = intelGetString;
    567    functions->UpdateState = intelInvalidateState;
    568 
    569    intelInitTextureFuncs(functions);
    570    intelInitTextureImageFuncs(functions);
    571    intelInitTextureSubImageFuncs(functions);
    572    intelInitTextureCopyImageFuncs(functions);
    573    intelInitClearFuncs(functions);
    574    intelInitBufferFuncs(functions);
    575    intelInitPixelFuncs(functions);
    576    intelInitBufferObjectFuncs(functions);
    577    intel_init_syncobj_functions(functions);
    578 }
    579 
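         /**
          * Context-creation code shared by the i915 and i965 drivers: creates
          * the Mesa context, parses driconf options and INTEL_DEBUG, sets
          * hardware limits, and initializes the batchbuffer, FBO and software
          * fallback modules.
          */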
    580 bool
    581 intelInitContext(struct intel_context *intel,
    582 		 int api,
    583                  const struct gl_config * mesaVis,
    584                  __DRIcontext * driContextPriv,
    585                  void *sharedContextPrivate,
    586                  struct dd_function_table *functions)
    587 {
    588    struct gl_context *ctx = &intel->ctx;
    589    struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
    590    __DRIscreen *sPriv = driContextPriv->driScreenPriv;
    591    struct intel_screen *intelScreen = sPriv->driverPrivate;
    592    int bo_reuse_mode;
    593    struct gl_config visual;
    594 
    595    /* we can't do anything without a connection to the device */
    596    if (intelScreen->bufmgr == NULL)
    597       return false;
    598 
    599    /* Can't rely on invalidate events, fall back to glViewport hack */
    600    if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
    601       intel->saved_viewport = functions->Viewport;
    602       functions->Viewport = intel_viewport;
    603    }
    604 
    605    if (mesaVis == NULL) {
    606       memset(&visual, 0, sizeof visual);
    607       mesaVis = &visual;
    608    }
    609 
    610    if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
    611                                  functions, (void *) intel)) {
    612       printf("%s: failed to init mesa context\n", __FUNCTION__);
    613       return false;
    614    }
    615 
    616    driContextPriv->driverPrivate = intel;
    617    intel->intelScreen = intelScreen;
    618    intel->driContext = driContextPriv;
    619    intel->driFd = sPriv->fd;
    620 
    621    intel->gen = intelScreen->gen;
    622 
    623    const int devID = intelScreen->deviceID;
    624    if (IS_SNB_GT1(devID) || IS_IVB_GT1(devID) || IS_HSW_GT1(devID))
    625       intel->gt = 1;
    626    else if (IS_SNB_GT2(devID) || IS_IVB_GT2(devID) || IS_HSW_GT2(devID))
    627       intel->gt = 2;
    628    else
    629       intel->gt = 0;
    630 
    631    if (IS_HASWELL(devID)) {
    632       intel->is_haswell = true;
    633    } else if (IS_G4X(devID)) {
    634       intel->is_g4x = true;
    635    } else if (IS_945(devID)) {
    636       intel->is_945 = true;
    637    }
    638 
    639    if (intel->gen >= 5) {
    640       intel->needs_ff_sync = true;
    641    }
    642 
    643    intel->has_separate_stencil = intel->intelScreen->hw_has_separate_stencil;
    644    intel->must_use_separate_stencil = intel->intelScreen->hw_must_use_separate_stencil;
    645    intel->has_hiz = intel->gen >= 6 && !intel->is_haswell;
    646    intel->has_llc = intel->intelScreen->hw_has_llc;
    647    intel->has_swizzling = intel->intelScreen->hw_has_swizzling;
    648 
    649    memset(&ctx->TextureFormatSupported,
    650 	  0, sizeof(ctx->TextureFormatSupported));
    651 
    652    driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
    653                        sPriv->myNum, (intel->gen >= 4) ? "i965" : "i915");
    654    if (intel->gen < 4)
    655       intel->maxBatchSize = 4096;
    656    else
    657       intel->maxBatchSize = sizeof(intel->batch.map);
    658 
    659    intel->bufmgr = intelScreen->bufmgr;
    660 
    661    bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
    662    switch (bo_reuse_mode) {
    663    case DRI_CONF_BO_REUSE_DISABLED:
    664       break;
    665    case DRI_CONF_BO_REUSE_ALL:
    666       intel_bufmgr_gem_enable_reuse(intel->bufmgr);
    667       break;
    668    }
    669 
    670    ctx->Const.MinLineWidth = 1.0;
    671    ctx->Const.MinLineWidthAA = 1.0;
    672    ctx->Const.MaxLineWidth = 5.0;
    673    ctx->Const.MaxLineWidthAA = 5.0;
    674    ctx->Const.LineWidthGranularity = 0.5;
    675 
    676    ctx->Const.MinPointSize = 1.0;
    677    ctx->Const.MinPointSizeAA = 1.0;
    678    ctx->Const.MaxPointSize = 255.0;
    679    ctx->Const.MaxPointSizeAA = 3.0;
    680    ctx->Const.PointSizeGranularity = 1.0;
    681 
     682    ctx->Const.MaxSamples = 1;
    683 
    684    if (intel->gen >= 6)
    685       ctx->Const.MaxClipPlanes = 8;
    686 
    687    ctx->Const.StripTextureBorder = GL_TRUE;
    688 
     689    /* Reinitialize the context point state.
     690     * It depends on constants in __struct gl_contextRec::Const.
    691     */
    692    _mesa_init_point(ctx);
    693 
    694    if (intel->gen >= 4) {
    695       ctx->Const.MaxRenderbufferSize = 8192;
    696    } else {
    697       ctx->Const.MaxRenderbufferSize = 2048;
    698    }
    699 
    700    /* Initialize the software rasterizer and helper modules.
    701     *
    702     * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    703     * software fallbacks (which we have to support on legacy GL to do weird
    704     * glDrawPixels(), glBitmap(), and other functions).
    705     */
    706    if (intel->gen <= 3 || api != API_OPENGL_CORE) {
    707       _swrast_CreateContext(ctx);
    708    }
    709 
    710    _vbo_CreateContext(ctx);
    711    if (ctx->swrast_context) {
    712       _tnl_CreateContext(ctx);
    713       _swsetup_CreateContext(ctx);
    714 
    715       /* Configure swrast to match hardware characteristics: */
    716       _swrast_allow_pixel_fog(ctx, false);
    717       _swrast_allow_vertex_fog(ctx, true);
    718    }
    719 
    720    _mesa_meta_init(ctx);
    721 
    722    intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
    723    intel->hw_stipple = 1;
    724 
    725    /* XXX FBO: this doesn't seem to be used anywhere */
    726    switch (mesaVis->depthBits) {
    727    case 0:                     /* what to do in this case? */
    728    case 16:
    729       intel->polygon_offset_scale = 1.0;
    730       break;
    731    case 24:
    732       intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
    733       break;
    734    default:
    735       assert(0);
    736       break;
    737    }
    738 
    739    if (intel->gen >= 4)
    740       intel->polygon_offset_scale /= 0xffff;
    741 
    742    intel->RenderIndex = ~0;
    743 
    744    intelInitExtensions(ctx);
    745 
    746    INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
    747    if (INTEL_DEBUG & DEBUG_BUFMGR)
    748       dri_bufmgr_set_debug(intel->bufmgr, true);
    749 
    750    if (INTEL_DEBUG & DEBUG_AUB)
    751       drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);
    752 
    753    intel_batchbuffer_init(intel);
    754 
    755    intel_fbo_init(intel);
    756 
    757    intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
    758 					       "texture_tiling");
    759    intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");
    760 
    761    if (!driQueryOptionb(&intel->optionCache, "hiz")) {
    762        intel->has_hiz = false;
    763        /* On gen6, you can only do separate stencil with HIZ. */
    764        if (intel->gen == 6)
    765 	  intel->has_separate_stencil = false;
    766    }
    767 
    768    intel->prim.primitive = ~0;
    769 
    770    /* Force all software fallbacks */
    771 #ifdef I915
    772    if (driQueryOptionb(&intel->optionCache, "no_rast")) {
    773       fprintf(stderr, "disabling 3D rasterization\n");
    774       intel->no_rast = 1;
    775    }
    776 #endif
    777 
    778    if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
    779       fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
    780       intel->always_flush_batch = 1;
    781    }
    782 
    783    if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
    784       fprintf(stderr, "flushing GPU caches before/after each draw call\n");
    785       intel->always_flush_cache = 1;
    786    }
    787 
    788    return true;
    789 }
    790 
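         /**
          * Tear down a context: release the meta, tnl/swrast and vbo modules,
          * the batchbuffer and remaining buffer references, then free the Mesa
          * context data and the intel_context allocation itself.
          */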
    791 void
    792 intelDestroyContext(__DRIcontext * driContextPriv)
    793 {
    794    struct intel_context *intel =
    795       (struct intel_context *) driContextPriv->driverPrivate;
    796    struct gl_context *ctx = &intel->ctx;
    797 
    798    assert(intel);               /* should never be null */
    799    if (intel) {
    800       INTEL_FIREVERTICES(intel);
    801 
    802       /* Dump a final BMP in case the application doesn't call SwapBuffers */
    803       if (INTEL_DEBUG & DEBUG_AUB) {
    804          intel_batchbuffer_flush(intel);
    805 	 aub_dump_bmp(&intel->ctx);
    806       }
    807 
    808       _mesa_meta_free(&intel->ctx);
    809 
    810       intel->vtbl.destroy(intel);
    811 
    812       if (ctx->swrast_context) {
    813          _swsetup_DestroyContext(&intel->ctx);
    814          _tnl_DestroyContext(&intel->ctx);
    815       }
    816       _vbo_DestroyContext(&intel->ctx);
    817 
    818       if (ctx->swrast_context)
    819          _swrast_DestroyContext(&intel->ctx);
    820       intel->Fallback = 0x0;      /* don't call _swrast_Flush later */
    821 
    822       intel_batchbuffer_free(intel);
    823 
    824       free(intel->prim.vb);
    825       intel->prim.vb = NULL;
    826       drm_intel_bo_unreference(intel->prim.vb_bo);
    827       intel->prim.vb_bo = NULL;
    828       drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
    829       intel->first_post_swapbuffers_batch = NULL;
    830 
    831       driDestroyOptionCache(&intel->optionCache);
    832 
    833       /* free the Mesa context */
    834       _mesa_free_context_data(&intel->ctx);
    835 
    836       _math_matrix_dtr(&intel->ViewportMatrix);
    837 
    838       ralloc_free(intel);
    839       driContextPriv->driverPrivate = NULL;
    840    }
    841 }
    842 
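         /** Unbind the context by making no context current. */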
    843 GLboolean
    844 intelUnbindContext(__DRIcontext * driContextPriv)
    845 {
     846    /* Unset current context and dispatch table */
    847    _mesa_make_current(NULL, NULL, NULL);
    848 
    849    return true;
    850 }
    851 
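         /**
          * Make the context current for the calling thread, binding the draw
          * and read drawables' framebuffers (or the incomplete framebuffer
          * when no drawables are supplied).
          */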
    852 GLboolean
    853 intelMakeCurrent(__DRIcontext * driContextPriv,
    854                  __DRIdrawable * driDrawPriv,
    855                  __DRIdrawable * driReadPriv)
    856 {
    857    struct intel_context *intel;
    858    GET_CURRENT_CONTEXT(curCtx);
    859 
    860    if (driContextPriv)
    861       intel = (struct intel_context *) driContextPriv->driverPrivate;
    862    else
    863       intel = NULL;
    864 
    865    /* According to the glXMakeCurrent() man page: "Pending commands to
    866     * the previous context, if any, are flushed before it is released."
    867     * But only flush if we're actually changing contexts.
    868     */
    869    if (intel_context(curCtx) && intel_context(curCtx) != intel) {
    870       _mesa_flush(curCtx);
    871    }
    872 
    873    if (driContextPriv) {
    874       struct gl_framebuffer *fb, *readFb;
    875 
    876       if (driDrawPriv == NULL && driReadPriv == NULL) {
    877 	 fb = _mesa_get_incomplete_framebuffer();
    878 	 readFb = _mesa_get_incomplete_framebuffer();
    879       } else {
    880 	 fb = driDrawPriv->driverPrivate;
    881 	 readFb = driReadPriv->driverPrivate;
    882 	 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
    883 	 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
    884       }
    885 
    886       intel_prepare_render(intel);
    887       _mesa_make_current(&intel->ctx, fb, readFb);
    888 
    889       /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
     890        * is NULL at that point.  We can't call _mesa_make_current()
    891        * first, since we need the buffer size for the initial
    892        * viewport.  So just call intel_draw_buffer() again here. */
    893       intel_draw_buffer(&intel->ctx);
    894    }
    895    else {
    896       _mesa_make_current(NULL, NULL, NULL);
    897    }
    898 
    899    return true;
    900 }
    901 
    902 /**
    903  * \brief Query DRI2 to obtain a DRIdrawable's buffers.
    904  *
    905  * To determine which DRI buffers to request, examine the renderbuffers
    906  * attached to the drawable's framebuffer. Then request the buffers with
    907  * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
    908  *
    909  * This is called from intel_update_renderbuffers().
    910  *
    911  * \param drawable      Drawable whose buffers are queried.
    912  * \param buffers       [out] List of buffers returned by DRI2 query.
    913  * \param buffer_count  [out] Number of buffers returned.
    914  *
    915  * \see intel_update_renderbuffers()
    916  * \see DRI2GetBuffers()
    917  * \see DRI2GetBuffersWithFormat()
    918  */
    919 static void
    920 intel_query_dri2_buffers(struct intel_context *intel,
    921 			 __DRIdrawable *drawable,
    922 			 __DRIbuffer **buffers,
    923 			 int *buffer_count)
    924 {
    925    __DRIscreen *screen = intel->intelScreen->driScrnPriv;
    926    struct gl_framebuffer *fb = drawable->driverPrivate;
    927    int i = 0;
    928    const int max_attachments = 4;
    929    unsigned *attachments = calloc(2 * max_attachments, sizeof(unsigned));
    930 
    931    struct intel_renderbuffer *front_rb;
    932    struct intel_renderbuffer *back_rb;
    933 
    934    front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
    935    back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
    936 
    937    if ((intel->is_front_buffer_rendering ||
    938 	intel->is_front_buffer_reading ||
    939 	!back_rb) && front_rb) {
    940       attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
    941       attachments[i++] = intel_bits_per_pixel(front_rb);
    942    }
    943 
    944    if (back_rb) {
    945       attachments[i++] = __DRI_BUFFER_BACK_LEFT;
    946       attachments[i++] = intel_bits_per_pixel(back_rb);
    947    }
    948 
    949    assert(i <= 2 * max_attachments);
    950 
    951    *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
    952 							&drawable->w,
    953 							&drawable->h,
    954 							attachments, i / 2,
    955 							buffer_count,
    956 							drawable->loaderPrivate);
    957    free(attachments);
    958 }
    959 
    960 /**
    961  * \brief Assign a DRI buffer's DRM region to a renderbuffer.
    962  *
    963  * This is called from intel_update_renderbuffers().
    964  *
    965  * \par Note:
    966  *    DRI buffers whose attachment point is DRI2BufferStencil or
    967  *    DRI2BufferDepthStencil are handled as special cases.
    968  *
    969  * \param buffer_name is a human readable name, such as "dri2 front buffer",
    970  *        that is passed to intel_region_alloc_for_handle().
    971  *
    972  * \see intel_update_renderbuffers()
    973  * \see intel_region_alloc_for_handle()
    974  */
    975 static void
    976 intel_process_dri2_buffer(struct intel_context *intel,
    977 			  __DRIdrawable *drawable,
    978 			  __DRIbuffer *buffer,
    979 			  struct intel_renderbuffer *rb,
    980 			  const char *buffer_name)
    981 {
    982    struct intel_region *region = NULL;
    983 
    984    if (!rb)
    985       return;
    986 
    987    unsigned num_samples = rb->Base.Base.NumSamples;
    988 
    989    /* We try to avoid closing and reopening the same BO name, because the first
    990     * use of a mapping of the buffer involves a bunch of page faulting which is
    991     * moderately expensive.
    992     */
    993    if (num_samples == 0) {
    994        if (rb->mt &&
    995            rb->mt->region &&
    996            rb->mt->region->name == buffer->name)
    997           return;
    998    } else {
    999        if (rb->mt &&
   1000            rb->mt->singlesample_mt &&
   1001            rb->mt->singlesample_mt->region &&
   1002            rb->mt->singlesample_mt->region->name == buffer->name)
   1003           return;
   1004    }
   1005 
   1006    if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
   1007       fprintf(stderr,
   1008 	      "attaching buffer %d, at %d, cpp %d, pitch %d\n",
   1009 	      buffer->name, buffer->attachment,
   1010 	      buffer->cpp, buffer->pitch);
   1011    }
   1012 
   1013    intel_miptree_release(&rb->mt);
   1014    region = intel_region_alloc_for_handle(intel->intelScreen,
   1015                                           buffer->cpp,
   1016                                           drawable->w,
   1017                                           drawable->h,
   1018                                           buffer->pitch / buffer->cpp,
   1019                                           buffer->name,
   1020                                           buffer_name);
   1021    if (!region)
   1022       return;
   1023 
   1024    rb->mt = intel_miptree_create_for_dri2_buffer(intel,
   1025                                                  buffer->attachment,
   1026                                                  intel_rb_format(rb),
   1027                                                  num_samples,
   1028                                                  region);
   1029    intel_region_release(&region);
   1030 }
   1031