/external/chromium_org/third_party/mesa/src/src/mesa/drivers/dri/i965/ |
gen6_urb.c |
   gen6_upload_urb( struct brw_context *brw )
      struct intel_context *intel = &brw->intel;
      int total_urb_size = brw->urb.size * 1024; /* in bytes */
      ...
      brw->urb.vs_size = MAX2(brw->vs.prog_data->urb_entry_size, 1);
      ...
      brw->urb.gs_size = brw->urb.vs_size;
      ...
      if (brw->gs.prog_active) {
         nr_vs_entries = (total_urb_size/2) / (brw->urb.vs_size * 128);
         nr_gs_entries = (total_urb_size/2) / (brw->urb.gs_size * 128)
   [all...] |
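On Gen6 the URB is a single shared pool: when a GS program is active, gen6_upload_urb splits it evenly between VS and GS, and the * 128 factor converts an entry size counted in 128-byte (1024-bit) rows into bytes. A minimal standalone sketch of that arithmetic, with hypothetical sizes and Mesa's MAX2 macro reproduced:

   #include <stdio.h>

   #define MAX2(a, b) ((a) > (b) ? (a) : (b))   /* as in Mesa's macros.h */

   int main(void)
   {
      int urb_size_kb = 64;                    /* hypothetical Gen6 URB size */
      int total_urb_size = urb_size_kb * 1024; /* bytes */
      int vs_entry_size = MAX2(4, 1);          /* entry size in 128-byte rows */
      int gs_entry_size = vs_entry_size;       /* GS entries sized like VS ones */

      /* With a GS active, give each stage half of the URB. */
      int nr_vs_entries = (total_urb_size / 2) / (vs_entry_size * 128);
      int nr_gs_entries = (total_urb_size / 2) / (gs_entry_size * 128);

      printf("VS entries: %d, GS entries: %d\n", nr_vs_entries, nr_gs_entries);
      return 0;
   }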
brw_urb.c |
   static bool check_urb_layout(struct brw_context *brw)
      brw->urb.vs_start = 0;
      brw->urb.gs_start = brw->urb.nr_vs_entries * brw->urb.vsize;
      brw->urb.clip_start = brw->urb.gs_start + brw->urb.nr_gs_entries * brw->urb.vsize;
      brw->urb.sf_start = brw->urb.clip_start + brw->urb.nr_clip_entries * brw->urb.vsize
   [all...] |
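check_urb_layout lays the stages out back to back: each stage's start offset is the previous stage's start plus its entry count times entry size, and the layout is valid only if the last stage still fits in the URB. A sketch of the same running-offset check, with made-up stage names, counts, and sizes:

   #include <stdbool.h>
   #include <stdio.h>

   /* Hypothetical per-stage allocations, mirroring the vs/gs/clip/sf/cs chain. */
   struct stage { const char *name; int nr_entries; int entry_size; };

   static bool check_layout(const struct stage *stages, int n, int urb_size)
   {
      int start = 0;
      for (int i = 0; i < n; i++) {
         printf("%-4s start %4d\n", stages[i].name, start);
         start += stages[i].nr_entries * stages[i].entry_size;
      }
      return start <= urb_size;   /* everything must fit inside the URB */
   }

   int main(void)
   {
      const struct stage stages[] = {
         { "vs", 32, 4 }, { "gs", 4, 4 }, { "clip", 6, 5 },
         { "sf", 24, 6 }, { "cs", 8, 2 },
      };
      printf("fits: %d\n", check_layout(stages, 5, 256));
      return 0;
   }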
gen7_cc_state.c |
   upload_cc_state_pointers(struct brw_context *brw)
      struct intel_context *intel = &brw->intel;
      ...
      OUT_BATCH(brw->cc.state_offset | 1);
      ...
      .brw = BRW_NEW_BATCH,
   upload_blend_state_pointer(struct brw_context *brw)
      struct intel_context *intel = &brw->intel;
      ...
      OUT_BATCH(brw->cc.blend_state_offset | 1);
      ...
      .brw = BRW_NEW_BATCH,
   upload_depth_stencil_state_pointer(struct brw_context *brw)
      struct intel_context *intel = &brw->intel
   [all...] |
brw_vtbl.c |
   struct brw_context *brw = brw_context(&intel->ctx);
   brw_destroy_state(brw);
   brw_draw_destroy( brw );
   ...
   ralloc_free(brw->wm.compile_data);
   ...
   dri_bo_release(&brw->curbe.curbe_bo);
   dri_bo_release(&brw->vs.const_bo);
   dri_bo_release(&brw->wm.const_bo);
   ...
   free(brw->curbe.last_buf);
   free(brw->curbe.next_buf);
   ...
   struct brw_context *brw = brw_context(&intel->ctx)
   ...
   struct brw_context *brw = brw_context(&intel->ctx);
   [all...] |
gen7_urb.c |
   gen7_allocate_push_constants(struct brw_context *brw)
      struct intel_context *intel = &brw->intel;
      ...
      .brw = BRW_NEW_CONTEXT,
   gen7_upload_urb(struct brw_context *brw)
      struct intel_context *intel = &brw->intel;
      int handle_region_size = (brw->urb.size - 16) * 1024; /* bytes */
      ...
      brw->urb.vs_size = MAX2(brw->vs.prog_data->urb_entry_size, 1);
      ...
      int nr_vs_entries = handle_region_size / (brw->urb.vs_size * 64);
      if (nr_vs_entries > brw->urb.max_vs_entries
   [all...] |
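Two Gen7 differences are visible here: the first 16KB of the URB are set aside for push constants (so only size - 16 KB is left for handles), and the * 64 factor suggests VS entry size is counted in 64-byte units rather than Gen6's 128-byte rows. The entry count is then clamped to the hardware's max_vs_entries. A small sketch with hypothetical numbers:

   #include <stdio.h>

   #define MAX2(a, b) ((a) > (b) ? (a) : (b))

   int main(void)
   {
      int urb_size_kb = 256;           /* hypothetical Gen7 URB size */
      int max_vs_entries = 704;        /* hypothetical hardware limit */
      int vs_entry_size = MAX2(2, 1);  /* entry size in 64-byte units */

      /* 16KB at the start of the URB are carved out for push constants. */
      int handle_region_size = (urb_size_kb - 16) * 1024; /* bytes */

      int nr_vs_entries = handle_region_size / (vs_entry_size * 64);
      if (nr_vs_entries > max_vs_entries)
         nr_vs_entries = max_vs_entries;   /* clamp to the hardware maximum */

      printf("nr_vs_entries = %d\n", nr_vs_entries);
      return 0;
   }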
brw_vs_surface_state.c |
   brw_upload_vs_pull_constants(struct brw_context *brw)
      struct gl_context *ctx = &brw->intel.ctx;
      struct intel_context *intel = &brw->intel;
      ...
      (struct brw_vertex_program *) brw->vertex_program;
      ...
      _mesa_load_state_parameters(&brw->intel.ctx, vp->program.Base.Parameters);
      ...
      if (!brw->vs.prog_data->nr_pull_params) {
         if (brw->vs.const_bo) {
            drm_intel_bo_unreference(brw->vs.const_bo);
            brw->vs.const_bo = NULL;
            brw->vs.surf_offset[SURF_INDEX_VERT_CONST_BUFFER] = 0
   [all...] |
brw_curbe.c |
   static void calculate_curbe_offsets( struct brw_context *brw )
      struct gl_context *ctx = &brw->intel.ctx;
      ...
      const GLuint nr_fp_regs = (brw->wm.prog_data->nr_params + 15) / 16;
      ...
      const GLuint nr_vp_regs = (brw->vs.prog_data->nr_params + 15) / 16;
      ...
      if (nr_fp_regs > brw->curbe.wm_size ||
          nr_vp_regs > brw->curbe.vs_size ||
          nr_clip_regs != brw->curbe.clip_size ||
          (total_regs < brw->curbe.total_size / 4 &&
           brw->curbe.total_size > 16)) {
         ...
         brw->curbe.wm_start = reg
   [all...] |
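The (nr_params + 15) / 16 expressions are round-up division: per-stage constant counts, in floats, are rounded to whole 16-float blocks before the CURBE is partitioned. The if that follows re-lays-out the partition only when a segment grew, the clip segment changed, or the total shrank to well under the current allocation, so small fluctuations do not churn the layout. The rounding idiom, sketched:

   #include <stdio.h>

   int main(void)
   {
      /* Hypothetical constant counts, in floats. */
      int wm_params = 21, vs_params = 33;

      /* Round up to whole 16-float blocks: (n + 15) / 16. */
      int nr_fp_regs = (wm_params + 15) / 16;   /* 21 floats -> 2 blocks */
      int nr_vp_regs = (vs_params + 15) / 16;   /* 33 floats -> 3 blocks */

      printf("fp blocks: %d, vp blocks: %d\n", nr_fp_regs, nr_vp_regs);
      return 0;
   }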
brw_vs_state.c |
   brw_upload_vs_unit(struct brw_context *brw)
      struct intel_context *intel = &brw->intel;
      ...
      vs = brw_state_batch(brw, AUB_TRACE_VS_STATE,
                           sizeof(*vs), 32, &brw->vs.state_offset);
      ...
      vs->thread0.grf_reg_count = ALIGN(brw->vs.prog_data->total_grf, 16) / 16 - 1;
      ...
      brw_program_reloc(brw,
                        brw->vs.state_offset +
                        ...
                        brw->vs.prog_offset +
      ...
      if (brw->vs.prog_data->total_scratch != 0) {
         ...
         brw->vs.scratch_bo->offset >> 10; /* reloc */
   [all...] |
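thread0.grf_reg_count is a biased field: total_grf is aligned up to a multiple of 16, divided by 16, and decremented, so a stored 0 means one block of 16 registers. A sketch of the encoding (the ALIGN definition here is an assumption matching Mesa's usual round-up macro):

   #include <stdio.h>

   #define ALIGN(value, alignment) \
      (((value) + (alignment) - 1) & ~((alignment) - 1))

   int main(void)
   {
      int total_grf = 18;   /* hypothetical register count for a VS program */

      /* The hardware field counts 16-register blocks, minus one. */
      int grf_reg_count = ALIGN(total_grf, 16) / 16 - 1;

      printf("total_grf %d -> grf_reg_count %d\n", total_grf, grf_reg_count);
      return 0;
   }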
gen6_sampler_state.c |
   upload_sampler_state_pointers(struct brw_context *brw)
      struct intel_context *intel = &brw->intel;
      ...
      OUT_BATCH(brw->sampler.offset); /* VS */
      ...
      OUT_BATCH(brw->sampler.offset);
      ...
      .brw = (BRW_NEW_BATCH |
   [all...] |
brw_gs_state.c |
   brw_upload_gs_unit(struct brw_context *brw)
      struct intel_context *intel = &brw->intel;
      ...
      gs = brw_state_batch(brw, AUB_TRACE_GS_STATE,
                           sizeof(*gs), 32, &brw->gs.state_offset);
      ...
      if (brw->gs.prog_active) {
         gs->thread0.grf_reg_count = (ALIGN(brw->gs.prog_data->total_grf, 16) /
         ...
         brw_program_reloc(brw,
                           brw->gs.state_offset +
                           ...
                           brw->gs.prog_offset +
         ...
         gs->thread3.urb_entry_read_length = brw->gs.prog_data->urb_read_length
   [all...] |
brw_context.c |
   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
   ...
   brw->intel.gen = screen->gen;
   ...
   brwInitVtbl( brw );
   ...
   struct intel_context *intel = &brw->intel;
   ...
   brw_init_surface_formats(brw);
   ...
   brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
   brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
   brw->has_surface_tile_offset = true;
   ...
   brw->has_compr4 = true
   [all...] |
brw_draw.c |
   static void brw_set_prim(struct brw_context *brw,
   ...
      struct gl_context *ctx = &brw->intel.ctx;
      ...
      if (hw_prim != brw->primitive) {
         brw->primitive = hw_prim;
         brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
      }
      if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[prim->mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE
      ...
   struct brw_context *brw = brw_context(ctx);
   [all...] |
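brw_set_prim is the driver's lazy-state pattern in miniature: it compares the incoming primitive against the cached one and raises a dirty bit only on an actual change, so downstream state atoms re-emit only when something they depend on moved. A self-contained sketch of the compare-and-flag idiom (all names are hypothetical stand-ins for the BRW_NEW_* bits):

   #include <stdint.h>
   #include <stdio.h>

   #define NEW_PRIMITIVE         (1u << 0)
   #define NEW_REDUCED_PRIMITIVE (1u << 1)

   struct ctx { int primitive; int reduced_primitive; uint32_t dirty; };

   static void set_prim(struct ctx *c, int hw_prim, int reduced)
   {
      if (hw_prim != c->primitive) {      /* flag only on a real change */
         c->primitive = hw_prim;
         c->dirty |= NEW_PRIMITIVE;
      }
      if (reduced != c->reduced_primitive) {
         c->reduced_primitive = reduced;
         c->dirty |= NEW_REDUCED_PRIMITIVE;
      }
   }

   int main(void)
   {
      struct ctx c = { -1, -1, 0 };
      set_prim(&c, 4, 2);   /* first call: sets both bits */
      c.dirty = 0;
      set_prim(&c, 4, 2);   /* repeat: nothing changed, no bits set */
      printf("dirty after repeat = 0x%x\n", c.dirty);
      return 0;
   }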
brw_state_dump.c |
   batch_out(struct brw_context *brw, const char *name, uint32_t offset,
   ...
   batch_out(struct brw_context *brw, const char *name, uint32_t offset,
      ...
      struct intel_context *intel = &brw->intel;
      ...
   static void dump_vs_state(struct brw_context *brw, uint32_t offset)
      struct intel_context *intel = &brw->intel;
      ...
      batch_out(brw, name, offset, 0, "thread0\n");
      batch_out(brw, name, offset, 1, "thread1\n");
      batch_out(brw, name, offset, 2, "thread2\n");
      batch_out(brw, name, offset, 3, "thread3\n");
      batch_out(brw, name, offset, 4, "thread4: %d threads\n"
      ...
   struct brw_context *brw = brw_context(&intel->ctx);
   [all...] |
/external/mesa3d/src/mesa/drivers/dri/i965/ |
   (same files and excerpts as the chromium_org copy above: gen6_urb.c, brw_urb.c, gen7_cc_state.c, brw_vtbl.c, gen7_urb.c, brw_vs_surface_state.c, brw_curbe.c, brw_vs_state.c, gen6_sampler_state.c, brw_gs_state.c, brw_context.c, brw_draw.c)