
Lines Matching refs:batch

39 intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
43 intel_batchbuffer_init(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
46 intel_batchbuffer_reset(batch, bufmgr, has_llc);
49 batch->cpu_map = malloc(BATCH_SZ);
50 batch->map = batch->cpu_map;
51 batch->map_next = batch->cpu_map;
56 intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
59 if (batch->last_bo != NULL) {
60 drm_intel_bo_unreference(batch->last_bo);
61 batch->last_bo = NULL;
63 batch->last_bo = batch->bo;
65 batch->bo = drm_intel_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
67 drm_intel_bo_map(batch->bo, true);
68 batch->map = batch->bo->virtual;
70 batch->map_next = batch->map;
72 batch->reserved_space = BATCH_RESERVED;
73 batch->state_batch_offset = batch->bo->size;
74 batch->needs_sol_reset = false;
75 batch->state_base_address_emitted = false;
77 /* We don't know what ring the new batch will be sent to until we see the
80 batch->ring = UNKNOWN_RING;
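
The matches at 39-80 above come from intel_batchbuffer_init() and intel_batchbuffer_reset(): on a non-LLC machine the commands are built in a malloc'ed CPU shadow (cpu_map) and uploaded later, while on LLC the GEM BO is mapped and written directly. Below is a minimal sketch of that reset flow, assuming a simplified stand-in struct rather than Mesa's real intel_batchbuffer; the SKETCH_BATCH_SZ and SKETCH_BATCH_RESERVED values are placeholders for the real BATCH_SZ/BATCH_RESERVED constants.

#include <stdbool.h>
#include <stdint.h>
#include <intel_bufmgr.h>

#define SKETCH_BATCH_SZ       (8192 * 4) /* placeholder for BATCH_SZ */
#define SKETCH_BATCH_RESERVED 16         /* placeholder for BATCH_RESERVED */

struct sketch_batch {
   drm_intel_bo *bo, *last_bo;
   uint32_t *map, *map_next, *cpu_map;
   uint32_t reserved_space;
   uint32_t state_batch_offset;
   bool needs_sol_reset;
   bool has_llc;
};

static void
sketch_batch_reset(struct sketch_batch *batch, drm_intel_bufmgr *bufmgr)
{
   /* Keep a reference to the previous buffer around in last_bo. */
   if (batch->last_bo != NULL) {
      drm_intel_bo_unreference(batch->last_bo);
      batch->last_bo = NULL;
   }
   batch->last_bo = batch->bo;

   batch->bo = drm_intel_bo_alloc(bufmgr, "batchbuffer", SKETCH_BATCH_SZ, 4096);
   if (batch->has_llc) {
      /* LLC: write commands straight into the mapped BO. */
      drm_intel_bo_map(batch->bo, true);
      batch->map = batch->bo->virtual;
   } else {
      /* No LLC: build in the malloc'ed shadow and upload at flush time. */
      batch->map = batch->cpu_map;
   }
   batch->map_next = batch->map;

   /* Indirect state is packed downward from the end of the buffer. */
   batch->reserved_space = SKETCH_BATCH_RESERVED;
   batch->state_batch_offset = batch->bo->size;
   batch->needs_sol_reset = false;
}
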
86 intel_batchbuffer_reset(&brw->batch, brw->bufmgr, brw->has_llc);
93 brw->batch.saved.map_next = brw->batch.map_next;
94 brw->batch.saved.reloc_count =
95 drm_intel_gem_bo_get_reloc_count(brw->batch.bo);
101 drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
103 brw->batch.map_next = brw->batch.saved.map_next;
104 if (USED_BATCH(brw->batch) == 0)
105 brw->batch.ring = UNKNOWN_RING;
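
The save/rollback pair at 93-105 lets the driver back out of a partially built batch: it snapshots map_next and the kernel relocation count, and on rollback truncates the reloc list and rewinds the write pointer (dropping back to UNKNOWN_RING if the batch is now empty). A hedged sketch of that idea, with everything except the libdrm calls being illustrative names:

#include <stdint.h>
#include <intel_bufmgr.h>

struct sketch_saved {
   uint32_t *map_next;
   int reloc_count;
};

static void
sketch_save_state(drm_intel_bo *bo, uint32_t *map_next, struct sketch_saved *saved)
{
   saved->map_next = map_next;
   saved->reloc_count = drm_intel_gem_bo_get_reloc_count(bo);
}

static uint32_t *
sketch_reset_to_saved(drm_intel_bo *bo, const struct sketch_saved *saved)
{
   /* Drop any relocations emitted since the snapshot, then hand back the
    * saved write pointer; the caller re-checks whether the batch is empty. */
   drm_intel_gem_bo_clear_relocs(bo, saved->reloc_count);
   return saved->map_next;
}
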
109 intel_batchbuffer_free(struct intel_batchbuffer *batch)
111 free(batch->cpu_map);
112 drm_intel_bo_unreference(batch->last_bo);
113 drm_intel_bo_unreference(batch->bo);
120 /* If we're switching rings, implicitly flush the batch. */
121 if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
129 if (intel_batchbuffer_space(&brw->batch) < sz)
132 enum brw_gpu_ring prev_ring = brw->batch.ring;
134 * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
136 brw->batch.ring = ring;
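
Lines 120-136 are the require-space logic: targeting a different ring than the one already in use implicitly flushes the batch, and so does running out of space; the requested ring is recorded only at the end because a flush resets it to UNKNOWN_RING. A small sketch of that control flow, with placeholder flush/space helpers standing in for the real driver functions:

#include <stdbool.h>
#include <stddef.h>

enum sketch_ring { SKETCH_UNKNOWN_RING, SKETCH_RENDER_RING, SKETCH_BLT_RING };

struct sketch_state {
   enum sketch_ring ring;
   size_t space_left;
};

static void
sketch_flush(struct sketch_state *s)
{
   /* ...submit and start a new buffer... */
   s->ring = SKETCH_UNKNOWN_RING;
}

static void
sketch_require_space(struct sketch_state *s, size_t sz, enum sketch_ring ring)
{
   /* Switching rings implicitly flushes the current batch. */
   if (ring != s->ring && s->ring != SKETCH_UNKNOWN_RING)
      sketch_flush(s);

   if (s->space_left < sz)
      sketch_flush(s);

   /* Either flush above may have reset the ring to UNKNOWN_RING, so only
    * record the requested ring here, at the end. */
   s->ring = ring;
}
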
146 struct intel_batchbuffer *batch = &brw->batch;
153 ret = drm_intel_bo_map(batch->bo, false);
156 batch->bo->virtual,
157 batch->bo->offset64,
158 USED_BATCH(*batch));
165 batch->map,
166 batch->bo->offset64,
167 USED_BATCH(*batch));
176 drm_intel_bo_unmap(batch->bo);
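
The matches at 146-176 are the debug dump path: with an LLC mapping the already-visible batch->map is decoded, otherwise the BO is mapped read-only just for the dump and unmapped afterwards. Mesa hands the buffer to a decoder; the sketch below only shows the map/print/unmap flow and prints raw dwords instead:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <intel_bufmgr.h>

static void
sketch_dump_batch(drm_intel_bo *bo, const uint32_t *cpu_copy, uint32_t used_dwords)
{
   const uint32_t *data = cpu_copy;

   /* Without a CPU-visible copy, map the BO read-only just for the dump. */
   if (data == NULL) {
      if (drm_intel_bo_map(bo, false) != 0)
         return;
      data = bo->virtual;
   }

   for (uint32_t i = 0; i < used_dwords; i++)
      printf("0x%08" PRIx64 ": 0x%08x\n", bo->offset64 + 4 * i, data[i]);

   if (cpu_copy == NULL)
      drm_intel_bo_unmap(bo);
}
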
189 * Called when starting a new batch buffer.
195 drm_intel_gem_bo_clear_relocs(brw->batch.bo, 0);
200 * to be in every batch. Otherwise we need to re-emit all the state that
239 if (brw->batch.ring == RENDER_RING) {
252 * 3D batch buffer followed by a PIPE_CONTROL with RC flush and CS stall."
271 * next batch.
290 * Unfortunately, we don't have a handle to the batch containing
292 * so we just use the first batch we emitted after the last swap.
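
The comments around 189-292 describe what happens when a new batch starts: the previous batch's relocations are cleared, and with a hardware context most GPU state survives between batches so only a small per-batch prologue has to be re-emitted (plus render-ring workaround flushes); without one, everything must be re-emitted. A very rough sketch of that bookkeeping, with the dirty-state handling purely illustrative:

#include <stdbool.h>
#include <intel_bufmgr.h>

static void
sketch_new_batch(drm_intel_bo *batch_bo, bool has_hw_ctx, bool *reemit_all_state)
{
   /* The relocations recorded so far belong to the batch that was just
    * submitted, so drop them before building the next one. */
   drm_intel_gem_bo_clear_relocs(batch_bo, 0);

   /* With a kernel-managed hardware context most state is retained across
    * batches; without one, all of it has to be re-emitted every batch. */
   *reemit_all_state = !has_hw_ctx;
}
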
324 struct intel_batchbuffer *batch = &brw->batch;
328 drm_intel_bo_unmap(batch->bo);
330 ret = drm_intel_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
331 if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
332 ret = drm_intel_bo_subdata(batch->bo,
333 batch->state_batch_offset,
334 batch->bo->size - batch->state_batch_offset,
335 (char *)batch->map + batch->state_batch_offset);
342 if (brw->gen >= 6 && batch->ring == BLT_RING) {
348 if (batch->needs_sol_reset)
355 if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
356 ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
359 ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
360 4 * USED_BATCH(*batch), flags);
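
Lines 324-360 are the actual submit: a non-LLC batch is uploaded with two subdata calls (commands from the front, packed state from state_batch_offset to the end), the execbuffer flags pick the BLT ring on gen6+ and OR in the SOL reset bit when needed, and the exec goes through the hardware context only on the render ring. A hedged sketch of that sequence; the I915_EXEC_* flags are the real kernel flags, the rest is simplified:

#include <stdbool.h>
#include <stdint.h>
#include <i915_drm.h>
#include <intel_bufmgr.h>

static int
sketch_submit(drm_intel_bo *bo, drm_intel_context *hw_ctx,
              const uint32_t *cpu_map, uint32_t used_bytes,
              uint32_t state_offset, bool use_blt, bool needs_sol_reset)
{
   int ret = 0;

   if (cpu_map) {
      /* Non-LLC: copy the commands, then the state packed at the tail. */
      ret = drm_intel_bo_subdata(bo, 0, used_bytes, cpu_map);
      if (ret == 0 && state_offset != bo->size)
         ret = drm_intel_bo_subdata(bo, state_offset, bo->size - state_offset,
                                    (const char *)cpu_map + state_offset);
   } else {
      /* LLC: the BO was written through its mapping; unmap before exec. */
      drm_intel_bo_unmap(bo);
   }
   if (ret)
      return ret;

   unsigned int flags = use_blt ? I915_EXEC_BLT : I915_EXEC_RENDER;
   if (needs_sol_reset)
      flags |= I915_EXEC_GEN7_SOL_RESET;

   /* Hardware contexts only apply to the render ring. */
   if (hw_ctx == NULL || use_blt)
      ret = drm_intel_bo_mrb_exec(bo, used_bytes, NULL, 0, 0, flags);
   else
      ret = drm_intel_gem_bo_context_exec(bo, hw_ctx, used_bytes, flags);

   return ret;
}
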
387 if (USED_BATCH(brw->batch) == 0)
391 brw->throttle_batch[0] = brw->batch.bo;
396 int bytes_for_commands = 4 * USED_BATCH(brw->batch);
397 int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
406 brw->batch.reserved_space = 0;
411 intel_batchbuffer_emit_dword(&brw->batch, MI_BATCH_BUFFER_END);
412 if (USED_BATCH(brw->batch) & 1) {
414 intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
426 drm_intel_bo_wait_rendering(brw->batch.bo);
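
The flush entry point at 387-426 skips empty batches, records the batch BO for throttling, drops reserved_space to zero so the closing commands fit, terminates the batch with MI_BATCH_BUFFER_END padded to an even dword count with MI_NOOP, and (with sync debugging) waits for the GPU to finish. The sketch below shows just the termination/padding step; the opcode values follow the conventional MI encodings rather than Mesa's intel_reg.h:

#include <stdint.h>

#define SKETCH_MI_BATCH_BUFFER_END 0x05000000u /* assumed encoding */
#define SKETCH_MI_NOOP             0x00000000u

static uint32_t *
sketch_close_batch(const uint32_t *map, uint32_t *map_next)
{
   /* The batch length must be QWord aligned, so pad with a NOOP when the
    * dword count is odd after emitting BATCH_BUFFER_END. */
   *map_next++ = SKETCH_MI_BATCH_BUFFER_END;
   if ((map_next - map) & 1)
      *map_next++ = SKETCH_MI_NOOP;
   return map_next;
}
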
432 /* Start a new batch buffer. */
442 intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
449 ret = drm_intel_bo_emit_reloc(batch->bo, offset,
463 intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
468 int ret = drm_intel_bo_emit_reloc(batch->bo, offset,
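
Both relocation helpers at 442-468 call drm_intel_bo_emit_reloc() to record, at a byte offset within the batch BO, a pointer to a target BO, and then return the target's presumed GPU address plus the delta so the caller can write it into the batch; the kernel patches it at execbuffer time only if the BO has moved (the 64-bit variant returns the full address, the 32-bit one truncates). A hedged sketch of the 64-bit case, with illustrative names around the real libdrm call:

#include <stdint.h>
#include <intel_bufmgr.h>

static uint64_t
sketch_emit_reloc(drm_intel_bo *batch_bo, uint32_t batch_offset,
                  drm_intel_bo *target, uint32_t target_offset,
                  uint32_t read_domains, uint32_t write_domain)
{
   int ret = drm_intel_bo_emit_reloc(batch_bo, batch_offset,
                                     target, target_offset,
                                     read_domains, write_domain);
   (void)ret; /* the driver asserts this succeeds */

   /* Value to write into the batch now: the target's last-known GPU
    * address plus the offset into it. */
   return target->offset64 + target_offset;
}
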
488 memcpy(brw->batch.map_next, data, bytes);
489 brw->batch.map_next += bytes >> 2;
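
The tail at 488-489 is the bulk-copy helper: data is memcpy'ed to map_next and, since map_next is a uint32_t pointer, it advances by bytes >> 2 dwords. A trivial standalone sketch of that pointer arithmetic:

#include <stdint.h>
#include <string.h>

static uint32_t *
sketch_batch_data(uint32_t *map_next, const void *data, unsigned bytes)
{
   /* bytes is expected to be a whole number of dwords. */
   memcpy(map_next, data, bytes);
   return map_next + (bytes >> 2);
}
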