#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "main/mtypes.h"

#include "brw_context.h"
#include "brw_bufmgr.h"

#ifdef __cplusplus
extern "C" {
#endif

/* The kernel assumes batchbuffers are smaller than 256kB. */
#define MAX_BATCH_SIZE (256 * 1024)

/* 3DSTATE_BINDING_TABLE_POINTERS has a U16 offset from Surface State Base
 * Address, which means that we can't put binding tables beyond 64kB.  This
 * effectively limits the maximum statebuffer size to 64kB.
 */
#define MAX_STATE_SIZE (64 * 1024)

struct intel_batchbuffer;

void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                     enum brw_gpu_ring ring);
int _intel_batchbuffer_flush_fence(struct brw_context *brw,
                                   int in_fence_fd, int *out_fence_fd,
                                   const char *file, int line);

#define intel_batchbuffer_flush(brw) \
   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)

#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
                                  __FILE__, __LINE__)
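
/* A minimal flush sketch (illustrative only; it assumes the usual
 * 0-on-success return convention and that the caller owns the fd):
 *
 *    int out_fd = -1;
 *    // Passing -1 as in_fence_fd means "don't wait on an external fence";
 *    // on success, out_fd holds a sync-file fd that signals when the
 *    // batch completes.
 *    if (intel_batchbuffer_flush_fence(brw, -1, &out_fd) == 0 && out_fd >= 0)
 *       close(out_fd);   // or hand it off to another consumer
 */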

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);
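
/* Illustrative sketch of emitting prepacked DWORDs in one call
 * (MI_NOOP is opcode 0, so a block of zeros is a valid no-op packet):
 *
 *    const uint32_t noops[4] = { 0, 0, 0, 0 };
 *    intel_batchbuffer_data(brw, noops, sizeof(noops), RENDER_RING);
 *
 * This is roughly equivalent to a BEGIN_BATCH(4), four OUT_BATCH(0),
 * ADVANCE_BATCH() sequence.
 */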

bool brw_batch_has_aperture_space(struct brw_context *brw,
                                  unsigned extra_space_in_bytes);

bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);

#define RELOC_WRITE EXEC_OBJECT_WRITE
#define RELOC_NEEDS_GGTT EXEC_OBJECT_NEEDS_GTT
uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);
uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);
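
/* Hedged sketch of a manual relocation (normally done through the
 * OUT_RELOC*() macros below): record that the DWORD at a given byte
 * offset in the batch refers to a buffer object, then store the returned
 * presumed address there so the kernel only patches it if the BO moves.
 * `bo` and `offset_bytes` are placeholders:
 *
 *    uint64_t addr = brw_batch_reloc(&brw->batch, offset_bytes,
 *                                    bo, 0, RELOC_WRITE);
 *    brw->batch.batch.map[offset_bytes / 4] = addr;   // low 32 bits
 */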

/* Number of DWORDs emitted into the batch so far. */
#define USED_BATCH(_batch) \
   ((uintptr_t)((_batch).map_next - (_batch).batch.map))

/* Reinterpret the bits of a float as a uint32_t via union type punning;
 * used by OUT_BATCH_F() below.
 */
static inline uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

/* Reserve space for an n-DWORD packet; in DEBUG builds, also record how
 * many DWORDs the caller promised to emit so ADVANCE_BATCH() can check.
 */
static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);

#ifdef DEBUG
   brw->batch.emit = USED_BATCH(brw->batch);
   brw->batch.total = n;
#endif
}

/* In DEBUG builds, verify that exactly the promised number of DWORDs was
 * emitted since the matching intel_batchbuffer_begin().
 */
static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = USED_BATCH(*batch) - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#else
   (void) brw;
#endif
}

/* Returns true if p points into the statebuffer's mapped range. */
static inline bool
brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
{
   return (char *) p >= (char *) batch->state.map &&
          (char *) p < (char *) batch->state.map + batch->state.bo->size;
}

/* Emission macros: BEGIN_BATCH() opens an n-DWORD packet (starting a
 * do-block that the matching ADVANCE_BATCH() closes), OUT_BATCH() writes
 * one DWORD, and ADVANCE_BATCH() ends the packet, checking the DWORD
 * count in DEBUG builds.
 */
#define BEGIN_BATCH(n) do {                            \
   intel_batchbuffer_begin(brw, (n), RENDER_RING);     \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define BEGIN_BATCH_BLT(n) do {                        \
   intel_batchbuffer_begin(brw, (n), BLT_RING);        \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))

/* Emit a 32-bit relocation: the DWORD at the current batch offset gets
 * the target's presumed address plus delta.
 */
#define OUT_RELOC(buf, flags, delta) do {          \
   uint32_t __offset = (__map - brw->batch.batch.map) * 4;              \
   uint32_t reloc =                                                     \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags));  \
   OUT_BATCH(reloc);                                                    \
} while (0)

/* Handle 48-bit address relocations for Gen8+ (emits two DWORDs). */
#define OUT_RELOC64(buf, flags, delta) do {        \
   uint32_t __offset = (__map - brw->batch.batch.map) * 4;              \
   uint64_t reloc64 =                                                   \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags));  \
   OUT_BATCH(reloc64);                                                  \
   OUT_BATCH(reloc64 >> 32);                                            \
} while (0)

#define ADVANCE_BATCH()                  \
   assert(__map == brw->batch.map_next); \
   intel_batchbuffer_advance(brw);       \
} while (0)
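
/* A typical emission sequence using the macros above (hedged sketch: the
 * header DWORD and operands are placeholders, not a real command):
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(HYPOTHETICAL_CMD << 16 | (3 - 2));  // DW0: command header
 *    OUT_RELOC(bo, RELOC_WRITE, 0);                // DW1: buffer address
 *    OUT_BATCH(0);                                 // DW2: flags/padding
 *    ADVANCE_BATCH();
 *
 * In DEBUG builds, ADVANCE_BATCH() aborts if the number of DWORDs emitted
 * does not match the count passed to BEGIN_BATCH().
 */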

#ifdef __cplusplus
}
#endif

#endif