#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "brw_bufmgr.h"

#ifdef __cplusplus
extern "C" {
#endif

/* The kernel assumes batchbuffers are smaller than 256kB. */
#define MAX_BATCH_SIZE (256 * 1024)

/* 3DSTATE_BINDING_TABLE_POINTERS has a U16 offset from Surface State Base
 * Address, which means that we can't put binding tables beyond 64kB.  This
 * effectively limits the maximum statebuffer size to 64kB.
 */
#define MAX_STATE_SIZE (64 * 1024)

struct intel_batchbuffer;

void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
bool intel_batchbuffer_saved_state_is_empty(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz);
int _intel_batchbuffer_flush_fence(struct brw_context *brw,
                                   int in_fence_fd, int *out_fence_fd,
                                   const char *file, int line);
void intel_batchbuffer_maybe_noop(struct brw_context *brw);

#define intel_batchbuffer_flush(brw) \
   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)

#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
                                  __FILE__, __LINE__)
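
/* Usage sketch (illustrative, not from this driver): flushing with
 * sync-file fences.  Passing -1 as in_fence_fd means "don't wait on
 * anything"; passing a non-NULL out_fence_fd requests a fence fd that
 * signals when the batch completes.  The caller owns (and must close)
 * the returned fd.
 *
 *    int fence_fd = -1;
 *    if (intel_batchbuffer_flush_fence(brw, -1, &fence_fd) == 0) {
 *       // hand fence_fd to another consumer, then close(fence_fd);
 *    }
 */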

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes);
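
/* Usage sketch (illustrative, with a made-up payload): copies pre-built
 * DWords into the batch at the current insertion point.  `bytes` is
 * expected to be a multiple of 4, since the batch is DWord-granular.
 *
 *    static const uint32_t payload[] = { 0x0, 0x0, 0x0, 0x0 };
 *    intel_batchbuffer_data(brw, payload, sizeof(payload));
 */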

/* Checks whether the batch's BOs, plus extra_space, would still fit
 * within the aperture threshold chosen for this screen.
 */
static inline bool
brw_batch_has_aperture_space(struct brw_context *brw, uint64_t extra_space)
{
   return brw->batch.aperture_space + extra_space <=
          brw->screen->aperture_threshold;
}
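
/* Usage sketch (illustrative): callers typically flush and retry when an
 * operation's buffers would not fit, e.g.:
 *
 *    if (!brw_batch_has_aperture_space(brw, bo->size))
 *       intel_batchbuffer_flush(brw);
 */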

bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);

#define RELOC_WRITE EXEC_OBJECT_WRITE
#define RELOC_NEEDS_GGTT EXEC_OBJECT_NEEDS_GTT
/* Inverted meaning, but using the same bit...emit_reloc will flip it. */
#define RELOC_32BIT EXEC_OBJECT_SUPPORTS_48B_ADDRESS
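
/* Example (illustrative): a GPU-written buffer whose address must stay
 * below 4GB (e.g. a packet with only a 32-bit address field) would be
 * relocated with RELOC_WRITE | RELOC_32BIT; the emit_reloc path flips
 * RELOC_32BIT back into EXEC_OBJECT_SUPPORTS_48B_ADDRESS semantics for
 * the kernel.
 */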

void brw_use_pinned_bo(struct intel_batchbuffer *batch, struct brw_bo *bo,
                       unsigned writeable_flag);

uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);
uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);
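
/* Usage sketch (illustrative; `ss_offset` and `mt` are hypothetical):
 * brw_batch_reloc() records a relocation at an offset in the command
 * section, brw_state_reloc() at an offset in the state section; both
 * return the presumed address to write at that location.
 *
 *    uint64_t addr = brw_state_reloc(&brw->batch, ss_offset,
 *                                    mt->bo, 0, RELOC_WRITE);
 */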

/* Number of DWords emitted into the batch so far. */
#define USED_BATCH(_batch) \
   ((uintptr_t)((_batch).map_next - (_batch).batch.map))

/* Reinterpret a float's bit pattern as a uint32_t, for emitting floats
 * into the batch (see OUT_BATCH_F below).
 */
static inline uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n)
{
   intel_batchbuffer_require_space(brw, n * 4);

#ifdef DEBUG
   brw->batch.emit = USED_BATCH(brw->batch);
   brw->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = USED_BATCH(*batch) - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#else
   (void) brw;
#endif
}

static inline bool
brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
{
   return (char *) p >= (char *) batch->state.map &&
          (char *) p < (char *) batch->state.map + batch->state.bo->size;
}

#define BEGIN_BATCH(n) do {                            \
   intel_batchbuffer_begin(brw, (n));                  \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define BEGIN_BATCH_BLT(n) do {                        \
   assert(brw->screen->devinfo.gen < 6);               \
   intel_batchbuffer_begin(brw, (n));                  \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))

#define OUT_RELOC(buf, flags, delta) do {          \
   uint32_t __offset = (__map - brw->batch.batch.map) * 4;              \
   uint32_t reloc =                                                     \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags));  \
   OUT_BATCH(reloc);                                                    \
} while (0)

/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, flags, delta) do {        \
   uint32_t __offset = (__map - brw->batch.batch.map) * 4;              \
   uint64_t reloc64 =                                                   \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags));  \
   OUT_BATCH(reloc64);                                                  \
   OUT_BATCH(reloc64 >> 32);                                            \
} while (0)

#define ADVANCE_BATCH()                  \
   assert(__map == brw->batch.map_next); \
   intel_batchbuffer_advance(brw);       \
} while (0)
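
/* Usage sketch (illustrative; SOME_3D_CMD and `bo` are hypothetical):
 * every packet is bracketed by BEGIN_BATCH(n)/ADVANCE_BATCH(), and the
 * number of DWords emitted in between must equal n -- DEBUG builds abort
 * on a mismatch via intel_batchbuffer_advance().
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(SOME_3D_CMD << 16 | (3 - 2)); // header: opcode + length
 *    OUT_BATCH(0);                           // payload DWord
 *    OUT_RELOC(bo, RELOC_WRITE, 0);          // 32-bit relocated address
 *    ADVANCE_BATCH();
 *
 * On Gen8+, use OUT_RELOC64 instead, which occupies two DWords.
 */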

#ifdef __cplusplus
}
#endif

#endif