
#ifndef COMMON_CONTEXT_H
#define COMMON_CONTEXT_H

#include "main/mm.h"
#include "math/m_vector.h"
#include "tnl/t_context.h"
#include "main/colormac.h"

#include "radeon_debug.h"
#include "radeon_screen.h"
#include "radeon_drm.h"
#include "dri_util.h"
#include "tnl/t_vertex.h"
#include "swrast/s_context.h"

struct radeon_context;

#include "radeon_bo_gem.h"
#include "radeon_cs_gem.h"

/* This union is used to avoid warnings/miscompilation
   with float to uint32_t casts due to strict-aliasing */
typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
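/*
 * Illustrative sketch (not actual driver code): the union lets a float be
 * reinterpreted as its raw 32-bit pattern without a pointer cast that would
 * break strict aliasing, e.g. when a register needs the bits of a float:
 *
 *   float_ui32_type tmp;
 *   tmp.f = some_float_value;     // hypothetical source value
 *   some_register = tmp.ui32;     // raw IEEE-754 bit pattern
 */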

struct radeon_context;
typedef struct radeon_context radeonContextRec;
typedef struct radeon_context *radeonContextPtr;


#define TEX_0   0x1
#define TEX_1   0x2
#define TEX_2   0x4
#define TEX_3   0x8
#define TEX_4   0x10
#define TEX_5   0x20

/* Rasterizing fallbacks */
/* See corresponding strings in r200_swtcl.c */
#define RADEON_FALLBACK_TEXTURE		0x0001
#define RADEON_FALLBACK_DRAW_BUFFER	0x0002
#define RADEON_FALLBACK_STENCIL		0x0004
#define RADEON_FALLBACK_RENDER_MODE	0x0008
#define RADEON_FALLBACK_BLEND_EQ	0x0010
#define RADEON_FALLBACK_BLEND_FUNC	0x0020
#define RADEON_FALLBACK_DISABLE		0x0040
#define RADEON_FALLBACK_BORDER_MODE	0x0080
#define RADEON_FALLBACK_DEPTH_BUFFER	0x0100
#define RADEON_FALLBACK_STENCIL_BUFFER	0x0200

#define R200_FALLBACK_TEXTURE           0x01
#define R200_FALLBACK_DRAW_BUFFER       0x02
#define R200_FALLBACK_STENCIL           0x04
#define R200_FALLBACK_RENDER_MODE       0x08
#define R200_FALLBACK_DISABLE           0x10
#define R200_FALLBACK_BORDER_MODE       0x20

#define RADEON_TCL_FALLBACK_RASTER            0x1   /* rasterization */
#define RADEON_TCL_FALLBACK_UNFILLED          0x2   /* unfilled tris */
#define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE     0x4   /* twoside tris */
#define RADEON_TCL_FALLBACK_MATERIAL          0x8   /* material in vb */
#define RADEON_TCL_FALLBACK_TEXGEN_0          0x10  /* texgen, unit 0 */
#define RADEON_TCL_FALLBACK_TEXGEN_1          0x20  /* texgen, unit 1 */
#define RADEON_TCL_FALLBACK_TEXGEN_2          0x40  /* texgen, unit 2 */
#define RADEON_TCL_FALLBACK_TCL_DISABLE       0x80  /* user disable */
#define RADEON_TCL_FALLBACK_FOGCOORDSPEC      0x100 /* fogcoord, sep. spec light */

/* The blit width for texture uploads
 */
#define BLIT_WIDTH_BYTES 1024

/* Use the templated vertex format:
 */
#define COLOR_IS_RGBA
#define TAG(x) radeon##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG

#define RADEON_RB_CLASS 0xdeadbeef

struct radeon_renderbuffer
{
	struct swrast_renderbuffer base;

	struct radeon_bo *bo;
	unsigned int cpp;
	/* unsigned int offset; */
	unsigned int pitch;

	struct radeon_bo *map_bo;
	GLbitfield map_mode;
	int map_x, map_y, map_w, map_h;
	int map_pitch;
	void *map_buffer;

	uint32_t draw_offset; /* FBO */
	/* boo Xorg 6.8.2 compat */
	int has_surface;

	GLuint pf_pending;  /**< sequence number of pending flip */
	__DRIdrawable *dPriv;
};

struct radeon_framebuffer
{
	struct gl_framebuffer base;

	struct radeon_renderbuffer *color_rb[2];
};


struct radeon_colorbuffer_state {
	int roundEnable;
	struct gl_renderbuffer *rb;
	uint32_t draw_offset; /* offset into color renderbuffer - FBOs */
};

struct radeon_depthbuffer_state {
	struct gl_renderbuffer *rb;
};

struct radeon_scissor_state {
	drm_clip_rect_t rect;
	GLboolean enabled;
};

struct radeon_state_atom {
	struct radeon_state_atom *next, *prev;
	const char *name;	/* for debug */
	int cmd_size;		/* size in bytes */
	GLuint idx;
	GLuint is_tcl;
	GLuint *cmd;		/* one or more commands */
	GLuint *lastcmd;	/* one or more commands */
	GLboolean dirty;	/* dirty-mark in emit_state_list */
	int (*check) (struct gl_context *, struct radeon_state_atom *atom); /* is this state active? */
	void (*emit) (struct gl_context *, struct radeon_state_atom *atom);
};
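
/*
 * Sketch of how such an atom is typically consumed (illustrative only; the
 * real emit loop lives in the shared radeon state code, and "hw" / "ctx"
 * below are hypothetical locals):
 *
 *   struct radeon_state_atom *atom;
 *   for (atom = hw->atomlist.next; atom != &hw->atomlist; atom = atom->next) {
 *       if (atom->dirty && atom->check(ctx, atom))
 *           atom->emit(ctx, atom);   // writes atom->cmd into the command stream
 *   }
 */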

struct radeon_hw_state {
	/* Head of the linked list of state atoms. */
	struct radeon_state_atom atomlist;
	int max_state_size;	/* Number of bytes necessary for a full state emit. */
	int max_post_flush_size; /* Number of bytes necessary for post flushing emits */
	GLboolean is_dirty, all_dirty;
};


/* Texture related */
typedef struct _radeon_texture_image radeon_texture_image;


/**
 * This is a subclass of swrast_texture_image since we use swrast
 * for software fallback rendering.
 */
struct _radeon_texture_image {
	struct swrast_texture_image base;

	/**
	 * If mt != 0, the image is stored in hardware format in the
	 * given mipmap tree. In this case, base.Data may point into the
	 * mapping of the buffer object that contains the mipmap tree.
	 *
	 * If mt == 0, the image is stored in normal memory pointed to
	 * by base.Data.
	 */
	struct _radeon_mipmap_tree *mt;
	struct radeon_bo *bo;
	GLboolean used_as_render_target;
};

static INLINE radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image)
{
	return (radeon_texture_image*)image;
}


typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr;

#define RADEON_TXO_MICRO_TILE               (1 << 3)

/* Texture object in locally shared texture space.
 */
struct radeon_tex_obj {
	struct gl_texture_object base;
	struct _radeon_mipmap_tree *mt;

	/**
	 * This is true if we've verified that the mipmap tree above is complete
	 * and so on.
	 */
	GLboolean validated;
	/* Minimum LOD to be used during rendering */
	unsigned minLod;
	/* Maximum LOD to be used during rendering */
	unsigned maxLod;

	GLuint override_offset;
	GLboolean image_override; /* Image overridden by GLX_EXT_tfp */
	GLuint tile_bits;	/* hw texture tile bits used on this texture */
	struct radeon_bo *bo;

	GLuint pp_txfilter;	/* hardware register values */
	GLuint pp_txformat;
	GLuint pp_txformat_x;
	GLuint pp_txsize;	/* npot only */
	GLuint pp_txpitch;	/* npot only */
	GLuint pp_border_color;
	GLuint pp_cubic_faces;	/* cube face 1,2,3,4 log2 sizes */

	GLboolean border_fallback;
};

static INLINE radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj)
{
	return (radeonTexObj*)texObj;
}
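
/*
 * Example (illustrative): driver texture code typically downcasts the core
 * object before touching the driver-private fields declared above, e.g.
 *
 *   radeonTexObj *t = radeon_tex_obj(texObj);
 *   if (!t->validated) {
 *       // ... (re)build or validate t->mt before drawing ...
 *   }
 */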

/* occlusion query */
struct radeon_query_object {
	struct gl_query_object Base;
	struct radeon_bo *bo;
	int curr_offset;
	GLboolean emitted_begin;

	/* Doubly linked list of not yet flushed query objects */
	struct radeon_query_object *prev, *next;
};

/* Need refcounting on dma buffers:
 */
struct radeon_dma_buffer {
	int refcount;		/* the number of retained regions in buf */
	drmBufPtr buf;
};

struct radeon_aos {
	struct radeon_bo *bo; /** Buffer object where vertex data is stored */
	int offset; /** Offset into buffer object, in bytes */
	int components; /** Number of components per vertex */
	int stride; /** Stride in dwords (may be 0 for repeating) */
	int count; /** Number of vertices */
};

#define DMA_BO_FREE_TIME 100

struct radeon_dma_bo {
	struct radeon_dma_bo *next, *prev;
	struct radeon_bo *bo;
	int expire_counter;
};

struct radeon_dma {
	/* Active dma region.  Allocations for vertices and retained
	 * regions come from here.  Also used for emitting random vertices;
	 * these may be flushed by calling flush_current().
	 */
	struct radeon_dma_bo free;
	struct radeon_dma_bo wait;
	struct radeon_dma_bo reserved;
	size_t current_used; /** Number of bytes allocated and forgotten about */
	size_t current_vertexptr; /** End of active vertex region */
	size_t minimum_size;

	/**
	 * If current_vertexptr != current_used then flush must be non-zero.
	 * flush must be called before non-active vertex allocations can be
	 * performed.
	 */
	void (*flush) (struct gl_context *);
};
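
/*
 * Illustrative use of the flush hook (sketch; "rmesa" stands for a
 * radeonContextPtr and is not defined here): pending vertices must be
 * flushed before any non-active allocation is made from the dma region:
 *
 *   if (rmesa->dma.flush)
 *       rmesa->dma.flush(ctx);
 */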

/* radeon_swtcl.c
 */
struct radeon_swtcl_info {

	GLuint RenderIndex;
	GLuint vertex_size;
	GLubyte *verts;

	/* Fallback rasterization functions
	 */
	GLuint hw_primitive;
	GLenum render_primitive;
	GLuint numverts;

	struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
	GLuint vertex_attr_count;

	GLuint emit_prediction;
	struct radeon_bo *bo;
};

#define RADEON_MAX_AOS_ARRAYS		16
struct radeon_tcl_info {
	struct radeon_aos aos[RADEON_MAX_AOS_ARRAYS];
	GLuint aos_count;
	struct radeon_bo *elt_dma_bo; /** Buffer object that contains element indices */
	int elt_dma_offset; /** Offset into this buffer object, in bytes */
};

struct radeon_ioctl {
	GLuint vertex_offset;
	GLuint vertex_max;
	struct radeon_bo *bo;
	GLuint vertex_size;
};

#define RADEON_MAX_PRIMS 64

struct radeon_prim {
	GLuint start;
	GLuint end;
	GLuint prim;
};

static INLINE GLuint radeonPackColor(GLuint cpp,
                                     GLubyte r, GLubyte g,
                                     GLubyte b, GLubyte a)
{
	switch (cpp) {
	case 2:
		return PACK_COLOR_565(r, g, b);
	case 4:
		return PACK_COLOR_8888(a, r, g, b);
	default:
		return 0;
	}
}
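
/*
 * Example (illustrative): packing an opaque red clear color.  With cpp == 4
 * the result is an ARGB8888 dword, with cpp == 2 an RGB565 value, and
 * unsupported depths return 0 (assuming the PACK_COLOR_* layouts from
 * main/colormac.h):
 *
 *   GLuint clear = radeonPackColor(4, 0xff, 0x00, 0x00, 0xff);
 *   // clear == 0xffff0000  (A=0xff, R=0xff, G=0x00, B=0x00)
 */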

#define MAX_CMD_BUF_SZ (16*1024)

#define MAX_DMA_BUF_SZ (64*1024)

struct radeon_store {
	GLuint statenr;
	GLuint primnr;
	char cmd_buf[MAX_CMD_BUF_SZ];
	int cmd_used;
	int elts_start;
};

struct radeon_dri_mirror {
	__DRIcontext *context;	/* DRI context */
	__DRIscreen *screen;	/* DRI screen */

	drm_context_t hwContext;
	drm_hw_lock_t *hwLock;
	int hwLockCount;
	int fd;
	int drmMinor;
};

typedef void (*radeon_tri_func) (radeonContextPtr,
				 radeonVertex *,
				 radeonVertex *, radeonVertex *);

typedef void (*radeon_line_func) (radeonContextPtr,
				  radeonVertex *, radeonVertex *);

typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *);

#define RADEON_MAX_BOS 32
struct radeon_state {
	struct radeon_colorbuffer_state color;
	struct radeon_depthbuffer_state depth;
	struct radeon_scissor_state scissor;
};

/**
 * This structure holds the command buffer while it is being constructed.
 *
 * The first batch of commands in the buffer is always the state that needs
 * to be re-emitted when the context is lost. This batch can be skipped
 * otherwise.
 */
struct radeon_cmdbuf {
	struct radeon_cs_manager    *csm;
	struct radeon_cs            *cs;
	int size; /** # of dwords total */
	unsigned int flushing:1; /** whether we're currently in FlushCmdBufLocked */
};

struct radeon_context {
   struct gl_context *glCtx;
   radeonScreenPtr radeonScreen;	/* Screen private DRI data */

   /* Texture object bookkeeping
    */
   int                   texture_depth;
   float                 initialMaxAnisotropy;
   uint32_t              texture_row_align;
   uint32_t              texture_rect_row_align;
   uint32_t              texture_compressed_row_align;

   struct radeon_dma dma;
   struct radeon_hw_state hw;
   /* Rasterization and vertex state:
    */
   GLuint TclFallback;
   GLuint Fallback;
   GLuint NewGLState;
   GLbitfield64 tnl_index_bitset;	/* index of bits for last tnl_install_attrs */

   /* Drawable information */
   unsigned int lastStamp;
   drm_radeon_sarea_t *sarea;	/* Private SAREA data */

   /* Mirrors of some DRI state */
   struct radeon_dri_mirror dri;

   /* Busy waiting */
   GLuint do_usleeps;
   GLuint do_irqs;
   GLuint irqsEmitted;
   drm_radeon_irq_wait_t iw;

   /* Derived state - for r300 only */
   struct radeon_state state;

   struct radeon_swtcl_info swtcl;
   struct radeon_tcl_info tcl;
   /* Configuration cache
    */
   driOptionCache optionCache;

   struct radeon_cmdbuf cmdbuf;

   struct radeon_debug debug;

   drm_clip_rect_t fboRect;
   GLboolean front_cliprects;

   /**
    * Set if rendering has occurred to the drawable's front buffer.
    *
    * This is used in the DRI2 case to detect that glFlush should also copy
    * the contents of the fake front buffer to the real front buffer.
    */
   GLboolean front_buffer_dirty;

   /**
    * Track whether front-buffer rendering is currently enabled
    *
    * A separate flag is used to track this in order to support MRT more
    * easily.
    */
   GLboolean is_front_buffer_rendering;

   /**
    * Track whether front-buffer is the current read target.
    *
    * This is closely associated with is_front_buffer_rendering, but may
    * be set separately.  The DRI2 fake front buffer must be referenced
    * either way.
    */
   GLboolean is_front_buffer_reading;

   struct {
	struct radeon_query_object *current;
	struct radeon_state_atom queryobj;
   } query;

   struct {
	   void (*get_lock)(radeonContextPtr radeon);
	   void (*update_viewport_offset)(struct gl_context *ctx);
	   void (*emit_cs_header)(struct radeon_cs *cs, radeonContextPtr rmesa);
	   void (*swtcl_flush)(struct gl_context *ctx, uint32_t offset);
	   void (*pre_emit_atoms)(radeonContextPtr rmesa);
	   void (*pre_emit_state)(radeonContextPtr rmesa);
	   void (*fallback)(struct gl_context *ctx, GLuint bit, GLboolean mode);
	   void (*free_context)(struct gl_context *ctx);
	   void (*emit_query_finish)(radeonContextPtr radeon);
	   void (*update_scissor)(struct gl_context *ctx);
	   unsigned (*check_blit)(gl_format mesa_format, uint32_t dst_pitch);
	   unsigned (*blit)(struct gl_context *ctx,
                        struct radeon_bo *src_bo,
                        intptr_t src_offset,
                        gl_format src_mesaformat,
                        unsigned src_pitch,
                        unsigned src_width,
                        unsigned src_height,
                        unsigned src_x_offset,
                        unsigned src_y_offset,
                        struct radeon_bo *dst_bo,
                        intptr_t dst_offset,
                        gl_format dst_mesaformat,
                        unsigned dst_pitch,
                        unsigned dst_width,
                        unsigned dst_height,
                        unsigned dst_x_offset,
                        unsigned dst_y_offset,
                        unsigned reg_width,
                        unsigned reg_height,
                        unsigned flip_y);
	   unsigned (*is_format_renderable)(gl_format mesa_format);
   } vtbl;
};

#define RADEON_CONTEXT(glctx) ((radeonContextPtr)(glctx->DriverCtx))
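
/*
 * Typical use (sketch): recover the driver context from the core gl_context
 * at the top of a driver entry point:
 *
 *   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
 */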

static inline __DRIdrawable* radeon_get_drawable(radeonContextPtr radeon)
{
	return radeon->dri.context->driDrawablePriv;
}

static inline __DRIdrawable* radeon_get_readable(radeonContextPtr radeon)
{
	return radeon->dri.context->driReadablePriv;
}

GLboolean radeonInitContext(radeonContextPtr radeon,
			    struct dd_function_table* functions,
			    const struct gl_config * glVisual,
			    __DRIcontext * driContextPriv,
			    void *sharedContextPrivate);

void radeonCleanupContext(radeonContextPtr radeon);
GLboolean radeonUnbindContext(__DRIcontext * driContextPriv);
void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
				 GLboolean front_only);
GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
			    __DRIdrawable * driDrawPriv,
			    __DRIdrawable * driReadPriv);
extern void radeonDestroyContext(__DRIcontext * driContextPriv);
void radeon_prepare_render(radeonContextPtr radeon);

#endif