1 /*
2 * (C) Copyright IBM Corporation 2004, 2005
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * IBM,
20 * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
22 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #include <inttypes.h>
27 #include <assert.h>
28 #include <string.h>
29
30 #include "glxclient.h"
31 #include "indirect.h"
32 #include <GL/glxproto.h>
33 #include "glxextensions.h"
34 #include "indirect_vertex_array.h"
35 #include "indirect_vertex_array_priv.h"
36
37 #define __GLX_PAD(n) (((n)+3) & ~3)
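
/* Illustrative examples of the padding rule: __GLX_PAD rounds a byte count
 * up to the next multiple of four, as GLX render commands require:
 *
 *    __GLX_PAD(1) == 4,   __GLX_PAD(12) == 12,   __GLX_PAD(13) == 16
 */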
38
39 /**
40 * \file indirect_vertex_array.c
41 * Implement GLX protocol for vertex arrays and vertex buffer objects.
42 *
43  * The most important function in this file is \c fill_array_info_cache.
44 * The \c array_state_vector contains a cache of the ARRAY_INFO data sent
45 * in the DrawArrays protocol. Certain operations, such as enabling or
46 * disabling an array, can invalidate this cache. \c fill_array_info_cache
47  * fills in this data. Additionally, it examines the enabled state and
48  * other factors to determine what "version" of DrawArrays protocol can be
49 * used.
50 *
51  * Currently, only two versions of DrawArrays protocol are implemented. The
52 * first version is the "none" protocol. This is the fallback when the
53 * server does not support GL 1.1 / EXT_vertex_arrays. It is implemented
54 * by sending batches of immediate mode commands that are equivalent to the
55 * DrawArrays protocol.
56 *
57 * The other protocol that is currently implemented is the "old" protocol.
58 * This is the GL 1.1 DrawArrays protocol. The only difference between GL
59 * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
60 * This protocol is called "old" because the ARB is in the process of
61  * defining a new protocol, which will probably be called either "new" or
62 * "vbo", to support multiple texture coordinate arrays, generic attributes,
63 * and vertex buffer objects.
64 *
65 * \author Ian Romanick <ian.d.romanick@intel.com>
66 */
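
/* Rough sketch of what the "none" fallback produces on the wire for a call
 * such as glDrawArrays(GL_TRIANGLES, 0, 3), assuming only the vertex array
 * is enabled and was specified with glVertexPointer(3, GL_FLOAT, ...); see
 * emit_DrawArrays_none and emit_element_none below:
 *
 *    Begin(GL_TRIANGLES)     -- X_GLrop_Begin render command
 *    Vertex3fv(element 0)    -- one immediate-mode command per enabled
 *    Vertex3fv(element 1)       array, per element, using the opcode
 *    Vertex3fv(element 2)       cached in that array's header
 *    End()                   -- X_GLrop_End render command
 */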
67
68 static void emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count);
69 static void emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count);
70
71 static void emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
72 const GLvoid * indices);
73 static void emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
74 const GLvoid * indices);
75
76
77 static GLubyte *emit_element_none(GLubyte * dst,
78 const struct array_state_vector *arrays,
79 unsigned index);
80 static GLubyte *emit_element_old(GLubyte * dst,
81 const struct array_state_vector *arrays,
82 unsigned index);
83 static struct array_state *get_array_entry(const struct array_state_vector
84 *arrays, GLenum key,
85 unsigned index);
86 static void fill_array_info_cache(struct array_state_vector *arrays);
87 static GLboolean validate_mode(struct glx_context * gc, GLenum mode);
88 static GLboolean validate_count(struct glx_context * gc, GLsizei count);
89 static GLboolean validate_type(struct glx_context * gc, GLenum type);
90
91
92 /**
93  * Table of sizes, in bytes, of GL types. All of the type enums are in
94  * the range 0x1400 - 0x140F. That range includes types added by extensions
95  * (e.g., \c GL_HALF_FLOAT_NV). The elements of this table correspond to the
96 * type enums masked with 0x0f.
97 *
98  * \note
99  * The size of \c GL_HALF_FLOAT_NV is not included. Neither are \c GL_2_BYTES,
100 * \c GL_3_BYTES, or \c GL_4_BYTES.
101 */
102 const GLuint __glXTypeSize_table[16] = {
103 1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
104 };
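
/* Worked example: GL_FLOAT is 0x1406, so its size is found at
 * __glXTypeSize_table[0x1406 & 0x0f] == __glXTypeSize_table[6] == 4 bytes;
 * GL_DOUBLE (0x140A) maps to index 0x0A == 8 bytes.
 */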
105
106
107 /**
108 * Free the per-context array state that was allocated with
109 * __glXInitVertexArrayState().
110 */
111 void
112 __glXFreeVertexArrayState(struct glx_context * gc)
113 {
114 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
115 struct array_state_vector *arrays = state->array_state;
116
117 if (arrays) {
118 if (arrays->stack) {
119 free(arrays->stack);
120 arrays->stack = NULL;
121 }
122 if (arrays->arrays) {
123 free(arrays->arrays);
124 arrays->arrays = NULL;
125 }
126 free(arrays);
127 state->array_state = NULL;
128 }
129 }
130
131
132 /**
133 * Initialize vertex array state of a GLX context.
134 *
135 * \param gc GLX context whose vertex array state is to be initialized.
136 *
137 * \warning
138 * This function may only be called after struct glx_context::gl_extension_bits,
139  * struct glx_context::server_minor, and struct glx_context::server_major have been
140 * initialized. These values are used to determine what vertex arrays are
141 * supported.
142 *
143 * \bug
144 * Return values from malloc are not properly tested.
145 */
146 void
147 __glXInitVertexArrayState(struct glx_context * gc)
148 {
149 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
150 struct array_state_vector *arrays;
151
152 unsigned array_count;
153 int texture_units = 1, vertex_program_attribs = 0;
154 unsigned i, j;
155
156 GLboolean got_fog = GL_FALSE;
157 GLboolean got_secondary_color = GL_FALSE;
158
159
160 arrays = calloc(1, sizeof(struct array_state_vector));
161 state->array_state = arrays;
162
163 arrays->old_DrawArrays_possible = !state->NoDrawArraysProtocol;
164 arrays->new_DrawArrays_possible = GL_FALSE;
165 arrays->DrawArrays = NULL;
166
167 arrays->active_texture_unit = 0;
168
169
170 /* Determine how many arrays are actually needed. Only arrays that
171     * are supported by the server are created. For example, if the server
172 * supports only 2 texture units, then only 2 texture coordinate arrays
173 * are created.
174 *
175 * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
176 * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
177 * GL_EDGE_FLAG_ARRAY are supported.
178 */
179
180 array_count = 5;
181
182 if (__glExtensionBitIsEnabled(gc, GL_EXT_fog_coord_bit)
183 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
184 got_fog = GL_TRUE;
185 array_count++;
186 }
187
188 if (__glExtensionBitIsEnabled(gc, GL_EXT_secondary_color_bit)
189 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
190 got_secondary_color = GL_TRUE;
191 array_count++;
192 }
193
194 if (__glExtensionBitIsEnabled(gc, GL_ARB_multitexture_bit)
195 || (gc->server_major > 1) || (gc->server_minor >= 3)) {
196 __indirect_glGetIntegerv(GL_MAX_TEXTURE_UNITS, &texture_units);
197 }
198
199 if (__glExtensionBitIsEnabled(gc, GL_ARB_vertex_program_bit)) {
200 __indirect_glGetProgramivARB(GL_VERTEX_PROGRAM_ARB,
201 GL_MAX_PROGRAM_ATTRIBS_ARB,
202 &vertex_program_attribs);
203 }
204
205 arrays->num_texture_units = texture_units;
206 arrays->num_vertex_program_attribs = vertex_program_attribs;
207 array_count += texture_units + vertex_program_attribs;
208 arrays->num_arrays = array_count;
209 arrays->arrays = calloc(array_count, sizeof(struct array_state));
210
211 arrays->arrays[0].data_type = GL_FLOAT;
212 arrays->arrays[0].count = 3;
213 arrays->arrays[0].key = GL_NORMAL_ARRAY;
214 arrays->arrays[0].normalized = GL_TRUE;
215 arrays->arrays[0].old_DrawArrays_possible = GL_TRUE;
216
217 arrays->arrays[1].data_type = GL_FLOAT;
218 arrays->arrays[1].count = 4;
219 arrays->arrays[1].key = GL_COLOR_ARRAY;
220 arrays->arrays[1].normalized = GL_TRUE;
221 arrays->arrays[1].old_DrawArrays_possible = GL_TRUE;
222
223 arrays->arrays[2].data_type = GL_FLOAT;
224 arrays->arrays[2].count = 1;
225 arrays->arrays[2].key = GL_INDEX_ARRAY;
226 arrays->arrays[2].old_DrawArrays_possible = GL_TRUE;
227
228 arrays->arrays[3].data_type = GL_UNSIGNED_BYTE;
229 arrays->arrays[3].count = 1;
230 arrays->arrays[3].key = GL_EDGE_FLAG_ARRAY;
231 arrays->arrays[3].old_DrawArrays_possible = GL_TRUE;
232
233 for (i = 0; i < texture_units; i++) {
234 arrays->arrays[4 + i].data_type = GL_FLOAT;
235 arrays->arrays[4 + i].count = 4;
236 arrays->arrays[4 + i].key = GL_TEXTURE_COORD_ARRAY;
237
238 arrays->arrays[4 + i].old_DrawArrays_possible = (i == 0);
239 arrays->arrays[4 + i].index = i;
240
241 arrays->arrays[4 + i].header[1] = i + GL_TEXTURE0;
242 }
243
244 i = 4 + texture_units;
245
246 if (got_fog) {
247 arrays->arrays[i].data_type = GL_FLOAT;
248 arrays->arrays[i].count = 1;
249 arrays->arrays[i].key = GL_FOG_COORDINATE_ARRAY;
250 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
251 i++;
252 }
253
254 if (got_secondary_color) {
255 arrays->arrays[i].data_type = GL_FLOAT;
256 arrays->arrays[i].count = 3;
257 arrays->arrays[i].key = GL_SECONDARY_COLOR_ARRAY;
258 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
259 arrays->arrays[i].normalized = GL_TRUE;
260 i++;
261 }
262
263
264 for (j = 0; j < vertex_program_attribs; j++) {
265 const unsigned idx = (vertex_program_attribs - (j + 1));
266
267
268 arrays->arrays[idx + i].data_type = GL_FLOAT;
269 arrays->arrays[idx + i].count = 4;
270 arrays->arrays[idx + i].key = GL_VERTEX_ATTRIB_ARRAY_POINTER;
271
272 arrays->arrays[idx + i].old_DrawArrays_possible = 0;
273 arrays->arrays[idx + i].index = idx;
274
275 arrays->arrays[idx + i].header[1] = idx;
276 }
277
278 i += vertex_program_attribs;
279
280
281    /* Vertex array *must* be last because of the way that
282 * emit_DrawArrays_none works.
283 */
284
285 arrays->arrays[i].data_type = GL_FLOAT;
286 arrays->arrays[i].count = 4;
287 arrays->arrays[i].key = GL_VERTEX_ARRAY;
288 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
289
290 assert((i + 1) == arrays->num_arrays);
291
292 arrays->stack_index = 0;
293 arrays->stack = malloc(sizeof(struct array_stack_state)
294 * arrays->num_arrays
295 * __GL_CLIENT_ATTRIB_STACK_DEPTH);
296 }
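
/* Illustrative summary of the layout built above, for a server exposing N
 * texture units and M generic vertex program attributes:
 *
 *    arrays[0..3]              normal, color, index, edge flag
 *    arrays[4 .. 4+N-1]        texture coordinates for units 0 .. N-1
 *    (optional entries)        fog coordinate, secondary color
 *    (next M entries)          generic vertex attributes 0 .. M-1
 *    arrays[num_arrays - 1]    vertex -- always last, as required by
 *                              emit_DrawArrays_none
 */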
297
298
299 /**
300 * Calculate the size of a single vertex for the "none" protocol. This is
301 * essentially the size of all the immediate-mode commands required to
302 * implement the enabled vertex arrays.
303 */
304 static size_t
305 calculate_single_vertex_size_none(const struct array_state_vector *arrays)
306 {
307 size_t single_vertex_size = 0;
308 unsigned i;
309
310
311 for (i = 0; i < arrays->num_arrays; i++) {
312 if (arrays->arrays[i].enabled) {
313 single_vertex_size += ((uint16_t *) arrays->arrays[i].header)[0];
314 }
315 }
316
317 return single_vertex_size;
318 }
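
/* Worked example: with glVertexPointer(3, GL_FLOAT, 0, p) and
 * glNormalPointer(GL_FLOAT, 0, q) both enabled, each array's header[0]
 * holds __GLX_PAD(4 + 12) == 16, so the size of a single "none"-protocol
 * vertex is 16 + 16 == 32 bytes.
 */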
319
320
321 /**
322 * Emit a single element using non-DrawArrays protocol.
323 */
324 GLubyte *
325 emit_element_none(GLubyte * dst,
326 const struct array_state_vector * arrays, unsigned index)
327 {
328 unsigned i;
329
330
331 for (i = 0; i < arrays->num_arrays; i++) {
332 if (arrays->arrays[i].enabled) {
333 const size_t offset = index * arrays->arrays[i].true_stride;
334
335 /* The generic attributes can have more data than is in the
336 * elements. This is because a vertex array can be a 2 element,
337 * normalized, unsigned short, but the "closest" immediate mode
338 * protocol is for a 4Nus. Since the sizes are small, the
339 * performance impact on modern processors should be negligible.
340 */
341 (void) memset(dst, 0, ((uint16_t *) arrays->arrays[i].header)[0]);
342
343 (void) memcpy(dst, arrays->arrays[i].header,
344 arrays->arrays[i].header_size);
345
346 dst += arrays->arrays[i].header_size;
347
348 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
349 arrays->arrays[i].element_size);
350
351 dst += __GLX_PAD(arrays->arrays[i].element_size);
352 }
353 }
354
355 return dst;
356 }
357
358
359 /**
360 * Emit a single element using "old" DrawArrays protocol from
361 * EXT_vertex_arrays / OpenGL 1.1.
362 */
363 GLubyte *
364 emit_element_old(GLubyte * dst,
365 const struct array_state_vector * arrays, unsigned index)
366 {
367 unsigned i;
368
369
370 for (i = 0; i < arrays->num_arrays; i++) {
371 if (arrays->arrays[i].enabled) {
372 const size_t offset = index * arrays->arrays[i].true_stride;
373
374 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
375 arrays->arrays[i].element_size);
376
377 dst += __GLX_PAD(arrays->arrays[i].element_size);
378 }
379 }
380
381 return dst;
382 }
383
384
385 struct array_state *
386 get_array_entry(const struct array_state_vector *arrays,
387 GLenum key, unsigned index)
388 {
389 unsigned i;
390
391 for (i = 0; i < arrays->num_arrays; i++) {
392 if ((arrays->arrays[i].key == key)
393 && (arrays->arrays[i].index == index)) {
394 return &arrays->arrays[i];
395 }
396 }
397
398 return NULL;
399 }
400
401
402 static GLboolean
403 allocate_array_info_cache(struct array_state_vector *arrays,
404 size_t required_size)
405 {
406 #define MAX_HEADER_SIZE 20
407 if (arrays->array_info_cache_buffer_size < required_size) {
408 GLubyte *temp = realloc(arrays->array_info_cache_base,
409 required_size + MAX_HEADER_SIZE);
410
411 if (temp == NULL) {
412 return GL_FALSE;
413 }
414
415 arrays->array_info_cache_base = temp;
416 arrays->array_info_cache = temp + MAX_HEADER_SIZE;
417 arrays->array_info_cache_buffer_size = required_size;
418 }
419
420 arrays->array_info_cache_size = required_size;
421 return GL_TRUE;
422 }
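
/* Note on MAX_HEADER_SIZE: the extra bytes are reserved *in front of* the
 * cached ARRAY_INFO data so that emit_DrawArrays_header_old can write a
 * RenderLarge DrawArrays header (16 + 4 bytes) immediately before the cache
 * and ship the header and ARRAY_INFO together as one contiguous chunk.
 */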
423
424
425 /**
 * Fill in the ARRAY_INFO cache for the currently enabled arrays and select
 * the DrawArrays / DrawElements implementation that will be used: the
 * GL 1.1 "old" protocol when every enabled array supports it, otherwise the
 * immediate-mode "none" fallback.
426  */
427 void
428 fill_array_info_cache(struct array_state_vector *arrays)
429 {
430 GLboolean old_DrawArrays_possible;
431 unsigned i;
432
433
434 /* Determine how many arrays are enabled.
435 */
436
437 arrays->enabled_client_array_count = 0;
438 old_DrawArrays_possible = arrays->old_DrawArrays_possible;
439 for (i = 0; i < arrays->num_arrays; i++) {
440 if (arrays->arrays[i].enabled) {
441 arrays->enabled_client_array_count++;
442 old_DrawArrays_possible &= arrays->arrays[i].old_DrawArrays_possible;
443 }
444 }
445
446 if (arrays->new_DrawArrays_possible) {
447 assert(!arrays->new_DrawArrays_possible);
448 }
449 else if (old_DrawArrays_possible) {
450 const size_t required_size = arrays->enabled_client_array_count * 12;
451 uint32_t *info;
452
453
454 if (!allocate_array_info_cache(arrays, required_size)) {
455 return;
456 }
457
458
459 info = (uint32_t *) arrays->array_info_cache;
460 for (i = 0; i < arrays->num_arrays; i++) {
461 if (arrays->arrays[i].enabled) {
462 *(info++) = arrays->arrays[i].data_type;
463 *(info++) = arrays->arrays[i].count;
464 *(info++) = arrays->arrays[i].key;
465 }
466 }
467
468 arrays->DrawArrays = emit_DrawArrays_old;
469 arrays->DrawElements = emit_DrawElements_old;
470 }
471 else {
472 arrays->DrawArrays = emit_DrawArrays_none;
473 arrays->DrawElements = emit_DrawElements_none;
474 }
475
476 arrays->array_info_cache_valid = GL_TRUE;
477 }
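
/* Illustrative layout: for the "old" protocol the ARRAY_INFO block built
 * above is simply three CARD32 values (12 bytes) per enabled array:
 *
 *    { data_type, count, key }   e.g. { GL_FLOAT, 3, GL_VERTEX_ARRAY }
 */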
478
479
480 /**
481 * Emit a \c glDrawArrays command using the "none" protocol. That is,
482  * emit immediate-mode commands that are equivalent to the requested
483 * \c glDrawArrays command. This is used with servers that don't support
484 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
485 * vertex state is enabled that is not compatible with that protocol.
486 */
487 void
488 emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count)
489 {
490 struct glx_context *gc = __glXGetCurrentContext();
491 const __GLXattribute *state =
492 (const __GLXattribute *) (gc->client_state_private);
493 struct array_state_vector *arrays = state->array_state;
494
495 size_t single_vertex_size;
496 GLubyte *pc;
497 unsigned i;
498 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
499 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
500
501
502 single_vertex_size = calculate_single_vertex_size_none(arrays);
503
504 pc = gc->pc;
505
506 (void) memcpy(pc, begin_cmd, 4);
507 *(int *) (pc + 4) = mode;
508
509 pc += 8;
510
511 for (i = 0; i < count; i++) {
512 if ((pc + single_vertex_size) >= gc->bufEnd) {
513 pc = __glXFlushRenderBuffer(gc, pc);
514 }
515
516 pc = emit_element_none(pc, arrays, first + i);
517 }
518
519 if ((pc + 4) >= gc->bufEnd) {
520 pc = __glXFlushRenderBuffer(gc, pc);
521 }
522
523 (void) memcpy(pc, end_cmd, 4);
524 pc += 4;
525
526 gc->pc = pc;
527 if (gc->pc > gc->limit) {
528 (void) __glXFlushRenderBuffer(gc, gc->pc);
529 }
530 }
531
532
533 /**
534 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
535 * protocol.
536 *
537 * \param gc GLX context.
538 * \param arrays Array state.
539 * \param elements_per_request Location to store the number of elements that
540 * can fit in a single Render / RenderLarge
541 * command.
542  * \param total_requests     Total number of requests for a RenderLarge
543 * command. If a Render command is used, this
544 * will be zero.
545 * \param mode Drawing mode.
546 * \param count Number of vertices.
547 *
548 * \returns
549 * A pointer to the buffer for array data.
550 */
551 static GLubyte *
552 emit_DrawArrays_header_old(struct glx_context * gc,
553 struct array_state_vector *arrays,
554 size_t * elements_per_request,
555 unsigned int *total_requests,
556 GLenum mode, GLsizei count)
557 {
558 size_t command_size;
559 size_t single_vertex_size;
560 const unsigned header_size = 16;
561 unsigned i;
562 GLubyte *pc;
563
564
565 /* Determine the size of the whole command. This includes the header,
566 * the ARRAY_INFO data and the array data. Once this size is calculated,
567 * it will be known whether a Render or RenderLarge command is needed.
568 */
569
570 single_vertex_size = 0;
571 for (i = 0; i < arrays->num_arrays; i++) {
572 if (arrays->arrays[i].enabled) {
573 single_vertex_size += __GLX_PAD(arrays->arrays[i].element_size);
574 }
575 }
576
577 command_size = arrays->array_info_cache_size + header_size
578 + (single_vertex_size * count);
579
580
581 /* Write the header for either a Render command or a RenderLarge
582 * command. After the header is written, write the ARRAY_INFO data.
583 */
584
585 if (command_size > gc->maxSmallRenderCommandSize) {
586       /* maxSize is the maximum amount of data that can be stuffed into a single
587 * packet. sz_xGLXRenderReq is added because bufSize is the maximum
588 * packet size minus sz_xGLXRenderReq.
589 */
590 const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
591 - sz_xGLXRenderLargeReq;
592 unsigned vertex_requests;
593
594
595 /* Calculate the number of data packets that will be required to send
596        * the whole command. To do this, the number of vertices that
597 * will fit in a single buffer must be calculated.
598 *
599 * The important value here is elements_per_request. This is the
600 * number of complete array elements that will fit in a single
601 * buffer. There may be some wasted space at the end of the buffer,
602        * but splitting elements across buffer boundaries would be painful.
603 */
604
605 elements_per_request[0] = maxSize / single_vertex_size;
606
607 vertex_requests = (count + elements_per_request[0] - 1)
608 / elements_per_request[0];
609
610 *total_requests = vertex_requests + 1;
611
612
613 __glXFlushRenderBuffer(gc, gc->pc);
614
615 command_size += 4;
616
617 pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
618 *(uint32_t *) (pc + 0) = command_size;
619 *(uint32_t *) (pc + 4) = X_GLrop_DrawArrays;
620 *(uint32_t *) (pc + 8) = count;
621 *(uint32_t *) (pc + 12) = arrays->enabled_client_array_count;
622 *(uint32_t *) (pc + 16) = mode;
623
624 __glXSendLargeChunk(gc, 1, *total_requests, pc,
625 header_size + 4 + arrays->array_info_cache_size);
626
627 pc = gc->pc;
628 }
629 else {
630 if ((gc->pc + command_size) >= gc->bufEnd) {
631 (void) __glXFlushRenderBuffer(gc, gc->pc);
632 }
633
634 pc = gc->pc;
635 *(uint16_t *) (pc + 0) = command_size;
636 *(uint16_t *) (pc + 2) = X_GLrop_DrawArrays;
637 *(uint32_t *) (pc + 4) = count;
638 *(uint32_t *) (pc + 8) = arrays->enabled_client_array_count;
639 *(uint32_t *) (pc + 12) = mode;
640
641 pc += header_size;
642
643 (void) memcpy(pc, arrays->array_info_cache,
644 arrays->array_info_cache_size);
645 pc += arrays->array_info_cache_size;
646
647 *elements_per_request = count;
648 *total_requests = 0;
649 }
650
651
652 return pc;
653 }
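
/* Illustrative layout of the headers written above, as derived from the
 * code (Render for small commands, RenderLarge when the data will not fit):
 *
 *    Render:                          RenderLarge:
 *       uint16  command_size             uint32  command_size
 *       uint16  X_GLrop_DrawArrays       uint32  X_GLrop_DrawArrays
 *       uint32  count                    uint32  count
 *       uint32  enabled array count      uint32  enabled array count
 *       uint32  mode                     uint32  mode
 *    ARRAY_INFO and vertex data        ARRAY_INFO follows in chunk 1;
 *    follow in the same request.       vertex data fills chunks 2..n.
 */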
654
655
656 /**
 * Emit a \c glDrawArrays command using the "old" GL 1.1 / EXT_vertex_arrays
 * DrawArrays protocol.
657  */
658 void
659 emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count)
660 {
661 struct glx_context *gc = __glXGetCurrentContext();
662 const __GLXattribute *state =
663 (const __GLXattribute *) (gc->client_state_private);
664 struct array_state_vector *arrays = state->array_state;
665
666 GLubyte *pc;
667 size_t elements_per_request;
668 unsigned total_requests = 0;
669 unsigned i;
670 size_t total_sent = 0;
671
672
673 pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
674 &total_requests, mode, count);
675
676
677 /* Write the arrays.
678 */
679
680 if (total_requests == 0) {
681 assert(elements_per_request >= count);
682
683 for (i = 0; i < count; i++) {
684 pc = emit_element_old(pc, arrays, i + first);
685 }
686
687 assert(pc <= gc->bufEnd);
688
689 gc->pc = pc;
690 if (gc->pc > gc->limit) {
691 (void) __glXFlushRenderBuffer(gc, gc->pc);
692 }
693 }
694 else {
695 unsigned req;
696
697
698 for (req = 2; req <= total_requests; req++) {
699 if (count < elements_per_request) {
700 elements_per_request = count;
701 }
702
703 pc = gc->pc;
704 for (i = 0; i < elements_per_request; i++) {
705 pc = emit_element_old(pc, arrays, i + first);
706 }
707
708 first += elements_per_request;
709
710 total_sent += (size_t) (pc - gc->pc);
711 __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
712
713 count -= elements_per_request;
714 }
715 }
716 }
717
718
719 void
720 emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
721 const GLvoid * indices)
722 {
723 struct glx_context *gc = __glXGetCurrentContext();
724 const __GLXattribute *state =
725 (const __GLXattribute *) (gc->client_state_private);
726 struct array_state_vector *arrays = state->array_state;
727 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
728 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
729
730 GLubyte *pc;
731 size_t single_vertex_size;
732 unsigned i;
733
734
735 single_vertex_size = calculate_single_vertex_size_none(arrays);
736
737
738 if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
739 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
740 }
741
742 pc = gc->pc;
743
744 (void) memcpy(pc, begin_cmd, 4);
745 *(int *) (pc + 4) = mode;
746
747 pc += 8;
748
749 for (i = 0; i < count; i++) {
750 unsigned index = 0;
751
752 if ((pc + single_vertex_size) >= gc->bufEnd) {
753 pc = __glXFlushRenderBuffer(gc, pc);
754 }
755
756 switch (type) {
757 case GL_UNSIGNED_INT:
758 index = (unsigned) (((GLuint *) indices)[i]);
759 break;
760 case GL_UNSIGNED_SHORT:
761 index = (unsigned) (((GLushort *) indices)[i]);
762 break;
763 case GL_UNSIGNED_BYTE:
764 index = (unsigned) (((GLubyte *) indices)[i]);
765 break;
766 }
767 pc = emit_element_none(pc, arrays, index);
768 }
769
770 if ((pc + 4) >= gc->bufEnd) {
771 pc = __glXFlushRenderBuffer(gc, pc);
772 }
773
774 (void) memcpy(pc, end_cmd, 4);
775 pc += 4;
776
777 gc->pc = pc;
778 if (gc->pc > gc->limit) {
779 (void) __glXFlushRenderBuffer(gc, gc->pc);
780 }
781 }
782
783
784 /**
 * Emit a \c glDrawElements command using the "old" protocol: the indices are
 * resolved on the client, and the selected elements are sent using the
 * GL 1.1 / EXT_vertex_arrays DrawArrays wire format.
785  */
786 void
787 emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
788 const GLvoid * indices)
789 {
790 struct glx_context *gc = __glXGetCurrentContext();
791 const __GLXattribute *state =
792 (const __GLXattribute *) (gc->client_state_private);
793 struct array_state_vector *arrays = state->array_state;
794
795 GLubyte *pc;
796 size_t elements_per_request;
797 unsigned total_requests = 0;
798 unsigned i;
799 unsigned req;
800 unsigned req_element = 0;
801
802
803 pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
804 &total_requests, mode, count);
805
806
807 /* Write the arrays.
808 */
809
810 req = 2;
811 while (count > 0) {
812 if (count < elements_per_request) {
813 elements_per_request = count;
814 }
815
816 switch (type) {
817 case GL_UNSIGNED_INT:{
818 const GLuint *ui_ptr = (const GLuint *) indices + req_element;
819
820 for (i = 0; i < elements_per_request; i++) {
821 const GLint index = (GLint) * (ui_ptr++);
822 pc = emit_element_old(pc, arrays, index);
823 }
824 break;
825 }
826 case GL_UNSIGNED_SHORT:{
827 const GLushort *us_ptr = (const GLushort *) indices + req_element;
828
829 for (i = 0; i < elements_per_request; i++) {
830 const GLint index = (GLint) * (us_ptr++);
831 pc = emit_element_old(pc, arrays, index);
832 }
833 break;
834 }
835 case GL_UNSIGNED_BYTE:{
836 const GLubyte *ub_ptr = (const GLubyte *) indices + req_element;
837
838 for (i = 0; i < elements_per_request; i++) {
839 const GLint index = (GLint) * (ub_ptr++);
840 pc = emit_element_old(pc, arrays, index);
841 }
842 break;
843 }
844 }
845
846 if (total_requests != 0) {
847 __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
848 pc = gc->pc;
849 req++;
850 }
851
852 count -= elements_per_request;
853 req_element += elements_per_request;
854 }
855
856
857 assert((total_requests == 0) || ((req - 1) == total_requests));
858
859 if (total_requests == 0) {
860 assert(pc <= gc->bufEnd);
861
862 gc->pc = pc;
863 if (gc->pc > gc->limit) {
864 (void) __glXFlushRenderBuffer(gc, gc->pc);
865 }
866 }
867 }
868
869
870 /**
871  * Validate that the \c mode parameter to \c glDrawArrays, et al. is valid.
872 * If it is not valid, then an error code is set in the GLX context.
873 *
874 * \returns
875 * \c GL_TRUE if the argument is valid, \c GL_FALSE if is not.
876 */
877 static GLboolean
878 validate_mode(struct glx_context * gc, GLenum mode)
879 {
880 switch (mode) {
881 case GL_POINTS:
882 case GL_LINE_STRIP:
883 case GL_LINE_LOOP:
884 case GL_LINES:
885 case GL_TRIANGLE_STRIP:
886 case GL_TRIANGLE_FAN:
887 case GL_TRIANGLES:
888 case GL_QUAD_STRIP:
889 case GL_QUADS:
890 case GL_POLYGON:
891 break;
892 default:
893 __glXSetError(gc, GL_INVALID_ENUM);
894 return GL_FALSE;
895 }
896
897 return GL_TRUE;
898 }
899
900
901 /**
902  * Validate that the \c count parameter to \c glDrawArrays, et al. is valid.
903 * A value less than zero is invalid and will result in \c GL_INVALID_VALUE
904 * being set. A value of zero will not result in an error being set, but
905 * will result in \c GL_FALSE being returned.
906 *
907 * \returns
908 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
909 */
910 static GLboolean
911 validate_count(struct glx_context * gc, GLsizei count)
912 {
913 if (count < 0) {
914 __glXSetError(gc, GL_INVALID_VALUE);
915 }
916
917 return (count > 0);
918 }
919
920
921 /**
922  * Validate that the \c type parameter to \c glDrawElements, et al. is
923 * valid. Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and
924 * \c GL_UNSIGNED_INT are valid.
925 *
926 * \returns
927 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
928 */
929 static GLboolean
930 validate_type(struct glx_context * gc, GLenum type)
931 {
932 switch (type) {
933 case GL_UNSIGNED_INT:
934 case GL_UNSIGNED_SHORT:
935 case GL_UNSIGNED_BYTE:
936 return GL_TRUE;
937 default:
938 __glXSetError(gc, GL_INVALID_ENUM);
939 return GL_FALSE;
940 }
941 }
942
943
944 void
945 __indirect_glDrawArrays(GLenum mode, GLint first, GLsizei count)
946 {
947 struct glx_context *gc = __glXGetCurrentContext();
948 const __GLXattribute *state =
949 (const __GLXattribute *) (gc->client_state_private);
950 struct array_state_vector *arrays = state->array_state;
951
952
953 if (validate_mode(gc, mode) && validate_count(gc, count)) {
954 if (!arrays->array_info_cache_valid) {
955 fill_array_info_cache(arrays);
956 }
957
958 arrays->DrawArrays(mode, first, count);
959 }
960 }
961
962
963 void
964 __indirect_glArrayElement(GLint index)
965 {
966 struct glx_context *gc = __glXGetCurrentContext();
967 const __GLXattribute *state =
968 (const __GLXattribute *) (gc->client_state_private);
969 struct array_state_vector *arrays = state->array_state;
970
971 size_t single_vertex_size;
972
973
974 single_vertex_size = calculate_single_vertex_size_none(arrays);
975
976 if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
977 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
978 }
979
980 gc->pc = emit_element_none(gc->pc, arrays, index);
981
982 if (gc->pc > gc->limit) {
983 (void) __glXFlushRenderBuffer(gc, gc->pc);
984 }
985 }
986
987
988 void
989 __indirect_glDrawElements(GLenum mode, GLsizei count, GLenum type,
990 const GLvoid * indices)
991 {
992 struct glx_context *gc = __glXGetCurrentContext();
993 const __GLXattribute *state =
994 (const __GLXattribute *) (gc->client_state_private);
995 struct array_state_vector *arrays = state->array_state;
996
997
998 if (validate_mode(gc, mode) && validate_count(gc, count)
999 && validate_type(gc, type)) {
1000 if (!arrays->array_info_cache_valid) {
1001 fill_array_info_cache(arrays);
1002 }
1003
1004 arrays->DrawElements(mode, count, type, indices);
1005 }
1006 }
1007
1008
1009 void
1010 __indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
1011 GLsizei count, GLenum type,
1012 const GLvoid * indices)
1013 {
1014 struct glx_context *gc = __glXGetCurrentContext();
1015 const __GLXattribute *state =
1016 (const __GLXattribute *) (gc->client_state_private);
1017 struct array_state_vector *arrays = state->array_state;
1018
1019
1020 if (validate_mode(gc, mode) && validate_count(gc, count)
1021 && validate_type(gc, type)) {
1022 if (end < start) {
1023 __glXSetError(gc, GL_INVALID_VALUE);
1024 return;
1025 }
1026
1027 if (!arrays->array_info_cache_valid) {
1028 fill_array_info_cache(arrays);
1029 }
1030
1031 arrays->DrawElements(mode, count, type, indices);
1032 }
1033 }
1034
1035
1036 void
1037 __indirect_glMultiDrawArraysEXT(GLenum mode, const GLint *first,
1038 const GLsizei *count, GLsizei primcount)
1039 {
1040 struct glx_context *gc = __glXGetCurrentContext();
1041 const __GLXattribute *state =
1042 (const __GLXattribute *) (gc->client_state_private);
1043 struct array_state_vector *arrays = state->array_state;
1044 GLsizei i;
1045
1046
1047 if (validate_mode(gc, mode)) {
1048 if (!arrays->array_info_cache_valid) {
1049 fill_array_info_cache(arrays);
1050 }
1051
1052 for (i = 0; i < primcount; i++) {
1053 if (validate_count(gc, count[i])) {
1054 arrays->DrawArrays(mode, first[i], count[i]);
1055 }
1056 }
1057 }
1058 }
1059
1060
1061 void
1062 __indirect_glMultiDrawElementsEXT(GLenum mode, const GLsizei * count,
1063 GLenum type, const GLvoid ** indices,
1064 GLsizei primcount)
1065 {
1066 struct glx_context *gc = __glXGetCurrentContext();
1067 const __GLXattribute *state =
1068 (const __GLXattribute *) (gc->client_state_private);
1069 struct array_state_vector *arrays = state->array_state;
1070 GLsizei i;
1071
1072
1073 if (validate_mode(gc, mode) && validate_type(gc, type)) {
1074 if (!arrays->array_info_cache_valid) {
1075 fill_array_info_cache(arrays);
1076 }
1077
1078 for (i = 0; i < primcount; i++) {
1079 if (validate_count(gc, count[i])) {
1080 arrays->DrawElements(mode, count[i], type, indices[i]);
1081 }
1082 }
1083 }
1084 }
1085
1086
1087 #define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \
1088 do { \
1089 (a)->data = PTR; \
1090 (a)->data_type = TYPE; \
1091 (a)->user_stride = STRIDE; \
1092 (a)->count = COUNT; \
1093 (a)->normalized = NORMALIZED; \
1094 \
1095 (a)->element_size = __glXTypeSize( TYPE ) * COUNT; \
1096 (a)->true_stride = (STRIDE == 0) \
1097 ? (a)->element_size : STRIDE; \
1098 \
1099 (a)->header_size = HDR_SIZE; \
1100 ((uint16_t *) (a)->header)[0] = __GLX_PAD((a)->header_size + (a)->element_size); \
1101 ((uint16_t *) (a)->header)[1] = OPCODE; \
1102 } while(0)
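
/* Worked expansion: for __indirect_glNormalPointer(GL_FLOAT, 0, ptr) below,
 * COMMON_ARRAY_DATA_INIT leaves the GL_NORMAL_ARRAY entry roughly as:
 *
 *    a->element_size              == __glXTypeSize(GL_FLOAT) * 3 == 12
 *    a->true_stride               == 12   (stride 0 means tightly packed)
 *    ((uint16_t *) a->header)[0]  == __GLX_PAD(4 + 12) == 16
 *    ((uint16_t *) a->header)[1]  == X_GLrop_Normal3fv
 */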
1103
1104
1105 void
1106 __indirect_glVertexPointer(GLint size, GLenum type, GLsizei stride,
1107 const GLvoid * pointer)
1108 {
1109 static const uint16_t short_ops[5] = {
1110 0, 0, X_GLrop_Vertex2sv, X_GLrop_Vertex3sv, X_GLrop_Vertex4sv
1111 };
1112 static const uint16_t int_ops[5] = {
1113 0, 0, X_GLrop_Vertex2iv, X_GLrop_Vertex3iv, X_GLrop_Vertex4iv
1114 };
1115 static const uint16_t float_ops[5] = {
1116 0, 0, X_GLrop_Vertex2fv, X_GLrop_Vertex3fv, X_GLrop_Vertex4fv
1117 };
1118 static const uint16_t double_ops[5] = {
1119 0, 0, X_GLrop_Vertex2dv, X_GLrop_Vertex3dv, X_GLrop_Vertex4dv
1120 };
1121 uint16_t opcode;
1122 struct glx_context *gc = __glXGetCurrentContext();
1123 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1124 struct array_state_vector *arrays = state->array_state;
1125 struct array_state *a;
1126
1127
1128 if (size < 2 || size > 4 || stride < 0) {
1129 __glXSetError(gc, GL_INVALID_VALUE);
1130 return;
1131 }
1132
1133 switch (type) {
1134 case GL_SHORT:
1135 opcode = short_ops[size];
1136 break;
1137 case GL_INT:
1138 opcode = int_ops[size];
1139 break;
1140 case GL_FLOAT:
1141 opcode = float_ops[size];
1142 break;
1143 case GL_DOUBLE:
1144 opcode = double_ops[size];
1145 break;
1146 default:
1147 __glXSetError(gc, GL_INVALID_ENUM);
1148 return;
1149 }
1150
1151 a = get_array_entry(arrays, GL_VERTEX_ARRAY, 0);
1152 assert(a != NULL);
1153 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE, 4,
1154 opcode);
1155
1156 if (a->enabled) {
1157 arrays->array_info_cache_valid = GL_FALSE;
1158 }
1159 }
1160
1161
1162 void
1163 __indirect_glNormalPointer(GLenum type, GLsizei stride,
1164 const GLvoid * pointer)
1165 {
1166 uint16_t opcode;
1167 struct glx_context *gc = __glXGetCurrentContext();
1168 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1169 struct array_state_vector *arrays = state->array_state;
1170 struct array_state *a;
1171
1172
1173 if (stride < 0) {
1174 __glXSetError(gc, GL_INVALID_VALUE);
1175 return;
1176 }
1177
1178 switch (type) {
1179 case GL_BYTE:
1180 opcode = X_GLrop_Normal3bv;
1181 break;
1182 case GL_SHORT:
1183 opcode = X_GLrop_Normal3sv;
1184 break;
1185 case GL_INT:
1186 opcode = X_GLrop_Normal3iv;
1187 break;
1188 case GL_FLOAT:
1189 opcode = X_GLrop_Normal3fv;
1190 break;
1191 case GL_DOUBLE:
1192 opcode = X_GLrop_Normal3dv;
1193 break;
1194 default:
1195 __glXSetError(gc, GL_INVALID_ENUM);
1196 return;
1197 }
1198
1199 a = get_array_entry(arrays, GL_NORMAL_ARRAY, 0);
1200 assert(a != NULL);
1201 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 3, GL_TRUE, 4, opcode);
1202
1203 if (a->enabled) {
1204 arrays->array_info_cache_valid = GL_FALSE;
1205 }
1206 }
1207
1208
1209 void
1210 __indirect_glColorPointer(GLint size, GLenum type, GLsizei stride,
1211 const GLvoid * pointer)
1212 {
1213 static const uint16_t byte_ops[5] = {
1214 0, 0, 0, X_GLrop_Color3bv, X_GLrop_Color4bv
1215 };
1216 static const uint16_t ubyte_ops[5] = {
1217 0, 0, 0, X_GLrop_Color3ubv, X_GLrop_Color4ubv
1218 };
1219 static const uint16_t short_ops[5] = {
1220 0, 0, 0, X_GLrop_Color3sv, X_GLrop_Color4sv
1221 };
1222 static const uint16_t ushort_ops[5] = {
1223 0, 0, 0, X_GLrop_Color3usv, X_GLrop_Color4usv
1224 };
1225 static const uint16_t int_ops[5] = {
1226 0, 0, 0, X_GLrop_Color3iv, X_GLrop_Color4iv
1227 };
1228 static const uint16_t uint_ops[5] = {
1229 0, 0, 0, X_GLrop_Color3uiv, X_GLrop_Color4uiv
1230 };
1231 static const uint16_t float_ops[5] = {
1232 0, 0, 0, X_GLrop_Color3fv, X_GLrop_Color4fv
1233 };
1234 static const uint16_t double_ops[5] = {
1235 0, 0, 0, X_GLrop_Color3dv, X_GLrop_Color4dv
1236 };
1237 uint16_t opcode;
1238 struct glx_context *gc = __glXGetCurrentContext();
1239 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1240 struct array_state_vector *arrays = state->array_state;
1241 struct array_state *a;
1242
1243
1244 if (size < 3 || size > 4 || stride < 0) {
1245 __glXSetError(gc, GL_INVALID_VALUE);
1246 return;
1247 }
1248
1249 switch (type) {
1250 case GL_BYTE:
1251 opcode = byte_ops[size];
1252 break;
1253 case GL_UNSIGNED_BYTE:
1254 opcode = ubyte_ops[size];
1255 break;
1256 case GL_SHORT:
1257 opcode = short_ops[size];
1258 break;
1259 case GL_UNSIGNED_SHORT:
1260 opcode = ushort_ops[size];
1261 break;
1262 case GL_INT:
1263 opcode = int_ops[size];
1264 break;
1265 case GL_UNSIGNED_INT:
1266 opcode = uint_ops[size];
1267 break;
1268 case GL_FLOAT:
1269 opcode = float_ops[size];
1270 break;
1271 case GL_DOUBLE:
1272 opcode = double_ops[size];
1273 break;
1274 default:
1275 __glXSetError(gc, GL_INVALID_ENUM);
1276 return;
1277 }
1278
1279 a = get_array_entry(arrays, GL_COLOR_ARRAY, 0);
1280 assert(a != NULL);
1281 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1282
1283 if (a->enabled) {
1284 arrays->array_info_cache_valid = GL_FALSE;
1285 }
1286 }
1287
1288
1289 void
1290 __indirect_glIndexPointer(GLenum type, GLsizei stride, const GLvoid * pointer)
1291 {
1292 uint16_t opcode;
1293 struct glx_context *gc = __glXGetCurrentContext();
1294 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1295 struct array_state_vector *arrays = state->array_state;
1296 struct array_state *a;
1297
1298
1299 if (stride < 0) {
1300 __glXSetError(gc, GL_INVALID_VALUE);
1301 return;
1302 }
1303
1304 switch (type) {
1305 case GL_UNSIGNED_BYTE:
1306 opcode = X_GLrop_Indexubv;
1307 break;
1308 case GL_SHORT:
1309 opcode = X_GLrop_Indexsv;
1310 break;
1311 case GL_INT:
1312 opcode = X_GLrop_Indexiv;
1313 break;
1314 case GL_FLOAT:
1315 opcode = X_GLrop_Indexfv;
1316 break;
1317 case GL_DOUBLE:
1318 opcode = X_GLrop_Indexdv;
1319 break;
1320 default:
1321 __glXSetError(gc, GL_INVALID_ENUM);
1322 return;
1323 }
1324
1325 a = get_array_entry(arrays, GL_INDEX_ARRAY, 0);
1326 assert(a != NULL);
1327 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1328
1329 if (a->enabled) {
1330 arrays->array_info_cache_valid = GL_FALSE;
1331 }
1332 }
1333
1334
1335 void
1336 __indirect_glEdgeFlagPointer(GLsizei stride, const GLvoid * pointer)
1337 {
1338 struct glx_context *gc = __glXGetCurrentContext();
1339 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1340 struct array_state_vector *arrays = state->array_state;
1341 struct array_state *a;
1342
1343
1344 if (stride < 0) {
1345 __glXSetError(gc, GL_INVALID_VALUE);
1346 return;
1347 }
1348
1349
1350 a = get_array_entry(arrays, GL_EDGE_FLAG_ARRAY, 0);
1351 assert(a != NULL);
1352 COMMON_ARRAY_DATA_INIT(a, pointer, GL_UNSIGNED_BYTE, stride, 1, GL_FALSE,
1353 4, X_GLrop_EdgeFlagv);
1354
1355 if (a->enabled) {
1356 arrays->array_info_cache_valid = GL_FALSE;
1357 }
1358 }
1359
1360
1361 void
1362 __indirect_glTexCoordPointer(GLint size, GLenum type, GLsizei stride,
1363 const GLvoid * pointer)
1364 {
1365 static const uint16_t short_ops[5] = {
1366 0, X_GLrop_TexCoord1sv, X_GLrop_TexCoord2sv, X_GLrop_TexCoord3sv,
1367 X_GLrop_TexCoord4sv
1368 };
1369 static const uint16_t int_ops[5] = {
1370 0, X_GLrop_TexCoord1iv, X_GLrop_TexCoord2iv, X_GLrop_TexCoord3iv,
1371 X_GLrop_TexCoord4iv
1372 };
1373 static const uint16_t float_ops[5] = {
1374       0, X_GLrop_TexCoord1fv, X_GLrop_TexCoord2fv, X_GLrop_TexCoord3fv,
1375 X_GLrop_TexCoord4fv
1376 };
1377 static const uint16_t double_ops[5] = {
1378 0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2dv, X_GLrop_TexCoord3dv,
1379 X_GLrop_TexCoord4dv
1380 };
1381
1382 static const uint16_t mshort_ops[5] = {
1383 0, X_GLrop_MultiTexCoord1svARB, X_GLrop_MultiTexCoord2svARB,
1384 X_GLrop_MultiTexCoord3svARB, X_GLrop_MultiTexCoord4svARB
1385 };
1386 static const uint16_t mint_ops[5] = {
1387 0, X_GLrop_MultiTexCoord1ivARB, X_GLrop_MultiTexCoord2ivARB,
1388 X_GLrop_MultiTexCoord3ivARB, X_GLrop_MultiTexCoord4ivARB
1389 };
1390 static const uint16_t mfloat_ops[5] = {
1391       0, X_GLrop_MultiTexCoord1fvARB, X_GLrop_MultiTexCoord2fvARB,
1392 X_GLrop_MultiTexCoord3fvARB, X_GLrop_MultiTexCoord4fvARB
1393 };
1394 static const uint16_t mdouble_ops[5] = {
1395 0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2dvARB,
1396 X_GLrop_MultiTexCoord3dvARB, X_GLrop_MultiTexCoord4dvARB
1397 };
1398
1399 uint16_t opcode;
1400 struct glx_context *gc = __glXGetCurrentContext();
1401 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1402 struct array_state_vector *arrays = state->array_state;
1403 struct array_state *a;
1404 unsigned header_size;
1405 unsigned index;
1406
1407
1408 if (size < 1 || size > 4 || stride < 0) {
1409 __glXSetError(gc, GL_INVALID_VALUE);
1410 return;
1411 }
1412
1413 index = arrays->active_texture_unit;
1414 if (index == 0) {
1415 switch (type) {
1416 case GL_SHORT:
1417 opcode = short_ops[size];
1418 break;
1419 case GL_INT:
1420 opcode = int_ops[size];
1421 break;
1422 case GL_FLOAT:
1423 opcode = float_ops[size];
1424 break;
1425 case GL_DOUBLE:
1426 opcode = double_ops[size];
1427 break;
1428 default:
1429 __glXSetError(gc, GL_INVALID_ENUM);
1430 return;
1431 }
1432
1433 header_size = 4;
1434 }
1435 else {
1436 switch (type) {
1437 case GL_SHORT:
1438 opcode = mshort_ops[size];
1439 break;
1440 case GL_INT:
1441 opcode = mint_ops[size];
1442 break;
1443 case GL_FLOAT:
1444 opcode = mfloat_ops[size];
1445 break;
1446 case GL_DOUBLE:
1447 opcode = mdouble_ops[size];
1448 break;
1449 default:
1450 __glXSetError(gc, GL_INVALID_ENUM);
1451 return;
1452 }
1453
1454 header_size = 8;
1455 }
1456
1457 a = get_array_entry(arrays, GL_TEXTURE_COORD_ARRAY, index);
1458 assert(a != NULL);
1459 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE,
1460 header_size, opcode);
1461
1462 if (a->enabled) {
1463 arrays->array_info_cache_valid = GL_FALSE;
1464 }
1465 }
1466
1467
1468 void
1469 __indirect_glSecondaryColorPointerEXT(GLint size, GLenum type, GLsizei stride,
1470 const GLvoid * pointer)
1471 {
1472 uint16_t opcode;
1473 struct glx_context *gc = __glXGetCurrentContext();
1474 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1475 struct array_state_vector *arrays = state->array_state;
1476 struct array_state *a;
1477
1478
1479 if (size != 3 || stride < 0) {
1480 __glXSetError(gc, GL_INVALID_VALUE);
1481 return;
1482 }
1483
1484 switch (type) {
1485 case GL_BYTE:
1486 opcode = 4126;
1487 break;
1488 case GL_UNSIGNED_BYTE:
1489 opcode = 4131;
1490 break;
1491 case GL_SHORT:
1492 opcode = 4127;
1493 break;
1494 case GL_UNSIGNED_SHORT:
1495 opcode = 4132;
1496 break;
1497 case GL_INT:
1498 opcode = 4128;
1499 break;
1500 case GL_UNSIGNED_INT:
1501 opcode = 4133;
1502 break;
1503 case GL_FLOAT:
1504 opcode = 4129;
1505 break;
1506 case GL_DOUBLE:
1507 opcode = 4130;
1508 break;
1509 default:
1510 __glXSetError(gc, GL_INVALID_ENUM);
1511 return;
1512 }
1513
1514 a = get_array_entry(arrays, GL_SECONDARY_COLOR_ARRAY, 0);
1515 if (a == NULL) {
1516 __glXSetError(gc, GL_INVALID_OPERATION);
1517 return;
1518 }
1519
1520 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1521
1522 if (a->enabled) {
1523 arrays->array_info_cache_valid = GL_FALSE;
1524 }
1525 }
1526
1527
1528 void
1529 __indirect_glFogCoordPointerEXT(GLenum type, GLsizei stride,
1530 const GLvoid * pointer)
1531 {
1532 uint16_t opcode;
1533 struct glx_context *gc = __glXGetCurrentContext();
1534 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1535 struct array_state_vector *arrays = state->array_state;
1536 struct array_state *a;
1537
1538
1539 if (stride < 0) {
1540 __glXSetError(gc, GL_INVALID_VALUE);
1541 return;
1542 }
1543
1544 switch (type) {
1545 case GL_FLOAT:
1546 opcode = 4124;
1547 break;
1548 case GL_DOUBLE:
1549 opcode = 4125;
1550 break;
1551 default:
1552 __glXSetError(gc, GL_INVALID_ENUM);
1553 return;
1554 }
1555
1556 a = get_array_entry(arrays, GL_FOG_COORD_ARRAY, 0);
1557 if (a == NULL) {
1558 __glXSetError(gc, GL_INVALID_OPERATION);
1559 return;
1560 }
1561
1562 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1563
1564 if (a->enabled) {
1565 arrays->array_info_cache_valid = GL_FALSE;
1566 }
1567 }
1568
1569
1570 void
1571 __indirect_glVertexAttribPointerARB(GLuint index, GLint size,
1572 GLenum type, GLboolean normalized,
1573 GLsizei stride, const GLvoid * pointer)
1574 {
1575 static const uint16_t short_ops[5] = { 0, 4189, 4190, 4191, 4192 };
1576 static const uint16_t float_ops[5] = { 0, 4193, 4194, 4195, 4196 };
1577 static const uint16_t double_ops[5] = { 0, 4197, 4198, 4199, 4200 };
1578
1579 uint16_t opcode;
1580 struct glx_context *gc = __glXGetCurrentContext();
1581 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1582 struct array_state_vector *arrays = state->array_state;
1583 struct array_state *a;
1584 unsigned true_immediate_count;
1585 unsigned true_immediate_size;
1586
1587
1588 if ((size < 1) || (size > 4) || (stride < 0)
1589 || (index > arrays->num_vertex_program_attribs)) {
1590 __glXSetError(gc, GL_INVALID_VALUE);
1591 return;
1592 }
1593
1594 if (normalized && (type != GL_FLOAT) && (type != GL_DOUBLE)) {
1595 switch (type) {
1596 case GL_BYTE:
1597 opcode = X_GLrop_VertexAttrib4NbvARB;
1598 break;
1599 case GL_UNSIGNED_BYTE:
1600 opcode = X_GLrop_VertexAttrib4NubvARB;
1601 break;
1602 case GL_SHORT:
1603 opcode = X_GLrop_VertexAttrib4NsvARB;
1604 break;
1605 case GL_UNSIGNED_SHORT:
1606 opcode = X_GLrop_VertexAttrib4NusvARB;
1607 break;
1608 case GL_INT:
1609 opcode = X_GLrop_VertexAttrib4NivARB;
1610 break;
1611 case GL_UNSIGNED_INT:
1612 opcode = X_GLrop_VertexAttrib4NuivARB;
1613 break;
1614 default:
1615 __glXSetError(gc, GL_INVALID_ENUM);
1616 return;
1617 }
1618
1619 true_immediate_count = 4;
1620 }
1621 else {
1622 true_immediate_count = size;
1623
1624 switch (type) {
1625 case GL_BYTE:
1626 opcode = X_GLrop_VertexAttrib4bvARB;
1627 true_immediate_count = 4;
1628 break;
1629 case GL_UNSIGNED_BYTE:
1630 opcode = X_GLrop_VertexAttrib4ubvARB;
1631 true_immediate_count = 4;
1632 break;
1633 case GL_SHORT:
1634 opcode = short_ops[size];
1635 break;
1636 case GL_UNSIGNED_SHORT:
1637 opcode = X_GLrop_VertexAttrib4usvARB;
1638 true_immediate_count = 4;
1639 break;
1640 case GL_INT:
1641 opcode = X_GLrop_VertexAttrib4ivARB;
1642 true_immediate_count = 4;
1643 break;
1644 case GL_UNSIGNED_INT:
1645 opcode = X_GLrop_VertexAttrib4uivARB;
1646 true_immediate_count = 4;
1647 break;
1648 case GL_FLOAT:
1649 opcode = float_ops[size];
1650 break;
1651 case GL_DOUBLE:
1652 opcode = double_ops[size];
1653 break;
1654 default:
1655 __glXSetError(gc, GL_INVALID_ENUM);
1656 return;
1657 }
1658 }
1659
1660 a = get_array_entry(arrays, GL_VERTEX_ATTRIB_ARRAY_POINTER, index);
1661 if (a == NULL) {
1662 __glXSetError(gc, GL_INVALID_OPERATION);
1663 return;
1664 }
1665
1666 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, normalized, 8,
1667 opcode);
1668
1669 true_immediate_size = __glXTypeSize(type) * true_immediate_count;
1670 ((uint16_t *) (a)->header)[0] = __GLX_PAD(a->header_size
1671 + true_immediate_size);
1672
1673 if (a->enabled) {
1674 arrays->array_info_cache_valid = GL_FALSE;
1675 }
1676 }
1677
1678
1679 /**
1680 * I don't have 100% confidence that this is correct. The different rules
1681 * about whether or not generic vertex attributes alias "classic" vertex
1682 * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
1683 * ARB_vertex_shader, and NV_vertex_program are a bit confusing. My
1684 * feeling is that the client-side doesn't have to worry about it. The
1685 * client just sends all the data to the server and lets the server deal
1686 * with it.
1687 */
1688 void
1689 __indirect_glVertexAttribPointerNV(GLuint index, GLint size,
1690 GLenum type, GLsizei stride,
1691 const GLvoid * pointer)
1692 {
1693 struct glx_context *gc = __glXGetCurrentContext();
1694 GLboolean normalized = GL_FALSE;
1695
1696
1697 switch (type) {
1698 case GL_UNSIGNED_BYTE:
1699 if (size != 4) {
1700 __glXSetError(gc, GL_INVALID_VALUE);
1701 return;
1702 }
1703 normalized = GL_TRUE;
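      /* FALLTHROUGH -- the unsigned-byte case is forwarded below as a
       * normalized attribute.
       */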
1704
1705 case GL_SHORT:
1706 case GL_FLOAT:
1707 case GL_DOUBLE:
1708 __indirect_glVertexAttribPointerARB(index, size, type,
1709 normalized, stride, pointer);
1710 return;
1711 default:
1712 __glXSetError(gc, GL_INVALID_ENUM);
1713 return;
1714 }
1715 }
1716
1717
1718 void
1719 __indirect_glClientActiveTextureARB(GLenum texture)
1720 {
1721 struct glx_context *const gc = __glXGetCurrentContext();
1722 __GLXattribute *const state =
1723 (__GLXattribute *) (gc->client_state_private);
1724 struct array_state_vector *const arrays = state->array_state;
1725 const GLint unit = (GLint) texture - GL_TEXTURE0;
1726
1727
1728 if ((unit < 0) || (unit >= arrays->num_texture_units)) {
1729 __glXSetError(gc, GL_INVALID_ENUM);
1730 return;
1731 }
1732
1733 arrays->active_texture_unit = unit;
1734 }
1735
1736
1737 /**
1738 * Modify the enable state for the selected array
1739 */
1740 GLboolean
1741 __glXSetArrayEnable(__GLXattribute * state, GLenum key, unsigned index,
1742 GLboolean enable)
1743 {
1744 struct array_state_vector *arrays = state->array_state;
1745 struct array_state *a;
1746
1747
1748    /* Texture coordinate arrays have an implicit index set when the
1749 * application calls glClientActiveTexture.
1750 */
1751 if (key == GL_TEXTURE_COORD_ARRAY) {
1752 index = arrays->active_texture_unit;
1753 }
1754
1755 a = get_array_entry(arrays, key, index);
1756
1757 if ((a != NULL) && (a->enabled != enable)) {
1758 a->enabled = enable;
1759 arrays->array_info_cache_valid = GL_FALSE;
1760 }
1761
1762 return (a != NULL);
1763 }
1764
1765
1766 void
1767 __glXArrayDisableAll(__GLXattribute * state)
1768 {
1769 struct array_state_vector *arrays = state->array_state;
1770 unsigned i;
1771
1772
1773 for (i = 0; i < arrays->num_arrays; i++) {
1774 arrays->arrays[i].enabled = GL_FALSE;
1775 }
1776
1777 arrays->array_info_cache_valid = GL_FALSE;
1778 }
1779
1780
1781 /**
1782 */
1783 GLboolean
1784 __glXGetArrayEnable(const __GLXattribute * const state,
1785 GLenum key, unsigned index, GLintptr * dest)
1786 {
1787 const struct array_state_vector *arrays = state->array_state;
1788 const struct array_state *a =
1789 get_array_entry((struct array_state_vector *) arrays,
1790 key, index);
1791
1792 if (a != NULL) {
1793 *dest = (GLintptr) a->enabled;
1794 }
1795
1796 return (a != NULL);
1797 }
1798
1799
1800 /**
1801 */
1802 GLboolean
1803 __glXGetArrayType(const __GLXattribute * const state,
1804 GLenum key, unsigned index, GLintptr * dest)
1805 {
1806 const struct array_state_vector *arrays = state->array_state;
1807 const struct array_state *a =
1808 get_array_entry((struct array_state_vector *) arrays,
1809 key, index);
1810
1811 if (a != NULL) {
1812 *dest = (GLintptr) a->data_type;
1813 }
1814
1815 return (a != NULL);
1816 }
1817
1818
1819 /**
1820 */
1821 GLboolean
1822 __glXGetArraySize(const __GLXattribute * const state,
1823 GLenum key, unsigned index, GLintptr * dest)
1824 {
1825 const struct array_state_vector *arrays = state->array_state;
1826 const struct array_state *a =
1827 get_array_entry((struct array_state_vector *) arrays,
1828 key, index);
1829
1830 if (a != NULL) {
1831 *dest = (GLintptr) a->count;
1832 }
1833
1834 return (a != NULL);
1835 }
1836
1837
1838 /**
1839 */
1840 GLboolean
1841 __glXGetArrayStride(const __GLXattribute * const state,
1842 GLenum key, unsigned index, GLintptr * dest)
1843 {
1844 const struct array_state_vector *arrays = state->array_state;
1845 const struct array_state *a =
1846 get_array_entry((struct array_state_vector *) arrays,
1847 key, index);
1848
1849 if (a != NULL) {
1850 *dest = (GLintptr) a->user_stride;
1851 }
1852
1853 return (a != NULL);
1854 }
1855
1856
1857 /**
1858 */
1859 GLboolean
1860 __glXGetArrayPointer(const __GLXattribute * const state,
1861 GLenum key, unsigned index, void **dest)
1862 {
1863 const struct array_state_vector *arrays = state->array_state;
1864 const struct array_state *a =
1865 get_array_entry((struct array_state_vector *) arrays,
1866 key, index);
1867
1868
1869 if (a != NULL) {
1870 *dest = (void *) (a->data);
1871 }
1872
1873 return (a != NULL);
1874 }
1875
1876
1877 /**
1878 */
1879 GLboolean
1880 __glXGetArrayNormalized(const __GLXattribute * const state,
1881 GLenum key, unsigned index, GLintptr * dest)
1882 {
1883 const struct array_state_vector *arrays = state->array_state;
1884 const struct array_state *a =
1885 get_array_entry((struct array_state_vector *) arrays,
1886 key, index);
1887
1888
1889 if (a != NULL) {
1890 *dest = (GLintptr) a->normalized;
1891 }
1892
1893 return (a != NULL);
1894 }
1895
1896
1897 /**
1898 */
1899 GLuint
1900 __glXGetActiveTextureUnit(const __GLXattribute * const state)
1901 {
1902 return state->array_state->active_texture_unit;
1903 }
1904
1905
1906 void
1907 __glXPushArrayState(__GLXattribute * state)
1908 {
1909 struct array_state_vector *arrays = state->array_state;
1910 struct array_stack_state *stack =
1911 &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
1912 unsigned i;
1913
1914 /* XXX are we pushing _all_ the necessary fields? */
1915 for (i = 0; i < arrays->num_arrays; i++) {
1916 stack[i].data = arrays->arrays[i].data;
1917 stack[i].data_type = arrays->arrays[i].data_type;
1918 stack[i].user_stride = arrays->arrays[i].user_stride;
1919 stack[i].count = arrays->arrays[i].count;
1920 stack[i].key = arrays->arrays[i].key;
1921 stack[i].index = arrays->arrays[i].index;
1922 stack[i].enabled = arrays->arrays[i].enabled;
1923 }
1924
1925 arrays->active_texture_unit_stack[arrays->stack_index] =
1926 arrays->active_texture_unit;
1927
1928 arrays->stack_index++;
1929 }
1930
1931
1932 void
1933 __glXPopArrayState(__GLXattribute * state)
1934 {
1935 struct array_state_vector *arrays = state->array_state;
1936 struct array_stack_state *stack;
1937 unsigned i;
1938
1939
1940 arrays->stack_index--;
1941 stack = &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
1942
1943 for (i = 0; i < arrays->num_arrays; i++) {
1944 switch (stack[i].key) {
1945 case GL_NORMAL_ARRAY:
1946 __indirect_glNormalPointer(stack[i].data_type,
1947 stack[i].user_stride, stack[i].data);
1948 break;
1949 case GL_COLOR_ARRAY:
1950 __indirect_glColorPointer(stack[i].count,
1951 stack[i].data_type,
1952 stack[i].user_stride, stack[i].data);
1953 break;
1954 case GL_INDEX_ARRAY:
1955 __indirect_glIndexPointer(stack[i].data_type,
1956 stack[i].user_stride, stack[i].data);
1957 break;
1958 case GL_EDGE_FLAG_ARRAY:
1959 __indirect_glEdgeFlagPointer(stack[i].user_stride, stack[i].data);
1960 break;
1961 case GL_TEXTURE_COORD_ARRAY:
1962 arrays->active_texture_unit = stack[i].index;
1963 __indirect_glTexCoordPointer(stack[i].count,
1964 stack[i].data_type,
1965 stack[i].user_stride, stack[i].data);
1966 break;
1967 case GL_SECONDARY_COLOR_ARRAY:
1968 __indirect_glSecondaryColorPointerEXT(stack[i].count,
1969 stack[i].data_type,
1970 stack[i].user_stride,
1971 stack[i].data);
1972 break;
1973 case GL_FOG_COORDINATE_ARRAY:
1974 __indirect_glFogCoordPointerEXT(stack[i].data_type,
1975 stack[i].user_stride, stack[i].data);
1976 break;
1977
1978 }
1979
1980 __glXSetArrayEnable(state, stack[i].key, stack[i].index,
1981 stack[i].enabled);
1982 }
1983
1984 arrays->active_texture_unit =
1985 arrays->active_texture_unit_stack[arrays->stack_index];
1986 }
1987