1 /* 2 * (C) Copyright IBM Corporation 2004, 2005 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sub license, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 19 * IBM, 20 * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF 22 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 * SOFTWARE. 24 */ 25 26 #include <inttypes.h> 27 #include <assert.h> 28 #include <string.h> 29 30 #include "glxclient.h" 31 #include "indirect.h" 32 #include <GL/glxproto.h> 33 #include "glxextensions.h" 34 #include "indirect_vertex_array.h" 35 #include "indirect_vertex_array_priv.h" 36 37 #define __GLX_PAD(n) (((n)+3) & ~3) 38 39 /** 40 * \file indirect_vertex_array.c 41 * Implement GLX protocol for vertex arrays and vertex buffer objects. 42 * 43 * The most important function in this fill is \c fill_array_info_cache. 44 * The \c array_state_vector contains a cache of the ARRAY_INFO data sent 45 * in the DrawArrays protocol. Certain operations, such as enabling or 46 * disabling an array, can invalidate this cache. \c fill_array_info_cache 47 * fills-in this data. 
Additionally, it examines the enabled state and
 * other factors to determine what "version" of DrawArrays protocol can be
 * used.
 *
 * Currently, only two versions of DrawArrays protocol are implemented.  The
 * first version is the "none" protocol.  This is the fallback when the
 * server does not support GL 1.1 / EXT_vertex_arrays.  It is implemented
 * by sending batches of immediate mode commands that are equivalent to the
 * DrawArrays protocol.
 *
 * The other protocol that is currently implemented is the "old" protocol.
 * This is the GL 1.1 DrawArrays protocol.  The only difference between GL
 * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
 * This protocol is called "old" because the ARB is in the process of
 * defining a new protocol, which will probably be called either "new" or
 * "vbo", to support multiple texture coordinate arrays, generic attributes,
 * and vertex buffer objects.
 *
 * \author Ian Romanick <ian.d.romanick (at) intel.com>
 */

static void emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count);
static void emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count);

static void emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
                                   const GLvoid * indices);
static void emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
                                  const GLvoid * indices);


static GLubyte *emit_element_none(GLubyte * dst,
                                  const struct array_state_vector *arrays,
                                  unsigned index);
static GLubyte *emit_element_old(GLubyte * dst,
                                 const struct array_state_vector *arrays,
                                 unsigned index);
static struct array_state *get_array_entry(const struct array_state_vector
                                           *arrays, GLenum key,
                                           unsigned index);
static void fill_array_info_cache(struct array_state_vector *arrays);
static GLboolean validate_mode(struct glx_context * gc, GLenum mode);
static GLboolean validate_count(struct glx_context * gc, GLsizei count);
static
GLboolean validate_type(struct glx_context * gc, GLenum type);


/**
 * Table of sizes, in bytes, of the GL types.  All of the type enums are in
 * the range 0x1400 - 0x140F.  That includes types added by extensions (i.e.,
 * \c GL_HALF_FLOAT_NV).  The elements of this table correspond to the
 * type enums masked with 0x0f.
 *
 * \notes
 * \c GL_HALF_FLOAT_NV is not included.  Neither are \c GL_2_BYTES,
 * \c GL_3_BYTES, or \c GL_4_BYTES.  Unsupported / unused slots hold 0.
 */
const GLuint __glXTypeSize_table[16] = {
   1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
};


/**
 * Free the per-context array state that was allocated with
 * __glXInitVertexArrayState().
 *
 * Safe to call when the state was never allocated (or allocation failed):
 * a NULL \c array_state is simply ignored.  The pointers are cleared after
 * each free so a partially-observed structure never holds dangling
 * pointers.
 */
void
__glXFreeVertexArrayState(struct glx_context * gc)
{
   __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;

   if (arrays) {
      free(arrays->stack);
      arrays->stack = NULL;
      free(arrays->arrays);
      arrays->arrays = NULL;
      free(arrays);
      state->array_state = NULL;
   }
}


/**
 * Initialize vertex array state of a GLX context.
 *
 * \param gc GLX context whose vertex array state is to be initialized.
 *
 * \warning
 * This function may only be called after struct glx_context::gl_extension_bits,
 * struct glx_context::server_minor, and __GLXcontext::server_major have been
 * initialized.  These values are used to determine what vertex arrays are
 * supported.
 */
void
__glXInitVertexArrayState(struct glx_context * gc)
{
   __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays;

   unsigned array_count;
   int texture_units = 1, vertex_program_attribs = 0;
   unsigned i, j;

   GLboolean got_fog = GL_FALSE;
   GLboolean got_secondary_color = GL_FALSE;


   arrays = calloc(1, sizeof(struct array_state_vector));
   state->array_state = arrays;

   if (arrays == NULL) {
      __glXSetError(gc, GL_OUT_OF_MEMORY);
      return;
   }

   arrays->old_DrawArrays_possible = !state->NoDrawArraysProtocol;
   arrays->new_DrawArrays_possible = GL_FALSE;
   arrays->DrawArrays = NULL;

   arrays->active_texture_unit = 0;


   /* Determine how many arrays are actually needed.  Only arrays that
    * are supported by the server are created.  For example, if the server
    * supports only 2 texture units, then only 2 texture coordinate arrays
    * are created.
    *
    * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
    * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
    * GL_EDGE_FLAG_ARRAY are supported.
    */

   /* The five always-present arrays: normal, color, index, edge flag and
    * vertex.  (The texture coordinate array count is added below.)
    */
   array_count = 5;

   /* Fog coordinate arrays: GL 1.4+ or EXT_fog_coord. */
   if (__glExtensionBitIsEnabled(gc, GL_EXT_fog_coord_bit)
       || (gc->server_major > 1) || (gc->server_minor >= 4)) {
      got_fog = GL_TRUE;
      array_count++;
   }

   /* Secondary color arrays: GL 1.4+ or EXT_secondary_color. */
   if (__glExtensionBitIsEnabled(gc, GL_EXT_secondary_color_bit)
       || (gc->server_major > 1) || (gc->server_minor >= 4)) {
      got_secondary_color = GL_TRUE;
      array_count++;
   }

   /* Multiple texture units: GL 1.3+ or ARB_multitexture; otherwise
    * texture_units stays at its default of 1.
    */
   if (__glExtensionBitIsEnabled(gc, GL_ARB_multitexture_bit)
       || (gc->server_major > 1) || (gc->server_minor >= 3)) {
      __indirect_glGetIntegerv(GL_MAX_TEXTURE_UNITS, &texture_units);
   }

   if (__glExtensionBitIsEnabled(gc, GL_ARB_vertex_program_bit)) {
      __indirect_glGetProgramivARB(GL_VERTEX_PROGRAM_ARB,
                                   GL_MAX_PROGRAM_ATTRIBS_ARB,
                                   &vertex_program_attribs);
   }

   arrays->num_texture_units = texture_units;
   arrays->num_vertex_program_attribs = vertex_program_attribs;
   array_count += texture_units + vertex_program_attribs;
   arrays->num_arrays = array_count;
   arrays->arrays = calloc(array_count, sizeof(struct array_state));

   if (arrays->arrays == NULL) {
      state->array_state = NULL;
      free(arrays);
      __glXSetError(gc, GL_OUT_OF_MEMORY);
      return;
   }

   arrays->arrays[0].data_type = GL_FLOAT;
   arrays->arrays[0].count = 3;
   arrays->arrays[0].key = GL_NORMAL_ARRAY;
   arrays->arrays[0].normalized = GL_TRUE;
   arrays->arrays[0].old_DrawArrays_possible = GL_TRUE;

   arrays->arrays[1].data_type = GL_FLOAT;
   arrays->arrays[1].count = 4;
   arrays->arrays[1].key = GL_COLOR_ARRAY;
   arrays->arrays[1].normalized = GL_TRUE;
   arrays->arrays[1].old_DrawArrays_possible = GL_TRUE;

   arrays->arrays[2].data_type = GL_FLOAT;
   arrays->arrays[2].count = 1;
   arrays->arrays[2].key = GL_INDEX_ARRAY;
   arrays->arrays[2].old_DrawArrays_possible = GL_TRUE;

   arrays->arrays[3].data_type = GL_UNSIGNED_BYTE;
   arrays->arrays[3].count = 1;
   arrays->arrays[3].key = GL_EDGE_FLAG_ARRAY;
   arrays->arrays[3].old_DrawArrays_possible = GL_TRUE;

   /* Texture coordinate arrays.  Only unit 0 can use the old DrawArrays
    * protocol; the other units need an explicit texture-target word (see
    * emit_element_none).
    */
   for (i = 0; i < texture_units; i++) {
      arrays->arrays[4 + i].data_type = GL_FLOAT;
      arrays->arrays[4 + i].count = 4;
      arrays->arrays[4 + i].key = GL_TEXTURE_COORD_ARRAY;

      arrays->arrays[4 + i].old_DrawArrays_possible = (i == 0);
      arrays->arrays[4 + i].index = i;
   }

   i = 4 + texture_units;

   if (got_fog) {
      arrays->arrays[i].data_type = GL_FLOAT;
      arrays->arrays[i].count = 1;
      arrays->arrays[i].key = GL_FOG_COORDINATE_ARRAY;
      arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
      i++;
   }

   if (got_secondary_color) {
      arrays->arrays[i].data_type = GL_FLOAT;
      arrays->arrays[i].count = 3;
      arrays->arrays[i].key = GL_SECONDARY_COLOR_ARRAY;
      arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
      arrays->arrays[i].normalized = GL_TRUE;
      i++;
   }


   /* Generic vertex program attributes, laid out in reverse index order
    * (idx counts down as j counts up).  They are never usable with the
    * old DrawArrays protocol.
    */
   for (j = 0; j < vertex_program_attribs; j++) {
      const unsigned idx = (vertex_program_attribs - (j + 1));


      arrays->arrays[idx + i].data_type = GL_FLOAT;
      arrays->arrays[idx + i].count = 4;
      arrays->arrays[idx + i].key = GL_VERTEX_ATTRIB_ARRAY_POINTER;

      arrays->arrays[idx + i].old_DrawArrays_possible = 0;
      arrays->arrays[idx + i].index = idx;
   }

   i += vertex_program_attribs;


   /* Vertex array *must* be last because of the way that
    * emit_DrawArrays_none works.
    */

   arrays->arrays[i].data_type = GL_FLOAT;
   arrays->arrays[i].count = 4;
   arrays->arrays[i].key = GL_VERTEX_ARRAY;
   arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;

   assert((i + 1) == arrays->num_arrays);

   arrays->stack_index = 0;
   /* One saved copy of every array's state for each client attribute
    * stack level (glPushClientAttrib / glPopClientAttrib).
    */
   arrays->stack = malloc(sizeof(struct array_stack_state)
                          * arrays->num_arrays
                          * __GL_CLIENT_ATTRIB_STACK_DEPTH);

   if (arrays->stack == NULL) {
      state->array_state = NULL;
      free(arrays->arrays);
      free(arrays);
      __glXSetError(gc, GL_OUT_OF_MEMORY);
      return;
   }
}


/**
 * Calculate the size, in bytes, of a single vertex for the "none" protocol.
 * This is essentially the size of all the immediate-mode commands required
 * to implement the enabled vertex arrays: the sum of header[0] (the padded
 * per-element command size) over every enabled array.
 */
static size_t
calculate_single_vertex_size_none(const struct array_state_vector *arrays)
{
   size_t single_vertex_size = 0;
   unsigned i;


   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         single_vertex_size += arrays->arrays[i].header[0];
      }
   }

   return single_vertex_size;
}


/**
 * Emit a single element using non-DrawArrays protocol: one immediate-mode
 * render command per enabled array.
 *
 * \param dst     Destination buffer (must have room for the full element;
 *                see calculate_single_vertex_size_none).
 * \param arrays  Array state.
 * \param index   Element index to fetch from each enabled array.
 *
 * \returns  Pointer just past the last byte written.
 */
GLubyte *
emit_element_none(GLubyte * dst,
                  const struct array_state_vector * arrays, unsigned index)
{
   unsigned i;


   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         const size_t offset = index * arrays->arrays[i].true_stride;

         /* The generic attributes can have more data than is in the
          * elements.  This is because a vertex array can be a 2 element,
          * normalized, unsigned short, but the "closest" immediate mode
          * protocol is for a 4Nus.  Since the sizes are small, the
          * performance impact on modern processors should be negligible.
          */
         /* Zero the whole command first so any bytes not covered by the
          * copied element data (see comment above) are well-defined.
          */
         (void) memset(dst, 0, arrays->arrays[i].header[0]);

         /* 4-byte render command header: padded size and opcode. */
         (void) memcpy(dst, arrays->arrays[i].header, 4);

         dst += 4;

         if (arrays->arrays[i].key == GL_TEXTURE_COORD_ARRAY &&
             arrays->arrays[i].index > 0) {
            /* Multi-texture coordinate arrays require the texture target
             * to be sent.  For doubles it is after the data, for everything
             * else it is before.
             */
            GLenum texture = arrays->arrays[i].index + GL_TEXTURE0;
            if (arrays->arrays[i].data_type == GL_DOUBLE) {
               (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                             arrays->arrays[i].element_size);
               dst += arrays->arrays[i].element_size;
               (void) memcpy(dst, &texture, 4);
               dst += 4;
            } else {
               (void) memcpy(dst, &texture, 4);
               dst += 4;
               (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                             arrays->arrays[i].element_size);
               dst += __GLX_PAD(arrays->arrays[i].element_size);
            }
         } else if (arrays->arrays[i].key == GL_VERTEX_ATTRIB_ARRAY_POINTER) {
            /* Vertex attribute data requires the attribute index to be
             * sent first.
             */
            (void) memcpy(dst, &arrays->arrays[i].index, 4);
            dst += 4;
            (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                          arrays->arrays[i].element_size);
            dst += __GLX_PAD(arrays->arrays[i].element_size);
         } else {
            /* Common case: just the element data, padded to a 4-byte
             * boundary.
             */
            (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                          arrays->arrays[i].element_size);
            dst += __GLX_PAD(arrays->arrays[i].element_size);
         }
      }
   }

   return dst;
}


/**
 * Emit a single element using "old" DrawArrays protocol from
 * EXT_vertex_arrays / OpenGL 1.1.
 */
GLubyte *
emit_element_old(GLubyte * dst,
                 const struct array_state_vector * arrays, unsigned index)
{
   unsigned i;


   /* The old protocol is raw interleaved data: each enabled array's
    * element, padded to 4 bytes, with no per-array command headers.
    */
   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         const size_t offset = index * arrays->arrays[i].true_stride;

         (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                       arrays->arrays[i].element_size);

         dst += __GLX_PAD(arrays->arrays[i].element_size);
      }
   }

   return dst;
}


/**
 * Find the array state for a given array \c key (e.g., GL_COLOR_ARRAY)
 * and sub-index (texture unit or attribute number).
 *
 * \returns  The matching entry, or \c NULL if none exists.
 */
struct array_state *
get_array_entry(const struct array_state_vector *arrays,
                GLenum key, unsigned index)
{
   unsigned i;

   for (i = 0; i < arrays->num_arrays; i++) {
      if ((arrays->arrays[i].key == key)
          && (arrays->arrays[i].index == index)) {
         return &arrays->arrays[i];
      }
   }

   return NULL;
}


/**
 * Ensure the ARRAY_INFO cache buffer can hold \c required_size bytes,
 * growing it if necessary.  MAX_HEADER_SIZE extra bytes are reserved in
 * front of the cache so a protocol header can be prepended in place (see
 * emit_DrawArrays_header_old).
 *
 * \returns  \c GL_TRUE on success, \c GL_FALSE if the allocation failed
 *           (existing cache contents are left intact).
 */
static GLboolean
allocate_array_info_cache(struct array_state_vector *arrays,
                          size_t required_size)
{
#define MAX_HEADER_SIZE 20
   if (arrays->array_info_cache_buffer_size < required_size) {
      GLubyte *temp = realloc(arrays->array_info_cache_base,
                              required_size + MAX_HEADER_SIZE);

      if (temp == NULL) {
         return GL_FALSE;
      }

      arrays->array_info_cache_base = temp;
      arrays->array_info_cache = temp + MAX_HEADER_SIZE;
      arrays->array_info_cache_buffer_size = required_size;
   }

   arrays->array_info_cache_size = required_size;
   return GL_TRUE;
}


/**
 * Rebuild the cached ARRAY_INFO data from the currently enabled arrays and
 * select the DrawArrays / DrawElements protocol implementation ("old" or
 * "none") accordingly.
 */
void
fill_array_info_cache(struct array_state_vector *arrays)
{
   GLboolean old_DrawArrays_possible;
   unsigned i;


   /* Determine how many arrays are enabled.
    */

   /* Count the enabled arrays; the old protocol is only usable if every
    * enabled array supports it (e.g., not generic attributes or texture
    * units above 0).
    */
   arrays->enabled_client_array_count = 0;
   old_DrawArrays_possible = arrays->old_DrawArrays_possible;
   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         arrays->enabled_client_array_count++;
         old_DrawArrays_possible &= arrays->arrays[i].old_DrawArrays_possible;
      }
   }

   if (arrays->new_DrawArrays_possible) {
      /* The "new" (VBO) protocol is not implemented; this flag is always
       * GL_FALSE (set in __glXInitVertexArrayState), so trap here if that
       * ever changes without this function being updated.
       */
      assert(!arrays->new_DrawArrays_possible);
   }
   else if (old_DrawArrays_possible) {
      /* ARRAY_INFO is 3 uint32 words (data type, element count, array
       * key) per enabled array, hence 12 bytes each.
       */
      const size_t required_size = arrays->enabled_client_array_count * 12;
      uint32_t *info;


      if (!allocate_array_info_cache(arrays, required_size)) {
         return;
      }


      info = (uint32_t *) arrays->array_info_cache;
      for (i = 0; i < arrays->num_arrays; i++) {
         if (arrays->arrays[i].enabled) {
            *(info++) = arrays->arrays[i].data_type;
            *(info++) = arrays->arrays[i].count;
            *(info++) = arrays->arrays[i].key;
         }
      }

      arrays->DrawArrays = emit_DrawArrays_old;
      arrays->DrawElements = emit_DrawElements_old;
   }
   else {
      arrays->DrawArrays = emit_DrawArrays_none;
      arrays->DrawElements = emit_DrawElements_none;
   }

   arrays->array_info_cache_valid = GL_TRUE;
}


/**
 * Emit a \c glDrawArrays command using the "none" protocol.  That is,
 * emit immediate-mode commands that are equivalent to the requested
 * \c glDrawArrays command.  This is used with servers that don't support
 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
 * vertex state is enabled that is not compatible with that protocol.
523 */ 524 void 525 emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count) 526 { 527 struct glx_context *gc = __glXGetCurrentContext(); 528 const __GLXattribute *state = 529 (const __GLXattribute *) (gc->client_state_private); 530 struct array_state_vector *arrays = state->array_state; 531 532 size_t single_vertex_size; 533 GLubyte *pc; 534 unsigned i; 535 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin }; 536 static const uint16_t end_cmd[2] = { 4, X_GLrop_End }; 537 538 539 single_vertex_size = calculate_single_vertex_size_none(arrays); 540 541 pc = gc->pc; 542 543 (void) memcpy(pc, begin_cmd, 4); 544 *(int *) (pc + 4) = mode; 545 546 pc += 8; 547 548 for (i = 0; i < count; i++) { 549 if ((pc + single_vertex_size) >= gc->bufEnd) { 550 pc = __glXFlushRenderBuffer(gc, pc); 551 } 552 553 pc = emit_element_none(pc, arrays, first + i); 554 } 555 556 if ((pc + 4) >= gc->bufEnd) { 557 pc = __glXFlushRenderBuffer(gc, pc); 558 } 559 560 (void) memcpy(pc, end_cmd, 4); 561 pc += 4; 562 563 gc->pc = pc; 564 if (gc->pc > gc->limit) { 565 (void) __glXFlushRenderBuffer(gc, gc->pc); 566 } 567 } 568 569 570 /** 571 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays 572 * protocol. 573 * 574 * \param gc GLX context. 575 * \param arrays Array state. 576 * \param elements_per_request Location to store the number of elements that 577 * can fit in a single Render / RenderLarge 578 * command. 579 * \param total_request Total number of requests for a RenderLarge 580 * command. If a Render command is used, this 581 * will be zero. 582 * \param mode Drawing mode. 583 * \param count Number of vertices. 584 * 585 * \returns 586 * A pointer to the buffer for array data. 
 */
static GLubyte *
emit_DrawArrays_header_old(struct glx_context * gc,
                           struct array_state_vector *arrays,
                           size_t * elements_per_request,
                           unsigned int *total_requests,
                           GLenum mode, GLsizei count)
{
   size_t command_size;
   size_t single_vertex_size;
   /* Small (Render) header: 16 bytes = size, opcode, count,
    * enabled-array count, mode.  The large (RenderLarge) path adds a
    * 4-byte size word in front of this.
    */
   const unsigned header_size = 16;
   unsigned i;
   GLubyte *pc;


   /* Determine the size of the whole command.  This includes the header,
    * the ARRAY_INFO data and the array data.  Once this size is calculated,
    * it will be known whether a Render or RenderLarge command is needed.
    */

   single_vertex_size = 0;
   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         single_vertex_size += __GLX_PAD(arrays->arrays[i].element_size);
      }
   }

   command_size = arrays->array_info_cache_size + header_size
      + (single_vertex_size * count);


   /* Write the header for either a Render command or a RenderLarge
    * command.  After the header is written, write the ARRAY_INFO data.
    */

   if (command_size > gc->maxSmallRenderCommandSize) {
      /* maxSize is the maximum amount of data can be stuffed into a single
       * packet.  sz_xGLXRenderReq is added because bufSize is the maximum
       * packet size minus sz_xGLXRenderReq.
       */
      const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
         - sz_xGLXRenderLargeReq;
      unsigned vertex_requests;


      /* Calculate the number of data packets that will be required to send
       * the whole command.  To do this, the number of vertices that
       * will fit in a single buffer must be calculated.
       *
       * The important value here is elements_per_request.  This is the
       * number of complete array elements that will fit in a single
       * buffer.  There may be some wasted space at the end of the buffer,
       * but splitting elements across buffer boundaries would be painful.
       *
       * NOTE(review): this divides by single_vertex_size, which appears to
       * assume at least one array is enabled on this path — confirm that
       * callers guarantee it.
       */

      elements_per_request[0] = maxSize / single_vertex_size;

      vertex_requests = (count + elements_per_request[0] - 1)
         / elements_per_request[0];

      /* +1: the first chunk carries the header and ARRAY_INFO data; the
       * vertex data follows in chunks 2..total_requests.
       */
      *total_requests = vertex_requests + 1;


      __glXFlushRenderBuffer(gc, gc->pc);

      /* RenderLarge commands carry an extra 4-byte size word. */
      command_size += 4;

      /* Build the header in the reserved space directly in front of the
       * ARRAY_INFO cache (see allocate_array_info_cache) so header and
       * ARRAY_INFO can be sent as one contiguous chunk.
       */
      pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
      *(uint32_t *) (pc + 0) = command_size;
      *(uint32_t *) (pc + 4) = X_GLrop_DrawArrays;
      *(uint32_t *) (pc + 8) = count;
      *(uint32_t *) (pc + 12) = arrays->enabled_client_array_count;
      *(uint32_t *) (pc + 16) = mode;

      __glXSendLargeChunk(gc, 1, *total_requests, pc,
                          header_size + 4 + arrays->array_info_cache_size);

      pc = gc->pc;
   }
   else {
      /* Small command: emit header and ARRAY_INFO straight into the
       * render buffer.
       */
      if ((gc->pc + command_size) >= gc->bufEnd) {
         (void) __glXFlushRenderBuffer(gc, gc->pc);
      }

      pc = gc->pc;
      *(uint16_t *) (pc + 0) = command_size;
      *(uint16_t *) (pc + 2) = X_GLrop_DrawArrays;
      *(uint32_t *) (pc + 4) = count;
      *(uint32_t *) (pc + 8) = arrays->enabled_client_array_count;
      *(uint32_t *) (pc + 12) = mode;

      pc += header_size;

      (void) memcpy(pc, arrays->array_info_cache,
                    arrays->array_info_cache_size);
      pc += arrays->array_info_cache_size;

      /* Everything fits in one request. */
      *elements_per_request = count;
      *total_requests = 0;
   }


   return pc;
}


/**
 * Emit a \c glDrawArrays command using the "old" (GL 1.1 /
 * EXT_vertex_arrays) protocol.
 */
void
emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;

   GLubyte *pc;
   size_t elements_per_request;
   unsigned total_requests = 0;
   unsigned i;
   size_t total_sent = 0;


   pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
                                   &total_requests, mode, count);


   /* Write the arrays.
    */

   if (total_requests == 0) {
      /* Single Render command: emit all elements into the render buffer. */
      assert(elements_per_request >= count);

      for (i = 0; i < count; i++) {
         pc = emit_element_old(pc, arrays, i + first);
      }

      assert(pc <= gc->bufEnd);

      gc->pc = pc;
      if (gc->pc > gc->limit) {
         (void) __glXFlushRenderBuffer(gc, gc->pc);
      }
   }
   else {
      unsigned req;


      /* RenderLarge: chunk 1 (header + ARRAY_INFO) was already sent by
       * emit_DrawArrays_header_old, so vertex data goes in chunks
       * 2..total_requests.
       */
      for (req = 2; req <= total_requests; req++) {
         /* The final chunk may hold fewer elements than a full buffer. */
         if (count < elements_per_request) {
            elements_per_request = count;
         }

         pc = gc->pc;
         for (i = 0; i < elements_per_request; i++) {
            pc = emit_element_old(pc, arrays, i + first);
         }

         first += elements_per_request;

         total_sent += (size_t) (pc - gc->pc);
         __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);

         count -= elements_per_request;
      }
   }
}


/**
 * Emit a \c glDrawElements command using the "none" protocol: look up each
 * index and emit the element as immediate-mode commands (see
 * emit_element_none).
 */
void
emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
                       const GLvoid * indices)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;
   static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
   static const uint16_t end_cmd[2] = { 4, X_GLrop_End };

   GLubyte *pc;
   size_t single_vertex_size;
   unsigned i;


   single_vertex_size = calculate_single_vertex_size_none(arrays);


   /* Make room for the Begin command before writing it. */
   if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
      gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
   }

   pc = gc->pc;

   (void) memcpy(pc, begin_cmd, 4);
   *(int *) (pc + 4) = mode;

   pc += 8;

   for (i = 0; i < count; i++) {
      unsigned index = 0;

      if ((pc + single_vertex_size) >= gc->bufEnd) {
         pc = __glXFlushRenderBuffer(gc, pc);
      }

      /* Widen the index to unsigned; type was already checked by
       * validate_type, so no default case is needed.
       */
      switch (type) {
      case GL_UNSIGNED_INT:
         index = (unsigned) (((GLuint *) indices)[i]);
         break;
      case GL_UNSIGNED_SHORT:
         index = (unsigned) (((GLushort *) indices)[i]);
         break;
      case GL_UNSIGNED_BYTE:
         index = (unsigned) (((GLubyte *) indices)[i]);
         break;
      }
      pc = emit_element_none(pc, arrays, index);
   }

   if ((pc + 4) >= gc->bufEnd) {
      pc = __glXFlushRenderBuffer(gc, pc);
   }

   (void) memcpy(pc, end_cmd, 4);
   pc += 4;

   gc->pc = pc;
   if (gc->pc > gc->limit) {
      (void) __glXFlushRenderBuffer(gc, gc->pc);
   }
}


/**
 * Emit a \c glDrawElements command using the "old" (GL 1.1 /
 * EXT_vertex_arrays) protocol: the indexed elements are flattened into
 * DrawArrays-style interleaved data.
 */
void
emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
                      const GLvoid * indices)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;

   GLubyte *pc;
   size_t elements_per_request;
   unsigned total_requests = 0;
   unsigned i;
   unsigned req;
   unsigned req_element = 0;


   pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
                                   &total_requests, mode, count);


   /* Write the arrays.
    */

   /* Chunk 1 (header + ARRAY_INFO) was sent by emit_DrawArrays_header_old
    * on the RenderLarge path, so data chunks start at request 2.
    */
   req = 2;
   while (count > 0) {
      if (count < elements_per_request) {
         elements_per_request = count;
      }

      /* Dereference each index (resuming at req_element) and emit the
       * corresponding element in old-protocol form.
       */
      switch (type) {
      case GL_UNSIGNED_INT:{
            const GLuint *ui_ptr = (const GLuint *) indices + req_element;

            for (i = 0; i < elements_per_request; i++) {
               const GLint index = (GLint) * (ui_ptr++);
               pc = emit_element_old(pc, arrays, index);
            }
            break;
         }
      case GL_UNSIGNED_SHORT:{
            const GLushort *us_ptr = (const GLushort *) indices + req_element;

            for (i = 0; i < elements_per_request; i++) {
               const GLint index = (GLint) * (us_ptr++);
               pc = emit_element_old(pc, arrays, index);
            }
            break;
         }
      case GL_UNSIGNED_BYTE:{
            const GLubyte *ub_ptr = (const GLubyte *) indices + req_element;

            for (i = 0; i < elements_per_request; i++) {
               const GLint index = (GLint) * (ub_ptr++);
               pc = emit_element_old(pc, arrays, index);
            }
            break;
         }
      }

      /* On the RenderLarge path each buffer-full is sent as its own
       * chunk; on the small path (total_requests == 0) the data simply
       * accumulates in the render buffer.
       */
      if (total_requests != 0) {
         __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
         pc = gc->pc;
         req++;
      }

      count -= elements_per_request;
      req_element += elements_per_request;
   }


   assert((total_requests == 0) || ((req - 1) == total_requests));

   if (total_requests == 0) {
      assert(pc <= gc->bufEnd);

      gc->pc = pc;
      if (gc->pc > gc->limit) {
         (void) __glXFlushRenderBuffer(gc, gc->pc);
      }
   }
}


/**
 * Validate that the \c mode parameter to \c glDrawArrays, et. al. is valid.
 * If it is not valid, then an error code is set in the GLX context.
 *
 * \returns
 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
913 */ 914 static GLboolean 915 validate_mode(struct glx_context * gc, GLenum mode) 916 { 917 switch (mode) { 918 case GL_POINTS: 919 case GL_LINE_STRIP: 920 case GL_LINE_LOOP: 921 case GL_LINES: 922 case GL_TRIANGLE_STRIP: 923 case GL_TRIANGLE_FAN: 924 case GL_TRIANGLES: 925 case GL_QUAD_STRIP: 926 case GL_QUADS: 927 case GL_POLYGON: 928 break; 929 default: 930 __glXSetError(gc, GL_INVALID_ENUM); 931 return GL_FALSE; 932 } 933 934 return GL_TRUE; 935 } 936 937 938 /** 939 * Validate that the \c count parameter to \c glDrawArrays, et. al. is valid. 940 * A value less than zero is invalid and will result in \c GL_INVALID_VALUE 941 * being set. A value of zero will not result in an error being set, but 942 * will result in \c GL_FALSE being returned. 943 * 944 * \returns 945 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not. 946 */ 947 static GLboolean 948 validate_count(struct glx_context * gc, GLsizei count) 949 { 950 if (count < 0) { 951 __glXSetError(gc, GL_INVALID_VALUE); 952 } 953 954 return (count > 0); 955 } 956 957 958 /** 959 * Validate that the \c type parameter to \c glDrawElements, et. al. is 960 * valid. Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and 961 * \c GL_UNSIGNED_INT are valid. 962 * 963 * \returns 964 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not. 
965 */ 966 static GLboolean 967 validate_type(struct glx_context * gc, GLenum type) 968 { 969 switch (type) { 970 case GL_UNSIGNED_INT: 971 case GL_UNSIGNED_SHORT: 972 case GL_UNSIGNED_BYTE: 973 return GL_TRUE; 974 default: 975 __glXSetError(gc, GL_INVALID_ENUM); 976 return GL_FALSE; 977 } 978 } 979 980 981 void 982 __indirect_glDrawArrays(GLenum mode, GLint first, GLsizei count) 983 { 984 struct glx_context *gc = __glXGetCurrentContext(); 985 const __GLXattribute *state = 986 (const __GLXattribute *) (gc->client_state_private); 987 struct array_state_vector *arrays = state->array_state; 988 989 990 if (validate_mode(gc, mode) && validate_count(gc, count)) { 991 if (!arrays->array_info_cache_valid) { 992 fill_array_info_cache(arrays); 993 } 994 995 arrays->DrawArrays(mode, first, count); 996 } 997 } 998 999 1000 void 1001 __indirect_glArrayElement(GLint index) 1002 { 1003 struct glx_context *gc = __glXGetCurrentContext(); 1004 const __GLXattribute *state = 1005 (const __GLXattribute *) (gc->client_state_private); 1006 struct array_state_vector *arrays = state->array_state; 1007 1008 size_t single_vertex_size; 1009 1010 1011 single_vertex_size = calculate_single_vertex_size_none(arrays); 1012 1013 if ((gc->pc + single_vertex_size) >= gc->bufEnd) { 1014 gc->pc = __glXFlushRenderBuffer(gc, gc->pc); 1015 } 1016 1017 gc->pc = emit_element_none(gc->pc, arrays, index); 1018 1019 if (gc->pc > gc->limit) { 1020 (void) __glXFlushRenderBuffer(gc, gc->pc); 1021 } 1022 } 1023 1024 1025 void 1026 __indirect_glDrawElements(GLenum mode, GLsizei count, GLenum type, 1027 const GLvoid * indices) 1028 { 1029 struct glx_context *gc = __glXGetCurrentContext(); 1030 const __GLXattribute *state = 1031 (const __GLXattribute *) (gc->client_state_private); 1032 struct array_state_vector *arrays = state->array_state; 1033 1034 1035 if (validate_mode(gc, mode) && validate_count(gc, count) 1036 && validate_type(gc, type)) { 1037 if (!arrays->array_info_cache_valid) { 1038 
         fill_array_info_cache(arrays);
      }

      arrays->DrawElements(mode, count, type, indices);
   }
}


void
__indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
                               GLsizei count, GLenum type,
                               const GLvoid * indices)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;


   if (validate_mode(gc, mode) && validate_count(gc, count)
       && validate_type(gc, type)) {
      /* The range is only validated here; beyond that, \c start and
       * \c end are unused and the request is sent as a plain
       * DrawElements.
       */
      if (end < start) {
         __glXSetError(gc, GL_INVALID_VALUE);
         return;
      }

      if (!arrays->array_info_cache_valid) {
         fill_array_info_cache(arrays);
      }

      arrays->DrawElements(mode, count, type, indices);
   }
}


void
__indirect_glMultiDrawArrays(GLenum mode, const GLint *first,
                             const GLsizei *count, GLsizei primcount)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;
   GLsizei i;


   if (validate_mode(gc, mode)) {
      if (!arrays->array_info_cache_valid) {
         fill_array_info_cache(arrays);
      }

      /* Each primitive's count is validated individually; invalid or
       * zero counts are skipped without aborting the remaining draws.
       */
      for (i = 0; i < primcount; i++) {
         if (validate_count(gc, count[i])) {
            arrays->DrawArrays(mode, first[i], count[i]);
         }
      }
   }
}


void
__indirect_glMultiDrawElementsEXT(GLenum mode, const GLsizei * count,
                                  GLenum type, const GLvoid * const * indices,
                                  GLsizei primcount)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;
   GLsizei i;


   if (validate_mode(gc, mode) && validate_type(gc, type)) {
      if
(!arrays->array_info_cache_valid) { 1112 fill_array_info_cache(arrays); 1113 } 1114 1115 for (i = 0; i < primcount; i++) { 1116 if (validate_count(gc, count[i])) { 1117 arrays->DrawElements(mode, count[i], type, indices[i]); 1118 } 1119 } 1120 } 1121 } 1122 1123 1124 /* The HDR_SIZE macro argument is the command header size (4 bytes) 1125 * plus any additional index word e.g. for texture units or vertex 1126 * attributes. 1127 */ 1128 #define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \ 1129 do { \ 1130 (a)->data = PTR; \ 1131 (a)->data_type = TYPE; \ 1132 (a)->user_stride = STRIDE; \ 1133 (a)->count = COUNT; \ 1134 (a)->normalized = NORMALIZED; \ 1135 \ 1136 (a)->element_size = __glXTypeSize( TYPE ) * COUNT; \ 1137 (a)->true_stride = (STRIDE == 0) \ 1138 ? (a)->element_size : STRIDE; \ 1139 \ 1140 (a)->header[0] = __GLX_PAD(HDR_SIZE + (a)->element_size); \ 1141 (a)->header[1] = OPCODE; \ 1142 } while(0) 1143 1144 1145 void 1146 __indirect_glVertexPointer(GLint size, GLenum type, GLsizei stride, 1147 const GLvoid * pointer) 1148 { 1149 static const uint16_t short_ops[5] = { 1150 0, 0, X_GLrop_Vertex2sv, X_GLrop_Vertex3sv, X_GLrop_Vertex4sv 1151 }; 1152 static const uint16_t int_ops[5] = { 1153 0, 0, X_GLrop_Vertex2iv, X_GLrop_Vertex3iv, X_GLrop_Vertex4iv 1154 }; 1155 static const uint16_t float_ops[5] = { 1156 0, 0, X_GLrop_Vertex2fv, X_GLrop_Vertex3fv, X_GLrop_Vertex4fv 1157 }; 1158 static const uint16_t double_ops[5] = { 1159 0, 0, X_GLrop_Vertex2dv, X_GLrop_Vertex3dv, X_GLrop_Vertex4dv 1160 }; 1161 uint16_t opcode; 1162 struct glx_context *gc = __glXGetCurrentContext(); 1163 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private); 1164 struct array_state_vector *arrays = state->array_state; 1165 struct array_state *a; 1166 1167 1168 if (size < 2 || size > 4 || stride < 0) { 1169 __glXSetError(gc, GL_INVALID_VALUE); 1170 return; 1171 } 1172 1173 switch (type) { 1174 case GL_SHORT: 1175 opcode = short_ops[size]; 
1176 break; 1177 case GL_INT: 1178 opcode = int_ops[size]; 1179 break; 1180 case GL_FLOAT: 1181 opcode = float_ops[size]; 1182 break; 1183 case GL_DOUBLE: 1184 opcode = double_ops[size]; 1185 break; 1186 default: 1187 __glXSetError(gc, GL_INVALID_ENUM); 1188 return; 1189 } 1190 1191 a = get_array_entry(arrays, GL_VERTEX_ARRAY, 0); 1192 assert(a != NULL); 1193 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE, 4, 1194 opcode); 1195 1196 if (a->enabled) { 1197 arrays->array_info_cache_valid = GL_FALSE; 1198 } 1199 } 1200 1201 1202 void 1203 __indirect_glNormalPointer(GLenum type, GLsizei stride, 1204 const GLvoid * pointer) 1205 { 1206 uint16_t opcode; 1207 struct glx_context *gc = __glXGetCurrentContext(); 1208 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private); 1209 struct array_state_vector *arrays = state->array_state; 1210 struct array_state *a; 1211 1212 1213 if (stride < 0) { 1214 __glXSetError(gc, GL_INVALID_VALUE); 1215 return; 1216 } 1217 1218 switch (type) { 1219 case GL_BYTE: 1220 opcode = X_GLrop_Normal3bv; 1221 break; 1222 case GL_SHORT: 1223 opcode = X_GLrop_Normal3sv; 1224 break; 1225 case GL_INT: 1226 opcode = X_GLrop_Normal3iv; 1227 break; 1228 case GL_FLOAT: 1229 opcode = X_GLrop_Normal3fv; 1230 break; 1231 case GL_DOUBLE: 1232 opcode = X_GLrop_Normal3dv; 1233 break; 1234 default: 1235 __glXSetError(gc, GL_INVALID_ENUM); 1236 return; 1237 } 1238 1239 a = get_array_entry(arrays, GL_NORMAL_ARRAY, 0); 1240 assert(a != NULL); 1241 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 3, GL_TRUE, 4, opcode); 1242 1243 if (a->enabled) { 1244 arrays->array_info_cache_valid = GL_FALSE; 1245 } 1246 } 1247 1248 1249 void 1250 __indirect_glColorPointer(GLint size, GLenum type, GLsizei stride, 1251 const GLvoid * pointer) 1252 { 1253 static const uint16_t byte_ops[5] = { 1254 0, 0, 0, X_GLrop_Color3bv, X_GLrop_Color4bv 1255 }; 1256 static const uint16_t ubyte_ops[5] = { 1257 0, 0, 0, X_GLrop_Color3ubv, X_GLrop_Color4ubv 1258 }; 1259 
static const uint16_t short_ops[5] = { 1260 0, 0, 0, X_GLrop_Color3sv, X_GLrop_Color4sv 1261 }; 1262 static const uint16_t ushort_ops[5] = { 1263 0, 0, 0, X_GLrop_Color3usv, X_GLrop_Color4usv 1264 }; 1265 static const uint16_t int_ops[5] = { 1266 0, 0, 0, X_GLrop_Color3iv, X_GLrop_Color4iv 1267 }; 1268 static const uint16_t uint_ops[5] = { 1269 0, 0, 0, X_GLrop_Color3uiv, X_GLrop_Color4uiv 1270 }; 1271 static const uint16_t float_ops[5] = { 1272 0, 0, 0, X_GLrop_Color3fv, X_GLrop_Color4fv 1273 }; 1274 static const uint16_t double_ops[5] = { 1275 0, 0, 0, X_GLrop_Color3dv, X_GLrop_Color4dv 1276 }; 1277 uint16_t opcode; 1278 struct glx_context *gc = __glXGetCurrentContext(); 1279 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private); 1280 struct array_state_vector *arrays = state->array_state; 1281 struct array_state *a; 1282 1283 1284 if (size < 3 || size > 4 || stride < 0) { 1285 __glXSetError(gc, GL_INVALID_VALUE); 1286 return; 1287 } 1288 1289 switch (type) { 1290 case GL_BYTE: 1291 opcode = byte_ops[size]; 1292 break; 1293 case GL_UNSIGNED_BYTE: 1294 opcode = ubyte_ops[size]; 1295 break; 1296 case GL_SHORT: 1297 opcode = short_ops[size]; 1298 break; 1299 case GL_UNSIGNED_SHORT: 1300 opcode = ushort_ops[size]; 1301 break; 1302 case GL_INT: 1303 opcode = int_ops[size]; 1304 break; 1305 case GL_UNSIGNED_INT: 1306 opcode = uint_ops[size]; 1307 break; 1308 case GL_FLOAT: 1309 opcode = float_ops[size]; 1310 break; 1311 case GL_DOUBLE: 1312 opcode = double_ops[size]; 1313 break; 1314 default: 1315 __glXSetError(gc, GL_INVALID_ENUM); 1316 return; 1317 } 1318 1319 a = get_array_entry(arrays, GL_COLOR_ARRAY, 0); 1320 assert(a != NULL); 1321 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode); 1322 1323 if (a->enabled) { 1324 arrays->array_info_cache_valid = GL_FALSE; 1325 } 1326 } 1327 1328 1329 void 1330 __indirect_glIndexPointer(GLenum type, GLsizei stride, const GLvoid * pointer) 1331 { 1332 uint16_t opcode; 1333 struct 
glx_context *gc = __glXGetCurrentContext(); 1334 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private); 1335 struct array_state_vector *arrays = state->array_state; 1336 struct array_state *a; 1337 1338 1339 if (stride < 0) { 1340 __glXSetError(gc, GL_INVALID_VALUE); 1341 return; 1342 } 1343 1344 switch (type) { 1345 case GL_UNSIGNED_BYTE: 1346 opcode = X_GLrop_Indexubv; 1347 break; 1348 case GL_SHORT: 1349 opcode = X_GLrop_Indexsv; 1350 break; 1351 case GL_INT: 1352 opcode = X_GLrop_Indexiv; 1353 break; 1354 case GL_FLOAT: 1355 opcode = X_GLrop_Indexfv; 1356 break; 1357 case GL_DOUBLE: 1358 opcode = X_GLrop_Indexdv; 1359 break; 1360 default: 1361 __glXSetError(gc, GL_INVALID_ENUM); 1362 return; 1363 } 1364 1365 a = get_array_entry(arrays, GL_INDEX_ARRAY, 0); 1366 assert(a != NULL); 1367 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode); 1368 1369 if (a->enabled) { 1370 arrays->array_info_cache_valid = GL_FALSE; 1371 } 1372 } 1373 1374 1375 void 1376 __indirect_glEdgeFlagPointer(GLsizei stride, const GLvoid * pointer) 1377 { 1378 struct glx_context *gc = __glXGetCurrentContext(); 1379 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private); 1380 struct array_state_vector *arrays = state->array_state; 1381 struct array_state *a; 1382 1383 1384 if (stride < 0) { 1385 __glXSetError(gc, GL_INVALID_VALUE); 1386 return; 1387 } 1388 1389 1390 a = get_array_entry(arrays, GL_EDGE_FLAG_ARRAY, 0); 1391 assert(a != NULL); 1392 COMMON_ARRAY_DATA_INIT(a, pointer, GL_UNSIGNED_BYTE, stride, 1, GL_FALSE, 1393 4, X_GLrop_EdgeFlagv); 1394 1395 if (a->enabled) { 1396 arrays->array_info_cache_valid = GL_FALSE; 1397 } 1398 } 1399 1400 1401 void 1402 __indirect_glTexCoordPointer(GLint size, GLenum type, GLsizei stride, 1403 const GLvoid * pointer) 1404 { 1405 static const uint16_t short_ops[5] = { 1406 0, X_GLrop_TexCoord1sv, X_GLrop_TexCoord2sv, X_GLrop_TexCoord3sv, 1407 X_GLrop_TexCoord4sv 1408 }; 1409 static const uint16_t 
int_ops[5] = { 1410 0, X_GLrop_TexCoord1iv, X_GLrop_TexCoord2iv, X_GLrop_TexCoord3iv, 1411 X_GLrop_TexCoord4iv 1412 }; 1413 static const uint16_t float_ops[5] = { 1414 0, X_GLrop_TexCoord1fv, X_GLrop_TexCoord2fv, X_GLrop_TexCoord3fv, 1415 X_GLrop_TexCoord4fv 1416 }; 1417 static const uint16_t double_ops[5] = { 1418 0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2dv, X_GLrop_TexCoord3dv, 1419 X_GLrop_TexCoord4dv 1420 }; 1421 1422 static const uint16_t mshort_ops[5] = { 1423 0, X_GLrop_MultiTexCoord1svARB, X_GLrop_MultiTexCoord2svARB, 1424 X_GLrop_MultiTexCoord3svARB, X_GLrop_MultiTexCoord4svARB 1425 }; 1426 static const uint16_t mint_ops[5] = { 1427 0, X_GLrop_MultiTexCoord1ivARB, X_GLrop_MultiTexCoord2ivARB, 1428 X_GLrop_MultiTexCoord3ivARB, X_GLrop_MultiTexCoord4ivARB 1429 }; 1430 static const uint16_t mfloat_ops[5] = { 1431 0, X_GLrop_MultiTexCoord1fvARB, X_GLrop_MultiTexCoord2fvARB, 1432 X_GLrop_MultiTexCoord3fvARB, X_GLrop_MultiTexCoord4fvARB 1433 }; 1434 static const uint16_t mdouble_ops[5] = { 1435 0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2dvARB, 1436 X_GLrop_MultiTexCoord3dvARB, X_GLrop_MultiTexCoord4dvARB 1437 }; 1438 1439 uint16_t opcode; 1440 struct glx_context *gc = __glXGetCurrentContext(); 1441 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private); 1442 struct array_state_vector *arrays = state->array_state; 1443 struct array_state *a; 1444 unsigned header_size; 1445 unsigned index; 1446 1447 1448 if (size < 1 || size > 4 || stride < 0) { 1449 __glXSetError(gc, GL_INVALID_VALUE); 1450 return; 1451 } 1452 1453 index = arrays->active_texture_unit; 1454 if (index == 0) { 1455 switch (type) { 1456 case GL_SHORT: 1457 opcode = short_ops[size]; 1458 break; 1459 case GL_INT: 1460 opcode = int_ops[size]; 1461 break; 1462 case GL_FLOAT: 1463 opcode = float_ops[size]; 1464 break; 1465 case GL_DOUBLE: 1466 opcode = double_ops[size]; 1467 break; 1468 default: 1469 __glXSetError(gc, GL_INVALID_ENUM); 1470 return; 1471 } 1472 1473 header_size = 
4; 1474 } 1475 else { 1476 switch (type) { 1477 case GL_SHORT: 1478 opcode = mshort_ops[size]; 1479 break; 1480 case GL_INT: 1481 opcode = mint_ops[size]; 1482 break; 1483 case GL_FLOAT: 1484 opcode = mfloat_ops[size]; 1485 break; 1486 case GL_DOUBLE: 1487 opcode = mdouble_ops[size]; 1488 break; 1489 default: 1490 __glXSetError(gc, GL_INVALID_ENUM); 1491 return; 1492 } 1493 1494 header_size = 8; 1495 } 1496 1497 a = get_array_entry(arrays, GL_TEXTURE_COORD_ARRAY, index); 1498 assert(a != NULL); 1499 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE, 1500 header_size, opcode); 1501 1502 if (a->enabled) { 1503 arrays->array_info_cache_valid = GL_FALSE; 1504 } 1505 } 1506 1507 1508 void 1509 __indirect_glSecondaryColorPointer(GLint size, GLenum type, GLsizei stride, 1510 const GLvoid * pointer) 1511 { 1512 uint16_t opcode; 1513 struct glx_context *gc = __glXGetCurrentContext(); 1514 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private); 1515 struct array_state_vector *arrays = state->array_state; 1516 struct array_state *a; 1517 1518 1519 if (size != 3 || stride < 0) { 1520 __glXSetError(gc, GL_INVALID_VALUE); 1521 return; 1522 } 1523 1524 switch (type) { 1525 case GL_BYTE: 1526 opcode = 4126; 1527 break; 1528 case GL_UNSIGNED_BYTE: 1529 opcode = 4131; 1530 break; 1531 case GL_SHORT: 1532 opcode = 4127; 1533 break; 1534 case GL_UNSIGNED_SHORT: 1535 opcode = 4132; 1536 break; 1537 case GL_INT: 1538 opcode = 4128; 1539 break; 1540 case GL_UNSIGNED_INT: 1541 opcode = 4133; 1542 break; 1543 case GL_FLOAT: 1544 opcode = 4129; 1545 break; 1546 case GL_DOUBLE: 1547 opcode = 4130; 1548 break; 1549 default: 1550 __glXSetError(gc, GL_INVALID_ENUM); 1551 return; 1552 } 1553 1554 a = get_array_entry(arrays, GL_SECONDARY_COLOR_ARRAY, 0); 1555 if (a == NULL) { 1556 __glXSetError(gc, GL_INVALID_OPERATION); 1557 return; 1558 } 1559 1560 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode); 1561 1562 if (a->enabled) { 1563 
arrays->array_info_cache_valid = GL_FALSE; 1564 } 1565 } 1566 1567 1568 void 1569 __indirect_glFogCoordPointer(GLenum type, GLsizei stride, 1570 const GLvoid * pointer) 1571 { 1572 uint16_t opcode; 1573 struct glx_context *gc = __glXGetCurrentContext(); 1574 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private); 1575 struct array_state_vector *arrays = state->array_state; 1576 struct array_state *a; 1577 1578 1579 if (stride < 0) { 1580 __glXSetError(gc, GL_INVALID_VALUE); 1581 return; 1582 } 1583 1584 switch (type) { 1585 case GL_FLOAT: 1586 opcode = 4124; 1587 break; 1588 case GL_DOUBLE: 1589 opcode = 4125; 1590 break; 1591 default: 1592 __glXSetError(gc, GL_INVALID_ENUM); 1593 return; 1594 } 1595 1596 a = get_array_entry(arrays, GL_FOG_COORD_ARRAY, 0); 1597 if (a == NULL) { 1598 __glXSetError(gc, GL_INVALID_OPERATION); 1599 return; 1600 } 1601 1602 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode); 1603 1604 if (a->enabled) { 1605 arrays->array_info_cache_valid = GL_FALSE; 1606 } 1607 } 1608 1609 1610 void 1611 __indirect_glVertexAttribPointer(GLuint index, GLint size, 1612 GLenum type, GLboolean normalized, 1613 GLsizei stride, const GLvoid * pointer) 1614 { 1615 static const uint16_t short_ops[5] = { 1616 0, X_GLrop_VertexAttrib1svARB, X_GLrop_VertexAttrib2svARB, 1617 X_GLrop_VertexAttrib3svARB, X_GLrop_VertexAttrib4svARB 1618 }; 1619 static const uint16_t float_ops[5] = { 1620 0, X_GLrop_VertexAttrib1fvARB, X_GLrop_VertexAttrib2fvARB, 1621 X_GLrop_VertexAttrib3fvARB, X_GLrop_VertexAttrib4fvARB 1622 }; 1623 static const uint16_t double_ops[5] = { 1624 0, X_GLrop_VertexAttrib1dvARB, X_GLrop_VertexAttrib2dvARB, 1625 X_GLrop_VertexAttrib3dvARB, X_GLrop_VertexAttrib4dvARB 1626 }; 1627 1628 uint16_t opcode; 1629 struct glx_context *gc = __glXGetCurrentContext(); 1630 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private); 1631 struct array_state_vector *arrays = state->array_state; 1632 struct array_state *a; 
1633 unsigned true_immediate_count; 1634 unsigned true_immediate_size; 1635 1636 1637 if ((size < 1) || (size > 4) || (stride < 0) 1638 || (index > arrays->num_vertex_program_attribs)) { 1639 __glXSetError(gc, GL_INVALID_VALUE); 1640 return; 1641 } 1642 1643 if (normalized && (type != GL_FLOAT) && (type != GL_DOUBLE)) { 1644 switch (type) { 1645 case GL_BYTE: 1646 opcode = X_GLrop_VertexAttrib4NbvARB; 1647 break; 1648 case GL_UNSIGNED_BYTE: 1649 opcode = X_GLrop_VertexAttrib4NubvARB; 1650 break; 1651 case GL_SHORT: 1652 opcode = X_GLrop_VertexAttrib4NsvARB; 1653 break; 1654 case GL_UNSIGNED_SHORT: 1655 opcode = X_GLrop_VertexAttrib4NusvARB; 1656 break; 1657 case GL_INT: 1658 opcode = X_GLrop_VertexAttrib4NivARB; 1659 break; 1660 case GL_UNSIGNED_INT: 1661 opcode = X_GLrop_VertexAttrib4NuivARB; 1662 break; 1663 default: 1664 __glXSetError(gc, GL_INVALID_ENUM); 1665 return; 1666 } 1667 1668 true_immediate_count = 4; 1669 } 1670 else { 1671 true_immediate_count = size; 1672 1673 switch (type) { 1674 case GL_BYTE: 1675 opcode = X_GLrop_VertexAttrib4bvARB; 1676 true_immediate_count = 4; 1677 break; 1678 case GL_UNSIGNED_BYTE: 1679 opcode = X_GLrop_VertexAttrib4ubvARB; 1680 true_immediate_count = 4; 1681 break; 1682 case GL_SHORT: 1683 opcode = short_ops[size]; 1684 break; 1685 case GL_UNSIGNED_SHORT: 1686 opcode = X_GLrop_VertexAttrib4usvARB; 1687 true_immediate_count = 4; 1688 break; 1689 case GL_INT: 1690 opcode = X_GLrop_VertexAttrib4ivARB; 1691 true_immediate_count = 4; 1692 break; 1693 case GL_UNSIGNED_INT: 1694 opcode = X_GLrop_VertexAttrib4uivARB; 1695 true_immediate_count = 4; 1696 break; 1697 case GL_FLOAT: 1698 opcode = float_ops[size]; 1699 break; 1700 case GL_DOUBLE: 1701 opcode = double_ops[size]; 1702 break; 1703 default: 1704 __glXSetError(gc, GL_INVALID_ENUM); 1705 return; 1706 } 1707 } 1708 1709 a = get_array_entry(arrays, GL_VERTEX_ATTRIB_ARRAY_POINTER, index); 1710 if (a == NULL) { 1711 __glXSetError(gc, GL_INVALID_OPERATION); 1712 return; 1713 } 1714 
1715 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, normalized, 8, 1716 opcode); 1717 1718 true_immediate_size = __glXTypeSize(type) * true_immediate_count; 1719 a->header[0] = __GLX_PAD(8 + true_immediate_size); 1720 1721 if (a->enabled) { 1722 arrays->array_info_cache_valid = GL_FALSE; 1723 } 1724 } 1725 1726 1727 /** 1728 * I don't have 100% confidence that this is correct. The different rules 1729 * about whether or not generic vertex attributes alias "classic" vertex 1730 * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program, 1731 * ARB_vertex_shader, and NV_vertex_program are a bit confusing. My 1732 * feeling is that the client-side doesn't have to worry about it. The 1733 * client just sends all the data to the server and lets the server deal 1734 * with it. 1735 */ 1736 void 1737 __indirect_glVertexAttribPointerNV(GLuint index, GLint size, 1738 GLenum type, GLsizei stride, 1739 const GLvoid * pointer) 1740 { 1741 struct glx_context *gc = __glXGetCurrentContext(); 1742 GLboolean normalized = GL_FALSE; 1743 1744 1745 switch (type) { 1746 case GL_UNSIGNED_BYTE: 1747 if (size != 4) { 1748 __glXSetError(gc, GL_INVALID_VALUE); 1749 return; 1750 } 1751 normalized = GL_TRUE; 1752 1753 case GL_SHORT: 1754 case GL_FLOAT: 1755 case GL_DOUBLE: 1756 __indirect_glVertexAttribPointer(index, size, type, 1757 normalized, stride, pointer); 1758 return; 1759 default: 1760 __glXSetError(gc, GL_INVALID_ENUM); 1761 return; 1762 } 1763 } 1764 1765 1766 void 1767 __indirect_glClientActiveTexture(GLenum texture) 1768 { 1769 struct glx_context *const gc = __glXGetCurrentContext(); 1770 __GLXattribute *const state = 1771 (__GLXattribute *) (gc->client_state_private); 1772 struct array_state_vector *const arrays = state->array_state; 1773 const GLint unit = (GLint) texture - GL_TEXTURE0; 1774 1775 1776 if ((unit < 0) || (unit >= arrays->num_texture_units)) { 1777 __glXSetError(gc, GL_INVALID_ENUM); 1778 return; 1779 } 1780 1781 arrays->active_texture_unit = 
unit; 1782 } 1783 1784 1785 /** 1786 * Modify the enable state for the selected array 1787 */ 1788 GLboolean 1789 __glXSetArrayEnable(__GLXattribute * state, GLenum key, unsigned index, 1790 GLboolean enable) 1791 { 1792 struct array_state_vector *arrays = state->array_state; 1793 struct array_state *a; 1794 1795 1796 /* Texture coordinate arrays have an implict index set when the 1797 * application calls glClientActiveTexture. 1798 */ 1799 if (key == GL_TEXTURE_COORD_ARRAY) { 1800 index = arrays->active_texture_unit; 1801 } 1802 1803 a = get_array_entry(arrays, key, index); 1804 1805 if ((a != NULL) && (a->enabled != enable)) { 1806 a->enabled = enable; 1807 arrays->array_info_cache_valid = GL_FALSE; 1808 } 1809 1810 return (a != NULL); 1811 } 1812 1813 1814 void 1815 __glXArrayDisableAll(__GLXattribute * state) 1816 { 1817 struct array_state_vector *arrays = state->array_state; 1818 unsigned i; 1819 1820 1821 for (i = 0; i < arrays->num_arrays; i++) { 1822 arrays->arrays[i].enabled = GL_FALSE; 1823 } 1824 1825 arrays->array_info_cache_valid = GL_FALSE; 1826 } 1827 1828 1829 /** 1830 */ 1831 GLboolean 1832 __glXGetArrayEnable(const __GLXattribute * const state, 1833 GLenum key, unsigned index, GLintptr * dest) 1834 { 1835 const struct array_state_vector *arrays = state->array_state; 1836 const struct array_state *a = 1837 get_array_entry((struct array_state_vector *) arrays, 1838 key, index); 1839 1840 if (a != NULL) { 1841 *dest = (GLintptr) a->enabled; 1842 } 1843 1844 return (a != NULL); 1845 } 1846 1847 1848 /** 1849 */ 1850 GLboolean 1851 __glXGetArrayType(const __GLXattribute * const state, 1852 GLenum key, unsigned index, GLintptr * dest) 1853 { 1854 const struct array_state_vector *arrays = state->array_state; 1855 const struct array_state *a = 1856 get_array_entry((struct array_state_vector *) arrays, 1857 key, index); 1858 1859 if (a != NULL) { 1860 *dest = (GLintptr) a->data_type; 1861 } 1862 1863 return (a != NULL); 1864 } 1865 1866 1867 /** 1868 */ 
1869 GLboolean 1870 __glXGetArraySize(const __GLXattribute * const state, 1871 GLenum key, unsigned index, GLintptr * dest) 1872 { 1873 const struct array_state_vector *arrays = state->array_state; 1874 const struct array_state *a = 1875 get_array_entry((struct array_state_vector *) arrays, 1876 key, index); 1877 1878 if (a != NULL) { 1879 *dest = (GLintptr) a->count; 1880 } 1881 1882 return (a != NULL); 1883 } 1884 1885 1886 /** 1887 */ 1888 GLboolean 1889 __glXGetArrayStride(const __GLXattribute * const state, 1890 GLenum key, unsigned index, GLintptr * dest) 1891 { 1892 const struct array_state_vector *arrays = state->array_state; 1893 const struct array_state *a = 1894 get_array_entry((struct array_state_vector *) arrays, 1895 key, index); 1896 1897 if (a != NULL) { 1898 *dest = (GLintptr) a->user_stride; 1899 } 1900 1901 return (a != NULL); 1902 } 1903 1904 1905 /** 1906 */ 1907 GLboolean 1908 __glXGetArrayPointer(const __GLXattribute * const state, 1909 GLenum key, unsigned index, void **dest) 1910 { 1911 const struct array_state_vector *arrays = state->array_state; 1912 const struct array_state *a = 1913 get_array_entry((struct array_state_vector *) arrays, 1914 key, index); 1915 1916 1917 if (a != NULL) { 1918 *dest = (void *) (a->data); 1919 } 1920 1921 return (a != NULL); 1922 } 1923 1924 1925 /** 1926 */ 1927 GLboolean 1928 __glXGetArrayNormalized(const __GLXattribute * const state, 1929 GLenum key, unsigned index, GLintptr * dest) 1930 { 1931 const struct array_state_vector *arrays = state->array_state; 1932 const struct array_state *a = 1933 get_array_entry((struct array_state_vector *) arrays, 1934 key, index); 1935 1936 1937 if (a != NULL) { 1938 *dest = (GLintptr) a->normalized; 1939 } 1940 1941 return (a != NULL); 1942 } 1943 1944 1945 /** 1946 */ 1947 GLuint 1948 __glXGetActiveTextureUnit(const __GLXattribute * const state) 1949 { 1950 return state->array_state->active_texture_unit; 1951 } 1952 1953 1954 void 1955 
__glXPushArrayState(__GLXattribute * state) 1956 { 1957 struct array_state_vector *arrays = state->array_state; 1958 struct array_stack_state *stack = 1959 &arrays->stack[(arrays->stack_index * arrays->num_arrays)]; 1960 unsigned i; 1961 1962 /* XXX are we pushing _all_ the necessary fields? */ 1963 for (i = 0; i < arrays->num_arrays; i++) { 1964 stack[i].data = arrays->arrays[i].data; 1965 stack[i].data_type = arrays->arrays[i].data_type; 1966 stack[i].user_stride = arrays->arrays[i].user_stride; 1967 stack[i].count = arrays->arrays[i].count; 1968 stack[i].key = arrays->arrays[i].key; 1969 stack[i].index = arrays->arrays[i].index; 1970 stack[i].enabled = arrays->arrays[i].enabled; 1971 } 1972 1973 arrays->active_texture_unit_stack[arrays->stack_index] = 1974 arrays->active_texture_unit; 1975 1976 arrays->stack_index++; 1977 } 1978 1979 1980 void 1981 __glXPopArrayState(__GLXattribute * state) 1982 { 1983 struct array_state_vector *arrays = state->array_state; 1984 struct array_stack_state *stack; 1985 unsigned i; 1986 1987 1988 arrays->stack_index--; 1989 stack = &arrays->stack[(arrays->stack_index * arrays->num_arrays)]; 1990 1991 for (i = 0; i < arrays->num_arrays; i++) { 1992 switch (stack[i].key) { 1993 case GL_NORMAL_ARRAY: 1994 __indirect_glNormalPointer(stack[i].data_type, 1995 stack[i].user_stride, stack[i].data); 1996 break; 1997 case GL_COLOR_ARRAY: 1998 __indirect_glColorPointer(stack[i].count, 1999 stack[i].data_type, 2000 stack[i].user_stride, stack[i].data); 2001 break; 2002 case GL_INDEX_ARRAY: 2003 __indirect_glIndexPointer(stack[i].data_type, 2004 stack[i].user_stride, stack[i].data); 2005 break; 2006 case GL_EDGE_FLAG_ARRAY: 2007 __indirect_glEdgeFlagPointer(stack[i].user_stride, stack[i].data); 2008 break; 2009 case GL_TEXTURE_COORD_ARRAY: 2010 arrays->active_texture_unit = stack[i].index; 2011 __indirect_glTexCoordPointer(stack[i].count, 2012 stack[i].data_type, 2013 stack[i].user_stride, stack[i].data); 2014 break; 2015 case 
GL_SECONDARY_COLOR_ARRAY: 2016 __indirect_glSecondaryColorPointer(stack[i].count, 2017 stack[i].data_type, 2018 stack[i].user_stride, 2019 stack[i].data); 2020 break; 2021 case GL_FOG_COORDINATE_ARRAY: 2022 __indirect_glFogCoordPointer(stack[i].data_type, 2023 stack[i].user_stride, stack[i].data); 2024 break; 2025 2026 } 2027 2028 __glXSetArrayEnable(state, stack[i].key, stack[i].index, 2029 stack[i].enabled); 2030 } 2031 2032 arrays->active_texture_unit = 2033 arrays->active_texture_unit_stack[arrays->stack_index]; 2034 } 2035