1 // 2 // Copyright (C) 2002-2005 3Dlabs Inc. Ltd. 3 // Copyright (C) 2012-2016 LunarG, Inc. 4 // 5 // All rights reserved. 6 // 7 // Redistribution and use in source and binary forms, with or without 8 // modification, are permitted provided that the following conditions 9 // are met: 10 // 11 // Redistributions of source code must retain the above copyright 12 // notice, this list of conditions and the following disclaimer. 13 // 14 // Redistributions in binary form must reproduce the above 15 // copyright notice, this list of conditions and the following 16 // disclaimer in the documentation and/or other materials provided 17 // with the distribution. 18 // 19 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its 20 // contributors may be used to endorse or promote products derived 21 // from this software without specific prior written permission. 22 // 23 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 31 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 33 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 // POSSIBILITY OF SUCH DAMAGE. 35 // 36 37 // 38 // Definition of the in-memory high-level intermediate representation 39 // of shaders. This is a tree that parser creates. 40 // 41 // Nodes in the tree are defined as a hierarchy of classes derived from 42 // TIntermNode. Each is a node in a tree. 
// There is no preset branching factor;
// each node can have its own type of list of children.
//

#ifndef __INTERMEDIATE_H
#define __INTERMEDIATE_H

#if _MSC_VER >= 1900
    #pragma warning(disable : 4464) // relative include path contains '..'
    #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted
#endif

#include "../Include/Common.h"
#include "../Include/Types.h"
#include "../Include/ConstantUnion.h"

namespace glslang {

class TIntermediate;

//
// Operators used by the high-level (parse tree) representation.
//
// NOTE: the relative order of enumerators is significant: several "Guard"
// enumerators below (e.g. EOpTextureGuardBegin/EOpTextureGuardEnd) are used
// as range delimiters via <' '>' comparisons (see TIntermOperator::isTexture()
// and friends), so do not reorder entries.
//
enum TOperator {
    EOpNull,            // if in a node, should only mean a node is still being built
    EOpSequence,        // denotes a list of statements, or parameters, etc.
    EOpLinkerObjects,   // for aggregate node of objects the linker may need, if not referenced by the rest of the AST
    EOpFunctionCall,
    EOpFunction,        // For function definition
    EOpParameters,      // an aggregate listing the parameters to a function

    //
    // Unary operators
    //

    EOpNegative,
    EOpLogicalNot,
    EOpVectorLogicalNot,
    EOpBitwiseNot,

    EOpPostIncrement,
    EOpPostDecrement,
    EOpPreIncrement,
    EOpPreDecrement,

    // Implicit/explicit scalar conversions, named EOpConv<Src>To<Dst>.
    // (to bool)
    EOpConvIntToBool, EOpConvUintToBool, EOpConvFloatToBool, EOpConvDoubleToBool,
    EOpConvInt64ToBool, EOpConvUint64ToBool,
    // (to float)
    EOpConvBoolToFloat, EOpConvIntToFloat, EOpConvUintToFloat, EOpConvDoubleToFloat,
    EOpConvInt64ToFloat, EOpConvUint64ToFloat,
    // (to int)
    EOpConvUintToInt, EOpConvFloatToInt, EOpConvBoolToInt, EOpConvDoubleToInt,
    EOpConvInt64ToInt, EOpConvUint64ToInt,
    // (to uint)
    EOpConvIntToUint, EOpConvFloatToUint, EOpConvBoolToUint, EOpConvDoubleToUint,
    EOpConvInt64ToUint, EOpConvUint64ToUint,
    // (to double)
    EOpConvIntToDouble, EOpConvUintToDouble, EOpConvFloatToDouble, EOpConvBoolToDouble,
    EOpConvInt64ToDouble, EOpConvUint64ToDouble,
    // (to int64)
    EOpConvBoolToInt64, EOpConvIntToInt64, EOpConvUintToInt64, EOpConvFloatToInt64,
    EOpConvDoubleToInt64, EOpConvUint64ToInt64,
    // (to uint64)
    EOpConvBoolToUint64, EOpConvIntToUint64, EOpConvUintToUint64, EOpConvFloatToUint64,
    EOpConvDoubleToUint64, EOpConvInt64ToUint64,
#ifdef AMD_EXTENSIONS
    // 16-bit type conversions (AMD half/short extensions).
    // (to float16)
    EOpConvBoolToFloat16, EOpConvIntToFloat16, EOpConvUintToFloat16, EOpConvFloatToFloat16,
    EOpConvDoubleToFloat16, EOpConvInt64ToFloat16, EOpConvUint64ToFloat16,
    // (from float16)
    EOpConvFloat16ToBool, EOpConvFloat16ToInt, EOpConvFloat16ToUint, EOpConvFloat16ToFloat,
    EOpConvFloat16ToDouble, EOpConvFloat16ToInt64, EOpConvFloat16ToUint64,

    // (to int16)
    EOpConvBoolToInt16, EOpConvIntToInt16, EOpConvUintToInt16, EOpConvFloatToInt16,
    EOpConvDoubleToInt16, EOpConvFloat16ToInt16, EOpConvInt64ToInt16, EOpConvUint64ToInt16,
    EOpConvUint16ToInt16,
    // (from int16)
    EOpConvInt16ToBool, EOpConvInt16ToInt, EOpConvInt16ToUint, EOpConvInt16ToFloat,
    EOpConvInt16ToDouble, EOpConvInt16ToFloat16, EOpConvInt16ToInt64, EOpConvInt16ToUint64,

    // (to uint16)
    EOpConvBoolToUint16, EOpConvIntToUint16, EOpConvUintToUint16, EOpConvFloatToUint16,
    EOpConvDoubleToUint16, EOpConvFloat16ToUint16, EOpConvInt64ToUint16, EOpConvUint64ToUint16,
    EOpConvInt16ToUint16,
    // (from uint16)
    EOpConvUint16ToBool, EOpConvUint16ToInt, EOpConvUint16ToUint, EOpConvUint16ToFloat,
    EOpConvUint16ToDouble, EOpConvUint16ToFloat16, EOpConvUint16ToInt64, EOpConvUint16ToUint64,
#endif

    //
    // binary operations
    //

    EOpAdd, EOpSub, EOpMul, EOpDiv, EOpMod,
    EOpRightShift, EOpLeftShift,
    EOpAnd, EOpInclusiveOr, EOpExclusiveOr,
    EOpEqual, EOpNotEqual,
    EOpVectorEqual, EOpVectorNotEqual,          // component-wise comparisons
    EOpLessThan, EOpGreaterThan, EOpLessThanEqual, EOpGreaterThanEqual,
    EOpComma,

    EOpVectorTimesScalar,
    EOpVectorTimesMatrix,
    EOpMatrixTimesVector,
    EOpMatrixTimesScalar,

    EOpLogicalOr,
    EOpLogicalXor,
    EOpLogicalAnd,

    EOpIndexDirect,         // constant index known at parse time
    EOpIndexIndirect,       // index is a runtime expression
    EOpIndexDirectStruct,   // member selection by struct-field index

    EOpVectorSwizzle,

    EOpMethod,
    EOpScoping,

    //
    // Built-in functions mapped to operators
    //

    // trigonometric / hyperbolic
    EOpRadians, EOpDegrees,
    EOpSin, EOpCos, EOpTan,
    EOpAsin, EOpAcos, EOpAtan,
    EOpSinh, EOpCosh, EOpTanh,
    EOpAsinh, EOpAcosh, EOpAtanh,

    // exponential
    EOpPow, EOpExp, EOpLog, EOpExp2, EOpLog2, EOpSqrt, EOpInverseSqrt,

    // common
    EOpAbs, EOpSign,
    EOpFloor, EOpTrunc, EOpRound, EOpRoundEven, EOpCeil, EOpFract, EOpModf,
    EOpMin, EOpMax, EOpClamp, EOpMix, EOpStep, EOpSmoothStep,

    EOpIsNan,
    EOpIsInf,

    EOpFma,

    EOpFrexp,
    EOpLdexp,

    // bit-preserving reinterpretation
    EOpFloatBitsToInt, EOpFloatBitsToUint,
    EOpIntBitsToFloat, EOpUintBitsToFloat,
    EOpDoubleBitsToInt64, EOpDoubleBitsToUint64,
    EOpInt64BitsToDouble, EOpUint64BitsToDouble,
#ifdef AMD_EXTENSIONS
    EOpFloat16BitsToInt16, EOpFloat16BitsToUint16,
    EOpInt16BitsToFloat16, EOpUint16BitsToFloat16,
#endif
    // pack/unpack
    EOpPackSnorm2x16, EOpUnpackSnorm2x16,
    EOpPackUnorm2x16, EOpUnpackUnorm2x16,
    EOpPackSnorm4x8, EOpUnpackSnorm4x8,
    EOpPackUnorm4x8, EOpUnpackUnorm4x8,
    EOpPackHalf2x16, EOpUnpackHalf2x16,
    EOpPackDouble2x32, EOpUnpackDouble2x32,
    EOpPackInt2x32, EOpUnpackInt2x32,
    EOpPackUint2x32, EOpUnpackUint2x32,
#ifdef AMD_EXTENSIONS
    EOpPackFloat2x16, EOpUnpackFloat2x16,
    EOpPackInt2x16, EOpUnpackInt2x16,
    EOpPackUint2x16, EOpUnpackUint2x16,
    EOpPackInt4x16, EOpUnpackInt4x16,
    EOpPackUint4x16, EOpUnpackUint4x16,
#endif

    // geometric
    EOpLength, EOpDistance, EOpDot, EOpCross,
    EOpNormalize, EOpFaceForward, EOpReflect, EOpRefract,

#ifdef AMD_EXTENSIONS
    EOpMin3, EOpMax3, EOpMid3,
#endif

    // derivatives
    EOpDPdx,            // Fragment only
    EOpDPdy,            // Fragment only
    EOpFwidth,          // Fragment only
    EOpDPdxFine,        // Fragment only
    EOpDPdyFine,        // Fragment only
    EOpFwidthFine,      // Fragment only
    EOpDPdxCoarse,      // Fragment only
    EOpDPdyCoarse,      // Fragment only
    EOpFwidthCoarse,    // Fragment only

    EOpInterpolateAtCentroid, // Fragment only
    EOpInterpolateAtSample,   // Fragment only
    EOpInterpolateAtOffset,   // Fragment only

#ifdef AMD_EXTENSIONS
    EOpInterpolateAtVertex,
#endif

    // matrix
    EOpMatrixTimesMatrix,
    EOpOuterProduct,
    EOpDeterminant,
    EOpMatrixInverse,
    EOpTranspose,

    EOpFtransform,

    EOpNoise,

    EOpEmitVertex,           // geometry only
    EOpEndPrimitive,         // geometry only
    EOpEmitStreamVertex,     // geometry only
    EOpEndStreamPrimitive,   // geometry only

    // barriers
    EOpBarrier,
    EOpMemoryBarrier,
    EOpMemoryBarrierAtomicCounter,
    EOpMemoryBarrierBuffer,
    EOpMemoryBarrierImage,
    EOpMemoryBarrierShared,  // compute only
    EOpGroupMemoryBarrier,   // compute only

    // subgroup/ballot
    EOpBallot,
    EOpReadInvocation,
    EOpReadFirstInvocation,

    EOpAnyInvocation,
    EOpAllInvocations,
    EOpAllInvocationsEqual,

#ifdef AMD_EXTENSIONS
    // AMD shader-ballot reductions and scans
    EOpMinInvocations, EOpMaxInvocations, EOpAddInvocations,
    EOpMinInvocationsNonUniform, EOpMaxInvocationsNonUniform, EOpAddInvocationsNonUniform,
    EOpMinInvocationsInclusiveScan, EOpMaxInvocationsInclusiveScan, EOpAddInvocationsInclusiveScan,
    EOpMinInvocationsInclusiveScanNonUniform, EOpMaxInvocationsInclusiveScanNonUniform, EOpAddInvocationsInclusiveScanNonUniform,
    EOpMinInvocationsExclusiveScan, EOpMaxInvocationsExclusiveScan, EOpAddInvocationsExclusiveScan,
    EOpMinInvocationsExclusiveScanNonUniform, EOpMaxInvocationsExclusiveScanNonUniform, EOpAddInvocationsExclusiveScanNonUniform,
    EOpSwizzleInvocations,
    EOpSwizzleInvocationsMasked,
    EOpWriteInvocation,
    EOpMbcnt,

    EOpCubeFaceIndex,
    EOpCubeFaceCoord,
    EOpTime,
#endif

    // atomics on buffer/shared memory
    EOpAtomicAdd, EOpAtomicMin, EOpAtomicMax,
    EOpAtomicAnd, EOpAtomicOr, EOpAtomicXor,
    EOpAtomicExchange, EOpAtomicCompSwap,

    // atomic counters
    EOpAtomicCounterIncrement,
    EOpAtomicCounterDecrement,
    EOpAtomicCounter,
    EOpAtomicCounterAdd,
    EOpAtomicCounterSubtract,
    EOpAtomicCounterMin,
    EOpAtomicCounterMax,
    EOpAtomicCounterAnd,
    EOpAtomicCounterOr,
    EOpAtomicCounterXor,
    EOpAtomicCounterExchange,
    EOpAtomicCounterCompSwap,

    EOpAny,
    EOpAll,

    //
    // Branch
    //

    EOpKill,    // Fragment only
    EOpReturn,
    EOpBreak,
    EOpContinue,
    EOpCase,
    EOpDefault,

    //
    // Constructors
    // (kept between the two Guard enumerators so constructor-ness can be
    // tested as a range)
    //

    EOpConstructGuardStart,
    EOpConstructInt,    // these first scalar forms also identify what implicit conversion is needed
    EOpConstructUint,
    EOpConstructInt64,
    EOpConstructUint64,
#ifdef AMD_EXTENSIONS
    EOpConstructInt16,
    EOpConstructUint16,
#endif
    EOpConstructBool,
    EOpConstructFloat,
    EOpConstructDouble,
#ifdef AMD_EXTENSIONS
    EOpConstructFloat16,
#endif
    EOpConstructVec2, EOpConstructVec3, EOpConstructVec4,
    EOpConstructDVec2, EOpConstructDVec3, EOpConstructDVec4,
#ifdef AMD_EXTENSIONS
    EOpConstructF16Vec2, EOpConstructF16Vec3, EOpConstructF16Vec4,
#endif
    EOpConstructBVec2, EOpConstructBVec3, EOpConstructBVec4,
    EOpConstructIVec2, EOpConstructIVec3, EOpConstructIVec4,
    EOpConstructUVec2, EOpConstructUVec3, EOpConstructUVec4,
    EOpConstructI64Vec2, EOpConstructI64Vec3, EOpConstructI64Vec4,
    EOpConstructU64Vec2, EOpConstructU64Vec3, EOpConstructU64Vec4,
#ifdef AMD_EXTENSIONS
    EOpConstructI16Vec2, EOpConstructI16Vec3, EOpConstructI16Vec4,
    EOpConstructU16Vec2, EOpConstructU16Vec3, EOpConstructU16Vec4,
#endif
    EOpConstructMat2x2, EOpConstructMat2x3, EOpConstructMat2x4,
    EOpConstructMat3x2, EOpConstructMat3x3, EOpConstructMat3x4,
    EOpConstructMat4x2, EOpConstructMat4x3, EOpConstructMat4x4,
    EOpConstructDMat2x2, EOpConstructDMat2x3, EOpConstructDMat2x4,
    EOpConstructDMat3x2, EOpConstructDMat3x3, EOpConstructDMat3x4,
    EOpConstructDMat4x2, EOpConstructDMat4x3, EOpConstructDMat4x4,
    EOpConstructIMat2x2, EOpConstructIMat2x3, EOpConstructIMat2x4,
    EOpConstructIMat3x2, EOpConstructIMat3x3, EOpConstructIMat3x4,
    EOpConstructIMat4x2, EOpConstructIMat4x3, EOpConstructIMat4x4,
    EOpConstructUMat2x2, EOpConstructUMat2x3, EOpConstructUMat2x4,
    EOpConstructUMat3x2, EOpConstructUMat3x3, EOpConstructUMat3x4,
    EOpConstructUMat4x2, EOpConstructUMat4x3, EOpConstructUMat4x4,
    EOpConstructBMat2x2, EOpConstructBMat2x3, EOpConstructBMat2x4,
    EOpConstructBMat3x2, EOpConstructBMat3x3, EOpConstructBMat3x4,
    EOpConstructBMat4x2, EOpConstructBMat4x3, EOpConstructBMat4x4,
#ifdef AMD_EXTENSIONS
    EOpConstructF16Mat2x2, EOpConstructF16Mat2x3, EOpConstructF16Mat2x4,
    EOpConstructF16Mat3x2, EOpConstructF16Mat3x3, EOpConstructF16Mat3x4,
    EOpConstructF16Mat4x2, EOpConstructF16Mat4x3, EOpConstructF16Mat4x4,
#endif
    EOpConstructStruct,
    EOpConstructTextureSampler,
    EOpConstructGuardEnd,

    //
    // moves
    //

    EOpAssign,
    EOpAddAssign,
    EOpSubAssign,
    EOpMulAssign,
    EOpVectorTimesMatrixAssign,
    EOpVectorTimesScalarAssign,
    EOpMatrixTimesScalarAssign,
    EOpMatrixTimesMatrixAssign,
    EOpDivAssign,
    EOpModAssign,
    EOpAndAssign,
    EOpInclusiveOrAssign,
    EOpExclusiveOrAssign,
    EOpLeftShiftAssign,
    EOpRightShiftAssign,

    //
    // Array operators
    //

    EOpArrayLength,     // "Array" distinguishes from length(v) built-in function, but it applies to vectors and matrices as well.

    //
    // Image operations
    // (delimited by EOpImageGuardBegin/End for range testing)
    //

    EOpImageGuardBegin,

    EOpImageQuerySize,
    EOpImageQuerySamples,
    EOpImageLoad,
    EOpImageStore,
#ifdef AMD_EXTENSIONS
    EOpImageLoadLod,
    EOpImageStoreLod,
#endif
    EOpImageAtomicAdd,
    EOpImageAtomicMin,
    EOpImageAtomicMax,
    EOpImageAtomicAnd,
    EOpImageAtomicOr,
    EOpImageAtomicXor,
    EOpImageAtomicExchange,
    EOpImageAtomicCompSwap,

    EOpSubpassLoad,
    EOpSubpassLoadMS,
    EOpSparseImageLoad,
#ifdef AMD_EXTENSIONS
    EOpSparseImageLoadLod,
#endif

    EOpImageGuardEnd,

    //
    // Texture operations
    // (delimited by EOpTextureGuardBegin/End; the sampling subset is further
    // delimited by EOpSamplingGuardBegin/End, and the sparse subset by
    // EOpSparseTextureGuardBegin/End)
    //

    EOpTextureGuardBegin,

    EOpTextureQuerySize,
    EOpTextureQueryLod,
    EOpTextureQueryLevels,
    EOpTextureQuerySamples,

    EOpSamplingGuardBegin,

    EOpTexture,
    EOpTextureProj,
    EOpTextureLod,
    EOpTextureOffset,
    EOpTextureFetch,
    EOpTextureFetchOffset,
    EOpTextureProjOffset,
    EOpTextureLodOffset,
    EOpTextureProjLod,
    EOpTextureProjLodOffset,
    EOpTextureGrad,
    EOpTextureGradOffset,
    EOpTextureProjGrad,
    EOpTextureProjGradOffset,
    EOpTextureGather,
    EOpTextureGatherOffset,
    EOpTextureGatherOffsets,
    EOpTextureClamp,
    EOpTextureOffsetClamp,
    EOpTextureGradClamp,
    EOpTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
    EOpTextureGatherLod,
    EOpTextureGatherLodOffset,
    EOpTextureGatherLodOffsets,
#endif

    EOpSparseTextureGuardBegin,

    EOpSparseTexture,
    EOpSparseTextureLod,
    EOpSparseTextureOffset,
    EOpSparseTextureFetch,
    EOpSparseTextureFetchOffset,
    EOpSparseTextureLodOffset,
    EOpSparseTextureGrad,
    EOpSparseTextureGradOffset,
    EOpSparseTextureGather,
    EOpSparseTextureGatherOffset,
    EOpSparseTextureGatherOffsets,
    EOpSparseTexelsResident,
    EOpSparseTextureClamp,
    EOpSparseTextureOffsetClamp,
    EOpSparseTextureGradClamp,
    EOpSparseTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
    EOpSparseTextureGatherLod,
    EOpSparseTextureGatherLodOffset,
    EOpSparseTextureGatherLodOffsets,
#endif

    EOpSparseTextureGuardEnd,
    EOpSamplingGuardEnd,
    EOpTextureGuardEnd,

    //
    // Integer operations
    //

    EOpAddCarry,
    EOpSubBorrow,
    EOpUMulExtended,
    EOpIMulExtended,
    EOpBitfieldExtract,
    EOpBitfieldInsert,
    EOpBitFieldReverse,
    EOpBitCount,
    EOpFindLSB,
    EOpFindMSB,

    //
    // HLSL operations
    //

    EOpClip,                // discard if input value < 0
    EOpIsFinite,
    EOpLog10,               // base 10 log
    EOpRcp,                 // 1/x
    EOpSaturate,            // clamp from 0 to 1
    EOpSinCos,              // sin and cos in out parameters
    EOpGenMul,              // mul(x,y) on any of mat/vec/scalars
    EOpDst,                 // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
    EOpInterlockedAdd,      // atomic ops, but uses [optional] out arg instead of return
    EOpInterlockedAnd,      // ...
    EOpInterlockedCompareExchange, // ...
    EOpInterlockedCompareStore,    // ...
    EOpInterlockedExchange, // ...
    EOpInterlockedMax,      // ...
    EOpInterlockedMin,      // ...
    EOpInterlockedOr,       // ...
    EOpInterlockedXor,      // ...
    EOpAllMemoryBarrierWithGroupSync,       // memory barriers without non-hlsl AST equivalents
    EOpGroupMemoryBarrierWithGroupSync,     // ...
    EOpWorkgroupMemoryBarrier,              // ...
    EOpWorkgroupMemoryBarrierWithGroupSync, // ...
    EOpEvaluateAttributeSnapped,            // InterpolateAtOffset with int position on 16x16 grid
    EOpF32tof16,            // HLSL conversion: half of a PackHalf2x16
    EOpF16tof32,            // HLSL conversion: half of an UnpackHalf2x16
    EOpLit,                 // HLSL lighting coefficient vector
    EOpTextureBias,         // HLSL texture bias: will be lowered to EOpTexture
    EOpAsDouble,            // slightly different from EOpUint64BitsToDouble
    EOpD3DCOLORtoUBYTE4,    // convert and swizzle 4-component color to UBYTE4 range

    EOpMethodSample,                // Texture object methods.  These are translated to existing
    EOpMethodSampleBias,            // AST methods, and exist to represent HLSL semantics until that
    EOpMethodSampleCmp,             // translation is performed.  See HlslParseContext::decomposeSampleMethods().
    EOpMethodSampleCmpLevelZero,    // ...
    EOpMethodSampleGrad,            // ...
    EOpMethodSampleLevel,           // ...
    EOpMethodLoad,                  // ...
    EOpMethodGetDimensions,         // ...
    EOpMethodGetSamplePosition,     // ...
    EOpMethodGather,                // ...
    EOpMethodCalculateLevelOfDetail,          // ...
    EOpMethodCalculateLevelOfDetailUnclamped, // ...

    // Load already defined above for textures
    EOpMethodLoad2,                 // Structure buffer object methods.  These are translated to existing
    EOpMethodLoad3,                 // AST methods, and exist to represent HLSL semantics until that
    EOpMethodLoad4,                 // translation is performed.  See HlslParseContext::decomposeSampleMethods().
    EOpMethodStore,                 // ...
    EOpMethodStore2,                // ...
    EOpMethodStore3,                // ...
    EOpMethodStore4,                // ...
    EOpMethodIncrementCounter,      // ...
    EOpMethodDecrementCounter,      // ...
    // EOpMethodAppend is defined for geo shaders below
    EOpMethodConsume,

    // SM5 texture methods
    EOpMethodGatherRed,             // These are covered under the above EOpMethodSample comment about
    EOpMethodGatherGreen,           // translation to existing AST opcodes.  They exist temporarily
    EOpMethodGatherBlue,            // because HLSL arguments are slightly different.
    EOpMethodGatherAlpha,           // ...
    EOpMethodGatherCmp,             // ...
    EOpMethodGatherCmpRed,          // ...
    EOpMethodGatherCmpGreen,        // ...
    EOpMethodGatherCmpBlue,         // ...
    EOpMethodGatherCmpAlpha,        // ...

    // geometry methods
    EOpMethodAppend,                // Geometry shader methods
    EOpMethodRestartStrip,          // ...

    // matrix
    EOpMatrixSwizzle,               // select multiple matrix components (non-column)
};

class TIntermTraverser;
class TIntermOperator;
class TIntermAggregate;
class TIntermUnary;
class TIntermBinary;
class TIntermConstantUnion;
class TIntermSelection;
class TIntermSwitch;
class TIntermBranch;
class TIntermTyped;
class TIntermMethod;
class TIntermSymbol;

} // end namespace glslang

//
// Base class for the tree nodes
//
// (Put outside the glslang namespace, as it's used as part of the external interface.)
//
class TIntermNode {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())

    TIntermNode() { loc.init(); }
    virtual const glslang::TSourceLoc& getLoc() const { return loc; }
    virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
    virtual void traverse(glslang::TIntermTraverser*) = 0;

    // Down-cast helpers: each returns 'this' in the matching subclass,
    // nullptr (0) here in the base.
    virtual glslang::TIntermTyped*         getAsTyped()         { return 0; }
    virtual glslang::TIntermOperator*      getAsOperator()      { return 0; }
    virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return 0; }
    virtual glslang::TIntermAggregate*     getAsAggregate()     { return 0; }
    virtual glslang::TIntermUnary*         getAsUnaryNode()     { return 0; }
    virtual glslang::TIntermBinary*        getAsBinaryNode()    { return 0; }
    virtual glslang::TIntermSelection*     getAsSelectionNode() { return 0; }
    virtual glslang::TIntermSwitch*        getAsSwitchNode()    { return 0; }
    virtual glslang::TIntermMethod*        getAsMethodNode()    { return 0; }
    virtual glslang::TIntermSymbol*        getAsSymbolNode()    { return 0; }
    virtual glslang::TIntermBranch*        getAsBranchNode()    { return 0; }

    // const overloads of the same down-cast helpers
    virtual const glslang::TIntermTyped*         getAsTyped()         const { return 0; }
    virtual const glslang::TIntermOperator*      getAsOperator()      const { return 0; }
    virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; }
    virtual const glslang::TIntermAggregate*     getAsAggregate()     const { return 0; }
    virtual const glslang::TIntermUnary*         getAsUnaryNode()     const { return 0; }
    virtual const glslang::TIntermBinary*        getAsBinaryNode()    const { return 0; }
    virtual const glslang::TIntermSelection*     getAsSelectionNode() const { return 0; }
    virtual const glslang::TIntermSwitch*        getAsSwitchNode()    const { return 0; }
    virtual const glslang::TIntermMethod*        getAsMethodNode()    const { return 0; }
    virtual const glslang::TIntermSymbol*        getAsSymbolNode()    const { return 0; }
    virtual const glslang::TIntermBranch*        getAsBranchNode()    const { return 0; }
    virtual ~TIntermNode() { }

protected:
    TIntermNode(const TIntermNode&);            // copying disabled (declared, not defined)
    TIntermNode& operator=(const TIntermNode&); // assignment disabled (declared, not defined)
    glslang::TSourceLoc loc;                    // source location this node was parsed from
};

namespace glslang {

//
// This is just to help yacc.
//
struct TIntermNodePair {
    TIntermNode* node1;
    TIntermNode* node2;
};

//
// Intermediate class for nodes that have a type.
//
class TIntermTyped : public TIntermNode {
public:
    TIntermTyped(const TType& t) { type.shallowCopy(t); }
    TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
    virtual TIntermTyped* getAsTyped() { return this; }
    virtual const TIntermTyped* getAsTyped() const { return this; }
    virtual void setType(const TType& t) { type.shallowCopy(t); }
    virtual const TType& getType() const { return type; }
    virtual TType& getWritableType() { return type; }

    // Convenience forwarders onto the held TType.
    virtual TBasicType getBasicType() const { return type.getBasicType(); }
    virtual TQualifier& getQualifier() { return type.getQualifier(); }
    virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
    virtual void propagatePrecision(TPrecisionQualifier);
    virtual int getVectorSize() const { return type.getVectorSize(); }
    virtual int getMatrixCols() const { return type.getMatrixCols(); }
    virtual int getMatrixRows() const { return type.getMatrixRows(); }
    virtual bool isMatrix() const { return type.isMatrix(); }
    virtual bool isArray()  const { return type.isArray(); }
    virtual bool isVector() const { return type.isVector(); }
    virtual bool isScalar() const { return type.isScalar(); }
    virtual bool isStruct() const { return type.isStruct(); }
    TString getCompleteString() const { return type.getCompleteString(); }

protected:
    TIntermTyped& operator=(const TIntermTyped&); // assignment disabled (declared, not defined)
    TType type;                                   // the type of the value this node produces
};

//
// Selection control hints
//
enum TSelectionControl {
    ESelectionControlNone,
    ESelectionControlFlatten,
    ESelectionControlDontFlatten,
};

//
// Loop control hints
//
enum TLoopControl {
    ELoopControlNone,
    ELoopControlUnroll,
    ELoopControlDontUnroll,
};

//
// Handle for, do-while, and while loops.
//
class TIntermLoop : public TIntermNode {
public:
    TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
        body(aBody),
        test(aTest),
        terminal(aTerminal),
        first(testFirst),
        control(ELoopControlNone)
    { }

    virtual void traverse(TIntermTraverser*);
    TIntermNode*  getBody() const { return body; }
    TIntermTyped* getTest() const { return test; }
    TIntermTyped* getTerminal() const { return terminal; }
    bool testFirst() const { return first; }

    void setLoopControl(TLoopControl c) { control = c; }
    TLoopControl getLoopControl() const { return control; }

protected:
    TIntermNode* body;       // code to loop over
    TIntermTyped* test;      // exit condition associated with loop, could be 0 for 'for' loops
    TIntermTyped* terminal;  // exists for for-loops
    bool first;              // true for while and for, not for do-while
    TLoopControl control;    // loop control hint
};

//
// Handle case, break, continue, return, and kill.
//
class TIntermBranch : public TIntermNode {
public:
    // 'e' is the optional expression carried by the branch (e.g. a return
    // value); may be null for expressionless branches.
    TIntermBranch(TOperator op, TIntermTyped* e) :
        flowOp(op),
        expression(e) { }
    virtual       TIntermBranch* getAsBranchNode()       { return this; }
    virtual const TIntermBranch* getAsBranchNode() const { return this; }
    virtual void traverse(TIntermTraverser*);
    TOperator getFlowOp() const { return flowOp; }
    TIntermTyped* getExpression() const { return expression; }
protected:
    TOperator flowOp;          // which branch kind (EOpReturn, EOpBreak, ...)
    TIntermTyped* expression;  // associated expression, if any
};

//
// Represent method names before seeing their calling signature
// or resolving them to operations.  Just an expression as the base object
// and a textural name.
//
class TIntermMethod : public TIntermTyped {
public:
    TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
    virtual       TIntermMethod* getAsMethodNode()       { return this; }
    virtual const TIntermMethod* getAsMethodNode() const { return this; }
    virtual const TString& getMethodName() const { return method; }
    virtual TIntermTyped* getObject() const { return object; }
    virtual void traverse(TIntermTraverser*);
protected:
    TIntermTyped* object;  // the object the method is invoked on
    TString method;        // the (not yet resolved) method name
};

//
// Nodes that correspond to symbols or constants in the source code.
//
class TIntermSymbol : public TIntermTyped {
public:
    // if symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym.  If sym comes from
    // per process threadPoolAllocator, then it causes increased memory usage per compile
    // it is essential to use "symbol = sym" to assign to symbol
    TIntermSymbol(int i, const TString& n, const TType& t)
        : TIntermTyped(t), id(i),
#ifdef ENABLE_HLSL
          flattenSubset(-1),
#endif
          constSubtree(nullptr)
    { name = n; }
    virtual int getId() const { return id; }
    virtual const TString& getName() const { return name; }
    virtual void traverse(TIntermTraverser*);
    virtual       TIntermSymbol* getAsSymbolNode()       { return this; }
    virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
    void setConstArray(const TConstUnionArray& c) { constArray = c; }
    const TConstUnionArray& getConstArray() const { return constArray; }
    void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
    TIntermTyped* getConstSubtree() const { return constSubtree; }
#ifdef ENABLE_HLSL
    void setFlattenSubset(int subset) { flattenSubset = subset; }
    int getFlattenSubset() const { return flattenSubset; } // -1 means full object
#endif

protected:
    int id;                      // the unique id of the symbol this node represents
#ifdef ENABLE_HLSL
    int flattenSubset;           // how deeply the flattened object rooted at id has been dereferenced
#endif
    TString name;                // the name of the symbol this node represents
    TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
    TIntermTyped* constSubtree;  // constant expression tree, set via setConstSubtree()
};

// A node holding a front-end compile-time constant value (or literal).
class TIntermConstantUnion : public TIntermTyped {
public:
    TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
    const TConstUnionArray& getConstArray() const { return constArray; }
    virtual       TIntermConstantUnion* getAsConstantUnion()       { return this; }
    virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
    virtual void traverse(TIntermTraverser*);
    // Constant folding entry points (defined elsewhere): binary and
    // unary/constructor forms respectively.
    virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
    virtual TIntermTyped* fold(TOperator, const TType&) const;
    void setLiteral() { literal = true; }
    void setExpression() { literal = false; }
    bool isLiteral() const { return literal; }

protected:
    TIntermConstantUnion& operator=(const TIntermConstantUnion&); // assignment disabled (declared, not defined)

    const TConstUnionArray constArray; // the constant value; immutable after construction
    bool literal;                      // true if node represents a literal in the source code
};

// Represent the independent aspects of a texturing TOperator,
// as decomposed by TIntermOperator::crackTexture().
struct TCrackedTextureOp {
    bool query;     // a size/lod/levels/samples/residency query, not a sample
    bool proj;      // projective form
    bool lod;       // explicit level of detail
    bool fetch;     // texel fetch (no filtering)
    bool offset;    // single texel offset
    bool offsets;   // per-component array of offsets (gather)
    bool gather;    // four-texel gather
    bool grad;      // explicit gradients
    bool subpass;   // subpass-input load
    bool lodClamp;  // sparse LOD clamp variant
};

//
// Intermediate class for node types that hold operators.
//
class TIntermOperator : public TIntermTyped {
public:
    virtual       TIntermOperator* getAsOperator()       { return this; }
    virtual const TIntermOperator* getAsOperator() const { return this; }
    TOperator getOp() const { return op; }
    void setOp(TOperator newOp) { op = newOp; }
    bool modifiesState() const;
    bool isConstructor() const;
    // Range tests against the guard enumerators in TOperator; these rely on
    // the declaration order of the enum.
    bool isTexture()       const { return op > EOpTextureGuardBegin       && op < EOpTextureGuardEnd; }
    bool isSampling()      const { return op > EOpSamplingGuardBegin      && op < EOpSamplingGuardEnd; }
    bool isImage()         const { return op > EOpImageGuardBegin         && op < EOpImageGuardEnd; }
    bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
    bool isSparseImage()   const { return op == EOpSparseImageLoad; }

    void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
    // Effective operation precision: the explicit override if set, otherwise
    // the result type's precision.
    TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
                                                               operationPrecision :
                                                               type.getQualifier().precision; }
    TString getCompleteString() const
    {
        TString cs = type.getCompleteString();
        if (getOperationPrecision() != type.getQualifier().precision) {
            cs += ", operation at ";
            cs += GetPrecisionQualifierString(getOperationPrecision());
        }

        return cs;
    }

    // Crack the op into the individual dimensions of texturing operation.
    void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
    {
        cracked.query = false;
        cracked.proj = false;
        cracked.lod = false;
        cracked.fetch = false;
        cracked.offset = false;
        cracked.offsets = false;
        cracked.gather = false;
        cracked.grad = false;
        cracked.subpass = false;
        cracked.lodClamp = false;

        switch (op) {
        case EOpImageQuerySize:
        case EOpImageQuerySamples:
        case EOpTextureQuerySize:
        case EOpTextureQueryLod:
        case EOpTextureQueryLevels:
        case EOpTextureQuerySamples:
        case EOpSparseTexelsResident:
            cracked.query = true;
            break;
        case EOpTexture:
        case EOpSparseTexture:
            break;
        case EOpTextureClamp:
        case EOpSparseTextureClamp:
            cracked.lodClamp = true;
            break;
        case EOpTextureProj:
            cracked.proj = true;
            break;
        case EOpTextureLod:
        case EOpSparseTextureLod:
            cracked.lod = true;
            break;
        case EOpTextureOffset:
        case EOpSparseTextureOffset:
            cracked.offset = true;
            break;
        case EOpTextureOffsetClamp:
        case EOpSparseTextureOffsetClamp:
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureFetch:
        case EOpSparseTextureFetch:
            cracked.fetch = true;
            // 1D, non-MS 2D, and 3D fetches carry an explicit lod argument
            if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureFetchOffset:
        case EOpSparseTextureFetchOffset:
            cracked.fetch = true;
            cracked.offset = true;
            if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureProjOffset:
            cracked.offset = true;
            cracked.proj = true;
            break;
        case EOpTextureLodOffset:
        case EOpSparseTextureLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureProjLod:
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureProjLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureGrad:
        case EOpSparseTextureGrad:
            cracked.grad = true;
            break;
        case EOpTextureGradClamp:
        case EOpSparseTextureGradClamp:
            cracked.grad = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradOffset:
        case EOpSparseTextureGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            break;
        case EOpTextureProjGrad:
            cracked.grad = true;
            cracked.proj = true;
            break;
        case EOpTextureProjGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            cracked.proj = true;
            break;
        case EOpTextureGradOffsetClamp:
        case EOpSparseTextureGradOffsetClamp:
            cracked.grad = true;
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGather:
        case EOpSparseTextureGather:
            cracked.gather = true;
            break;
        case EOpTextureGatherOffset:
        case EOpSparseTextureGatherOffset:
            cracked.gather = true;
            cracked.offset = true;
            break;
        case EOpTextureGatherOffsets:
        case EOpSparseTextureGatherOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            break;
#ifdef AMD_EXTENSIONS
        case EOpTextureGatherLod:
        case EOpSparseTextureGatherLod:
            cracked.gather = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffset:
        case EOpSparseTextureGatherLodOffset:
            cracked.gather = true;
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffsets:
        case EOpSparseTextureGatherLodOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            cracked.lod = true;
            break;
        case EOpImageLoadLod:
        case EOpImageStoreLod:
        case EOpSparseImageLoadLod:
            cracked.lod = true;
            break;
#endif
        case EOpSubpassLoad:
        case EOpSubpassLoadMS:
            cracked.subpass = true;
            break;
        default:
            break;
        }
    }

protected:
    TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
    TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}
    TOperator op;
    // The result precision is in the inherited TType, and is usually meant to be both
    // the operation precision and the result precision.  However, some more complex things,
    // like built-in function calls, distinguish between the two, in which case non-EqpNone
    // 'operationPrecision' overrides the result precision as far as operation precision
    // is concerned.
    TPrecisionQualifier operationPrecision;
};

//
// Nodes for all the basic binary math operators.
1237 // 1238 class TIntermBinary : public TIntermOperator { 1239 public: 1240 TIntermBinary(TOperator o) : TIntermOperator(o) {} 1241 virtual void traverse(TIntermTraverser*); 1242 virtual void setLeft(TIntermTyped* n) { left = n; } 1243 virtual void setRight(TIntermTyped* n) { right = n; } 1244 virtual TIntermTyped* getLeft() const { return left; } 1245 virtual TIntermTyped* getRight() const { return right; } 1246 virtual TIntermBinary* getAsBinaryNode() { return this; } 1247 virtual const TIntermBinary* getAsBinaryNode() const { return this; } 1248 virtual void updatePrecision(); 1249 protected: 1250 TIntermTyped* left; 1251 TIntermTyped* right; 1252 }; 1253 1254 // 1255 // Nodes for unary math operators. 1256 // 1257 class TIntermUnary : public TIntermOperator { 1258 public: 1259 TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(0) {} 1260 TIntermUnary(TOperator o) : TIntermOperator(o), operand(0) {} 1261 virtual void traverse(TIntermTraverser*); 1262 virtual void setOperand(TIntermTyped* o) { operand = o; } 1263 virtual TIntermTyped* getOperand() { return operand; } 1264 virtual const TIntermTyped* getOperand() const { return operand; } 1265 virtual TIntermUnary* getAsUnaryNode() { return this; } 1266 virtual const TIntermUnary* getAsUnaryNode() const { return this; } 1267 virtual void updatePrecision(); 1268 protected: 1269 TIntermTyped* operand; 1270 }; 1271 1272 typedef TVector<TIntermNode*> TIntermSequence; 1273 typedef TVector<int> TQualifierList; 1274 // 1275 // Nodes that operate on an arbitrary sized set of children. 
1276 // 1277 class TIntermAggregate : public TIntermOperator { 1278 public: 1279 TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(0) { } 1280 TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(0) { } 1281 ~TIntermAggregate() { delete pragmaTable; } 1282 virtual TIntermAggregate* getAsAggregate() { return this; } 1283 virtual const TIntermAggregate* getAsAggregate() const { return this; } 1284 virtual void setOperator(TOperator o) { op = o; } 1285 virtual TIntermSequence& getSequence() { return sequence; } 1286 virtual const TIntermSequence& getSequence() const { return sequence; } 1287 virtual void setName(const TString& n) { name = n; } 1288 virtual const TString& getName() const { return name; } 1289 virtual void traverse(TIntermTraverser*); 1290 virtual void setUserDefined() { userDefined = true; } 1291 virtual bool isUserDefined() { return userDefined; } 1292 virtual TQualifierList& getQualifierList() { return qualifier; } 1293 virtual const TQualifierList& getQualifierList() const { return qualifier; } 1294 void setOptimize(bool o) { optimize = o; } 1295 void setDebug(bool d) { debug = d; } 1296 bool getOptimize() const { return optimize; } 1297 bool getDebug() const { return debug; } 1298 void addToPragmaTable(const TPragmaTable& pTable); 1299 const TPragmaTable& getPragmaTable() const { return *pragmaTable; } 1300 protected: 1301 TIntermAggregate(const TIntermAggregate&); // disallow copy constructor 1302 TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator 1303 TIntermSequence sequence; 1304 TQualifierList qualifier; 1305 TString name; 1306 bool userDefined; // used for user defined function names 1307 bool optimize; 1308 bool debug; 1309 TPragmaTable* pragmaTable; 1310 }; 1311 1312 // 1313 // For if tests. 
//
class TIntermSelection : public TIntermTyped {
public:
    // Statement form: no result value, so the node's type is void.
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
        TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB), control(ESelectionControlNone) {}
    // Expression form (e.g. ?:): carries the result type.
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
        TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB), control(ESelectionControlNone) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermTyped* getCondition() const { return condition; }
    virtual TIntermNode* getTrueBlock() const { return trueBlock; }
    virtual TIntermNode* getFalseBlock() const { return falseBlock; }
    virtual TIntermSelection* getAsSelectionNode() { return this; }
    virtual const TIntermSelection* getAsSelectionNode() const { return this; }
    void setSelectionControl(TSelectionControl c) { control = c; }
    TSelectionControl getSelectionControl() const { return control; }
protected:
    TIntermTyped* condition;
    TIntermNode* trueBlock;
    TIntermNode* falseBlock;  // may be null when there is no 'else'
    TSelectionControl control; // selection control hint
};

//
// For switch statements. Designed use is that a switch will have sequence of nodes
// that are either case/default nodes or a *single* node that represents all the code
// in between (if any) consecutive case/defaults. So, a traversal need only deal with
// 0 or 1 nodes per case/default statement.
//
class TIntermSwitch : public TIntermNode {
public:
    TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b), control(ESelectionControlNone) { }
    virtual void traverse(TIntermTraverser*);
    virtual TIntermNode* getCondition() const { return condition; }
    virtual TIntermAggregate* getBody() const { return body; }
    virtual TIntermSwitch* getAsSwitchNode() { return this; }
    virtual const TIntermSwitch* getAsSwitchNode() const { return this; }
    void setSelectionControl(TSelectionControl c) { control = c; }
    TSelectionControl getSelectionControl() const { return control; }
protected:
    TIntermTyped* condition;    // the switched-on expression
    TIntermAggregate* body;     // sequence of case/default nodes and code blocks
    TSelectionControl control;  // selection control hint
};

// When a visit callback is invoked relative to a node's children:
// before (pre), between (in), or after (post).
enum TVisit
{
    EvPreVisit,
    EvInVisit,
    EvPostVisit
};

//
// For traversing the tree. User should derive from this,
// put their traversal specific data in it, and then pass
// it to a Traverse method.
//
// When using this, just fill in the methods for nodes you want visited.
// Return false from a pre-visit to skip visiting that node's subtree.
//
// Explicitly set postVisit to true if you want post visiting, otherwise,
// filled in methods will only be called at pre-visit time (before processing
// the subtree). Similarly for inVisit for in-order visiting of nodes with
// multiple children.
//
// If you only want post-visits, explicitly turn off preVisit (and inVisit)
// and turn on postVisit.
//
// In general, for the visit*() methods, return true from interior nodes
// to have the traversal continue on to children.
//
// If you process children yourself, or don't want them processed, return false.
//
class TIntermTraverser {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
    TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
            preVisit(preVisit),
            inVisit(inVisit),
            postVisit(postVisit),
            rightToLeft(rightToLeft),
            depth(0),
            maxDepth(0) { }
    virtual ~TIntermTraverser() { }

    // Override the subset of these that the derived traverser cares about;
    // the defaults visit nothing and always continue into children.
    virtual void visitSymbol(TIntermSymbol*)               { }
    virtual void visitConstantUnion(TIntermConstantUnion*) { }
    virtual bool visitBinary(TVisit, TIntermBinary*)       { return true; }
    virtual bool visitUnary(TVisit, TIntermUnary*)         { return true; }
    virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
    virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
    virtual bool visitLoop(TVisit, TIntermLoop*)           { return true; }
    virtual bool visitBranch(TVisit, TIntermBranch*)       { return true; }
    virtual bool visitSwitch(TVisit, TIntermSwitch*)       { return true; }

    // Deepest node depth reached so far during the traversal.
    int getMaxDepth() const { return maxDepth; }

    // Called when descending into 'current'; maintains the depth counter
    // and the root-to-parent path stack.
    void incrementDepth(TIntermNode *current)
    {
        depth++;
        maxDepth = (std::max)(maxDepth, depth);
        path.push_back(current);
    }

    void decrementDepth()
    {
        depth--;
        path.pop_back();
    }

    // Parent of the node currently being visited, or NULL at the root.
    TIntermNode *getParentNode()
    {
        return path.size() == 0 ? NULL : path.back();
    }

    const bool preVisit;
    const bool inVisit;
    const bool postVisit;
    const bool rightToLeft;

protected:
    TIntermTraverser& operator=(TIntermTraverser&);

    int depth;
    int maxDepth;

    // All the nodes from root to the current node's parent during traversing.
    TVector<TIntermNode *> path;
};

// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
// sized with the same symbol, involving no operations"
inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
{
    return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
           node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
}

} // end namespace glslang

#endif // __INTERMEDIATE_H