Home | History | Annotate | Download | only in MachineIndependent
      1 //
      2 // Copyright (C) 2013-2016 LunarG, Inc.
      3 //
      4 // All rights reserved.
      5 //
      6 // Redistribution and use in source and binary forms, with or without
      7 // modification, are permitted provided that the following conditions
      8 // are met:
      9 //
     10 //    Redistributions of source code must retain the above copyright
     11 //    notice, this list of conditions and the following disclaimer.
     12 //
     13 //    Redistributions in binary form must reproduce the above
     14 //    copyright notice, this list of conditions and the following
     15 //    disclaimer in the documentation and/or other materials provided
     16 //    with the distribution.
     17 //
     18 //    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
     19 //    contributors may be used to endorse or promote products derived
     20 //    from this software without specific prior written permission.
     21 //
     22 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     23 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     24 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     25 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     26 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     27 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     28 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     29 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     30 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
     32 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     33 // POSSIBILITY OF SUCH DAMAGE.
     34 //
     35 
     36 #include "../Include/Common.h"
     37 #include "reflection.h"
     38 #include "LiveTraverser.h"
     39 #include "localintermediate.h"
     40 
     41 #include "gl_types.h"
     42 
     43 //
     44 // Grow the reflection database through a friend traverser class of TReflection and a
     45 // collection of functions to do a liveness traversal that note what uniforms are used
     46 // in semantically non-dead code.
     47 //
     48 // Can be used multiple times, once per stage, to grow a program reflection.
     49 //
     50 // High-level algorithm for one stage:
     51 //
     52 // 1. Put the entry point on the list of live functions.
     53 //
     54 // 2. Traverse any live function, while skipping if-tests with a compile-time constant
     55 //    condition of false, and while adding any encountered function calls to the live
     56 //    function list.
     57 //
     58 //    Repeat until the live function list is empty.
     59 //
     60 // 3. Add any encountered uniform variables and blocks to the reflection database.
     61 //
     62 // Can be attempted with a failed link, but will return false if recursion had been detected, or
     63 // there wasn't exactly one entry point.
     64 //
     65 
     66 namespace glslang {
     67 
     68 //
     69 // The traverser: mostly pass through, except
     70 //  - processing binary nodes to see if they are dereferences of an aggregates to track
     71 //  - processing symbol nodes to see if they are non-aggregate objects to track
     72 //
     73 // This ignores semantically dead code by using TLiveTraverser.
     74 //
     75 // This is in the glslang namespace directly so it can be a friend of TReflection.
     76 //
     77 
class TReflectionTraverser : public TLiveTraverser {
public:
    // Build reflection data for one stage: 'i' is the stage's intermediate
    // representation; results accumulate into the shared database 'r'.
    TReflectionTraverser(const TIntermediate& i, TReflection& r) :
         TLiveTraverser(i), reflection(r) { }

    // TLiveTraverser overrides: binary nodes catch aggregate dereferences,
    // symbol nodes catch whole-object (non-dereferenced) uses.
    virtual bool visitBinary(TVisit, TIntermBinary* node);
    virtual void visitSymbol(TIntermSymbol* base);
     85 
     86     // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
     87     // However, no dereference doesn't mean simple... it could be a complex aggregate.
     88     void addUniform(const TIntermSymbol& base)
     89     {
     90         if (processedDerefs.find(&base) == processedDerefs.end()) {
     91             processedDerefs.insert(&base);
     92 
     93             // Use a degenerate (empty) set of dereferences to immediately put as at the end of
     94             // the dereference change expected by blowUpActiveAggregate.
     95             TList<TIntermBinary*> derefs;
     96             blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0);
     97         }
     98     }
     99 
    100     void addAttribute(const TIntermSymbol& base)
    101     {
    102         if (processedDerefs.find(&base) == processedDerefs.end()) {
    103             processedDerefs.insert(&base);
    104 
    105             const TString &name = base.getName();
    106             const TType &type = base.getType();
    107 
    108             TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
    109             if (it == reflection.nameToIndex.end()) {
    110                 reflection.nameToIndex[name] = (int)reflection.indexToAttribute.size();
    111                 reflection.indexToAttribute.push_back(TObjectReflection(name, type, 0, mapToGlType(type), 0, 0));
    112             }
    113         }
    114     }
    115 
    116     // Lookup or calculate the offset of a block member, using the recursively
    117     // defined block offset rules.
    118     int getOffset(const TType& type, int index)
    119     {
    120         const TTypeList& memberList = *type.getStruct();
    121 
    122         // Don't calculate offset if one is present, it could be user supplied
    123         // and different than what would be calculated.  That is, this is faster,
    124         // but not just an optimization.
    125         if (memberList[index].type->getQualifier().hasOffset())
    126             return memberList[index].type->getQualifier().layoutOffset;
    127 
    128         int memberSize;
    129         int dummyStride;
    130         int offset = 0;
    131         for (int m = 0; m <= index; ++m) {
    132             // modify just the children's view of matrix layout, if there is one for this member
    133             TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
    134             int memberAlignment = intermediate.getBaseAlignment(*memberList[m].type, memberSize, dummyStride,
    135                                                                 type.getQualifier().layoutPacking == ElpStd140,
    136                                                                 subMatrixLayout != ElmNone
    137                                                                     ? subMatrixLayout == ElmRowMajor
    138                                                                     : type.getQualifier().layoutMatrix == ElmRowMajor);
    139             RoundToPow2(offset, memberAlignment);
    140             if (m < index)
    141                 offset += memberSize;
    142         }
    143 
    144         return offset;
    145     }
    146 
    147     // Calculate the block data size.
    148     // Block arrayness is not taken into account, each element is backed by a separate buffer.
    149     int getBlockSize(const TType& blockType)
    150     {
    151         const TTypeList& memberList = *blockType.getStruct();
    152         int lastIndex = (int)memberList.size() - 1;
    153         int lastOffset = getOffset(blockType, lastIndex);
    154 
    155         int lastMemberSize;
    156         int dummyStride;
    157         intermediate.getBaseAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
    158                                       blockType.getQualifier().layoutPacking == ElpStd140,
    159                                       blockType.getQualifier().layoutMatrix == ElmRowMajor);
    160 
    161         return lastOffset + lastMemberSize;
    162     }
    163 
    // Traverse the provided deref chain, including the base, and
    // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
    // - recursively expand any variable array index in the middle of that traversal
    // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
    //
    // arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
    // A value of 0 for arraySize will mean to use the full array's size.
    //
    // 'offset' is the running byte offset within a block (-1 when not inside a block),
    // and 'blockIndex' is the owning block's reflection index (-1 for loose uniforms).
    void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
                               TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize)
    {
        // process the part of the dereference chain that was explicit in the shader
        TString name = baseName;
        const TType* terminalType = &baseType;
        for (; deref != derefs.end(); ++deref) {
            TIntermBinary* visitNode = *deref;
            terminalType = &visitNode->getType();
            int index;
            switch (visitNode->getOp()) {
            case EOpIndexIndirect:
                // A variable (run-time) index: conservatively treat every element as live.
                // Visit all the indices of this array, and for each one add on the remaining dereferencing
                for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
                    // blocks are named by the block name, not "block[i]", hence the EbtBlock check
                    TString newBaseName = name;
                    if (baseType.getBasicType() != EbtBlock)
                        newBaseName.append(TString("[") + String(i) + "]");
                    TList<TIntermBinary*>::const_iterator nextDeref = deref;
                    ++nextDeref;
                    // derefType is the element type: one array dimension stripped off
                    TType derefType(*terminalType, 0);
                    blowUpActiveAggregate(derefType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize);
                }

                // it was all completed in the recursive calls above
                return;
            case EOpIndexDirect:
                // a compile-time constant array index: append "[index]" (except on block names)
                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (baseType.getBasicType() != EbtBlock)
                    name.append(TString("[") + String(index) + "]");
                break;
            case EOpIndexDirectStruct:
                // a struct/block member selection: accumulate the member's offset and append ".field"
                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (offset >= 0)
                    offset += getOffset(visitNode->getLeft()->getType(), index);
                if (name.size() > 0)
                    name.append(".");
                name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
                break;
            default:
                break;
            }
        }

        // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
        if (! isReflectionGranularity(*terminalType)) {
            if (terminalType->isArray()) {
                // Visit all the indices of this array, and for each one,
                // fully explode the remaining aggregate to dereference
                for (int i = 0; i < std::max(terminalType->getOuterArraySize(), 1); ++i) {
                    TString newBaseName = name;
                    newBaseName.append(TString("[") + String(i) + "]");
                    TType derefType(*terminalType, 0);
                    blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
                }
            } else {
                // Visit all members of this aggregate, and for each one,
                // fully explode the remaining aggregate to dereference
                const TTypeList& typeList = *terminalType->getStruct();
                for (int i = 0; i < (int)typeList.size(); ++i) {
                    TString newBaseName = name;
                    newBaseName.append(TString(".") + typeList[i].type->getFieldName());
                    TType derefType(*terminalType, i);
                    blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
                }
            }

            // it was all completed in the recursive calls above
            return;
        }

        // Finally, add a full string to the reflection database, and update the array size if necessary.
        // If the dereferenced entity to record is an array, compute the size and update the maximum size.

        // there might not be a final array dereference, it could have been copied as an array object
        if (arraySize == 0)
            arraySize = mapToGlArraySize(*terminalType);

        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
        if (it == reflection.nameToIndex.end()) {
            reflection.nameToIndex[name] = (int)reflection.indexToUniform.size();
            reflection.indexToUniform.push_back(TObjectReflection(name, *terminalType, offset,
                                                                  mapToGlType(*terminalType),
                                                                  arraySize, blockIndex));
        } else if (arraySize > 1) {
            // already recorded (e.g., from another stage or dereference); keep the largest observed size
            int& reflectedArraySize = reflection.indexToUniform[it->second].size;
            reflectedArraySize = std::max(arraySize, reflectedArraySize);
        }
    }
    259 
    // Add a uniform dereference where blocks/struct/arrays are involved in the access.
    // Handles the situation where the left node is at the correct or too coarse a
    // granularity for reflection.  (That is, further dereferences up the tree will be
    // skipped.) Earlier dereferences, down the tree, will be handled
    // at the same time, and logged to prevent reprocessing as the tree is traversed.
    //
    // Note: Other things like the following must be caught elsewhere:
    //  - a simple non-array, non-struct variable (no dereference even conceivable)
    //  - an aggregrate consumed en masse, without a dereference
    //
    // So, this code is for cases like
    //   - a struct/block dereferencing a member (whether the member is array or not)
    //   - an array of struct
    //   - structs/arrays containing the above
    //
    void addDereferencedUniform(TIntermBinary* topNode)
    {
        // See if too fine-grained to process (wait to get further down the tree)
        const TType& leftType = topNode->getLeft()->getType();
        if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
            return;

        // We have an array or structure or block dereference, see if it's a uniform
        // based dereference (if not, skip it).
        TIntermSymbol* base = findBase(topNode);
        if (! base || ! base->getQualifier().isUniformOrBuffer())
            return;

        // See if we've already processed this (e.g., in the middle of something
        // we did earlier), and if so skip it
        if (processedDerefs.find(topNode) != processedDerefs.end())
            return;

        // Process this uniform dereference

        // offset/blockIndex stay -1 for loose uniforms; they become real values
        // only when the base is a block (set just below).
        int offset = -1;
        int blockIndex = -1;
        bool anonymous = false;

        // See if we need to record the block itself
        bool block = base->getBasicType() == EbtBlock;
        if (block) {
            offset = 0;
            anonymous = IsAnonymous(base->getName());

            const TString& blockName = base->getType().getTypeName();

            if (base->getType().isArray()) {
                // an array of blocks: record one block entry per element, named "block[e]"
                TType derefType(base->getType(), 0);

                assert(! anonymous);
                for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
                    blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
                                              getBlockSize(base->getType()));
            } else
                blockIndex = addBlockName(blockName, base->getType(), getBlockSize(base->getType()));
        }

        // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
        // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
        TList<TIntermBinary*> derefs;
        for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
            if (isReflectionGranularity(visitNode->getLeft()->getType()))
                continue;

            // push_front so the chain ends up ordered base-first for the forward walk
            derefs.push_front(visitNode);
            processedDerefs.insert(visitNode);
        }
        processedDerefs.insert(base);

        // See if we have a specific array size to stick to while enumerating the explosion of the aggregate
        int arraySize = 0;
        if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
            // a constant index into the final array: the highest index seen + 1 bounds the active size
            if (topNode->getOp() == EOpIndexDirect)
                arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
        }

        // Put the dereference chain together, forward
        TString baseName;
        if (! anonymous) {
            // blocks are named by their block (type) name; other uniforms by the instance name
            if (block)
                baseName = base->getType().getTypeName();
            else
                baseName = base->getName();
        }
        blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize);
    }
    347 
    348     int addBlockName(const TString& name, const TType& type, int size)
    349     {
    350         int blockIndex;
    351         TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
    352         if (reflection.nameToIndex.find(name) == reflection.nameToIndex.end()) {
    353             blockIndex = (int)reflection.indexToUniformBlock.size();
    354             reflection.nameToIndex[name] = blockIndex;
    355             reflection.indexToUniformBlock.push_back(TObjectReflection(name, type, -1, -1, size, -1));
    356         } else
    357             blockIndex = it->second;
    358 
    359         return blockIndex;
    360     }
    361 
    362     // Are we at a level in a dereference chain at which individual active uniform queries are made?
    363     bool isReflectionGranularity(const TType& type)
    364     {
    365         return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct;
    366     }
    367 
    368     // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
    369     // Return 0 if the topology does not fit this situation.
    370     TIntermSymbol* findBase(const TIntermBinary* node)
    371     {
    372         TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
    373         if (base)
    374             return base;
    375         TIntermBinary* left = node->getLeft()->getAsBinaryNode();
    376         if (! left)
    377             return nullptr;
    378 
    379         return findBase(left);
    380     }
    381 
    //
    // Translate a glslang sampler type into the GL API #define number.
    //
    // Dispatches on image-vs-sampler, then base type, then dimension, with
    // multisample/shadow/arrayed variants resolved at the leaves.
    //
    // NOTE(review): the nested switches use `case false:`/`case true:` on int casts,
    // and the outer dimension switches have no `default:` — for a dimension not listed
    // (e.g. a future Esd* value) control falls through into the next basic-type case.
    // All currently-listed paths return before any fall-through can happen; confirm
    // before adding new enum values.
    int mapSamplerToGlType(TSampler sampler)
    {
        if (! sampler.image) {
            // a sampler...
            switch (sampler.type) {
            case EbtFloat:
                switch ((int)sampler.dim) {
                case Esd1D:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
                    case true:  return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
                    }
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:
                        switch ((int)sampler.shadow) {
                        case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
                        case true:  return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
                        }
                    case true:      return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_SAMPLER_3D;
                case EsdCube:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
                    case true:  return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
                    }
                case EsdRect:
                    return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_SAMPLER_BUFFER;
                }
            case EbtInt:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:  return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D;
                    case true:   return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
                                                        : GL_INT_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_INT_SAMPLER_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE;
                case EsdRect:
                    return GL_INT_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_INT_SAMPLER_BUFFER;
                }
            case EbtUint:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:  return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D;
                    case true:   return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
                                                        : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_UNSIGNED_INT_SAMPLER_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE;
                case EsdRect:
                    return GL_UNSIGNED_INT_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_UNSIGNED_INT_SAMPLER_BUFFER;
                }
            default:
                return 0;
            }
        } else {
            // an image...
            switch (sampler.type) {
            case EbtFloat:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:     return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D;
                    case true:      return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE;
                case EsdRect:
                    return GL_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_IMAGE_BUFFER;
                }
            case EbtInt:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:  return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D;
                    case true:   return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_INT_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE;
                case EsdRect:
                    return GL_INT_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_INT_IMAGE_BUFFER;
                }
            case EbtUint:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:  return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D;
                    case true:   return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY
                                                        : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_UNSIGNED_INT_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE;
                case EsdRect:
                    return GL_UNSIGNED_INT_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_UNSIGNED_INT_IMAGE_BUFFER;
                }
            default:
                return 0;
            }
        }
    }
    522 
    523     //
    524     // Translate a glslang type into the GL API #define number.
    525     // Ignores arrayness.
    526     //
    527     int mapToGlType(const TType& type)
    528     {
    529         switch (type.getBasicType()) {
    530         case EbtSampler:
    531             return mapSamplerToGlType(type.getSampler());
    532         case EbtStruct:
    533         case EbtBlock:
    534         case EbtVoid:
    535             return 0;
    536         default:
    537             break;
    538         }
    539 
    540         if (type.isVector()) {
    541             int offset = type.getVectorSize() - 2;
    542             switch (type.getBasicType()) {
    543             case EbtFloat:      return GL_FLOAT_VEC2                  + offset;
    544             case EbtDouble:     return GL_DOUBLE_VEC2                 + offset;
    545 #ifdef AMD_EXTENSIONS
    546             case EbtFloat16:    return GL_FLOAT16_VEC2_NV             + offset;
    547 #endif
    548             case EbtInt:        return GL_INT_VEC2                    + offset;
    549             case EbtUint:       return GL_UNSIGNED_INT_VEC2           + offset;
    550             case EbtInt64:      return GL_INT64_ARB                   + offset;
    551             case EbtUint64:     return GL_UNSIGNED_INT64_ARB          + offset;
    552             case EbtBool:       return GL_BOOL_VEC2                   + offset;
    553             case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
    554             default:            return 0;
    555             }
    556         }
    557         if (type.isMatrix()) {
    558             switch (type.getBasicType()) {
    559             case EbtFloat:
    560                 switch (type.getMatrixCols()) {
    561                 case 2:
    562                     switch (type.getMatrixRows()) {
    563                     case 2:    return GL_FLOAT_MAT2;
    564                     case 3:    return GL_FLOAT_MAT2x3;
    565                     case 4:    return GL_FLOAT_MAT2x4;
    566                     default:   return 0;
    567                     }
    568                 case 3:
    569                     switch (type.getMatrixRows()) {
    570                     case 2:    return GL_FLOAT_MAT3x2;
    571                     case 3:    return GL_FLOAT_MAT3;
    572                     case 4:    return GL_FLOAT_MAT3x4;
    573                     default:   return 0;
    574                     }
    575                 case 4:
    576                     switch (type.getMatrixRows()) {
    577                     case 2:    return GL_FLOAT_MAT4x2;
    578                     case 3:    return GL_FLOAT_MAT4x3;
    579                     case 4:    return GL_FLOAT_MAT4;
    580                     default:   return 0;
    581                     }
    582                 }
    583             case EbtDouble:
    584                 switch (type.getMatrixCols()) {
    585                 case 2:
    586                     switch (type.getMatrixRows()) {
    587                     case 2:    return GL_DOUBLE_MAT2;
    588                     case 3:    return GL_DOUBLE_MAT2x3;
    589                     case 4:    return GL_DOUBLE_MAT2x4;
    590                     default:   return 0;
    591                     }
    592                 case 3:
    593                     switch (type.getMatrixRows()) {
    594                     case 2:    return GL_DOUBLE_MAT3x2;
    595                     case 3:    return GL_DOUBLE_MAT3;
    596                     case 4:    return GL_DOUBLE_MAT3x4;
    597                     default:   return 0;
    598                     }
    599                 case 4:
    600                     switch (type.getMatrixRows()) {
    601                     case 2:    return GL_DOUBLE_MAT4x2;
    602                     case 3:    return GL_DOUBLE_MAT4x3;
    603                     case 4:    return GL_DOUBLE_MAT4;
    604                     default:   return 0;
    605                     }
    606                 }
    607 #ifdef AMD_EXTENSIONS
    608             case EbtFloat16:
    609                 switch (type.getMatrixCols()) {
    610                 case 2:
    611                     switch (type.getMatrixRows()) {
    612                     case 2:    return GL_FLOAT16_MAT2_AMD;
    613                     case 3:    return GL_FLOAT16_MAT2x3_AMD;
    614                     case 4:    return GL_FLOAT16_MAT2x4_AMD;
    615                     default:   return 0;
    616                     }
    617                 case 3:
    618                     switch (type.getMatrixRows()) {
    619                     case 2:    return GL_FLOAT16_MAT3x2_AMD;
    620                     case 3:    return GL_FLOAT16_MAT3_AMD;
    621                     case 4:    return GL_FLOAT16_MAT3x4_AMD;
    622                     default:   return 0;
    623                     }
    624                 case 4:
    625                     switch (type.getMatrixRows()) {
    626                     case 2:    return GL_FLOAT16_MAT4x2_AMD;
    627                     case 3:    return GL_FLOAT16_MAT4x3_AMD;
    628                     case 4:    return GL_FLOAT16_MAT4_AMD;
    629                     default:   return 0;
    630                     }
    631                 }
    632 #endif
    633             default:
    634                 return 0;
    635             }
    636         }
    637         if (type.getVectorSize() == 1) {
    638             switch (type.getBasicType()) {
    639             case EbtFloat:      return GL_FLOAT;
    640             case EbtDouble:     return GL_DOUBLE;
    641 #ifdef AMD_EXTENSIONS
    642             case EbtFloat16:    return GL_FLOAT16_NV;
    643 #endif
    644             case EbtInt:        return GL_INT;
    645             case EbtUint:       return GL_UNSIGNED_INT;
    646             case EbtInt64:      return GL_INT64_ARB;
    647             case EbtUint64:     return GL_UNSIGNED_INT64_ARB;
    648             case EbtBool:       return GL_BOOL;
    649             case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
    650             default:            return 0;
    651             }
    652         }
    653 
    654         return 0;
    655     }
    656 
    657     int mapToGlArraySize(const TType& type)
    658     {
    659         return type.isArray() ? type.getOuterArraySize() : 1;
    660     }
    661 
    TReflection& reflection;                       // the reflection database this traverser populates (bound in addStage)
    std::set<const TIntermNode*> processedDerefs;  // dereference nodes already handled — presumably to avoid duplicates; populated outside this excerpt

protected:
    // Non-copyable: declared but not defined (pre-C++11 idiom for "= delete");
    // the traverser holds a reference member, so copying would be unsafe anyway.
    TReflectionTraverser(TReflectionTraverser&);
    TReflectionTraverser& operator=(TReflectionTraverser&);
};
    669 
    670 //
    671 // Implement the traversal functions of interest.
    672 //
    673 
    674 // To catch dereferenced aggregates that must be reflected.
    675 // This catches them at the highest level possible in the tree.
    676 bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
    677 {
    678     switch (node->getOp()) {
    679     case EOpIndexDirect:
    680     case EOpIndexIndirect:
    681     case EOpIndexDirectStruct:
    682         addDereferencedUniform(node);
    683         break;
    684     default:
    685         break;
    686     }
    687 
    688     // still need to visit everything below, which could contain sub-expressions
    689     // containing different uniforms
    690     return true;
    691 }
    692 
    693 // To reflect non-dereferenced objects.
    694 void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
    695 {
    696     if (base->getQualifier().storage == EvqUniform)
    697         addUniform(*base);
    698 
    699     if (intermediate.getStage() == EShLangVertex && base->getQualifier().isPipeInput())
    700         addAttribute(*base);
    701 }
    702 
    703 //
    704 // Implement TReflection methods.
    705 //
    706 
    707 // Track any required attribute reflection, such as compute shader numthreads.
    708 //
    709 void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate)
    710 {
    711     if (stage == EShLangCompute) {
    712         // Remember thread dimensions
    713         for (int dim=0; dim<3; ++dim)
    714             localSize[dim] = intermediate.getLocalSize(dim);
    715     }
    716 }
    717 
    718 // build counter block index associations for buffers
    719 void TReflection::buildCounterIndices()
    720 {
    721     // search for ones that have counters
    722     for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
    723         const TString counterName(indexToUniformBlock[i].name + "@count");
    724         const int index = getIndex(counterName);
    725 
    726         if (index >= 0)
    727             indexToUniformBlock[i].counterIndex = index;
    728     }
    729 }
    730 
    731 // Merge live symbols from 'intermediate' into the existing reflection database.
    732 //
    733 // Returns false if the input is too malformed to do this.
    734 bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
    735 {
    736     if (intermediate.getTreeRoot() == nullptr ||
    737         intermediate.getNumEntryPoints() != 1 ||
    738         intermediate.isRecursive())
    739         return false;
    740 
    741     buildAttributeReflection(stage, intermediate);
    742 
    743     TReflectionTraverser it(intermediate, *this);
    744 
    745     // put the entry point on the list of functions to process
    746     it.pushFunction(intermediate.getEntryPointMangledName().c_str());
    747 
    748     // process all the functions
    749     while (! it.functions.empty()) {
    750         TIntermNode* function = it.functions.back();
    751         it.functions.pop_back();
    752         function->traverse(&it);
    753     }
    754 
    755     buildCounterIndices();
    756 
    757     return true;
    758 }
    759 
    760 void TReflection::dump()
    761 {
    762     printf("Uniform reflection:\n");
    763     for (size_t i = 0; i < indexToUniform.size(); ++i)
    764         indexToUniform[i].dump();
    765     printf("\n");
    766 
    767     printf("Uniform block reflection:\n");
    768     for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
    769         indexToUniformBlock[i].dump();
    770     printf("\n");
    771 
    772     printf("Vertex attribute reflection:\n");
    773     for (size_t i = 0; i < indexToAttribute.size(); ++i)
    774         indexToAttribute[i].dump();
    775     printf("\n");
    776 
    777     if (getLocalSize(0) > 1) {
    778         static const char* axis[] = { "X", "Y", "Z" };
    779 
    780         for (int dim=0; dim<3; ++dim)
    781             if (getLocalSize(dim) > 1)
    782                 printf("Local size %s: %d\n", axis[dim], getLocalSize(dim));
    783 
    784         printf("\n");
    785     }
    786 
    787     // printf("Live names\n");
    788     // for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
    789     //    printf("%s: %d\n", it->first.c_str(), it->second);
    790     // printf("\n");
    791 }
    792 
    793 } // end namespace glslang
    794