//
//Copyright (C) 2013 LunarG, Inc.
//
//All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
//

#include "../Include/Common.h"
#include "reflection.h"
#include "localintermediate.h"

#include "gl_types.h"

//
// Grow the reflection database through a friend traverser class of TReflection and a
// collection of functions that do a liveness traversal, noting which uniforms are used
// in semantically non-dead code.
//
// Can be used multiple times, once per stage, to grow a program reflection.
//
// High-level algorithm for one stage:
//
// 1. Put main() on the list of live functions.
//
// 2. Traverse any live function, skipping if-tests with a compile-time constant
//    condition of false, and adding any encountered function calls to the live
//    function list.
//
//    Repeat until the live function list is empty.
//
// 3. Add any encountered uniform variables and blocks to the reflection database.
//
// Can be attempted even after a failed link, but will return false if recursion was
// detected or there wasn't exactly one main.
//
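// A minimal usage sketch, assuming the caller already has one linked TIntermediate
// per stage ('vertexIntermediate' and 'fragmentIntermediate' are illustrative names,
// not defined in this file):
//
//     glslang::TReflection reflection;
//     reflection.addStage(EShLangVertex, *vertexIntermediate);
//     reflection.addStage(EShLangFragment, *fragmentIntermediate);
//     reflection.dump();
//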

namespace glslang {

//
// The traverser: mostly pass through, except for
//  - processing function-call nodes to push live functions onto the stack of functions to process
//  - processing binary nodes to see if they are dereferences of aggregates to track
//  - processing symbol nodes to see if they are non-aggregate objects to track
//  - processing selection nodes to trim semantically dead code
//
// This is in the glslang namespace directly so it can be a friend of TReflection.
//

class TLiveTraverser : public TIntermTraverser {
public:
    TLiveTraverser(const TIntermediate& i, TReflection& r) : intermediate(i), reflection(r) { }

    virtual bool visitAggregate(TVisit, TIntermAggregate* node);
    virtual bool visitBinary(TVisit, TIntermBinary* node);
    virtual void visitSymbol(TIntermSymbol* base);
    virtual bool visitSelection(TVisit, TIntermSelection* node);

    // Track live functions as well as uniforms, so that we don't visit dead functions
    // and only visit each function once.
    void addFunctionCall(TIntermAggregate* call)
    {
        // just use the map to ensure we process each function at most once
        if (reflection.nameToIndex.find(call->getName()) == reflection.nameToIndex.end()) {
            reflection.nameToIndex[call->getName()] = -1;
            pushFunction(call->getName());
        }
    }

    // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
    // However, no dereference doesn't mean simple... it could be a complex aggregate.
    void addUniform(const TIntermSymbol& base)
    {
        if (processedDerefs.find(&base) == processedDerefs.end()) {
            processedDerefs.insert(&base);

            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
            // the dereference chain expected by blowUpActiveAggregate.
            TList<TIntermBinary*> derefs;
            blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0);
        }
    }

    void addAttribute(const TIntermSymbol& base)
    {
        if (processedDerefs.find(&base) == processedDerefs.end()) {
            processedDerefs.insert(&base);

            const TString &name = base.getName();
            const TType &type = base.getType();

            TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
            if (it == reflection.nameToIndex.end()) {
                reflection.nameToIndex[name] = (int)reflection.indexToAttribute.size();
                reflection.indexToAttribute.push_back(TObjectReflection(name, 0, mapToGlType(type), 0, 0));
            }
        }
    }

    // Look up or calculate the offset of a block member, using the recursively
    // defined block offset rules.
    int getOffset(const TType& type, int index)
    {
        const TTypeList& memberList = *type.getStruct();

        // Don't calculate the offset if one is already present; it could be user supplied
        // and different from what would be calculated.  That is, this is faster,
        // but not just an optimization.
        if (memberList[index].type->getQualifier().hasOffset())
            return memberList[index].type->getQualifier().layoutOffset;

        int memberSize;
        int dummyStride;
        int offset = 0;
        for (int m = 0; m <= index; ++m) {
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = intermediate.getBaseAlignment(*memberList[m].type, memberSize, dummyStride, type.getQualifier().layoutPacking == ElpStd140,
                                                                subMatrixLayout != ElmNone ? subMatrixLayout == ElmRowMajor : type.getQualifier().layoutMatrix == ElmRowMajor);
            RoundToPow2(offset, memberAlignment);
            if (m < index)
                offset += memberSize;
        }

        return offset;
    }
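
    // For illustration, under std140 rules a block { float a; vec3 b; } gives
    // 'a' offset 0 and 'b' offset 16, because a vec3's base alignment is 16
    // bytes; the RoundToPow2() call above performs exactly that rounding
    // before each member is placed.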

    // Calculate the block data size.
    // Block arrayness is not taken into account; each element is backed by a separate buffer.
    int getBlockSize(const TType& blockType)
    {
        const TTypeList& memberList = *blockType.getStruct();
        int lastIndex = (int)memberList.size() - 1;
        int lastOffset = getOffset(blockType, lastIndex);

        int lastMemberSize;
        int dummyStride;
        intermediate.getBaseAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride, blockType.getQualifier().layoutPacking == ElpStd140,
                                      blockType.getQualifier().layoutMatrix == ElmRowMajor);

        return lastOffset + lastMemberSize;
    }
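
    // For example, under std140 a block { vec3 a; float b; } has size 16:
    // the last member 'b' lands at offset 12 (packed into the vec3's tail
    // padding) and contributes 4 more bytes, so lastOffset + lastMemberSize = 16.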

    // Traverse the provided deref chain, including the base, and
    // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
    // - recursively expand any variable array index in the middle of that traversal
    // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
    //
    // arraySize tracks, for just the final dereference in the chain, whether there was a specific known size.
    // A value of 0 for arraySize means to use the full array's size.
    void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
                               TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize)
    {
        // process the part of the dereference chain that was explicit in the shader
        TString name = baseName;
        const TType* terminalType = &baseType;
        for (; deref != derefs.end(); ++deref) {
            TIntermBinary* visitNode = *deref;
            terminalType = &visitNode->getType();
            int index;
            switch (visitNode->getOp()) {
            case EOpIndexIndirect:
                // Visit all the indices of this array, and for each one add on the remaining dereferencing
                for (int i = 0; i < visitNode->getLeft()->getType().getOuterArraySize(); ++i) {
                    TString newBaseName = name;
                    if (baseType.getBasicType() != EbtBlock)
                        newBaseName.append(TString("[") + String(i) + "]");
                    TList<TIntermBinary*>::const_iterator nextDeref = deref;
                    ++nextDeref;
                    TType derefType(*terminalType, 0);
                    blowUpActiveAggregate(derefType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize);
                }

                // it was all completed in the recursive calls above
                return;
            case EOpIndexDirect:
                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (baseType.getBasicType() != EbtBlock)
                    name.append(TString("[") + String(index) + "]");
                break;
            case EOpIndexDirectStruct:
                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
                if (offset >= 0)
                    offset += getOffset(visitNode->getLeft()->getType(), index);
                if (name.size() > 0)
                    name.append(".");
                name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
                break;
            default:
                break;
            }
        }

        // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, so expand it...
        if (! isReflectionGranularity(*terminalType)) {
            if (terminalType->isArray()) {
                // Visit all the indices of this array, and for each one,
                // fully explode the remaining aggregate to dereference
                for (int i = 0; i < terminalType->getOuterArraySize(); ++i) {
                    TString newBaseName = name;
                    newBaseName.append(TString("[") + String(i) + "]");
                    TType derefType(*terminalType, 0);
                    blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
                }
            } else {
                // Visit all members of this aggregate, and for each one,
                // fully explode the remaining aggregate to dereference
                const TTypeList& typeList = *terminalType->getStruct();
                for (int i = 0; i < (int)typeList.size(); ++i) {
                    TString newBaseName = name;
                    newBaseName.append(TString(".") + typeList[i].type->getFieldName());
                    TType derefType(*terminalType, i);
                    blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
                }
            }

            // it was all completed in the recursive calls above
            return;
        }

        // Finally, add a full string to the reflection database, and update the array size if necessary.
        // If the dereferenced entity to record is an array, compute the size and update the maximum size.

        // there might not be a final array dereference, it could have been copied as an array object
        if (arraySize == 0)
            arraySize = mapToGlArraySize(*terminalType);

        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
        if (it == reflection.nameToIndex.end()) {
            reflection.nameToIndex[name] = (int)reflection.indexToUniform.size();
            reflection.indexToUniform.push_back(TObjectReflection(name, offset, mapToGlType(*terminalType), arraySize, blockIndex));
        } else if (arraySize > 1) {
            int& reflectedArraySize = reflection.indexToUniform[it->second].size;
            reflectedArraySize = std::max(arraySize, reflectedArraySize);
        }
    }
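
    // For illustration (an assumed shader, not one from this file): a live
    //     uniform struct { vec3 pos; float cutoff; } light;
    // is not at reflection granularity, so the struct branch above explodes it
    // into the two reflected entries "light.pos" and "light.cutoff".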

    // Add a uniform dereference where blocks/struct/arrays are involved in the access.
    // Handles the situation where the left node is at the correct granularity, or too
    // coarse a granularity, for reflection.  (That is, further dereferences up the tree
    // will be skipped.) Earlier dereferences, down the tree, will be handled
    // at the same time, and logged to prevent reprocessing as the tree is traversed.
    //
    // Note: Other things like the following must be caught elsewhere:
    //  - a simple non-array, non-struct variable (no dereference even conceivable)
    //  - an aggregate consumed en masse, without a dereference
    //
    // So, this code is for cases like
    //   - a struct/block dereferencing a member (whether the member is an array or not)
    //   - an array of struct
    //   - structs/arrays containing the above
    //
    void addDereferencedUniform(TIntermBinary* topNode)
    {
        // See if this is too fine-grained to process here (if so, wait to get further down the tree)
        const TType& leftType = topNode->getLeft()->getType();
        if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
            return;

        // We have an array, structure, or block dereference; see if it's a uniform-based
        // dereference (if not, skip it).
        TIntermSymbol* base = findBase(topNode);
        if (! base || ! base->getQualifier().isUniformOrBuffer())
            return;

        // See if we've already processed this (e.g., in the middle of something
        // we did earlier), and if so skip it
        if (processedDerefs.find(topNode) != processedDerefs.end())
            return;

        // Process this uniform dereference

        int offset = -1;
        int blockIndex = -1;
        bool anonymous = false;

        // See if we need to record the block itself
        bool block = base->getBasicType() == EbtBlock;
        if (block) {
            offset = 0;
            anonymous = IsAnonymous(base->getName());
            if (base->getType().isArray()) {
                assert(! anonymous);
                for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
                    blockIndex = addBlockName(base->getType().getTypeName() + "[" + String(e) + "]", getBlockSize(base->getType()));
            } else
                blockIndex = addBlockName(base->getType().getTypeName(), getBlockSize(base->getType()));
        }

        // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
        // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
        TList<TIntermBinary*> derefs;
        for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
            if (isReflectionGranularity(visitNode->getLeft()->getType()))
                continue;

            derefs.push_front(visitNode);
            processedDerefs.insert(visitNode);
        }
        processedDerefs.insert(base);

        // See if we have a specific array size to stick to while enumerating the explosion of the aggregate
        int arraySize = 0;
        if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
            if (topNode->getOp() == EOpIndexDirect)
                arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
        }

        // Put the dereference chain together, forward
        TString baseName;
        if (! anonymous) {
            if (block)
                baseName = base->getType().getTypeName();
            else
                baseName = base->getName();
        }
        blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize);
    }

    int addBlockName(const TString& name, int size)
    {
        int blockIndex;
        TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
        if (it == reflection.nameToIndex.end()) {
            blockIndex = (int)reflection.indexToUniformBlock.size();
            reflection.nameToIndex[name] = blockIndex;
            reflection.indexToUniformBlock.push_back(TObjectReflection(name, -1, -1, size, -1));
        } else
            blockIndex = it->second;

        return blockIndex;
    }

    //
    // Given a function name, find its subroot in the tree, and push it onto the stack of
    // functions left to process.
    //
    void pushFunction(const TString& name)
    {
        TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
        for (unsigned int f = 0; f < globals.size(); ++f) {
            TIntermAggregate* candidate = globals[f]->getAsAggregate();
            if (candidate && candidate->getOp() == EOpFunction && candidate->getName() == name) {
                functions.push_back(candidate);
                break;
            }
        }
    }

    // Are we at a level in a dereference chain at which individual active uniform queries are made?
    bool isReflectionGranularity(const TType& type)
    {
        return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct;
    }

    // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
    // Return nullptr if the topology does not fit this situation.
    TIntermSymbol* findBase(const TIntermBinary* node)
    {
        TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
        if (base)
            return base;
        TIntermBinary* left = node->getLeft()->getAsBinaryNode();
        if (! left)
            return nullptr;

        return findBase(left);
    }

    //
    // Translate a glslang sampler type into the GL API #define number.
    //
    int mapSamplerToGlType(TSampler sampler)
    {
        if (! sampler.image) {
            // a sampler...
            switch (sampler.type) {
            case EbtFloat:
                switch ((int)sampler.dim) {
                case Esd1D:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
                    case true:  return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
                    }
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:
                        switch ((int)sampler.shadow) {
                        case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
                        case true:  return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
                        }
                    case true:      return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_SAMPLER_3D;
                case EsdCube:
                    switch ((int)sampler.shadow) {
                    case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
                    case true:  return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
                    }
                case EsdRect:
                    return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_SAMPLER_BUFFER;
                }
            case EbtInt:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:  return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D;
                    case true:   return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_INT_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_INT_SAMPLER_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE;
                case EsdRect:
                    return GL_INT_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_INT_SAMPLER_BUFFER;
                }
            case EbtUint:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:  return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D;
                    case true:   return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_UNSIGNED_INT_SAMPLER_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE;
                case EsdRect:
                    return GL_UNSIGNED_INT_SAMPLER_2D_RECT;
                case EsdBuffer:
                    return GL_UNSIGNED_INT_SAMPLER_BUFFER;
                }
            default:
                return 0;
            }
        } else {
            // an image...
            switch (sampler.type) {
            case EbtFloat:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:     return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D;
                    case true:      return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE;
                case EsdRect:
                    return GL_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_IMAGE_BUFFER;
                }
            case EbtInt:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:  return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D;
                    case true:   return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_INT_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE;
                case EsdRect:
                    return GL_INT_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_INT_IMAGE_BUFFER;
                }
            case EbtUint:
                switch ((int)sampler.dim) {
                case Esd1D:
                    return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D;
                case Esd2D:
                    switch ((int)sampler.ms) {
                    case false:  return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D;
                    case true:   return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE;
                    }
                case Esd3D:
                    return GL_UNSIGNED_INT_IMAGE_3D;
                case EsdCube:
                    return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE;
                case EsdRect:
                    return GL_UNSIGNED_INT_IMAGE_2D_RECT;
                case EsdBuffer:
                    return GL_UNSIGNED_INT_IMAGE_BUFFER;
                }
            default:
                return 0;
            }
        }
    }
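
    // For example, a GLSL 'usampler2DArray' (EbtUint, Esd2D, arrayed, non-multisample)
    // maps to GL_UNSIGNED_INT_SAMPLER_2D_ARRAY via the EbtUint/Esd2D cases above.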

    //
    // Translate a glslang type into the GL API #define number.
    // Ignores arrayness.
    //
    int mapToGlType(const TType& type)
    {
        switch (type.getBasicType()) {
        case EbtSampler:
            return mapSamplerToGlType(type.getSampler());
        case EbtStruct:
        case EbtBlock:
        case EbtVoid:
            return 0;
        default:
            break;
        }

        if (type.isVector()) {
            int offset = type.getVectorSize() - 2;
            switch (type.getBasicType()) {
            case EbtFloat:      return GL_FLOAT_VEC2                  + offset;
            case EbtDouble:     return GL_DOUBLE_VEC2                 + offset;
            case EbtInt:        return GL_INT_VEC2                    + offset;
            case EbtUint:       return GL_UNSIGNED_INT_VEC2           + offset;
            case EbtInt64:      return GL_INT64_VEC2_ARB              + offset;
            case EbtUint64:     return GL_UNSIGNED_INT64_VEC2_ARB     + offset;
            case EbtBool:       return GL_BOOL_VEC2                   + offset;
            case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
            default:            return 0;
            }
        }
        if (type.isMatrix()) {
            switch (type.getBasicType()) {
            case EbtFloat:
                switch (type.getMatrixCols()) {
                case 2:
                    switch (type.getMatrixRows()) {
                    case 2:    return GL_FLOAT_MAT2;
                    case 3:    return GL_FLOAT_MAT2x3;
                    case 4:    return GL_FLOAT_MAT2x4;
                    default:   return 0;
                    }
                case 3:
                    switch (type.getMatrixRows()) {
                    case 2:    return GL_FLOAT_MAT3x2;
                    case 3:    return GL_FLOAT_MAT3;
                    case 4:    return GL_FLOAT_MAT3x4;
                    default:   return 0;
                    }
                case 4:
                    switch (type.getMatrixRows()) {
                    case 2:    return GL_FLOAT_MAT4x2;
                    case 3:    return GL_FLOAT_MAT4x3;
                    case 4:    return GL_FLOAT_MAT4;
                    default:   return 0;
                    }
                }
            case EbtDouble:
                switch (type.getMatrixCols()) {
                case 2:
                    switch (type.getMatrixRows()) {
                    case 2:    return GL_DOUBLE_MAT2;
                    case 3:    return GL_DOUBLE_MAT2x3;
                    case 4:    return GL_DOUBLE_MAT2x4;
                    default:   return 0;
                    }
                case 3:
                    switch (type.getMatrixRows()) {
                    case 2:    return GL_DOUBLE_MAT3x2;
                    case 3:    return GL_DOUBLE_MAT3;
                    case 4:    return GL_DOUBLE_MAT3x4;
                    default:   return 0;
                    }
                case 4:
                    switch (type.getMatrixRows()) {
                    case 2:    return GL_DOUBLE_MAT4x2;
                    case 3:    return GL_DOUBLE_MAT4x3;
                    case 4:    return GL_DOUBLE_MAT4;
                    default:   return 0;
                    }
                }
            default:
                return 0;
            }
        }
        if (type.getVectorSize() == 1) {
            switch (type.getBasicType()) {
            case EbtFloat:      return GL_FLOAT;
            case EbtDouble:     return GL_DOUBLE;
            case EbtInt:        return GL_INT;
            case EbtUint:       return GL_UNSIGNED_INT;
            case EbtInt64:      return GL_INT64_ARB;
            case EbtUint64:     return GL_UNSIGNED_INT64_ARB;
            case EbtBool:       return GL_BOOL;
            case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
            default:            return 0;
            }
        }

        return 0;
    }

    int mapToGlArraySize(const TType& type)
    {
        return type.isArray() ? type.getOuterArraySize() : 1;
    }

    typedef std::list<TIntermAggregate*> TFunctionStack;
    TFunctionStack functions;
    const TIntermediate& intermediate;
    TReflection& reflection;
    std::set<const TIntermNode*> processedDerefs;

protected:
    TLiveTraverser(TLiveTraverser&);
    TLiveTraverser& operator=(TLiveTraverser&);
};

//
// Implement the traversal functions of interest.
//

// To catch which function calls are not dead, and hence which functions must be visited.
bool TLiveTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
{
    if (node->getOp() == EOpFunctionCall)
        addFunctionCall(node);

    return true; // traverse this subtree
}

// To catch dereferenced aggregates that must be reflected.
// This catches them at the highest level possible in the tree.
bool TLiveTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
{
    switch (node->getOp()) {
    case EOpIndexDirect:
    case EOpIndexIndirect:
    case EOpIndexDirectStruct:
        addDereferencedUniform(node);
        break;
    default:
        break;
    }

    // still need to visit everything below, which could contain sub-expressions
    // containing different uniforms
    return true;
}

// To reflect non-dereferenced objects.
void TLiveTraverser::visitSymbol(TIntermSymbol* base)
{
    if (base->getQualifier().storage == EvqUniform)
        addUniform(*base);

    if (intermediate.getStage() == EShLangVertex && base->getQualifier().isPipeInput())
        addAttribute(*base);
}

// To prune semantically dead paths.
bool TLiveTraverser::visitSelection(TVisit /* visit */, TIntermSelection* node)
{
    TIntermConstantUnion* constant = node->getCondition()->getAsConstantUnion();
    if (constant) {
        // cull the dead path by traversing only the live one
        if (constant->getConstArray()[0].getBConst() == true && node->getTrueBlock())
            node->getTrueBlock()->traverse(this);
        if (constant->getConstArray()[0].getBConst() == false && node->getFalseBlock())
            node->getFalseBlock()->traverse(this);

        return false; // don't traverse any more, we did it all above
    } else
        return true; // traverse the whole subtree
}
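
// For illustration (an assumed shader, not one from this file): given
//     if (false) { color = texture(deadSampler, uv); }
// the compile-time false condition makes visitSelection() skip the true block,
// so 'deadSampler' is never added to the reflection database.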

//
// Implement TReflection methods.
//

// Merge live symbols from 'intermediate' into the existing reflection database.
//
// Returns false if the input is too malformed to do this.
bool TReflection::addStage(EShLanguage, const TIntermediate& intermediate)
{
    if (intermediate.getNumMains() != 1 || intermediate.isRecursive())
        return false;

    TLiveTraverser it(intermediate, *this);

    // put main() on the list of functions to process ("main(" is its mangled name)
    it.pushFunction("main(");

    // process all the functions
    while (! it.functions.empty()) {
        TIntermNode* function = it.functions.back();
        it.functions.pop_back();
        function->traverse(&it);
    }

    return true;
}

void TReflection::dump()
{
    printf("Uniform reflection:\n");
    for (size_t i = 0; i < indexToUniform.size(); ++i)
        indexToUniform[i].dump();
    printf("\n");

    printf("Uniform block reflection:\n");
    for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
        indexToUniformBlock[i].dump();
    printf("\n");

    printf("Vertex attribute reflection:\n");
    for (size_t i = 0; i < indexToAttribute.size(); ++i)
        indexToAttribute[i].dump();
    printf("\n");

    //printf("Live names\n");
    //for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
    //    printf("%s: %d\n", it->first.c_str(), it->second);
    //printf("\n");
}

} // end namespace glslang