// Home | History | Annotate | Download | only in opt
      1 // Copyright (c) 2017 The Khronos Group Inc.
      2 // Copyright (c) 2017 Valve Corporation
      3 // Copyright (c) 2017 LunarG Inc.
      4 //
      5 // Licensed under the Apache License, Version 2.0 (the "License");
      6 // you may not use this file except in compliance with the License.
      7 // You may obtain a copy of the License at
      8 //
      9 //     http://www.apache.org/licenses/LICENSE-2.0
     10 //
     11 // Unless required by applicable law or agreed to in writing, software
     12 // distributed under the License is distributed on an "AS IS" BASIS,
     13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     14 // See the License for the specific language governing permissions and
     15 // limitations under the License.
     16 
     17 #include "source/opt/common_uniform_elim_pass.h"
     18 #include "source/cfa.h"
     19 #include "source/opt/ir_context.h"
     20 
     21 namespace spvtools {
     22 namespace opt {
     23 
     24 namespace {
     25 
     26 const uint32_t kAccessChainPtrIdInIdx = 0;
     27 const uint32_t kTypePointerStorageClassInIdx = 0;
     28 const uint32_t kTypePointerTypeIdInIdx = 1;
     29 const uint32_t kConstantValueInIdx = 0;
     30 const uint32_t kExtractCompositeIdInIdx = 0;
     31 const uint32_t kExtractIdx0InIdx = 1;
     32 const uint32_t kStorePtrIdInIdx = 0;
     33 const uint32_t kLoadPtrIdInIdx = 0;
     34 const uint32_t kCopyObjectOperandInIdx = 0;
     35 const uint32_t kTypeIntWidthInIdx = 0;
     36 
     37 }  // anonymous namespace
     38 
     39 bool CommonUniformElimPass::IsNonPtrAccessChain(const SpvOp opcode) const {
     40   return opcode == SpvOpAccessChain || opcode == SpvOpInBoundsAccessChain;
     41 }
     42 
     43 bool CommonUniformElimPass::IsSamplerOrImageType(
     44     const Instruction* typeInst) const {
     45   switch (typeInst->opcode()) {
     46     case SpvOpTypeSampler:
     47     case SpvOpTypeImage:
     48     case SpvOpTypeSampledImage:
     49       return true;
     50     default:
     51       break;
     52   }
     53   if (typeInst->opcode() != SpvOpTypeStruct) return false;
     54   // Return true if any member is a sampler or image
     55   return !typeInst->WhileEachInId([this](const uint32_t* tid) {
     56     const Instruction* compTypeInst = get_def_use_mgr()->GetDef(*tid);
     57     if (IsSamplerOrImageType(compTypeInst)) {
     58       return false;
     59     }
     60     return true;
     61   });
     62 }
     63 
     64 bool CommonUniformElimPass::IsSamplerOrImageVar(uint32_t varId) const {
     65   const Instruction* varInst = get_def_use_mgr()->GetDef(varId);
     66   assert(varInst->opcode() == SpvOpVariable);
     67   const uint32_t varTypeId = varInst->type_id();
     68   const Instruction* varTypeInst = get_def_use_mgr()->GetDef(varTypeId);
     69   const uint32_t varPteTypeId =
     70       varTypeInst->GetSingleWordInOperand(kTypePointerTypeIdInIdx);
     71   Instruction* varPteTypeInst = get_def_use_mgr()->GetDef(varPteTypeId);
     72   return IsSamplerOrImageType(varPteTypeInst);
     73 }
     74 
// Returns the pointer instruction feeding the load or store |ip|, after
// stripping OpCopyObject aliases. On return, |*objId| is the id of the
// originating OpVariable or OpFunctionParameter reached by additionally
// walking through access chains; the returned instruction is the pointer
// before that access-chain traversal.
Instruction* CommonUniformElimPass::GetPtr(Instruction* ip, uint32_t* objId) {
  const SpvOp op = ip->opcode();
  assert(op == SpvOpStore || op == SpvOpLoad);
  // The pointer operand index differs between OpStore and OpLoad.
  *objId = ip->GetSingleWordInOperand(op == SpvOpStore ? kStorePtrIdInIdx
                                                       : kLoadPtrIdInIdx);
  Instruction* ptrInst = get_def_use_mgr()->GetDef(*objId);
  // Strip OpCopyObject aliases of the pointer itself.
  while (ptrInst->opcode() == SpvOpCopyObject) {
    *objId = ptrInst->GetSingleWordInOperand(kCopyObjectOperandInIdx);
    ptrInst = get_def_use_mgr()->GetDef(*objId);
  }
  // Walk through access chains (and any interleaved copies) until the
  // underlying variable or function parameter is reached.
  Instruction* objInst = ptrInst;
  while (objInst->opcode() != SpvOpVariable &&
         objInst->opcode() != SpvOpFunctionParameter) {
    if (IsNonPtrAccessChain(objInst->opcode())) {
      *objId = objInst->GetSingleWordInOperand(kAccessChainPtrIdInIdx);
    } else {
      assert(objInst->opcode() == SpvOpCopyObject);
      *objId = objInst->GetSingleWordInOperand(kCopyObjectOperandInIdx);
    }
    objInst = get_def_use_mgr()->GetDef(*objId);
  }
  return ptrInst;
}
     98 
     99 bool CommonUniformElimPass::IsVolatileStruct(uint32_t type_id) {
    100   assert(get_def_use_mgr()->GetDef(type_id)->opcode() == SpvOpTypeStruct);
    101   return !get_decoration_mgr()->WhileEachDecoration(
    102       type_id, SpvDecorationVolatile, [](const Instruction&) { return false; });
    103 }
    104 
// Returns true if |AccessChainInst| traverses, at any level, a struct type
// decorated Volatile. Walks the pointee type tree in parallel with the
// chain's index operands.
bool CommonUniformElimPass::IsAccessChainToVolatileStructType(
    const Instruction& AccessChainInst) {
  assert(AccessChainInst.opcode() == SpvOpAccessChain);

  uint32_t ptr_id = AccessChainInst.GetSingleWordInOperand(0);
  const Instruction* ptr_inst = get_def_use_mgr()->GetDef(ptr_id);
  uint32_t pointee_type_id = GetPointeeTypeId(ptr_inst);
  const uint32_t num_operands = AccessChainInst.NumOperands();

  // walk the type tree:
  // Operands 0-1 are the result type and id, operand 2 is the base pointer,
  // so the index operands begin at 3.
  for (uint32_t idx = 3; idx < num_operands; ++idx) {
    Instruction* pointee_type = get_def_use_mgr()->GetDef(pointee_type_id);

    switch (pointee_type->opcode()) {
      case SpvOpTypeMatrix:
      case SpvOpTypeVector:
      case SpvOpTypeArray:
      case SpvOpTypeRuntimeArray:
        // Homogeneous aggregates: the element type (operand 1) is the same
        // regardless of the index value.
        pointee_type_id = pointee_type->GetSingleWordOperand(1);
        break;
      case SpvOpTypeStruct:
        // check for volatile decorations:
        if (IsVolatileStruct(pointee_type_id)) return true;

        if (idx < num_operands - 1) {
          // Struct member selection requires the concrete index; word 2 of
          // the constant instruction holds its literal value.
          const uint32_t index_id = AccessChainInst.GetSingleWordOperand(idx);
          const Instruction* index_inst = get_def_use_mgr()->GetDef(index_id);
          uint32_t index_value = index_inst->GetSingleWordOperand(
              2);  // TODO: replace with GetUintValueFromConstant()
          pointee_type_id = pointee_type->GetSingleWordInOperand(index_value);
        }
        break;
      default:
        assert(false && "Unhandled pointee type.");
    }
  }
  return false;
}
    143 
    144 bool CommonUniformElimPass::IsVolatileLoad(const Instruction& loadInst) {
    145   assert(loadInst.opcode() == SpvOpLoad);
    146   // Check if this Load instruction has Volatile Memory Access flag
    147   if (loadInst.NumOperands() == 4) {
    148     uint32_t memory_access_mask = loadInst.GetSingleWordOperand(3);
    149     if (memory_access_mask & SpvMemoryAccessVolatileMask) return true;
    150   }
    151   // If we load a struct directly (result type is struct),
    152   // check if the struct is decorated volatile
    153   uint32_t type_id = loadInst.type_id();
    154   if (get_def_use_mgr()->GetDef(type_id)->opcode() == SpvOpTypeStruct)
    155     return IsVolatileStruct(type_id);
    156   else
    157     return false;
    158 }
    159 
    160 bool CommonUniformElimPass::IsUniformVar(uint32_t varId) {
    161   const Instruction* varInst =
    162       get_def_use_mgr()->id_to_defs().find(varId)->second;
    163   if (varInst->opcode() != SpvOpVariable) return false;
    164   const uint32_t varTypeId = varInst->type_id();
    165   const Instruction* varTypeInst =
    166       get_def_use_mgr()->id_to_defs().find(varTypeId)->second;
    167   return varTypeInst->GetSingleWordInOperand(kTypePointerStorageClassInIdx) ==
    168              SpvStorageClassUniform ||
    169          varTypeInst->GetSingleWordInOperand(kTypePointerStorageClassInIdx) ==
    170              SpvStorageClassUniformConstant;
    171 }
    172 
    173 bool CommonUniformElimPass::HasUnsupportedDecorates(uint32_t id) const {
    174   return !get_def_use_mgr()->WhileEachUser(id, [this](Instruction* user) {
    175     if (IsNonTypeDecorate(user->opcode())) return false;
    176     return true;
    177   });
    178 }
    179 
    180 bool CommonUniformElimPass::HasOnlyNamesAndDecorates(uint32_t id) const {
    181   return get_def_use_mgr()->WhileEachUser(id, [this](Instruction* user) {
    182     SpvOp op = user->opcode();
    183     if (op != SpvOpName && !IsNonTypeDecorate(op)) return false;
    184     return true;
    185   });
    186 }
    187 
    188 void CommonUniformElimPass::DeleteIfUseless(Instruction* inst) {
    189   const uint32_t resId = inst->result_id();
    190   assert(resId != 0);
    191   if (HasOnlyNamesAndDecorates(resId)) {
    192     context()->KillInst(inst);
    193   }
    194 }
    195 
    196 Instruction* CommonUniformElimPass::ReplaceAndDeleteLoad(Instruction* loadInst,
    197                                                          uint32_t replId,
    198                                                          Instruction* ptrInst) {
    199   const uint32_t loadId = loadInst->result_id();
    200   context()->KillNamesAndDecorates(loadId);
    201   (void)context()->ReplaceAllUsesWith(loadId, replId);
    202   // remove load instruction
    203   Instruction* next_instruction = context()->KillInst(loadInst);
    204   // if access chain, see if it can be removed as well
    205   if (IsNonPtrAccessChain(ptrInst->opcode())) DeleteIfUseless(ptrInst);
    206   return next_instruction;
    207 }
    208 
// Builds replacement code for a load through access chain |ptrInst|: an
// OpLoad of the whole underlying uniform variable followed by an
// OpCompositeExtract selecting the element the chain addressed. Both new
// instructions are appended to |newInsts|; the extract's result id is
// returned in |*resultId|.
void CommonUniformElimPass::GenACLoadRepl(
    const Instruction* ptrInst,
    std::vector<std::unique_ptr<Instruction>>* newInsts, uint32_t* resultId) {
  // Build and append Load
  const uint32_t ldResultId = TakeNextId();
  const uint32_t varId =
      ptrInst->GetSingleWordInOperand(kAccessChainPtrIdInIdx);
  const Instruction* varInst = get_def_use_mgr()->GetDef(varId);
  assert(varInst->opcode() == SpvOpVariable);
  const uint32_t varPteTypeId = GetPointeeTypeId(varInst);
  std::vector<Operand> load_in_operands;
  load_in_operands.push_back(Operand(spv_operand_type_t::SPV_OPERAND_TYPE_ID,
                                     std::initializer_list<uint32_t>{varId}));
  std::unique_ptr<Instruction> newLoad(new Instruction(
      context(), SpvOpLoad, varPteTypeId, ldResultId, load_in_operands));
  get_def_use_mgr()->AnalyzeInstDefUse(&*newLoad);
  newInsts->emplace_back(std::move(newLoad));

  // Build and append Extract
  const uint32_t extResultId = TakeNextId();
  const uint32_t ptrPteTypeId = GetPointeeTypeId(ptrInst);
  std::vector<Operand> ext_in_opnds;
  ext_in_opnds.push_back(Operand(spv_operand_type_t::SPV_OPERAND_TYPE_ID,
                                 std::initializer_list<uint32_t>{ldResultId}));
  uint32_t iidIdx = 0;
  // Skip in-operand 0 (the base pointer); each remaining in-operand is a
  // constant index whose literal value becomes an extract index.
  ptrInst->ForEachInId([&iidIdx, &ext_in_opnds, this](const uint32_t* iid) {
    if (iidIdx > 0) {
      const Instruction* cInst = get_def_use_mgr()->GetDef(*iid);
      uint32_t val = cInst->GetSingleWordInOperand(kConstantValueInIdx);
      ext_in_opnds.push_back(
          Operand(spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_INTEGER,
                  std::initializer_list<uint32_t>{val}));
    }
    ++iidIdx;
  });
  std::unique_ptr<Instruction> newExt(
      new Instruction(context(), SpvOpCompositeExtract, ptrPteTypeId,
                      extResultId, ext_in_opnds));
  get_def_use_mgr()->AnalyzeInstDefUse(&*newExt);
  newInsts->emplace_back(std::move(newExt));
  *resultId = extResultId;
}
    251 
    252 bool CommonUniformElimPass::IsConstantIndexAccessChain(Instruction* acp) {
    253   uint32_t inIdx = 0;
    254   return acp->WhileEachInId([&inIdx, this](uint32_t* tid) {
    255     if (inIdx > 0) {
    256       Instruction* opInst = get_def_use_mgr()->GetDef(*tid);
    257       if (opInst->opcode() != SpvOpConstant) return false;
    258     }
    259     ++inIdx;
    260     return true;
    261   });
    262 }
    263 
// Converts each load through a constant-indexed access chain of a uniform
// variable into a load of the whole variable plus an OpCompositeExtract,
// enabling later common-load elimination. Returns true if |func| was
// modified.
bool CommonUniformElimPass::UniformAccessChainConvert(Function* func) {
  bool modified = false;
  for (auto bi = func->begin(); bi != func->end(); ++bi) {
    for (Instruction* inst = &*bi->begin(); inst; inst = inst->NextNode()) {
      if (inst->opcode() != SpvOpLoad) continue;
      uint32_t varId;
      Instruction* ptrInst = GetPtr(inst, &varId);
      if (!IsNonPtrAccessChain(ptrInst->opcode())) continue;
      // Do not convert nested access chains
      if (ptrInst->GetSingleWordInOperand(kAccessChainPtrIdInIdx) != varId)
        continue;
      if (!IsUniformVar(varId)) continue;
      if (!IsConstantIndexAccessChain(ptrInst)) continue;
      // Decorations on the load or the chain would be lost by the rewrite.
      if (HasUnsupportedDecorates(inst->result_id())) continue;
      if (HasUnsupportedDecorates(ptrInst->result_id())) continue;
      // Volatile accesses must not be coalesced.
      if (IsVolatileLoad(*inst)) continue;
      if (IsAccessChainToVolatileStructType(*ptrInst)) continue;
      std::vector<std::unique_ptr<Instruction>> newInsts;
      uint32_t replId;
      GenACLoadRepl(ptrInst, &newInsts, &replId);
      // ReplaceAndDeleteLoad returns the instruction after the dead load;
      // the replacement sequence is inserted before it and iteration
      // continues from the inserted code.
      inst = ReplaceAndDeleteLoad(inst, replId, ptrInst);
      assert(inst->opcode() != SpvOpPhi);
      inst = inst->InsertBefore(std::move(newInsts));
      modified = true;
    }
  }
  return modified;
}
    292 
    293 void CommonUniformElimPass::ComputeStructuredSuccessors(Function* func) {
    294   block2structured_succs_.clear();
    295   for (auto& blk : *func) {
    296     // If header, make merge block first successor.
    297     uint32_t mbid = blk.MergeBlockIdIfAny();
    298     if (mbid != 0) {
    299       block2structured_succs_[&blk].push_back(cfg()->block(mbid));
    300       uint32_t cbid = blk.ContinueBlockIdIfAny();
    301       if (cbid != 0) {
    302         block2structured_succs_[&blk].push_back(cfg()->block(mbid));
    303       }
    304     }
    305     // add true successors
    306     const auto& const_blk = blk;
    307     const_blk.ForEachSuccessorLabel([&blk, this](const uint32_t sbid) {
    308       block2structured_succs_[&blk].push_back(cfg()->block(sbid));
    309     });
    310   }
    311 }
    312 
// Computes a structured traversal order of |func|'s blocks into |order| by
// running a depth-first traversal over the structured successor map and
// collecting blocks in reverse post-order (push_front at post-order time).
void CommonUniformElimPass::ComputeStructuredOrder(
    Function* func, std::list<BasicBlock*>* order) {
  // Compute structured successors and do DFS
  ComputeStructuredSuccessors(func);
  auto ignore_block = [](cbb_ptr) {};
  auto ignore_edge = [](cbb_ptr, cbb_ptr) {};
  auto get_structured_successors = [this](const BasicBlock* block) {
    return &(block2structured_succs_[block]);
  };
  // TODO(greg-lunarg): Get rid of const_cast by making moving const
  // out of the cfa.h prototypes and into the invoking code.
  auto post_order = [&](cbb_ptr b) {
    order->push_front(const_cast<BasicBlock*>(b));
  };

  order->clear();
  CFA<BasicBlock>::DepthFirstTraversal(&*func->begin(),
                                       get_structured_successors, ignore_block,
                                       post_order, ignore_edge);
}
    333 
// Eliminates redundant loads of non-sampler/image uniform variables across
// the whole function: the first dominating load of each variable is cached in
// |uniform2load_id_| and later loads are replaced by it. Loads first seen
// inside control flow are copied to the most recent dominating insertion
// point. Returns true if |func| was modified.
bool CommonUniformElimPass::CommonUniformLoadElimination(Function* func) {
  // Process all blocks in structured order. This is just one way (the
  // simplest?) to keep track of the most recent block outside of control
  // flow, used to copy common instructions, guaranteed to dominate all
  // following load sites.
  std::list<BasicBlock*> structuredOrder;
  ComputeStructuredOrder(func, &structuredOrder);
  uniform2load_id_.clear();
  bool modified = false;
  // Find insertion point in first block to copy non-dominating loads.
  auto insertItr = func->begin()->begin();
  while (insertItr->opcode() == SpvOpVariable ||
         insertItr->opcode() == SpvOpNop)
    ++insertItr;
  // Update insertItr until it will not be removed. Without this code,
  // ReplaceAndDeleteLoad() can set |insertItr| as a dangling pointer.
  while (IsUniformLoadToBeRemoved(&*insertItr)) ++insertItr;
  // Non-zero while inside the outermost control construct; holds the id of
  // that construct's merge block.
  uint32_t mergeBlockId = 0;
  for (auto bi = structuredOrder.begin(); bi != structuredOrder.end(); ++bi) {
    BasicBlock* bp = *bi;
    // Check if we are exiting outermost control construct. If so, remember
    // new load insertion point. Trying to keep register pressure down.
    if (mergeBlockId == bp->id()) {
      mergeBlockId = 0;
      insertItr = bp->begin();
      while (insertItr->opcode() == SpvOpPhi) {
        ++insertItr;
      }

      // Update insertItr until it will not be removed. Without this code,
      // ReplaceAndDeleteLoad() can set |insertItr| as a dangling pointer.
      while (IsUniformLoadToBeRemoved(&*insertItr)) ++insertItr;
    }
    for (Instruction* inst = &*bp->begin(); inst; inst = inst->NextNode()) {
      if (inst->opcode() != SpvOpLoad) continue;
      uint32_t varId;
      Instruction* ptrInst = GetPtr(inst, &varId);
      // Only direct loads of whole uniform variables are handled here;
      // access chains were already rewritten by UniformAccessChainConvert.
      if (ptrInst->opcode() != SpvOpVariable) continue;
      if (!IsUniformVar(varId)) continue;
      // Sampler/image uniforms are handled per-block by
      // CommonUniformLoadElimBlock.
      if (IsSamplerOrImageVar(varId)) continue;
      if (HasUnsupportedDecorates(inst->result_id())) continue;
      if (IsVolatileLoad(*inst)) continue;
      uint32_t replId;
      const auto uItr = uniform2load_id_.find(varId);
      if (uItr != uniform2load_id_.end()) {
        replId = uItr->second;
      } else {
        if (mergeBlockId == 0) {
          // Load is in dominating block; just remember it
          uniform2load_id_[varId] = inst->result_id();
          continue;
        } else {
          // Copy load into most recent dominating block and remember it
          replId = TakeNextId();
          std::unique_ptr<Instruction> newLoad(new Instruction(
              context(), SpvOpLoad, inst->type_id(), replId,
              {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {varId}}}));
          get_def_use_mgr()->AnalyzeInstDefUse(&*newLoad);
          insertItr = insertItr.InsertBefore(std::move(newLoad));
          ++insertItr;
          uniform2load_id_[varId] = replId;
        }
      }
      inst = ReplaceAndDeleteLoad(inst, replId, ptrInst);
      modified = true;
    }
    // If we are outside of any control construct and entering one, remember
    // the id of the merge block
    if (mergeBlockId == 0) {
      mergeBlockId = bp->MergeBlockIdIfAny();
    }
  }
  return modified;
}
    408 
// Eliminates redundant loads of sampler/image uniform variables within each
// single block (note the |!IsSamplerOrImageVar| skip below: this is the
// sampler/image counterpart of CommonUniformLoadElimination). The cache is
// reset per block, so no cross-block dominance reasoning is needed. Returns
// true if |func| was modified.
bool CommonUniformElimPass::CommonUniformLoadElimBlock(Function* func) {
  bool modified = false;
  for (auto& blk : *func) {
    // Per-block cache: varId -> id of the first load of that variable.
    uniform2load_id_.clear();
    for (Instruction* inst = &*blk.begin(); inst; inst = inst->NextNode()) {
      if (inst->opcode() != SpvOpLoad) continue;
      uint32_t varId;
      Instruction* ptrInst = GetPtr(inst, &varId);
      if (ptrInst->opcode() != SpvOpVariable) continue;
      if (!IsUniformVar(varId)) continue;
      // Only sampler/image uniforms are handled here.
      if (!IsSamplerOrImageVar(varId)) continue;
      if (HasUnsupportedDecorates(inst->result_id())) continue;
      if (IsVolatileLoad(*inst)) continue;
      uint32_t replId;
      const auto uItr = uniform2load_id_.find(varId);
      if (uItr != uniform2load_id_.end()) {
        replId = uItr->second;
      } else {
        // First load in this block: remember it and keep it.
        uniform2load_id_[varId] = inst->result_id();
        continue;
      }
      inst = ReplaceAndDeleteLoad(inst, replId, ptrInst);
      modified = true;
    }
  }
  return modified;
}
    436 
// Coalesces duplicate single-index OpCompositeExtracts of the same composite:
// first collects all such extracts per (composite id, index), then inserts
// one fresh extract right after each composite's definition and replaces all
// duplicates with it. Returns true if |func| was modified.
bool CommonUniformElimPass::CommonExtractElimination(Function* func) {
  // Find all composite ids with duplicate extracts.
  for (auto bi = func->begin(); bi != func->end(); ++bi) {
    for (auto ii = bi->begin(); ii != bi->end(); ++ii) {
      if (ii->opcode() != SpvOpCompositeExtract) continue;
      // TODO(greg-lunarg): Support multiple indices
      if (ii->NumInOperands() > 2) continue;
      if (HasUnsupportedDecorates(ii->result_id())) continue;
      uint32_t compId = ii->GetSingleWordInOperand(kExtractCompositeIdInIdx);
      uint32_t idx = ii->GetSingleWordInOperand(kExtractIdx0InIdx);
      comp2idx2inst_[compId][idx].push_back(&*ii);
    }
  }
  // For all defs of ids with duplicate extracts, insert new extracts
  // after def, and replace and delete old extracts
  bool modified = false;
  for (auto bi = func->begin(); bi != func->end(); ++bi) {
    for (auto ii = bi->begin(); ii != bi->end(); ++ii) {
      const auto cItr = comp2idx2inst_.find(ii->result_id());
      if (cItr == comp2idx2inst_.end()) continue;
      for (auto idxItr : cItr->second) {
        // Only coalesce when there are at least two extracts of this index.
        if (idxItr.second.size() < 2) continue;
        uint32_t replId = TakeNextId();
        std::unique_ptr<Instruction> newExtract(
            idxItr.second.front()->Clone(context()));
        newExtract->SetResultId(replId);
        get_def_use_mgr()->AnalyzeInstDefUse(&*newExtract);
        // Advance past the composite's definition, then insert the clone
        // before the following instruction, i.e. immediately after the def.
        ++ii;
        ii = ii.InsertBefore(std::move(newExtract));
        for (auto instItr : idxItr.second) {
          uint32_t resId = instItr->result_id();
          context()->KillNamesAndDecorates(resId);
          (void)context()->ReplaceAllUsesWith(resId, replId);
          context()->KillInst(instItr);
        }
        modified = true;
      }
    }
  }
  return modified;
}
    478 
    479 bool CommonUniformElimPass::EliminateCommonUniform(Function* func) {
    480   bool modified = false;
    481   modified |= UniformAccessChainConvert(func);
    482   modified |= CommonUniformLoadElimination(func);
    483   modified |= CommonExtractElimination(func);
    484 
    485   modified |= CommonUniformLoadElimBlock(func);
    486   return modified;
    487 }
    488 
    489 void CommonUniformElimPass::Initialize() {
    490   // Clear collections.
    491   comp2idx2inst_.clear();
    492 
    493   // Initialize extension whitelist
    494   InitExtensions();
    495 }
    496 
    497 bool CommonUniformElimPass::AllExtensionsSupported() const {
    498   // If any extension not in whitelist, return false
    499   for (auto& ei : get_module()->extensions()) {
    500     const char* extName =
    501         reinterpret_cast<const char*>(&ei.GetInOperand(0).words[0]);
    502     if (extensions_whitelist_.find(extName) == extensions_whitelist_.end())
    503       return false;
    504   }
    505   return true;
    506 }
    507 
// Pass driver: bails out with SuccessWithoutChange on any module this pass
// cannot safely handle, then runs EliminateCommonUniform on every function
// reachable from an entry point.
Pass::Status CommonUniformElimPass::ProcessImpl() {
  // Assumes all control flow structured.
  // TODO(greg-lunarg): Do SSA rewrite for non-structured control flow
  if (!context()->get_feature_mgr()->HasCapability(SpvCapabilityShader))
    return Status::SuccessWithoutChange;
  // Assumes logical addressing only
  // TODO(greg-lunarg): Add support for physical addressing
  if (context()->get_feature_mgr()->HasCapability(SpvCapabilityAddresses))
    return Status::SuccessWithoutChange;
  // Do not process if any disallowed extensions are enabled
  if (!AllExtensionsSupported()) return Status::SuccessWithoutChange;
  // Do not process if module contains OpGroupDecorate. Additional
  // support required in KillNamesAndDecorates().
  // TODO(greg-lunarg): Add support for OpGroupDecorate
  for (auto& ai : get_module()->annotations())
    if (ai.opcode() == SpvOpGroupDecorate) return Status::SuccessWithoutChange;
  // If non-32-bit integer type in module, terminate processing
  // TODO(): Handle non-32-bit integer constants in access chains
  for (const Instruction& inst : get_module()->types_values())
    if (inst.opcode() == SpvOpTypeInt &&
        inst.GetSingleWordInOperand(kTypeIntWidthInIdx) != 32)
      return Status::SuccessWithoutChange;
  // Process entry point functions
  ProcessFunction pfn = [this](Function* fp) {
    return EliminateCommonUniform(fp);
  };
  bool modified = context()->ProcessEntryPointCallTree(pfn);
  return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
}
    537 
// Trivial constructor; per-run state is set up in Initialize().
CommonUniformElimPass::CommonUniformElimPass() = default;
    539 
    540 Pass::Status CommonUniformElimPass::Process() {
    541   Initialize();
    542   return ProcessImpl();
    543 }
    544 
// Populates |extensions_whitelist_| with the extensions this pass is known to
// handle; AllExtensionsSupported() rejects modules declaring anything else.
void CommonUniformElimPass::InitExtensions() {
  extensions_whitelist_.clear();
  extensions_whitelist_.insert({
      "SPV_AMD_shader_explicit_vertex_parameter",
      "SPV_AMD_shader_trinary_minmax",
      "SPV_AMD_gcn_shader",
      "SPV_KHR_shader_ballot",
      "SPV_AMD_shader_ballot",
      "SPV_AMD_gpu_shader_half_float",
      "SPV_KHR_shader_draw_parameters",
      "SPV_KHR_subgroup_vote",
      "SPV_KHR_16bit_storage",
      "SPV_KHR_device_group",
      "SPV_KHR_multiview",
      "SPV_NVX_multiview_per_view_attributes",
      "SPV_NV_viewport_array2",
      "SPV_NV_stereo_view_rendering",
      "SPV_NV_sample_mask_override_coverage",
      "SPV_NV_geometry_shader_passthrough",
      "SPV_AMD_texture_gather_bias_lod",
      "SPV_KHR_storage_buffer_storage_class",
      // SPV_KHR_variable_pointers
      //   Currently do not support extended pointer expressions
      "SPV_AMD_gpu_shader_int16",
      "SPV_KHR_post_depth_coverage",
      "SPV_KHR_shader_atomic_counter_ops",
      "SPV_EXT_shader_stencil_export",
      "SPV_EXT_shader_viewport_index_layer",
      "SPV_AMD_shader_image_load_store_lod",
      "SPV_AMD_shader_fragment_mask",
      "SPV_EXT_fragment_fully_covered",
      "SPV_AMD_gpu_shader_half_float_fetch",
      "SPV_GOOGLE_decorate_string",
      "SPV_GOOGLE_hlsl_functionality1",
      "SPV_NV_shader_subgroup_partitioned",
      "SPV_EXT_descriptor_indexing",
      "SPV_NV_fragment_shader_barycentric",
      "SPV_NV_compute_shader_derivatives",
      "SPV_NV_shader_image_footprint",
      "SPV_NV_shading_rate",
      "SPV_NV_mesh_shader",
      "SPV_NV_ray_tracing",
      "SPV_EXT_fragment_invocation_density",
  });
}
    590 
    591 }  // namespace opt
    592 }  // namespace spvtools
    593