// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <Pipeline/SpirvShader.hpp>
#include "VkPipeline.hpp"
#include "VkShaderModule.hpp"

namespace
{

sw::DrawType Convert(VkPrimitiveTopology topology)
{
	switch(topology)
	{
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
		return sw::DRAW_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
		return sw::DRAW_LINELIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
		return sw::DRAW_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
		return sw::DRAW_TRIANGLELIST;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
		return sw::DRAW_TRIANGLESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
		return sw::DRAW_TRIANGLEFAN;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		// geometry shader specific
		ASSERT(false);
		break;
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		// tessellation shader specific
		ASSERT(false);
		break;
	default:
		UNIMPLEMENTED();
	}

	return sw::DRAW_TRIANGLELIST;
}

sw::Rect Convert(const VkRect2D& rect)
{
	return sw::Rect(rect.offset.x, rect.offset.y, rect.offset.x + rect.extent.width, rect.offset.y + rect.extent.height);
}

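// Maps a Vulkan vertex attribute format to the renderer's stream element type.
// Formats not listed here are not yet supported and fall through to UNIMPLEMENTED().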
sw::StreamType getStreamType(VkFormat format)
{
	switch(format)
	{
	case VK_FORMAT_R8_UNORM:
	case VK_FORMAT_R8G8_UNORM:
	case VK_FORMAT_R8G8B8A8_UNORM:
	case VK_FORMAT_R8_UINT:
	case VK_FORMAT_R8G8_UINT:
	case VK_FORMAT_R8G8B8A8_UINT:
	case VK_FORMAT_B8G8R8A8_UNORM:
	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
		return sw::STREAMTYPE_BYTE;
	case VK_FORMAT_R8_SNORM:
	case VK_FORMAT_R8_SINT:
	case VK_FORMAT_R8G8_SNORM:
	case VK_FORMAT_R8G8_SINT:
	case VK_FORMAT_R8G8B8A8_SNORM:
	case VK_FORMAT_R8G8B8A8_SINT:
	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
		return sw::STREAMTYPE_SBYTE;
	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
		return sw::STREAMTYPE_2_10_10_10_UINT;
	case VK_FORMAT_R16_UNORM:
	case VK_FORMAT_R16_UINT:
	case VK_FORMAT_R16G16_UNORM:
	case VK_FORMAT_R16G16_UINT:
	case VK_FORMAT_R16G16B16A16_UNORM:
	case VK_FORMAT_R16G16B16A16_UINT:
		return sw::STREAMTYPE_USHORT;
	case VK_FORMAT_R16_SNORM:
	case VK_FORMAT_R16_SINT:
	case VK_FORMAT_R16G16_SNORM:
	case VK_FORMAT_R16G16_SINT:
	case VK_FORMAT_R16G16B16A16_SNORM:
	case VK_FORMAT_R16G16B16A16_SINT:
		return sw::STREAMTYPE_SHORT;
	case VK_FORMAT_R16_SFLOAT:
	case VK_FORMAT_R16G16_SFLOAT:
	case VK_FORMAT_R16G16B16A16_SFLOAT:
		return sw::STREAMTYPE_HALF;
	case VK_FORMAT_R32_UINT:
	case VK_FORMAT_R32G32_UINT:
	case VK_FORMAT_R32G32B32_UINT:
	case VK_FORMAT_R32G32B32A32_UINT:
		return sw::STREAMTYPE_UINT;
	case VK_FORMAT_R32_SINT:
	case VK_FORMAT_R32G32_SINT:
	case VK_FORMAT_R32G32B32_SINT:
	case VK_FORMAT_R32G32B32A32_SINT:
		return sw::STREAMTYPE_INT;
	case VK_FORMAT_R32_SFLOAT:
	case VK_FORMAT_R32G32_SFLOAT:
	case VK_FORMAT_R32G32B32_SFLOAT:
	case VK_FORMAT_R32G32B32A32_SFLOAT:
		return sw::STREAMTYPE_FLOAT;
	default:
		UNIMPLEMENTED();
	}

	return sw::STREAMTYPE_BYTE;
}

uint32_t getNumberOfChannels(VkFormat format)
{
	switch(format)
	{
	case VK_FORMAT_R8_UNORM:
	case VK_FORMAT_R8_SNORM:
	case VK_FORMAT_R8_UINT:
	case VK_FORMAT_R8_SINT:
	case VK_FORMAT_R16_UNORM:
	case VK_FORMAT_R16_SNORM:
	case VK_FORMAT_R16_UINT:
	case VK_FORMAT_R16_SINT:
	case VK_FORMAT_R16_SFLOAT:
	case VK_FORMAT_R32_UINT:
	case VK_FORMAT_R32_SINT:
	case VK_FORMAT_R32_SFLOAT:
		return 1;
	case VK_FORMAT_R8G8_UNORM:
	case VK_FORMAT_R8G8_SNORM:
	case VK_FORMAT_R8G8_UINT:
	case VK_FORMAT_R8G8_SINT:
	case VK_FORMAT_R16G16_UNORM:
	case VK_FORMAT_R16G16_SNORM:
	case VK_FORMAT_R16G16_UINT:
	case VK_FORMAT_R16G16_SINT:
	case VK_FORMAT_R16G16_SFLOAT:
	case VK_FORMAT_R32G32_UINT:
	case VK_FORMAT_R32G32_SINT:
	case VK_FORMAT_R32G32_SFLOAT:
		return 2;
	case VK_FORMAT_R32G32B32_UINT:
	case VK_FORMAT_R32G32B32_SINT:
	case VK_FORMAT_R32G32B32_SFLOAT:
		return 3;
	case VK_FORMAT_R8G8B8A8_UNORM:
	case VK_FORMAT_R8G8B8A8_SNORM:
	case VK_FORMAT_R8G8B8A8_UINT:
	case VK_FORMAT_R8G8B8A8_SINT:
	case VK_FORMAT_B8G8R8A8_UNORM:
	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
	case VK_FORMAT_R16G16B16A16_UNORM:
	case VK_FORMAT_R16G16B16A16_SNORM:
	case VK_FORMAT_R16G16B16A16_UINT:
	case VK_FORMAT_R16G16B16A16_SINT:
	case VK_FORMAT_R16G16B16A16_SFLOAT:
	case VK_FORMAT_R32G32B32A32_UINT:
	case VK_FORMAT_R32G32B32A32_SINT:
	case VK_FORMAT_R32G32B32A32_SFLOAT:
		return 4;
	default:
		UNIMPLEMENTED();
	}

	return 0;
}

}

namespace vk
{

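// GraphicsPipeline translates the fixed-function state in VkGraphicsPipelineCreateInfo
// into the renderer's sw::Context. State combinations not yet supported by this
// implementation are flagged with UNIMPLEMENTED() rather than silently ignored.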
GraphicsPipeline::GraphicsPipeline(const VkGraphicsPipelineCreateInfo* pCreateInfo, void* mem)
{
	if((pCreateInfo->flags != 0) ||
	   (pCreateInfo->stageCount != 2) ||
	   (pCreateInfo->pTessellationState != nullptr) ||
	   (pCreateInfo->pDynamicState != nullptr) ||
	   (pCreateInfo->subpass != 0) ||
	   (pCreateInfo->basePipelineHandle != VK_NULL_HANDLE) ||
	   (pCreateInfo->basePipelineIndex != 0))
	{
		UNIMPLEMENTED();
	}

	const VkPipelineShaderStageCreateInfo& vertexStage = pCreateInfo->pStages[0];
	if((vertexStage.stage != VK_SHADER_STAGE_VERTEX_BIT) ||
	   (vertexStage.flags != 0) ||
	   !((vertexStage.pSpecializationInfo == nullptr) ||
	     ((vertexStage.pSpecializationInfo->mapEntryCount == 0) &&
	      (vertexStage.pSpecializationInfo->dataSize == 0))))
	{
		UNIMPLEMENTED();
	}

	const VkPipelineShaderStageCreateInfo& fragmentStage = pCreateInfo->pStages[1];
	if((fragmentStage.stage != VK_SHADER_STAGE_FRAGMENT_BIT) ||
	   (fragmentStage.flags != 0) ||
	   !((fragmentStage.pSpecializationInfo == nullptr) ||
	     ((fragmentStage.pSpecializationInfo->mapEntryCount == 0) &&
	      (fragmentStage.pSpecializationInfo->dataSize == 0))))
	{
		UNIMPLEMENTED();
	}

	const VkPipelineVertexInputStateCreateInfo* vertexInputState = pCreateInfo->pVertexInputState;
	if(vertexInputState->flags != 0)
	{
		UNIMPLEMENTED();
	}

	for(uint32_t i = 0; i < vertexInputState->vertexBindingDescriptionCount; i++)
	{
		const VkVertexInputBindingDescription& vertexBindingDescription = vertexInputState->pVertexBindingDescriptions[i];
		context.input[vertexBindingDescription.binding].stride = vertexBindingDescription.stride;
		if(vertexBindingDescription.inputRate != VK_VERTEX_INPUT_RATE_VERTEX)
		{
			UNIMPLEMENTED();
		}
	}

	for(uint32_t i = 0; i < vertexInputState->vertexAttributeDescriptionCount; i++)
	{
		const VkVertexInputAttributeDescription& vertexAttributeDescription = vertexInputState->pVertexAttributeDescriptions[i];
		sw::Stream& input = context.input[vertexAttributeDescription.binding];
		input.count = getNumberOfChannels(vertexAttributeDescription.format);
		input.type = getStreamType(vertexAttributeDescription.format);
		input.normalized = !sw::Surface::isNonNormalizedInteger(vertexAttributeDescription.format);

		if(vertexAttributeDescription.location != vertexAttributeDescription.binding)
		{
			UNIMPLEMENTED();
		}
		if(vertexAttributeDescription.offset != 0)
		{
			UNIMPLEMENTED();
		}
	}

	const VkPipelineInputAssemblyStateCreateInfo* assemblyState = pCreateInfo->pInputAssemblyState;
	if((assemblyState->flags != 0) ||
	   (assemblyState->primitiveRestartEnable != 0))
	{
		UNIMPLEMENTED();
	}

	context.drawType = Convert(assemblyState->topology);

	const VkPipelineViewportStateCreateInfo* viewportState = pCreateInfo->pViewportState;
	if((viewportState->flags != 0) ||
	   (viewportState->viewportCount != 1) ||
	   (viewportState->scissorCount != 1))
	{
		UNIMPLEMENTED();
	}

	scissor = Convert(viewportState->pScissors[0]);
	viewport = viewportState->pViewports[0];

	const VkPipelineRasterizationStateCreateInfo* rasterizationState = pCreateInfo->pRasterizationState;
	if((rasterizationState->flags != 0) ||
	   (rasterizationState->depthClampEnable != 0) ||
	   (rasterizationState->polygonMode != VK_POLYGON_MODE_FILL))
	{
		UNIMPLEMENTED();
	}

	context.rasterizerDiscard = rasterizationState->rasterizerDiscardEnable;
	context.frontFacingCCW = rasterizationState->frontFace == VK_FRONT_FACE_COUNTER_CLOCKWISE;
	context.depthBias = (rasterizationState->depthBiasEnable ? rasterizationState->depthBiasConstantFactor : 0.0f);
	context.slopeDepthBias = (rasterizationState->depthBiasEnable ? rasterizationState->depthBiasSlopeFactor : 0.0f);

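	// Only single-sampled rasterization without sample shading, alpha-to-coverage
	// or alpha-to-one is supported at this point.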
	const VkPipelineMultisampleStateCreateInfo* multisampleState = pCreateInfo->pMultisampleState;
	if((multisampleState->flags != 0) ||
	   (multisampleState->rasterizationSamples != VK_SAMPLE_COUNT_1_BIT) ||
	   (multisampleState->sampleShadingEnable != 0) ||
	   !((multisampleState->pSampleMask == nullptr) ||
	     (*(multisampleState->pSampleMask) == 0xFFFFFFFFu)) ||
	   (multisampleState->alphaToCoverageEnable != 0) ||
	   (multisampleState->alphaToOneEnable != 0))
	{
		UNIMPLEMENTED();
	}

	const VkPipelineDepthStencilStateCreateInfo* depthStencilState = pCreateInfo->pDepthStencilState;
	if((depthStencilState->flags != 0) ||
	   (depthStencilState->depthBoundsTestEnable != 0) ||
	   (depthStencilState->minDepthBounds != 0.0f) ||
	   (depthStencilState->maxDepthBounds != 1.0f))
	{
		UNIMPLEMENTED();
	}

	context.depthBufferEnable = depthStencilState->depthTestEnable;
	context.depthWriteEnable = depthStencilState->depthWriteEnable;
	context.depthCompareMode = depthStencilState->depthCompareOp;

	context.stencilEnable = context.twoSidedStencil = depthStencilState->stencilTestEnable;
	if(context.stencilEnable)
	{
		context.stencilMask = depthStencilState->front.compareMask;
		context.stencilCompareMode = depthStencilState->front.compareOp;
		context.stencilZFailOperation = depthStencilState->front.depthFailOp;
		context.stencilFailOperation = depthStencilState->front.failOp;
		context.stencilPassOperation = depthStencilState->front.passOp;
		context.stencilReference = depthStencilState->front.reference;
		context.stencilWriteMask = depthStencilState->front.writeMask;

		context.stencilMaskCCW = depthStencilState->back.compareMask;
		context.stencilCompareModeCCW = depthStencilState->back.compareOp;
		context.stencilZFailOperationCCW = depthStencilState->back.depthFailOp;
		context.stencilFailOperationCCW = depthStencilState->back.failOp;
		context.stencilPassOperationCCW = depthStencilState->back.passOp;
		context.stencilReferenceCCW = depthStencilState->back.reference;
		context.stencilWriteMaskCCW = depthStencilState->back.writeMask;
	}

	const VkPipelineColorBlendStateCreateInfo* colorBlendState = pCreateInfo->pColorBlendState;
	if((colorBlendState->flags != 0) ||
	   ((colorBlendState->logicOpEnable != 0) &&
	    (colorBlendState->attachmentCount > 1)))
	{
		UNIMPLEMENTED();
	}

	context.colorLogicOpEnabled = colorBlendState->logicOpEnable;
	context.logicalOperation = colorBlendState->logicOp;
	blendConstants.r = colorBlendState->blendConstants[0];
	blendConstants.g = colorBlendState->blendConstants[1];
	blendConstants.b = colorBlendState->blendConstants[2];
	blendConstants.a = colorBlendState->blendConstants[3];

	if(colorBlendState->attachmentCount == 1)
	{
		const VkPipelineColorBlendAttachmentState& attachment = colorBlendState->pAttachments[0];
		if(attachment.colorWriteMask != (VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT))
		{
			UNIMPLEMENTED();
		}

		context.alphaBlendEnable = attachment.blendEnable;
		context.separateAlphaBlendEnable = (attachment.alphaBlendOp != attachment.colorBlendOp) ||
		                                   (attachment.dstAlphaBlendFactor != attachment.dstColorBlendFactor) ||
		                                   (attachment.srcAlphaBlendFactor != attachment.srcColorBlendFactor);
		context.blendOperationStateAlpha = attachment.alphaBlendOp;
		context.blendOperationState = attachment.colorBlendOp;
		context.destBlendFactorStateAlpha = attachment.dstAlphaBlendFactor;
		context.destBlendFactorState = attachment.dstColorBlendFactor;
		context.sourceBlendFactorStateAlpha = attachment.srcAlphaBlendFactor;
		context.sourceBlendFactorState = attachment.srcColorBlendFactor;
	}
}

void GraphicsPipeline::destroyPipeline(const VkAllocationCallbacks* pAllocator)
{
	delete vertexShader;
	delete fragmentShader;
}

size_t GraphicsPipeline::ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo* pCreateInfo)
{
	return 0;
}

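// Creates a sw::SpirvShader for each stage in the pipeline. The pipeline owns the
// resulting shader objects and releases them in destroyPipeline().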
void GraphicsPipeline::compileShaders(const VkAllocationCallbacks* pAllocator, const VkGraphicsPipelineCreateInfo* pCreateInfo)
{
	for(auto pStage = pCreateInfo->pStages; pStage != pCreateInfo->pStages + pCreateInfo->stageCount; pStage++)
	{
		auto module = Cast(pStage->module);

		// TODO: apply prep passes using SPIRV-Opt here.
		// - Apply and freeze specializations, etc.
		auto code = module->getCode();

		// TODO: pass in additional information here:
		// - any NOS from pCreateInfo which we'll actually need
		auto spirvShader = new sw::SpirvShader{code};

		switch(pStage->stage)
		{
		case VK_SHADER_STAGE_VERTEX_BIT:
			vertexShader = spirvShader;
			break;

		case VK_SHADER_STAGE_FRAGMENT_BIT:
			fragmentShader = spirvShader;
			break;

		default:
			UNIMPLEMENTED("Unsupported stage");
		}
	}
}

uint32_t GraphicsPipeline::computePrimitiveCount(uint32_t vertexCount) const
{
	switch(context.drawType)
	{
	case sw::DRAW_POINTLIST:
		return vertexCount;
	case sw::DRAW_LINELIST:
		return vertexCount / 2;
	case sw::DRAW_LINESTRIP:
		return vertexCount - 1;
	case sw::DRAW_TRIANGLELIST:
		return vertexCount / 3;
	case sw::DRAW_TRIANGLESTRIP:
	case sw::DRAW_TRIANGLEFAN:
		return vertexCount - 2;
	default:
		UNIMPLEMENTED();
	}

	return 0;
}

const sw::Context& GraphicsPipeline::getContext() const
{
	return context;
}

const sw::Rect& GraphicsPipeline::getScissor() const
{
	return scissor;
}

const VkViewport& GraphicsPipeline::getViewport() const
{
	return viewport;
}

const sw::Color<float>& GraphicsPipeline::getBlendConstants() const
{
	return blendConstants;
}

ComputePipeline::ComputePipeline(const VkComputePipelineCreateInfo* pCreateInfo, void* mem)
{
}

void ComputePipeline::destroyPipeline(const VkAllocationCallbacks* pAllocator)
{
}

size_t ComputePipeline::ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo* pCreateInfo)
{
	return 0;
}

} // namespace vk