/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 The Khronos Group Inc.
 * Copyright (c) 2015 Imagination Technologies Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Utilities for images.
 *//*--------------------------------------------------------------------*/

#include "vktPipelineImageUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkRefUtil.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuAstcUtil.hpp"
#include "deRandom.hpp"

namespace vkt
{
namespace pipeline
{

using namespace vk;

/*! Gets the next multiple of a given divisor (returns value unchanged if it
 *  is already a multiple). */
static deUint32 getNextMultiple (deUint32 divisor, deUint32 value)
{
	if (value % divisor == 0)
	{
		return value;
	}
	return value + divisor - (value % divisor);
}

/*! Gets the next value that is a multiple of all given divisors.
 *  Searches upward from value; each failed check advances to the next
 *  multiple of divisors[0], so divisors must be non-empty. */
static deUint32 getNextMultiple (const std::vector<deUint32>& divisors, deUint32 value)
{
	deUint32	nextMultiple		= value;
	bool		nextMultipleFound	= false;

	while (true)
	{
		nextMultipleFound = true;

		for (size_t divNdx = 0; divNdx < divisors.size(); divNdx++)
			nextMultipleFound = nextMultipleFound && (nextMultiple % divisors[divNdx] == 0);

		if (nextMultipleFound)
			break;

		// Guard against deUint32 wrap-around before advancing.
		DE_ASSERT(nextMultiple < ~((deUint32)0u));
		nextMultiple = getNextMultiple(divisors[0], nextMultiple + 1);
	}

	return nextMultiple;
}

//! Returns true if the format can be sampled with optimal tiling, taking the
//! device's compressed-texture feature bits (ASTC LDR / ETC2) into account.
bool isSupportedSamplableFormat (const InstanceInterface& instanceInterface, VkPhysicalDevice device, VkFormat format)
{
	if (isCompressedFormat(format))
	{
		VkPhysicalDeviceFeatures		physicalFeatures;
		const tcu::CompressedTexFormat	compressedFormat	= mapVkCompressedFormat(format);

		instanceInterface.getPhysicalDeviceFeatures(device, &physicalFeatures);

		if (tcu::isAstcFormat(compressedFormat))
		{
			if (!physicalFeatures.textureCompressionASTC_LDR)
				return false;
		}
		else if (tcu::isEtcFormat(compressedFormat))
		{
			if (!physicalFeatures.textureCompressionETC2)
				return false;
		}
		else
		{
			DE_FATAL("Unsupported compressed format");
		}
	}

	VkFormatProperties	formatProps;
	instanceInterface.getPhysicalDeviceFormatProperties(device, format, &formatProps);

	return (formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != 0u;
}

// \todo [2016-01-21 pyry] Update this to just rely on vkDefs.hpp once
// CTS has been updated to 1.0.2.
107 enum 108 { 109 VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000, 110 }; 111 112 bool isLinearFilteringSupported (const InstanceInterface& vki, VkPhysicalDevice physicalDevice, VkFormat format, VkImageTiling tiling) 113 { 114 const VkFormatProperties formatProperties = getPhysicalDeviceFormatProperties(vki, physicalDevice, format); 115 const VkFormatFeatureFlags formatFeatures = tiling == VK_IMAGE_TILING_LINEAR 116 ? formatProperties.linearTilingFeatures 117 : formatProperties.optimalTilingFeatures; 118 119 switch (format) 120 { 121 case VK_FORMAT_R32_SFLOAT: 122 case VK_FORMAT_R32G32_SFLOAT: 123 case VK_FORMAT_R32G32B32_SFLOAT: 124 case VK_FORMAT_R32G32B32A32_SFLOAT: 125 case VK_FORMAT_R64_SFLOAT: 126 case VK_FORMAT_R64G64_SFLOAT: 127 case VK_FORMAT_R64G64B64_SFLOAT: 128 case VK_FORMAT_R64G64B64A64_SFLOAT: 129 return (formatFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) != 0; 130 131 default: 132 // \todo [2016-01-21 pyry] Check for all formats once drivers have been updated to 1.0.2 133 // and we have tests to verify format properties. 
134 return true; 135 } 136 } 137 138 VkBorderColor getFormatBorderColor (BorderColor color, VkFormat format) 139 { 140 if (!isCompressedFormat(format) && (isIntFormat(format) || isUintFormat(format))) 141 { 142 switch (color) 143 { 144 case BORDER_COLOR_OPAQUE_BLACK: return VK_BORDER_COLOR_INT_OPAQUE_BLACK; 145 case BORDER_COLOR_OPAQUE_WHITE: return VK_BORDER_COLOR_INT_OPAQUE_WHITE; 146 case BORDER_COLOR_TRANSPARENT_BLACK: return VK_BORDER_COLOR_INT_TRANSPARENT_BLACK; 147 default: 148 break; 149 } 150 } 151 else 152 { 153 switch (color) 154 { 155 case BORDER_COLOR_OPAQUE_BLACK: return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK; 156 case BORDER_COLOR_OPAQUE_WHITE: return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; 157 case BORDER_COLOR_TRANSPARENT_BLACK: return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; 158 default: 159 break; 160 } 161 } 162 163 DE_ASSERT(false); 164 return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; 165 } 166 167 void getLookupScaleBias (vk::VkFormat format, tcu::Vec4& lookupScale, tcu::Vec4& lookupBias) 168 { 169 if (!isCompressedFormat(format)) 170 { 171 const tcu::TextureFormatInfo fmtInfo = tcu::getTextureFormatInfo(mapVkFormat(format)); 172 173 // Needed to normalize various formats to 0..1 range for writing into RT 174 lookupScale = fmtInfo.lookupScale; 175 lookupBias = fmtInfo.lookupBias; 176 } 177 else 178 { 179 switch (format) 180 { 181 case VK_FORMAT_EAC_R11_SNORM_BLOCK: 182 lookupScale = tcu::Vec4(0.5f, 1.0f, 1.0f, 1.0f); 183 lookupBias = tcu::Vec4(0.5f, 0.0f, 0.0f, 0.0f); 184 break; 185 186 case VK_FORMAT_EAC_R11G11_SNORM_BLOCK: 187 lookupScale = tcu::Vec4(0.5f, 0.5f, 1.0f, 1.0f); 188 lookupBias = tcu::Vec4(0.5f, 0.5f, 0.0f, 0.0f); 189 break; 190 191 default: 192 // else: All supported compressed formats are fine with no normalization. 
193 // ASTC LDR blocks decompress to f16 so querying normalization parameters 194 // based on uncompressed formats would actually lead to massive precision loss 195 // and complete lack of coverage in case of R8G8B8A8_UNORM RT. 196 lookupScale = tcu::Vec4(1.0f); 197 lookupBias = tcu::Vec4(0.0f); 198 break; 199 } 200 } 201 } 202 203 de::MovePtr<tcu::TextureLevel> readColorAttachment (const vk::DeviceInterface& vk, 204 vk::VkDevice device, 205 vk::VkQueue queue, 206 deUint32 queueFamilyIndex, 207 vk::Allocator& allocator, 208 vk::VkImage image, 209 vk::VkFormat format, 210 const tcu::UVec2& renderSize) 211 { 212 Move<VkBuffer> buffer; 213 de::MovePtr<Allocation> bufferAlloc; 214 Move<VkCommandPool> cmdPool; 215 Move<VkCommandBuffer> cmdBuffer; 216 Move<VkFence> fence; 217 const tcu::TextureFormat tcuFormat = mapVkFormat(format); 218 const VkDeviceSize pixelDataSize = renderSize.x() * renderSize.y() * tcuFormat.getPixelSize(); 219 de::MovePtr<tcu::TextureLevel> resultLevel (new tcu::TextureLevel(tcuFormat, renderSize.x(), renderSize.y())); 220 221 // Create destination buffer 222 { 223 const VkBufferCreateInfo bufferParams = 224 { 225 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType; 226 DE_NULL, // const void* pNext; 227 0u, // VkBufferCreateFlags flags; 228 pixelDataSize, // VkDeviceSize size; 229 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // VkBufferUsageFlags usage; 230 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode; 231 0u, // deUint32 queueFamilyIndexCount; 232 DE_NULL // const deUint32* pQueueFamilyIndices; 233 }; 234 235 buffer = createBuffer(vk, device, &bufferParams); 236 bufferAlloc = allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible); 237 VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset())); 238 } 239 240 // Create command pool and buffer 241 cmdPool = createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex); 242 
cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY); 243 244 // Create fence 245 fence = createFence(vk, device); 246 247 // Barriers for copying image to buffer 248 249 const VkImageMemoryBarrier imageBarrier = 250 { 251 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType; 252 DE_NULL, // const void* pNext; 253 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask; 254 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask; 255 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout; 256 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout; 257 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex; 258 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex; 259 image, // VkImage image; 260 { // VkImageSubresourceRange subresourceRange; 261 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask; 262 0u, // deUint32 baseMipLevel; 263 1u, // deUint32 mipLevels; 264 0u, // deUint32 baseArraySlice; 265 1u // deUint32 arraySize; 266 } 267 }; 268 269 const VkBufferMemoryBarrier bufferBarrier = 270 { 271 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType; 272 DE_NULL, // const void* pNext; 273 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask; 274 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask; 275 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex; 276 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex; 277 *buffer, // VkBuffer buffer; 278 0u, // VkDeviceSize offset; 279 pixelDataSize // VkDeviceSize size; 280 }; 281 282 const VkCommandBufferBeginInfo cmdBufferBeginInfo = 283 { 284 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType; 285 DE_NULL, // const void* pNext; 286 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags; 287 (const VkCommandBufferInheritanceInfo*)DE_NULL, 288 }; 289 290 // Copy image to buffer 291 292 const VkBufferImageCopy copyRegion = 293 { 294 0u, // 
VkDeviceSize bufferOffset; 295 (deUint32)renderSize.x(), // deUint32 bufferRowLength; 296 (deUint32)renderSize.y(), // deUint32 bufferImageHeight; 297 { VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u }, // VkImageSubresourceLayers imageSubresource; 298 { 0, 0, 0 }, // VkOffset3D imageOffset; 299 { renderSize.x(), renderSize.y(), 1u } // VkExtent3D imageExtent; 300 }; 301 302 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo)); 303 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier); 304 vk.cmdCopyImageToBuffer(*cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *buffer, 1, ©Region); 305 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL); 306 VK_CHECK(vk.endCommandBuffer(*cmdBuffer)); 307 308 const VkSubmitInfo submitInfo = 309 { 310 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType; 311 DE_NULL, // const void* pNext; 312 0u, // deUint32 waitSemaphoreCount; 313 DE_NULL, // const VkSemaphore* pWaitSemaphores; 314 DE_NULL, 315 1u, // deUint32 commandBufferCount; 316 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers; 317 0u, // deUint32 signalSemaphoreCount; 318 DE_NULL // const VkSemaphore* pSignalSemaphores; 319 }; 320 321 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence)); 322 VK_CHECK(vk.waitForFences(device, 1, &fence.get(), 0, ~(0ull) /* infinity */)); 323 324 // Read buffer data 325 invalidateMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE); 326 tcu::copy(*resultLevel, tcu::ConstPixelBufferAccess(resultLevel->getFormat(), resultLevel->getSize(), bufferAlloc->getHostPtr())); 327 328 return resultLevel; 329 } 330 331 namespace 332 { 333 334 
//! Derives the image aspect flags from a texture format: depth and/or stencil
//! bits when the channel order has those components, otherwise color.
VkImageAspectFlags getImageAspectFlags (const tcu::TextureFormat textureFormat)
{
	VkImageAspectFlags imageAspectFlags = 0;

	if (tcu::hasDepthComponent(textureFormat.order))
		imageAspectFlags |= VK_IMAGE_ASPECT_DEPTH_BIT;

	if (tcu::hasStencilComponent(textureFormat.order))
		imageAspectFlags |= VK_IMAGE_ASPECT_STENCIL_BIT;

	if (imageAspectFlags == 0)
		imageAspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;

	return imageAspectFlags;
}

} // anonymous

//! Uploads srcTexture (and, for combined depth/stencil formats, the matching
//! stencil-only texture) into destImage through a host-visible staging buffer.
//! The image is transitioned UNDEFINED -> TRANSFER_DST -> SHADER_READ_ONLY.
void uploadTestTextureInternal (const DeviceInterface&	vk,
								VkDevice				device,
								VkQueue					queue,
								deUint32				queueFamilyIndex,
								Allocator&				allocator,
								const TestTexture&		srcTexture,
								const TestTexture*		srcStencilTexture,
								tcu::TextureFormat		format,
								VkImage					destImage)
{
	deUint32					bufferSize;
	Move<VkBuffer>				buffer;
	de::MovePtr<Allocation>		bufferAlloc;
	Move<VkCommandPool>			cmdPool;
	Move<VkCommandBuffer>		cmdBuffer;
	Move<VkFence>				fence;
	const VkImageAspectFlags	imageAspectFlags	= getImageAspectFlags(format);
	deUint32					stencilOffset		= 0u;

	// Calculate buffer size
	bufferSize = (srcTexture.isCompressed())? srcTexture.getCompressedSize(): srcTexture.getSize();

	// Stencil-only texture should be provided if (and only if) the image has a combined DS format
	DE_ASSERT((tcu::hasDepthComponent(format.order) && tcu::hasStencilComponent(format.order)) == (srcStencilTexture != DE_NULL));

	if (srcStencilTexture != DE_NULL)
	{
		// Stencil data is appended after the depth data, 4-byte aligned.
		stencilOffset	= static_cast<deUint32>(deAlign32(static_cast<deInt32>(bufferSize), 4));
		bufferSize		= stencilOffset + srcStencilTexture->getSize();
	}

	// Create source buffer
	{
		const VkBufferCreateInfo bufferParams =
		{
			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,		// VkStructureType		sType;
			DE_NULL,									// const void*			pNext;
			0u,											// VkBufferCreateFlags	flags;
			bufferSize,									// VkDeviceSize			size;
			VK_BUFFER_USAGE_TRANSFER_SRC_BIT,			// VkBufferUsageFlags	usage;
			VK_SHARING_MODE_EXCLUSIVE,					// VkSharingMode		sharingMode;
			0u,											// deUint32				queueFamilyIndexCount;
			DE_NULL,									// const deUint32*		pQueueFamilyIndices;
		};

		buffer		= createBuffer(vk, device, &bufferParams);
		bufferAlloc	= allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
		VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
	}

	// Create command pool and buffer
	cmdPool		= createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
	cmdBuffer	= allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);

	// Create fence
	fence = createFence(vk, device);

	// Barriers for copying buffer to image
	const VkBufferMemoryBarrier preBufferBarrier =
	{
		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType	sType;
		DE_NULL,									// const void*		pNext;
		VK_ACCESS_HOST_WRITE_BIT,					// VkAccessFlags	srcAccessMask;
		VK_ACCESS_TRANSFER_READ_BIT,				// VkAccessFlags	dstAccessMask;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32			srcQueueFamilyIndex;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32			dstQueueFamilyIndex;
		*buffer,									// VkBuffer			buffer;
		0u,											// VkDeviceSize		offset;
		bufferSize									// VkDeviceSize		size;
	};

	const VkImageMemoryBarrier preImageBarrier =
	{
		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
		DE_NULL,									// const void*				pNext;
		0u,											// VkAccessFlags			srcAccessMask;
		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags			dstAccessMask;
		VK_IMAGE_LAYOUT_UNDEFINED,					// VkImageLayout			oldLayout;
		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout			newLayout;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32					dstQueueFamilyIndex;
		destImage,									// VkImage					image;
		{											// VkImageSubresourceRange	subresourceRange;
			imageAspectFlags,						// VkImageAspectFlags	aspectMask;
			0u,										// deUint32				baseMipLevel;
			(deUint32)srcTexture.getNumLevels(),	// deUint32				mipLevels;
			0u,										// deUint32				baseArraySlice;
			(deUint32)srcTexture.getArraySize(),	// deUint32				arraySize;
		}
	};

	const VkImageMemoryBarrier postImageBarrier =
	{
		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
		DE_NULL,									// const void*				pNext;
		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags			srcAccessMask;
		VK_ACCESS_SHADER_READ_BIT,					// VkAccessFlags			dstAccessMask;
		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout			oldLayout;
		VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,	// VkImageLayout			newLayout;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32					dstQueueFamilyIndex;
		destImage,									// VkImage					image;
		{											// VkImageSubresourceRange	subresourceRange;
			imageAspectFlags,						// VkImageAspectFlags	aspectMask;
			0u,										// deUint32				baseMipLevel;
			(deUint32)srcTexture.getNumLevels(),	// deUint32				mipLevels;
			0u,										// deUint32				baseArraySlice;
			(deUint32)srcTexture.getArraySize(),	// deUint32				arraySize;
		}
	};

	const VkCommandBufferBeginInfo cmdBufferBeginInfo =
	{
		VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,	// VkStructureType					sType;
		DE_NULL,										// const void*						pNext;
		VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,	// VkCommandBufferUsageFlags		flags;
		(const VkCommandBufferInheritanceInfo*)DE_NULL,
	};

	std::vector<VkBufferImageCopy> copyRegions = srcTexture.getBufferCopyRegions();

	// Write buffer data
	srcTexture.write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()));

	if (srcStencilTexture != DE_NULL)
	{
		DE_ASSERT(stencilOffset != 0u);

		srcStencilTexture->write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()) + stencilOffset);

		// Rebase the stencil copy regions onto the appended stencil data.
		std::vector<VkBufferImageCopy>	stencilCopyRegions = srcStencilTexture->getBufferCopyRegions();
		for (size_t regionIdx = 0; regionIdx < stencilCopyRegions.size(); regionIdx++)
		{
			VkBufferImageCopy region = stencilCopyRegions[regionIdx];
			region.bufferOffset += stencilOffset;

			copyRegions.push_back(region);
		}
	}

	flushMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);

	// Copy buffer to image
	VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
	vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
	// NOTE(review): dstStageMask TOP_OF_PIPE with dstAccessMask SHADER_READ
	// looks suspect — per the Vulkan spec shader reads should be synchronized
	// against a shader pipeline stage. Confirm against current CTS sources.
	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);

	VK_CHECK(vk.endCommandBuffer(*cmdBuffer));

	const VkSubmitInfo submitInfo =
	{
		VK_STRUCTURE_TYPE_SUBMIT_INFO,	// VkStructureType				sType;
		DE_NULL,						// const void*					pNext;
		0u,								// deUint32						waitSemaphoreCount;
		DE_NULL,						// const VkSemaphore*			pWaitSemaphores;
		DE_NULL,						// const VkPipelineStageFlags*	pWaitDstStageMask;
		1u,								// deUint32						commandBufferCount;
		&cmdBuffer.get(),				// const VkCommandBuffer*		pCommandBuffers;
		0u,								// deUint32						signalSemaphoreCount;
		DE_NULL							// const VkSemaphore*			pSignalSemaphores;
	};

	VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
	VK_CHECK(vk.waitForFences(device, 1, &fence.get(), true, ~(0ull) /* infinity */));
}

//! Public upload entry point. Combined depth/stencil textures are split into
//! separate depth-only and stencil-only textures before uploading, since the
//! data for the two aspects must be copied as separate regions.
void uploadTestTexture (const DeviceInterface&	vk,
						VkDevice				device,
						VkQueue					queue,
						deUint32				queueFamilyIndex,
						Allocator&				allocator,
						const TestTexture&		srcTexture,
						VkImage					destImage)
{
	if (tcu::isCombinedDepthStencilType(srcTexture.getTextureFormat().type))
	{
		de::MovePtr<TestTexture>	srcDepthTexture;
		de::MovePtr<TestTexture>	srcStencilTexture;

		if (tcu::hasDepthComponent(srcTexture.getTextureFormat().order))
		{
			// Pick the depth-only storage format matching the combined type.
			tcu::TextureFormat format;
			switch (srcTexture.getTextureFormat().type) {
				case tcu::TextureFormat::UNSIGNED_INT_16_8_8:
					format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
					break;
				case tcu::TextureFormat::UNSIGNED_INT_24_8_REV:
					format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNSIGNED_INT_24_8_REV);
					break;
				case tcu::TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
					format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::FLOAT);
					break;
				default:
					DE_ASSERT(0);
					break;
			}
			srcDepthTexture = srcTexture.copy(format);
		}

		if (tcu::hasStencilComponent(srcTexture.getTextureFormat().order))
			srcStencilTexture = srcTexture.copy(tcu::getEffectiveDepthStencilTextureFormat(srcTexture.getTextureFormat(), tcu::Sampler::MODE_STENCIL));

		uploadTestTextureInternal(vk, device, queue, queueFamilyIndex, allocator, *srcDepthTexture, srcStencilTexture.get(), srcTexture.getTextureFormat(), destImage);
	}
	else
		uploadTestTextureInternal(vk, device, queue, queueFamilyIndex, allocator, srcTexture, DE_NULL, srcTexture.getTextureFormat(), destImage);
}

// Utilities for test textures

//! Allocates storage for every mip level of the given tcu texture.
template<typename TcuTextureType>
void allocateLevels (TcuTextureType& texture)
{
	for (int levelNdx = 0; levelNdx < texture.getNumLevels(); levelNdx++)
		texture.allocLevel(levelNdx);
}

//! Collects writable accesses for all mip levels of the given tcu texture.
template<typename TcuTextureType>
std::vector<tcu::PixelBufferAccess> getLevelsVector (const TcuTextureType& texture)
{
	std::vector<tcu::PixelBufferAccess> levels(texture.getNumLevels());

	for (int levelNdx = 0; levelNdx < texture.getNumLevels(); levelNdx++)
		levels[levelNdx] = *reinterpret_cast<const tcu::PixelBufferAccess*>(&texture.getLevel(levelNdx));

	return levels;
}

// TestTexture

TestTexture::TestTexture (const tcu::TextureFormat& format, int width, int height, int depth)
{
	DE_ASSERT(width >= 1);
	DE_ASSERT(height >= 1);
	DE_ASSERT(depth >= 1);

	DE_UNREF(format);
	DE_UNREF(width);
	DE_UNREF(height);
	DE_UNREF(depth);
}

TestTexture::TestTexture (const tcu::CompressedTexFormat& format, int width, int height, int depth)
{
	DE_ASSERT(width >= 1);
	DE_ASSERT(height >= 1);
	DE_ASSERT(depth >= 1);

	DE_UNREF(format);
	DE_UNREF(width);
	DE_UNREF(height);
	DE_UNREF(depth);
}

TestTexture::~TestTexture (void)
{
	// Compressed levels are owned raw pointers (see populateCompressedLevels).
	for (size_t levelNdx = 0; levelNdx < m_compressedLevels.size(); levelNdx++)
		delete m_compressedLevels[levelNdx];
}

//! Total staging-buffer size in bytes for all levels and layers, with each
//! layer's data aligned to 4 bytes and to the pixel size.
deUint32 TestTexture::getSize (void) const
{
	std::vector<deUint32>	offsetMultiples;
	deUint32				textureSize = 0;

	offsetMultiples.push_back(4);
	offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());

	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
	{
		for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
		{
			const tcu::ConstPixelBufferAccess level = getLevel(levelNdx, layerNdx);
			textureSize = getNextMultiple(offsetMultiples, textureSize);
			textureSize += level.getWidth() * level.getHeight() * level.getDepth() * level.getFormat().getPixelSize();
		}
	}

	return textureSize;
}

//! Total staging-buffer size for compressed data, with each layer aligned to
//! 4 bytes and to the compressed block size. Throws if not compressed.
deUint32 TestTexture::getCompressedSize (void) const
{
	if (!isCompressed())
		throw tcu::InternalError("Texture is not compressed");

	std::vector<deUint32>	offsetMultiples;
	deUint32				textureSize = 0;

	offsetMultiples.push_back(4);
	offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));

	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
	{
		for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
		{
			textureSize = getNextMultiple(offsetMultiples, textureSize);
			textureSize += getCompressedLevel(levelNdx, layerNdx).getDataSize();
		}
	}

	return textureSize;
}

tcu::CompressedTexture& TestTexture::getCompressedLevel (int level, int layer)
{
	DE_ASSERT(level >= 0 && level < getNumLevels());
	DE_ASSERT(layer >= 0 && layer < getArraySize());

	// Levels are stored level-major: all layers of level 0, then level 1, ...
	return *m_compressedLevels[level * getArraySize() + layer];
}

const tcu::CompressedTexture& TestTexture::getCompressedLevel (int level, int layer) const
{
	DE_ASSERT(level >= 0 && level < getNumLevels());
	DE_ASSERT(layer >= 0 && layer < getArraySize());

	return *m_compressedLevels[level * getArraySize() + layer];
}

//! Builds one VkBufferImageCopy per level/layer (and per aspect for
//! uncompressed depth/stencil formats), matching the buffer layout produced
//! by write().
std::vector<VkBufferImageCopy> TestTexture::getBufferCopyRegions (void) const
{
	std::vector<deUint32>			offsetMultiples;
	std::vector<VkBufferImageCopy>	regions;
	deUint32						layerDataOffset = 0;

	offsetMultiples.push_back(4);

	if (isCompressed())
	{
		offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));

		for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		{
			for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			{
				const tcu::CompressedTexture& level = getCompressedLevel(levelNdx, layerNdx);
				tcu::IVec3 blockPixelSize = getBlockPixelSize(level.getFormat());
				layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);

				const VkBufferImageCopy layerRegion =
				{
					layerDataOffset,													// VkDeviceSize				bufferOffset;
					// Buffer row length/image height must be rounded up to full
					// compressed blocks.
					(deUint32)getNextMultiple(blockPixelSize.x(), level.getWidth()),	// deUint32					bufferRowLength;
					(deUint32)getNextMultiple(blockPixelSize.y(), level.getHeight()),	// deUint32					bufferImageHeight;
					{																	// VkImageSubresourceLayers	imageSubresource;
						VK_IMAGE_ASPECT_COLOR_BIT,
						(deUint32)levelNdx,
						(deUint32)layerNdx,
						1u
					},
					{ 0u, 0u, 0u },														// VkOffset3D				imageOffset;
					{																	// VkExtent3D				imageExtent;
						(deUint32)level.getWidth(),
						(deUint32)level.getHeight(),
						(deUint32)level.getDepth()
					}
				};

				regions.push_back(layerRegion);
				layerDataOffset += level.getDataSize();
			}
		}
	}
	else
	{
		std::vector<VkImageAspectFlags>	imageAspects;
		tcu::TextureFormat				textureFormat = getTextureFormat();

		if (tcu::hasDepthComponent(textureFormat.order))
			imageAspects.push_back(VK_IMAGE_ASPECT_DEPTH_BIT);

		if (tcu::hasStencilComponent(textureFormat.order))
			imageAspects.push_back(VK_IMAGE_ASPECT_STENCIL_BIT);

		if (imageAspects.empty())
			imageAspects.push_back(VK_IMAGE_ASPECT_COLOR_BIT);

		offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());

		for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		{
			for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			{
				const tcu::ConstPixelBufferAccess level = getLevel(levelNdx, layerNdx);

				layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);

				// One region per aspect, all reading from the same offset.
				for (size_t aspectIndex = 0; aspectIndex < imageAspects.size(); ++aspectIndex)
				{
					const VkBufferImageCopy layerRegion =
					{
						layerDataOffset,				// VkDeviceSize				bufferOffset;
						(deUint32)level.getWidth(),		// deUint32					bufferRowLength;
						(deUint32)level.getHeight(),	// deUint32					bufferImageHeight;
						{								// VkImageSubresourceLayers	imageSubresource;
							imageAspects[aspectIndex],
							(deUint32)levelNdx,
							(deUint32)layerNdx,
							1u
						},
						{ 0u, 0u, 0u },					// VkOffset3D				imageOffset;
						{								// VkExtent3D				imageExtent;
							(deUint32)level.getWidth(),
							(deUint32)level.getHeight(),
							(deUint32)level.getDepth()
						}
					};

					regions.push_back(layerRegion);
				}
				layerDataOffset += level.getWidth() * level.getHeight() * level.getDepth() * level.getFormat().getPixelSize();
			}
		}
	}

	return regions;
}

//! Serializes all level/layer data into destPtr using the same alignment
//! rules as getSize()/getCompressedSize()/getBufferCopyRegions().
void TestTexture::write (deUint8* destPtr) const
{
	std::vector<deUint32>	offsetMultiples;
	deUint32				levelOffset = 0;

	offsetMultiples.push_back(4);

	if (isCompressed())
	{
		offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));

		for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		{
			for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			{
				levelOffset = getNextMultiple(offsetMultiples, levelOffset);

				const tcu::CompressedTexture& compressedTex = getCompressedLevel(levelNdx, layerNdx);

				deMemcpy(destPtr + levelOffset, compressedTex.getData(), compressedTex.getDataSize());
				levelOffset += compressedTex.getDataSize();
			}
		}
	}
	else
	{
		offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());

		for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		{
			for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			{
				levelOffset = getNextMultiple(offsetMultiples, levelOffset);

				const tcu::ConstPixelBufferAccess	srcAccess	= getLevel(levelNdx, layerNdx);
				const tcu::PixelBufferAccess		destAccess	(srcAccess.getFormat(), srcAccess.getSize(), srcAccess.getPitch(), destPtr + levelOffset);

				tcu::copy(destAccess, srcAccess);
				levelOffset += srcAccess.getWidth() * srcAccess.getHeight() * srcAccess.getDepth() * srcAccess.getFormat().getPixelSize();
			}
		}
	}
}

//! Copies every level/layer of this texture into destTexture, which must
//! have matching level/layer dimensions.
void TestTexture::copyToTexture (TestTexture& destTexture) const
{
	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			tcu::copy(destTexture.getLevel(levelNdx, layerNdx), getLevel(levelNdx, layerNdx));
}

void TestTexture::populateLevels (const std::vector<tcu::PixelBufferAccess>& levels)
{
	for (size_t levelNdx = 0; levelNdx < levels.size(); levelNdx++)
		TestTexture::fillWithGradient(levels[levelNdx]);
}

//! Fills m_compressedLevels with deterministic pseudo-random compressed data
//! and decompresses it into the provided uncompressed level accesses.
void TestTexture::populateCompressedLevels (tcu::CompressedTexFormat format, const std::vector<tcu::PixelBufferAccess>& decompressedLevels)
{
	// Generate random compressed data and update decompressed data

	de::Random random(123);

	for (size_t levelNdx = 0; levelNdx < decompressedLevels.size(); levelNdx++)
	{
		const tcu::PixelBufferAccess	level			= decompressedLevels[levelNdx];
		tcu::CompressedTexture*			compressedLevel	= new tcu::CompressedTexture(format, level.getWidth(), level.getHeight(), level.getDepth());
		deUint8* const					compressedData	= (deUint8*)compressedLevel->getData();

		if (tcu::isAstcFormat(format))
		{
			// \todo [2016-01-20 pyry] Comparison doesn't currently handle invalid blocks correctly so we use only valid blocks
			tcu::astc::generateRandomValidBlocks(compressedData, compressedLevel->getDataSize()/tcu::astc::BLOCK_SIZE_BYTES,
												 format, tcu::TexDecompressionParams::ASTCMODE_LDR, random.getUint32());
		}
		else
		{
			// Generate random compressed data
			// Random initial values cause assertion during the decompression in case of COMPRESSEDTEXFORMAT_ETC1_RGB8 format
			if (format != tcu::COMPRESSEDTEXFORMAT_ETC1_RGB8)
				for (int byteNdx = 0; byteNdx < compressedLevel->getDataSize(); byteNdx++)
					compressedData[byteNdx] = 0xFF & random.getUint32();
		}

		// Ownership transferred; freed in ~TestTexture().
		m_compressedLevels.push_back(compressedLevel);

		// Store decompressed data
		compressedLevel->decompress(level, tcu::TexDecompressionParams(tcu::TexDecompressionParams::ASTCMODE_LDR));
	}
}

void TestTexture::fillWithGradient (const tcu::PixelBufferAccess& levelAccess)
{
	const tcu::TextureFormatInfo formatInfo = tcu::getTextureFormatInfo(levelAccess.getFormat());
	tcu::fillWithComponentGradients(levelAccess, formatInfo.valueMin, formatInfo.valueMax);
}

// TestTexture1D

TestTexture1D::TestTexture1D (const tcu::TextureFormat& format, int width)
	: TestTexture	(format, width, 1, 1)
	, m_texture		(format, width)
{
	allocateLevels(m_texture);
	TestTexture::populateLevels(getLevelsVector(m_texture));
}

TestTexture1D::TestTexture1D (const tcu::CompressedTexFormat& format, int width)
	: TestTexture	(format, width, 1, 1)
	, m_texture		(tcu::getUncompressedFormat(format), width)
{
	allocateLevels(m_texture);
	TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));
}

TestTexture1D::~TestTexture1D (void)
{
}

int TestTexture1D::getNumLevels (void) const
{
	return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTexture1D::getLevel (int level, int layer)
{
	DE_ASSERT(layer == 0);
	DE_UNREF(layer);
	return m_texture.getLevel(level);
}

const tcu::ConstPixelBufferAccess TestTexture1D::getLevel (int level, int layer) const
{
	DE_ASSERT(layer == 0);
	DE_UNREF(layer);
	return m_texture.getLevel(level);
}

const tcu::Texture1D& TestTexture1D::getTexture (void) const
{
	return m_texture;
}

tcu::Texture1D& TestTexture1D::getTexture (void)
{
	return m_texture;
}

//! Returns a copy of this texture converted to the given (uncompressed) format.
de::MovePtr<TestTexture> TestTexture1D::copy(const tcu::TextureFormat format) const
{
	DE_ASSERT(!isCompressed());

	de::MovePtr<TestTexture>	texture	(new TestTexture1D(format, m_texture.getWidth()));

	copyToTexture(*texture);

	return texture;
}

// TestTexture1DArray

TestTexture1DArray::TestTexture1DArray (const tcu::TextureFormat& format, int width, int arraySize)
	: TestTexture	(format, width, 1, arraySize)
	, m_texture		(format, width, arraySize)
{
	allocateLevels(m_texture);
	TestTexture::populateLevels(getLevelsVector(m_texture));
}

TestTexture1DArray::TestTexture1DArray (const tcu::CompressedTexFormat& format, int width, int arraySize)
	: TestTexture	(format, width, 1, arraySize)
	, m_texture		(tcu::getUncompressedFormat(format), width, arraySize)
{
	allocateLevels(m_texture);

	// Flatten (level, layer) into a single vector of per-layer accesses.
	std::vector<tcu::PixelBufferAccess> layers;
	for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < m_texture.getNumLayers(); layerNdx++)
			layers.push_back(getLevel(levelNdx, layerNdx));

	TestTexture::populateCompressedLevels(format, layers);
}

TestTexture1DArray::~TestTexture1DArray (void)
{
}

int TestTexture1DArray::getNumLevels (void) const
{
	return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTexture1DArray::getLevel (int level, int layer)
{
	// tcu::Texture1DArray stores all layers of a level contiguously; carve out
	// the requested layer by byte offset.
	const tcu::PixelBufferAccess	levelLayers		= m_texture.getLevel(level);
	const deUint32					layerSize		= levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
	const deUint32					layerOffset		= layerSize * layer;

	return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
}

const tcu::ConstPixelBufferAccess TestTexture1DArray::getLevel (int level, int layer) const
{
	const tcu::ConstPixelBufferAccess	levelLayers		= m_texture.getLevel(level);
	const deUint32						layerSize		= levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
	const deUint32						layerOffset		= layerSize * layer;

	return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
}

const tcu::Texture1DArray& TestTexture1DArray::getTexture (void) const
{
	return m_texture;
}

tcu::Texture1DArray& TestTexture1DArray::getTexture (void)
{
	return m_texture;
}

int TestTexture1DArray::getArraySize (void) const
{
	return m_texture.getNumLayers();
}

de::MovePtr<TestTexture> TestTexture1DArray::copy(const tcu::TextureFormat format) const
{
	DE_ASSERT(!isCompressed());

	de::MovePtr<TestTexture>	texture	(new TestTexture1DArray(format, m_texture.getWidth(), getArraySize()));

	copyToTexture(*texture);

	return texture;
}

// TestTexture2D

TestTexture2D::TestTexture2D (const tcu::TextureFormat& format, int width, int height)
	: TestTexture	(format, width, height, 1)
	, m_texture		(format, width, height)
{
	allocateLevels(m_texture);
	TestTexture::populateLevels(getLevelsVector(m_texture));
}

TestTexture2D::TestTexture2D (const tcu::CompressedTexFormat& format, int width, int height)
	: TestTexture	(format, width, height, 1)
	, m_texture		(tcu::getUncompressedFormat(format), width, height)
{
	allocateLevels(m_texture);
	TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));
}

TestTexture2D::~TestTexture2D (void)
{
}

int TestTexture2D::getNumLevels (void) const
{
	return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTexture2D::getLevel (int level, int layer)
{
	DE_ASSERT(layer == 0);
	DE_UNREF(layer);
	return m_texture.getLevel(level);
}

const tcu::ConstPixelBufferAccess
TestTexture2D::getLevel (int level, int layer) const 1043 { 1044 DE_ASSERT(layer == 0); 1045 DE_UNREF(layer); 1046 return m_texture.getLevel(level); 1047 } 1048 1049 const tcu::Texture2D& TestTexture2D::getTexture (void) const 1050 { 1051 return m_texture; 1052 } 1053 1054 tcu::Texture2D& TestTexture2D::getTexture (void) 1055 { 1056 return m_texture; 1057 } 1058 1059 de::MovePtr<TestTexture> TestTexture2D::copy(const tcu::TextureFormat format) const 1060 { 1061 DE_ASSERT(!isCompressed()); 1062 1063 de::MovePtr<TestTexture> texture (new TestTexture2D(format, m_texture.getWidth(), m_texture.getHeight())); 1064 1065 copyToTexture(*texture); 1066 1067 return texture; 1068 } 1069 1070 // TestTexture2DArray 1071 1072 TestTexture2DArray::TestTexture2DArray (const tcu::TextureFormat& format, int width, int height, int arraySize) 1073 : TestTexture (format, width, height, arraySize) 1074 , m_texture (format, width, height, arraySize) 1075 { 1076 allocateLevels(m_texture); 1077 TestTexture::populateLevels(getLevelsVector(m_texture)); 1078 } 1079 1080 TestTexture2DArray::TestTexture2DArray (const tcu::CompressedTexFormat& format, int width, int height, int arraySize) 1081 : TestTexture (format, width, height, arraySize) 1082 , m_texture (tcu::getUncompressedFormat(format), width, height, arraySize) 1083 { 1084 allocateLevels(m_texture); 1085 1086 std::vector<tcu::PixelBufferAccess> layers; 1087 for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++) 1088 for (int layerNdx = 0; layerNdx < m_texture.getNumLayers(); layerNdx++) 1089 layers.push_back(getLevel(levelNdx, layerNdx)); 1090 1091 TestTexture::populateCompressedLevels(format, layers); 1092 } 1093 1094 TestTexture2DArray::~TestTexture2DArray (void) 1095 { 1096 } 1097 1098 int TestTexture2DArray::getNumLevels (void) const 1099 { 1100 return m_texture.getNumLevels(); 1101 } 1102 1103 tcu::PixelBufferAccess TestTexture2DArray::getLevel (int level, int layer) 1104 { 1105 const tcu::PixelBufferAccess 
levelLayers = m_texture.getLevel(level); 1106 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize(); 1107 const deUint32 layerOffset = layerSize * layer; 1108 1109 return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset); 1110 } 1111 1112 const tcu::ConstPixelBufferAccess TestTexture2DArray::getLevel (int level, int layer) const 1113 { 1114 const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level); 1115 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize(); 1116 const deUint32 layerOffset = layerSize * layer; 1117 1118 return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset); 1119 } 1120 1121 const tcu::Texture2DArray& TestTexture2DArray::getTexture (void) const 1122 { 1123 return m_texture; 1124 } 1125 1126 tcu::Texture2DArray& TestTexture2DArray::getTexture (void) 1127 { 1128 return m_texture; 1129 } 1130 1131 int TestTexture2DArray::getArraySize (void) const 1132 { 1133 return m_texture.getNumLayers(); 1134 } 1135 1136 de::MovePtr<TestTexture> TestTexture2DArray::copy(const tcu::TextureFormat format) const 1137 { 1138 DE_ASSERT(!isCompressed()); 1139 1140 de::MovePtr<TestTexture> texture (new TestTexture2DArray(format, m_texture.getWidth(), m_texture.getHeight(), getArraySize())); 1141 1142 copyToTexture(*texture); 1143 1144 return texture; 1145 } 1146 1147 // TestTexture3D 1148 1149 TestTexture3D::TestTexture3D (const tcu::TextureFormat& format, int width, int height, int depth) 1150 : TestTexture (format, width, height, depth) 1151 , m_texture (format, width, height, depth) 1152 { 1153 allocateLevels(m_texture); 1154 TestTexture::populateLevels(getLevelsVector(m_texture)); 1155 } 1156 1157 TestTexture3D::TestTexture3D 
(const tcu::CompressedTexFormat& format, int width, int height, int depth) 1158 : TestTexture (format, width, height, depth) 1159 , m_texture (tcu::getUncompressedFormat(format), width, height, depth) 1160 { 1161 allocateLevels(m_texture); 1162 TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture)); 1163 } 1164 1165 TestTexture3D::~TestTexture3D (void) 1166 { 1167 } 1168 1169 int TestTexture3D::getNumLevels (void) const 1170 { 1171 return m_texture.getNumLevels(); 1172 } 1173 1174 tcu::PixelBufferAccess TestTexture3D::getLevel (int level, int layer) 1175 { 1176 DE_ASSERT(layer == 0); 1177 DE_UNREF(layer); 1178 return m_texture.getLevel(level); 1179 } 1180 1181 const tcu::ConstPixelBufferAccess TestTexture3D::getLevel (int level, int layer) const 1182 { 1183 DE_ASSERT(layer == 0); 1184 DE_UNREF(layer); 1185 return m_texture.getLevel(level); 1186 } 1187 1188 const tcu::Texture3D& TestTexture3D::getTexture (void) const 1189 { 1190 return m_texture; 1191 } 1192 1193 tcu::Texture3D& TestTexture3D::getTexture (void) 1194 { 1195 return m_texture; 1196 } 1197 1198 de::MovePtr<TestTexture> TestTexture3D::copy(const tcu::TextureFormat format) const 1199 { 1200 DE_ASSERT(!isCompressed()); 1201 1202 de::MovePtr<TestTexture> texture (new TestTexture3D(format, m_texture.getWidth(), m_texture.getHeight(), m_texture.getDepth())); 1203 1204 copyToTexture(*texture); 1205 1206 return texture; 1207 } 1208 1209 // TestTextureCube 1210 1211 const static tcu::CubeFace tcuFaceMapping[tcu::CUBEFACE_LAST] = 1212 { 1213 tcu::CUBEFACE_POSITIVE_X, 1214 tcu::CUBEFACE_NEGATIVE_X, 1215 tcu::CUBEFACE_POSITIVE_Y, 1216 tcu::CUBEFACE_NEGATIVE_Y, 1217 tcu::CUBEFACE_POSITIVE_Z, 1218 tcu::CUBEFACE_NEGATIVE_Z 1219 }; 1220 1221 TestTextureCube::TestTextureCube (const tcu::TextureFormat& format, int size) 1222 : TestTexture (format, size, size, 1) 1223 , m_texture (format, size) 1224 { 1225 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++) 1226 { 1227 for (int faceNdx = 0; 
faceNdx < tcu::CUBEFACE_LAST; faceNdx++) 1228 { 1229 m_texture.allocLevel(tcuFaceMapping[faceNdx], levelNdx); 1230 TestTexture::fillWithGradient(m_texture.getLevelFace(levelNdx, tcuFaceMapping[faceNdx])); 1231 } 1232 } 1233 } 1234 1235 TestTextureCube::TestTextureCube (const tcu::CompressedTexFormat& format, int size) 1236 : TestTexture (format, size, size, 1) 1237 , m_texture (tcu::getUncompressedFormat(format), size) 1238 { 1239 std::vector<tcu::PixelBufferAccess> levels(m_texture.getNumLevels() * tcu::CUBEFACE_LAST); 1240 1241 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++) 1242 { 1243 for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; faceNdx++) 1244 { 1245 m_texture.allocLevel(tcuFaceMapping[faceNdx], levelNdx); 1246 levels[levelNdx * tcu::CUBEFACE_LAST + faceNdx] = m_texture.getLevelFace(levelNdx, tcuFaceMapping[faceNdx]); 1247 } 1248 } 1249 1250 TestTexture::populateCompressedLevels(format, levels); 1251 } 1252 1253 TestTextureCube::~TestTextureCube (void) 1254 { 1255 } 1256 1257 int TestTextureCube::getNumLevels (void) const 1258 { 1259 return m_texture.getNumLevels(); 1260 } 1261 1262 tcu::PixelBufferAccess TestTextureCube::getLevel (int level, int layer) 1263 { 1264 return m_texture.getLevelFace(level, tcuFaceMapping[layer]); 1265 } 1266 1267 const tcu::ConstPixelBufferAccess TestTextureCube::getLevel (int level, int layer) const 1268 { 1269 return m_texture.getLevelFace(level, tcuFaceMapping[layer]); 1270 } 1271 1272 int TestTextureCube::getArraySize (void) const 1273 { 1274 return (int)tcu::CUBEFACE_LAST; 1275 } 1276 1277 const tcu::TextureCube& TestTextureCube::getTexture (void) const 1278 { 1279 return m_texture; 1280 } 1281 1282 tcu::TextureCube& TestTextureCube::getTexture (void) 1283 { 1284 return m_texture; 1285 } 1286 1287 de::MovePtr<TestTexture> TestTextureCube::copy(const tcu::TextureFormat format) const 1288 { 1289 DE_ASSERT(!isCompressed()); 1290 1291 de::MovePtr<TestTexture> texture (new TestTextureCube(format, 
m_texture.getSize())); 1292 1293 copyToTexture(*texture); 1294 1295 return texture; 1296 } 1297 1298 // TestTextureCubeArray 1299 1300 TestTextureCubeArray::TestTextureCubeArray (const tcu::TextureFormat& format, int size, int arraySize) 1301 : TestTexture (format, size, size, arraySize) 1302 , m_texture (format, size, arraySize) 1303 { 1304 allocateLevels(m_texture); 1305 TestTexture::populateLevels(getLevelsVector(m_texture)); 1306 } 1307 1308 TestTextureCubeArray::TestTextureCubeArray (const tcu::CompressedTexFormat& format, int size, int arraySize) 1309 : TestTexture (format, size, size, arraySize) 1310 , m_texture (tcu::getUncompressedFormat(format), size, arraySize) 1311 { 1312 DE_ASSERT(arraySize % 6 == 0); 1313 1314 allocateLevels(m_texture); 1315 1316 std::vector<tcu::PixelBufferAccess> layers; 1317 for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++) 1318 for (int layerNdx = 0; layerNdx < m_texture.getDepth(); layerNdx++) 1319 layers.push_back(getLevel(levelNdx, layerNdx)); 1320 1321 TestTexture::populateCompressedLevels(format, layers); 1322 } 1323 1324 TestTextureCubeArray::~TestTextureCubeArray (void) 1325 { 1326 } 1327 1328 int TestTextureCubeArray::getNumLevels (void) const 1329 { 1330 return m_texture.getNumLevels(); 1331 } 1332 1333 tcu::PixelBufferAccess TestTextureCubeArray::getLevel (int level, int layer) 1334 { 1335 const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level); 1336 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize(); 1337 const deUint32 layerOffset = layerSize * layer; 1338 1339 return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset); 1340 } 1341 1342 const tcu::ConstPixelBufferAccess TestTextureCubeArray::getLevel (int level, int layer) const 1343 { 1344 const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level); 1345 
const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize(); 1346 const deUint32 layerOffset = layerSize * layer; 1347 1348 return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset); 1349 } 1350 1351 int TestTextureCubeArray::getArraySize (void) const 1352 { 1353 return m_texture.getDepth(); 1354 } 1355 1356 const tcu::TextureCubeArray& TestTextureCubeArray::getTexture (void) const 1357 { 1358 return m_texture; 1359 } 1360 1361 tcu::TextureCubeArray& TestTextureCubeArray::getTexture (void) 1362 { 1363 return m_texture; 1364 } 1365 1366 de::MovePtr<TestTexture> TestTextureCubeArray::copy(const tcu::TextureFormat format) const 1367 { 1368 DE_ASSERT(!isCompressed()); 1369 1370 de::MovePtr<TestTexture> texture (new TestTextureCubeArray(format, m_texture.getSize(), getArraySize())); 1371 1372 copyToTexture(*texture); 1373 1374 return texture; 1375 } 1376 1377 } // pipeline 1378 } // vkt 1379