mirror of https://github.com/dolphin-emu/dolphin.git (synced 2025-06-13 10:47:48 +00:00)

commit a3f113dd7a (parent 5f7e9d3bf1)

    VideoBackends:Vulkan: Clean up barriers
Source/Core/VideoBackends/Vulkan/StagingBuffer.cpp

@@ -68,24 +68,14 @@ void StagingBuffer::FlushCPUCache(VkDeviceSize offset, VkDeviceSize size)
   vmaFlushAllocation(g_vulkan_context->GetMemoryAllocator(), m_alloc, offset, size);
 }
 
-void StagingBuffer::InvalidateGPUCache(VkCommandBuffer command_buffer,
-                                       VkAccessFlagBits dest_access_flags,
-                                       VkPipelineStageFlagBits dest_pipeline_stage,
-                                       VkDeviceSize offset, VkDeviceSize size)
-{
-  ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
-  BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_HOST_WRITE_BIT, dest_access_flags, offset,
-                      size, VK_PIPELINE_STAGE_HOST_BIT, dest_pipeline_stage);
-}
-
 void StagingBuffer::PrepareForGPUWrite(VkCommandBuffer command_buffer,
                                        VkAccessFlagBits dst_access_flags,
                                        VkPipelineStageFlagBits dst_pipeline_stage,
                                        VkDeviceSize offset, VkDeviceSize size)
 {
   ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
-  BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_MEMORY_WRITE_BIT, dst_access_flags,
-                      offset, size, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, dst_pipeline_stage);
+  BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access_flags,
+                      offset, size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_pipeline_stage);
 }
 
 void StagingBuffer::FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits src_access_flags,
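For context on the helper these hunks call: below is a minimal sketch of what a BufferMemoryBarrier-style wrapper over vkCmdPipelineBarrier plausibly looks like, assuming it simply records a single VkBufferMemoryBarrier (the real helper is a static member of StagingBuffer and may differ in detail). The change above narrows the source scope of PrepareForGPUWrite from "any write in any stage" to "transfer writes in the transfer stage", which is sufficient when the readback buffer is only ever written by copy commands.

// Sketch only: assumes the helper records exactly one VkBufferMemoryBarrier.
static void BufferMemoryBarrierSketch(VkCommandBuffer command_buffer, VkBuffer buffer,
                                      VkAccessFlags src_access_mask, VkAccessFlags dst_access_mask,
                                      VkDeviceSize offset, VkDeviceSize size,
                                      VkPipelineStageFlags src_stage_mask,
                                      VkPipelineStageFlags dst_stage_mask)
{
  const VkBufferMemoryBarrier buffer_barrier = {
      VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // VkStructureType sType
      nullptr,                                  // const void*     pNext
      src_access_mask,                          // VkAccessFlags   srcAccessMask
      dst_access_mask,                          // VkAccessFlags   dstAccessMask
      VK_QUEUE_FAMILY_IGNORED,                  // uint32_t        srcQueueFamilyIndex
      VK_QUEUE_FAMILY_IGNORED,                  // uint32_t        dstQueueFamilyIndex
      buffer,                                   // VkBuffer        buffer
      offset,                                   // VkDeviceSize    offset
      size                                      // VkDeviceSize    size
  };
  vkCmdPipelineBarrier(command_buffer, src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1,
                       &buffer_barrier, 0, nullptr);
}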
Source/Core/VideoBackends/Vulkan/StagingBuffer.h

@@ -29,12 +29,6 @@ public:
   // Upload part 1: Prepare from device read from the CPU side
   void FlushCPUCache(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
 
-  // Upload part 2: Prepare for device read from the GPU side
-  // Implicit when submitting the command buffer, so rarely needed.
-  void InvalidateGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
-                          VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset = 0,
-                          VkDeviceSize size = VK_WHOLE_SIZE);
-
   // Readback part 0: Prepare for GPU usage (if necessary)
   void PrepareForGPUWrite(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
                           VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset = 0,
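The deleted declaration documented its own redundancy: host writes that are flushed before vkQueueSubmit are automatically made available to the device, so the host-to-device barrier InvalidateGPUCache recorded is implicit in submission. A hypothetical upload sequence with the remaining API (the map-pointer and buffer accessors and the copy-region setup are assumed for illustration, not taken from the diff):

// Hypothetical upload flow; GetMapPointer()/GetBuffer() accessors are assumed.
std::memcpy(staging->GetMapPointer(), src_data, data_size);
staging->FlushCPUCache(0, data_size);  // Upload part 1: make the CPU writes visible
// No InvalidateGPUCache: the host->device dependency is implicit in vkQueueSubmit.
VkBufferCopy copy_region = {0, 0, data_size};  // srcOffset, dstOffset, size
vkCmdCopyBuffer(command_buffer, staging->GetBuffer(), dst_buffer, 1, &copy_region);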
Source/Core/VideoBackends/Vulkan/VKBoundingBox.cpp

@@ -43,10 +43,10 @@ std::vector<BBoxType> VKBoundingBox::Read(u32 index, u32 length)
   StateTracker::GetInstance()->EndRenderPass();
 
   // Ensure all writes are completed to the GPU buffer prior to the transfer.
-  StagingBuffer::BufferMemoryBarrier(
-      g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
-      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, 0,
-      BUFFER_SIZE, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
+  StagingBuffer::BufferMemoryBarrier(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
+                                     VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, 0,
+                                     BUFFER_SIZE, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+                                     VK_PIPELINE_STAGE_TRANSFER_BIT);
   m_readback_buffer->PrepareForGPUWrite(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                         VK_ACCESS_TRANSFER_WRITE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT);
@@ -57,10 +57,10 @@ std::vector<BBoxType> VKBoundingBox::Read(u32 index, u32 length)
                     m_readback_buffer->GetBuffer(), 1, &region);
 
   // Restore GPU buffer access.
-  StagingBuffer::BufferMemoryBarrier(
-      g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer, VK_ACCESS_TRANSFER_READ_BIT,
-      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, 0, BUFFER_SIZE,
-      VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
+  StagingBuffer::BufferMemoryBarrier(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
+                                     0, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, 0,
+                                     BUFFER_SIZE, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
   m_readback_buffer->FlushGPUCache(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                    VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
 
@@ -84,10 +84,10 @@ void VKBoundingBox::Write(u32 index, const std::vector<BBoxType>& values)
   StateTracker::GetInstance()->EndRenderPass();
 
   // Ensure GPU buffer is in a state where it can be transferred to.
-  StagingBuffer::BufferMemoryBarrier(
-      g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
-      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, 0,
-      BUFFER_SIZE, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
+  StagingBuffer::BufferMemoryBarrier(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
+                                     VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, 0,
+                                     BUFFER_SIZE, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+                                     VK_PIPELINE_STAGE_TRANSFER_BIT);
 
   // Write the values to the GPU buffer
   vkCmdUpdateBuffer(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
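All three VKBoundingBox barriers were narrowed the same way: srcAccessMask exists only to make prior writes available, so read bits (VK_ACCESS_SHADER_READ_BIT, VK_ACCESS_TRANSFER_READ_BIT) contribute nothing there; prior reads are already ordered by the execution dependency between the stage masks. A sketch of that rule as it applies to the Read() path, written against vkCmdPipelineBarrier directly (the command buffer, buffer handle, and size are assumed):

// Before the copy: make prior fragment-shader *writes* available to transfer reads.
// srcAccessMask carries no READ bits; ordering the reads is the execution
// dependency's job (FRAGMENT_SHADER -> TRANSFER), not the memory dependency's.
const VkBufferMemoryBarrier pre_copy = {
    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, nullptr,
    VK_ACCESS_SHADER_WRITE_BIT,   // srcAccessMask: writes only
    VK_ACCESS_TRANSFER_READ_BIT,  // dstAccessMask: the copy reads the buffer
    VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, gpu_buffer, 0, VK_WHOLE_SIZE};
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                     VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 1, &pre_copy, 0, nullptr);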
Source/Core/VideoBackends/Vulkan/VKTexture.cpp

@@ -442,34 +442,35 @@ void VKTexture::OverrideImageLayout(VkImageLayout new_layout)
   m_layout = new_layout;
 }
 
-void VKTexture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout) const
+void VKTexture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout,
+                                   ComputeImageLayout new_compute_layout) const
 {
-  if (m_layout == new_layout)
-    return;
+  ASSERT(new_layout == VK_IMAGE_LAYOUT_GENERAL ||
+         new_compute_layout == ComputeImageLayout::Undefined);
+
+  ASSERT(new_layout != VK_IMAGE_LAYOUT_UNDEFINED);
 
   VkImageMemoryBarrier barrier = {
       VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // VkStructureType         sType
       nullptr,                                 // const void*             pNext
       0,                                       // VkAccessFlags           srcAccessMask
       0,                                       // VkAccessFlags           dstAccessMask
       m_layout,                                // VkImageLayout           oldLayout
       new_layout,                              // VkImageLayout           newLayout
       VK_QUEUE_FAMILY_IGNORED,                 // uint32_t                srcQueueFamilyIndex
       VK_QUEUE_FAMILY_IGNORED,                 // uint32_t                dstQueueFamilyIndex
       m_image,                                 // VkImage                 image
       {GetImageAspectForFormat(GetFormat()), 0, GetLevels(), 0,
        GetLayers()}  // VkImageSubresourceRange subresourceRange
   };
 
   // srcStageMask -> Stages that must complete before the barrier
   // dstStageMask -> Stages that must wait for after the barrier before beginning
   VkPipelineStageFlags srcStageMask, dstStageMask;
   switch (m_layout)
   {
   case VK_IMAGE_LAYOUT_UNDEFINED:
     // Layout undefined therefore contents undefined, and we don't care what happens to it.
     barrier.srcAccessMask = 0;
-    srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+    srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
     break;
 
   case VK_IMAGE_LAYOUT_PREINITIALIZED:
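Two things change in this hunk: the function gains a new_compute_layout parameter (with ASSERTs pinning down its valid combinations), and the srcStageMask for images coming from VK_IMAGE_LAYOUT_UNDEFINED becomes TOP_OF_PIPE. As a source stage, TOP_OF_PIPE expresses "depend on nothing earlier", which is exactly right when the old contents are undefined; BOTTOM_OF_PIPE as a source instead waits for all prior work to drain. In sketch form (the barrier struct and destination stage are assumed for illustration):

// srcStageMask semantics in a barrier (sketch):
//   TOP_OF_PIPE    as src: no prior work is waited on (cheapest; right for UNDEFINED)
//   BOTTOM_OF_PIPE as src: all prior work must reach the end of the pipe first
vkCmdPipelineBarrier(command_buffer,
                     VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,      // src: nothing to wait for
                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,  // dst: next user of the image
                     0, 0, nullptr, 0, nullptr, 1, &barrier);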
@@ -480,28 +481,26 @@ void VKTexture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout
 
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
     // Image was being used as a color attachment, so ensure all writes have completed.
-    barrier.srcAccessMask =
-        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+    barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
     srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
     break;
 
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
     // Image was being used as a depthstencil attachment, so ensure all writes have completed.
-    barrier.srcAccessMask =
-        VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+    barrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
     srcStageMask =
         VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
     break;
 
   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
     // Image was being used as a shader resource, make sure all reads have finished.
-    barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
+    barrier.srcAccessMask = 0;
     srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
     break;
 
   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
     // Image was being used as a copy source, ensure all reads have finished.
-    barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+    barrier.srcAccessMask = 0;
     srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
     break;
 
@@ -512,17 +511,29 @@ void VKTexture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout
     break;
 
   default:
-    srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+    barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
+    srcStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
     break;
   }
 
+  // If we were using a compute layout, the stages need to reflect that
+  switch (m_compute_layout)
+  {
+  case ComputeImageLayout::Undefined:
+    break;
+  case ComputeImageLayout::ReadOnly:
+    barrier.srcAccessMask = 0;
+    srcStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+    break;
+  case ComputeImageLayout::ReadWrite:
+  case ComputeImageLayout::WriteOnly:
+    barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+    srcStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+    break;
+  }
+
   switch (new_layout)
   {
   case VK_IMAGE_LAYOUT_UNDEFINED:
     barrier.dstAccessMask = 0;
     dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
     break;
 
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
     barrier.dstAccessMask =
         VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
@@ -557,34 +568,45 @@ void VKTexture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout
     break;
 
   default:
-    dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+    barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
+    dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
     break;
   }
 
   // If we were using a compute layout, the stages need to reflect that
-  switch (m_compute_layout)
+  switch (new_compute_layout)
   {
   case ComputeImageLayout::Undefined:
     break;
   case ComputeImageLayout::ReadOnly:
-    barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
-    srcStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+    dstStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
     break;
   case ComputeImageLayout::WriteOnly:
-    barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
-    srcStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+    barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+    dstStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
     break;
   case ComputeImageLayout::ReadWrite:
-    barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
-    srcStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+    dstStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
     break;
+  default:
+    dstStageMask = 0;
+    break;
   }
-  m_compute_layout = ComputeImageLayout::Undefined;
 
   m_layout = new_layout;
+  m_compute_layout = new_compute_layout;
 
   vkCmdPipelineBarrier(command_buffer, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1,
                        &barrier);
 }
 
+void VKTexture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout) const
+{
+  if (m_layout == new_layout)
+    return;
+
+  TransitionToLayout(command_buffer, new_layout, ComputeImageLayout::Undefined);
+}
+
 void VKTexture::TransitionToLayout(VkCommandBuffer command_buffer,
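After this change a single code path builds the barrier: the public graphics overload above and the compute overload (reduced in the next hunk) both forward into the combined three-argument function. A hypothetical pair of call sites (the texture pointer and command buffer are assumed, and ComputeImageLayout is taken to be nested in VKTexture, as the unqualified uses inside its member functions suggest):

// Graphics transition: the wrapper early-outs if the layout already matches.
texture->TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

// Compute transition: forwards as (VK_IMAGE_LAYOUT_GENERAL, ReadWrite), which the
// combined function's first ASSERT permits.
texture->TransitionToLayout(command_buffer, VKTexture::ComputeImageLayout::ReadWrite);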
@@ -594,100 +616,7 @@ void VKTexture::TransitionToLayout(VkCommandBuffer command_buffer,
   if (m_compute_layout == new_layout)
     return;
 
-  VkImageMemoryBarrier barrier = {
-      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // VkStructureType         sType
-      nullptr,                                 // const void*             pNext
-      0,                                       // VkAccessFlags           srcAccessMask
-      0,                                       // VkAccessFlags           dstAccessMask
-      m_layout,                                // VkImageLayout           oldLayout
-      VK_IMAGE_LAYOUT_GENERAL,                 // VkImageLayout           newLayout
-      VK_QUEUE_FAMILY_IGNORED,                 // uint32_t                srcQueueFamilyIndex
-      VK_QUEUE_FAMILY_IGNORED,                 // uint32_t                dstQueueFamilyIndex
-      m_image,                                 // VkImage                 image
-      {GetImageAspectForFormat(GetFormat()), 0, GetLevels(), 0,
-       GetLayers()}  // VkImageSubresourceRange subresourceRange
-  };
-
-  VkPipelineStageFlags srcStageMask, dstStageMask;
-  switch (m_layout)
-  {
-  case VK_IMAGE_LAYOUT_UNDEFINED:
-    // Layout undefined therefore contents undefined, and we don't care what happens to it.
-    barrier.srcAccessMask = 0;
-    srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
-    break;
-
-  case VK_IMAGE_LAYOUT_PREINITIALIZED:
-    // Image has been pre-initialized by the host, so ensure all writes have completed.
-    barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
-    srcStageMask = VK_PIPELINE_STAGE_HOST_BIT;
-    break;
-
-  case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
-    // Image was being used as a color attachment, so ensure all writes have completed.
-    barrier.srcAccessMask =
-        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
-    srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
-    break;
-
-  case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
-    // Image was being used as a depthstencil attachment, so ensure all writes have completed.
-    barrier.srcAccessMask =
-        VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-    srcStageMask =
-        VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
-    break;
-
-  case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
-    // Image was being used as a shader resource, make sure all reads have finished.
-    barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
-    srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
-    break;
-
-  case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
-    // Image was being used as a copy source, ensure all reads have finished.
-    barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
-    srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-    break;
-
-  case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
-    // Image was being used as a copy destination, ensure all writes have finished.
-    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
-    srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-    break;
-
-  default:
-    srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
-    break;
-  }
-
-  switch (new_layout)
-  {
-  case ComputeImageLayout::ReadOnly:
-    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
-    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
-    dstStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-    break;
-  case ComputeImageLayout::WriteOnly:
-    barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
-    barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
-    dstStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-    break;
-  case ComputeImageLayout::ReadWrite:
-    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
-    barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
-    dstStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-    break;
-  default:
-    dstStageMask = 0;
-    break;
-  }
-
-  m_layout = barrier.newLayout;
-  m_compute_layout = new_layout;
-
-  vkCmdPipelineBarrier(command_buffer, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1,
-                       &barrier);
+  TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_GENERAL, new_layout);
 }
 
 VKStagingTexture::VKStagingTexture(PrivateTag, StagingTextureType type, const TextureConfig& config,
@@ -877,7 +806,7 @@ void VKStagingTexture::CopyFromTextureToLinearImage(const VKTexture* src_tex,
   VkImageMemoryBarrier linear_image_barrier = {};
   linear_image_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
   linear_image_barrier.pNext = nullptr;
-  linear_image_barrier.srcAccessMask = 0;
+  linear_image_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
   linear_image_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
   linear_image_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
   linear_image_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
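This hunk goes the other way: the barrier gains a source access. Even though oldLayout is UNDEFINED (the previous contents are discarded), the linear image may have been written by an earlier copy, and that transfer write must still be made available before the next transfer touches the same memory; a layout discard does not synchronize anything by itself. In sketch form (image handle and single-subresource range assumed):

// Write-after-write on a reused linear image: the discard (oldLayout UNDEFINED)
// does not order the two copies; srcAccessMask = TRANSFER_WRITE does.
VkImageMemoryBarrier reuse_barrier = {};
reuse_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
reuse_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // prior copy into this image
reuse_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
reuse_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;         // contents may be discarded
reuse_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
reuse_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
reuse_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
reuse_barrier.image = linear_image;
reuse_barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                     VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &reuse_barrier);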
Source/Core/VideoBackends/Vulkan/VKTexture.h

@@ -73,6 +73,11 @@ public:
 private:
   bool CreateView(VkImageViewType type);
 
+  // If new_compute_layout is not ComputeImageLayout::Undefined, it takes precedence over
+  // new_layout.
+  void TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout,
+                          ComputeImageLayout new_compute_layout) const;
+
   VmaAllocation m_alloc;
   VkImage m_image;
   VkImageView m_view = VK_NULL_HANDLE;
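The header comment pins down the contract of the merged private function. A hypothetical illustration of the precedence rule, written as calls would appear inside VKTexture where the private overload is reachable (the command buffer is assumed):

// new_compute_layout wins when set: both calls end up in VK_IMAGE_LAYOUT_GENERAL,
// but only the second also records WriteOnly as the tracked compute state.
TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_GENERAL, ComputeImageLayout::Undefined);
TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_GENERAL, ComputeImageLayout::WriteOnly);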