
[dxvk] Don't use sparse zero buffer

Too broken on too many drivers.
Philip Rebohle 2025-03-23 15:58:49 +01:00
parent 9edb81dec9
commit 369651f08b
2 changed files with 10 additions and 47 deletions
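
For context (not part of the commit): the "zero buffer" is a buffer DXVK uses as a source of zeroes for transfer operations. The sparse path being removed relied on the Vulkan guarantee that, when residencyNonResidentStrict is reported, reads from unbound regions of a sparse-resident buffer return zero, so no memory ever had to be allocated or cleared. Below is a minimal raw-Vulkan sketch of that idea; the function name and parameters are illustrative and not DXVK code, and it assumes the logical device was created with the sparse features enabled.

    #include <vulkan/vulkan.h>

    // Illustrative only: a sparse-resident buffer that is never backed by memory.
    // When residencyNonResidentStrict is supported, reads from unbound sparse
    // regions are required to return zero, so the buffer acts as a zero source
    // without any allocation or clear. This is the approach the commit removes.
    VkBuffer createUnboundSparseZeroBuffer(VkPhysicalDevice physicalDevice,
                                           VkDevice device, VkDeviceSize size) {
      VkPhysicalDeviceProperties props = { };
      vkGetPhysicalDeviceProperties(physicalDevice, &props);

      VkPhysicalDeviceFeatures features = { };
      vkGetPhysicalDeviceFeatures(physicalDevice, &features);

      // Same three conditions the removed DXVK path checked.
      if (!props.sparseProperties.residencyNonResidentStrict
       || !features.sparseBinding
       || !features.sparseResidencyBuffer)
        return VK_NULL_HANDLE;

      VkBufferCreateInfo info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
      info.flags = VK_BUFFER_CREATE_SPARSE_BINDING_BIT
                 | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
      info.size  = size;
      info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
      info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

      VkBuffer buffer = VK_NULL_HANDLE;
      if (vkCreateBuffer(device, &info, nullptr, &buffer) != VK_SUCCESS)
        return VK_NULL_HANDLE;

      // Deliberately no vkQueueBindSparse call: the unbound ranges are what
      // return zero on strict implementations. Per the commit message, this
      // behaviour turned out to be too broken on too many drivers in practice.
      return buffer;
    }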


@@ -326,12 +326,6 @@ namespace dxvk {
     // Always enable robust buffer access
     enabledFeatures.core.features.robustBufferAccess = VK_TRUE;
-    // Always enable sparse residency if we can use it for efficient zero-initialization
-    if (m_deviceInfo.core.properties.sparseProperties.residencyNonResidentStrict) {
-      enabledFeatures.core.features.sparseBinding = m_deviceFeatures.core.features.sparseBinding;
-      enabledFeatures.core.features.sparseResidencyBuffer = m_deviceFeatures.core.features.sparseResidencyBuffer;
-    }
     // Always enable features used by the HUD
     enabledFeatures.core.features.multiDrawIndirect = VK_TRUE;
     enabledFeatures.vk11.shaderDrawParameters = VK_TRUE;
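
Not part of the diff: the block deleted above sits in DXVK's device-creation code, where optional Vulkan features have to be opted into explicitly. A hedged sketch of the equivalent plumbing in plain Vulkan follows; the helper name and signature are made up for illustration, and only the feature fields and the residencyNonResidentStrict gate come from the removed lines.

    #include <vulkan/vulkan.h>

    // Sketch of where the deleted flags used to take effect: sparse features are
    // opt-in and must be listed in the feature struct handed to vkCreateDevice,
    // which is what the removed block did through DXVK's own feature wrappers.
    VkResult createDeviceWithOptionalSparse(VkPhysicalDevice physicalDevice,
                                            const VkDeviceQueueCreateInfo* queueInfos,
                                            uint32_t queueCount,
                                            VkDevice* device) {
      VkPhysicalDeviceProperties props = { };
      vkGetPhysicalDeviceProperties(physicalDevice, &props);

      VkPhysicalDeviceFeatures supported = { };
      vkGetPhysicalDeviceFeatures(physicalDevice, &supported);

      VkPhysicalDeviceFeatures enabled = { };
      enabled.robustBufferAccess = VK_TRUE;  // always enabled, as in the context lines above

      // Only opt into sparse binding when unbound regions are guaranteed to read
      // as zero; without that guarantee the zero-buffer trick returns garbage.
      if (props.sparseProperties.residencyNonResidentStrict) {
        enabled.sparseBinding         = supported.sparseBinding;
        enabled.sparseResidencyBuffer = supported.sparseResidencyBuffer;
      }

      VkDeviceCreateInfo info = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
      info.queueCreateInfoCount = queueCount;
      info.pQueueCreateInfos    = queueInfos;
      info.pEnabledFeatures     = &enabled;

      return vkCreateDevice(physicalDevice, &info, nullptr, device);
    }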


@@ -7723,55 +7723,24 @@ namespace dxvk {
                    | VK_ACCESS_TRANSFER_READ_BIT;
     bufInfo.debugName = "Zero buffer";
-    // If supported by the device, create a large sparse buffer and keep it unmapped
-    // in order to avoid having to allocate and clear any actual memory. Some specific
-    // older AMD hardware seems to have some trouble with this, use the minimum subgroup
-    // size to disable the sparse path on anything that predates RDNA1.
-    bool noSparseWorkaroundAmd = m_device->properties().core.properties.vendorID == uint32_t(DxvkGpuVendor::Amd)
-      && m_device->properties().vk13.minSubgroupSize == 64u;
-    if (m_device->features().core.features.sparseBinding
-     && m_device->features().core.features.sparseResidencyBuffer
-     && m_device->properties().core.properties.sparseProperties.residencyNonResidentStrict
-     && !noSparseWorkaroundAmd) {
-      bufInfo.size = align<VkDeviceSize>(size, DxvkMemoryPool::MaxChunkSize);
-      bufInfo.flags |= VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
-    }
     m_zeroBuffer = m_device->createBuffer(bufInfo,
       VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
     DxvkBufferSliceHandle slice = m_zeroBuffer->getSliceHandle();
-    if (bufInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
-      DxvkSparseBufferBindKey key = { };
-      key.buffer = slice.handle;
-      key.offset = slice.offset;
-      key.size = slice.length;
+    // FillBuffer is allowed even on transfer queues. Execute it on the barrier
+    // command buffer to ensure that subsequent transfer commands can see it.
+    m_cmd->cmdFillBuffer(DxvkCmdBuffer::SdmaBarriers,
+      slice.handle, slice.offset, slice.length, 0);
-      DxvkResourceMemoryInfo memory = { };
-      memory.size = slice.length;
+    accessMemory(DxvkCmdBuffer::SdmaBarriers,
+      VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_WRITE_BIT,
+      VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_READ_BIT);
-      // We really should have a sparse queue, but if for whatever reason we don't,
-      // just assume that the buffer is mapped to null by default, which seems to
-      // be intended by the spec anyway
-      if (m_device->queues().sparse.queueHandle)
-        m_cmd->bindBufferMemory(key, memory);
-    } else {
-      // FillBuffer is allowed even on transfer queues. Execute it on the barrier
-      // command buffer to ensure that subsequent transfer commands can see it.
-      m_cmd->cmdFillBuffer(DxvkCmdBuffer::SdmaBarriers,
-        slice.handle, slice.offset, slice.length, 0);
-      accessMemory(DxvkCmdBuffer::SdmaBarriers,
-        VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_WRITE_BIT,
-        VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_READ_BIT);
+    if (m_device->hasDedicatedTransferQueue()) {
+      accessMemory(DxvkCmdBuffer::InitBarriers,
+        VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_NONE,
+        VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_READ_BIT);
+    }
-      if (m_device->hasDedicatedTransferQueue()) {
-        accessMemory(DxvkCmdBuffer::InitBarriers,
-          VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_NONE,
-          VK_PIPELINE_STAGE_2_TRANSFER_BIT, VK_ACCESS_2_TRANSFER_READ_BIT);
-      }
-    }
     m_cmd->track(m_zeroBuffer, DxvkAccess::Write);
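
Not part of the diff: what survives above is the plain, non-sparse path, i.e. create a real device-local buffer, clear it with a fill command, and make the write visible to later transfer reads. Below is a raw-Vulkan sketch of that sequence, assuming synchronization2 (core in Vulkan 1.3) and a buffer already bound to memory; the function name is illustrative, and DxvkCmdBuffer::SdmaBarriers / InitBarriers in the DXVK code above decide which internal command buffer and queue timeline the fill and the extra acquire barrier land on.

    #include <vulkan/vulkan.h>

    // Clear the whole buffer to zero with a transfer command, then make the
    // write visible to subsequent transfer reads with a buffer memory barrier.
    void fillZeroBuffer(VkCommandBuffer cmd, VkBuffer buffer, VkDeviceSize size) {
      // vkCmdFillBuffer is a transfer operation, so it is valid even on a
      // dedicated transfer queue, which is why DXVK records it on the SDMA
      // barrier command buffer.
      vkCmdFillBuffer(cmd, buffer, 0, size, 0u);

      VkBufferMemoryBarrier2 barrier = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2 };
      barrier.srcStageMask        = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
      barrier.srcAccessMask       = VK_ACCESS_2_TRANSFER_WRITE_BIT;
      barrier.dstStageMask        = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
      barrier.dstAccessMask       = VK_ACCESS_2_TRANSFER_READ_BIT;
      barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
      barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
      barrier.buffer              = buffer;
      barrier.offset              = 0;
      barrier.size                = size;

      VkDependencyInfo dep = { VK_STRUCTURE_TYPE_DEPENDENCY_INFO };
      dep.bufferMemoryBarrierCount = 1;
      dep.pBufferMemoryBarriers    = &barrier;

      vkCmdPipelineBarrier2(cmd, &dep);
    }

The additional accessMemory call on InitBarriers in the kept DXVK code appears to cover the case where the fill runs on a dedicated transfer queue and the zeroed contents also need to be visible to transfer reads recorded on the init command buffer.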