1
0
mirror of https://github.com/doitsujin/dxvk.git synced 2025-02-23 01:54:22 +01:00

[dxvk] Periodically defragment VRAM chunks

This commit is contained in:
Philip Rebohle 2024-10-18 10:46:04 +02:00
parent 18a06fd15b
commit eea9355654
2 changed files with 143 additions and 4 deletions

View File

@ -596,6 +596,21 @@ namespace dxvk {
if (likely(address >= 0))
return createAllocation(type, selectedPool, address, size, allocationInfo);
// If we're not allowed to allocate additional device memory, move on.
// Also do not try to revive any chunks marked for defragmentation since
// that would defeat the purpose.
if (allocationInfo.mode.test(DxvkAllocationMode::NoAllocation))
continue;
// Otherwise, if there are any chunks marked for defragmentation, stop
// that process and use any available memory for new allocations.
if (selectedPool.pageAllocator.reviveChunks()) {
address = selectedPool.alloc(size, requirements.alignment);
if (address >= 0)
return createAllocation(type, selectedPool, address, size, allocationInfo);
}
// If the memory type is host-visible, try to find an existing chunk
// in the other memory pool of the memory type and move over.
if (type.properties.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
@ -623,10 +638,6 @@ namespace dxvk {
}
}
// If we're not allowed to allocate device memory, move on.
if (allocationInfo.mode.test(DxvkAllocationMode::NoAllocation))
continue;
// If the allocation is very large, use a dedicated allocation instead
// of creating a new chunk. This way we avoid excessive fragmentation,
especially when multiple such resources are created at once.
@ -2026,6 +2037,118 @@ namespace dxvk {
}
// Queues movable resources living in chunks that have been marked for
// defragmentation (killed, but still holding live pages) for relocation
// into other chunks of the same pool.
void DxvkMemoryAllocator::moveDefragChunks(
DxvkMemoryType& type) {
auto& pool = type.devicePool;
// Relocated copies must neither allocate new device memory nor fall
// back to another memory type; otherwise defragmentation would grow
// the very footprint it is trying to shrink.
DxvkAllocationModes mode(
DxvkAllocationMode::NoAllocation,
DxvkAllocationMode::NoFallback);
// Deferred lock: only taken once we actually encounter a movable
// allocation, so pools with nothing to move never touch the mutex.
std::unique_lock lock(m_resourceMutex, std::defer_lock);
for (uint32_t i = 0; i < pool.chunks.size(); i++) {
// Skip chunks with no backing memory, fully empty chunks, and chunks
// still available for allocation — only killed-but-populated chunks
// are defragmentation candidates.
if (!pool.chunks[i].memory.memory
|| !pool.pageAllocator.pagesUsed(i)
|| pool.pageAllocator.chunkIsAvailable(i))
continue;
// Iterate over the chunk's allocation list and look up resources
for (auto a = pool.chunks[i].allocationList; a; a = a->m_nextInChunk) {
// Allocations not flagged as movable (e.g. with externally observed
// addresses) must stay where they are.
if (!a->m_flags.test(DxvkAllocationFlag::CanMove))
continue;
// Acquire the resource-map lock on first use and hold it for the
// remainder of the scan.
if (!lock)
lock.lock();
// If we can't find the resource by its cookie, it has probably
// already been destroyed. This is fine since the allocation will
// likely get freed soon anyway.
auto entry = m_resourceMap.find(a->m_resourceCookie);
if (entry == m_resourceMap.end())
continue;
// Same if there are no external references. There is a small chance
// that we pick up a newly created resource here that has no public
// references yet, but skipping that will not affect correctness.
auto resource = entry->second->tryAcquire();
if (!resource)
continue;
// Acquired the resource, add it to the relocation list.
m_relocations.addResource(std::move(resource), mode);
}
}
}
// Selects at most one chunk of the given memory type's device pool and
// marks it as dead so that a later worker iteration can relocate its
// live resources and ultimately free the chunk.
void DxvkMemoryAllocator::pickDefragChunk(
DxvkMemoryType& type) {
auto& pool = type.devicePool;
// Defragmentation is only worth the relocation traffic if the heap is
// close to exceeding its budget, or if the amount of allocated memory
// significantly exceeds what is actually in use.
auto heapStats = getMemoryStats(type.heap->index);
bool overBudget = heapStats.memoryAllocated + pool.nextChunkSize > heapStats.memoryBudget;
bool wasteful = heapStats.memoryAllocated > (pool.nextChunkSize + (heapStats.memoryUsed * 7u) / 6u);
if (!overBudget && !wasteful)
return;
// Scan for the live chunk with the fewest pages in use, since that one
// is the cheapest to drain. Chunks that are empty or already dead get
// killed right away so that resources don't keep bouncing between
// multiple otherwise unused chunks.
uint32_t bestChunk = 0u;
uint32_t bestPages = 0u;
for (uint32_t i = 0; i < pool.chunks.size(); i++) {
if (!pool.chunks[i].memory.memory)
continue;
uint32_t pagesUsed = pool.pageAllocator.pagesUsed(i);
if (!pagesUsed || !pool.pageAllocator.chunkIsAvailable(i)) {
pool.pageAllocator.killChunk(i);
continue;
}
if (!bestPages || pagesUsed < bestPages) {
bestChunk = i;
bestPages = pagesUsed;
}
}
// No suitable candidate found, nothing to do.
if (!bestPages)
return;
// Estimate the free space available in the remaining chunks. Demanding
// twice the page count we intend to move is only a heuristic — it does
// not guarantee that relocation succeeds, but makes it reasonably likely.
uint32_t freePages = 0u;
for (uint32_t i = 0; i < pool.chunks.size(); i++) {
uint32_t pagesUsed = pool.pageAllocator.pagesUsed(i);
uint32_t pageCount = pool.pageAllocator.pageCount(i);
if (pagesUsed && pool.pageAllocator.chunkIsAvailable(i) && i != bestChunk)
freePages += pageCount - pagesUsed;
}
if (freePages < 2u * bestPages)
return;
// Mark the chosen chunk as dead. Unless it gets revived because the
// game starts loading more resources, the next worker iteration will
// queue all of its live resources for relocation.
pool.pageAllocator.killChunk(bestChunk);
}
void DxvkMemoryAllocator::runWorker() {
env::setThreadName("dxvk-memory");
@ -2065,6 +2188,16 @@ namespace dxvk {
if (m_memTypes[i].sharedCache)
m_memTypes[i].sharedCache->cleanupUnusedFromLockedAllocator(currentTime);
}
// Periodically defragment device-local memory types. We cannot
// do anything about mapped allocations since we rely on pointer
// stability there.
for (uint32_t i = 0; i < m_memTypeCount; i++) {
if (m_memTypes[i].properties.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
moveDefragChunks(m_memTypes[i]);
pickDefragChunk(m_memTypes[i]);
}
}
}
// Ensure adapter allocation statistics are consistent

View File

@ -1365,6 +1365,12 @@ namespace dxvk {
void updateMemoryHeapBudgets();
void moveDefragChunks(
DxvkMemoryType& type);
void pickDefragChunk(
DxvkMemoryType& type);
void runWorker();
};