Mirror of https://github.com/doitsujin/dxvk.git
[dxvk] Implement basic pool balancing for shared allocation cache
This makes the entire cache available to all allocation sizes, rather than having a fixed-size pool for every allocation size. This improves the hit rate in games that primarily use one constant buffer size.
parent 428b1087a0
commit 9cf72b5b19
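Before the diff itself, the idea in outline: instead of each pool owning a fixed array of cached allocation lists, all pools now share a single array of list slots, each pool chains its slots through intrusive indices, and a pool that finds the cache full steals a slot from the currently largest pool. The following is a minimal standalone sketch of that structure; the names (SharedCacheModel, CachedChain, kPoolCount, kListCount) are illustrative stand-ins rather than DXVK identifiers, and the real code additionally guards every operation with a mutex and tracks drain timestamps and cache-size statistics.

// Minimal model of the shared list-of-lists cache; illustrative names only.
#include <array>
#include <cstdint>

struct CachedChain { };  // stand-in for a chain of cached allocations

class SharedCacheModel {
public:
  static constexpr uint32_t kPoolCount = 16u;               // arbitrary for the sketch
  static constexpr uint32_t kListCount = kPoolCount * 12u;  // slots shared by all pools

  SharedCacheModel() {
    // Chain every slot into one free list; any pool may claim any slot.
    for (uint32_t i = 0u; i < kListCount - 1u; i++)
      m_lists[i].next = int32_t(i + 1u);
    m_nextList = 0;
  }

  // Store a chain in the given pool. Returns a chain the caller must free:
  // the input itself if the cache is full and this pool is already (one of)
  // the largest, or a chain evicted from the currently largest pool.
  CachedChain* store(uint32_t poolIndex, CachedChain* chain) {
    auto& pool = m_pools[poolIndex];

    if (m_nextList < 0) {
      // Cache full: locate the largest pool and steal its first slot.
      uint32_t largest = 0u;
      for (uint32_t i = 1u; i < kPoolCount; i++) {
        if (m_pools[i].listCount > m_pools[largest].listCount)
          largest = i;
      }

      // Give up if this pool is already largest, to avoid ping-pong.
      if (m_pools[largest].listCount == pool.listCount)
        return chain;

      auto& victim = m_pools[largest];
      int32_t slot = victim.listIndex;
      CachedChain* evicted = m_lists[slot].head;
      victim.listIndex = m_lists[slot].next;
      victim.listCount -= 1u;

      m_lists[slot].head = chain;            // repurpose the stolen slot
      m_lists[slot].next = pool.listIndex;
      pool.listIndex = slot;
      pool.listCount += 1u;
      return evicted;
    }

    // Free slot available: claim it for this pool.
    int32_t slot = m_nextList;
    m_nextList = m_lists[slot].next;
    m_lists[slot].head = chain;
    m_lists[slot].next = pool.listIndex;
    pool.listIndex = slot;
    pool.listCount += 1u;
    return nullptr;
  }

private:
  struct List {
    CachedChain* head = nullptr;
    int32_t      next = -1;   // next slot in the same pool, or next free slot
  };

  struct Pool {
    int32_t  listIndex = -1;  // head of this pool's slot chain
    uint32_t listCount = 0u;
  };

  std::array<Pool, kPoolCount> m_pools = { };
  std::array<List, kListCount> m_lists = { };
  int32_t m_nextList = -1;    // head of the unallocated-slot list
};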
src/dxvk/dxvk_memory.cpp

@@ -266,6 +266,12 @@ namespace dxvk {
       VkDeviceSize size = DxvkLocalAllocationCache::computeAllocationSize(i);
       m_freeLists[i].capacity = DxvkLocalAllocationCache::computePreferredAllocationCount(size);
     }
+
+    // Initialize unallocated list of lists
+    for (uint32_t i = 0u; i < m_lists.size() - 1u; i++)
+      m_lists[i].next = i + 1;
+
+    m_nextList = 0;
   }
 
 
@@ -273,10 +279,8 @@
     for (const auto& freeList : m_freeLists)
       m_allocator->freeCachedAllocations(freeList.head);
 
-    for (const auto& pool : m_pools) {
-      for (auto list : pool.lists)
-        m_allocator->freeCachedAllocations(list);
-    }
+    for (const auto& list : m_lists)
+      m_allocator->freeCachedAllocations(list.head);
   }
 
 
@@ -289,8 +293,9 @@
     m_numRequests += 1u;
 
     auto& pool = m_pools[poolIndex];
+    int32_t listIndex = pool.listIndex;
 
-    if (!pool.listCount) {
+    if (listIndex < 0) {
       m_numMisses += 1u;
       return nullptr;
     }
@@ -298,8 +303,17 @@
     if (!(--pool.listCount))
       pool.drainTime = high_resolution_clock::now();
 
+    // Extract allocations and mark list as free
+    DxvkResourceAllocation* allocation = m_lists[listIndex].head;
+    pool.listIndex = m_lists[listIndex].next;
+
+    m_lists[listIndex].head = nullptr;
+    m_lists[listIndex].next = m_nextList;
+
+    m_nextList = listIndex;
+
     m_cacheSize -= PoolCapacityInBytes;
-    return std::exchange(pool.lists[pool.listCount], nullptr);
+    return allocation;
   }
 
 
@@ -326,17 +340,53 @@
     { std::unique_lock poolLock(m_poolMutex);
       auto& pool = m_pools[poolIndex];
 
-      if (likely(pool.listCount < PoolSize)) {
-        pool.lists[pool.listCount++] = allocation;
+      if (unlikely(m_nextList < 0)) {
+        // Cache is currently full, see if we can steal a list from
+        // the largest pool. This automatically balances pool sizes
+        // under cache pressure.
+        uint32_t largestPoolIndex = 0;
+
+        for (uint32_t i = 1; i < PoolCount; i++) {
+          if (m_pools[i].listCount > m_pools[largestPoolIndex].listCount)
+            largestPoolIndex = i;
+        }
+
+        // If the current pool is already (one of) the largest, give up
+        // and free the entire list to avoid pools playing ping-pong.
+        if (m_pools[largestPoolIndex].listCount == pool.listCount)
+          return allocation;
+
+        // Move first list of largest pool to current pool and free any
+        // allocations associated with it.
+        auto& largestPool = m_pools[largestPoolIndex];
+        int32_t listIndex = largestPool.listIndex;
+
+        DxvkResourceAllocation* result = m_lists[listIndex].head;
+        largestPool.listIndex = m_lists[listIndex].next;
+        largestPool.listCount -= 1u;
+
+        m_lists[listIndex].head = allocation;
+        m_lists[listIndex].next = pool.listIndex;
+
+        pool.listIndex = listIndex;
+        pool.listCount += 1u;
+        return result;
+      } else {
+        // Otherwise, allocate a fresh list and assign it to the pool
+        int32_t listIndex = m_nextList;
+        m_nextList = m_lists[listIndex].next;
+
+        m_lists[listIndex].head = allocation;
+        m_lists[listIndex].next = pool.listIndex;
+
+        pool.listIndex = listIndex;
+        pool.listCount += 1u;
 
         if ((m_cacheSize += PoolCapacityInBytes) > m_maxCacheSize)
           m_maxCacheSize = m_cacheSize;
 
         return nullptr;
       }
-
-      // If the pool is full, destroy the entire free list
-      return allocation;
     }
   }
 
@@ -361,10 +411,24 @@
     std::unique_lock poolLock(m_poolMutex);
 
     for (auto& pool : m_pools) {
-      if (pool.listCount && time - pool.drainTime >= std::chrono::seconds(1u)) {
-        m_allocator->freeCachedAllocationsLocked(std::exchange(
-          pool.lists[--pool.listCount], nullptr));
+      int32_t listIndex = pool.listIndex;
+
+      if (listIndex < 0)
+        continue;
+
+      if (time - pool.drainTime >= std::chrono::seconds(1u)) {
+        m_allocator->freeCachedAllocationsLocked(m_lists[listIndex].head);
+
+        pool.listIndex = m_lists[listIndex].next;
+        pool.listCount -= 1u;
         pool.drainTime = time;
+
+        m_lists[listIndex].head = nullptr;
+        m_lists[listIndex].next = m_nextList;
+
+        m_nextList = listIndex;
+
+        m_cacheSize -= PoolCapacityInBytes;
       }
     }
   }
src/dxvk/dxvk_memory.h

@@ -884,7 +884,7 @@
    */
   class DxvkSharedAllocationCache {
     constexpr static uint32_t PoolCount = DxvkLocalAllocationCache::PoolCount;
-    constexpr static uint32_t PoolSize = env::is32BitHostPlatform() ? 6u : 12u;
+    constexpr static uint32_t PoolSize = PoolCount * (env::is32BitHostPlatform() ? 6u : 12u);
 
     constexpr static VkDeviceSize PoolCapacityInBytes = DxvkLocalAllocationCache::PoolCapacityInBytes;
 
@@ -941,9 +941,14 @@
       DxvkResourceAllocation* head = nullptr;
     };
 
+    struct List {
+      DxvkResourceAllocation* head = nullptr;
+      int32_t next = -1;
+    };
+
     struct Pool {
-      uint32_t listCount = 0u;
-      std::array<DxvkResourceAllocation*, PoolSize> lists = { };
+      int32_t listIndex = -1;
+      uint32_t listCount = 0u;
       high_resolution_clock::time_point drainTime = { };
     };
 
@@ -956,6 +961,8 @@
     alignas(CACHE_LINE_SIZE)
     dxvk::mutex m_poolMutex;
     std::array<Pool, PoolCount> m_pools = { };
+    std::array<List, PoolSize> m_lists = { };
+    int32_t m_nextList = -1;
 
     uint32_t m_numRequests = 0u;
     uint32_t m_numMisses = 0u;
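As a usage illustration of the hypothetical SharedCacheModel sketch above (same translation unit assumed), filling every shared slot from one pool and then storing into another shows the balancing behaviour the hunks implement: the new pool claims a slot stolen from the largest pool, and the evicted chain is handed back to the caller to free.

// Hypothetical usage of the SharedCacheModel sketch above.
#include <cassert>

int main() {
  SharedCacheModel cache;
  CachedChain a, b;

  // Fill every shared list slot from pool 0; a free slot is found each time.
  for (uint32_t i = 0u; i < SharedCacheModel::kListCount; i++) {
    CachedChain* rejected = cache.store(0u, &a);
    assert(rejected == nullptr);
    (void)rejected;
  }

  // The cache is now full, so pool 1 steals a slot from pool 0 instead
  // of having its chain rejected outright.
  CachedChain* evicted = cache.store(1u, &b);
  assert(evicted == &a);  // chain evicted from pool 0; the caller frees it
  (void)evicted;
  return 0;
}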