diff --git a/src/dxvk/dxvk_allocator.cpp b/src/dxvk/dxvk_allocator.cpp
index 97a0fdebd..8ddab3be3 100644
--- a/src/dxvk/dxvk_allocator.cpp
+++ b/src/dxvk/dxvk_allocator.cpp
@@ -32,6 +32,13 @@ namespace dxvk {
     while (index--) {
       PageRange entry = m_freeList[index];
 
+      // The chunk index is the same regardless of alignment.
+      // Skip chunk if it does not accept new allocations.
+      uint32_t chunkIndex = entry.index >> ChunkPageBits;
+
+      if (unlikely(m_chunks[chunkIndex].disabled))
+        continue;
+
       if (likely(!(entry.index & (alignment - 1u)))) {
         // If the current free range is sufficiently aligned, we can use
         // it as-is and simply modify the remaining free list entry.
@@ -42,7 +49,6 @@ namespace dxvk {
 
         insertFreeRange(entry, index);
 
-        uint32_t chunkIndex = pageIndex >> ChunkPageBits;
         m_chunks[chunkIndex].pagesUsed += count;
         return pageIndex;
       } else {
@@ -68,7 +74,6 @@ namespace dxvk {
         if (nextRange.count)
           insertFreeRange(nextRange, -1);
 
-        uint32_t chunkIndex = pageIndex >> ChunkPageBits;
         m_chunks[chunkIndex].pagesUsed += count;
         return pageIndex;
 
@@ -163,6 +168,7 @@ namespace dxvk {
     chunk.pageCount = size / PageSize;
     chunk.pagesUsed = 0u;
     chunk.nextChunk = -1;
+    chunk.disabled = false;
 
     PageRange pageRange = { };
     pageRange.index = uint32_t(chunkIndex) << ChunkPageBits;
@@ -179,6 +185,7 @@ namespace dxvk {
     chunk.pageCount = 0u;
     chunk.pagesUsed = 0u;
     chunk.nextChunk = std::exchange(m_freeChunk, int32_t(chunkIndex));
+    chunk.disabled = true;
 
     uint32_t pageIndex = chunkIndex << ChunkPageBits;
 
@@ -190,6 +197,30 @@ namespace dxvk {
   }
 
 
+  void DxvkPageAllocator::killChunk(uint32_t chunkIndex) {
+    m_chunks[chunkIndex].disabled = true;
+  }
+
+
+  void DxvkPageAllocator::reviveChunk(uint32_t chunkIndex) {
+    m_chunks[chunkIndex].disabled = false;
+  }
+
+
+  uint32_t DxvkPageAllocator::reviveChunks() {
+    uint32_t count = 0u;
+
+    for (uint32_t i = 0; i < m_chunks.size(); i++) {
+      if (m_chunks[i].pageCount && m_chunks[i].disabled) {
+        m_chunks[i].disabled = false;
+        count += 1u;
+      }
+    }
+
+    return count;
+  }
+
+
   void DxvkPageAllocator::getPageAllocationMask(uint32_t chunkIndex, uint32_t* pageMask) const {
     // Initialize bit mask with all ones
     const auto& chunk = m_chunks[chunkIndex];
diff --git a/src/dxvk/dxvk_allocator.h b/src/dxvk/dxvk_allocator.h
index ea1357f83..6a402903b 100644
--- a/src/dxvk/dxvk_allocator.h
+++ b/src/dxvk/dxvk_allocator.h
@@ -75,6 +75,16 @@ namespace dxvk {
       return m_chunks.at(chunkIndex).pagesUsed;
     }
 
+    /**
+     * \brief Checks whether a chunk is alive
+     *
+     * \param [in] chunkIndex Chunk index
+     * \returns \c true if chunk can be used
+     */
+    bool chunkIsAvailable(uint32_t chunkIndex) const {
+      return !m_chunks.at(chunkIndex).disabled;
+    }
+
     /**
      * \brief Allocates given number of bytes from the pool
      *
@@ -131,6 +141,34 @@ namespace dxvk {
      */
     void removeChunk(uint32_t chunkIndex);
 
+    /**
+     * \brief Disables a chunk
+     *
+     * Makes an entire chunk unavailable for subsequent allocations.
+     * This can be useful when moving allocations out of that chunk
+     * in an attempt to free some memory.
+     * \param [in] chunkIndex Chunk index
+     */
+    void killChunk(uint32_t chunkIndex);
+
+    /**
+     * \brief Re-enables a chunk
+     *
+     * Makes a previously disabled chunk available for
+     * subsequent allocations again.
+     * \param [in] chunkIndex Chunk index
+     */
+    void reviveChunk(uint32_t chunkIndex);
+
+    /**
+     * \brief Re-enables all disabled chunks
+     *
+     * Makes all disabled chunks available for allocations again.
+     * Should be used before allocating new chunk memory.
+     * \returns Number of chunks re-enabled
+     */
+    uint32_t reviveChunks();
+
     /**
      * \brief Queries page allocation mask
      *
@@ -149,6 +187,7 @@ namespace dxvk {
       uint32_t pageCount = 0u;
      uint32_t pagesUsed = 0u;
       int32_t nextChunk = -1;
+      bool disabled = false;
     };
 
     struct PageRange {
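
Below is a minimal usage sketch of the new kill/revive API, along the lines the killChunk documentation suggests (moving allocations out of a chunk in an attempt to free memory). relocateChunkAllocations() and tryEvacuateChunk() are hypothetical names used only for illustration; they are not part of this patch.

  #include "dxvk_allocator.h"

  namespace dxvk {

    // Hypothetical helper, not part of this patch: moves all live
    // allocations out of the given chunk and returns true on success.
    bool relocateChunkAllocations(DxvkPageAllocator& allocator, uint32_t chunkIndex);

    // Tries to empty a chunk so that its memory can eventually be freed.
    bool tryEvacuateChunk(DxvkPageAllocator& allocator, uint32_t chunkIndex) {
      if (!allocator.chunkIsAvailable(chunkIndex))
        return false;

      // Stop new allocations from landing in this chunk while its
      // existing allocations are being moved elsewhere.
      allocator.killChunk(chunkIndex);

      if (!relocateChunkAllocations(allocator, chunkIndex)) {
        // Relocation failed, make the chunk usable again
        allocator.reviveChunk(chunkIndex);
        return false;
      }

      return true;
    }

  }

Before growing the pool with another chunk, reviveChunks() re-enables everything that was parked this way, the idea being that memory which could not be freed should be reused before more is allocated.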