1
0
mirror of https://github.com/doitsujin/dxvk.git synced 2025-03-15 07:29:17 +01:00

[util] Improve Spinlock implementation

Addresses two potential issues:
- Our spinlocks are almost never contested, however the code generated
  is not ideal without the likely/unlikely hints.
- In the unlikely event that a spinlock is in fact contested, we'd yield
  immediately, even though most of the time we'd only have to wait for
  a few hundred cycles at most.

Replacing our spinlocks with std::mutex is not an option due to much
higher locking overhead in the uncontested case; doing so reduces
performance significantly for the buffer slice and pipeline locks.
This commit is contained in:
Philip Rebohle 2020-01-07 17:26:11 +01:00
parent 9541aef0b8
commit 8b9c03ce76
No known key found for this signature in database
GPG Key ID: C8CC613427A31C99

View File

@@ -13,7 +13,8 @@ namespace dxvk::sync {
    * in case the structure is not likely contested.
    */
   class Spinlock {
+    constexpr static uint32_t SpinCount = 200;
 
   public:
 
     Spinlock() { }
@@ -23,17 +23,23 @@ namespace dxvk::sync {
     Spinlock& operator = (const Spinlock&) = delete;
 
     void lock() {
-      while (!this->try_lock())
-        dxvk::this_thread::yield();
+      while (unlikely(!try_lock())) {
+        for (uint32_t i = 1; i < SpinCount; i++) {
+          if (try_lock())
+            return;
+        }
+
+        dxvk::this_thread::yield();
+      }
     }
 
     void unlock() {
       m_lock.store(0, std::memory_order_release);
     }
 
     bool try_lock() {
-      return !m_lock.load()
-          && !m_lock.exchange(1, std::memory_order_acquire);
+      return likely(!m_lock.load())
+          && likely(!m_lock.exchange(1, std::memory_order_acquire));
     }
 
   private: