2018-03-03 20:59:17 +01:00
|
|
|
#include "d3d11_cmdlist.h"
|
2018-01-20 13:22:44 +01:00
|
|
|
#include "d3d11_context_imm.h"
|
|
|
|
#include "d3d11_device.h"
|
2021-10-22 17:22:41 +02:00
|
|
|
#include "d3d11_fence.h"
|
2018-01-20 13:22:44 +01:00
|
|
|
#include "d3d11_texture.h"
|
|
|
|
|
2022-08-21 19:05:27 +00:00
|
|
|
#include "../util/util_win32_compat.h"
|
|
|
|
|
2019-05-09 16:56:35 +02:00
|
|
|
// Tuning constants for the implicit-flush heuristics.
// NOTE(review): their consumers are not visible in this chunk —
// presumably used by ConsiderFlush/ExecuteFlush; confirm in the
// flush-tracking implementation.

// Minimum time between implicit flushes, in microseconds
constexpr static uint32_t MinFlushIntervalUs = 750;
// Per-submission increment added to the flush interval
constexpr static uint32_t IncFlushIntervalUs = 250;
// Maximum number of in-flight submissions before forcing a flush
constexpr static uint32_t MaxPendingSubmits = 6;
|
2018-07-15 19:45:40 +02:00
|
|
|
|
2018-01-20 13:22:44 +01:00
|
|
|
namespace dxvk {
|
|
|
|
|
|
|
|
  /**
   * \brief Creates the immediate context for a D3D11 device
   *
   * Sets up the CS thread with a primary DXVK context, configures
   * barrier relaxation options, and begins command list recording.
   *
   * \param [in] pParent D3D11 device that owns this context
   * \param [in] Device Underlying DXVK device
   */
  D3D11ImmediateContext::D3D11ImmediateContext(
          D3D11Device*    pParent,
    const Rc<DxvkDevice>& Device)
  : D3D11CommonContext<D3D11ImmediateContext>(pParent, Device, 0, DxvkCsChunkFlag::SingleUse),
    m_csThread(Device, Device->createContext(DxvkContextType::Primary)),
    m_maxImplicitDiscardSize(pParent->GetOptions()->maxImplicitDiscardSize),
    m_submissionFence(new sync::CallbackFence()),
    m_multithread(this, false, pParent->GetOptions()->enableContextLock),
    m_videoContext(this, Device) {
    // Start recording the first command list and apply the
    // user-configured barrier workarounds on the CS timeline
    EmitCs([
      cDevice                 = m_device,
      cRelaxedBarriers        = pParent->GetOptions()->relaxedBarriers,
      cIgnoreGraphicsBarriers = pParent->GetOptions()->ignoreGraphicsBarriers
    ] (DxvkContext* ctx) {
      ctx->beginRecording(cDevice->createCommandList());

      DxvkBarrierControlFlags barrierControl;

      if (cRelaxedBarriers)
        barrierControl.set(DxvkBarrierControl::IgnoreWriteAfterWrite);

      if (cIgnoreGraphicsBarriers)
        barrierControl.set(DxvkBarrierControl::IgnoreGraphicsBarriers);

      ctx->setBarrierControl(barrierControl);
    });

    // Initialize all API state to its documented defaults
    ClearState();
  }
|
|
|
|
|
|
|
|
|
|
|
|
  /**
   * \brief Destroys the immediate context
   *
   * Flushes outstanding work and waits for both the CS thread
   * and the GPU to go idle before tearing anything down.
   */
  D3D11ImmediateContext::~D3D11ImmediateContext() {
    // Avoids hanging when in this state, see comment
    // in DxvkDevice::~DxvkDevice.
    if (this_thread::isInModuleDetachment())
      return;

    // Submit any remaining work, then drain the CS thread
    // and the GPU so no commands reference us afterwards
    ExecuteFlush(GpuFlushType::ExplicitFlush, nullptr);
    SynchronizeCsThread(DxvkCsThread::SynchronizeAll);
    SynchronizeDevice();
  }
|
|
|
|
|
|
|
|
|
2021-05-06 17:44:30 +02:00
|
|
|
HRESULT STDMETHODCALLTYPE D3D11ImmediateContext::QueryInterface(REFIID riid, void** ppvObject) {
|
2022-08-24 12:14:43 +02:00
|
|
|
if (riid == __uuidof(ID3D10Multithread)) {
|
|
|
|
*ppvObject = ref(&m_multithread);
|
|
|
|
return S_OK;
|
|
|
|
}
|
|
|
|
|
2021-05-06 17:44:30 +02:00
|
|
|
if (riid == __uuidof(ID3D11VideoContext)) {
|
|
|
|
*ppvObject = ref(&m_videoContext);
|
|
|
|
return S_OK;
|
|
|
|
}
|
|
|
|
|
2022-08-03 17:09:30 +02:00
|
|
|
return D3D11CommonContext<D3D11ImmediateContext>::QueryInterface(riid, ppvObject);
|
2021-05-06 17:44:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-06-08 12:29:24 +02:00
|
|
|
HRESULT STDMETHODCALLTYPE D3D11ImmediateContext::GetData(
|
|
|
|
ID3D11Asynchronous* pAsync,
|
|
|
|
void* pData,
|
|
|
|
UINT DataSize,
|
|
|
|
UINT GetDataFlags) {
|
2019-06-16 19:24:44 +02:00
|
|
|
if (!pAsync || (DataSize && !pData))
|
2018-10-17 17:19:07 +02:00
|
|
|
return E_INVALIDARG;
|
|
|
|
|
2019-06-16 19:24:44 +02:00
|
|
|
// Check whether the data size is actually correct
|
|
|
|
if (DataSize && DataSize != pAsync->GetDataSize())
|
2018-06-08 12:29:24 +02:00
|
|
|
return E_INVALIDARG;
|
|
|
|
|
2019-06-16 19:24:44 +02:00
|
|
|
// Passing a non-null pData is actually allowed if
|
|
|
|
// DataSize is 0, but we should ignore that pointer
|
|
|
|
pData = DataSize ? pData : nullptr;
|
|
|
|
|
2018-10-17 17:19:07 +02:00
|
|
|
// Get query status directly from the query object
|
2019-05-09 16:38:07 +02:00
|
|
|
auto query = static_cast<D3D11Query*>(pAsync);
|
|
|
|
HRESULT hr = query->GetData(pData, GetDataFlags);
|
2018-06-08 13:11:24 +02:00
|
|
|
|
|
|
|
// If we're likely going to spin on the asynchronous object,
|
2019-07-15 16:24:09 +02:00
|
|
|
// flush the context so that we're keeping the GPU busy.
|
2019-05-09 16:38:07 +02:00
|
|
|
if (hr == S_FALSE) {
|
2019-07-15 16:24:09 +02:00
|
|
|
// Don't mark the event query as stalling if the app does
|
|
|
|
// not intend to spin on it. This reduces flushes on End.
|
|
|
|
if (!(GetDataFlags & D3D11_ASYNC_GETDATA_DONOTFLUSH))
|
|
|
|
query->NotifyStall();
|
|
|
|
|
|
|
|
// Ignore the DONOTFLUSH flag here as some games will spin
|
|
|
|
// on queries without ever flushing the context otherwise.
|
2023-01-14 14:31:26 +01:00
|
|
|
D3D10DeviceLock lock = LockContext();
|
|
|
|
ConsiderFlush(GpuFlushType::ImplicitSynchronization);
|
2019-05-09 16:38:07 +02:00
|
|
|
}
|
2018-06-08 12:29:24 +02:00
|
|
|
|
2018-06-08 13:11:24 +02:00
|
|
|
return hr;
|
2018-06-08 12:29:24 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-11-02 13:10:59 +01:00
|
|
|
void STDMETHODCALLTYPE D3D11ImmediateContext::Begin(ID3D11Asynchronous* pAsync) {
|
|
|
|
D3D10DeviceLock lock = LockContext();
|
|
|
|
|
|
|
|
if (unlikely(!pAsync))
|
|
|
|
return;
|
|
|
|
|
2019-11-14 22:57:30 +01:00
|
|
|
auto query = static_cast<D3D11Query*>(pAsync);
|
2019-11-02 13:10:59 +01:00
|
|
|
|
2019-11-02 13:24:36 +01:00
|
|
|
if (unlikely(!query->DoBegin()))
|
2019-11-02 13:10:59 +01:00
|
|
|
return;
|
|
|
|
|
2019-11-14 22:57:30 +01:00
|
|
|
EmitCs([cQuery = Com<D3D11Query, false>(query)]
|
2019-11-02 13:10:59 +01:00
|
|
|
(DxvkContext* ctx) {
|
|
|
|
cQuery->Begin(ctx);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-05-09 16:38:07 +02:00
|
|
|
  /**
   * \brief Ends an asynchronous query
   *
   * Records the end operation on the CS timeline and applies
   * flush heuristics for queries that the app is known to
   * spin on.
   */
  void STDMETHODCALLTYPE D3D11ImmediateContext::End(ID3D11Asynchronous* pAsync) {
    D3D10DeviceLock lock = LockContext();

    if (unlikely(!pAsync))
      return;

    auto query = static_cast<D3D11Query*>(pAsync);

    if (unlikely(!query->DoEnd())) {
      // DoEnd failed, so record an implicit Begin first.
      // NOTE(review): presumably this pairs the End with a Begin
      // on the CS timeline for queries ended without an explicit
      // Begin — confirm against D3D11Query::DoEnd semantics.
      EmitCs([cQuery = Com<D3D11Query, false>(query)]
      (DxvkContext* ctx) {
        cQuery->Begin(ctx);
      });
    }

    EmitCs([cQuery = Com<D3D11Query, false>(query)]
    (DxvkContext* ctx) {
      cQuery->End(ctx);
    });

    if (unlikely(query->TrackStalls())) {
      query->NotifyEnd();

      // Flush aggressively for queries the app previously
      // spun on; otherwise just hint at an upcoming flush
      if (query->IsStalling())
        ExecuteFlush(GpuFlushType::ImplicitSynchronization, nullptr);
      else if (query->IsEvent())
        ConsiderFlush(GpuFlushType::ImplicitStrongHint);
    }
  }
|
|
|
|
|
|
|
|
|
2018-01-20 13:22:44 +01:00
|
|
|
  /**
   * \brief Flushes pending commands (ID3D11DeviceContext::Flush)
   *
   * Submits all recorded work to the GPU without waiting.
   */
  void STDMETHODCALLTYPE D3D11ImmediateContext::Flush() {
    D3D10DeviceLock lock = LockContext();

    ExecuteFlush(GpuFlushType::ExplicitFlush, nullptr);
  }
|
|
|
|
|
|
|
|
|
|
|
|
  /**
   * \brief Flushes pending commands (ID3D11DeviceContext3::Flush1)
   *
   * \param [in] ContextType Ignored by this implementation
   * \param [in] hEvent Optional event handle forwarded to the flush;
   *    presumably signalled when the submission completes — see
   *    ExecuteFlush for the actual semantics.
   */
  void STDMETHODCALLTYPE D3D11ImmediateContext::Flush1(
          D3D11_CONTEXT_TYPE          ContextType,
          HANDLE                      hEvent) {
    D3D10DeviceLock lock = LockContext();

    ExecuteFlush(GpuFlushType::ExplicitFlush, hEvent);
  }
|
|
|
|
|
|
|
|
|
2019-09-16 13:17:00 +02:00
|
|
|
HRESULT STDMETHODCALLTYPE D3D11ImmediateContext::Signal(
|
|
|
|
ID3D11Fence* pFence,
|
|
|
|
UINT64 Value) {
|
2023-01-14 14:31:26 +01:00
|
|
|
D3D10DeviceLock lock = LockContext();
|
2021-10-22 17:22:41 +02:00
|
|
|
auto fence = static_cast<D3D11Fence*>(pFence);
|
|
|
|
|
|
|
|
if (!fence)
|
|
|
|
return E_INVALIDARG;
|
|
|
|
|
|
|
|
EmitCs([
|
|
|
|
cFence = fence->GetFence(),
|
|
|
|
cValue = Value
|
|
|
|
] (DxvkContext* ctx) {
|
|
|
|
ctx->signalFence(cFence, cValue);
|
|
|
|
});
|
|
|
|
|
2023-01-14 14:31:26 +01:00
|
|
|
ExecuteFlush(GpuFlushType::ExplicitFlush, nullptr);
|
2021-10-22 17:22:41 +02:00
|
|
|
return S_OK;
|
2019-09-16 13:17:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
  /**
   * \brief Makes the GPU wait for a fence value
   *
   * \param [in] pFence Fence to wait on
   * \param [in] Value Value the fence must reach
   * \returns S_OK on success, E_INVALIDARG if the fence is null
   */
  HRESULT STDMETHODCALLTYPE D3D11ImmediateContext::Wait(
          ID3D11Fence*                pFence,
          UINT64                      Value) {
    D3D10DeviceLock lock = LockContext();
    auto fence = static_cast<D3D11Fence*>(pFence);

    if (!fence)
      return E_INVALIDARG;

    // Flush before recording the wait: previously recorded work
    // must be submitted ahead of the wait, since commands recorded
    // after this call must not execute until the fence is reached
    ExecuteFlush(GpuFlushType::ExplicitFlush, nullptr);

    EmitCs([
      cFence = fence->GetFence(),
      cValue = Value
    ] (DxvkContext* ctx) {
      ctx->waitFence(cFence, cValue);
    });

    return S_OK;
  }
|
|
|
|
|
|
|
|
|
2018-01-20 13:22:44 +01:00
|
|
|
  /**
   * \brief Executes a recorded command list on the immediate context
   *
   * \param [in] pCommandList Command list created by a deferred context
   * \param [in] RestoreContextState Whether to restore the immediate
   *    context's state after execution; if FALSE, state is reset to
   *    defaults as required by the D3D11 spec
   */
  void STDMETHODCALLTYPE D3D11ImmediateContext::ExecuteCommandList(
          ID3D11CommandList*  pCommandList,
          BOOL                RestoreContextState) {
    D3D10DeviceLock lock = LockContext();

    auto commandList = static_cast<D3D11CommandList*>(pCommandList);

    // Clear state so that the command list can't observe any
    // current context state. The command list itself will clean
    // up after execution to ensure that no state changes done
    // by the command list are visible to the immediate context.
    ResetCommandListState();

    // Flush any outstanding commands so that
    // we don't mess up the execution order
    FlushCsChunk();

    // As an optimization, flush everything if the
    // number of pending draw calls is high enough.
    ConsiderFlush(GpuFlushType::ImplicitWeakHint);

    // Dispatch command list to the CS thread
    commandList->EmitToCsThread([this] (DxvkCsChunkRef&& chunk, GpuFlushType flushType) {
      EmitCsChunk(std::move(chunk));

      // Return the sequence number from before the flush since
      // that is actually going to be needed for resource tracking
      uint64_t csSeqNum = m_csSeqNum;

      // Consider a flush after every chunk in case the app
      // submits a very large command list or the GPU is idle
      ConsiderFlush(flushType);
      return csSeqNum;
    });

    // Restore the immediate context's state
    if (RestoreContextState)
      RestoreCommandListState();
    else
      ResetContextState();
  }
|
|
|
|
|
|
|
|
|
|
|
|
  /**
   * \brief Stub implementation of FinishCommandList
   *
   * Recording command lists is only valid on deferred contexts,
   * so calling this on the immediate context always fails.
   *
   * \returns DXGI_ERROR_INVALID_CALL
   */
  HRESULT STDMETHODCALLTYPE D3D11ImmediateContext::FinishCommandList(
          BOOL                RestoreDeferredContextState,
          ID3D11CommandList   **ppCommandList) {
    InitReturnPtr(ppCommandList);

    Logger::err("D3D11: FinishCommandList called on immediate context");
    return DXGI_ERROR_INVALID_CALL;
  }
|
|
|
|
|
|
|
|
|
|
|
|
HRESULT STDMETHODCALLTYPE D3D11ImmediateContext::Map(
|
|
|
|
ID3D11Resource* pResource,
|
|
|
|
UINT Subresource,
|
|
|
|
D3D11_MAP MapType,
|
|
|
|
UINT MapFlags,
|
|
|
|
D3D11_MAPPED_SUBRESOURCE* pMappedResource) {
|
2018-11-29 20:59:40 +01:00
|
|
|
D3D10DeviceLock lock = LockContext();
|
|
|
|
|
2019-09-17 18:24:31 +02:00
|
|
|
if (unlikely(!pResource))
|
2018-10-16 12:29:04 +02:00
|
|
|
return E_INVALIDARG;
|
2018-03-22 12:58:26 +01:00
|
|
|
|
2018-01-20 13:22:44 +01:00
|
|
|
D3D11_RESOURCE_DIMENSION resourceDim = D3D11_RESOURCE_DIMENSION_UNKNOWN;
|
|
|
|
pResource->GetType(&resourceDim);
|
2018-10-16 12:29:04 +02:00
|
|
|
|
|
|
|
HRESULT hr;
|
2018-01-20 13:22:44 +01:00
|
|
|
|
2019-05-01 03:01:36 +02:00
|
|
|
if (likely(resourceDim == D3D11_RESOURCE_DIMENSION_BUFFER)) {
|
2018-10-16 12:29:04 +02:00
|
|
|
hr = MapBuffer(
|
2018-03-14 14:40:09 +01:00
|
|
|
static_cast<D3D11Buffer*>(pResource),
|
|
|
|
MapType, MapFlags, pMappedResource);
|
2018-01-20 13:22:44 +01:00
|
|
|
} else {
|
2018-10-16 12:29:04 +02:00
|
|
|
hr = MapImage(
|
2018-03-14 14:40:09 +01:00
|
|
|
GetCommonTexture(pResource),
|
|
|
|
Subresource, MapType, MapFlags,
|
|
|
|
pMappedResource);
|
|
|
|
}
|
2018-10-16 12:29:04 +02:00
|
|
|
|
2019-06-03 15:30:04 +02:00
|
|
|
if (unlikely(FAILED(hr)))
|
|
|
|
*pMappedResource = D3D11_MAPPED_SUBRESOURCE();
|
2018-10-16 12:29:04 +02:00
|
|
|
|
|
|
|
return hr;
|
2018-03-14 14:40:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void STDMETHODCALLTYPE D3D11ImmediateContext::Unmap(
|
|
|
|
ID3D11Resource* pResource,
|
|
|
|
UINT Subresource) {
|
2022-02-10 04:34:50 +01:00
|
|
|
// Since it is very uncommon for images to be mapped compared
|
|
|
|
// to buffers, we count the currently mapped images in order
|
|
|
|
// to avoid a virtual method call in the common case.
|
|
|
|
if (unlikely(m_mappedImageCount > 0)) {
|
|
|
|
D3D11_RESOURCE_DIMENSION resourceDim = D3D11_RESOURCE_DIMENSION_UNKNOWN;
|
|
|
|
pResource->GetType(&resourceDim);
|
|
|
|
|
|
|
|
if (resourceDim != D3D11_RESOURCE_DIMENSION_BUFFER) {
|
|
|
|
D3D10DeviceLock lock = LockContext();
|
|
|
|
UnmapImage(GetCommonTexture(pResource), Subresource);
|
|
|
|
}
|
2019-05-01 03:01:36 +02:00
|
|
|
}
|
2018-03-14 14:40:09 +01:00
|
|
|
}
|
2018-10-25 20:54:10 +02:00
|
|
|
|
2022-08-03 18:07:03 +02:00
|
|
|
|
2018-03-14 14:40:09 +01:00
|
|
|
  /**
   * \brief Maps a buffer for CPU access
   *
   * DISCARD maps rename the buffer (new backing slice), NO_OVERWRITE
   * maps return the current slice without synchronization, and all
   * other map types either promote to a data-preserving discard or
   * wait for pending GPU work.
   *
   * \returns S_OK, E_INVALIDARG, or DXGI_ERROR_WAS_STILL_DRAWING
   */
  HRESULT D3D11ImmediateContext::MapBuffer(
          D3D11Buffer*                pResource,
          D3D11_MAP                   MapType,
          UINT                        MapFlags,
          D3D11_MAPPED_SUBRESOURCE*   pMappedResource) {
    if (unlikely(!pMappedResource))
      return E_INVALIDARG;

    if (unlikely(pResource->GetMapMode() == D3D11_COMMON_BUFFER_MAP_MODE_NONE)) {
      Logger::err("D3D11: Cannot map a device-local buffer");
      return E_INVALIDARG;
    }

    // For buffers, both pitches are defined as the full buffer size
    VkDeviceSize bufferSize = pResource->Desc()->ByteWidth;

    if (likely(MapType == D3D11_MAP_WRITE_DISCARD)) {
      // Allocate a new backing slice for the buffer and set
      // it as the 'new' mapped slice. This assumes that the
      // only way to invalidate a buffer is by mapping it.
      auto physSlice = pResource->DiscardSlice();
      pMappedResource->pData      = physSlice.mapPtr;
      pMappedResource->RowPitch   = bufferSize;
      pMappedResource->DepthPitch = bufferSize;

      // Point the GPU-side buffer at the new slice
      EmitCs([
        cBuffer      = pResource->GetBuffer(),
        cBufferSlice = physSlice
      ] (DxvkContext* ctx) {
        ctx->invalidateBuffer(cBuffer, cBufferSlice);
      });

      return S_OK;
    } else if (likely(MapType == D3D11_MAP_WRITE_NO_OVERWRITE)) {
      // Put this on a fast path without any extra checks since it's
      // a somewhat desired method to partially update large buffers
      DxvkBufferSliceHandle physSlice = pResource->GetMappedSlice();
      pMappedResource->pData      = physSlice.mapPtr;
      pMappedResource->RowPitch   = bufferSize;
      pMappedResource->DepthPitch = bufferSize;
      return S_OK;
    } else {
      // Quantum Break likes using MAP_WRITE on resources which would force
      // us to synchronize with the GPU multiple times per frame. In those
      // situations, if there are no pending GPU writes to the resource, we
      // can promote it to MAP_WRITE_DISCARD, but preserve the data by doing
      // a CPU copy from the previous buffer slice, to avoid the sync point.
      bool doInvalidatePreserve = false;

      auto buffer = pResource->GetBuffer();
      auto sequenceNumber = pResource->GetSequenceNumber();

      // Only consider the promotion for small-ish buffers mapped for
      // write without any map flags (i.e. without DO_NOT_WAIT)
      if (MapType != D3D11_MAP_READ && !MapFlags && bufferSize <= m_maxImplicitDiscardSize) {
        SynchronizeCsThread(sequenceNumber);

        bool hasWoAccess = buffer->isInUse(DxvkAccess::Write);
        bool hasRwAccess = buffer->isInUse(DxvkAccess::Read);

        if (hasRwAccess && !hasWoAccess) {
          // Uncached reads can be so slow that a GPU sync may actually be faster
          doInvalidatePreserve = buffer->memFlags() & VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        }
      }

      if (doInvalidatePreserve) {
        // Rename the buffer, then copy the previous contents
        // into the fresh slice so the app sees unchanged data
        auto prevSlice = pResource->GetMappedSlice();
        auto physSlice = pResource->DiscardSlice();

        EmitCs([
          cBuffer      = std::move(buffer),
          cBufferSlice = physSlice
        ] (DxvkContext* ctx) {
          ctx->invalidateBuffer(cBuffer, cBufferSlice);
        });

        std::memcpy(physSlice.mapPtr, prevSlice.mapPtr, physSlice.length);
        pMappedResource->pData      = physSlice.mapPtr;
        pMappedResource->RowPitch   = bufferSize;
        pMappedResource->DepthPitch = bufferSize;
        return S_OK;
      } else {
        // Fall back to a real synchronization point; honours
        // DO_NOT_WAIT via the WaitForResource return value
        if (!WaitForResource(buffer, sequenceNumber, MapType, MapFlags))
          return DXGI_ERROR_WAS_STILL_DRAWING;

        DxvkBufferSliceHandle physSlice = pResource->GetMappedSlice();
        pMappedResource->pData      = physSlice.mapPtr;
        pMappedResource->RowPitch   = bufferSize;
        pMappedResource->DepthPitch = bufferSize;
        return S_OK;
      }
    }
  }
|
|
|
|
|
|
|
|
|
|
|
|
  /**
   * \brief Maps an image subresource for CPU access
   *
   * Directly-mapped images hand out the image's own memory; all
   * other map modes go through a staging buffer which may need
   * readback, renaming, or synchronization depending on the map
   * type and pending GPU accesses.
   *
   * \returns S_OK, E_INVALIDARG, or DXGI_ERROR_WAS_STILL_DRAWING
   */
  HRESULT D3D11ImmediateContext::MapImage(
          D3D11CommonTexture*         pResource,
          UINT                        Subresource,
          D3D11_MAP                   MapType,
          UINT                        MapFlags,
          D3D11_MAPPED_SUBRESOURCE*   pMappedResource) {
    const Rc<DxvkImage>  mappedImage  = pResource->GetImage();
    const Rc<DxvkBuffer> mappedBuffer = pResource->GetMappedBuffer(Subresource);

    auto mapMode = pResource->GetMapMode();

    if (unlikely(mapMode == D3D11_COMMON_TEXTURE_MAP_MODE_NONE)) {
      Logger::err("D3D11: Cannot map a device-local image");
      return E_INVALIDARG;
    }

    if (unlikely(Subresource >= pResource->CountSubresources()))
      return E_INVALIDARG;

    if (likely(pMappedResource != nullptr)) {
      // Resources with an unknown memory layout cannot return a pointer
      if (pResource->Desc()->Usage == D3D11_USAGE_DEFAULT
       && pResource->Desc()->TextureLayout == D3D11_TEXTURE_LAYOUT_UNDEFINED)
        return E_INVALIDARG;
    } else {
      // A null output is only allowed for DEFAULT-usage images
      if (pResource->Desc()->Usage != D3D11_USAGE_DEFAULT)
        return E_INVALIDARG;
    }

    VkFormat packedFormat = m_parent->LookupPackedFormat(
      pResource->Desc()->Format, pResource->GetFormatMode()).Format;

    uint64_t sequenceNumber = pResource->GetSequenceNumber(Subresource);

    auto formatInfo = lookupFormatInfo(packedFormat);
    void* mapPtr;

    if (mapMode == D3D11_COMMON_TEXTURE_MAP_MODE_DIRECT) {
      // Wait for the resource to become available. We do not
      // support image renaming, so stall on DISCARD instead.
      if (MapType == D3D11_MAP_WRITE_DISCARD)
        MapFlags &= ~D3D11_MAP_FLAG_DO_NOT_WAIT;

      if (MapType != D3D11_MAP_WRITE_NO_OVERWRITE) {
        if (!WaitForResource(mappedImage, sequenceNumber, MapType, MapFlags))
          return DXGI_ERROR_WAS_STILL_DRAWING;
      }

      // Query the subresource's memory layout and hope that
      // the application respects the returned pitch values.
      mapPtr = mappedImage->mapPtr(0);
    } else {
      // Staging-buffer path: decide between invalidating (renaming),
      // preserving contents, and waiting, based on the map type and
      // any pending GPU accesses to the mapped buffer.
      constexpr uint32_t DoInvalidate = (1u << 0);
      constexpr uint32_t DoPreserve   = (1u << 1);
      constexpr uint32_t DoWait       = (1u << 2);
      uint32_t doFlags;

      if (mapMode == D3D11_COMMON_TEXTURE_MAP_MODE_BUFFER) {
        // If the image can be written by the GPU, we need to update the
        // mapped staging buffer to reflect the current image contents.
        if (pResource->Desc()->Usage == D3D11_USAGE_DEFAULT) {
          bool needsReadback = !pResource->NeedsDirtyRegionTracking();

          needsReadback |= MapType == D3D11_MAP_READ
                        || MapType == D3D11_MAP_READ_WRITE;

          if (needsReadback)
            ReadbackImageBuffer(pResource, Subresource);
        }
      }

      if (MapType == D3D11_MAP_READ) {
        // Reads will not change the image content, so we only need
        // to wait for the GPU to finish writing to the mapped buffer.
        doFlags = DoWait;
      } else if (MapType == D3D11_MAP_WRITE_DISCARD) {
        doFlags = DoInvalidate;

        // If we know for sure that the mapped buffer is currently not
        // in use by the GPU, we don't have to allocate a new slice.
        if (m_csThread.lastSequenceNumber() >= sequenceNumber && !mappedBuffer->isInUse(DxvkAccess::Read))
          doFlags = 0;
      } else if (mapMode == D3D11_COMMON_TEXTURE_MAP_MODE_STAGING && (MapFlags & D3D11_MAP_FLAG_DO_NOT_WAIT)) {
        // Always respect DO_NOT_WAIT for mapped staging images
        doFlags = DoWait;
      } else if (MapType != D3D11_MAP_WRITE_NO_OVERWRITE || mapMode == D3D11_COMMON_TEXTURE_MAP_MODE_BUFFER) {
        // Need to synchronize thread to determine pending GPU accesses
        SynchronizeCsThread(sequenceNumber);

        // Don't implicitly discard large buffers or buffers of images with
        // multiple subresources, as that is likely to cause memory issues.
        VkDeviceSize bufferSize = pResource->GetMappedSlice(Subresource).length;

        if (bufferSize >= m_maxImplicitDiscardSize || pResource->CountSubresources() > 1) {
          // Don't check access flags, WaitForResource will return
          // early anyway if the resource is currently in use
          doFlags = DoWait;
        } else if (mappedBuffer->isInUse(DxvkAccess::Write)) {
          // There are pending GPU writes, need to wait for those
          doFlags = DoWait;
        } else if (mappedBuffer->isInUse(DxvkAccess::Read)) {
          // All pending GPU accesses are reads, so the buffer data
          // is still current, and we can prevent GPU synchronization
          // by creating a new slice with an exact copy of the data.
          doFlags = DoInvalidate | DoPreserve;
        } else {
          // There are no pending accesses, so we don't need to wait
          doFlags = 0;
        }
      } else {
        // No need to synchronize staging resources with NO_OVERWRITE
        // since the buffer will be used directly.
        doFlags = 0;
      }

      if (doFlags & DoInvalidate) {
        // Rename the staging buffer and optionally carry the old
        // contents over into the new slice
        DxvkBufferSliceHandle prevSlice = pResource->GetMappedSlice(Subresource);
        DxvkBufferSliceHandle physSlice = pResource->DiscardSlice(Subresource);

        EmitCs([
          cImageBuffer = mappedBuffer,
          cBufferSlice = physSlice
        ] (DxvkContext* ctx) {
          ctx->invalidateBuffer(cImageBuffer, cBufferSlice);
        });

        if (doFlags & DoPreserve)
          std::memcpy(physSlice.mapPtr, prevSlice.mapPtr, physSlice.length);

        mapPtr = physSlice.mapPtr;
      } else {
        if (doFlags & DoWait) {
          // We cannot respect DO_NOT_WAIT for buffer-mapped resources since
          // our internal copies need to be transparent to the application.
          if (mapMode == D3D11_COMMON_TEXTURE_MAP_MODE_BUFFER)
            MapFlags &= ~D3D11_MAP_FLAG_DO_NOT_WAIT;

          // Wait for mapped buffer to become available
          if (!WaitForResource(mappedBuffer, sequenceNumber, MapType, MapFlags))
            return DXGI_ERROR_WAS_STILL_DRAWING;
        }

        mapPtr = pResource->GetMappedSlice(Subresource).mapPtr;
      }
    }

    // Mark the given subresource as mapped
    pResource->SetMapType(Subresource, MapType);

    if (pMappedResource) {
      auto layout = pResource->GetSubresourceLayout(formatInfo->aspectMask, Subresource);
      pMappedResource->pData      = reinterpret_cast<char*>(mapPtr) + layout.Offset;
      pMappedResource->RowPitch   = layout.RowPitch;
      pMappedResource->DepthPitch = layout.DepthPitch;
    }

    // Lets Unmap skip the image path cheaply when no images are mapped
    m_mappedImageCount += 1;
    return S_OK;
  }
|
|
|
|
|
|
|
|
|
2018-03-14 14:40:09 +01:00
|
|
|
  /**
   * \brief Unmaps an image subresource
   *
   * For buffer-mapped images that were mapped for writing, uploads
   * the staging buffer contents (or just the tracked dirty regions)
   * back into the image.
   */
  void D3D11ImmediateContext::UnmapImage(
          D3D11CommonTexture*         pResource,
          UINT                        Subresource) {
    // ~0u marks the subresource as not currently mapped
    D3D11_MAP mapType = pResource->GetMapType(Subresource);
    pResource->SetMapType(Subresource, D3D11_MAP(~0u));

    // Ignore unmatched Unmap calls
    if (mapType == D3D11_MAP(~0u))
      return;

    // Decrement mapped image counter only after making sure
    // the given subresource is actually mapped right now
    m_mappedImageCount -= 1;

    // Read-only maps cannot have changed the data, and only
    // buffer-mapped images need an explicit write-back
    if ((mapType != D3D11_MAP_READ) && (pResource->GetMapMode() == D3D11_COMMON_TEXTURE_MAP_MODE_BUFFER)) {
      if (pResource->NeedsDirtyRegionTracking()) {
        // Upload only the regions the app actually touched
        for (uint32_t i = 0; i < pResource->GetDirtyRegionCount(Subresource); i++) {
          D3D11_COMMON_TEXTURE_REGION region = pResource->GetDirtyRegion(Subresource, i);
          UpdateDirtyImageRegion(pResource, Subresource, &region);
        }

        pResource->ClearDirtyRegions(Subresource);
      } else {
        // No tracking: upload the entire subresource
        UpdateDirtyImageRegion(pResource, Subresource, nullptr);
      }
    }
  }
|
|
|
|
|
|
|
|
|
2022-09-05 01:56:54 +02:00
|
|
|
  /**
   * \brief Copies current image contents into the mapped staging buffer
   *
   * Records a GPU copy so that a subsequent CPU read of the staging
   * buffer observes up-to-date image data. Packed depth-stencil
   * formats use the dedicated packed-copy path.
   */
  void D3D11ImmediateContext::ReadbackImageBuffer(
          D3D11CommonTexture*         pResource,
          UINT                        Subresource) {
    VkImageAspectFlags aspectMask = lookupFormatInfo(pResource->GetPackedFormat())->aspectMask;
    VkImageSubresource subresource = pResource->GetSubresourceFromIndex(aspectMask, Subresource);

    EmitCs([
      cSrcImage       = pResource->GetImage(),
      cSrcSubresource = vk::makeSubresourceLayers(subresource),
      cDstBuffer      = pResource->GetMappedBuffer(Subresource),
      cPackedFormat   = pResource->GetPackedFormat()
    ] (DxvkContext* ctx) {
      VkOffset3D offset = { 0, 0, 0 };
      VkExtent3D extent = cSrcImage->mipLevelExtent(cSrcSubresource.mipLevel);

      // Combined depth-stencil data must be packed into a single
      // buffer layout; everything else is a plain image-to-buffer copy
      if (cSrcSubresource.aspectMask != (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
        ctx->copyImageToBuffer(cDstBuffer, 0, 0, 0,
          cSrcImage, cSrcSubresource, offset, extent);
      } else {
        ctx->copyDepthStencilImageToPackedBuffer(cDstBuffer, 0,
          VkOffset2D { 0, 0 },
          VkExtent2D { extent.width, extent.height },
          cSrcImage, cSrcSubresource,
          VkOffset2D { 0, 0 },
          VkExtent2D { extent.width, extent.height },
          cPackedFormat);
      }
    });

    // Keep resource tracking in sync with the recorded copy
    if (pResource->HasSequenceNumber())
      TrackTextureSequenceNumber(pResource, Subresource);
  }
|
|
|
|
|
|
|
|
|
2022-09-05 05:35:16 +02:00
|
|
|
  /**
   * \brief Uploads a (dirty) region of a mapped staging buffer to the image
   *
   * \param [in] pResource Buffer-mapped texture to update
   * \param [in] Subresource Subresource index
   * \param [in] pRegion Region to upload, or nullptr to
   *    upload the entire subresource
   */
  void D3D11ImmediateContext::UpdateDirtyImageRegion(
          D3D11CommonTexture*         pResource,
          UINT                        Subresource,
    const D3D11_COMMON_TEXTURE_REGION* pRegion) {
    auto formatInfo = lookupFormatInfo(pResource->GetPackedFormat());
    auto subresource = vk::makeSubresourceLayers(
      pResource->GetSubresourceFromIndex(formatInfo->aspectMask, Subresource));

    // Update the entire image if no dirty region was specified
    D3D11_COMMON_TEXTURE_REGION region;

    if (pRegion) {
      region = *pRegion;
    } else {
      region.Offset = VkOffset3D { 0, 0, 0 };
      region.Extent = pResource->MipLevelExtent(subresource.mipLevel);
    }

    auto subresourceLayout = pResource->GetSubresourceLayout(formatInfo->aspectMask, Subresource);

    // Update dirty region one aspect at a time, due to
    // how the data is laid out in the staging buffer.
    for (uint32_t i = 0; i < pResource->GetPlaneCount(); i++) {
      subresource.aspectMask = formatInfo->aspectMask;

      // Multi-planar formats address each plane individually
      if (formatInfo->flags.test(DxvkFormatFlag::MultiPlane))
        subresource.aspectMask = vk::getPlaneAspect(i);

      EmitCs([
        cDstImage       = pResource->GetImage(),
        cDstSubresource = subresource,
        cDstOffset      = region.Offset,
        cDstExtent      = region.Extent,
        cSrcBuffer      = pResource->GetMappedBuffer(Subresource),
        cSrcOffset      = pResource->ComputeMappedOffset(Subresource, i, region.Offset),
        cSrcRowPitch    = subresourceLayout.RowPitch,
        cSrcDepthPitch  = subresourceLayout.DepthPitch,
        cPackedFormat   = pResource->GetPackedFormat()
      ] (DxvkContext* ctx) {
        // Packed depth-stencil needs the dedicated unpack path
        if (cDstSubresource.aspectMask != (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
          ctx->copyBufferToImage(
            cDstImage, cDstSubresource, cDstOffset, cDstExtent,
            cSrcBuffer, cSrcOffset, cSrcRowPitch, cSrcDepthPitch);
        } else {
          ctx->copyPackedBufferToDepthStencilImage(
            cDstImage, cDstSubresource,
            VkOffset2D { cDstOffset.x, cDstOffset.y },
            VkExtent2D { cDstExtent.width, cDstExtent.height },
            cSrcBuffer, 0,
            VkOffset2D { cDstOffset.x, cDstOffset.y },
            VkExtent2D { cDstExtent.width, cDstExtent.height },
            cPackedFormat);
        }
      });
    }

    // Keep resource tracking in sync with the recorded copies
    if (pResource->HasSequenceNumber())
      TrackTextureSequenceNumber(pResource, Subresource);
  }
|
|
|
|
|
|
|
|
|
2022-02-06 18:20:31 +01:00
|
|
|
void D3D11ImmediateContext::UpdateMappedBuffer(
|
|
|
|
D3D11Buffer* pDstBuffer,
|
|
|
|
UINT Offset,
|
|
|
|
UINT Length,
|
|
|
|
const void* pSrcData,
|
|
|
|
UINT CopyFlags) {
|
|
|
|
DxvkBufferSliceHandle slice;
|
|
|
|
|
|
|
|
if (likely(CopyFlags != D3D11_COPY_NO_OVERWRITE)) {
|
|
|
|
slice = pDstBuffer->DiscardSlice();
|
|
|
|
|
|
|
|
EmitCs([
|
|
|
|
cBuffer = pDstBuffer->GetBuffer(),
|
|
|
|
cBufferSlice = slice
|
|
|
|
] (DxvkContext* ctx) {
|
|
|
|
ctx->invalidateBuffer(cBuffer, cBufferSlice);
|
|
|
|
});
|
|
|
|
} else {
|
|
|
|
slice = pDstBuffer->GetMappedSlice();
|
|
|
|
}
|
|
|
|
|
|
|
|
std::memcpy(reinterpret_cast<char*>(slice.mapPtr) + Offset, pSrcData, Length);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-05-03 16:41:44 +02:00
|
|
|
void STDMETHODCALLTYPE D3D11ImmediateContext::SwapDeviceContextState(
|
|
|
|
ID3DDeviceContextState* pState,
|
|
|
|
ID3DDeviceContextState** ppPreviousState) {
|
|
|
|
InitReturnPtr(ppPreviousState);
|
|
|
|
|
|
|
|
if (!pState)
|
|
|
|
return;
|
2022-08-04 15:10:31 +02:00
|
|
|
|
|
|
|
// Reset all state affected by the current context state
|
|
|
|
ResetCommandListState();
|
|
|
|
|
2022-09-16 10:08:40 +00:00
|
|
|
Com<D3D11DeviceContextState, false> oldState = std::move(m_stateObject);
|
|
|
|
Com<D3D11DeviceContextState, false> newState = static_cast<D3D11DeviceContextState*>(pState);
|
2019-05-03 16:41:44 +02:00
|
|
|
|
|
|
|
if (oldState == nullptr)
|
|
|
|
oldState = new D3D11DeviceContextState(m_parent);
|
|
|
|
|
|
|
|
if (ppPreviousState)
|
|
|
|
*ppPreviousState = oldState.ref();
|
|
|
|
|
|
|
|
m_stateObject = newState;
|
|
|
|
|
|
|
|
oldState->SetState(m_state);
|
|
|
|
newState->GetState(m_state);
|
|
|
|
|
2022-08-04 15:10:31 +02:00
|
|
|
// Restore all state affected by the new context state
|
2022-08-04 14:07:48 +02:00
|
|
|
RestoreCommandListState();
|
2019-05-03 16:41:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2022-02-09 02:58:21 +01:00
|
|
|
void D3D11ImmediateContext::SynchronizeCsThread(uint64_t SequenceNumber) {
|
2018-12-30 20:47:04 +01:00
|
|
|
D3D10DeviceLock lock = LockContext();
|
|
|
|
|
2018-01-21 18:04:22 +01:00
|
|
|
// Dispatch current chunk so that all commands
|
|
|
|
// recorded prior to this function will be run
|
2022-02-09 02:58:21 +01:00
|
|
|
if (SequenceNumber > m_csSeqNum)
|
|
|
|
FlushCsChunk();
|
2018-01-21 18:04:22 +01:00
|
|
|
|
2022-02-09 02:58:21 +01:00
|
|
|
m_csThread.synchronize(SequenceNumber);
|
2018-01-21 18:04:22 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
  /**
   * \brief Waits for the device to become idle
   *
   * Blocks the calling thread until all work previously
   * submitted to the Vulkan device has completed.
   */
  void D3D11ImmediateContext::SynchronizeDevice() {
    m_device->waitForIdle();
  }
|
|
|
|
|
2018-01-20 22:52:18 +01:00
|
|
|
|
2022-06-22 00:39:36 +02:00
|
|
|
  /**
   * \brief Marks the end of a frame on the CS thread
   */
  void D3D11ImmediateContext::EndFrame() {
    D3D10DeviceLock lock = LockContext();

    // NOTE(review): the <false> template argument presumably excludes
    // this chunk from sequence-number tracking since it records no
    // app-visible work — confirm against the EmitCs definition.
    EmitCs<false>([] (DxvkContext* ctx) {
      ctx->endFrame();
    });
  }
|
|
|
|
|
|
|
|
|
2018-03-10 23:32:15 +01:00
|
|
|
bool D3D11ImmediateContext::WaitForResource(
|
|
|
|
const Rc<DxvkResource>& Resource,
|
2022-02-09 02:58:21 +01:00
|
|
|
uint64_t SequenceNumber,
|
2019-09-19 20:28:22 +02:00
|
|
|
D3D11_MAP MapType,
|
2018-03-10 23:32:15 +01:00
|
|
|
UINT MapFlags) {
|
2019-09-19 20:28:22 +02:00
|
|
|
// Determine access type to wait for based on map mode
|
|
|
|
DxvkAccess access = MapType == D3D11_MAP_READ
|
|
|
|
? DxvkAccess::Write
|
|
|
|
: DxvkAccess::Read;
|
2018-03-24 17:02:24 +01:00
|
|
|
|
2022-02-09 02:58:21 +01:00
|
|
|
// Wait for any CS chunk using the resource to execute, since
|
|
|
|
// otherwise we cannot accurately determine if the resource is
|
|
|
|
// actually being used by the GPU right now.
|
|
|
|
bool isInUse = Resource->isInUse(access);
|
|
|
|
|
|
|
|
if (!isInUse) {
|
|
|
|
SynchronizeCsThread(SequenceNumber);
|
|
|
|
isInUse = Resource->isInUse(access);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (MapFlags & D3D11_MAP_FLAG_DO_NOT_WAIT) {
|
|
|
|
if (isInUse) {
|
2018-07-23 16:08:01 +02:00
|
|
|
// We don't have to wait, but misbehaving games may
|
|
|
|
// still try to spin on `Map` until the resource is
|
|
|
|
// idle, so we should flush pending commands
|
2023-01-14 14:31:26 +01:00
|
|
|
ConsiderFlush(GpuFlushType::ImplicitSynchronization);
|
2018-03-22 11:02:14 +01:00
|
|
|
return false;
|
2022-02-09 02:58:21 +01:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (isInUse) {
|
2018-07-23 16:08:01 +02:00
|
|
|
// Make sure pending commands using the resource get
|
|
|
|
// executed on the the GPU if we have to wait for it
|
2023-01-14 14:31:26 +01:00
|
|
|
ExecuteFlush(GpuFlushType::ImplicitSynchronization, nullptr);
|
2022-02-09 02:58:21 +01:00
|
|
|
SynchronizeCsThread(SequenceNumber);
|
|
|
|
|
2022-02-14 01:32:15 +01:00
|
|
|
m_device->waitForResource(Resource, access);
|
2018-07-23 16:08:01 +02:00
|
|
|
}
|
2018-03-10 23:32:15 +01:00
|
|
|
}
|
2022-02-09 02:58:21 +01:00
|
|
|
|
2018-03-10 23:32:15 +01:00
|
|
|
return true;
|
|
|
|
}
|
2019-05-19 13:21:08 +02:00
|
|
|
|
|
|
|
|
|
|
|
  /**
   * \brief Dispatches a CS chunk to the worker thread
   *
   * Records the sequence number returned by the dispatch so
   * that later synchronization calls can wait for this chunk.
   */
  void D3D11ImmediateContext::EmitCsChunk(DxvkCsChunkRef&& chunk) {
    m_csSeqNum = m_csThread.dispatchChunk(std::move(chunk));
  }
|
2018-06-04 23:31:49 +02:00
|
|
|
|
|
|
|
|
2022-02-09 02:54:32 +01:00
|
|
|
void D3D11ImmediateContext::TrackTextureSequenceNumber(
|
|
|
|
D3D11CommonTexture* pResource,
|
|
|
|
UINT Subresource) {
|
2022-02-18 14:14:18 +01:00
|
|
|
uint64_t sequenceNumber = GetCurrentSequenceNumber();
|
|
|
|
pResource->TrackSequenceNumber(Subresource, sequenceNumber);
|
2022-02-16 20:49:00 +01:00
|
|
|
|
2023-01-14 14:31:26 +01:00
|
|
|
ConsiderFlush(GpuFlushType::ImplicitStrongHint);
|
2022-02-09 02:54:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void D3D11ImmediateContext::TrackBufferSequenceNumber(
|
|
|
|
D3D11Buffer* pResource) {
|
2022-02-18 14:14:18 +01:00
|
|
|
uint64_t sequenceNumber = GetCurrentSequenceNumber();
|
|
|
|
pResource->TrackSequenceNumber(sequenceNumber);
|
2022-02-16 20:49:00 +01:00
|
|
|
|
2023-01-14 14:31:26 +01:00
|
|
|
ConsiderFlush(GpuFlushType::ImplicitStrongHint);
|
2022-02-09 02:54:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2022-02-18 14:14:18 +01:00
|
|
|
uint64_t D3D11ImmediateContext::GetCurrentSequenceNumber() {
|
|
|
|
// We do not flush empty chunks, so if we are tracking a resource
|
|
|
|
// immediately after a flush, we need to use the sequence number
|
|
|
|
// of the previously submitted chunk to prevent deadlocks.
|
|
|
|
return m_csChunk->empty() ? m_csSeqNum : m_csSeqNum + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2023-01-14 14:30:30 +01:00
|
|
|
  /**
   * \brief Counts CS chunks not yet covered by a flush
   *
   * \returns Number of chunks recorded since the last flush,
   *    including the chunk currently being recorded
   */
  uint64_t D3D11ImmediateContext::GetPendingCsChunks() {
    return GetCurrentSequenceNumber() - m_flushSeqNum;
  }
|
|
|
|
|
|
|
|
|
2023-01-14 14:31:26 +01:00
|
|
|
void D3D11ImmediateContext::ConsiderFlush(
|
|
|
|
GpuFlushType FlushType) {
|
|
|
|
uint64_t chunkId = GetCurrentSequenceNumber();
|
|
|
|
uint64_t submissionId = m_submissionFence->value();
|
2018-06-04 23:31:49 +02:00
|
|
|
|
2023-01-14 14:31:26 +01:00
|
|
|
if (m_flushTracker.considerFlush(FlushType, chunkId, submissionId))
|
|
|
|
ExecuteFlush(FlushType, nullptr);
|
2018-06-04 23:31:49 +02:00
|
|
|
}
|
2020-03-19 23:29:32 +01:00
|
|
|
|
|
|
|
|
2023-01-14 14:31:26 +01:00
|
|
|
  /**
   * \brief Unconditionally flushes pending work to the GPU
   *
   * \param [in] FlushType Reason for the flush, forwarded
   *    to the flush tracker for heuristic bookkeeping
   * \param [in] hEvent Optional Win32 event to signal once
   *    this submission completes on the GPU; may be null
   */
  void D3D11ImmediateContext::ExecuteFlush(
          GpuFlushType                  FlushType,
          HANDLE                        hEvent) {
    // Flush init context so that new resources are fully initialized
    // before the app can access them in any way. This has to happen
    // unconditionally since we may otherwise deadlock on Map.
    m_parent->FlushInitContext();

    // Exit early if there's nothing to do. An event still forces a
    // submission so the caller gets its completion notification.
    if (!GetPendingCsChunks() && !hEvent)
      return;

    // Allocate a new submission ID for this flush
    uint64_t submissionId = ++m_submissionId;

    // Register the event callback before the signal is emitted to
    // the CS thread, so the callback cannot be missed.
    if (hEvent) {
      m_submissionFence->setCallback(submissionId, [hEvent] {
        SetEvent(hEvent);
      });
    }

    // Signal the submission fence and flush the command list
    EmitCs<false>([
      cSubmissionFence = m_submissionFence,
      cSubmissionId = submissionId
    ] (DxvkContext* ctx) {
      ctx->signal(cSubmissionFence, cSubmissionId);
      ctx->flushCommandList(nullptr);
    });

    // Hand the chunk containing the flush to the CS thread
    FlushCsChunk();

    // Notify flush tracker about the flush
    m_flushSeqNum = m_csSeqNum;
    m_flushTracker.notifyFlush(m_flushSeqNum, submissionId);
  }
|
2018-01-20 22:52:18 +01:00
|
|
|
|
2018-07-15 19:45:40 +02:00
|
|
|
}
|