#include "common.h"
#include "kernel/vc4_packet.h"
#include "../brcm/cle/v3d_decoder.h"
#include "../brcm/clif/clif_dump.h"
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#commandbuffers-pools
* Command pools are opaque objects that command buffer memory is allocated from, and which allow the implementation to amortize the
* cost of resource creation across multiple command buffers. Command pools are externally synchronized, meaning that a command pool must
* not be used concurrently in multiple threads. That includes use via recording commands on any command buffers allocated from the pool,
* as well as operations that allocate, free, and reset command buffers or the pool itself.
*/
VKAPI_ATTR VkResult VKAPI_CALL rpi_vkCreateCommandPool(
VkDevice device,
const VkCommandPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkCommandPool* pCommandPool)
{
assert(device);
assert(pCreateInfo);
//TODO VK_COMMAND_POOL_CREATE_TRANSIENT_BIT
//specifies that command buffers allocated from the pool will be short-lived, meaning that they will be reset or freed in a relatively short timeframe.
//This flag may be used by the implementation to control memory allocation behavior within the pool.
//--> definitely use pool allocator
//TODO pool family ignored for now
_commandPool* cp = ALLOCATE(sizeof(_commandPool), 1, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if(!cp)
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
cp->queueFamilyIndex = pCreateInfo->queueFamilyIndex;
cp->resetAble = pCreateInfo->flags & VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//TODO CTS fails as we can't allocate enough memory for some reason
//tweak system allocation as root using:
//make sure kernel denies memory allocation that it won't be able to serve
//sysctl -w vm.overcommit_memory="2"
//specify after how much memory used the kernel will start denying requests
//sysctl -w vm.overcommit_ratio="80"
//
//initial number of command buffers to hold
int numCommandBufs = 128;
int consecutiveBlockSize = ARM_PAGE_SIZE;
int consecutiveBlockNumber = 64;
//int numCommandBufs = 30;
//int consecutiveBlockSize = getCPABlockSize(256);
//int consecutiveBlockNumber = 30;
int consecutivePoolSize = consecutiveBlockNumber * consecutiveBlockSize;
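	//note: block size is one page here, so each control list is backed by
	//whole pages taken from the consecutive pool allocator below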
//if(pCreateInfo->flags & VK_COMMAND_POOL_CREATE_TRANSIENT_BIT)
{
//use pool allocator
void* pamem = ALLOCATE(numCommandBufs * sizeof(_commandBuffer), 1, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if(!pamem)
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
cp->pa = createPoolAllocator(pamem, sizeof(_commandBuffer), numCommandBufs * sizeof(_commandBuffer));
void* cpamem = ALLOCATE(consecutivePoolSize, 1, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if(!cpamem)
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
cp->cpa = createConsecutivePoolAllocator(cpamem, consecutiveBlockSize, consecutivePoolSize);
}
*pCommandPool = (VkCommandPool)cp;
return VK_SUCCESS;
}
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#commandbuffer-allocation
* vkAllocateCommandBuffers can be used to create multiple command buffers. If the creation of any of those command buffers fails,
* the implementation must destroy all successfully created command buffer objects from this command, set all entries of the pCommandBuffers array to NULL and return the error.
*/
VKAPI_ATTR VkResult VKAPI_CALL rpi_vkAllocateCommandBuffers(
VkDevice device,
const VkCommandBufferAllocateInfo* pAllocateInfo,
VkCommandBuffer* pCommandBuffers)
{
assert(device);
assert(pAllocateInfo);
assert(pCommandBuffers);
	VkResult res = VK_SUCCESS;

	_commandPool* cp = (_commandPool*)pAllocateInfo->commandPool;

	//number of command buffers fully taken from the pool so far,
	//so the failure path below only touches valid entries
	uint32_t numAllocated = 0;
//TODO secondary command buffers
//if(cp->usePoolAllocator)
{
for(int c = 0; c < pAllocateInfo->commandBufferCount; ++c)
{
			pCommandBuffers[c] = poolAllocate(&cp->pa);

			if(!pCommandBuffers[c])
			{
				res = VK_ERROR_OUT_OF_HOST_MEMORY;
				break;
			}

			numAllocated++;
set_loader_magic_value(&pCommandBuffers[c]->loaderData);
pCommandBuffers[c]->dev = device;
pCommandBuffers[c]->shaderRecCount = 0;
pCommandBuffers[c]->usageFlags = 0;
pCommandBuffers[c]->state = CMDBUF_STATE_INITIAL;
pCommandBuffers[c]->cp = cp;
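
			//each command buffer owns four control lists, each starting at one block:
			//the binning CL, the BO handle table, the shader records and the uniforms;
			//these back the bin_cl/bo_handles/shader_rec/uniforms fields of
			//struct drm_vc4_submit_cl at submit time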
clInit(&pCommandBuffers[c]->binCl, consecutivePoolAllocate(&cp->cpa, 1), cp->cpa.blockSize);
clInit(&pCommandBuffers[c]->handlesCl, consecutivePoolAllocate(&cp->cpa, 1), cp->cpa.blockSize);
clInit(&pCommandBuffers[c]->shaderRecCl, consecutivePoolAllocate(&cp->cpa, 1), cp->cpa.blockSize);
clInit(&pCommandBuffers[c]->uniformsCl, consecutivePoolAllocate(&cp->cpa, 1), cp->cpa.blockSize);
pCommandBuffers[c]->graphicsPipeline = 0;
pCommandBuffers[c]->computePipeline = 0;
pCommandBuffers[c]->numDrawCallsSubmitted = 0;
pCommandBuffers[c]->indexBuffer = 0;
pCommandBuffers[c]->indexBufferOffset = 0;
pCommandBuffers[c]->vertexBufferDirty = 1;
pCommandBuffers[c]->indexBufferDirty = 1;
pCommandBuffers[c]->viewportDirty = 1;
pCommandBuffers[c]->lineWidthDirty = 1;
pCommandBuffers[c]->depthBiasDirty = 1;
pCommandBuffers[c]->graphicsPipelineDirty = 1;
pCommandBuffers[c]->computePipelineDirty = 1;
pCommandBuffers[c]->subpassDirty = 1;
pCommandBuffers[c]->blendConstantsDirty = 1;
pCommandBuffers[c]->scissorDirty = 1;
pCommandBuffers[c]->depthBoundsDirty = 1;
pCommandBuffers[c]->stencilCompareMaskDirty = 1;
pCommandBuffers[c]->stencilWriteMaskDirty = 1;
pCommandBuffers[c]->stencilReferenceDirty = 1;
pCommandBuffers[c]->descriptorSetDirty = 1;
pCommandBuffers[c]->pushConstantDirty = 1;
pCommandBuffers[c]->perfmonID = 0;
if(!pCommandBuffers[c]->binCl.buffer)
{
res = VK_ERROR_OUT_OF_HOST_MEMORY;
break;
}
if(!pCommandBuffers[c]->handlesCl.buffer)
{
res = VK_ERROR_OUT_OF_HOST_MEMORY;
break;
}
if(!pCommandBuffers[c]->shaderRecCl.buffer)
{
res = VK_ERROR_OUT_OF_HOST_MEMORY;
break;
}
if(!pCommandBuffers[c]->uniformsCl.buffer)
{
res = VK_ERROR_OUT_OF_HOST_MEMORY;
break;
}
}
}
	if(res != VK_SUCCESS)
	{
		//if(cp->usePoolAllocator)
		{
			//per the spec: destroy everything successfully created so far,
			//then set all entries of pCommandBuffers to NULL
			for(uint32_t c = 0; c < numAllocated; ++c)
			{
				consecutivePoolFree(&cp->cpa, pCommandBuffers[c]->binCl.buffer, pCommandBuffers[c]->binCl.numBlocks);
				consecutivePoolFree(&cp->cpa, pCommandBuffers[c]->handlesCl.buffer, pCommandBuffers[c]->handlesCl.numBlocks);
				consecutivePoolFree(&cp->cpa, pCommandBuffers[c]->shaderRecCl.buffer, pCommandBuffers[c]->shaderRecCl.numBlocks);
				consecutivePoolFree(&cp->cpa, pCommandBuffers[c]->uniformsCl.buffer, pCommandBuffers[c]->uniformsCl.numBlocks);

				poolFree(&cp->pa, pCommandBuffers[c]);
			}

			for(uint32_t c = 0; c < pAllocateInfo->commandBufferCount; ++c)
			{
				pCommandBuffers[c] = 0;
			}
		}
	}
return res;
}
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#vkBeginCommandBuffer
*/
VKAPI_ATTR VkResult VKAPI_CALL rpi_vkBeginCommandBuffer(
VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo* pBeginInfo)
{
assert(commandBuffer);
assert(pBeginInfo);
//TODO secondary command buffers
//VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT
//specifies that each recording of the command buffer will only be submitted once, and the command buffer will be reset and recorded again between each submission.
//TODO VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT
//specifies that a secondary command buffer is considered to be entirely inside a render pass. If this is a primary command buffer, then this bit is ignored
//TODO VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT
//specifies that a command buffer can be resubmitted to a queue while it is in the pending state, and recorded into multiple primary command buffers
//When a command buffer begins recording, all state in that command buffer is undefined
commandBuffer->usageFlags = pBeginInfo->flags;
commandBuffer->state = CMDBUF_STATE_RECORDING;
//TODO reset state?
return VK_SUCCESS;
}
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#vkEndCommandBuffer
* If there was an error during recording, the application will be notified by an unsuccessful return code returned by vkEndCommandBuffer.
* If the application wishes to further use the command buffer, the command buffer must be reset. The command buffer must have been in the recording state,
* and is moved to the executable state.
*/
VKAPI_ATTR VkResult VKAPI_CALL rpi_vkEndCommandBuffer(
VkCommandBuffer commandBuffer)
{
assert(commandBuffer);
commandBuffer->state = CMDBUF_STATE_EXECUTABLE;
return VK_SUCCESS;
}
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#vkQueueSubmit
* vkQueueSubmit is a queue submission command, with each batch defined by an element of pSubmits as an instance of the VkSubmitInfo structure.
* Batches begin execution in the order they appear in pSubmits, but may complete out of order.
* Fence and semaphore operations submitted with vkQueueSubmit have additional ordering constraints compared to other submission commands,
* with dependencies involving previous and subsequent queue operations. Information about these additional constraints can be found in the semaphore and
* fence sections of the synchronization chapter.
* Details on the interaction of pWaitDstStageMask with synchronization are described in the semaphore wait operation section of the synchronization chapter.
* The order that batches appear in pSubmits is used to determine submission order, and thus all the implicit ordering guarantees that respect it.
* Other than these implicit ordering guarantees and any explicit synchronization primitives, these batches may overlap or otherwise execute out of order.
* If any command buffer submitted to this queue is in the executable state, it is moved to the pending state. Once execution of all submissions of a command buffer complete,
* it moves from the pending state, back to the executable state. If a command buffer was recorded with the VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT flag,
* it instead moves back to the invalid state.
* If vkQueueSubmit fails, it may return VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY.
* If it does, the implementation must ensure that the state and contents of any resources or synchronization primitives referenced by the submitted command buffers and any semaphores
* referenced by pSubmits is unaffected by the call or its failure. If vkQueueSubmit fails in such a way that the implementation is unable to make that guarantee,
* the implementation must return VK_ERROR_DEVICE_LOST. See Lost Device.
*/
VKAPI_ATTR VkResult VKAPI_CALL rpi_vkQueueSubmit(
VkQueue queue,
uint32_t submitCount,
const VkSubmitInfo* pSubmits,
VkFence fence)
{
assert(queue);
	//TODO this is incorrect, see sync.c
	//TODO deal with pSubmits->pWaitDstStageMask
	//TODO only the first VkSubmitInfo batch in pSubmits is processed here; submitCount is ignored
for(int c = 0; c < pSubmits->waitSemaphoreCount; ++c)
{
sem_wait((sem_t*)pSubmits->pWaitSemaphores[c]);
}
for(int c = 0; c < pSubmits->commandBufferCount; ++c)
{
if(pSubmits->pCommandBuffers[c]->state == CMDBUF_STATE_EXECUTABLE)
{
pSubmits->pCommandBuffers[c]->state = CMDBUF_STATE_PENDING;
}
}
for(int c = 0; c < pSubmits->commandBufferCount; ++c)
{
VkCommandBuffer cmdbuf = pSubmits->pCommandBuffers[c];
if(!cmdbuf->binCl.currMarker)
{
//no markers recorded yet, skip
continue;
}
//first entry is assumed to be a marker
CLMarker* marker = cmdbuf->binCl.buffer;
//a command buffer may contain multiple render passes
//and commands outside render passes such as clear commands
//each of these corresponds to a control list submit
//submit each separate control list
while(marker)
{
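			//hindex ~0 marks a tile buffer surface as unused;
			//real handle indices are filled in below only for
			//the surfaces this control list actually touches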
struct drm_vc4_submit_cl submitCl =
{
.color_read.hindex = ~0,
.zs_read.hindex = ~0,
.color_write.hindex = ~0,
.msaa_color_write.hindex = ~0,
.zs_write.hindex = ~0,
.msaa_zs_write.hindex = ~0,
};
_image* writeImage = marker->writeImage;
_image* readImage = marker->readImage;
_image* writeDepthStencilImage = marker->writeDepthStencilImage;
_image* readDepthStencilImage = marker->readDepthStencilImage;
_image* writeMSAAimage = marker->writeMSAAimage;
_image* writeMSAAdepthStencilImage = marker->writeMSAAdepthStencilImage;
uint32_t performResolve = marker->performResolve;
uint32_t readMSAAimage = marker->readMSAAimage;
uint32_t readMSAAdepthStencilImage = marker->readMSAAdepthStencilImage;
//This should not result in an insertion!
uint32_t writeImageIdx = writeImage ? clGetHandleIndex(&cmdbuf->handlesCl, marker->handlesBuf, marker->handlesSize, writeImage->boundMem->bo) : 0;
uint32_t readImageIdx = readImage ? clGetHandleIndex(&cmdbuf->handlesCl, marker->handlesBuf, marker->handlesSize, readImage->boundMem->bo) : 0;
uint32_t writeDepthStencilImageIdx = writeDepthStencilImage ? clGetHandleIndex(&cmdbuf->handlesCl, marker->handlesBuf, marker->handlesSize, writeDepthStencilImage->boundMem->bo) : 0;
uint32_t readDepthStencilImageIdx = readDepthStencilImage ? clGetHandleIndex(&cmdbuf->handlesCl, marker->handlesBuf, marker->handlesSize, readDepthStencilImage->boundMem->bo) : 0;
uint32_t writeMSAAimageIdx = writeMSAAimage ? clGetHandleIndex(&cmdbuf->handlesCl, marker->handlesBuf, marker->handlesSize, writeMSAAimage->boundMem->bo) : 0;
uint32_t writeMSAAdepthStencilImageIdx = writeMSAAdepthStencilImage ? clGetHandleIndex(&cmdbuf->handlesCl, marker->handlesBuf, marker->handlesSize, writeMSAAdepthStencilImage->boundMem->bo) : 0;
// fprintf(stderr, "writeImage: %u\n", writeImage);
// fprintf(stderr, "readImage: %u\n", readImage);
// fprintf(stderr, "writeDepthStencilImage: %u\n", writeDepthStencilImage);
// fprintf(stderr, "readDepthStencilImage: %u\n", readDepthStencilImage);
// fprintf(stderr, "writeMSAAimage: %u\n", writeMSAAimage);
// fprintf(stderr, "writeMSAAdepthStencilImage: %u\n", writeMSAAdepthStencilImage);
// fprintf(stderr, "performResolve: %u\n", performResolve);
// fprintf(stderr, "readMSAAimage: %u\n", readMSAAimage);
// fprintf(stderr, "readMSAAdepthStencilImage: %u\n", readMSAAdepthStencilImage);
// fprintf(stderr, "writeImageIdx: %u\n", writeImageIdx);
// fprintf(stderr, "readImageIdx: %u\n", readImageIdx);
// fprintf(stderr, "writeDepthStencilImageIdx: %u\n", writeDepthStencilImageIdx);
// fprintf(stderr, "readDepthStencilImageIdx: %u\n", readDepthStencilImageIdx);
// fprintf(stderr, "writeMSAAimageIdx: %u\n", writeMSAAimageIdx);
// fprintf(stderr, "writeMSAAdepthStencilImageIdx: %u\n", writeMSAAdepthStencilImageIdx);
submitCl.clear_color[0] = 0;
submitCl.clear_color[1] = 0;
submitCl.clear_z = 0;
submitCl.clear_s = 0;
//fill out submit cl fields
if(writeImage)
{
uint32_t nonPaddedSize = (marker->width * marker->height * getFormatBpp(writeImage->format)) >> 3;
uint32_t tiling = writeImage->tiling;
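				//images whose full size fits in 4KB are stored LT (linear-tile)
				//rather than fully T-format tiled, so demote the tiling here;
				//the same rule is applied to each surface below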
if(writeImage->tiling == VC4_TILING_FORMAT_T && nonPaddedSize <= 4096)
{
tiling = VC4_TILING_FORMAT_LT;
}
submitCl.color_write.hindex = writeImageIdx;
submitCl.color_write.offset = marker->writeImageOffset;
submitCl.color_write.flags = 0;
submitCl.color_write.bits =
VC4_SET_FIELD(getRenderTargetFormatVC4(writeImage->format), VC4_RENDER_CONFIG_FORMAT) |
VC4_SET_FIELD(tiling, VC4_RENDER_CONFIG_MEMORY_FORMAT);
if(performResolve)
{
submitCl.color_write.bits |= VC4_RENDER_CONFIG_MS_MODE_4X | VC4_RENDER_CONFIG_DECIMATE_MODE_4X;
}
}
if(writeMSAAimage)
{
submitCl.msaa_color_write.hindex = writeMSAAimageIdx;
submitCl.msaa_color_write.offset = marker->writeMSAAimageOffset;
submitCl.msaa_color_write.flags = 0;
submitCl.msaa_color_write.bits = VC4_RENDER_CONFIG_MS_MODE_4X;
}
if(readImage)
{
uint32_t nonPaddedSize = (marker->width * marker->height * getFormatBpp(readImage->format)) >> 3;
uint32_t tiling = readImage->tiling;
if(readImage->tiling == VC4_TILING_FORMAT_T && nonPaddedSize <= 4096)
{
tiling = VC4_TILING_FORMAT_LT;
}
submitCl.color_read.hindex = readImageIdx;
submitCl.color_read.offset = marker->readImageOffset;
submitCl.color_read.flags = readMSAAimage ? VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES : 0;
submitCl.color_read.bits = VC4_SET_FIELD(getRenderTargetFormatVC4(readImage->format), VC4_RENDER_CONFIG_FORMAT) |
VC4_SET_FIELD(tiling, VC4_RENDER_CONFIG_MEMORY_FORMAT);
}
if(writeDepthStencilImage)
{
uint32_t nonPaddedSize = (marker->width * marker->height * getFormatBpp(writeDepthStencilImage->format)) >> 3;
uint32_t tiling = writeDepthStencilImage->tiling;
if(writeDepthStencilImage->tiling == VC4_TILING_FORMAT_T && nonPaddedSize <= 4096)
{
tiling = VC4_TILING_FORMAT_LT;
}
submitCl.zs_write.hindex = writeDepthStencilImageIdx;
submitCl.zs_write.offset = marker->writeDepthStencilImageOffset;
submitCl.zs_write.flags = 0;
submitCl.zs_write.bits = VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_ZS, VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
VC4_SET_FIELD(tiling, VC4_LOADSTORE_TILE_BUFFER_TILING);
}
if(writeMSAAdepthStencilImage)
{
submitCl.msaa_zs_write.hindex = writeMSAAdepthStencilImageIdx;
submitCl.msaa_zs_write.offset = marker->writeMSAAdepthStencilImageOffset;
submitCl.msaa_zs_write.flags = 0;
submitCl.msaa_zs_write.bits = VC4_RENDER_CONFIG_MS_MODE_4X;
}
if(readDepthStencilImage)
{
uint32_t nonPaddedSize = (marker->width * marker->height * getFormatBpp(readDepthStencilImage->format)) >> 3;
uint32_t tiling = readDepthStencilImage->tiling;
if(readDepthStencilImage->tiling == VC4_TILING_FORMAT_T && nonPaddedSize <= 4096)
{
tiling = VC4_TILING_FORMAT_LT;
}
submitCl.zs_read.hindex = readDepthStencilImageIdx;
submitCl.zs_read.offset = marker->readDepthStencilImageOffset;
submitCl.zs_read.flags = readMSAAdepthStencilImage ? VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES : 0; //TODO is this valid?
submitCl.zs_read.bits = VC4_SET_FIELD(getRenderTargetFormatVC4(readDepthStencilImage->format), VC4_RENDER_CONFIG_FORMAT) |
VC4_SET_FIELD(tiling, VC4_RENDER_CONFIG_MEMORY_FORMAT);
}
submitCl.clear_color[0] = marker->clearColor[0];
submitCl.clear_color[1] = marker->clearColor[1];
submitCl.clear_z = marker->clearDepth; //0...1 -> 0...0xffffff
submitCl.clear_s = marker->clearStencil; //0...0xff
// fprintf(stderr, "submitCl.clear_color[0]: %u\n", submitCl.clear_color[0]);
// fprintf(stderr, "submitCl.clear_color[1]: %u\n", submitCl.clear_color[1]);
// fprintf(stderr, "submitCl.clear_z: %u\n", submitCl.clear_z);
// fprintf(stderr, "submitCl.clear_s: %u\n", submitCl.clear_s);
submitCl.min_x_tile = 0;
submitCl.min_y_tile = 0;
uint32_t tileSizeW = 64;
uint32_t tileSizeH = 64;
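			//the tile buffer covers 64x64 pixels at up to 32bpp;
			//64bpp formats halve the tile height, and 4x multisampling
			//halves the tile size in both dimensions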
uint32_t widthInTiles = 0, heightInTiles = 0;
uint32_t width = 0, height = 0, bpp = 0;
width = marker->width;
height = marker->height;
if(writeImage)
{
bpp = getFormatBpp(writeImage->format);
}
else if(writeMSAAimage)
{
bpp = getFormatBpp(writeMSAAimage->format);
}
if(bpp == 64)
{
tileSizeH >>= 1;
}
if(performResolve || writeMSAAimage || writeMSAAdepthStencilImage)
{
tileSizeW >>= 1;
tileSizeH >>= 1;
}
widthInTiles = divRoundUp(width, tileSizeW);
heightInTiles = divRoundUp(height, tileSizeH);
//pad width if rendering to miplevel
if(marker->renderToMip)
{
width = getPow2Pad(width);
width = width < 4 ? 4 : width;
}
submitCl.max_x_tile = widthInTiles - 1;
submitCl.max_y_tile = heightInTiles - 1;
submitCl.width = width;
submitCl.height = height;
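			//the CLMarker is a header embedded at the start of each recorded
			//control list; the raw binning CL bytes follow it directly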
submitCl.flags |= marker->flags;
submitCl.bo_handles = marker->handlesBuf;
submitCl.bin_cl = ((uint8_t*)marker) + sizeof(CLMarker);
submitCl.shader_rec = marker->shaderRecBuf;
submitCl.uniforms = marker->uniformsBuf;
			if(marker->perfmonID)
			{
				uint32_t perfmonSelector = 0;
				uint32_t* perfmonIDptr = (uint32_t*)marker->perfmonID;

				//walk the pNext chain for VkPerformanceQuerySubmitInfoKHR
				//instead of blindly casting the first extension struct
				for(const VkBaseInStructure* s = pSubmits->pNext; s; s = s->pNext)
				{
					if(s->sType == VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR)
					{
						perfmonSelector = ((const VkPerformanceQuerySubmitInfoKHR*)s)->counterPassIndex;
						break;
					}
				}

				submitCl.perfmonid = *(perfmonIDptr + perfmonSelector);
			}
//marker not closed yet
//close here
if(!marker->size)
{
clCloseCurrentMarker(&cmdbuf->binCl, &cmdbuf->handlesCl, &cmdbuf->shaderRecCl, cmdbuf->shaderRecCount, &cmdbuf->uniformsCl);
}
submitCl.bo_handle_count = marker->handlesSize / 4;
submitCl.bin_cl_size = marker->size;
submitCl.shader_rec_size = marker->shaderRecSize;
submitCl.shader_rec_count = marker->shaderRecCount;
submitCl.uniforms_size = marker->uniformsSize;
/**/
printf("BCL:\n");
clDump(((uint8_t*)marker) + sizeof(CLMarker), marker->size);
printf("BO handles: ");
for(int d = 0; d < marker->handlesSize / 4; ++d)
{
printf("%u ", *((uint32_t*)(marker->handlesBuf)+d));
}
printf("\nUniforms: ");
for(int d = 0; d < marker->uniformsSize / 4; ++d)
{
printf("%u ", *((uint32_t*)(marker->uniformsBuf)+d));
}
printf("\nShader recs: ");
uint8_t* ptr = marker->shaderRecBuf + (3 + 1) * 4;
for(int d = 0; d < marker->shaderRecCount; ++d)
{
uint8_t flags = *ptr;
uint8_t fragmentShaderIsSingleThreaded = flags & (1 << 0);
uint8_t pointSizeIncludedInShadedVertexData = (flags & (1 << 1)) >> 1;
uint8_t enableClipping = (flags & (1 << 2)) >> 2;
ptr += 2;
uint8_t fragmentNumberOfUniforms = *ptr; ptr++;
uint8_t fragmentNumberOfVaryings = *ptr; ptr++;
uint32_t fragmentShaderCodeAddress = *(uint32_t*)ptr; ptr+=4;
uint32_t fragmentShaderUniformAddress = *(uint32_t*)ptr; ptr+=4;
uint16_t vertexNumberOfUniforms = *(uint16_t*)ptr; ptr+=2;
uint8_t vertexAttribSelectBits = *ptr; ptr++;
uint8_t vertexAttribTotalSize = *ptr; ptr++;
uint32_t vertexShaderCodeAddress = *(uint32_t*)ptr; ptr+=4;
uint32_t vertexShaderUniformAddress = *(uint32_t*)ptr; ptr+=4;
uint16_t coordNumberOfUniforms = *(uint16_t*)ptr; ptr+=2;
uint8_t coordAttribSelectBits = *ptr; ptr++;
uint8_t coordAttribTotalSize = *ptr; ptr++;
uint32_t coordShaderCodeAddress = *(uint32_t*)ptr; ptr+=4;
uint32_t coordShaderUniformAddress = *(uint32_t*)ptr; ptr+=4;
printf("\nfragmentShaderIsSingleThreaded: %i", fragmentShaderIsSingleThreaded);
printf("\npointSizeIncludedInShadedVertexData: %i", pointSizeIncludedInShadedVertexData);
printf("\nenableClipping: %i", enableClipping);
printf("\nfragmentNumberOfUniforms: %i", fragmentNumberOfUniforms);
printf("\nfragmentNumberOfVaryings: %i", fragmentNumberOfVaryings);
printf("\nfragmentShaderCodeAddress: %i", fragmentShaderCodeAddress);
printf("\nfragmentShaderUniformAddress: %i", fragmentShaderUniformAddress);
printf("\nvertexNumberOfUniforms: %i", vertexNumberOfUniforms);
printf("\nvertexAttribSelectBits: %i", vertexAttribSelectBits);
printf("\nvertexAttribTotalSize: %i", vertexAttribTotalSize);
printf("\nvertexShaderCodeAddress: %i", vertexShaderCodeAddress);
printf("\nvertexShaderUniformAddress: %i", vertexShaderUniformAddress);
printf("\ncoordNumberOfUniforms: %i", coordNumberOfUniforms);
printf("\ncoordAttribSelectBits: %i", coordAttribSelectBits);
printf("\ncoordAttribTotalSize: %i", coordAttribTotalSize);
printf("\ncoordShaderCodeAddress: %i", coordShaderCodeAddress);
printf("\ncoordShaderUniformAddress: %i", coordShaderUniformAddress);
uint8_t numAttribs = 0;
for(uint8_t e = 0; e < 8; ++e)
{
numAttribs += (vertexAttribSelectBits & (1 << e)) >> e;
}
for(uint8_t e = 0; e < numAttribs; ++e)
{
uint32_t attribBaseAddress = *(uint32_t*)ptr; ptr+=4;
uint8_t attribNumBytes = *ptr; ptr++;
uint8_t attribStride = *ptr; ptr++;
uint8_t attribVsVPMOffset = *ptr; ptr++;
uint8_t attribCsVPMOffset = *ptr; ptr++;
printf("\nattrib \#%i", e);
printf("\nattribBaseAddress: %i", attribBaseAddress);
printf("\nattribNumBytes: %i", attribNumBytes);
printf("\nattribStride: %i", attribStride);
printf("\nattribVsVPMOffset: %i", attribVsVPMOffset);
printf("\nattribCsVPMOffset: %i", attribCsVPMOffset);
}
}
printf("\nwidth height: %u, %u\n", submitCl.width, submitCl.height);
printf("tile min/max: %u,%u %u,%u\n", submitCl.min_x_tile, submitCl.min_y_tile, submitCl.max_x_tile, submitCl.max_y_tile);
printf("color read surf: hindex, offset, bits, flags %u %u %u %u\n", submitCl.color_read.hindex, submitCl.color_read.offset, submitCl.color_read.bits, submitCl.color_read.flags);
printf("color write surf: hindex, offset, bits, flags %u %u %u %u\n", submitCl.color_write.hindex, submitCl.color_write.offset, submitCl.color_write.bits, submitCl.color_write.flags);
printf("zs read surf: hindex, offset, bits, flags %u %u %u %u\n", submitCl.zs_read.hindex, submitCl.zs_read.offset, submitCl.zs_read.bits, submitCl.zs_read.flags);
printf("zs write surf: hindex, offset, bits, flags %u %u %u %u\n", submitCl.zs_write.hindex, submitCl.zs_write.offset, submitCl.zs_write.bits, submitCl.zs_write.flags);
printf("msaa color write surf: hindex, offset, bits, flags %u %u %u %u\n", submitCl.msaa_color_write.hindex, submitCl.msaa_color_write.offset, submitCl.msaa_color_write.bits, submitCl.msaa_color_write.flags);
printf("msaa zs write surf: hindex, offset, bits, flags %u %u %u %u\n", submitCl.msaa_zs_write.hindex, submitCl.msaa_zs_write.offset, submitCl.msaa_zs_write.bits, submitCl.msaa_zs_write.flags);
printf("clear color packed rgba %u %u\n", submitCl.clear_color[0], submitCl.clear_color[1]);
printf("clear z %u\n", submitCl.clear_z);
printf("clear s %u\n", submitCl.clear_s);
printf("flags %u\n", submitCl.flags);
printf("perfmonID %u\n", submitCl.perfmonid);
/**/
assert(submitCl.bo_handle_count > 0);
//TODO somehow store last finished globally
//so waiting on fences is faster
//eg. could be an atomic value
static uint64_t lastFinishedSeqno = 0;
//submit ioctl
vc4_cl_submit(controlFd, &submitCl, &queue->lastEmitSeqno, &lastFinishedSeqno);
//advance in linked list
marker = marker->nextMarker;
}
}
for(int c = 0; c < pSubmits->commandBufferCount; ++c)
{
if(pSubmits->pCommandBuffers[c]->state == CMDBUF_STATE_PENDING)
{
if(pSubmits->pCommandBuffers[c]->usageFlags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT)
{
pSubmits->pCommandBuffers[c]->state = CMDBUF_STATE_INVALID;
}
else
{
pSubmits->pCommandBuffers[c]->state = CMDBUF_STATE_EXECUTABLE;
}
}
}
for(int c = 0; c < pSubmits->signalSemaphoreCount; ++c)
{
sem_post((sem_t*)pSubmits->pSignalSemaphores[c]);
}
	_fence* f = (_fence*)fence;
if(f)
{
f->seqno = queue->lastEmitSeqno;
}
return VK_SUCCESS;
}
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#vkFreeCommandBuffers
* Any primary command buffer that is in the recording or executable state and has any element of pCommandBuffers recorded into it, becomes invalid.
*/
VKAPI_ATTR void VKAPI_CALL rpi_vkFreeCommandBuffers(
VkDevice device,
VkCommandPool commandPool,
uint32_t commandBufferCount,
const VkCommandBuffer* pCommandBuffers)
{
assert(device);
assert(commandPool);
assert(pCommandBuffers);
_commandPool* cp = (_commandPool*)commandPool;
for(int c = 0; c < commandBufferCount; ++c)
{
if(pCommandBuffers[c])
{
consecutivePoolFree(&cp->cpa, pCommandBuffers[c]->binCl.buffer, pCommandBuffers[c]->binCl.numBlocks);
consecutivePoolFree(&cp->cpa, pCommandBuffers[c]->handlesCl.buffer, pCommandBuffers[c]->handlesCl.numBlocks);
consecutivePoolFree(&cp->cpa, pCommandBuffers[c]->shaderRecCl.buffer, pCommandBuffers[c]->shaderRecCl.numBlocks);
consecutivePoolFree(&cp->cpa, pCommandBuffers[c]->uniformsCl.buffer, pCommandBuffers[c]->uniformsCl.numBlocks);
poolFree(&cp->pa, pCommandBuffers[c]);
}
}
}
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#vkDestroyCommandPool
* When a pool is destroyed, all command buffers allocated from the pool are freed.
* Any primary command buffer allocated from another VkCommandPool that is in the recording or executable state and has a secondary command buffer
* allocated from commandPool recorded into it, becomes invalid.
*/
VKAPI_ATTR void VKAPI_CALL rpi_vkDestroyCommandPool(
VkDevice device,
VkCommandPool commandPool,
const VkAllocationCallbacks* pAllocator)
{
assert(device);
_commandPool* cp = (_commandPool*)commandPool;
if(cp)
{
FREE(cp->pa.buf);
FREE(cp->cpa.buf);
destroyPoolAllocator(&cp->pa);
destroyConsecutivePoolAllocator(&cp->cpa);
FREE(cp);
}
}
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#vkTrimCommandPool
*/
VKAPI_ATTR void VKAPI_CALL rpi_vkTrimCommandPool(
VkDevice device,
VkCommandPool commandPool,
VkCommandPoolTrimFlags flags)
{
assert(device);
assert(commandPool);
	_commandPool* cp = (_commandPool*)commandPool;
	//TODO trim cp's pool allocator and consecutive pool allocator
	//by reallocating to just the used size
	//kinda silly though: if more memory is needed afterwards, we'd just have to reallocate again...
}
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#vkResetCommandPool
*/
VKAPI_ATTR VkResult VKAPI_CALL rpi_vkResetCommandPool(
VkDevice device,
VkCommandPool commandPool,
VkCommandPoolResetFlags flags)
{
assert(device);
assert(commandPool);
	_commandPool* cp = (_commandPool*)commandPool;
for(char* c = cp->pa.buf; c != cp->pa.buf + cp->pa.size; c += cp->pa.blockSize)
{
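		//walk the free chain: the first bytes of every free block
		//store a pointer to the next free block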
char* d = cp->pa.nextFreeBlock;
while(d)
{
if(c == d) break;
			d = *(char**)d;
}
if(c == d) //block is free, as we found it in the free chain
{
continue;
}
else
{
			//block is in use: it holds a live command buffer
			_commandBuffer* cb = (_commandBuffer*)c;
assert(cb->state != CMDBUF_STATE_PENDING);
cb->state = CMDBUF_STATE_INITIAL;
}
}
	//TODO secondary command buffers
	//TODO reset flag --> free all pool resources

	return VK_SUCCESS;
}
/*
* https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#vkResetCommandBuffer
*/
VKAPI_ATTR VkResult VKAPI_CALL rpi_vkResetCommandBuffer(
VkCommandBuffer commandBuffer,
VkCommandBufferResetFlags flags)
{
assert(commandBuffer);
_commandBuffer* cb = commandBuffer;
assert(cb->state != CMDBUF_STATE_PENDING);
assert(cb->cp->resetAble);
if(cb->state == CMDBUF_STATE_RECORDING || cb->state == CMDBUF_STATE_EXECUTABLE)
{
cb->state = CMDBUF_STATE_INVALID;
}
else
{
cb->state = CMDBUF_STATE_INITIAL;
}
if(flags & VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT)
{
//TODO release resources
}
//TODO reset state?
2018-08-26 15:11:43 +02:00
}
VKAPI_ATTR void VKAPI_CALL rpi_vkCmdExecuteCommands(
VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
const VkCommandBuffer* pCommandBuffers)
{
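	//TODO secondary command buffers are not supported yet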
}
VKAPI_ATTR void VKAPI_CALL rpi_vkCmdSetDeviceMask(
VkCommandBuffer commandBuffer,
uint32_t deviceMask)
{
UNSUPPORTED(vkCmdSetDeviceMask);
}