// rpi-vk-driver/driver/kernelInterface.c
// https://github.com/Yours3lf/rpi-vk-driver.git
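/*
 * Thin userspace wrappers around the VC4 DRM kernel interface: opening the
 * DRM control node, querying chip/feature parameters, and creating, mapping,
 * waiting on, labelling and freeing GEM buffer objects (BOs), plus command
 * list submission. Each function takes the DRM fd and raw GEM handles rather
 * than higher-level objects.
 */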

#define _GNU_SOURCE
#include "kernelInterface.h"
#include <stdatomic.h>

atomic_int refCounter = 0;

int controlFd = 0;
//int renderFd = 0;

int openIoctl()
{
	if(!controlFd)
	{
		controlFd = open(DRM_IOCTL_CTRL_DEV_FILE_NAME, O_RDWR | O_CLOEXEC);
		if (controlFd < 0) {
			fprintf(stderr, "Can't open device file: %s \nError: %s\n", DRM_IOCTL_CTRL_DEV_FILE_NAME, strerror(errno));
			controlFd = 0; //don't cache the failed fd, so the next call retries open()
			return -1;
		}
	}

	/*if(!renderFd)
	{
		renderFd = open(DRM_IOCTL_RENDER_DEV_FILE_NAME, O_RDWR | O_CLOEXEC);
		if (renderFd < 0) {
			printf("Can't open device file: %s \nError: %s\n", DRM_IOCTL_RENDER_DEV_FILE_NAME, strerror(errno));
			return -1;
		}
	}*/

	++refCounter;
	return 0;
}

void closeIoctl(int fd)
{
	if (--refCounter == 0)
	{
		close(fd);
	}
}
static uint32_t align(uint32_t num, uint32_t alignment)
{
	uint32_t mod = num % alignment;
	if(!mod)
	{
		return num;
	}
	else
	{
		return num + alignment - mod;
	}
}
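/*
 * Reads the V3D_IDENT0/IDENT1 registers through DRM_IOCTL_VC4_GET_PARAM and
 * combines them into a two-digit version number (21 or 26). Returns 21 for
 * old 2835 kernels that reject the ioctl with EINVAL, and 0 on error or on
 * an unsupported V3D version.
 */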
int vc4_get_chip_info(int fd)
{
	assert(fd);

	struct drm_vc4_get_param ident0 = {
		.param = DRM_VC4_PARAM_V3D_IDENT0,
	};
	struct drm_vc4_get_param ident1 = {
		.param = DRM_VC4_PARAM_V3D_IDENT1,
	};
	int ret;

	ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &ident0);
	if (ret != 0) {
		if (errno == EINVAL) {
			/* Backwards compatibility with 2835 kernels which
			 * only do V3D 2.1.
			 */
			return 21;
		} else {
			fprintf(stderr, "Couldn't get V3D IDENT0: %s\n",
				strerror(errno));
			return 0;
		}
	}

	ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &ident1);
	if (ret != 0) {
		fprintf(stderr, "Couldn't get V3D IDENT1: %s\n",
			strerror(errno));
		return 0;
	}

	uint32_t major = (ident0.value >> 24) & 0xff;
	uint32_t minor = (ident1.value >> 0) & 0xf;
	uint32_t v3d_ver = major * 10 + minor;

	if (v3d_ver != 21 && v3d_ver != 26) {
		fprintf(stderr, "V3D %d.%d not supported.\n",
			v3d_ver / 10,
			v3d_ver % 10);
		return 0;
	}

	return v3d_ver;
}
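/*
 * Queries a single DRM_VC4_PARAM_* capability via GET_PARAM and returns its
 * value, or 0 if the query fails.
 */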
int vc4_has_feature(int fd, uint32_t feature)
{
	assert(fd);

	struct drm_vc4_get_param p = {
		.param = feature,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &p);

	if (ret != 0)
	{
		fprintf(stderr, "Couldn't determine if VC4 has feature: %s\n", strerror(errno));
		return 0;
	}

	return p.value;
}
int vc4_test_tiling(int fd)
{
	assert(fd);

	/* Test if the kernel has GET_TILING; it will return -EINVAL if the
	 * ioctl does not exist, but -ENOENT if we pass an impossible handle.
	 * 0 cannot be a valid GEM object, so use that.
	 */
	struct drm_vc4_get_tiling get_tiling = {
		.handle = 0x0,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
	if (ret == -1 && errno == ENOENT)
	{
		return 1;
	}
	return 0;
}
//Ask the kernel what the buffer's tiling (format modifier) is.
//If an expected modifier is passed in, it is checked against the kernel's
//answer; DRM_FORMAT_MOD_INVALID means "report whatever the kernel says".
uint64_t vc4_bo_get_tiling(int fd, uint32_t bo, uint64_t mod)
{
	assert(fd);
	assert(bo);

	struct drm_vc4_get_tiling get_tiling = {
		.handle = bo,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);

	if (ret != 0) {
		return DRM_FORMAT_MOD_LINEAR; //0
	} else if (mod == DRM_FORMAT_MOD_INVALID) {
		return get_tiling.modifier;
	} else if (mod != get_tiling.modifier) {
		fprintf(stderr, "Modifier 0x%llx vs. tiling (0x%llx) mismatch\n",
			(long long)mod, (long long)get_tiling.modifier);
		return -1;
	}

	return -1;
}
int vc4_bo_set_tiling(int fd, uint32_t bo, uint64_t mod)
{
	assert(fd);
	assert(bo);

	struct drm_vc4_set_tiling set_tiling = {
		.handle = bo,
		.modifier = mod,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING,
			   &set_tiling);
	if (ret != 0)
	{
		fprintf(stderr, "Couldn't set tiling: %s\n",
			strerror(errno));
		return 0;
	}
	return 1;
}
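/*
 * Maps a BO into the CPU's address space without waiting for the GPU.
 * DRM_IOCTL_VC4_MMAP_BO only hands back a fake mmap offset; the actual
 * mapping is created by mmap()ing the DRM fd at that offset. The caller
 * unmaps with vc4_bo_unmap_unsynchronized().
 */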
void* vc4_bo_map_unsynchronized(int fd, uint32_t bo, uint32_t offset, uint32_t size)
{
	assert(fd);
	assert(bo);
	assert(size);

	int ret;

	//if (bo->map)
	//	return bo->map;

	struct drm_vc4_mmap_bo map;
	memset(&map, 0, sizeof(map));
	map.handle = bo;
	ret = drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
	if (ret != 0) {
		fprintf(stderr, "Couldn't map unsync: %s\n", strerror(errno));
		return 0;
	}

	void* mapPtr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, map.offset + offset);
	if (mapPtr == MAP_FAILED) {
		fprintf(stderr, "mmap of bo %d (offset 0x%016llx, size %d) failed\n",
			bo, (long long)map.offset + offset, size);
		return 0;
	}

	//VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, false));

	return mapPtr;
}

void vc4_bo_unmap_unsynchronized(int fd, void* ptr, uint32_t size)
{
	assert(fd);
	assert(ptr);
	assert(size);

	munmap(ptr, size);
}
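/*
 * Blocks until the GPU is done with the given BO or the timeout expires.
 * Returns 1 on success and 0 on failure or timeout.
 */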
int vc4_bo_wait(int fd, uint32_t bo, uint64_t timeout_ns)
{
	assert(fd);
	assert(bo);

	struct drm_vc4_wait_bo wait = {
		.handle = bo,
		.timeout_ns = timeout_ns,
	};

	printf("Wait for BO: %u\n", bo);

	int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
	if (ret) {
		if (errno != ETIME) { //drmIoctl returns -1 and reports the error in errno
			fprintf(stderr, "BO wait failed: %s\n",
				strerror(errno));
		}

		return 0;
	}

	return 1;
}
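/*
 * Waits until the kernel reports that job sequence number 'seqno' has
 * finished. *timeout_ns is updated with the remaining timeout and
 * *lastFinishedSeqno is advanced on success. Returns 1 on success, -1 on
 * timeout and 0 on any other error.
 */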
int vc4_seqno_wait(int fd, uint64_t* lastFinishedSeqno, uint64_t seqno, uint64_t* timeout_ns)
{
	assert(fd);
	assert(lastFinishedSeqno);
	assert(timeout_ns);

	if(!seqno)
		return 1;

	if (*lastFinishedSeqno >= seqno)
		return 1;

	struct drm_vc4_wait_seqno wait = {
		.seqno = seqno,
		.timeout_ns = *timeout_ns,
	};

	printf("Wait for seqno: %llu\n", (unsigned long long)seqno);

	int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
	if (ret) {
		if (errno != ETIME) { //drmIoctl returns -1 and reports the error in errno
			fprintf(stderr, "Seqno wait failed: %s\n",
				strerror(errno));
		}
		else
		{
			//Timeout happened
			*timeout_ns = -1;
			return -1;
		}

		return 0;
	}

	*timeout_ns = wait.timeout_ns;
	*lastFinishedSeqno = seqno;
	return 1;
}
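/*
 * Exports a GEM handle as a global "flink" name that other processes can
 * open with vc4_bo_open_name(). Returns 1 on success, 0 on failure.
 */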
int vc4_bo_flink(int fd, uint32_t bo, uint32_t *name)
{
	assert(fd);
	assert(bo);
	assert(name);

	struct drm_gem_flink flink = {
		.handle = bo,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (ret) {
		fprintf(stderr, "Failed to flink bo %d: %s\n",
			bo, strerror(errno));
		//free(bo);
		return 0;
	}

	//bo->private = false;
	*name = flink.name;

	return 1;
}
uint32_t getBOAlignedSize(uint32_t size, uint32_t alignment)
{
	return align(size, alignment);
}
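/*
 * Creates a shader BO: the kernel copies and validates the QPU code passed
 * in 'data'. On success *size is updated to the aligned allocation size and
 * the GEM handle is returned; 0 means failure.
 */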
uint32_t vc4_bo_alloc_shader(int fd, const void *data, uint32_t* size)
{
	assert(fd);
	assert(data);
	assert(size);

	int ret;

	//kernel only requires alignment to sizeof(uint64_t), not an entire page
	uint32_t alignedSize = getBOAlignedSize(*size, sizeof(uint64_t));

	struct drm_vc4_create_shader_bo create = {
		.size = alignedSize,
		.data = (uintptr_t)data,
	};

	ret = drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO,
		       &create);

	if (ret != 0) {
		fprintf(stderr, "Couldn't create shader: %s\n",
			strerror(errno));
		return 0;
	}

	*size = alignedSize;

	return create.handle;
}
uint32_t vc4_bo_open_name(int fd, uint32_t name)
{
	assert(fd);
	assert(name);

	struct drm_gem_open o = {
		.name = name
	};
	int ret = drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &o);
	if (ret) {
		fprintf(stderr, "Failed to open bo %d: %s\n",
			name, strerror(errno));
		return 0;
	}

	return o.handle;
}
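/*
 * Allocates a new BO of 'size' bytes through DRM_IOCTL_VC4_CREATE_BO and
 * labels it with 'name'. Returns the GEM handle, or 0 on failure.
 *
 * Typical lifecycle of a BO using the helpers in this file (sketch):
 *   uint32_t handle = vc4_bo_alloc(fd, size, "example");
 *   void* ptr = vc4_bo_map(fd, handle, 0, size);
 *   // ... read/write through ptr ...
 *   vc4_bo_free(fd, handle, ptr, size);
 */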
uint32_t vc4_bo_alloc(int fd, uint32_t size, const char *name)
{
	assert(fd);
	assert(size);

	struct drm_vc4_create_bo create;
	int ret;

	/*bo = vc4_bo_from_cache(screen, size, name);
	if (bo) {
		if (dump_stats) {
			fprintf(stderr, "Allocated %s %dkb from cache:\n",
				name, size / 1024);
			vc4_bo_dump_stats(screen);
		}
		return bo;
	}*/

	memset(&create, 0, sizeof(create));
	create.size = size;

	ret = drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
	uint32_t handle = create.handle;

	if (ret != 0) {
		fprintf(stderr, "Couldn't alloc BO: %s\n",
			strerror(errno));
		/*if (!list_empty(&screen->bo_cache.time_list) &&
		    !cleared_and_retried) {
			cleared_and_retried = true;
			vc4_bo_cache_free_all(&screen->bo_cache);
			goto retry;
		}
		free(bo);*/
		return 0;
	}

	vc4_bo_label(fd, handle, name);

	return handle;
}
void vc4_bo_free(int fd, uint32_t bo, void* mappedAddr, uint32_t size)
{
	assert(fd);
	assert(bo);
	assert(size);

	if (mappedAddr) {
		vc4_bo_unmap_unsynchronized(fd, mappedAddr, size);
		//VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));
	}

	struct drm_gem_close c;
	memset(&c, 0, sizeof(c));
	c.handle = bo;
	int ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &c);
	if (ret != 0)
	{
		fprintf(stderr, "couldn't close object %d: %s\n", bo, strerror(errno));
	}
}
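/*
 * Purgeable-memory helpers built on DRM_IOCTL_VC4_GEM_MADVISE: marking a BO
 * VC4_MADV_DONTNEED lets the kernel reclaim its backing pages under memory
 * pressure, and VC4_MADV_WILLNEED pins it again. 'retained' tells the caller
 * whether the contents survived; 'hasMadvise' skips the ioctl on kernels
 * without madvise support.
 */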
int vc4_bo_unpurgeable(int fd, uint32_t bo, int hasMadvise)
{
	assert(fd);
	assert(bo);

	struct drm_vc4_gem_madvise arg = {
		.handle = bo,
		.madv = VC4_MADV_WILLNEED,
	};

	if (!hasMadvise)
		return 1;

	if (drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg))
	{
		fprintf(stderr, "Unpurgeable BO madvise failed: %s\n",
			strerror(errno));
		return 0;
	}

	return arg.retained;
}

void vc4_bo_purgeable(int fd, uint32_t bo, int hasMadvise)
{
	assert(fd);
	assert(bo);

	struct drm_vc4_gem_madvise arg = {
		.handle = bo,
		.madv = VC4_MADV_DONTNEED,
	};

	if (hasMadvise)
	{
		int ret = drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
		if(ret)
		{
			fprintf(stderr, "Purgeable BO madvise failed: %s\n",
				strerror(errno));
		}
	}
}
void vc4_bo_label(int fd, uint32_t bo, const char* name)
{
	assert(fd);
	assert(bo);

	const char* str = name;
	if(!str) str = "";

	//TODO don't use in release!
	struct drm_vc4_label_bo label = {
		.handle = bo,
		.len = strlen(str),
		.name = (uintptr_t)str,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_VC4_LABEL_BO, &label);
	if(ret)
	{
		fprintf(stderr, "BO label failed: %s\n",
			strerror(errno));
	}
}
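/*
 * Exports a GEM handle as a dma-buf file descriptor via PRIME so the buffer
 * can be shared with other devices or processes. Returns the new fd, or 0 on
 * failure.
 */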
int vc4_bo_get_dmabuf(int fd, uint32_t bo)
{
	assert(fd);
	assert(bo);

	int boFd;
	int ret = drmPrimeHandleToFD(fd, bo,
				     O_CLOEXEC, &boFd);
	if (ret != 0) {
		fprintf(stderr, "Failed to export gem bo %d to dmabuf: %s\n",
			bo, strerror(errno));
		return 0;
	}

	return boFd;
}
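/*
 * Synchronized mapping: maps the BO and then waits (with an infinite
 * timeout) until the GPU has finished using it, so the CPU sees up-to-date
 * contents.
 */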
void* vc4_bo_map(int fd, uint32_t bo, uint32_t offset, uint32_t size)
{
	assert(fd);
	assert(bo);
	assert(size);

	void* map = vc4_bo_map_unsynchronized(fd, bo, offset, size);

	//wait infinitely
	int ok = vc4_bo_wait(fd, bo, WAIT_TIMEOUT_INFINITE);
	if (!ok) {
		fprintf(stderr, "BO wait for map failed: %s\n", strerror(errno));
		return 0;
	}

	return map;
}
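/*
 * Submits a command list to the kernel with DRM_IOCTL_VC4_SUBMIT_CL and
 * records the returned seqno in *lastEmittedSeqno. To throttle the CPU it
 * then waits until no more than 5 submitted jobs remain outstanding,
 * advancing *lastFinishedSeqno as jobs complete.
 */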
void vc4_cl_submit(int fd, struct drm_vc4_submit_cl* submit, uint64_t* lastEmittedSeqno, uint64_t* lastFinishedSeqno)
{
	assert(fd);
	assert(submit);
	assert(lastEmittedSeqno);
	assert(lastFinishedSeqno);

	int ret = drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, submit);

	static int warned = 0;
	if (ret && !warned) {
		fprintf(stderr, "Draw call returned %s. "
			"Expect corruption.\n", strerror(errno));
		warned = 1;
	} else if (!ret) {
		*lastEmittedSeqno = submit->seqno;
	}

	if (*lastEmittedSeqno - *lastFinishedSeqno > 5) {
		uint64_t timeout = WAIT_TIMEOUT_INFINITE;
		if (!vc4_seqno_wait(fd,
				    lastFinishedSeqno,
				    *lastFinishedSeqno > 0 ? *lastEmittedSeqno - 5 : *lastEmittedSeqno,
				    &timeout))
		{
			fprintf(stderr, "Job throttling failed\n");
		}
	}
}