/*
 * Mirror of https://github.com/Yours3lf/rpi-vk-driver.git
 * File: driver/kernelInterface.c
 */
#define _GNU_SOURCE
#include "kernelInterface.h"
#include <stdatomic.h>
atomic_int refCounter = 0;
int controlFd = 0;
//int renderFd = 0;
/* Lazily open the DRM control node and take a reference on it.
 *
 * The descriptor is cached in the global controlFd and shared by all
 * callers; refCounter tracks how many openIoctl() calls are outstanding.
 * Returns 0 on success, -1 if the device file could not be opened.
 */
int openIoctl()
{
	if(!controlFd)
	{
		controlFd = open(DRM_IOCTL_CTRL_DEV_FILE_NAME, O_RDWR | O_CLOEXEC);
		if (controlFd < 0) {
			fprintf(stderr, "Can't open device file: %s \nError: %s\n", DRM_IOCTL_CTRL_DEV_FILE_NAME, strerror(errno));
			// BUG FIX: reset the cached fd to 0 on failure. The original
			// left the negative return value in controlFd, so every later
			// call skipped the open (controlFd != 0) and handed out an
			// invalid descriptor.
			controlFd = 0;
			return -1;
		}
	}

	++refCounter;
	return 0;
}
void closeIoctl(int fd)
{
if (--refCounter == 0)
{
close(fd);
}
}
/* Round num up to the next multiple of alignment; numbers that are
 * already aligned are returned unchanged. alignment must be non-zero. */
static uint32_t align(uint32_t num, uint32_t alignment)
{
	uint32_t remainder = num % alignment;
	return remainder ? num + (alignment - remainder) : num;
}
int vc4_get_chip_info(int fd,
uint32_t* technologyVersion,
uint32_t* IDstrUINT,
uint32_t* vpmMemorySize,
uint32_t* hdrSupported,
uint32_t* numSemaphores,
uint32_t* numTMUperSlice,
uint32_t* numQPUperSlice,
uint32_t* numSlices,
uint32_t* v3dRevision,
uint32_t* tileBufferDoubleBufferModeSupported,
uint32_t* tileBufferSize,
uint32_t* vriMemorySize)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(technologyVersion);
assert(IDstrUINT);
assert(vpmMemorySize);
assert(hdrSupported);
assert(numSemaphores);
assert(numTMUperSlice);
assert(numQPUperSlice);
assert(numSlices);
assert(v3dRevision);
assert(tileBufferDoubleBufferModeSupported);
assert(tileBufferSize);
assert(vriMemorySize);
2018-05-13 20:47:05 +02:00
2018-05-13 20:29:47 +02:00
struct drm_vc4_get_param ident0 = {
.param = DRM_VC4_PARAM_V3D_IDENT0,
};
struct drm_vc4_get_param ident1 = {
.param = DRM_VC4_PARAM_V3D_IDENT1,
};
struct drm_vc4_get_param ident2 = {
.param = DRM_VC4_PARAM_V3D_IDENT2,
};
2018-05-13 20:29:47 +02:00
int ret;
ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &ident0);
if (ret != 0) {
if (errno == EINVAL) {
/* Backwards compatibility with 2835 kernels which
* only do V3D 2.1.
*/
return 0; //21
2018-05-13 20:29:47 +02:00
} else {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Couldn't get V3D IDENT0: %s\n",
2018-05-13 20:29:47 +02:00
strerror(errno));
return 0;
}
2018-05-13 20:29:47 +02:00
}
ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &ident1);
if (ret != 0) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Couldn't get V3D IDENT1: %s\n",
2018-05-13 20:29:47 +02:00
strerror(errno));
return 0;
}
ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &ident2);
if (ret != 0) {
fprintf(stderr, "Couldn't get V3D IDENT2: %s\n",
strerror(errno));
return 0;
}
*technologyVersion = (ident0.value >> 24) & 0xff;
*IDstrUINT = (ident0.value >> 0) & 0x00ffffff;
*vpmMemorySize = ((ident1.value >> 28) & 0xf) * 1024; //multiples of 1K
*hdrSupported = (ident1.value >> 24) & 0xf;
*numSemaphores = (ident1.value >> 16) & 0xff;
*numTMUperSlice = (ident1.value >> 12) & 0xf;
*numQPUperSlice = (ident1.value >> 8) & 0xf;
*numSlices = (ident1.value >> 4) & 0xf;
*v3dRevision = (ident1.value >> 0) & 0xf;
*tileBufferDoubleBufferModeSupported = (ident2.value >> 8) & 0xf;
*tileBufferSize = (ident2.value >> 4) & 0xf;
*vriMemorySize = (ident2.value >> 0) & 0xf;
uint32_t v3d_ver = (*technologyVersion) * 10 + (*v3dRevision);
if(v3d_ver != 21 && v3d_ver != 26)
{
printf("v3d_ver unsupported: %u\n", v3d_ver);
2018-05-13 20:29:47 +02:00
return 0;
}
return 1;
}
int vc4_has_feature(int fd, uint32_t feature)
{
2018-05-13 20:47:05 +02:00
assert(fd);
2018-05-13 20:29:47 +02:00
struct drm_vc4_get_param p = {
.param = feature,
};
int ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &p);
2018-05-13 20:29:47 +02:00
if (ret != 0)
{
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Couldn't determine if VC4 has feature: %s\n", strerror(errno));
2018-05-13 20:29:47 +02:00
return 0;
}
2018-05-13 20:29:47 +02:00
return p.value;
}
int vc4_test_tiling(int fd)
{
2018-05-13 20:47:05 +02:00
assert(fd);
/* Test if the kernel has GET_TILING; it will return -EINVAL if the
* ioctl does not exist, but -ENOENT if we pass an impossible handle.
* 0 cannot be a valid GEM object, so use that.
*/
struct drm_vc4_get_tiling get_tiling = {
2018-05-13 20:29:47 +02:00
.handle = 0x0,
};
int ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
if (ret == -1 && errno == ENOENT)
{
return 1;
}
return 0;
}
//TODO what is this supposed to do?
//ask the kernel what is the buffer's tiling?
uint64_t vc4_bo_get_tiling(int fd, uint32_t bo, uint64_t mod)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(bo);
struct drm_vc4_get_tiling get_tiling = {
2018-05-13 20:29:47 +02:00
.handle = bo,
};
int ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
if (ret != 0) {
2018-08-25 12:03:54 +02:00
return DRM_FORMAT_MOD_LINEAR; //0
} else if (mod == DRM_FORMAT_MOD_INVALID) {
2018-05-13 20:29:47 +02:00
return get_tiling.modifier;
} else if (mod != get_tiling.modifier) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Modifier 0x%llx vs. tiling (0x%llx) mismatch\n",
2018-05-13 20:29:47 +02:00
(long long)mod, get_tiling.modifier);
2018-08-25 12:03:54 +02:00
return -1;
}
2018-05-13 20:47:05 +02:00
2018-08-25 12:03:54 +02:00
return -1;
}
int vc4_bo_set_tiling(int fd, uint32_t bo, uint64_t mod)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(bo);
struct drm_vc4_set_tiling set_tiling = {
2018-05-13 20:29:47 +02:00
.handle = bo,
.modifier = mod,
};
int ret = drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING,
2018-05-13 20:29:47 +02:00
&set_tiling);
if (ret != 0)
{
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Couldn't set tiling: %s\n",
strerror(errno));
return 0;
}
return 1;
}
/* Tell the kernel whether a BO's contents are still needed (madvise).
 * When the kernel lacks madvise support (hasMadvise == 0) this is a no-op
 * that reports the BO as retained. Returns arg.retained (0 if the kernel
 * already purged the BO), or 0 on ioctl failure. */
uint32_t vc4_set_madvise(int fd, uint32_t bo, uint32_t needed, int hasMadvise)
{
	assert(fd);
	assert(bo);

	if (!hasMadvise)
		return 1;

	//VC4_MADV_WILLNEED 0
	//VC4_MADV_DONTNEED 1
	struct drm_vc4_gem_madvise arg = {
		.handle = bo,
		.madv = needed ? 0 : 1,
	};

	if (drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg))
	{
		fprintf(stderr, "BO madvise failed: %s\n",
				strerror(errno));
		return 0;
	}

	return arg.retained;
}
void* vc4_bo_map_unsynchronized(int fd, uint32_t bo, uint32_t offset, uint32_t size)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(bo);
assert(size);
2018-05-13 20:29:47 +02:00
int ret;
2018-05-13 20:29:47 +02:00
//if (bo->map)
// return bo->map;
struct drm_vc4_mmap_bo map;
memset(&map, 0, sizeof(map));
map.handle = bo;
ret = drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
if (ret != 0) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Couldn't map unsync: %s\n", strerror(errno));
2018-05-13 20:29:47 +02:00
return 0;
}
void* mapPtr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
fd, map.offset + offset);
2018-05-13 20:29:47 +02:00
if (mapPtr == MAP_FAILED) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "mmap of bo %d (offset 0x%016llx, size %d) failed\n",
bo, (long long)map.offset + offset, size);
2018-05-13 20:29:47 +02:00
return 0;
}
//VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, false));
2018-05-13 20:29:47 +02:00
return mapPtr;
}
void vc4_bo_unmap_unsynchronized(int fd, void* ptr, uint32_t size)
{
assert(fd);
assert(ptr);
assert(size);
munmap(ptr, size);
}
int vc4_bo_wait(int fd, uint32_t bo, uint64_t timeout_ns)
{
2018-05-13 20:47:05 +02:00
assert(fd);
2018-08-25 12:03:54 +02:00
assert(bo);
2018-05-13 20:47:05 +02:00
2018-05-13 20:29:47 +02:00
struct drm_vc4_wait_bo wait = {
2018-08-25 12:03:54 +02:00
.handle = bo,
.timeout_ns = timeout_ns,
2018-05-13 20:29:47 +02:00
};
2018-08-25 12:03:54 +02:00
printf("Wait for BO: %u\n", bo);
2018-05-13 20:47:05 +02:00
2018-08-25 12:03:54 +02:00
int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
2018-05-13 20:29:47 +02:00
if (ret) {
if (ret != -ETIME) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "BO wait failed: %s\n",
strerror(errno));
2018-05-13 18:20:52 +02:00
}
2018-05-13 20:29:47 +02:00
return 0;
}
return 1;
2018-05-13 18:20:52 +02:00
}
int vc4_seqno_wait(int fd, uint64_t* lastFinishedSeqno, uint64_t seqno, uint64_t* timeout_ns)
{
2018-05-13 20:47:05 +02:00
assert(fd);
2018-08-25 12:03:54 +02:00
assert(lastFinishedSeqno);
2018-10-17 21:56:13 +02:00
assert(timeout_ns);
2018-05-13 20:47:05 +02:00
if(!seqno)
return 1;
2018-08-25 12:03:54 +02:00
if (*lastFinishedSeqno >= seqno)
return 1;
2018-05-13 20:29:47 +02:00
struct drm_vc4_wait_seqno wait = {
.seqno = seqno,
2018-10-17 21:56:13 +02:00
.timeout_ns = *timeout_ns,
2018-05-13 20:29:47 +02:00
};
2018-05-13 20:47:05 +02:00
2018-08-25 12:03:54 +02:00
printf("Wait for seqno: %llu\n", seqno);
2018-05-13 18:20:52 +02:00
2018-08-25 12:03:54 +02:00
int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
2018-05-13 20:29:47 +02:00
if (ret) {
if (ret != -ETIME) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Seqno wait failed: %s\n",
strerror(errno));
2020-02-24 22:45:47 +01:00
vc4_print_hang_state(controlFd);
2018-05-13 18:20:52 +02:00
}
2018-10-17 21:56:13 +02:00
else
{
//Timeout happened
2020-02-24 22:45:47 +01:00
vc4_print_hang_state(controlFd);
2018-10-17 21:56:13 +02:00
*timeout_ns = -1;
return -1;
}
2018-05-13 18:20:52 +02:00
2018-05-13 20:29:47 +02:00
return 0;
}
2018-10-17 21:56:13 +02:00
*timeout_ns = wait.timeout_ns;
2018-05-13 20:29:47 +02:00
*lastFinishedSeqno = seqno;
return 1;
}
int vc4_bo_flink(int fd, uint32_t bo, uint32_t *name)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(bo);
assert(name);
2018-05-13 20:29:47 +02:00
struct drm_gem_flink flink = {
.handle = bo,
};
int ret = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
if (ret) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Failed to flink bo %d: %s\n",
2018-05-13 20:29:47 +02:00
bo, strerror(errno));
//free(bo);
return 0;
}
2018-05-13 20:29:47 +02:00
//bo->private = false;
*name = flink.name;
2018-05-13 20:29:47 +02:00
return 1;
}
/* Pad a BO size up to the next multiple of alignment (identity when the
 * size is already a multiple). */
uint32_t getBOAlignedSize(uint32_t size, uint32_t alignment)
{
	uint32_t remainder = size % alignment;
	return remainder ? size + (alignment - remainder) : size;
}
uint32_t vc4_bo_alloc_shader(int fd, const void *data, uint32_t* size)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(data);
assert(size);
2018-05-13 20:29:47 +02:00
int ret;
2019-09-08 00:30:52 +02:00
//kernel only requires alignmnet to sizeof(uint64_t), not an entire page
uint32_t alignedSize = getBOAlignedSize(*size, sizeof(uint64_t));
2018-05-13 20:29:47 +02:00
struct drm_vc4_create_shader_bo create = {
2018-05-13 20:47:05 +02:00
.size = alignedSize,
.data = (uintptr_t)data,
2018-05-13 20:29:47 +02:00
};
2018-05-13 20:29:47 +02:00
ret = drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO,
&create);
2018-05-13 20:29:47 +02:00
if (ret != 0) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Couldn't create shader: %s\n",
strerror(errno));
2018-05-13 20:29:47 +02:00
return 0;
}
2018-05-13 20:29:47 +02:00
*size = alignedSize;
2018-05-13 20:29:47 +02:00
return create.handle;
}
uint32_t vc4_bo_open_name(int fd, uint32_t name)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(name);
2018-05-13 20:29:47 +02:00
struct drm_gem_open o = {
.name = name
};
int ret = drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &o);
if (ret) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Failed to open bo %d: %s\n",
2018-05-13 20:29:47 +02:00
name, strerror(errno));
return 0;
}
2018-05-13 20:29:47 +02:00
return o.handle;
}
uint32_t vc4_bo_alloc(int fd, uint32_t size, const char *name)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(size);
2018-05-13 20:29:47 +02:00
struct drm_vc4_create_bo create;
int ret;
2018-05-13 20:29:47 +02:00
/*bo = vc4_bo_from_cache(screen, size, name);
if (bo) {
if (dump_stats) {
fprintf(stderr, "Allocated %s %dkb from cache:\n",
name, size / 1024);
vc4_bo_dump_stats(screen);
}
return bo;
}*/
2018-05-13 20:29:47 +02:00
memset(&create, 0, sizeof(create));
create.size = size;
2018-05-13 20:29:47 +02:00
ret = drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
uint32_t handle = create.handle;
2018-05-13 20:29:47 +02:00
if (ret != 0) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Couldn't alloc BO: %s\n",
strerror(errno));
2018-05-13 20:29:47 +02:00
/*if (!list_empty(&screen->bo_cache.time_list) &&
!cleared_and_retried) {
cleared_and_retried = true;
vc4_bo_cache_free_all(&screen->bo_cache);
goto retry;
}
free(bo);*/
2018-05-13 20:29:47 +02:00
return 0;
}
2018-05-13 20:47:05 +02:00
vc4_bo_label(fd, handle, name);
2018-05-13 20:29:47 +02:00
return handle;
}
void vc4_bo_free(int fd, uint32_t bo, void* mappedAddr, uint32_t size)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(bo);
assert(size);
2018-05-13 20:29:47 +02:00
if (mappedAddr) {
vc4_bo_unmap_unsynchronized(fd, mappedAddr, size);
2018-05-13 20:29:47 +02:00
//VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));
}
2018-05-13 20:29:47 +02:00
struct drm_gem_close c;
memset(&c, 0, sizeof(c));
c.handle = bo;
int ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &c);
if (ret != 0)
{
2019-09-07 18:41:46 +02:00
fprintf(stderr, "couldn't close object %d: %s\n", bo, strerror(errno));
2018-05-13 20:29:47 +02:00
}
}
void vc4_bo_label(int fd, uint32_t bo, const char* name)
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(bo);
char* str = name;
if(!str) str = "";
//TODO don't use in release!
struct drm_vc4_label_bo label = {
2018-05-13 20:29:47 +02:00
.handle = bo,
2018-05-13 20:47:05 +02:00
.len = strlen(str),
.name = (uintptr_t)str,
};
int ret = drmIoctl(fd, DRM_IOCTL_VC4_LABEL_BO, &label);
if(ret)
{
2019-09-07 18:41:46 +02:00
fprintf(stderr, "BO label failed: %s\n",
strerror(errno));
}
}
/* Export a BO as a dmabuf file descriptor (PRIME). Returns the new fd on
 * success, 0 on failure. The caller owns the returned fd. */
int vc4_bo_get_dmabuf(int fd, uint32_t bo)
{
	assert(fd);
	assert(bo);

	int boFd = 0;
	if (drmPrimeHandleToFD(fd, bo, O_CLOEXEC, &boFd) != 0)
	{
		fprintf(stderr, "Failed to export gem bo %d to dmabuf: %s\n",
				bo, strerror(errno));
		return 0;
	}

	return boFd;
}
void* vc4_bo_map(int fd, uint32_t bo, uint32_t offset, uint32_t size)
2018-05-13 18:20:52 +02:00
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(bo);
assert(size);
void* map = vc4_bo_map_unsynchronized(fd, bo, offset, size);
2018-05-13 18:20:52 +02:00
2018-05-13 20:29:47 +02:00
//wait infinitely
int ok = vc4_bo_wait(fd, bo, WAIT_TIMEOUT_INFINITE);
if (!ok) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "BO wait for map failed: %s\n", strerror(errno));
2018-05-13 20:29:47 +02:00
return 0;
}
2018-05-13 18:20:52 +02:00
2018-05-13 20:29:47 +02:00
return map;
2018-05-13 18:20:52 +02:00
}
void vc4_cl_submit(int fd, struct drm_vc4_submit_cl* submit, uint64_t* lastEmittedSeqno, uint64_t* lastFinishedSeqno)
2018-05-13 20:28:29 +02:00
{
2018-05-13 20:47:05 +02:00
assert(fd);
assert(submit);
assert(lastEmittedSeqno);
assert(lastFinishedSeqno);
int ret = drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, submit);
2018-05-13 20:29:47 +02:00
static int warned = 0;
if (ret && !warned) {
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Draw call returned %s. "
2018-05-13 20:29:47 +02:00
"Expect corruption.\n", strerror(errno));
warned = 1;
} else if (!ret) {
2018-05-13 20:47:05 +02:00
*lastEmittedSeqno = submit->seqno;
2018-05-13 20:29:47 +02:00
}
2018-05-13 20:28:29 +02:00
2018-05-13 20:29:47 +02:00
if (*lastEmittedSeqno - *lastFinishedSeqno > 5) {
2018-10-17 21:56:13 +02:00
uint64_t timeout = WAIT_TIMEOUT_INFINITE;
2020-02-24 22:45:47 +01:00
//uint64_t timeout = 1000ull * 1000ull * 1000ull; //TODO waits too long...
2018-05-13 20:29:47 +02:00
if (!vc4_seqno_wait(fd,
2018-05-13 20:47:05 +02:00
lastFinishedSeqno,
2018-08-25 12:03:54 +02:00
*lastFinishedSeqno > 0 ? *lastEmittedSeqno - 5 : *lastEmittedSeqno,
2018-10-17 21:56:13 +02:00
&timeout))
2018-05-13 20:29:47 +02:00
{
2019-09-07 18:41:46 +02:00
fprintf(stderr, "Job throttling failed\n");
2018-05-13 20:28:29 +02:00
}
2018-05-13 20:29:47 +02:00
}
2018-05-13 20:28:29 +02:00
}
uint32_t vc4_create_perfmon(int fd, uint32_t* counters, uint32_t num_counters)
{
assert(fd);
assert(counters);
assert(num_counters > 0);
assert(num_counters <= DRM_VC4_MAX_PERF_COUNTERS);
struct drm_vc4_perfmon_create arg =
{
.id = 0,
2020-02-23 14:29:01 +01:00
.ncounters = num_counters,
};
for(uint32_t c = 0; c < num_counters; ++c)
{
arg.events[c] = counters[c];
}
if (drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &arg))
{
fprintf(stderr, "Perfmon create failed: %s\n",
strerror(errno));
return 0;
}
if(!arg.id)
{
fprintf(stderr, "Perfmon create failed (invalid ID): %s\n",
strerror(errno));
return 0;
}
return arg.id;
}
/* Destroy a perfmon previously created by vc4_create_perfmon(). */
void vc4_destroy_perfmon(int fd, uint32_t id)
{
	assert(fd);
	assert(id);

	struct drm_vc4_perfmon_destroy arg =
	{
		.id = id
	};

	if (drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &arg))
	{
		fprintf(stderr, "Perfmon destroy failed: %s\n",
				strerror(errno));
	}
}
/*
* Returns the values of the performance counters tracked by this
* perfmon (as an array of ncounters * u64 values).
*
* No implicit synchronization is performed, so the user has to
* guarantee that any jobs using this perfmon have already been
* completed (probably by blocking on the seqno returned by the
* last exec that used the perfmon).
*/
/* Fetch the counter values of a perfmon into the caller's buffer, which
 * must hold ncounters * sizeof(uint64_t) bytes. No implicit GPU
 * synchronization is performed (see the comment above this function).
 *
 * BUG FIX: values_ptr is a u64 in the uapi struct; the original assigned
 * the void* directly (a constraint violation). Go through uintptr_t, the
 * same convention used for .data/.name elsewhere in this file.
 */
void vc4_perfmon_get_values(int fd, uint32_t id, void* ptr)
{
	assert(fd);
	assert(id);
	assert(ptr);

	struct drm_vc4_perfmon_get_values arg =
	{
		.id = id,
		.values_ptr = (uintptr_t)ptr
	};

	if (drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &arg))
	{
		fprintf(stderr, "Perfmon get values failed: %s\n",
				strerror(errno));
	}
}
void vc4_print_hang_state(int fd)
{
assert(fd);
struct drm_vc4_get_hang_state_bo bo_states[128];
struct drm_vc4_get_hang_state arg =
{
/** Pointer to array of struct drm_vc4_get_hang_state_bo. */
.bo = bo_states,
/**
* On input, the size of the bo array. Output is the number
* of bos to be returned.
*/
.bo_count = 128
};
if (drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &arg))
{
2020-02-24 22:45:47 +01:00
fprintf(stderr, "vc4 get hang state failed: %s\n",
strerror(errno));
}
else
{
fprintf(stderr, "--------------\n");
fprintf(stderr, "--------------\n");
fprintf(stderr, "GPU hang state\n");
for(uint32_t c = 0; c < arg.bo_count; ++c)
{
struct drm_vc4_get_hang_state_bo* bos = arg.bo;
fprintf(stderr, "BO: %u, Addr: %u, Size: %u\n", bos[c].handle, bos[c].paddr, bos[c].size);
}
fprintf(stderr, "Start bin: %u, Start render: %u\n", arg.start_bin, arg.start_render);
fprintf(stderr, "ct0ca: %u, ct0ea: %u\n", arg.ct0ca, arg.ct0ea);
fprintf(stderr, "ct1ca: %u, ct1ea: %u\n", arg.ct1ca, arg.ct1ea);
fprintf(stderr, "ct0cs: %u, ct1cs: %u\n", arg.ct0cs, arg.ct1cs);
fprintf(stderr, "ct0ra0: %u, ct1ra0: %u\n", arg.ct0ra0, arg.ct1ra0);
fprintf(stderr, "bpca: %u, bpcs: %u\n", arg.bpca, arg.bpcs);
fprintf(stderr, "bpoa: %u, bpos: %u\n", arg.bpoa, arg.bpos);
fprintf(stderr, "vpmbase: %u: %u\n", arg.vpmbase);
fprintf(stderr, "dbge: %u: %u\n", arg.dbge);
fprintf(stderr, "fdbgo: %u: %u\n", arg.fdbgo);
fprintf(stderr, "fdbgb: %u: %u\n", arg.fdbgb);
fprintf(stderr, "fdbgr: %u: %u\n", arg.fdbgr);
fprintf(stderr, "fdbgs: %u: %u\n", arg.fdbgs);
fprintf(stderr, "errstat: %u: %u\n", arg.errstat);
}
}