Bernhard Stoeckner 2024-12-05 16:37:35 +01:00
parent d5a0858f90
commit 9d0b0414a5
GPG Key ID: 7D23DC2750FAC2E1
39 changed files with 457 additions and 216 deletions

View File

@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 565.57.01.
version 565.77.
## How to Build
@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
565.57.01 driver release. This can be achieved by installing
565.77 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@ -185,7 +185,7 @@ table below).
For details on feature support and limitations, see the NVIDIA GPU driver
end user README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/565.57.01/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/565.77/README/kernel_open.html
For vGPU support, please refer to the README.vgpu packaged in the vGPU Host
Package for more details.
@ -754,6 +754,8 @@ Subsystem Device ID.
| NVIDIA H800 | 2324 10DE 17A8 |
| NVIDIA H20 | 2329 10DE 198B |
| NVIDIA H20 | 2329 10DE 198C |
| NVIDIA H20-3e | 232C 10DE 2063 |
| NVIDIA H20-3e | 232C 10DE 2064 |
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C0 |
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 |
| NVIDIA H100 PCIe | 2331 10DE 1626 |
@ -836,10 +838,12 @@ Subsystem Device ID.
| NVIDIA GeForce RTX 2050 | 25AD |
| NVIDIA RTX A1000 | 25B0 1028 1878 |
| NVIDIA RTX A1000 | 25B0 103C 1878 |
| NVIDIA RTX A1000 | 25B0 103C 8D96 |
| NVIDIA RTX A1000 | 25B0 10DE 1878 |
| NVIDIA RTX A1000 | 25B0 17AA 1878 |
| NVIDIA RTX A400 | 25B2 1028 1879 |
| NVIDIA RTX A400 | 25B2 103C 1879 |
| NVIDIA RTX A400 | 25B2 103C 8D95 |
| NVIDIA RTX A400 | 25B2 10DE 1879 |
| NVIDIA RTX A400 | 25B2 17AA 1879 |
| NVIDIA A16 | 25B6 10DE 14A9 |

View File

@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"565.57.01\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"565.77\"
ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X)

View File

@ -52,6 +52,22 @@ else
endif
endif
# If CC hasn't been set explicitly, check the value of CONFIG_CC_VERSION_TEXT.
# Look for the compiler specified there, and use it by default, if found.
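# CONFIG_CC_VERSION_TEXT normally looks something like
#   CONFIG_CC_VERSION_TEXT="gcc (GCC) 13.2.0"
# so $(firstword ...) below reduces it to just the compiler name ("gcc"),
# which is then resolved with 'command -v'.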
ifeq ($(origin CC),default)
cc_version_text=$(firstword $(shell . $(KERNEL_OUTPUT)/.config; \
echo "$$CONFIG_CC_VERSION_TEXT"))
ifneq ($(cc_version_text),)
ifeq ($(shell command -v $(cc_version_text)),)
$(warning WARNING: Unable to locate the compiler $(cc_version_text) \
from CONFIG_CC_VERSION_TEXT in the kernel configuration.)
else
CC=$(cc_version_text)
endif
endif
endif
CC ?= cc
LD ?= ld
OBJDUMP ?= objdump
@ -65,6 +81,16 @@ else
)
endif
KERNEL_ARCH = $(ARCH)
ifneq ($(filter $(ARCH),i386 x86_64),)
KERNEL_ARCH = x86
else
ifeq ($(filter $(ARCH),arm64 powerpc),)
$(error Unsupported architecture $(ARCH))
endif
endif
NV_KERNEL_MODULES ?= $(wildcard nvidia nvidia-uvm nvidia-vgpu-vfio nvidia-modeset nvidia-drm nvidia-peermem)
NV_KERNEL_MODULES := $(filter-out $(NV_EXCLUDE_KERNEL_MODULES), \
$(NV_KERNEL_MODULES))
@ -106,8 +132,9 @@ else
# module symbols on which the Linux kernel's module resolution is dependent
# and hence must be used whenever present.
LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \
$(KERNEL_SOURCES)/arch/$(ARCH)/kernel/module.lds \
LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \
$(KERNEL_SOURCES)/arch/$(KERNEL_ARCH)/kernel/module.lds \
$(KERNEL_OUTPUT)/arch/$(KERNEL_ARCH)/module.lds \
$(KERNEL_OUTPUT)/scripts/module.lds
NV_MODULE_COMMON_SCRIPTS := $(foreach s, $(wildcard $(LD_SCRIPT)), -T $(s))

View File

@ -2450,6 +2450,22 @@ compile_test() {
fi
;;
file_operations_fop_unsigned_offset_present)
#
# Determine if the FOP_UNSIGNED_OFFSET define is present.
#
# Added by commit 641bb4394f40 ("fs: move FMODE_UNSIGNED_OFFSET to
# fop_flags") in v6.12.
#
CODE="
#include <linux/fs.h>
int conftest_file_operations_fop_unsigned_offset_present(void) {
return FOP_UNSIGNED_OFFSET;
}"
compile_check_conftest "$CODE" "NV_FILE_OPERATIONS_FOP_UNSIGNED_OFFSET_PRESENT" "" "types"
;;
pci_dev_has_ats_enabled)
#
# Determine if the 'pci_dev' data type has a 'ats_enabled' member.

View File

@ -1711,6 +1711,10 @@ static const struct file_operations nv_drm_fops = {
.read = drm_read,
.llseek = noop_llseek,
#if defined(NV_FILE_OPERATIONS_FOP_UNSIGNED_OFFSET_PRESENT)
.fop_flags = FOP_UNSIGNED_OFFSET,
#endif
};
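/*
* Background for the guard above (illustrative, not nvidia-drm code):
* FOP_UNSIGNED_OFFSET lets the file offset be treated as unsigned, and in
* v6.12 it replaced setting FMODE_UNSIGNED_OFFSET on the struct file at
* open time, which is roughly how the same behaviour was requested on
* older kernels:
*
*     static int example_open(struct inode *inode, struct file *filp)
*     {
*     #ifndef NV_FILE_OPERATIONS_FOP_UNSIGNED_OFFSET_PRESENT
*         filp->f_mode |= FMODE_UNSIGNED_OFFSET;   // pre-6.12 kernels
*     #endif
*         return 0;
*     }
*
* On v6.12 and later the flag is declared once in file_operations, as the
* conftest-guarded .fop_flags initializer above does.
*/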
static const struct drm_ioctl_desc nv_drm_ioctls[] = {

View File

@ -140,3 +140,4 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_lut
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_property_blob_put
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_mmap
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations_fop_unsigned_offset_present

View File

@ -184,7 +184,7 @@ static void hmm_copy_devmem_page(struct page *dst_page, struct page *src_page)
gpu = uvm_gpu_chunk_get_gpu(gpu_chunk);
status = uvm_mmu_chunk_map(gpu_chunk);
if (status != NV_OK)
goto out_zero;
goto out;
status = uvm_parent_gpu_map_cpu_pages(gpu->parent, dst_page, PAGE_SIZE, &dma_addr);
if (status != NV_OK)
@ -215,7 +215,7 @@ out_unmap_cpu:
out_unmap_gpu:
uvm_mmu_chunk_unmap(gpu_chunk, NULL);
out_zero:
out:
// We can't fail eviction because we need to free the device-private pages
// so the GPU can be unregistered. So the best we can do is warn on any
// failures and zero the uninitialised page. This could result in data loss
@ -245,6 +245,7 @@ static NV_STATUS uvm_hmm_pmm_gpu_evict_pfn(unsigned long pfn)
}
lock_page(dst_page);
hmm_copy_devmem_page(dst_page, migrate_pfn_to_page(src_pfn));
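// Publish the freshly copied system-memory page as the migration
// destination; migrate_pfn() encodes the pfn in the MIGRATE_PFN_* format
// that migrate_device_pages() expects.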
dst_pfn = migrate_pfn(page_to_pfn(dst_page));
migrate_device_pages(&src_pfn, &dst_pfn, 1);

View File

@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r565_97
#define NV_BUILD_BRANCH r565_00
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r565_97
#define NV_PUBLIC_BRANCH r565_00
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r565/r565_97-152"
#define NV_BUILD_CHANGELIST_NUM (34971420)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r565/r565_00-213"
#define NV_BUILD_CHANGELIST_NUM (35186646)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r565/r565_97-152"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34971420)
#define NV_BUILD_NAME "rel/gpu_drv/r565/r565_00-213"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (35186646)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r565_97-1"
#define NV_BUILD_CHANGELIST_NUM (34971420)
#define NV_BUILD_BRANCH_VERSION "r565_00-169"
#define NV_BUILD_CHANGELIST_NUM (35186646)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "565.98"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34971420)
#define NV_BUILD_NAME "566.31"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (35186646)
#define NV_BUILD_BRANCH_BASE_VERSION R565
#endif
// End buildmeister python edited section

View File

@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "565.57.01"
#define NV_VERSION_STRING "565.77"
#else

View File

@ -362,6 +362,42 @@ nvswitch_corelib_set_dl_link_mode_ls10
switch (mode)
{
case NVLINK_LINKSTATE_SAFE:
{
// check if link is in reset
if (nvswitch_is_link_in_reset(device, link))
{
NVSWITCH_PRINT(device, ERROR,
"%s: link #%d is still in reset, cannot change link state\n",
__FUNCTION__, link->linkNumber);
return NVL_ERR_INVALID_STATE;
}
NVSWITCH_PRINT(device, INFO,
"%s : Link state request to Safe for (%s):(%s) not needed. Skipping.\n",
__FUNCTION__, device->name, link->linkName);
break;
}
case NVLINK_LINKSTATE_HS:
{
// check if link is in reset
if (nvswitch_is_link_in_reset(device, link))
{
NVSWITCH_PRINT(device, ERROR,
"%s: link #%d is still in reset, cannot change link state\n",
__FUNCTION__, link->linkNumber);
return -NVL_ERR_INVALID_STATE;
}
NVSWITCH_PRINT(device, INFO,
"%s : Link state request to Active for (%s):(%s) not needed. Skipping.\n",
__FUNCTION__, device->name, link->linkName);
break;
}
case NVLINK_LINKSTATE_INITPHASE1:
{
// Apply appropriate SIMMODE settings

View File

@ -41,7 +41,8 @@ extern "C" {
#define NVA084_NOTIFIERS_EVENT_GUEST_DRIVER_UNLOADED (4)
#define NVA084_NOTIFIERS_EVENT_PRINT_ERROR_MESSAGE (5)
#define NVA084_NOTIFIERS_EVENT_GUEST_LICENSE_STATE_CHANGED (6)
#define NVA084_NOTIFIERS_MAXCOUNT (7)
#define NVA084_NOTIFIERS_EVENT_UPDATE_GUEST_OS_TYPE (7)
#define NVA084_NOTIFIERS_MAXCOUNT (8)
#define NVA084_NOTIFICATION_STATUS_IN_PROGRESS (0x8000)
#define NVA084_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000)

View File

@ -125,6 +125,7 @@ void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo,
void nvChooseDitheringEvo(
const NVConnectorEvoRec *pConnectorEvo,
enum NvKmsDpyAttributeColorBpcValue bpc,
enum NvKmsOutputColorimetry colorimetry,
const NVDpyAttributeRequestedDitheringConfig *pReqDithering,
NVDpyAttributeCurrentDitheringConfig *pCurrDithering);

View File

@ -233,6 +233,7 @@ static void SetDitheringCommon(NVDpyEvoPtr pDpyEvo)
nvChooseDitheringEvo(pConnectorEvo,
pApiHeadState->attributes.color.bpc,
pApiHeadState->attributes.color.colorimetry,
&pDpyEvo->requestedDithering,
&pApiHeadState->attributes.dithering);

View File

@ -2737,10 +2737,10 @@ void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo,
static enum NvKmsDpyAttributeColorBpcValue GetMinRequiredBpc(
enum NvKmsOutputColorimetry colorimetry)
{
// 10 BPC required for HDR
// >= 8 BPC required for HDR
// XXX HDR TODO: Handle other colorimetries
return (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) ?
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10 :
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8 :
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6;
}
@ -3155,6 +3155,7 @@ static const struct {
void nvChooseDitheringEvo(
const NVConnectorEvoRec *pConnectorEvo,
enum NvKmsDpyAttributeColorBpcValue bpc,
enum NvKmsOutputColorimetry colorimetry,
const NVDpyAttributeRequestedDitheringConfig *pReqDithering,
NVDpyAttributeCurrentDitheringConfig *pCurrDithering)
{
@ -3261,6 +3262,29 @@ void nvChooseDitheringEvo(
}
}
// XXX HDR TODO: Handle other colorimetries
if ((colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) &&
(pReqDithering->state !=
NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED)) {
// GetMinRequiredBpc() enforces >= 8 BPC for HDR
nvAssert(bpc >= 8);
/*
* If output has BT.2100 (HDR10) colorimetry but fewer than 10 bits of
* precision, dither to 8 BPC, or as requested.
*/
if (bpc < 10) {
currDithering.enabled = TRUE;
if (pReqDithering->depth ==
NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO) {
currDithering.depth =
NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
}
}
}
if (currDithering.enabled) {
switch (pReqDithering->mode) {
case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL:
@ -6828,7 +6852,8 @@ static NvBool GetDfpHdmiProtocol(const NVDpyEvoRec *pDpyEvo,
nvDpyGetOutputColorFormatInfo(pDpyEvo);
const NvBool forceHdmiFrlIsSupported = FALSE;
nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A ||
nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS ||
rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A ||
rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B);
/* Override protocol if this mode requires HDMI FRL. */
@ -6855,10 +6880,25 @@ static NvBool GetDfpHdmiProtocol(const NVDpyEvoRec *pDpyEvo,
if (nvHdmiGetEffectivePixelClockKHz(pDpyEvo, pTimings, pDpyColor) <=
pDpyEvo->maxSingleLinkPixelClockKHz) {
*pTimingsProtocol = (rmProtocol ==
NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A) ?
NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A :
NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B;
switch (rmProtocol) {
case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
/*
* Force single link TMDS protocol. HDMI does not
* physically support dual link TMDS.
*
* TMDS_A: "use A side of the link"
*/
*pTimingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
break;
case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
*pTimingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
break;
case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
*pTimingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B;
break;
default:
return FALSE;
}
return TRUE;
}
} while (nvDowngradeColorSpaceAndBpc(pDpyEvo,

View File

@ -2061,6 +2061,7 @@ ValidateProposedModeSetHwStateOneDisp(
nvChooseDitheringEvo(pDpyEvo->pConnectorEvo,
pProposedApiHead->attributes.color.bpc,
pProposedApiHead->attributes.color.colorimetry,
&pDpyEvo->requestedDithering,
&pProposedApiHead->attributes.dithering);
}

View File

@ -4645,6 +4645,11 @@ static NvBool NotifyVblank(
struct NvKmsPerOpenDisp* pOpenDisp =
GetPerOpenDisp(pOpen, pParams->request.deviceHandle,
pParams->request.dispHandle);
if (pOpenDisp == NULL) {
return NV_FALSE;
}
const NvU32 apiHead = pParams->request.head;
pEventOpenFd = nvkms_get_per_open_data(pParams->request.unicastEvent.fd);
@ -5219,8 +5224,8 @@ void nvKmsClose(void *pOpenVoid)
/*
Frees all references to a device
*/
* Frees all references to a device
*/
void nvRevokeDevice(NVDevEvoPtr pDevEvo)
{
if (pDevEvo == NULL) {
@ -5229,7 +5234,7 @@ void nvRevokeDevice(NVDevEvoPtr pDevEvo)
struct NvKmsPerOpen *pOpen;
nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenListEntry) {
nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);
if (pOpenDev == NULL) {
continue;

View File

@ -3771,8 +3771,15 @@ static NV_STATUS RmNonDPAuxI2CTransfer
break;
case NV_I2C_CMD_SMBUS_BLOCK_WRITE:
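/*
* SMBus block-write layout assumed here: pData[0] carries the message
* length N and pData[1..N] the payload, so N must be strictly smaller
* than the caller-supplied buffer length 'len'.
*/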
if (pData[0] >= len) {
return NV_ERR_INVALID_ARGUMENT;
}
params->transData.smbusBlockData.bWrite = NV_TRUE;
/* fall through*/
params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW;
params->transData.smbusBlockData.registerAddress = command;
params->transData.smbusBlockData.messageLength = pData[0];
params->transData.smbusBlockData.pMessage = pData + 1;
break;
case NV_I2C_CMD_SMBUS_BLOCK_READ:
params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW;

View File

@ -122,6 +122,18 @@ const PRB_FIELD_DESC prb_fields_dcl_dclmsg[] = {
PRB_MAYBE_FIELD_NAME("engine")
PRB_MAYBE_FIELD_DEFAULT(0)
},
{
331,
{
PRB_OPTIONAL,
PRB_MESSAGE,
0,
},
RC_RCDIAGRECORD,
0,
PRB_MAYBE_FIELD_NAME("rc_diag_recs")
PRB_MAYBE_FIELD_DEFAULT(0)
},
};
// 'ErrorBlock' field defaults
@ -150,7 +162,7 @@ const PRB_MSG_DESC prb_messages_dcl[] = {
PRB_MAYBE_MESSAGE_NAME("Dcl.Engines")
},
{
7,
8,
prb_fields_dcl_dclmsg,
PRB_MAYBE_MESSAGE_NAME("Dcl.DclMsg")
},

View File

@ -18,8 +18,8 @@ extern const PRB_MSG_DESC prb_messages_dcl[];
// Message maximum lengths
// Does not include repeated fields, strings and byte arrays.
#define DCL_ENGINES_LEN 136
#define DCL_DCLMSG_LEN 573
#define DCL_ERRORBLOCK_LEN 577
#define DCL_DCLMSG_LEN 616
#define DCL_ERRORBLOCK_LEN 620
extern const PRB_FIELD_DESC prb_fields_dcl_engines[];
@ -41,6 +41,7 @@ extern const PRB_FIELD_DESC prb_fields_dcl_dclmsg[];
#define DCL_DCLMSG_JOURNAL_BUGCHECK (&prb_fields_dcl_dclmsg[4])
#define DCL_DCLMSG_RCCOUNTER (&prb_fields_dcl_dclmsg[5])
#define DCL_DCLMSG_ENGINE (&prb_fields_dcl_dclmsg[6])
#define DCL_DCLMSG_RC_DIAG_RECS (&prb_fields_dcl_dclmsg[7])
// 'DclMsg' field lengths
#define DCL_DCLMSG_COMMON_LEN 42
@ -50,6 +51,7 @@ extern const PRB_FIELD_DESC prb_fields_dcl_dclmsg[];
#define DCL_DCLMSG_JOURNAL_BUGCHECK_LEN 69
#define DCL_DCLMSG_RCCOUNTER_LEN 64
#define DCL_DCLMSG_ENGINE_LEN 139
#define DCL_DCLMSG_RC_DIAG_RECS_LEN 42
extern const PRB_FIELD_DESC prb_fields_dcl_errorblock[];
@ -57,7 +59,7 @@ extern const PRB_FIELD_DESC prb_fields_dcl_errorblock[];
#define DCL_ERRORBLOCK_DATA (&prb_fields_dcl_errorblock[0])
// 'ErrorBlock' field lengths
#define DCL_ERRORBLOCK_DATA_LEN 576
#define DCL_ERRORBLOCK_DATA_LEN 619
extern const PRB_SERVICE_DESC prb_services_dcl[];

View File

@ -157,6 +157,11 @@ interruptEntryIsEmpty(const InterruptEntry *pEntry)
// Default value for intrStuckThreshold
#define INTR_STUCK_THRESHOLD 1000
// Minimum length of interrupt to log as long-running
#define LONG_INTR_LOG_LENGTH_NS (1000000LLU) // 1ms
// Maximum frequency of long-running interrupt print, per engine
#define LONG_INTR_LOG_RATELIMIT_NS (10000000000LLU) // 10s
#define INTR_TABLE_INIT_KERNEL (1 << 0)
#define INTR_TABLE_INIT_PHYSICAL (1 << 1)
@ -274,6 +279,13 @@ typedef struct Device Device;
// Metadata including vtable
struct NVOC_VTABLE__Intr;
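// Per-engine bookkeeping for long-running interrupts: how many exceeded
// LONG_INTR_LOG_LENGTH_NS since the last print, the longest service time
// observed (ns), and the timestamp of the last print used for rate limiting.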
struct __nvoc_inner_struc_Intr_1__ {
NvU32 intrCount;
NvU64 intrLength;
NvU64 lastPrintTime;
};
struct Intr {
@ -347,6 +359,7 @@ struct Intr {
NvU32 intrEn0Orig;
NvBool halIntrEnabled;
NvU32 saveIntrEn0;
struct __nvoc_inner_struc_Intr_1__ longIntrStats[175];
};

View File

@ -455,7 +455,7 @@ struct KernelGsp {
NvBool bHasVgpuLogs;
void *pLogElf;
NvU64 logElfDataSize;
PORT_MUTEX *pNvlogFlushMtx;
volatile NvS32 logDumpLock;
NvBool bLibosLogsPollingEnabled;
NvU8 bootAttempts;
NvBool bInInit;
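//
// logDumpLock replaces the pNvlogFlushMtx mutex above: 0 means free, 1 means
// held, and it is only ever flipped with nvport's atomic compare-and-swap,
// so callers running at raised IRQL (DPC/ISR) can try it without sleeping
// and bail out on contention. A minimal sketch of the convention used later
// in this commit (helper names are illustrative, not driver symbols):
//
//     // try-acquire: succeeds only if the lock still reads 0 (free)
//     NvBool acquired = portAtomicCompareAndSwapS32(&pKernelGsp->logDumpLock, 1, 0);
//
//     // release: swap 1 (held) back to 0 (free)
//     portAtomicCompareAndSwapS32(&pKernelGsp->logDumpLock, 0, 1);
//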

View File

@ -170,7 +170,7 @@ struct KernelHostVgpuDeviceApi {
// Data members
struct KernelHostVgpuDeviceShr *pShared;
NvU32 notifyActions[7];
NvU32 notifyActions[8];
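// Presumably sized to NVA084_NOTIFIERS_MAXCOUNT, which this change raises
// from 7 to 8 for the new NVA084_NOTIFIERS_EVENT_UPDATE_GUEST_OS_TYPE event.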
};

View File

@ -5220,6 +5220,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2324, 0x17a8, 0x10de, "NVIDIA H800" },
{ 0x2329, 0x198b, 0x10de, "NVIDIA H20" },
{ 0x2329, 0x198c, 0x10de, "NVIDIA H20" },
{ 0x232C, 0x2063, 0x10de, "NVIDIA H20-3e" },
{ 0x232C, 0x2064, 0x10de, "NVIDIA H20-3e" },
{ 0x2330, 0x16c0, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
@ -5302,10 +5304,12 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25AD, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
{ 0x25B0, 0x1878, 0x1028, "NVIDIA RTX A1000" },
{ 0x25B0, 0x1878, 0x103c, "NVIDIA RTX A1000" },
{ 0x25B0, 0x8d96, 0x103c, "NVIDIA RTX A1000" },
{ 0x25B0, 0x1878, 0x10de, "NVIDIA RTX A1000" },
{ 0x25B0, 0x1878, 0x17aa, "NVIDIA RTX A1000" },
{ 0x25B2, 0x1879, 0x1028, "NVIDIA RTX A400" },
{ 0x25B2, 0x1879, 0x103c, "NVIDIA RTX A400" },
{ 0x25B2, 0x8d95, 0x103c, "NVIDIA RTX A400" },
{ 0x25B2, 0x1879, 0x10de, "NVIDIA RTX A400" },
{ 0x25B2, 0x1879, 0x17aa, "NVIDIA RTX A400" },
{ 0x25B6, 0x14a9, 0x10de, "NVIDIA A16" },

View File

@ -43,7 +43,7 @@ extern const PRB_MSG_DESC prb_messages_nvdebug[];
// Does not include repeated fields, strings and byte arrays.
#define NVDEBUG_SYSTEMINFO_LEN 354
#define NVDEBUG_GPUINFO_LEN 262
#define NVDEBUG_NVDUMP_LEN 1570
#define NVDEBUG_NVDUMP_LEN 1613
#define NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO_LEN 12
#define NVDEBUG_SYSTEMINFO_SOCINFO_LEN 12
#define NVDEBUG_SYSTEMINFO_CPUINFO_LEN 24
@ -114,7 +114,7 @@ extern const PRB_FIELD_DESC prb_fields_nvdebug_nvdump[];
// 'NvDump' field lengths
#define NVDEBUG_NVDUMP_SYSTEM_INFO_LEN 357
#define NVDEBUG_NVDUMP_DCL_MSG_LEN 576
#define NVDEBUG_NVDUMP_DCL_MSG_LEN 619
#define NVDEBUG_NVDUMP_GPU_INFO_LEN 265
#define NVDEBUG_NVDUMP_EXCEPTION_ADDRESS_LEN 10
#define NVDEBUG_NVDUMP_SYSTEM_INFO_GSPRM_LEN 357

View File

@ -23,10 +23,10 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
void __nvoc_init_NvencSession(NvencSession*, RmHalspecOwner* );
void __nvoc_init_funcTable_NvencSession(NvencSession*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_NvencSession(NvencSession*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_NvencSession(NvencSession*, RmHalspecOwner* );
void __nvoc_init_NvencSession(NvencSession*);
void __nvoc_init_funcTable_NvencSession(NvencSession*);
NV_STATUS __nvoc_ctor_NvencSession(NvencSession*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_NvencSession(NvencSession*);
void __nvoc_dtor_NvencSession(NvencSession*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NvencSession;
@ -103,7 +103,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_NvencSes
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) nvencsessionCtrlCmdNvencSwSessionUpdateInfo_DISPATCH,
/*pFunc=*/ (void (*)(void)) nvencsessionCtrlCmdNvencSwSessionUpdateInfo_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*flags=*/ 0x8u,
/*accessRight=*/0x0u,
@ -118,7 +118,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_NvencSes
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_DISPATCH,
/*pFunc=*/ (void (*)(void)) nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*flags=*/ 0x8u,
/*accessRight=*/0x0u,
@ -341,21 +341,16 @@ void __nvoc_dtor_NvencSession(NvencSession *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
}
void __nvoc_init_dataField_NvencSession(NvencSession *pThis, RmHalspecOwner *pRmhalspecowner) {
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
void __nvoc_init_dataField_NvencSession(NvencSession *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
}
NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_NvencSession(NvencSession *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS __nvoc_ctor_NvencSession(NvencSession *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
NV_STATUS status = NV_OK;
status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_NvencSession_fail_GpuResource;
__nvoc_init_dataField_NvencSession(pThis, pRmhalspecowner);
__nvoc_init_dataField_NvencSession(pThis);
status = __nvoc_nvencsessionConstruct(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_ctor_NvencSession_fail__init;
@ -370,41 +365,30 @@ __nvoc_ctor_NvencSession_exit:
}
// Vtable initialization
static void __nvoc_init_funcTable_NvencSession_1(NvencSession *pThis, RmHalspecOwner *pRmhalspecowner) {
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
static void __nvoc_init_funcTable_NvencSession_1(NvencSession *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
// nvencsessionCtrlCmdNvencSwSessionUpdateInfo -- halified (2 hals) exported (id=0xa0bc0101) body
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__nvencsessionCtrlCmdNvencSwSessionUpdateInfo__ = &nvencsessionCtrlCmdNvencSwSessionUpdateInfo_46f6a7;
}
else
{
pThis->__nvencsessionCtrlCmdNvencSwSessionUpdateInfo__ = &nvencsessionCtrlCmdNvencSwSessionUpdateInfo_IMPL;
}
// nvencsessionCtrlCmdNvencSwSessionUpdateInfo -- exported (id=0xa0bc0101)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
pThis->__nvencsessionCtrlCmdNvencSwSessionUpdateInfo__ = &nvencsessionCtrlCmdNvencSwSessionUpdateInfo_IMPL;
#endif
// nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2 -- halified (2 hals) exported (id=0xa0bc0102) body
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2__ = &nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_46f6a7;
}
else
{
pThis->__nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2__ = &nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_IMPL;
}
} // End __nvoc_init_funcTable_NvencSession_1 with approximately 4 basic block(s).
// nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2 -- exported (id=0xa0bc0102)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
pThis->__nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2__ = &nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_IMPL;
#endif
} // End __nvoc_init_funcTable_NvencSession_1 with approximately 2 basic block(s).
// Initialize vtable(s) for 27 virtual method(s).
void __nvoc_init_funcTable_NvencSession(NvencSession *pThis, RmHalspecOwner *pRmhalspecowner) {
void __nvoc_init_funcTable_NvencSession(NvencSession *pThis) {
// Per-class vtable definition
static const struct NVOC_VTABLE__NvencSession vtable = {
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
#endif
.__nvencsessionControl__ = &__nvoc_up_thunk_GpuResource_nvencsessionControl, // virtual inherited (gpures) base (gpures)
.GpuResource.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres)
.GpuResource.RmResource.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
@ -503,11 +487,11 @@ void __nvoc_init_funcTable_NvencSession(NvencSession *pThis, RmHalspecOwner *pRm
pThis->__nvoc_vtable = &vtable; // (nvencsession) this
// Initialize vtable(s) with 2 per-object function pointer(s).
__nvoc_init_funcTable_NvencSession_1(pThis, pRmhalspecowner);
__nvoc_init_funcTable_NvencSession_1(pThis);
}
void __nvoc_init_GpuResource(GpuResource*);
void __nvoc_init_NvencSession(NvencSession *pThis, RmHalspecOwner *pRmhalspecowner) {
void __nvoc_init_NvencSession(NvencSession *pThis) {
pThis->__nvoc_pbase_NvencSession = pThis;
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
@ -515,7 +499,7 @@ void __nvoc_init_NvencSession(NvencSession *pThis, RmHalspecOwner *pRmhalspecown
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
__nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_init_funcTable_NvencSession(pThis, pRmhalspecowner);
__nvoc_init_funcTable_NvencSession(pThis);
}
NV_STATUS __nvoc_objCreate_NvencSession(NvencSession **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
@ -523,7 +507,6 @@ NV_STATUS __nvoc_objCreate_NvencSession(NvencSession **ppThis, Dynamic *pParent,
NV_STATUS status;
Object *pParentObj = NULL;
NvencSession *pThis;
RmHalspecOwner *pRmhalspecowner;
// Assign `pThis`, allocating memory unless suppressed by flag.
status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(NvencSession), (void**)&pThis, (void**)ppThis);
@ -538,11 +521,8 @@ NV_STATUS __nvoc_objCreate_NvencSession(NvencSession **ppThis, Dynamic *pParent,
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
// pParent must be a valid object that derives from a halspec owner class.
NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT);
// Link the child into the parent unless flagged not to do so.
if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
// Link the child into the parent if there is one unless flagged not to do so.
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
{
pParentObj = dynamicCast(pParent, Object);
objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
@ -552,12 +532,8 @@ NV_STATUS __nvoc_objCreate_NvencSession(NvencSession **ppThis, Dynamic *pParent,
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
}
if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);
__nvoc_init_NvencSession(pThis, pRmhalspecowner);
status = __nvoc_ctor_NvencSession(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams);
__nvoc_init_NvencSession(pThis);
status = __nvoc_ctor_NvencSession(pThis, arg_pCallContext, arg_pParams);
if (status != NV_OK) goto __nvoc_objCreate_NvencSession_cleanup;
// Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.

View File

@ -45,6 +45,7 @@ extern "C" {
#include "core/core.h"
#include "rmapi/client.h"
#include "gpu/gpu_halspec.h"
#include "gpu/gpu_halspec.h"
#include "gpu/gpu_resource.h"
#include "class/cla0bc.h"
#include "ctrl/ctrla0bc.h"
@ -117,8 +118,8 @@ struct NvencSession {
struct NvencSession *__nvoc_pbase_NvencSession; // nvencsession
// Vtable with 2 per-object function pointers
NV_STATUS (*__nvencsessionCtrlCmdNvencSwSessionUpdateInfo__)(struct NvencSession * /*this*/, NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS *); // halified (2 hals) exported (id=0xa0bc0101) body
NV_STATUS (*__nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2__)(struct NvencSession * /*this*/, NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_V2_PARAMS *); // halified (2 hals) exported (id=0xa0bc0102) body
NV_STATUS (*__nvencsessionCtrlCmdNvencSwSessionUpdateInfo__)(struct NvencSession * /*this*/, NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS *); // exported (id=0xa0bc0101)
NV_STATUS (*__nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2__)(struct NvencSession * /*this*/, NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_V2_PARAMS *); // exported (id=0xa0bc0102)
// Data members
NvHandle handle;
@ -196,10 +197,8 @@ NV_STATUS __nvoc_objCreate_NvencSession(NvencSession**, Dynamic*, NvU32, struct
// Wrapper macros
#define nvencsessionCtrlCmdNvencSwSessionUpdateInfo_FNPTR(pNvencSession) pNvencSession->__nvencsessionCtrlCmdNvencSwSessionUpdateInfo__
#define nvencsessionCtrlCmdNvencSwSessionUpdateInfo(pNvencSession, pParams) nvencsessionCtrlCmdNvencSwSessionUpdateInfo_DISPATCH(pNvencSession, pParams)
#define nvencsessionCtrlCmdNvencSwSessionUpdateInfo_HAL(pNvencSession, pParams) nvencsessionCtrlCmdNvencSwSessionUpdateInfo_DISPATCH(pNvencSession, pParams)
#define nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_FNPTR(pNvencSession) pNvencSession->__nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2__
#define nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2(pNvencSession, pParams) nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_DISPATCH(pNvencSession, pParams)
#define nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_HAL(pNvencSession, pParams) nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_DISPATCH(pNvencSession, pParams)
#define nvencsessionControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_vtable->__gpuresControl__
#define nvencsessionControl(pGpuResource, pCallContext, pParams) nvencsessionControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define nvencsessionMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_vtable->__gpuresMap__
@ -360,16 +359,8 @@ static inline void nvencsessionAddAdditionalDependants_DISPATCH(struct RsClient
pResource->__nvoc_vtable->__nvencsessionAddAdditionalDependants__(pClient, pResource, pReference);
}
static inline NV_STATUS nvencsessionCtrlCmdNvencSwSessionUpdateInfo_46f6a7(struct NvencSession *pNvencSession, NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS *pParams) {
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS nvencsessionCtrlCmdNvencSwSessionUpdateInfo_IMPL(struct NvencSession *pNvencSession, NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS *pParams);
static inline NV_STATUS nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_46f6a7(struct NvencSession *pNvencSession, NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_V2_PARAMS *pParams) {
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS nvencsessionCtrlCmdNvencSwSessionUpdateInfoV2_IMPL(struct NvencSession *pNvencSession, NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_V2_PARAMS *pParams);
NV_STATUS nvencsessionConstruct_IMPL(struct NvencSession *arg_pNvencSession, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);

View File

@ -351,26 +351,6 @@ static void __nvoc_init_funcTable_OBJTMR_1(OBJTMR *pThis, RmHalspecOwner *pRmhal
}
}
// tmrGetTimeLo -- halified (2 hals) body
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__tmrGetTimeLo__ = &tmrGetTimeLo_GM107;
}
else
{
pThis->__tmrGetTimeLo__ = &tmrGetTimeLo_cf0499;
}
// tmrGetTime -- halified (2 hals) body
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__tmrGetTime__ = &tmrGetTime_GM107;
}
else
{
pThis->__tmrGetTime__ = &tmrGetTime_fa6bbe;
}
// tmrGetTimeEx -- halified (2 hals) body
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0xd0000000UL) )) /* ChipHal: GH100 | GB100 | GB102 */
{
@ -420,10 +400,10 @@ static void __nvoc_init_funcTable_OBJTMR_1(OBJTMR *pThis, RmHalspecOwner *pRmhal
{
pThis->__tmrGetGpuPtimerOffset__ = &tmrGetGpuPtimerOffset_GA100;
}
} // End __nvoc_init_funcTable_OBJTMR_1 with approximately 22 basic block(s).
} // End __nvoc_init_funcTable_OBJTMR_1 with approximately 18 basic block(s).
// Initialize vtable(s) for 27 virtual method(s).
// Initialize vtable(s) for 25 virtual method(s).
void __nvoc_init_funcTable_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) {
// Per-class vtable definition
@ -470,7 +450,7 @@ void __nvoc_init_funcTable_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner
pThis->__nvoc_base_IntrService.__nvoc_vtable = &vtable.IntrService; // (intrserv) super
pThis->__nvoc_vtable = &vtable; // (tmr) this
// Initialize vtable(s) with 10 per-object function pointer(s).
// Initialize vtable(s) with 8 per-object function pointer(s).
__nvoc_init_funcTable_OBJTMR_1(pThis, pRmhalspecowner);
}

View File

@ -218,12 +218,10 @@ struct OBJTMR {
struct IntrService *__nvoc_pbase_IntrService; // intrserv super
struct OBJTMR *__nvoc_pbase_OBJTMR; // tmr
// Vtable with 10 per-object function pointers
// Vtable with 8 per-object function pointers
NV_STATUS (*__tmrDelay__)(struct OBJTMR * /*this*/, NvU32); // halified (2 hals)
NvU32 (*__tmrServiceInterrupt__)(OBJGPU *, struct OBJTMR * /*this*/, IntrServiceServiceInterruptArguments *); // virtual halified (3 hals) override (intrserv) base (intrserv) body
NV_STATUS (*__tmrSetCurrentTime__)(OBJGPU *, struct OBJTMR * /*this*/); // halified (3 hals) body
NvU32 (*__tmrGetTimeLo__)(OBJGPU *, struct OBJTMR * /*this*/); // halified (2 hals) body
NvU64 (*__tmrGetTime__)(OBJGPU *, struct OBJTMR * /*this*/); // halified (2 hals) body
NvU64 (*__tmrGetTimeEx__)(OBJGPU *, struct OBJTMR * /*this*/, struct THREAD_STATE_NODE *); // halified (2 hals) body
NV_STATUS (*__tmrSetCountdownIntrDisable__)(OBJGPU *, struct OBJTMR * /*this*/); // halified (2 hals) body
NV_STATUS (*__tmrSetCountdown__)(OBJGPU *, struct OBJTMR * /*this*/, NvU32, NvU32, struct THREAD_STATE_NODE *); // halified (2 hals) body
@ -359,12 +357,6 @@ NV_STATUS __nvoc_objCreate_OBJTMR(OBJTMR**, Dynamic*, NvU32);
#define tmrSetCurrentTime_FNPTR(pTmr) pTmr->__tmrSetCurrentTime__
#define tmrSetCurrentTime(pGpu, pTmr) tmrSetCurrentTime_DISPATCH(pGpu, pTmr)
#define tmrSetCurrentTime_HAL(pGpu, pTmr) tmrSetCurrentTime_DISPATCH(pGpu, pTmr)
#define tmrGetTimeLo_FNPTR(pTmr) pTmr->__tmrGetTimeLo__
#define tmrGetTimeLo(pGpu, pTmr) tmrGetTimeLo_DISPATCH(pGpu, pTmr)
#define tmrGetTimeLo_HAL(pGpu, pTmr) tmrGetTimeLo_DISPATCH(pGpu, pTmr)
#define tmrGetTime_FNPTR(pTmr) pTmr->__tmrGetTime__
#define tmrGetTime(pGpu, pTmr) tmrGetTime_DISPATCH(pGpu, pTmr)
#define tmrGetTime_HAL(pGpu, pTmr) tmrGetTime_DISPATCH(pGpu, pTmr)
#define tmrGetTimeEx_FNPTR(pTmr) pTmr->__tmrGetTimeEx__
#define tmrGetTimeEx(pGpu, pTmr, arg3) tmrGetTimeEx_DISPATCH(pGpu, pTmr, arg3)
#define tmrGetTimeEx_HAL(pGpu, pTmr, arg3) tmrGetTimeEx_DISPATCH(pGpu, pTmr, arg3)
@ -446,14 +438,6 @@ static inline NV_STATUS tmrSetCurrentTime_DISPATCH(OBJGPU *pGpu, struct OBJTMR *
return pTmr->__tmrSetCurrentTime__(pGpu, pTmr);
}
static inline NvU32 tmrGetTimeLo_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr) {
return pTmr->__tmrGetTimeLo__(pGpu, pTmr);
}
static inline NvU64 tmrGetTime_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr) {
return pTmr->__tmrGetTime__(pGpu, pTmr);
}
static inline NvU64 tmrGetTimeEx_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg3) {
return pTmr->__tmrGetTimeEx__(pGpu, pTmr, arg3);
}
@ -627,6 +611,34 @@ static inline NV_STATUS tmrGetIntrStatus(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU3
#define tmrGetIntrStatus_HAL(pGpu, pTmr, pStatus, arg4) tmrGetIntrStatus(pGpu, pTmr, pStatus, arg4)
NvU32 tmrGetTimeLo_GM107(OBJGPU *pGpu, struct OBJTMR *pTmr);
#ifdef __nvoc_objtmr_h_disabled
static inline NvU32 tmrGetTimeLo(OBJGPU *pGpu, struct OBJTMR *pTmr) {
NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!");
return 0;
}
#else //__nvoc_objtmr_h_disabled
#define tmrGetTimeLo(pGpu, pTmr) tmrGetTimeLo_GM107(pGpu, pTmr)
#endif //__nvoc_objtmr_h_disabled
#define tmrGetTimeLo_HAL(pGpu, pTmr) tmrGetTimeLo(pGpu, pTmr)
NvU64 tmrGetTime_GM107(OBJGPU *pGpu, struct OBJTMR *pTmr);
#ifdef __nvoc_objtmr_h_disabled
static inline NvU64 tmrGetTime(OBJGPU *pGpu, struct OBJTMR *pTmr) {
NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!");
return 0;
}
#else //__nvoc_objtmr_h_disabled
#define tmrGetTime(pGpu, pTmr) tmrGetTime_GM107(pGpu, pTmr)
#endif //__nvoc_objtmr_h_disabled
#define tmrGetTime_HAL(pGpu, pTmr) tmrGetTime(pGpu, pTmr)
NvU32 tmrReadTimeLoReg_TU102(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg3);
@ -914,18 +926,6 @@ NV_STATUS tmrSetCurrentTime_GV100(OBJGPU *pGpu, struct OBJTMR *pTmr);
NV_STATUS tmrSetCurrentTime_GH100(OBJGPU *pGpu, struct OBJTMR *pTmr);
static inline NvU32 tmrGetTimeLo_cf0499(OBJGPU *pGpu, struct OBJTMR *pTmr) {
return ((NvU32)(((NvU64)(osGetTimestamp())) & 4294967295U));
}
NvU32 tmrGetTimeLo_GM107(OBJGPU *pGpu, struct OBJTMR *pTmr);
static inline NvU64 tmrGetTime_fa6bbe(OBJGPU *pGpu, struct OBJTMR *pTmr) {
return osGetTimestamp();
}
NvU64 tmrGetTime_GM107(OBJGPU *pGpu, struct OBJTMR *pTmr);
NvU64 tmrGetTimeEx_GM107(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg3);
NvU64 tmrGetTimeEx_GH100(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg3);

View File

@ -49,10 +49,8 @@
/**
* @brief Compare two strings, character by character.
*
* Will only compare lengthBytes bytes. Strings are assumed to be at least that
* long.
*
* Strings are allowed to overlap, but in .
* Will compare the first 'length' chars of each string, or until
* the nul-terminator is reached in either string, whichever comes first.
*
* @returns:
* - 0 if all bytes are equal
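*
* For reference, the behaviour described above matches a strncmp-style loop
* (illustrative only, not the NvPort implementation; the non-zero return
* convention is assumed):
*
*     for (i = 0; i < length; i++) {
*         if (str1[i] != str2[i])
*             return (str1[i] < str2[i]) ? -1 : 1;  // first difference decides
*         if (str1[i] == '\0')
*             return 0;                             // both strings ended early
*     }
*     return 0;                                     // first 'length' chars equal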

View File

@ -1910,6 +1910,33 @@ _rcdbDumpDclMsgRecord(
}
break;
}
case RmRcDiagReport:
{
RmRcDiag_RECORD* pRecord = (RmRcDiag_RECORD*) &pDclRecord[1];
OBJGPU *pGpu = gpumgrGetGpuFromId(pDclRecord->GPUTag);
// open an RC Diagnostic record in the Proto Buffer
NV_CHECK_OK(nvStatus, LEVEL_ERROR,
prbEncNestedStart(pPrbEnc, DCL_DCLMSG_RC_DIAG_RECS));
if (nvStatus == NV_OK)
{
prbEncAddUInt32(pPrbEnc, RC_RCDIAGRECORD_RECORD_ID, pRecord->idx);
prbEncAddUInt32(pPrbEnc, RC_RCDIAGRECORD_RECORD_TYPE, pRecord->type);
if (NULL != pGpu)
{
NvU32 i;
for (i = 0; i < pRecord->count; ++i)
{
if (NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_MAX_PSEDO_REG < pRecord->data[i].tag)
{
prbEncGpuRegImm(pGpu, pRecord->data[i].offset, pRecord->data[i].value, pPrbEnc, RC_RCDIAGRECORD_REGS);
}
}
}
NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedEnd(pPrbEnc));
}
break;
}
case RmPrbErrorInfo_V2:
case RmPrbFullDump_V2:
{

View File

@ -253,12 +253,18 @@ _gpuFindPcieRegAddr_GB100
else if (partitionId == NV_PCIE_PARTITION_ID_CFG_SPACE)
{
status = _gpuGetPcieCfgCapBaseAddr_GB100(pGpu, hwDefAddr, &capBaseAddr);
offset = _gpuGetPcieCfgRegOffset_GB100(pGpu, hwDefAddr);
if (status == NV_OK)
{
offset = _gpuGetPcieCfgRegOffset_GB100(pGpu, hwDefAddr);
}
}
else if (partitionId == NV_PCIE_PARTITION_ID_EXT_CFG_SPACE)
{
status = _gpuGetPcieExtCfgCapBaseAddr_GB100(pGpu, hwDefAddr, &capBaseAddr);
offset = _gpuGetPcieExtCfgRegOffset_GB100(pGpu, hwDefAddr);
if (status == NV_OK)
{
offset = _gpuGetPcieExtCfgRegOffset_GB100(pGpu, hwDefAddr);
}
}
else
{
@ -472,8 +478,9 @@ _gpuGetPcieCfgCapBaseAddr_GB100
targetCapId = _gpuGetPcieCfgCapId_GB100(pGpu, hwDefAddr);
if (targetCapId == 0)
{
status = NV_ERR_NOT_SUPPORTED;
DBG_BREAKPOINT();
status = NV_ERR_INVALID_ADDRESS;
*pCapBaseAddr = capBaseAddr;
NV_PRINTF(LEVEL_INFO, "capId for register 0x%x not found\n", hwDefAddr);
return status;
}
@ -534,7 +541,7 @@ _gpuGetPcieCfgCapBaseAddr_GB100
// however for the caller to differentiate return invalid_addr status
//
status = NV_ERR_INVALID_ADDRESS;
NV_PRINTF(LEVEL_ERROR, "Invalid address passed : 0x%x", hwDefAddr);
NV_PRINTF(LEVEL_INFO, "Register 0x%x not part of PCIe linked list\n", hwDefAddr);
}
else
{
@ -607,8 +614,9 @@ _gpuGetPcieExtCfgCapBaseAddr_GB100
targetCapId = _gpuGetPcieExtCfgCapId_GB100(pGpu, hwDefAddr);
if (targetCapId == 0)
{
status = NV_ERR_NOT_SUPPORTED;
DBG_BREAKPOINT();
status = NV_ERR_INVALID_ADDRESS;
*pCapBaseAddr = capBaseAddr;
NV_PRINTF(LEVEL_INFO, "capId for register 0x%x not found\n", hwDefAddr);
return status;
}
@ -680,7 +688,7 @@ _gpuGetPcieExtCfgCapBaseAddr_GB100
// however for the caller to differentiate return invalid_addr status
//
status = NV_ERR_INVALID_ADDRESS;
NV_PRINTF(LEVEL_ERROR, "Invalid address passed : 0x%x", hwDefAddr);
NV_PRINTF(LEVEL_INFO, "Register 0x%x not part of PCIe linked list\n", hwDefAddr);
}
else
{
@ -717,8 +725,6 @@ _gpuGetPcieCfgCapId_GB100
}
}
NV_ASSERT_FAILED("Incorrect capability_id\n");
return 0;
}
@ -749,8 +755,6 @@ _gpuGetPcieExtCfgCapId_GB100
}
}
NV_ASSERT_FAILED("Incorrect capability_id\n");
return 0;
}
@ -813,7 +817,7 @@ _gpuGetPcieExtCfgDvsecInfo_GB100
{
*pVenId = pcieExtCfgRegInfo[i].vendorId;
*pDvsecLen = pcieExtCfgRegInfo[i].dvsecLen;
break;
return;
}
}

View File

@ -782,7 +782,7 @@ _kbifSavePcieConfigRegisters_GH100
{
status = GPU_BUS_CFG_CYCLE_RD32(pGpu, regOffset,
&pRegmapRef->bufBootConfigSpace[bufOffset]);
if (status != NV_OK)
if (status == NV_ERR_INVALID_STATE)
{
NV_PRINTF(LEVEL_ERROR, "Config read failed.\n");
return status;
@ -828,7 +828,7 @@ _kbifRestorePcieConfigRegisters_GH100
{
status = GPU_BUS_CFG_CYCLE_WR32(pGpu, regOffset,
pRegmapRef->bufBootConfigSpace[bufOffset]);
if (status != NV_OK)
if (status == NV_ERR_INVALID_STATE)
{
NV_PRINTF(LEVEL_ERROR, "Config write failed.\n");
NV_ASSERT(0);

View File

@ -313,8 +313,8 @@ gpuPostConstruct_IMPL
GPUATTACHARG *pAttachArg
)
{
NV_STATUS rmStatus;
NvU32 config = 0;
NV_STATUS rmStatus;
NvU32 config = 0;
gpumgrAddDeviceInstanceToGpus(NVBIT(pGpu->gpuInstance));

View File

@ -555,6 +555,67 @@ _kgspRpcRCTriggered
NV_ERR_INVALID_CHANNEL);
}
// Add the RcDiag records we received from GSP-RM to our system wide journal
{
OBJSYS *pSys = SYS_GET_INSTANCE();
Journal *pRcDB = SYS_GET_RCDB(pSys);
RmClient *pClient;
NvU32 recordSize = rcdbGetOcaRecordSizeWithHeader(pRcDB, RmRcDiagReport);
NvU32 rcDiagRecStart = pRcDB->RcErrRptNextIdx;
NvU32 rcDiagRecEnd;
NvU32 processId = 0;
NvU32 owner = RCDB_RCDIAG_DEFAULT_OWNER;
if (pKernelChannel != NULL)
{
pClient = dynamicCast(RES_GET_CLIENT(pKernelChannel), RmClient);
NV_ASSERT(pClient != NULL);
if (pClient != NULL)
processId = pClient->ProcID;
}
for (NvU32 i = 0; i < rpc_params->rcJournalBufferSize / recordSize; i++)
{
RmRCCommonJournal_RECORD *pCommonRecord =
(RmRCCommonJournal_RECORD *)((NvU8*)&rpc_params->rcJournalBuffer + i * recordSize);
RmRcDiag_RECORD *pRcDiagRecord =
(RmRcDiag_RECORD *)&pCommonRecord[1];
#if defined(DEBUG)
NV_PRINTF(LEVEL_INFO, "%d: GPUTag=0x%x CPUTag=0x%llx timestamp=0x%llx stateMask=0x%llx\n",
i, pCommonRecord->GPUTag, pCommonRecord->CPUTag, pCommonRecord->timeStamp,
pCommonRecord->stateMask);
NV_PRINTF(LEVEL_INFO, " idx=%d timeStamp=0x%x type=0x%x flags=0x%x count=%d owner=0x%x processId=0x%x\n",
pRcDiagRecord->idx, pRcDiagRecord->timeStamp, pRcDiagRecord->type, pRcDiagRecord->flags,
pRcDiagRecord->count, pRcDiagRecord->owner, processId);
for (NvU32 j = 0; j < pRcDiagRecord->count; j++)
{
NV_PRINTF(LEVEL_INFO, "    %d: offset=0x%08x tag=0x%08x value=0x%08x attribute=0x%08x\n",
j, pRcDiagRecord->data[j].offset, pRcDiagRecord->data[j].tag,
pRcDiagRecord->data[j].value, pRcDiagRecord->data[j].attribute);
}
#endif
if (rcdbAddRcDiagRecFromGsp(pGpu, pRcDB, pCommonRecord, pRcDiagRecord) == NULL)
{
NV_PRINTF(LEVEL_WARNING, "Lost RC diagnostic record coming from GPU%d GSP: type=0x%x stateMask=0x%llx\n",
gpuGetInstance(pGpu), pRcDiagRecord->type, pCommonRecord->stateMask);
}
}
rcDiagRecEnd = pRcDB->RcErrRptNextIdx - 1;
// Update records to have the correct PID associated with the channel
if (rcDiagRecStart != rcDiagRecEnd)
{
rcdbUpdateRcDiagRecContext(pRcDB,
rcDiagRecStart,
rcDiagRecEnd,
processId,
owner);
}
}
bIsCcEnabled = gpuIsCCFeatureEnabled(pGpu);
// With CC enabled, CPU-RM needs to write error notifiers
@ -2395,16 +2456,12 @@ static NV_STATUS setupLogBufferVgpu(
{
NV_PRINTF(LEVEL_ERROR, "Failed to map memory for %s task log buffer for vGPU partition \n", szPrefix);
nvStatus = NV_ERR_INSUFFICIENT_RESOURCES;
if (pKernelGsp->pNvlogFlushMtx != NULL)
portSyncMutexRelease(pKernelGsp->pNvlogFlushMtx);
if (nvStatus != NV_OK)
_kgspFreeLibosVgpuPartitionLoggingStructures(pGpu, pKernelGsp, gfid);
}
error_cleanup:
if (pKernelGsp->pNvlogFlushMtx != NULL)
portSyncMutexRelease(pKernelGsp->pNvlogFlushMtx);
if (nvStatus != NV_OK)
_kgspFreeLibosVgpuPartitionLoggingStructures(pGpu, pKernelGsp, gfid);
@ -2454,8 +2511,8 @@ kgspInitVgpuPartitionLogging_IMPL
return NV_ERR_INVALID_ARGUMENT;
}
if (pKernelGsp->pNvlogFlushMtx != NULL)
portSyncMutexAcquire(pKernelGsp->pNvlogFlushMtx);
while (!portAtomicCompareAndSwapS32(&pKernelGsp->logDumpLock, 1, 0))
osSpinLoop();
// Source name is used to generate a tag that is a unique identifier for nvlog buffers.
// As the source name 'GSP' is already in use, we will need a custom source name.
@ -2483,7 +2540,7 @@ kgspInitVgpuPartitionLogging_IMPL
);
if (nvStatus != NV_OK)
return nvStatus;
goto exit;
}
// Determine which kernel is online, and add the according buffer
@ -2544,7 +2601,7 @@ kgspInitVgpuPartitionLogging_IMPL
}
if (nvStatus != NV_OK)
return nvStatus;
goto exit;
}
{
@ -2560,6 +2617,9 @@ kgspInitVgpuPartitionLogging_IMPL
*pPreserveLogBufferFull = bPreserveLogBufferFull;
exit:
portAtomicCompareAndSwapS32(&pKernelGsp->logDumpLock, 0, 1);
return nvStatus;
}
@ -2629,12 +2689,6 @@ _kgspFreeLibosLoggingStructures
if (pKernelGsp->pLogElf == NULL)
nvlogDeregisterFlushCb(kgspNvlogFlushCb, pKernelGsp);
if (pKernelGsp->pNvlogFlushMtx != NULL)
{
portSyncMutexDestroy(pKernelGsp->pNvlogFlushMtx);
pKernelGsp->pNvlogFlushMtx = NULL;
}
libosLogDestroy(&pKernelGsp->logDecode);
for (idx = 0; idx < _getLogArgCount(pGpu); idx++)
@ -2770,17 +2824,6 @@ _kgspInitLibosLoggingStructures
NvU8 idx;
NvU64 flags = MEMDESC_FLAGS_NONE;
// Needed only on Unix where NV_ESC_RM_LOCKLESS_DIAGNOSTIC is supported
if (RMCFG_FEATURE_PLATFORM_UNIX)
{
pKernelGsp->pNvlogFlushMtx = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged());
if (pKernelGsp->pNvlogFlushMtx == NULL)
{
nvStatus = NV_ERR_INSUFFICIENT_RESOURCES;
goto error_cleanup;
}
}
libosLogCreate(&pKernelGsp->logDecode);
flags |= MEMDESC_FLAGS_ALLOC_IN_UNPROTECTED_MEMORY;
@ -2856,9 +2899,6 @@ _kgspInitLibosLoggingStructures
return nvStatus;
}
error_cleanup:
if (nvStatus != NV_OK)
_kgspFreeLibosLoggingStructures(pGpu, pKernelGsp);
return nvStatus;
}
@ -3464,9 +3504,8 @@ kgspInitRm_IMPL
//
// Do not register nvlog flush callback if:
// 1. Live decoding is enabled, as logs will be printed to dmesg.
// 2. NV_ESC_RM_LOCKLESS_DIAGNOSTIC is not supported on this platform, i.e. pNvlogFlushMtx=NULL.
//
if (pKernelGsp->pLogElf == NULL && pKernelGsp->pNvlogFlushMtx != NULL)
if (pKernelGsp->pLogElf == NULL)
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, nvlogRegisterFlushCb(kgspNvlogFlushCb, pKernelGsp), done);
// Reset thread state timeout and wait for GFW_BOOT OK status
@ -3756,13 +3795,20 @@ kgspDumpGspLogs_IMPL
|| pKernelGsp->bHasVgpuLogs
)
{
if (pKernelGsp->pNvlogFlushMtx != NULL)
portSyncMutexAcquire(pKernelGsp->pNvlogFlushMtx);
while (!portAtomicCompareAndSwapS32(&pKernelGsp->logDumpLock, 1, 0))
{
if (osIsRaisedIRQL())
{
// called at DPC/ISR and there is contention, just bail
return;
}
osSpinLoop();
}
kgspDumpGspLogsUnlocked(pKernelGsp, bSyncNvLog);
if (pKernelGsp->pNvlogFlushMtx != NULL)
portSyncMutexRelease(pKernelGsp->pNvlogFlushMtx);
portAtomicCompareAndSwapS32(&pKernelGsp->logDumpLock, 0, 1);
}
}

View File

@ -57,6 +57,7 @@ static struct
} stuckIntr[MC_ENGINE_IDX_MAX];
static NvBool _intrServiceStallExactList(OBJGPU *pGpu, Intr *pIntr, MC_ENGINE_BITVECTOR *pEngines);
static void _intrLogLongRunningInterrupts(Intr *pIntr);
static void _intrInitServiceTable(OBJGPU *pGpu, Intr *pIntr);
@ -143,6 +144,8 @@ intrServiceStall_IMPL(OBJGPU *pGpu, Intr *pIntr)
intrProcessDPCQueue_HAL(pGpu, pIntr);
}
_intrLogLongRunningInterrupts(pIntr);
exit:
return;
}
@ -1111,6 +1114,7 @@ NvU32 intrServiceInterruptRecords_IMPL
IntrService *pIntrService = pIntr->intrServiceTable[engineIdx].pInterruptService;
NvU32 ret = 0;
NvBool bShouldService;
NvU64 intrTiming, intrTiming2;
IntrServiceClearInterruptArguments clearParams = {engineIdx};
IntrServiceServiceInterruptArguments serviceParams = {engineIdx};
@ -1132,9 +1136,20 @@ NvU32 intrServiceInterruptRecords_IMPL
if (bShouldService)
{
osGetPerformanceCounter(&intrTiming);
GSP_TRACE_RATS_ADD_RECORD(NV_RATS_GSP_TRACE_TYPE_INTR_START, pGpu, (NvU32) engineIdx);
ret = intrservServiceInterrupt(pGpu, pIntrService, &serviceParams);
GSP_TRACE_RATS_ADD_RECORD(NV_RATS_GSP_TRACE_TYPE_INTR_END, pGpu, (NvU32) engineIdx);
osGetPerformanceCounter(&intrTiming2);
intrTiming = intrTiming2 - intrTiming;
if (intrTiming > LONG_INTR_LOG_LENGTH_NS)
{
pIntr->longIntrStats[engineIdx].intrCount++;
if (intrTiming > pIntr->longIntrStats[engineIdx].intrLength)
pIntr->longIntrStats[engineIdx].intrLength = intrTiming;
}
}
return ret;
}
@ -1459,6 +1474,29 @@ _intrExitCriticalSection
}
}
static void
_intrLogLongRunningInterrupts(Intr *pIntr)
{
NvU64 now;
osGetPerformanceCounter(&now);
for (NvU32 i = 0; i < MC_ENGINE_IDX_MAX; ++i)
{
if (pIntr->longIntrStats[i].intrCount > 0)
{
if (now - pIntr->longIntrStats[i].lastPrintTime > LONG_INTR_LOG_RATELIMIT_NS)
{
NV_PRINTF(LEVEL_WARNING, "%u long-running interrupts (%llu ns or slower) from engine %u, longest taking %llu ns\n",
pIntr->longIntrStats[i].intrCount, LONG_INTR_LOG_LENGTH_NS, i, pIntr->longIntrStats[i].intrLength);
pIntr->longIntrStats[i].intrCount = 0;
pIntr->longIntrStats[i].intrLength = 0;
pIntr->longIntrStats[i].lastPrintTime = now;
}
}
}
}
static NvBool
_intrServiceStallExactList
(
@ -1641,6 +1679,9 @@ done:
// allow the isr to come in.
_intrExitCriticalSection(pGpu, pIntr, &intrMaskCtx);
// Delay prints until after exiting critical sections to save perf impact
_intrLogLongRunningInterrupts(pIntr);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
}

View File

@ -3671,7 +3671,8 @@ cliresCtrlCmdNvdGetNvlogBufferInfo_IMPL
}
pBuffer = NvLogLogger.pBuffers[hBuffer];
NV_ASSERT_OR_RETURN(pBuffer != NULL, NV_ERR_OBJECT_NOT_FOUND);
NV_ASSERT_OR_ELSE(pBuffer != NULL,
status = NV_ERR_OBJECT_NOT_FOUND; goto done);
NvBool bPause = pParams->flags & DRF_DEF(0000, _CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS, _PAUSE, _YES);
nvlogPauseLoggingToBuffer(hBuffer, bPause);

View File

@ -1662,7 +1662,7 @@ NV_STATUS freeRpcInfrastructure_VGPU(OBJGPU *pGpu)
NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);
// Return early if RPC is not initialized
if (!pVGpu->bRpcInitialized)
if ((pVGpu == NULL) || !pVGpu->bRpcInitialized)
{
return NV_ERR_INVALID_STATE;
}

View File

@ -29,6 +29,7 @@
#include "nvport/nvport.h"
#include "nvmisc.h"
#ifndef NVPORT_STRING_DONT_DEFINE_portStringLength
NvLength
portStringLength

View File

@ -1,4 +1,4 @@
NVIDIA_VERSION = 565.57.01
NVIDIA_VERSION = 565.77
# This file.
VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))