570.124.04

This commit is contained in:
Bernhard Stoeckner 2025-02-27 17:32:23 +01:00
parent 81fe4fb417
commit 129479b1b7
No known key found for this signature in database
GPG Key ID: 7D23DC2750FAC2E1
141 changed files with 102245 additions and 100070 deletions

View File

@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 570.86.16.
version 570.124.04.
## How to Build
@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
570.86.16 driver release. This can be achieved by installing
570.124.04 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@ -185,7 +185,7 @@ table below).
For details on feature support and limitations, see the NVIDIA GPU driver
end user README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/570.86.16/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/570.124.04/README/kernel_open.html
For vGPU support, please refer to the README.vgpu packaged in the vGPU Host
Package for more details.
@ -938,10 +938,17 @@ Subsystem Device ID.
| NVIDIA RTX 500 Ada Generation Laptop GPU | 28BB |
| NVIDIA GeForce RTX 4060 Laptop GPU | 28E0 |
| NVIDIA GeForce RTX 4050 Laptop GPU | 28E1 |
| NVIDIA GeForce RTX 3050 A Laptop GPU | 28E3 |
| NVIDIA RTX 2000 Ada Generation Embedded GPU | 28F8 |
| NVIDIA B200 | 2901 10DE 1999 |
| NVIDIA B200 | 2901 10DE 199B |
| NVIDIA B200 | 2901 10DE 20DA |
| HGX GB200 | 2941 10DE 2046 |
| HGX GB200 | 2941 10DE 20CA |
| HGX GB200 | 2941 10DE 20D5 |
| HGX GB200 | 2941 10DE 21C9 |
| HGX GB200 | 2941 10DE 21CA |
| NVIDIA GeForce RTX 5090 | 2B85 |
| NVIDIA GeForce RTX 5090 D | 2B87 |
| NVIDIA GeForce RTX 5080 | 2C02 |
| NVIDIA GeForce RTX 5070 Ti | 2C05 |

View File

@ -86,7 +86,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"570.86.16\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"570.124.04\"
ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X)

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -756,6 +756,8 @@ typedef struct UvmGpuFbInfo_tag
NvBool bStaticBar1Enabled; // Static BAR1 mode is enabled
NvU64 staticBar1StartOffset; // The start offset of the static mapping
NvU64 staticBar1Size; // The size of the static mapping
NvU32 heapStart; // The start offset of heap in KB, helpful for MIG
// systems
} UvmGpuFbInfo;
typedef struct UvmGpuEccInfo_tag

View File

@ -6307,6 +6307,32 @@ compile_test() {
compile_check_conftest "$CODE" "NV_NUM_REGISTERED_FB_PRESENT" "" "types"
;;
acpi_video_register_backlight)
#
# Determine if acpi_video_register_backlight() function is present
#
# acpi_video_register_backlight was added by commit 3dbc80a3e4c55c
# (ACPI: video: Make backlight class device registration a separate
# step (v2)) for v6.0 (2022-09-02).
# Note: the include directive for <linux/types> in this conftest is
# necessary in order to support kernels between commit 0b9f7d93ca61
# ("ACPI / i915: ignore firmware requests backlight change") for
# v3.16 (2014-07-07) and commit 3bd6bce369f5 ("ACPI / video: Port
# to new backlight interface selection API") for v4.2 (2015-07-16).
# Kernels within this range use the 'bool' type and the related
# 'false' value in <acpi/video.h> without first including the
# definitions of that type and value.
#
CODE="
#include <linux/types.h>
#include <acpi/video.h>
void conftest_acpi_video_register_backlight(void) {
acpi_video_register_backlight(0);
}"
compile_check_conftest "$CODE" "NV_ACPI_VIDEO_REGISTER_BACKLIGHT" "" "functions"
;;
acpi_video_backlight_use_native)
#
# Determine if acpi_video_backlight_use_native() function is present
@ -6690,13 +6716,18 @@ compile_test() {
#
# Determine whether drm_client_setup is present.
#
# Added by commit d07fdf922592 ("drm/fbdev-ttm:
# Convert to client-setup") in v6.13.
# Added by commit d07fdf922592 ("drm/fbdev-ttm: Convert to
# client-setup") in v6.13 in drm/drm_client_setup.h, but then moved
# to drm/clients/drm_client_setup.h by commit b86711c6d6e2
# ("drm/client: Move public client header to clients/ subdirectory")
# in linux-next.
#
CODE="
#include <drm/drm_fb_helper.h>
#if defined(NV_DRM_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/drm_client_setup.h>
#elif defined(NV_DRM_CLIENTS_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/clients/drm_client_setup.h>
#endif
void conftest_drm_client_setup(void) {
drm_client_setup();
@ -7509,6 +7540,31 @@ compile_test() {
compile_check_conftest "$CODE" "NV_MODULE_IMPORT_NS_TAKES_CONSTANT" "" "generic"
;;
drm_driver_has_date)
#
# Determine if the 'drm_driver' structure has a 'date' field.
#
# Removed by commit cb2e1c2136f7 ("drm: remove driver date from
# struct drm_driver and all drivers") in linux-next, expected in
# v6.14.
#
CODE="
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
int conftest_drm_driver_has_date(void) {
return offsetof(struct drm_driver, date);
}"
compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_DATE" "" "types"
;;
# When adding a new conftest entry, please use the correct format for
# specifying the relevant upstream Linux kernel commit. Please
# avoid specifying -rc kernels, and only use SHAs that actually exist

View File

@ -31,6 +31,7 @@ NV_HEADER_PRESENCE_TESTS = \
drm/drm_mode_config.h \
drm/drm_modeset_lock.h \
drm/drm_property.h \
drm/clients/drm_client_setup.h \
dt-bindings/interconnect/tegra_icc_id.h \
generated/autoconf.h \
generated/compile.h \

View File

@ -65,9 +65,13 @@
#if defined(NV_DRM_CLIENT_SETUP_PRESENT) && \
(defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT) || \
defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT))
// XXX remove dependency on DRM_TTM_HELPER by implementing nvidia-drm's own
// .fbdev_probe callback that uses NVKMS kapi
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_CLIENT_AVAILABLE
#endif
#endif
/*
* We can support color management if either drm_helper_crtc_enable_color_mgmt()

View File

@ -78,6 +78,8 @@
#if defined(NV_DRM_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/drm_client_setup.h>
#elif defined(NV_DRM_CLIENTS_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/clients/drm_client_setup.h>
#endif
#if defined(NV_DRM_DRM_FBDEV_TTM_H_PRESENT)
@ -1915,14 +1917,18 @@ static struct drm_driver nv_drm_driver = {
.name = "nvidia-drm",
.desc = "NVIDIA DRM driver",
#if defined(NV_DRM_DRIVER_HAS_DATE)
.date = "20160202",
#endif
#if defined(NV_DRM_DRIVER_HAS_DEVICE_LIST)
.device_list = LIST_HEAD_INIT(nv_drm_driver.device_list),
#elif defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
.legacy_dev_list = LIST_HEAD_INIT(nv_drm_driver.legacy_dev_list),
#endif
#if defined(DRM_FBDEV_TTM_DRIVER_OPS)
// XXX implement nvidia-drm's own .fbdev_probe callback that uses NVKMS kapi directly
#if defined(NV_DRM_FBDEV_AVAILABLE) && defined(DRM_FBDEV_TTM_DRIVER_OPS)
DRM_FBDEV_TTM_DRIVER_OPS,
#endif
};

View File

@ -143,4 +143,5 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_lut
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_property_blob_put
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_mmap
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_date
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations_fop_unsigned_offset_present

View File

@ -1050,6 +1050,11 @@ nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
#if defined(NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE)
if (!acpi_video_backlight_use_native()) {
#if defined(NV_ACPI_VIDEO_REGISTER_BACKLIGHT)
nvkms_log(NVKMS_LOG_LEVEL_INFO, NVKMS_LOG_PREFIX,
"ACPI reported no NVIDIA native backlight available; attempting to use ACPI backlight.");
acpi_video_register_backlight();
#endif
return NULL;
}
#endif

View File

@ -102,4 +102,5 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_backlight_use_native
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_register_backlight
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg

View File

@ -29,6 +29,7 @@
#include <linux/nodemask.h>
#include <linux/mempolicy.h>
#include <linux/mmu_notifier.h>
#include <linux/topology.h>
#if UVM_HMM_RANGE_FAULT_SUPPORTED()
#include <linux/hmm.h>
@ -291,6 +292,27 @@ static const struct mmu_interval_notifier_ops uvm_ats_notifier_ops =
#endif
// Returns true when a page resident on src_nid may stay put rather than be
// migrated to dst_nid, because the VMA's NUMA policy lists both nodes and
// both nodes are CPU (not GPU) nodes. Without unified-nodes mempolicy
// support this always reports false.
static bool resident_policy_match(struct vm_area_struct *vma, int dst_nid, int src_nid)
{
#if defined(NV_MEMPOLICY_HAS_UNIFIED_NODES)
    struct mempolicy *policy = vma_policy(vma);

    if (!policy)
        return false;

    // TODO: Bug 4981209: When migrations between CPU numa nodes are supported,
    // add (dst_nid != closest_cpu_numa_node) to allow migrations between CPU
    // NUMA nodes when destination is the closest_cpu_numa_node.
    if (!node_isset(src_nid, policy->nodes) || !node_isset(dst_nid, policy->nodes))
        return false;

    // Both nodes must have CPUs attached, i.e. be CPU NUMA nodes.
    return !cpumask_empty(cpumask_of_node(src_nid)) &&
           !cpumask_empty(cpumask_of_node(dst_nid));
#else
    return false;
#endif
}
static NV_STATUS ats_compute_residency_mask(uvm_gpu_va_space_t *gpu_va_space,
struct vm_area_struct *vma,
NvU64 base,
@ -370,9 +392,23 @@ static NV_STATUS ats_compute_residency_mask(uvm_gpu_va_space_t *gpu_va_space,
if (pfn & HMM_PFN_VALID) {
struct page *page = hmm_pfn_to_page(pfn);
int resident_node = page_to_nid(page);
if (page_to_nid(page) == ats_context->residency_node)
// Set the residency_mask if:
// - The page is already resident at the intended destination.
// or
// - If both the source and destination nodes are CPU nodes and
// source node is already in the list of preferred nodes for
// the vma. On multi-CPU NUMA node architectures, this avoids
// unnecessary migrations between CPU nodes. Since the
// specific ats_context->residency_node selected by
// ats_batch_select_residency() is just a guess among the list
// of preferred nodes, paying the cost of migration across the
// CPU preferred nodes in this case can't be justified.
if ((resident_node == ats_context->residency_node) ||
resident_policy_match(vma, ats_context->residency_node, resident_node)) {
uvm_page_mask_set(residency_mask, page_index);
}
ats_context->prefetch_state.first_touch = false;
}

View File

@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2024 NVIDIA Corporation
Copyright (c) 2015-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@ -35,6 +35,7 @@
#include "uvm_mmu.h"
#include "uvm_perf_heuristics.h"
#include "uvm_pmm_sysmem.h"
#include "uvm_pmm_gpu.h"
#include "uvm_migrate.h"
#include "uvm_gpu_access_counters.h"
#include "uvm_va_space_mm.h"
@ -90,6 +91,8 @@ NV_STATUS uvm_global_init(void)
uvm_spin_lock_irqsave_init(&g_uvm_global.gpu_table_lock, UVM_LOCK_ORDER_LEAF);
uvm_mutex_init(&g_uvm_global.va_spaces.lock, UVM_LOCK_ORDER_VA_SPACES_LIST);
INIT_LIST_HEAD(&g_uvm_global.va_spaces.list);
uvm_mutex_init(&g_uvm_global.devmem_ranges.lock, UVM_LOCK_ORDER_LEAF);
INIT_LIST_HEAD(&g_uvm_global.devmem_ranges.list);
status = uvm_kvmalloc_init();
if (status != NV_OK) {
@ -231,6 +234,7 @@ void uvm_global_exit(void)
uvm_va_policy_exit();
uvm_mem_global_exit();
uvm_pmm_sysmem_exit();
uvm_pmm_devmem_exit();
uvm_gpu_exit();
uvm_processor_mask_cache_exit();

View File

@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2024 NVIDIA Corporation
Copyright (c) 2015-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@ -157,6 +157,12 @@ struct uvm_global_struct
// This field is set once during global initialization (uvm_global_init),
// and can be read afterwards without acquiring any locks.
bool conf_computing_enabled;
// List of all devmem ranges allocated, kept here for reuse when GPUs
// are re-registered
struct {
uvm_mutex_t lock;
struct list_head list;
} devmem_ranges;
};
// Initialize global uvm state

View File

@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2024 NVIDIA Corporation
Copyright (c) 2015-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@ -109,8 +109,10 @@ static void fill_parent_gpu_info(uvm_parent_gpu_t *parent_gpu, const UvmGpuInfo
// nvswitch is routed via physical pages, where the upper 13-bits of the
// 47-bit address space holds the routing information for each peer.
// Currently, this is limited to a 16GB framebuffer window size.
if (parent_gpu->nvswitch_info.is_nvswitch_connected)
if (parent_gpu->nvswitch_info.is_nvswitch_connected) {
parent_gpu->nvswitch_info.fabric_memory_window_start = gpu_info->nvswitchMemoryWindowStart;
parent_gpu->nvswitch_info.egm_fabric_memory_window_start = gpu_info->nvswitchEgmMemoryWindowStart;
}
uvm_uuid_string(uuid_buffer, &parent_gpu->uuid);
snprintf(parent_gpu->name,
@ -244,6 +246,7 @@ static NV_STATUS get_gpu_fb_info(uvm_gpu_t *gpu)
if (!fb_info.bZeroFb) {
gpu->mem_info.size = ((NvU64)fb_info.heapSize + fb_info.reservedHeapSize) * 1024;
gpu->mem_info.max_allocatable_address = fb_info.maxAllocatableAddress;
gpu->mem_info.phys_start = (NvU64)fb_info.heapStart * 1024;
}
gpu->mem_info.max_vidmem_page_size = fb_info.maxVidmemPageSize;
@ -568,6 +571,9 @@ static void gpu_info_print_common(uvm_gpu_t *gpu, struct seq_file *s)
UVM_SEQ_OR_DBG_PRINT(s, "big_page_size %u\n", gpu->big_page.internal_size);
UVM_SEQ_OR_DBG_PRINT(s, "rm_va_base 0x%llx\n", gpu->parent->rm_va_base);
UVM_SEQ_OR_DBG_PRINT(s, "rm_va_size 0x%llx\n", gpu->parent->rm_va_size);
UVM_SEQ_OR_DBG_PRINT(s, "vidmem_start %llu (%llu MBs)\n",
gpu->mem_info.phys_start,
gpu->mem_info.phys_start / (1024 * 1024));
UVM_SEQ_OR_DBG_PRINT(s, "vidmem_size %llu (%llu MBs)\n",
gpu->mem_info.size,
gpu->mem_info.size / (1024 * 1024));
@ -1361,6 +1367,7 @@ static NV_STATUS init_parent_gpu(uvm_parent_gpu_t *parent_gpu,
const UvmGpuPlatformInfo *gpu_platform_info)
{
NV_STATUS status;
UvmGpuFbInfo fb_info = {0};
status = uvm_rm_locked_call(nvUvmInterfaceDeviceCreate(uvm_global_session_handle(),
gpu_info,
@ -1384,8 +1391,15 @@ static NV_STATUS init_parent_gpu(uvm_parent_gpu_t *parent_gpu,
parent_gpu->egm.local_peer_id = gpu_info->egmPeerId;
parent_gpu->egm.base_address = gpu_info->egmBaseAddr;
status = uvm_rm_locked_call(nvUvmInterfaceGetFbInfo(parent_gpu->rm_device, &fb_info));
if (status != NV_OK)
return status;
parent_gpu->sli_enabled = (gpu_info->subdeviceCount > 1);
if (!fb_info.bZeroFb)
parent_gpu->max_allocatable_address = fb_info.maxAllocatableAddress;
parent_gpu->virt_mode = gpu_info->virtMode;
if (parent_gpu->virt_mode == UVM_VIRT_MODE_LEGACY) {
UVM_ERR_PRINT("Failed to init GPU %s. UVM is not supported in legacy virtualization mode\n",
@ -1419,6 +1433,14 @@ static NV_STATUS init_parent_gpu(uvm_parent_gpu_t *parent_gpu,
uvm_mmu_init_gpu_chunk_sizes(parent_gpu);
status = uvm_pmm_devmem_init(parent_gpu);
if (status != NV_OK) {
UVM_ERR_PRINT("failed to intialize device private memory: %s, GPU %s\n",
nvstatusToString(status),
uvm_parent_gpu_name(parent_gpu));
return status;
}
status = uvm_ats_add_gpu(parent_gpu);
if (status != NV_OK) {
UVM_ERR_PRINT("uvm_ats_add_gpu failed: %s, GPU %s\n",
@ -1667,6 +1689,7 @@ static void deinit_parent_gpu(uvm_parent_gpu_t *parent_gpu)
deinit_parent_procfs_files(parent_gpu);
uvm_pmm_devmem_deinit(parent_gpu);
uvm_ats_remove_gpu(parent_gpu);
UVM_ASSERT(atomic64_read(&parent_gpu->mapped_cpu_pages_size) == 0);

View File

@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2024 NVIDIA Corporation
Copyright (c) 2015-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@ -696,6 +696,11 @@ struct uvm_gpu_struct
// ZeroFB testing mode, this will be 0.
NvU64 size;
// Physical start of heap, for SMC enabled GPUs, this is useful to
// partition PMM, it is used by HMM to figure out the right translation
// between HMM ranges and PMM offsets.
NvU64 phys_start;
// Max (inclusive) physical address of this GPU's memory that the driver
// can allocate through PMM (PMA).
NvU64 max_allocatable_address;
@ -1015,6 +1020,13 @@ struct uvm_parent_gpu_struct
// Do not read this field directly, use uvm_gpu_device_handle instead.
uvmGpuDeviceHandle rm_device;
// Total amount of physical memory available on the parent GPU.
NvU64 max_allocatable_address;
#if UVM_IS_CONFIG_HMM()
uvm_pmm_gpu_devmem_t *devmem;
#endif
// The physical address range addressable by the GPU
//
// The GPU has its NV_PFB_XV_UPPER_ADDR register set by RM to
@ -1288,6 +1300,10 @@ struct uvm_parent_gpu_struct
// 47-bit fabric memory physical offset that peer gpus need to access
// to read a peer's memory
NvU64 fabric_memory_window_start;
// 47-bit fabric memory physical offset that peer gpus need to access
// to read remote EGM memory.
NvU64 egm_fabric_memory_window_start;
} nvswitch_info;
struct

View File

@ -321,13 +321,17 @@ void uvm_hmm_unregister_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu, struct mm_
{
uvm_range_tree_node_t *node;
uvm_va_block_t *va_block;
struct range range = gpu->pmm.devmem.pagemap.range;
unsigned long devmem_start;
unsigned long devmem_end;
unsigned long pfn;
bool retry;
if (!uvm_hmm_is_enabled(va_space))
return;
devmem_start = gpu->parent->devmem->pagemap.range.start + gpu->mem_info.phys_start;
devmem_end = devmem_start + gpu->mem_info.size;
if (mm)
uvm_assert_mmap_lock_locked(mm);
uvm_assert_rwsem_locked_write(&va_space->lock);
@ -341,7 +345,7 @@ void uvm_hmm_unregister_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu, struct mm_
do {
retry = false;
for (pfn = __phys_to_pfn(range.start); pfn <= __phys_to_pfn(range.end); pfn++) {
for (pfn = __phys_to_pfn(devmem_start); pfn <= __phys_to_pfn(devmem_end); pfn++) {
struct page *page = pfn_to_page(pfn);
UVM_ASSERT(is_device_private_page(page));
@ -349,7 +353,7 @@ void uvm_hmm_unregister_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu, struct mm_
// This check is racy because nothing stops the page being freed and
// even reused. That doesn't matter though - worst case the
// migration fails, we retry and find the va_space doesn't match.
if (page->zone_device_data == va_space)
if (uvm_pmm_devmem_page_to_va_space(page) == va_space)
if (uvm_hmm_pmm_gpu_evict_pfn(pfn) != NV_OK)
retry = true;
}
@ -1713,7 +1717,7 @@ static void gpu_chunk_remove(uvm_va_block_t *va_block,
uvm_gpu_chunk_t *gpu_chunk;
uvm_gpu_id_t id;
id = uvm_pmm_devmem_page_to_gpu_id(page);
id = uvm_gpu_chunk_get_gpu(uvm_pmm_devmem_page_to_chunk(page))->id;
gpu_state = uvm_va_block_gpu_state_get(va_block, id);
UVM_ASSERT(gpu_state);
@ -1743,7 +1747,7 @@ static NV_STATUS gpu_chunk_add(uvm_va_block_t *va_block,
uvm_gpu_id_t id;
NV_STATUS status;
id = uvm_pmm_devmem_page_to_gpu_id(page);
id = uvm_gpu_chunk_get_gpu(uvm_pmm_devmem_page_to_chunk(page))->id;
gpu_state = uvm_va_block_gpu_state_get(va_block, id);
// It's possible that this is a fresh va_block we're trying to add an
@ -1765,7 +1769,7 @@ static NV_STATUS gpu_chunk_add(uvm_va_block_t *va_block,
gpu_chunk = uvm_pmm_devmem_page_to_chunk(page);
UVM_ASSERT(gpu_chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED);
UVM_ASSERT(gpu_chunk->is_referenced);
UVM_ASSERT(page->zone_device_data == va_block->hmm.va_space);
UVM_ASSERT(uvm_pmm_devmem_page_to_va_space(page) == va_block->hmm.va_space);
if (gpu_state->chunks[page_index] == gpu_chunk)
return NV_OK;
@ -1992,7 +1996,7 @@ static void fill_dst_pfn(uvm_va_block_t *va_block,
hmm_mark_gpu_chunk_referenced(va_block, gpu, gpu_chunk);
UVM_ASSERT(!page_count(dpage));
zone_device_page_init(dpage);
dpage->zone_device_data = va_block->hmm.va_space;
dpage->zone_device_data = gpu_chunk;
dst_pfns[page_index] = migrate_pfn(pfn);
}

View File

@ -130,27 +130,12 @@ static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block,
uvm_tracker_t local_tracker = UVM_TRACKER_INIT();
NV_STATUS status = NV_OK;
NV_STATUS tracker_status;
uvm_prot_t prot = UVM_PROT_READ_WRITE_ATOMIC;
// Get the mask of unmapped pages because it will change after the
// first map operation
uvm_va_block_unmapped_pages_get(va_block, region, &va_block_context->caller_page_mask);
if (uvm_va_block_is_hmm(va_block) && !UVM_ID_IS_CPU(dest_id)) {
// Do not map pages that are already resident on the CPU. This is in
// order to avoid breaking system-wide atomic operations on HMM. HMM's
// implementation of system-side atomic operations involves restricting
// mappings to one processor (CPU or a GPU) at a time. If we were to
// grant a GPU a mapping to system memory, this gets into trouble
// because, on the CPU side, Linux can silently upgrade PTE permissions
// (move from read-only, to read-write, without any MMU notifiers
// firing), thus breaking the model by allowing simultaneous read-write
// access from two separate processors. To avoid that, just don't map
// such pages at all, when migrating.
uvm_page_mask_andnot(&va_block_context->caller_page_mask,
&va_block_context->caller_page_mask,
uvm_va_block_resident_mask_get(va_block, UVM_ID_CPU, NUMA_NO_NODE));
}
// Only map those pages that are not mapped anywhere else (likely due
// to a first touch or a migration). We pass
// UvmEventMapRemoteCauseInvalid since the destination processor of a
@ -166,6 +151,31 @@ static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block,
if (status != NV_OK)
goto out;
if (uvm_va_block_is_hmm(va_block) && UVM_ID_IS_CPU(dest_id)) {
uvm_processor_id_t id;
// Do not atomically map pages that are resident on the CPU. This is in
// order to avoid breaking system-wide atomic operations on HMM. HMM's
// implementation of system-side atomic operations involves restricting
// mappings to one processor (CPU or a GPU) at a time. If we were to
// grant a GPU a mapping to system memory, this gets into trouble
// because, on the CPU side, Linux can silently upgrade PTE permissions
// (move from read-only, to read-write, without any MMU notifiers
// firing), thus breaking the model by allowing simultaneous read-write
// access from two separate processors. To avoid that, don't remote map
// such pages atomically, after migrating.
// Also note that HMM sets CPU mapping for resident pages so the mask
// of pages to be mapped needs to be recomputed without including the
// CPU mapping.
prot = UVM_PROT_READ_WRITE;
uvm_page_mask_region_fill(&va_block_context->caller_page_mask, region);
for_each_gpu_id_in_mask(id, &va_block->mapped) {
uvm_page_mask_andnot(&va_block_context->caller_page_mask,
&va_block_context->caller_page_mask,
uvm_va_block_map_mask_get(va_block, id));
}
}
// Add mappings for AccessedBy processors
//
// No mappings within this call will operate on dest_id, so we don't
@ -176,7 +186,7 @@ static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block,
dest_id,
region,
&va_block_context->caller_page_mask,
UVM_PROT_READ_WRITE_ATOMIC,
prot,
NULL);
out:

View File

@ -1409,11 +1409,13 @@ static bool thrashing_processors_have_fast_access_to(uvm_va_space_t *va_space,
uvm_processor_mask_set(fast_to, to);
}
else {
// Include registered SMC peers and the processor 'to'.
// Include all SMC peers and the processor 'to'.
// This includes SMC peers that are not registered.
// Since not-registered peers cannot be in page_thrashing->processors,
// the value of their respective bits in "fast_to" doesn't matter.
uvm_processor_mask_range_fill(fast_to,
uvm_gpu_id_from_sub_processor(uvm_parent_gpu_id_from_gpu_id(to), 0),
UVM_PARENT_ID_MAX_SUB_PROCESSORS);
uvm_processor_mask_and(fast_to, fast_to, &va_space->registered_gpu_va_spaces);
}
return uvm_processor_mask_subset(&page_thrashing->processors, fast_to);

View File

@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2024 NVIDIA Corporation
Copyright (c) 2015-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@ -3030,69 +3030,23 @@ NvU32 uvm_pmm_gpu_phys_to_virt(uvm_pmm_gpu_t *pmm, NvU64 phys_addr, NvU64 region
#if UVM_IS_CONFIG_HMM()
// Recover the owning uvm_pmm_gpu_t from a device-private struct page.
// Relies on the dev_pagemap being embedded in the PMM (devmem.pagemap),
// so container_of() on page->pgmap yields the enclosing PMM without
// taking any locks. NOTE(review): only valid for pages whose pgmap was
// created by this PMM's devmem pagemap — callers are expected to pass
// device-private pages only.
static uvm_pmm_gpu_t *devmem_page_to_pmm(struct page *page)
{
    return container_of(page->pgmap, uvm_pmm_gpu_t, devmem.pagemap);
}
// Translate a device-private struct page to the uvm_gpu_chunk_t backing it.
// The chunk tree is walked from the root chunk covering the page's physical
// offset down to the chunk whose size matches page_size(page). Must be
// called with the PMM list lock held (callers use uvm_spin_lock on
// pmm->list_lock) so the split/allocated state observed here is stable.
static uvm_gpu_chunk_t *devmem_page_to_chunk_locked(struct page *page)
{
    uvm_pmm_gpu_t *pmm = devmem_page_to_pmm(page);
    // Offset of the page within the devmem physical window; this equals the
    // chunk address space used by PMM.
    NvU64 chunk_addr = ((NvU64)page_to_pfn(page) << PAGE_SHIFT) - pmm->devmem.pagemap.range.start;
    size_t index = chunk_addr / UVM_CHUNK_SIZE_MAX;
    uvm_gpu_chunk_t *root_chunk;
    uvm_gpu_chunk_t *chunk;
    uvm_gpu_chunk_t *parent;
    uvm_chunk_size_t chunk_size;

    UVM_ASSERT(index < pmm->root_chunks.count);
    root_chunk = &pmm->root_chunks.array[index].chunk;
    UVM_ASSERT(root_chunk->address == UVM_ALIGN_DOWN(chunk_addr, UVM_CHUNK_SIZE_MAX));

    // Find the uvm_gpu_chunk_t that corresponds to the device private struct
    // page's PFN. The loop is only 0, 1, or 2 iterations.
    for (chunk = root_chunk;
         uvm_gpu_chunk_get_size(chunk) != page_size(page);
         chunk = parent->suballoc->subchunks[index]) {
        parent = chunk;
        UVM_ASSERT(parent->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT);
        UVM_ASSERT(parent->suballoc);
        chunk_size = uvm_gpu_chunk_get_size(parent->suballoc->subchunks[0]);
        index = (size_t)uvm_div_pow2_64(chunk_addr - parent->address, chunk_size);
        UVM_ASSERT(index < num_subchunks(parent));
    }

    // Fix: the original used '=' (assignment) here, which made the assert
    // vacuously true and silently overwrote chunk->address in debug builds.
    UVM_ASSERT(chunk->address == chunk_addr);
    UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED);
    UVM_ASSERT(chunk->is_referenced);

    return chunk;
}
uvm_gpu_chunk_t *uvm_pmm_devmem_page_to_chunk(struct page *page)
{
uvm_pmm_gpu_t *pmm = devmem_page_to_pmm(page);
uvm_gpu_chunk_t *chunk;
UVM_ASSERT(is_device_private_page(page));
uvm_spin_lock(&pmm->list_lock);
chunk = devmem_page_to_chunk_locked(page);
uvm_spin_unlock(&pmm->list_lock);
return chunk;
return page->zone_device_data;
}
uvm_gpu_id_t uvm_pmm_devmem_page_to_gpu_id(struct page *page)
uvm_va_space_t *uvm_pmm_devmem_page_to_va_space(struct page *page)
{
uvm_pmm_gpu_t *pmm = devmem_page_to_pmm(page);
uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm);
uvm_gpu_chunk_t *gpu_chunk = uvm_pmm_devmem_page_to_chunk(page);
UVM_ASSERT(is_device_private_page(page));
// uvm_hmm_unregister_gpu() needs to do a racy check here so
// page->zone_device_data might be NULL.
if (!gpu_chunk || !gpu_chunk->va_block)
return NULL;
return gpu->id;
return gpu_chunk->va_block->hmm.va_space;
}
// Check there are no orphan pages. This should be only called as part of
@ -3104,12 +3058,17 @@ static bool uvm_pmm_gpu_check_orphan_pages(uvm_pmm_gpu_t *pmm)
{
size_t i;
bool ret = true;
uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm);
unsigned long devmem_start;
unsigned long devmem_end;
unsigned long pfn;
struct range range = pmm->devmem.pagemap.range;
if (!pmm->initialized || !uvm_hmm_is_enabled_system_wide())
return ret;
devmem_start = gpu->parent->devmem->pagemap.range.start + gpu->mem_info.phys_start;
devmem_end = devmem_start + gpu->mem_info.size;
// Scan all the root chunks looking for subchunks which are still
// referenced.
for (i = 0; i < pmm->root_chunks.count; i++) {
@ -3121,7 +3080,7 @@ static bool uvm_pmm_gpu_check_orphan_pages(uvm_pmm_gpu_t *pmm)
root_chunk_unlock(pmm, root_chunk);
}
for (pfn = __phys_to_pfn(range.start); pfn <= __phys_to_pfn(range.end); pfn++) {
for (pfn = __phys_to_pfn(devmem_start); pfn <= __phys_to_pfn(devmem_end); pfn++) {
struct page *page = pfn_to_page(pfn);
if (!is_device_private_page(page)) {
@ -3140,9 +3099,8 @@ static bool uvm_pmm_gpu_check_orphan_pages(uvm_pmm_gpu_t *pmm)
static void devmem_page_free(struct page *page)
{
uvm_pmm_gpu_t *pmm = devmem_page_to_pmm(page);
uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm);
uvm_gpu_chunk_t *chunk;
uvm_gpu_chunk_t *chunk = uvm_pmm_devmem_page_to_chunk(page);
uvm_gpu_t *gpu = uvm_gpu_chunk_get_gpu(chunk);
page->zone_device_data = NULL;
@ -3150,23 +3108,22 @@ static void devmem_page_free(struct page *page)
// we may be in an interrupt context where we can't do that. Instead,
// do a lazy free. Note that we have to use a "normal" spin lock because
// the UVM context is not available.
spin_lock(&pmm->list_lock.lock);
spin_lock(&gpu->pmm.list_lock.lock);
chunk = devmem_page_to_chunk_locked(page);
UVM_ASSERT(chunk->is_referenced);
chunk->is_referenced = false;
list_add_tail(&chunk->list, &pmm->root_chunks.va_block_lazy_free);
list_add_tail(&chunk->list, &gpu->pmm.root_chunks.va_block_lazy_free);
spin_unlock(&pmm->list_lock.lock);
spin_unlock(&gpu->pmm.list_lock.lock);
nv_kthread_q_schedule_q_item(&gpu->parent->lazy_free_q,
&pmm->root_chunks.va_block_lazy_free_q_item);
&gpu->pmm.root_chunks.va_block_lazy_free_q_item);
}
// This is called by HMM when the CPU faults on a ZONE_DEVICE private entry.
static vm_fault_t devmem_fault(struct vm_fault *vmf)
{
uvm_va_space_t *va_space = vmf->page->zone_device_data;
uvm_va_space_t *va_space = uvm_pmm_devmem_page_to_va_space(vmf->page);
if (!va_space)
return VM_FAULT_SIGBUS;
@ -3185,26 +3142,46 @@ static const struct dev_pagemap_ops uvm_pmm_devmem_ops =
.migrate_to_ram = devmem_fault_entry,
};
static NV_STATUS devmem_init(uvm_pmm_gpu_t *pmm)
// Allocating and initialising device private pages takes a significant amount
// of time on very large systems. So rather than doing that every time a GPU is
// registered, we do it once and keep track of the range when the GPU is
// unregistered so it can be reused later.
//
// This function tries to find an existing range of device private pages of
// the requested size and, if one is available, removes it from the cache and
// returns it for reuse.
static uvm_pmm_gpu_devmem_t *devmem_reuse_pagemap(unsigned long size)
{
unsigned long size = pmm->root_chunks.count * UVM_CHUNK_SIZE_MAX;
uvm_pmm_gpu_devmem_t *devmem = &pmm->devmem;
uvm_pmm_gpu_devmem_t *devmem;
list_for_each_entry(devmem, &g_uvm_global.devmem_ranges.list, list_node) {
if (devmem->size == size) {
list_del(&devmem->list_node);
return devmem;
}
}
return NULL;
}
static uvm_pmm_gpu_devmem_t *devmem_alloc_pagemap(unsigned long size)
{
uvm_pmm_gpu_devmem_t *devmem;
struct resource *res;
void *ptr;
NV_STATUS status;
if (!uvm_hmm_is_enabled_system_wide()) {
devmem->pagemap.owner = NULL;
return NV_OK;
}
res = request_free_mem_region(&iomem_resource, size, "nvidia-uvm-hmm");
if (IS_ERR(res)) {
UVM_ERR_PRINT("request_free_mem_region() err %ld\n", PTR_ERR(res));
status = errno_to_nv_status(PTR_ERR(res));
goto err;
return NULL;
}
devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
if (!devmem)
goto err;
devmem->size = size;
devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
devmem->pagemap.range.start = res->start;
devmem->pagemap.range.end = res->end;
@ -3217,43 +3194,77 @@ static NV_STATUS devmem_init(uvm_pmm_gpu_t *pmm)
if (IS_ERR(ptr)) {
UVM_ERR_PRINT("memremap_pages() err %ld\n", PTR_ERR(ptr));
status = errno_to_nv_status(PTR_ERR(ptr));
goto err_release;
goto err_free;
}
return NV_OK;
return devmem;
err_free:
kfree(devmem);
err_release:
release_mem_region(res->start, resource_size(res));
err:
devmem->pagemap.owner = NULL;
return status;
release_mem_region(res->start, resource_size(res));
return NULL;
}
static void devmem_deinit(uvm_pmm_gpu_t *pmm)
NV_STATUS uvm_pmm_devmem_init(uvm_parent_gpu_t *gpu)
{
uvm_pmm_gpu_devmem_t *devmem = &pmm->devmem;
// Create a DEVICE_PRIVATE page for every GPU page available on the parent.
unsigned long size = gpu->max_allocatable_address;
if (!devmem->pagemap.owner)
if (!uvm_hmm_is_enabled_system_wide()) {
gpu->devmem = NULL;
return NV_OK;
}
gpu->devmem = devmem_reuse_pagemap(size);
if (!gpu->devmem)
gpu->devmem = devmem_alloc_pagemap(size);
if (!gpu->devmem)
return NV_ERR_NO_MEMORY;
return NV_OK;
}
void uvm_pmm_devmem_deinit(uvm_parent_gpu_t *gpu)
{
if (!gpu->devmem)
return;
memunmap_pages(&devmem->pagemap);
release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
list_add_tail(&gpu->devmem->list_node, &g_uvm_global.devmem_ranges.list);
gpu->devmem = NULL;
}
void uvm_pmm_devmem_exit(void)
{
uvm_pmm_gpu_devmem_t *devmem, *devmem_next;
list_for_each_entry_safe(devmem, devmem_next, &g_uvm_global.devmem_ranges.list, list_node) {
list_del(&devmem->list_node);
memunmap_pages(&devmem->pagemap);
release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
kfree(devmem);
}
}
unsigned long uvm_pmm_gpu_devmem_get_pfn(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk)
{
return (pmm->devmem.pagemap.range.start + chunk->address) >> PAGE_SHIFT;
uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm);
unsigned long devmem_start = gpu->parent->devmem->pagemap.range.start;
return (devmem_start + chunk->address) >> PAGE_SHIFT;
}
#endif // UVM_IS_CONFIG_HMM()
#if !UVM_IS_CONFIG_HMM()
static NV_STATUS devmem_init(uvm_pmm_gpu_t *pmm)
NV_STATUS uvm_pmm_devmem_init(uvm_parent_gpu_t *gpu)
{
return NV_OK;
}
static void devmem_deinit(uvm_pmm_gpu_t *pmm)
void uvm_pmm_devmem_deinit(uvm_parent_gpu_t *gpu)
{
}
@ -3469,10 +3480,6 @@ NV_STATUS uvm_pmm_gpu_init(uvm_pmm_gpu_t *pmm)
}
}
status = devmem_init(pmm);
if (status != NV_OK)
goto cleanup;
return NV_OK;
cleanup:
uvm_pmm_gpu_deinit(pmm);
@ -3543,8 +3550,6 @@ void uvm_pmm_gpu_deinit(uvm_pmm_gpu_t *pmm)
deinit_caches(pmm);
devmem_deinit(pmm);
pmm->initialized = false;
}

View File

@ -192,22 +192,41 @@ typedef struct uvm_pmm_gpu_chunk_suballoc_struct uvm_pmm_gpu_chunk_suballoc_t;
#if UVM_IS_CONFIG_HMM()
typedef struct uvm_pmm_gpu_struct uvm_pmm_gpu_t;
typedef struct
{
// For g_uvm_global.devmem_ranges
struct list_head list_node;
// Size that was requested when created this region. This may be less than
// the size actually allocated by the kernel due to alignment contraints.
// Figuring out the required alignment at compile time is difficult due to
// unexported macros, so just use the requested size as the search key.
unsigned long size;
struct dev_pagemap pagemap;
} uvm_pmm_gpu_devmem_t;
typedef struct uvm_pmm_gpu_struct uvm_pmm_gpu_t;
// Return the GPU chunk for a given device private struct page.
uvm_gpu_chunk_t *uvm_pmm_devmem_page_to_chunk(struct page *page);
// Return the va_space for a given device private struct page.
uvm_va_space_t *uvm_pmm_devmem_page_to_va_space(struct page *page);
// Return the GPU id for a given device private struct page.
uvm_gpu_id_t uvm_pmm_devmem_page_to_gpu_id(struct page *page);
// Return the PFN of the device private struct page for the given GPU chunk.
unsigned long uvm_pmm_gpu_devmem_get_pfn(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk);
// Free unused ZONE_DEVICE pages.
void uvm_pmm_devmem_exit(void);
#else
static inline void uvm_pmm_devmem_exit(void)
{
}
#endif
#if defined(CONFIG_PCI_P2PDMA) && defined(NV_STRUCT_PAGE_HAS_ZONE_DEVICE_DATA)
@ -349,10 +368,6 @@ typedef struct uvm_pmm_gpu_struct
nv_kthread_q_item_t va_block_lazy_free_q_item;
} root_chunks;
#if UVM_IS_CONFIG_HMM()
uvm_pmm_gpu_devmem_t devmem;
#endif
// Lock protecting PMA allocation, freeing and eviction
uvm_rw_semaphore_t pma_lock;
@ -604,6 +619,10 @@ static uvm_chunk_size_t uvm_chunk_find_prev_size(uvm_chunk_sizes_mask_t chunk_si
// retained, and it's up to the caller to release them.
NvU32 uvm_pmm_gpu_phys_to_virt(uvm_pmm_gpu_t *pmm, NvU64 phys_addr, NvU64 region_size, uvm_reverse_map_t *out_mappings);
// Allocate and initialise struct page data in the kernel to support HMM.
NV_STATUS uvm_pmm_devmem_init(uvm_parent_gpu_t *gpu);
void uvm_pmm_devmem_deinit(uvm_parent_gpu_t *parent_gpu);
// Iterates over every size in the input mask from smallest to largest
#define for_each_chunk_size(__size, __chunk_sizes) \
for ((__size) = (__chunk_sizes) ? uvm_chunk_find_first_size(__chunk_sizes) : \

View File

@ -2839,10 +2839,14 @@ static bool block_check_egm_peer(uvm_va_space_t *va_space, uvm_gpu_t *gpu, int n
remote_node_info = uvm_va_space_get_egm_numa_node_info(va_space, nid);
UVM_ASSERT(!uvm_parent_processor_mask_empty(&remote_node_info->parent_gpus));
for_each_parent_gpu_in_mask(parent_gpu, &remote_node_info->parent_gpus) {
UVM_ASSERT(parent_gpu->egm.enabled);
NvU64 page_addr = phys_addr.address;
if (phys_addr.address + parent_gpu->egm.base_address >= remote_node_info->node_start &&
phys_addr.address + parent_gpu->egm.base_address < remote_node_info->node_end &&
UVM_ASSERT(parent_gpu->egm.enabled);
page_addr += parent_gpu->egm.base_address;
if (parent_gpu->nvswitch_info.is_nvswitch_connected && gpu->parent != parent_gpu)
page_addr -= parent_gpu->nvswitch_info.egm_fabric_memory_window_start;
if (page_addr >= remote_node_info->node_start && page_addr < remote_node_info->node_end &&
remote_node_info->routing_table[uvm_parent_id_gpu_index(gpu->parent->id)] == parent_gpu) {
return true;
}
@ -3229,8 +3233,15 @@ static uvm_gpu_phys_address_t block_phys_page_address(uvm_va_block_t *block,
if (routing_gpu) {
struct page *page = uvm_cpu_chunk_get_cpu_page(block, chunk, block_page.page_index);
phys_addr = page_to_phys(page);
aperture = uvm_gpu_egm_peer_aperture(gpu->parent, routing_gpu);
// Remote EGM routing is based on both the EGM base address and EGM
// fabric memory window.
if (routing_gpu->nvswitch_info.is_nvswitch_connected && routing_gpu != gpu->parent)
phys_addr += routing_gpu->nvswitch_info.egm_fabric_memory_window_start;
uvm_page_mask_set(&accessing_gpu_state->egm_pages, block_page.page_index);
return uvm_gpu_phys_address(aperture, phys_addr - routing_gpu->egm.base_address);
}
@ -13575,6 +13586,9 @@ NV_STATUS uvm_test_va_residency_info(UVM_TEST_VA_RESIDENCY_INFO_PARAMS *params,
struct page *page = block_page_get(block, block_page);
phys_addr = page_to_phys(page) - egm_routing_gpu->egm.base_address;
if (egm_routing_gpu->nvswitch_info.is_nvswitch_connected && egm_routing_gpu != gpu->parent)
phys_addr += egm_routing_gpu->nvswitch_info.egm_fabric_memory_window_start;
params->is_egm_mapping[count] = true;
}
}

View File

@ -612,6 +612,42 @@ nv_dma_buf_unmap_pfns(
}
}
static NvU32
nv_dma_buf_get_sg_count (
struct device *dev,
nv_dma_buf_file_private_t *priv,
NvU32 *max_seg_size
)
{
NvU32 dma_max_seg_size, i;
NvU32 nents = 0;
dma_max_seg_size = NV_ALIGN_DOWN(dma_get_max_seg_size(dev), PAGE_SIZE);
if (dma_max_seg_size < PAGE_SIZE)
{
return 0;
}
// Calculate nents needed to allocate sg_table
for (i = 0; i < priv->num_objects; i++)
{
NvU32 range_count = priv->handles[i].memArea.numRanges;
NvU32 index;
for (index = 0; index < range_count; index++)
{
NvU64 length = priv->handles[i].memArea.pRanges[index].size;
NvU64 count = length + dma_max_seg_size - 1;
do_div(count, dma_max_seg_size);
nents += count;
}
}
*max_seg_size = dma_max_seg_size;
return nents;
}
static struct sg_table*
nv_dma_buf_map_pages (
struct device *dev,
@ -620,15 +656,11 @@ nv_dma_buf_map_pages (
{
struct sg_table *sgt = NULL;
struct scatterlist *sg;
NvU32 nents = 0;
NvU32 i;
NvU32 dma_max_seg_size = 0;
NvU32 i, nents;
int rc;
// Calculate nents needed to allocate sg_table
for (i = 0; i < priv->num_objects; i++)
{
nents += priv->handles[i].memArea.numRanges;
}
nents = nv_dma_buf_get_sg_count(dev, priv, &dma_max_seg_size);
NV_KZALLOC(sgt, sizeof(struct sg_table));
if (sgt == NULL)
@ -650,20 +682,30 @@ nv_dma_buf_map_pages (
NvU32 index = 0;
for (index = 0; index < range_count; index++)
{
NvU64 addr = priv->handles[i].memArea.pRanges[index].start;
NvU64 len = priv->handles[i].memArea.pRanges[index].size;
struct page *page = NV_GET_PAGE_STRUCT(addr);
NvU64 dma_addr = priv->handles[i].memArea.pRanges[index].start;
NvU64 dma_len = priv->handles[i].memArea.pRanges[index].size;
if ((page == NULL) || (sg == NULL))
// Split each range into dma_max_seg_size chunks
while(dma_len != 0)
{
goto free_table;
}
NvU32 sg_len = NV_MIN(dma_len, dma_max_seg_size);
struct page *page = NV_GET_PAGE_STRUCT(dma_addr);
sg_set_page(sg, page, len, NV_GET_OFFSET_IN_PAGE(addr));
sg = sg_next(sg);
if ((page == NULL) || (sg == NULL))
{
goto free_table;
}
sg_set_page(sg, page, sg_len, NV_GET_OFFSET_IN_PAGE(dma_addr));
dma_addr += sg_len;
dma_len -= sg_len;
sg = sg_next(sg);
}
}
}
WARN_ON(sg != NULL);
// DMA map the sg_table
rc = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
if (rc <= 0)
@ -693,36 +735,16 @@ nv_dma_buf_map_pfns (
struct sg_table *sgt = NULL;
struct scatterlist *sg;
nv_dma_device_t peer_dma_dev = {{ 0 }};
NvU32 dma_max_seg_size;
NvU32 nents = 0;
NvU32 dma_max_seg_size = 0;
NvU32 mapped_nents = 0;
NvU32 i = 0;
NvU32 nents;
int rc = 0;
peer_dma_dev.dev = dev;
peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask;
dma_max_seg_size = NV_ALIGN_DOWN(dma_get_max_seg_size(dev), PAGE_SIZE);
if (dma_max_seg_size < PAGE_SIZE)
{
return NULL;
}
// Calculate nents needed to allocate sg_table
for (i = 0; i < priv->num_objects; i++)
{
NvU32 range_count = priv->handles[i].memArea.numRanges;
NvU32 index;
for (index = 0; index < range_count; index++)
{
NvU64 length = priv->handles[i].memArea.pRanges[index].size;
NvU64 count = length + dma_max_seg_size - 1;
do_div(count, dma_max_seg_size);
nents += count;
}
}
nents = nv_dma_buf_get_sg_count(dev, priv, &dma_max_seg_size);
NV_KZALLOC(sgt, sizeof(struct sg_table));
if (sgt == NULL)
@ -777,6 +799,9 @@ nv_dma_buf_map_pfns (
}
}
}
WARN_ON(sg != NULL);
sgt->nents = mapped_nents;
WARN_ON(sgt->nents != sgt->orig_nents);

View File

@ -445,7 +445,9 @@ static int nvidia_mmap_sysmem(
}
else
{
vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot, 0);
if (at->flags.unencrypted)
vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot, 0);
ret = vm_insert_page(vma, start,
NV_GET_PAGE_STRUCT(at->page_table[j]->phys_addr));
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -308,6 +308,15 @@ static NvU32 find_gpu_numa_nodes_in_srat(nv_linux_state_t *nvl)
struct acpi_srat_generic_affinity *gi;
NvU32 numa_node = NUMA_NO_NODE;
if (NV_PCI_DEVFN(nvl->pci_dev) != 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Failing to parse SRAT GI for %04x:%02x:%02x.%x "
"since non-zero device function is not supported.\n",
NV_PCI_DOMAIN_NUMBER(nvl->pci_dev), NV_PCI_BUS_NUMBER(nvl->pci_dev),
NV_PCI_SLOT_NUMBER(nvl->pci_dev), PCI_FUNC(nvl->pci_dev->devfn));
return 0;
}
if (acpi_get_table(ACPI_SIG_SRAT, 0, &table_header)) {
nv_printf(NV_DBG_INFO, "NVRM: Failed to parse the SRAT table.\n");
return 0;
@ -331,9 +340,14 @@ static NvU32 find_gpu_numa_nodes_in_srat(nv_linux_state_t *nvl)
(((unsigned long)subtable_header) + subtable_header_length < table_end)) {
if (subtable_header->type == ACPI_SRAT_TYPE_GENERIC_AFFINITY) {
NvU8 busAtByte2, busAtByte3;
gi = (struct acpi_srat_generic_affinity *) subtable_header;
busAtByte2 = gi->device_handle[2];
busAtByte3 = gi->device_handle[3];
// Device and function should be zero enforced by above check
gi_dbdf = *((NvU16 *)(&gi->device_handle[0])) << 16 |
*((NvU16 *)(&gi->device_handle[2]));
(busAtByte2 != 0 ? busAtByte2 : busAtByte3) << 8;
if (gi_dbdf == dev_dbdf) {
numa_node = pxm_to_node(gi->proximity_domain);
@ -347,6 +361,31 @@ static NvU32 find_gpu_numa_nodes_in_srat(nv_linux_state_t *nvl)
pxm_count = 0;
goto exit;
}
nv_printf(NV_DBG_INFO,
"NVRM: matching SRAT GI entry: 0x%x 0x%x 0x%x 0x%x PXM: %d\n",
gi->device_handle[3],
gi->device_handle[2],
gi->device_handle[1],
gi->device_handle[0],
gi->proximity_domain);
if ((busAtByte2) == 0 &&
(busAtByte3) != 0)
{
/*
* TODO: Remove this WAR once Hypervisor stack is updated
* to fix this bug and after all CSPs have moved to using
* the updated Hypervisor stack with fix.
*/
nv_printf(NV_DBG_WARNINGS,
"NVRM: PCIe bus value picked from byte 3 offset in SRAT GI entry: 0x%x 0x%x 0x%x 0x%x PXM: %d\n"
"NVRM: Hypervisor stack is old and not following ACPI spec defined offset.\n"
"NVRM: Please consider upgrading the Hypervisor stack as this workaround will be removed in future release.\n",
gi->device_handle[3],
gi->device_handle[2],
gi->device_handle[1],
gi->device_handle[0],
gi->proximity_domain);
}
}
}
@ -792,7 +831,10 @@ next_bar:
NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_DISABLED);
nvl->numa_info.node_id = NUMA_NO_NODE;
nv_init_coherent_link_info(nv);
if (pci_devid_is_self_hosted(pci_dev->device))
{
nv_init_coherent_link_info(nv);
}
#if defined(NVCPU_PPC64LE)
// Use HW NUMA support as a proxy for ATS support. This is true in the only

View File

@ -1630,17 +1630,25 @@ static void nv_init_mapping_revocation(nv_linux_state_t *nvl,
nv_linux_file_private_t *nvlfp,
struct inode *inode)
{
down(&nvl->mmap_lock);
/* Set up struct address_space for use with unmap_mapping_range() */
address_space_init_once(&nvlfp->mapping);
nvlfp->mapping.host = inode;
nvlfp->mapping.a_ops = inode->i_mapping->a_ops;
file->f_mapping = &nvlfp->mapping;
}
/* Add nvlfp to list of open files in nvl for mapping revocation */
/* Adds nvlfp to list of open files for mapping revocation */
static void nv_add_open_file(nv_linux_state_t *nvl,
nv_linux_file_private_t *nvlfp)
{
nvlfp->nvptr = nvl;
/*
* nvl->open_files and other mapping revocation members in nv_linux_state_t
* are protected by nvl->mmap_lock instead of nvl->ldata_lock.
*/
down(&nvl->mmap_lock);
list_add(&nvlfp->entry, &nvl->open_files);
up(&nvl->mmap_lock);
}
@ -1690,11 +1698,12 @@ static void nvidia_open_deferred(void *nvlfp_raw)
*/
down(&nvl->ldata_lock);
rc = nv_open_device_for_nvlfp(NV_STATE_PTR(nvl), nvlfp->sp, nvlfp);
up(&nvl->ldata_lock);
/* Set nvptr only upon success (where nvl->usage_count is incremented) */
/* Only add open file tracking where nvl->usage_count is incremented */
if (rc == 0)
nvlfp->nvptr = nvl;
nv_add_open_file(nvl, nvlfp);
up(&nvl->ldata_lock);
complete_all(&nvlfp->open_complete);
}
@ -1813,6 +1822,7 @@ nvidia_open(
}
nv = NV_STATE_PTR(nvl);
nv_init_mapping_revocation(nvl, file, nvlfp, inode);
if (nv_try_lock_foreground_open(file, nvl) == 0)
{
@ -1823,11 +1833,11 @@ nvidia_open(
rc = nv_open_device_for_nvlfp(nv, nvlfp->sp, nvlfp);
up(&nvl->ldata_lock);
/* Set nvptr only upon success (where nvl->usage_count is incremented) */
/* Only add open file tracking where nvl->usage_count is incremented */
if (rc == 0)
nvlfp->nvptr = nvl;
nv_add_open_file(nvl, nvlfp);
up(&nvl->ldata_lock);
complete_all(&nvlfp->open_complete);
}
@ -1882,10 +1892,6 @@ failed:
NV_SET_FILE_PRIVATE(file, NULL);
}
}
else
{
nv_init_mapping_revocation(nvl, file, nvlfp, inode);
}
return rc;
}

View File

@ -1672,7 +1672,7 @@ NV_STATUS NV_API_CALL os_alloc_pages_node
NV_STATUS status = NV_ERR_NOT_SUPPORTED;
#if defined(__GFP_THISNODE) && defined(GFP_HIGHUSER_MOVABLE) && \
defined(__GFP_COMP) && defined(__GFP_NORETRY) && defined(__GFP_NOWARN)
defined(__GFP_COMP) && defined(__GFP_NOWARN)
gfp_t gfp_mask;
struct page *alloc_addr;
unsigned int order = get_order(size);
@ -1689,13 +1689,11 @@ NV_STATUS NV_API_CALL os_alloc_pages_node
* pages, which is needed in order to use
* vm_insert_page API.
*
* 4. __GFP_NORETRY: Used to avoid the Linux kernel OOM killer.
*
* 5. __GFP_NOWARN: Used to avoid a WARN_ON in the slowpath if
* 4. __GFP_NOWARN: Used to avoid a WARN_ON in the slowpath if
* the requested order is too large (just fail
* instead).
*
* 6. (Optional) __GFP_RECLAIM: Used to allow/forbid reclaim.
* 5. (Optional) __GFP_RECLAIM: Used to allow/forbid reclaim.
* This is part of GFP_USER and consequently
* GFP_HIGHUSER_MOVABLE.
*
@ -1709,7 +1707,30 @@ NV_STATUS NV_API_CALL os_alloc_pages_node
*/
gfp_mask = __GFP_THISNODE | GFP_HIGHUSER_MOVABLE | __GFP_COMP |
__GFP_NORETRY | __GFP_NOWARN;
__GFP_NOWARN;
#if defined(__GFP_RETRY_MAYFAIL)
/*
* __GFP_RETRY_MAYFAIL : Used to avoid the Linux kernel OOM killer.
* To help PMA on paths where UVM might be
* in memory over subscription. This gives UVM
* a chance to free memory before invoking any
* action from the OOM killer.
* Freeing non-essential memory will also benefit
* the system as a whole.
*/
gfp_mask |= __GFP_RETRY_MAYFAIL;
#elif defined(__GFP_NORETRY)
/*
* __GFP_NORETRY : Use __GFP_NORETRY on older kernels where
* __GFP_RETRY_MAYFAIL is not present.
*/
gfp_mask |= __GFP_NORETRY;
#endif
#if defined(__GFP_RECLAIM)
if (flag & NV_ALLOC_PAGES_NODE_SKIP_RECLAIM)

View File

@ -567,8 +567,10 @@ namespace DisplayPort
virtual AuxRetry::status notifySDPErrDetectionCapability() = 0;
virtual bool isDp2xChannelCodingCapable() = 0;
virtual void setIgnoreCableIdCaps(bool bIgnore) = 0;
virtual void overrideCableIdCap(LinkRate linkRate, bool bEnable) = 0;
virtual void initialize() = 0;
virtual AuxRetry::status setMainLinkChannelCoding(MainLinkChannelCoding channelCoding) = 0;
virtual void setConnectorTypeC(bool bTypeC) = 0;
virtual ~DPCDHAL() {}
};
@ -1483,23 +1485,16 @@ namespace DisplayPort
bool clearDpTunnelingEstimatedBwStatus();
bool clearDpTunnelingBwAllocationCapStatus();
virtual AuxRetry::status notifySDPErrDetectionCapability()
{
return AuxRetry::ack;
}
virtual bool isDp2xChannelCodingCapable()
{
return false;
}
virtual void setIgnoreCableIdCaps(bool bIgnore)
{
return;
}
virtual AuxRetry::status notifySDPErrDetectionCapability() { return AuxRetry::ack; }
virtual bool isDp2xChannelCodingCapable() { return false; }
virtual void setIgnoreCableIdCaps(bool bIgnore) { return; }
virtual void overrideCableIdCap(LinkRate linkRate, bool bEnable) { return; }
// implement this function if DPCDHALImpl needs updated state between hotunplug/plug
virtual void initialize(){};
virtual AuxRetry::status setMainLinkChannelCoding(MainLinkChannelCoding channelCoding){ return AuxRetry::ack; }
virtual MainLinkChannelCoding getMainLinkChannelCoding() { return ChannelCoding8B10B; }
virtual void setConnectorTypeC(bool bTypeC) {};
};
}

View File

@ -104,14 +104,17 @@ namespace DisplayPort
virtual AuxRetry::status setMainLinkChannelCoding(MainLinkChannelCoding channelCoding);
virtual MainLinkChannelCoding getMainLinkChannelCoding();
void performCableIdHandshake();
virtual void setGpuDPSupportedVersions(NvU32 _gpuDPSupportedVersions);
virtual bool isDp2xChannelCodingCapable();
virtual void parseAndReadCaps();
virtual LinkRate getMaxLinkRate();
virtual NvU32 getUHBRSupported();
virtual void setIgnoreCableIdCaps(bool bIgnore){ bIgnoreCableIdCaps = bIgnore; }
void performCableIdHandshake();
void performCableIdHandshakeForTypeC();
void parseAndSetCableId(NvU8 cableId);
virtual void setGpuDPSupportedVersions(NvU32 _gpuDPSupportedVersions);
virtual bool isDp2xChannelCodingCapable();
virtual void parseAndReadCaps();
virtual LinkRate getMaxLinkRate();
virtual NvU32 getUHBRSupported();
virtual void setIgnoreCableIdCaps(bool bIgnore){ bIgnoreCableIdCaps = bIgnore; }
virtual void overrideCableIdCap(LinkRate linkRate, bool bEnable);
virtual bool parseTestRequestPhy();
virtual bool parseTestRequestTraining(NvU8 * buffer);
// DPCD offset 2230 - 2250
@ -123,13 +126,16 @@ namespace DisplayPort
// class fields that need re-initialization
bool bIgnoreCableIdCaps;
bool bConnectorIsTypeC;
virtual void initialize()
{
setIgnoreCableIdCaps(false);
}
DPCDHALImpl2x(AuxBus * bus, Timer * timer) : DPCDHALImpl(bus, timer), bIgnoreCableIdCaps(false)
virtual void setConnectorTypeC(bool bTypeC);
DPCDHALImpl2x(AuxBus * bus, Timer * timer) : DPCDHALImpl(bus, timer), bIgnoreCableIdCaps(false), bConnectorIsTypeC(false)
{
dpMemZero(&caps2x, sizeof(caps2x));
dpMemZero(&interrupts2x, sizeof(interrupts2x));

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -329,6 +329,12 @@ namespace DisplayPort
// On eDP, do not cache the source OUI if it reads 0. See bug 4793112
bool bSkipZeroOuiCache;
bool bDisable5019537Fix;
bool bForceHeadShutdownFromRegkey;
bool bForceHeadShutdownPerMonitor;
//
// Dual SST Partner connector object pointer
ConnectorImpl *pCoupledConnector;

View File

@ -79,9 +79,10 @@ namespace DisplayPort
{
struct _Enum_Path
{
unsigned availableStreams, total, free;
unsigned availableStreams, total, free, dfpLinkAvailable;
bool bPathFECCapable;
bool dataValid; // Is the cache valid?
bool availablePbnUpdated;
} enum_path;
struct Compound_Query_State
@ -110,6 +111,7 @@ namespace DisplayPort
void resetCacheInferredLink();
LinkConfiguration * inferLeafLink(unsigned * totalLinkSlots);
void inferPathConstraints();
DeviceImpl * parent; // Upstream parent device
@ -503,8 +505,8 @@ namespace DisplayPort
unsigned getDscMaxSliceWidth();
unsigned getDscDecoderColorDepthSupportMask();
void setDscDecompressionDevice(bool bDscCapBasedOnParent);
virtual bool getDeviceSpecificData(NvU8 *oui, NvU8 *deviceIdString,
NvU8 *hwRevision, NvU8 *swMajorRevision,
virtual bool getDeviceSpecificData(NvU8 *oui, NvU8 *deviceIdString,
NvU8 *hwRevision, NvU8 *swMajorRevision,
NvU8 *swMinorRevision);
virtual bool setModeList(DisplayPort::DpModesetParams *pModeList, unsigned numModes);

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2010-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -167,6 +167,7 @@ namespace DisplayPort
bool useLegacyAddress;
bool bIgnoreDscCap; // Ignore DSC even if sink reports DSC capability
bool bDisableDownspread;
bool bForceHeadShutdown;
bool bSkipCableIdCheck;
bool bAllocateManualTimeslots;
}_WARFlags;

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -425,10 +425,7 @@ namespace DisplayPort
bool freeDisplayId(NvU32 displayId);
virtual bool queryAndUpdateDfpParams();
virtual bool queryGPUCapability();
virtual void updateFallbackMap(NvU32 maxLaneCount, LinkRate maxLinkRate, NvU32 sinkUhbrCaps = 0)
{
return;
}
bool getEdpPowerData(bool *panelPowerOn, bool *dpcdPowerStateD0);
virtual bool vrrRunEnablementStage(unsigned stage, NvU32 *status);

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -107,6 +107,8 @@ namespace DisplayPort
bool bSupportUHBR2_70; // Support UHBR2.7 for internal testing.
bool bSupportUHBR5_00; // Support UHBR5.0 for internal testing.
bool bConnectorIsUSBTypeC;
// Start time of DP2.x LT Channel Eqaulization phase.
NvU64 channelEqualizationStartTimeUs;
@ -124,7 +126,7 @@ namespace DisplayPort
// and with correct channel encoding.
// Return false if not.
//
bool isSupportedDPLinkConfig(LinkConfiguration &link);
virtual bool isSupportedDPLinkConfig(LinkConfiguration &link);
// Before link training start, reset DPRX link and make sure it's ready.
bool resetDPRXLink(DP2XResetParam param);
@ -144,7 +146,8 @@ namespace DisplayPort
virtual bool clearFlushMode(FlushModePhase phase, NvU32 attachFailedHeadMask = 0, NvU32 headIndex = 0);
virtual bool isRgFlushSequenceUsed() {return bUseRgFlushSequence;}
void applyDP2xRegkeyOverrides();
virtual NvU32 headToStream(NvU32 head, bool bSidebandMessageSupported, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY);
virtual NvU32 headToStream(NvU32 head, bool bSidebandMessageSupported,
DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY);
// Link Rate will return the value with 10M convention!
virtual void getLinkConfig(unsigned &laneCount, NvU64 & linkRate);
@ -170,6 +173,12 @@ namespace DisplayPort
virtual bool getDp2xLaneData(NvU32 *numLanes, NvU32 *data);
virtual bool setDp2xLaneData(NvU32 numLanes, NvU32 *data);
virtual bool physicalLayerSetDP2xTestPattern(DP2xPatternInfo *patternInfo);
virtual bool isConnectorUSBTypeC()
{
return bConnectorIsUSBTypeC;
}
virtual void invalidateLinkRatesInFallbackTable(const LinkRate linkRate);
};
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -280,16 +280,14 @@ namespace DisplayPort
virtual bool queryGPUCapability() {return false;}
virtual bool queryAndUpdateDfpParams() = 0;
virtual void updateFallbackMap(NvU32 maxLaneCount, LinkRate maxLinkRate, NvU32 sinkUhbrCaps = 0) { return; }
virtual bool isConnectorUSBTypeC() { return false; }
virtual void invalidateLinkRatesInFallbackTable(const LinkRate linkRate) { return; }
virtual bool setFlushMode(FlushModePhase phase) { return false; }
virtual bool clearFlushMode(FlushModePhase phase, NvU32 attachFailedHeadMask = 0, NvU32 headIndex = 0) { return false; }
virtual bool getDp2xLaneData(NvU32 *numLanes, NvU32 *data)
{
return false;
}
virtual bool setDp2xLaneData(NvU32 numLanes, NvU32 *data)
{
return false;
}
virtual bool getDp2xLaneData(NvU32 *numLanes, NvU32 *data) { return false; }
virtual bool setDp2xLaneData(NvU32 numLanes, NvU32 *data) { return false; }
virtual bool isSupportedDPLinkConfig(LinkConfiguration &link) {return false; };
virtual bool getEdpPowerData(bool *panelPowerOn, bool *bDPCDPowerStateD0) = 0;
virtual bool vrrRunEnablementStage(unsigned stage, NvU32 *status) = 0;

View File

@ -278,6 +278,7 @@ namespace DisplayPort
bool bFECCapability;
unsigned TotalPBN;
unsigned FreePBN;
unsigned DFPLinkAvailablePBN;
} reply;
EnumPathResMessage(const Address & target, unsigned port, bool point);
};

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -85,18 +85,21 @@
#define NV_DP2X_REGKEY_FPGA_UHBR_SUPPORT_2_7G NVBIT(1)
#define NV_DP2X_REGKEY_FPGA_UHBR_SUPPORT_5_0G NVBIT(2)
#define NV_DP2X_IGNORE_CABLE_ID_CAPS "DP2X_IGNORE_CABLE_ID_CAPS"
//
// Bug 4388987 : This regkey will disable reading PCON caps for MST.
//
#define NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED "DP_BUG_4388987_WAR"
#define NV_DP_REGKEY_DISABLE_TUNNEL_BW_ALLOCATION "DP_DISABLE_TUNNEL_BW_ALLOCATION"
#define NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED "DP_BUG_4388987_WAR"
#define NV_DP_REGKEY_DISABLE_TUNNEL_BW_ALLOCATION "DP_DISABLE_TUNNEL_BW_ALLOCATION"
// Bug 4793112 : On eDP panel, do not cache source OUI if it reads zero
#define NV_DP_REGKEY_SKIP_ZERO_OUI_CACHE "DP_SKIP_ZERO_OUI_CACHE"
#define NV_DP_REGKEY_SKIP_ZERO_OUI_CACHE "DP_SKIP_ZERO_OUI_CACHE"
#define NV_DP_REGKEY_DISABLE_FIX_FOR_5019537 "DP_DISABLE_5019537_FIX"
// Bug 5088957 : Force head shutdown in DpLib
#define NV_DP_REGKEY_FORCE_HEAD_SHUTDOWN "DP_WAR_5088957"
//
// Data Base used to store all the regkey values.
@ -136,9 +139,10 @@ struct DP_REGKEY_DATABASE
bool bForceDisableTunnelBwAllocation;
bool bDownspreadDisabled;
bool bSkipZeroOuiCache;
bool bDisable5019537Fix;
bool bForceHeadShutdown;
};
extern struct DP_REGKEY_DATABASE dpRegkeyDatabase;
#endif //INCLUDED_DP_REGKEYDATABASE_H

View File

@ -35,47 +35,110 @@
using namespace DisplayPort;
void DPCDHALImpl2x::performCableIdHandshake()
void DPCDHALImpl2x::parseAndSetCableId(NvU8 cableId)
{
NvU8 byte = 0;
caps2x.cableCaps.bUHBR_10GSupported = true;
if (AuxRetry::ack ==
bus.read(NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, &byte, sizeof byte))
caps2x.cableCaps.bUHBR_20GSupported =
FLD_TEST_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPRX, _UHBR20_10_CAPABILITY, _10_AND_20_GBPS_SUPPORTED, cableId);
caps2x.cableCaps.bUHBR_13_5GSupported =
FLD_TEST_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPRX, _13_5_GBPS_SUPPORTED, _YES, cableId);
switch (DRF_VAL(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPRX, _CABLE_TYPE, cableId))
{
caps2x.cableCaps.bUHBR_10GSupported =
FLD_TEST_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPRX, _UHBR20_10_CAPABILITY, _10_0_GBPS_SUPPORTED, byte) ||
FLD_TEST_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPRX, _UHBR20_10_CAPABILITY, _10_AND_20_GBPS_SUPPORTED, byte);
case NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX_CABLE_TYPE_CABLE_TYPE_UNKNOWN:
caps2x.cableCaps.cableType = CableTypeUnknown;
break;
case NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX_CABLE_TYPE_PASSIVE:
caps2x.cableCaps.cableType = CableTypePassive;
break;
case NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX_CABLE_TYPE_LRD:
caps2x.cableCaps.cableType = CableTypeLRD;
break;
case NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX_CABLE_TYPE_ACTIVE_RETIMER:
caps2x.cableCaps.cableType = CableTypeActiveReTimer;
break;
default:
DP_PRINTF(DP_ERROR, "Unknown cable type\n");
break;
}
}
caps2x.cableCaps.bUHBR_20GSupported =
FLD_TEST_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPRX, _UHBR20_10_CAPABILITY, _10_AND_20_GBPS_SUPPORTED, byte);
void DPCDHALImpl2x::performCableIdHandshakeForTypeC()
{
NvU8 txCableCaps = 0;
NvU8 rxCableCaps = 0;
caps2x.cableCaps.bUHBR_13_5GSupported =
FLD_TEST_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPRX, _13_5_GBPS_SUPPORTED, _YES, byte);
switch (DRF_VAL(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPRX, _CABLE_TYPE, byte))
{
case NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX_CABLE_TYPE_CABLE_TYPE_UNKNOWN:
caps2x.cableCaps.cableType = CableTypeUnknown;
break;
case NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX_CABLE_TYPE_PASSIVE:
caps2x.cableCaps.cableType = CableTypePassive;
break;
case NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX_CABLE_TYPE_LRD:
caps2x.cableCaps.cableType = CableTypeLRD;
break;
case NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX_CABLE_TYPE_ACTIVE_RETIMER:
caps2x.cableCaps.cableType = CableTypeActiveReTimer;
break;
default:
DP_PRINTF(DP_ERROR, "Unknown cable type\n");
break;
}
// Write sink caps to NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPTX first.
if (caps2x.bUHBR_20GSupported)
{
txCableCaps = FLD_SET_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPTX, _UHBR20_10_CAPABILITY,
_10_AND_20_GBPS_SUPPORTED, txCableCaps);
}
else
{
txCableCaps = FLD_SET_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPTX, _UHBR20_10_CAPABILITY,
_10_0_GBPS_SUPPORTED, txCableCaps);
}
if (caps2x.bUHBR_13_5GSupported)
{
txCableCaps = FLD_SET_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPTX, _13_5_GBPS_SUPPORTED,
_YES, txCableCaps);
}
if (byte == 0)
// Set cable type based on if any LTTPR is detected.
if (!bLttprSupported || (this->caps.phyRepeaterCount == 0))
{
caps2x.cableCaps.bUHBR_10GSupported = true;
return;
txCableCaps = FLD_SET_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPTX, _CABLE_TYPE,
_PASSIVE, txCableCaps);
}
else
{
txCableCaps = FLD_SET_DRF(_DPCD20, _CABLE_ATTRIBUTES_UPDATED_BY_DPTX, _CABLE_TYPE,
_ACTIVE_RETIMER, txCableCaps);
}
if (AuxRetry::ack != bus.write(NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, &txCableCaps,
sizeof txCableCaps))
{
DP_PRINTF(DP_WARNING, "Failed to write NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPTX");
}
// Check Cable ID from DPRX
if (AuxRetry::ack !=
bus.read(NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, &rxCableCaps, sizeof rxCableCaps))
{
DP_PRINTF(DP_WARNING, "Failed to read NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX for updated results");
}
parseAndSetCableId(rxCableCaps);
// If no matches, reflect that to the DPRX
if (txCableCaps != rxCableCaps)
{
if (AuxRetry::ack !=
bus.write(NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, &rxCableCaps, sizeof rxCableCaps))
{
DP_PRINTF(DP_WARNING, "Failed to update NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPTX -> %02x", rxCableCaps);
}
}
}
void DPCDHALImpl2x::performCableIdHandshake()
{
    // USB Type-C connectors need the bidirectional DPTX/DPRX handshake;
    // delegate to the dedicated Type-C path and stop here.
    if (this->bConnectorIsTypeC)
    {
        performCableIdHandshakeForTypeC();
        return;
    }

    // Plain DP connector: just read the cable attributes the DPRX
    // reports and fold them into the cached cable capabilities.
    NvU8 rxCableCaps = 0;
    if (AuxRetry::ack ==
        bus.read(NV_DPCD20_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, &rxCableCaps, sizeof rxCableCaps))
    {
        parseAndSetCableId(rxCableCaps);
    }
}
@ -183,7 +246,7 @@ void DPCDHALImpl2x::parseAndReadCaps()
}
}
if (!bIgnoreCableIdCaps)
if (caps2x.bDP2xChannelCodingSupported && !bIgnoreCableIdCaps)
{
performCableIdHandshake();
}
@ -231,6 +294,11 @@ void DPCDHALImpl2x::setGpuDPSupportedVersions(NvU32 _gpuDPSupportedVersions)
return DPCDHALImpl::setGpuDPSupportedVersions(_gpuDPSupportedVersions);
}
// Record whether the physical connector behind this DPCD HAL instance is
// routed through USB Type-C. Consulted by performCableIdHandshake() to
// select the Type-C specific cable-ID handshake path.
void DPCDHALImpl2x::setConnectorTypeC(bool bTypeC)
{
    this->bConnectorIsTypeC = bTypeC;
}
AuxRetry::status DPCDHALImpl2x::notifySDPErrDetectionCapability()
{
NvU8 config = 0;
@ -379,6 +447,24 @@ NvU32 DPCDHALImpl2x::getUHBRSupported()
return uhbrCaps;
}
// Force a single cached cable-ID capability bit on or off for the given
// UHBR link rate. Callers use this to invalidate a rate that failed link
// training (e.g. over a USB-C cable) so it is not advertised again.
void DPCDHALImpl2x::overrideCableIdCap(LinkRate linkRate, bool bEnable)
{
    if (linkRate == dp2LinkRate_20_0Gbps)
    {
        caps2x.cableCaps.bUHBR_20GSupported = bEnable;
    }
    else if (linkRate == dp2LinkRate_13_5Gbps)
    {
        caps2x.cableCaps.bUHBR_13_5GSupported = bEnable;
    }
    else if (linkRate == dp2LinkRate_10_0Gbps)
    {
        caps2x.cableCaps.bUHBR_10GSupported = bEnable;
    }
    else
    {
        DP_PRINTF(DP_ERROR, "DPHAL> Invalid link rate (%d) to override.", linkRate);
    }
}
bool DPCDHALImpl2x::parseTestRequestPhy()
{
NvU8 buffer[2] = {0};

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -147,6 +147,7 @@ ConnectorImpl::ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Co
this->applyRegkeyOverrides(dpRegkeyDatabase);
hal->applyRegkeyOverrides(dpRegkeyDatabase);
hal->setConnectorTypeC(main->isConnectorUSBTypeC());
highestAssessedLC = initMaxLinkConfig();
}
@ -182,6 +183,8 @@ void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatab
}
this->bForceDisableTunnelBwAllocation = dpRegkeyDatabase.bForceDisableTunnelBwAllocation;
this->bSkipZeroOuiCache = dpRegkeyDatabase.bSkipZeroOuiCache;
this->bDisable5019537Fix = dpRegkeyDatabase.bDisable5019537Fix;
this->bForceHeadShutdownFromRegkey = dpRegkeyDatabase.bForceHeadShutdown;
}
void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled)
@ -2655,10 +2658,16 @@ bool ConnectorImpl::isHeadShutDownNeeded(Group * target, // Group
unsigned headIndex,
ModesetInfo modesetInfo)
{
if (bForceHeadShutdownFromRegkey || bForceHeadShutdownPerMonitor)
{
return true;
}
if (linkUseMultistream())
{
return true;
}
if (activeGroups.isEmpty())
{
return false;
@ -4399,9 +4408,18 @@ void ConnectorImpl::assessLink(LinkTrainingType trainType)
}
}
//
// Find the active group(s)
GroupImpl * groupAttached = 0;
if (!this->bDisable5019537Fix)
{
for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next)
{
DP_ASSERT(bIsUefiSystem || linkUseMultistream() || (!groupAttached && "Multiple attached heads"));
groupAttached = (GroupImpl * )e;
}
}
// Disconnect heads
//
bool bIsFlushModeEnabled = enableFlush();
if (bIsFlushModeEnabled)
@ -4442,6 +4460,15 @@ void ConnectorImpl::assessLink(LinkTrainingType trainType)
timer->sleep(40);
} while (retryCount++ < WAR_MAX_REASSESS_ATTEMPT);
if (!activeLinkConfig.isValid() && !(this->bDisable5019537Fix))
{
if (groupAttached && groupAttached->lastModesetInfo.pixelClockHz != 0)
{
// If there is no active link, force LT to max before disable flush
lConfig = _maxLinkConfig;
train(lConfig, true);
}
}
disableFlush();
}
@ -4898,9 +4925,9 @@ bool ConnectorImpl::trainLinkOptimized(LinkConfiguration lConfig)
bool bTwoHeadOneOrLinkRetrain = false; // force link re-train if any attached
// groups are in 2Head1OR mode.
// Power off the link if no stream are active
if (isNoActiveStreamAndPowerdown())
{
DP_PRINTF(DP_INFO, "Power off the link because no stream are active");
return true;
}
@ -5059,7 +5086,7 @@ bool ConnectorImpl::trainLinkOptimized(LinkConfiguration lConfig)
bSkipLt = false;
}
if (groupAttached && groupAttached->isHeadAttached())
if ((groupAttached && groupAttached->isHeadAttached()) || !(this->bDisable5019537Fix))
{
// Enter flush mode/detach head before LT
if (!bSkipLt)
@ -5077,7 +5104,7 @@ bool ConnectorImpl::trainLinkOptimized(LinkConfiguration lConfig)
if (!bLinkTrainingSuccessful && bSkipLt)
{
bSkipLt = false;
if (groupAttached && groupAttached->isHeadAttached())
if ((groupAttached && groupAttached->isHeadAttached()) || !(this->bDisable5019537Fix))
{
if (!(bEnteredFlushMode = this->enableFlush()))
return false;
@ -5087,36 +5114,37 @@ bool ConnectorImpl::trainLinkOptimized(LinkConfiguration lConfig)
if (!bLinkTrainingSuccessful)
{
LinkConfiguration maxLinkConfig = getMaxLinkConfig();
//
// If optimized link config fails, try max link config with fallback.
if (!train(maxLinkConfig, false))
// Note: It's possible some link rates are dynamically invalidated
// during failed link training. That means we can't assume
// maxLinkConfig is always greater than the lowestSelected
// link configuration.
//
train(maxLinkConfig, false);
//
// Note here that fallback might happen while attempting LT to max link config.
// activeLinkConfig will be set to that passing config.
//
if (!willLinkSupportModeSST(activeLinkConfig, groupAttached->lastModesetInfo))
{
//
// Note here that if highest link config fails and a lower
// link config passes, link training will be returned as
// failure but activeLinkConfig will be set to that passing config.
// If none of the link configs pass LT or a fall back link config passed LT
// but cannot support the mode, then we will force the optimized link config
// on the link and mark LT as fail.
//
if (!willLinkSupportModeSST(activeLinkConfig, groupAttached->lastModesetInfo))
{
//
// If none of the link configs pass LT or a fall back link config passed LT
// but cannot support the mode, then we will force the optimized link config
// on the link and mark LT as fail.
//
train(lowestSelected, true);
bLinkTrainingSuccessful = false;
}
else
{
//
// If a fallback link config pass LT and can support
// the mode, mark LT as pass.
//
bLinkTrainingSuccessful = true;
}
// Force LT really should not fail!
DP_ASSERT(train(lowestSelected, true));
bLinkTrainingSuccessful = false;
}
else
{
// If LT passes at max link config, mark LT as pass.
//
// If a fallback link config pass LT and can support
// the mode, mark LT as pass.
//
bLinkTrainingSuccessful = true;
}
}
@ -5798,10 +5826,15 @@ bool ConnectorImpl::enableFlush()
return false;
//
// Enabling flush mode shuts down the link, so the next link training
// call must not skip programming the hardware. Otherwise, EVO will
// hang if the head is still active when flush mode is disabled.
// Enabling flush mode shuts down the link:
// 1. reset activeLinkConfig to indicate the link is now lost.
// 2. The next link training call must not skip programming the hardware.
// Otherwise, EVO will hang if the head is still active when flush mode is disabled.
//
if (!this->bDisable5019537Fix)
{
activeLinkConfig = LinkConfiguration();
}
bSkipLt = false;
sortActiveGroups(false);
@ -6826,6 +6859,10 @@ void ConnectorImpl::notifyLongPulseInternal(bool statusConnected)
{
preferredLinkConfig.multistream = false;
}
if (AuxRetry::ack != hal->setMessagingEnable(false, true))
{
DP_PRINTF(DP_WARNING, "DP> Failed to clear messaging for singlestream panel");
}
// We will report a dongle as new device with videoSink flag as false.
if (hal->getSinkCount() == 0)
@ -7273,6 +7310,28 @@ void ConnectorImpl::notifyShortPulse()
//save the previous highest assessed LC
LinkConfiguration previousAssessedLC = highestAssessedLC;
if (main->isConnectorUSBTypeC() &&
activeLinkConfig.bIs128b132bChannelCoding &&
activeLinkConfig.peakRate > dp2LinkRate_10_0Gbps)
{
if (activeLinkConfig.isValid() && enableFlush())
{
train(activeLinkConfig, true);
disableFlush();
}
main->invalidateLinkRatesInFallbackTable(activeLinkConfig.peakRate);
hal->overrideCableIdCap(activeLinkConfig.peakRate, false);
highestAssessedLC = getMaxLinkConfig();
DeviceImpl * dev = findDeviceInList(Address());
if (dev)
{
sink->bandwidthChangeNotification(dev, false);
}
return;
}
if (activeLinkConfig.isValid() && enableFlush())
{
LinkConfiguration originalActiveLinkConfig = activeLinkConfig;
@ -8150,6 +8209,7 @@ void ConnectorImpl::configInit()
allocatedDpTunnelBw = 0;
allocatedDpTunnelBwShadow = 0;
bDP2XPreferNonDSCForLowPClk = false;
bForceHeadShutdownPerMonitor = false;
}
bool ConnectorImpl::dpUpdateDscStream(Group *target, NvU32 dscBpp)

View File

@ -72,6 +72,9 @@ bool ConnectorImpl2x::willLinkSupportModeSST
const DscParams *pDscParams
)
{
LinkConfiguration lc = linkConfig;
if (!main->isSupportedDPLinkConfig(lc))
return false;
// no headIndex (default 0) for mode enumeration.
return willLinkSupportMode(linkConfig, modesetInfo, 0, NULL, pDscParams);
}
@ -464,33 +467,55 @@ bool ConnectorImpl2x::compoundQueryAttachMSTGeneric(Group * target,
return false;
}
for(Device * d = target->enumDevices(0); d; d = target->enumDevices(d))
if (!hal->isDp2xChannelCodingCapable())
{
DeviceImpl * i = (DeviceImpl *)d;
// Allocate bandwidth for the entire path to the root
// NOTE: Above we're already handle the local link
DeviceImpl * tail = i;
while (tail && tail->getParent())
for(Device * d = target->enumDevices(0); d; d = target->enumDevices(d))
{
// Have we already accounted for this stream?
DeviceImpl * i = (DeviceImpl *)d;
// Allocate bandwidth for the entire path to the root
// NOTE: Above we're already handle the local link
DeviceImpl * tail = i;
while (tail && tail->getParent())
{
// Have we already accounted for this stream?
if (!(tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex & (1 << compoundQueryCount)))
{
tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex |= (1 << compoundQueryCount);
LinkConfiguration * linkConfig = tail->inferLeafLink(NULL);
tail->bandwidth.compound_query_state.timeslots_used_by_query += linkConfig->slotsForPBN(base_pbn);
if (tail->bandwidth.compound_query_state.timeslots_used_by_query >
tail->bandwidth.compound_query_state.totalTimeSlots)
{
compoundQueryResult = false;
SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_INSUFFICIENT_BANDWIDTH)
}
}
tail = (DeviceImpl*)tail->getParent();
}
}
}
else
{
for(Device * d = target->enumDevices(0); d; d = target->enumDevices(d))
{
DeviceImpl * tail = (DeviceImpl *)d;
if (!(tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex & (1 << compoundQueryCount)))
{
tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex |= (1 << compoundQueryCount);
tail->inferPathConstraints();
LinkConfiguration * linkConfig = tail->inferLeafLink(NULL);
tail->bandwidth.compound_query_state.timeslots_used_by_query += linkConfig->slotsForPBN(base_pbn);
if (tail->bandwidth.compound_query_state.timeslots_used_by_query >
tail->bandwidth.compound_query_state.totalTimeSlots)
if (slots_pbn > DP_MIN(tail->bandwidth.enum_path.total, tail->bandwidth.enum_path.dfpLinkAvailable))
{
compoundQueryResult = false;
SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_INSUFFICIENT_BANDWIDTH)
}
}
tail = (DeviceImpl*)tail->getParent();
}
}
return compoundQueryResult;
}
@ -1063,7 +1088,6 @@ void ConnectorImpl2x::beforeDeleteStream(GroupImpl * group, bool forFlushMode)
// Delete the stream
hal->payloadTableClearACT();
hal->payloadAllocate(group->streamIndex, group->timeslot.begin, 0);
main->triggerACT();
}
}
@ -1079,7 +1103,7 @@ void ConnectorImpl2x::afterDeleteStream(GroupImpl * group)
return ConnectorImpl::afterDeleteStream(group);
DP_ASSERT(!group->isTimeslotAllocated());
main->triggerACT();
if (group->isHeadAttached() && group->bWaitForDeAllocACT)
{
if (!hal->payloadWaitForACTReceived())
@ -1110,6 +1134,13 @@ bool ConnectorImpl2x::train(const LinkConfiguration &lConfig, bool force, LinkTr
freeSlots = maximumSlots;
firstFreeSlot = 0;
}
// Invalidate the UHBR if the connector is a USB-C to DP/USB-C.
if (!trainResult && main->isConnectorUSBTypeC() &&
lConfig.bIs128b132bChannelCoding && lConfig.peakRate > dp2LinkRate_10_0Gbps)
{
hal->overrideCableIdCap(lConfig.peakRate, false);
}
return trainResult;
}
@ -1303,6 +1334,7 @@ bool ConnectorImpl2x::enableFlush()
// call must not skip programming the hardware. Otherwise, EVO will
// hang if the head is still active when flush mode is disabled.
//
bSkipLt = false;
sortActiveGroups(false);
@ -1335,6 +1367,12 @@ bool ConnectorImpl2x::enableFlush()
return false;
}
// Reset activeLinkConfig to indicate the link is now lost
if (!this->bDisable5019537Fix)
{
activeLinkConfig = LinkConfiguration();
}
return true;
}
@ -1666,5 +1704,14 @@ void ConnectorImpl2x::handleEdidWARs(Edid & edid, DiscoveryManager::Device & dev
bDP2XPreferNonDSCForLowPClk = true;
}
}
if (edid.WARFlags.bForceHeadShutdown)
{
// SST mode
if (device.address.size() <= 1)
{
bForceHeadShutdownPerMonitor = true;
}
}
}

View File

@ -1401,6 +1401,56 @@ LinkConfiguration * DeviceImpl::inferLeafLink(unsigned * totalLinkSlots)
return &bandwidth.lastHopLinkConfig;
}
// Refresh this device's per-path bandwidth constraints by issuing an
// ENUM_PATH_RESOURCES (EPR) sideband message to its parent branch, unless
// the cached values are already up to date (availablePbnUpdated).
//
// On success the reply's Total/Free PBN, FEC capability, available stream
// count and DFP-link-available PBN are cached, and lastHopLinkConfig is
// rebuilt from the smaller of total vs. DFP-available PBN. On a
// non-retryable NAK all cached figures are zeroed. Either way the cache
// is marked valid so repeated calls are cheap.
void DeviceImpl::inferPathConstraints()
{
    if (!bandwidth.enum_path.availablePbnUpdated)
    {
        if (plugged)
        {
            NakData nack;
            // Retry a bounded number of times; only NAK reasons that
            // indicate a transient condition (defer/timeout) are retried.
            for (unsigned retries = 0; retries < 7; retries++)
            {
                // Marking the EPR as a path message in 2x
                EnumPathResMessage epr(getTopologyAddress().parent(), getTopologyAddress().tail(), false);
                bool sendStatus = connector->messageManager->send(&epr, nack);
                if (!sendStatus)
                {
                    if (nack.reason == NakDefer || nack.reason == NakTimeout)
                        continue;
                    // Hard NAK: treat the path as having no bandwidth.
                    bandwidth.enum_path.total = 0;
                    bandwidth.enum_path.free = 0;
                    bandwidth.enum_path.availableStreams = 0;
                    bandwidth.enum_path.dfpLinkAvailable = 0;
                    break;
                }
                else
                {
                    bandwidth.enum_path.total = epr.reply.TotalPBN;
                    bandwidth.enum_path.free = epr.reply.FreePBN;
                    bandwidth.enum_path.bPathFECCapable = epr.reply.bFECCapability;
                    bandwidth.enum_path.availableStreams = epr.reply.availableStreams;
                    // Include the new DFP available PBN only for 2x
                    bandwidth.enum_path.dfpLinkAvailable = epr.reply.DFPLinkAvailablePBN;
                    break;
                }
            }
        }
        else
        {
            // Unplugged device: no path bandwidth at all.
            bandwidth.enum_path.total = bandwidth.enum_path.free = bandwidth.enum_path.dfpLinkAvailable = 0;
        }
        bandwidth.enum_path.dataValid = true;
        bandwidth.enum_path.availablePbnUpdated = true;
        // Last-hop config is bounded by both the path total and what the
        // downstream-facing port's link can carry.
        bandwidth.lastHopLinkConfig = LinkConfiguration(DP_MIN(bandwidth.enum_path.total, bandwidth.enum_path.dfpLinkAvailable));

        // Update FEC support of the device after EPR
        this->getFECSupport();
    }
    return;
}
bool DeviceImpl::isActive()
{
DP_ASSERT(!activeGroup || activeGroup->isHeadAttached());

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -102,7 +102,9 @@ const struct
{NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED, &dpRegkeyDatabase.bMSTPCONCapsReadDisabled, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_DISABLE_TUNNEL_BW_ALLOCATION, &dpRegkeyDatabase.bForceDisableTunnelBwAllocation, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_DISABLE_DOWNSPREAD, &dpRegkeyDatabase.bDownspreadDisabled, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_SKIP_ZERO_OUI_CACHE, &dpRegkeyDatabase.bSkipZeroOuiCache, DP_REG_VAL_BOOL}
{NV_DP_REGKEY_SKIP_ZERO_OUI_CACHE, &dpRegkeyDatabase.bSkipZeroOuiCache, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_DISABLE_FIX_FOR_5019537, &dpRegkeyDatabase.bDisable5019537Fix, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_FORCE_HEAD_SHUTDOWN, &dpRegkeyDatabase.bForceHeadShutdown, DP_REG_VAL_BOOL}
};
EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) :

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES.
* SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES.
* All rights reserved.
* SPDX-License-Identifier: MIT
*
@ -180,7 +180,8 @@ bool EvoMainLink2x::queryAndUpdateDfpParams()
if (!EvoMainLink::queryAndUpdateDfpParams())
return false;
dfpUhbrCaps = dfpParams.UHBRSupportedByDfp;
dfpUhbrCaps = dfpParams.UHBRSupportedByDfp;
bConnectorIsUSBTypeC = FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _TYPE_C_TO_DP_CONNECTOR, _TRUE, dfpParams.flags);
return true;
}
@ -197,7 +198,7 @@ bool EvoMainLink2x::queryAndUpdateDfpParams()
* into this function.
*
* Output: EvoMainLink2x::fallbackMandateTable is updated for each entry to indicate
* is a specific link configuration is supported.
* if a specific link configuration is supported.
*
*/
void EvoMainLink2x::updateFallbackMap
@ -260,6 +261,32 @@ void EvoMainLink2x::updateFallbackMap
}
}
/*!
* @brief Invalidate all entries with specific link rate in fallbackMandateTable based on request.
*
* @param[in] linkRate The link rate to be removed.
*
* Caller of this function has to complete the capabilities probing before calling
* into this function.
*
* Output: EvoMainLink2x::fallbackMandateTable is updated for each entry to indicate
* if a specific link configuration is supported.
*/
void EvoMainLink2x::invalidateLinkRatesInFallbackTable(const LinkRate linkRate)
{
NvU32 idx;
for (idx = 0U; idx < NV_DP2X_VALID_LINK_CONFIGURATION_COUNT; idx++)
{
if (fallbackMandateTable[idx].linkRate == linkRate)
{
fallbackMandateTable[idx].bSupported = NV_FALSE;
if (fallbackMandateTable[idx].laneCount == 1)
return;
}
}
}
/*!
* @brief Update fallbackMandateTable based on the capabilities of GPU, Sink and CableId.
*
@ -412,11 +439,13 @@ bool EvoMainLink2x::train(const LinkConfiguration & link, bool force,
LinkConfiguration requestRmLC = link;
//
// Check if LinkConfiguration passed in is supported by the system
if (!isSupportedDPLinkConfig(requestRmLC))
// Skip the capability check if client forces the link training.
//
if (!force && !isSupportedDPLinkConfig(requestRmLC))
{
DP_PRINTF(DP_ERROR, "DP2xEVO> EvoMainLink2x::train(): client requested link "
"is not a supported link configuration!");
DP_PRINTF(DP_ERROR, "DP2xEVO> client requested link is not a supported link configuration!");
return false;
}
@ -444,7 +473,7 @@ bool EvoMainLink2x::train(const LinkConfiguration & link, bool force,
{
if (!resetDPRXLink(resetParam))
{
DP_PRINTF(DP_ERROR, "DP2xEVO> EvoMainLink2x::train(): Reset DP link before LT failed.");
DP_PRINTF(DP_ERROR, "DP2xEVO> Reset DP link before LT failed.");
return false;
}
}
@ -479,7 +508,7 @@ bool EvoMainLink2x::train(const LinkConfiguration & link, bool force,
if (FLD_TEST_DRF(0073_CTRL, _DP2X_ERR, _LINK_STATUS, _DISCONNECTED, ltRmParams.err))
{
DP_PRINTF(DP_ERROR, "DP2xEVO> EvoMainLink2x::train(): Link Disconnected - stop LT / Fallback.");
DP_PRINTF(DP_ERROR, "DP2xEVO> Link Disconnected - stop LT / Fallback.");
// Do not fallback if link is disconnected.
bFallback = false;
}
@ -490,13 +519,23 @@ bool EvoMainLink2x::train(const LinkConfiguration & link, bool force,
}
else
{
if (this->isConnectorUSBTypeC() &&
requestRmLC.bIs128b132bChannelCoding &&
requestRmLC.peakRate > dp2LinkRate_10_0Gbps)
{
//
// Invalidate the link rate from fallback table if the connector type is USB-C to DP.
// Source will not retry the same link rate if fallback LT fails again.
//
invalidateLinkRatesInFallbackTable(requestRmLC.peakRate);
}
//
// Get next available link configuration based on DP2.1 spec, Table 3-31
// Break here if next link configuration is not available.
//
if (!this->getFallbackForDP2xLinkTraining(&requestRmLC))
{
// No link configuration available for fallback.
DP_PRINTF(DP_ERROR, "DP2xEVO> No link configuration available for fallback");
bFallback = false;
}
@ -509,12 +548,12 @@ bool EvoMainLink2x::train(const LinkConfiguration & link, bool force,
bChannelCodingChanged = (requestRmLC.bIs128b132bChannelCoding != bCur128b132bChannelCoding);
if (bChannelCodingChanged)
{
DP_PRINTF(DP_NOTICE, "DP2xEVO> EvoMainLink2x::train(): Fallback - Reset DP link before LT.");
DP_PRINTF(DP_NOTICE, "DP2xEVO> Fallback - Reset DP link before LT.");
// Reset link due to changing the channel coding during LT
resetParam.reason = DP2X_ResetLinkForFallback;
if (!resetDPRXLink(resetParam))
{
DP_PRINTF(DP_ERROR, "DP2xEVO> EvoMainLink2x::train(): Reset DP link for fallback failed.");
DP_PRINTF(DP_ERROR, "DP2xEVO> Reset DP link for fallback failed.");
return false;
}
}
@ -907,11 +946,6 @@ bool EvoMainLink2x::getFallbackForDP2xLinkTraining(LinkConfiguration *link)
for (linkIdx = 0; linkIdx < NV_DP2X_VALID_LINK_CONFIGURATION_COUNT; linkIdx++)
{
if (!(fallbackMandateTable[linkIdx].bSupported))
{
continue;
}
if ((link->lanes == fallbackMandateTable[linkIdx].laneCount) &&
(link->peakRate == fallbackMandateTable[linkIdx].linkRate) &&
(link->bIs128b132bChannelCoding == (bool)fallbackMandateTable[linkIdx].bUseDP2xChannelCoding))

View File

@ -227,11 +227,12 @@ EnumPathResMessage::EnumPathResMessage(const Address & target, unsigned port, bo
ParseResponseStatus EnumPathResMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
{
reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
reply.availableStreams = reader->readOrDefault(3 /*Available_Streams*/, 0);
reply.bFECCapability = (reader->readOrDefault(1 /*FEC*/, 0x0) == 1) ? true : false;
reply.TotalPBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
reply.FreePBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
reply.availableStreams = reader->readOrDefault(3 /*Available_Streams*/, 0);
reply.bFECCapability = (reader->readOrDefault(1 /*FEC*/, 0x0) == 1) ? true : false;
reply.TotalPBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
reply.FreePBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
reply.DFPLinkAvailablePBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
if (this->getSinkPort() != reply.portNumber)
return ParseResponseWrong;

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -604,6 +604,15 @@ void Edid::applyEdidWorkArounds(NvU32 warFlag, const DpMonitorDenylistData *pDen
this->WARFlags.bSkipCableIdCheck = true;
DP_PRINTF(DP_NOTICE, "DP-WAR> Panel does not expose cable capability. Ignoring it. Bug 4968411");
}
else if(ProductID == 0x24b5 || ProductID == 0x32f2)
{
//
// Asus ROG PG248QP (0x24b5) Bug 5100062
// Asus ROG PG32UCDM (0x32f2) Bug 5088957
//
this->WARFlags.bForceHeadShutdown = true;
DP_PRINTF(DP_NOTICE, "DP-WAR> Force head shutdown.");
}
break;
// Samsung

View File

@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r571_57
#define NV_BUILD_BRANCH r572_46
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r571_57
#define NV_PUBLIC_BRANCH r572_46
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r570/r571_57-276"
#define NV_BUILD_CHANGELIST_NUM (35438409)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r570/r572_46-342"
#define NV_BUILD_CHANGELIST_NUM (35593155)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r570/r571_57-276"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (35438409)
#define NV_BUILD_NAME "rel/gpu_drv/r570/r572_46-342"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (35593155)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r571_57-35"
#define NV_BUILD_CHANGELIST_NUM (35438409)
#define NV_BUILD_BRANCH_VERSION "r572_46-6"
#define NV_BUILD_CHANGELIST_NUM (35593155)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "572.13"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (35438409)
#define NV_BUILD_NAME "572.60"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (35593155)
#define NV_BUILD_BRANCH_BASE_VERSION R570
#endif
// End buildmeister python edited section

View File

@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "570.86.16"
#define NV_VERSION_STRING "570.124.04"
#else

View File

@ -0,0 +1,30 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2003-2025 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __ga10b_dev_ram_h__
#define __ga10b_dev_ram_h__
#define NV_RAMIN_ENGINE_CTXSW_FW_MAGIC_VALUE (131*32+31):(131*32+0) /* RWXUF */
#define NV_RAMIN_ENGINE_CTXSW_FW_MAGIC_VALUE_V 0xcafeca11 /* RW--V */
#endif // __ga10b_dev_ram_h__

View File

@ -33,10 +33,10 @@ static const NVSWITCH_COLUMN_PORT_OFFSET_LS10 nvswitch_portmap_ls10[NVSWITCH_NUM
{ 0, 0 }, { 0, 1 }, { 0, 2 }, { 0, 3 },
{ 0, 4 }, { 0, 5 }, { 0, 6 }, { 0, 7 },
{ 0, 8 }, { 0, 9 }, { 0, 10 },
// ports 11 - 16
// ports 11 - 15
{ 2, 0 }, { 2, 3 }, { 2, 4 }, { 2, 5 },
{ 2, 8 },
//ports 16 - 26
// ports 16 - 26
{ 4, 10 }, { 4, 9 }, { 4, 8 }, { 4, 7 },
{ 4, 6 }, { 4, 5 }, { 4, 4 }, { 4, 3 },
{ 4, 2 }, { 4, 1 }, { 4, 0 },
@ -297,18 +297,27 @@ _is_primary_replica
// Set the round flags to indicate the size of each multicast round.
// See IAS section "6.12. Consistent MC Semantics" for more info.
//
static void
_nvswitch_mc_set_round_flags
(
// Make sure rounds have following properties:
// - no more than 11 rounds
// - no more than 3 directives per round
// - no duplicate TCP# within same round
//
static NvlStatus
_nvswitch_mc_set_round_flags(
nvswitch_device *device,
NVSWITCH_TCP_DIRECTIVE_LS10 *port_list,
NvU32 port_list_size
)
NvU32 port_list_size)
{
NvU32 cur_portlist_pos, round_size, round_start, round_end;
NVSWITCH_TCP_DIRECTIVE_LS10 *cur_dir, *next_dir;
NvU32 round_tcp_mask = 0;
NvU32 round_count = 0;
if ((port_list == NULL) || (port_list_size == 0))
return;
{
NVSWITCH_PRINT(device, ERROR, "%s: called with empty port list?\n",
__FUNCTION__);
return -NVL_BAD_ARGS;
}
round_start = 0;
round_end = 0;
@ -329,12 +338,16 @@ _nvswitch_mc_set_round_flags
// set the round size in the first directive
cur_dir = &port_list[round_start];
cur_dir->roundSize = (NvU8)round_size;
round_count++;
}
else
{
// if next tcp is less than or equal to the current, then current is end of round
round_tcp_mask |= (1 << cur_dir->tcp);
// if next tcp is tcp that is already in current round, then current directive is end of round
// if its third directive in current round, end round regardless of tcp assignments
next_dir = &port_list[cur_portlist_pos + 1];
if (next_dir->tcp <= cur_dir->tcp)
if ((round_end - round_start == 2) || ((round_tcp_mask & (1 << next_dir->tcp)) != 0))
{
cur_dir->continueRound = NV_FALSE;
@ -347,9 +360,21 @@ _nvswitch_mc_set_round_flags
// advance round_start
round_start = cur_portlist_pos + 1;
round_tcp_mask = 0;
round_count++;
}
}
}
if (round_count > 11)
{
NVSWITCH_PRINT(device, ERROR, "%s: Round count exceeds 11: %d\n",
__FUNCTION__, round_count);
return -NVL_BAD_ARGS;
}
return NVL_SUCCESS;
}
//
@ -359,8 +384,8 @@ _nvswitch_mc_set_round_flags
static void
_nvswitch_mc_set_port_flags
(
NVSWITCH_TCP_DIRECTIVE_LS10 *port_list,
NvU32 port_list_size
NVSWITCH_TCP_DIRECTIVE_LS10 *port_list,
NvU32 port_list_size
)
{
NvU32 cur_portlist_pos;
@ -766,7 +791,9 @@ nvswitch_mc_build_mcp_list_ls10
NVSWITCH_PRINT(device, INFO, "%s: entries used after building portlist: %d\n",
__FUNCTION__, dir_entries_used_sg);
#endif
_nvswitch_mc_set_round_flags(tmp_mcp_list, dir_entries_used_sg);
ret = _nvswitch_mc_set_round_flags(device, tmp_mcp_list, dir_entries_used_sg);
if (ret != NVL_SUCCESS)
return ret;
_nvswitch_mc_set_port_flags(tmp_mcp_list, dir_entries_used_sg);

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2005-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -86,6 +86,8 @@
* NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE
* This specifies whether the displayId is capable of sending
* YCBCR444 color format out from the board.
* NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR
 * This specifies whether the displayId is a DP connector routed to a USB Type-C port.
* NV0073_CTRL_DFP_FLAGS_DP_LINK_BANDWIDTH
* This specifies max link rate supported by the displayId, if the DFP is
* display port.
@ -161,6 +163,9 @@ typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR 13:13
#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_FALSE (0x00000000U)
#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_TRUE (0x00000001U)
#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -449,6 +449,7 @@ typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C 0x00000071U
#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DSI 0x00000072U
#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_STEREO_3PIN_DIN 0x00000073U
#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C_UHBR 0x00000074U
#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_UNKNOWN 0xFFFFFFFFU
/* defines for the platform field */
@ -1994,9 +1995,9 @@ typedef struct NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS {
/*
* NV0073_CTRL_CMD_SPECIFIC_GET_DISPLAY_BRIGHTNESS_LTM
* NV0073_CTRL_CMD_SPECIFIC_SET_DISPLAY_BRIGHTNESS_LTM
* These commands retrieve and set the brightness level and Local Tone Mapping (LTM) settings for
* These commands retrieve and set the brightness level and Local Tone Mapping (LTM) settings for
* the specified display.
*
*
* subDeviceInstance
* This parameter specifies the subdevice instance within the
* NV04_DISPLAY_COMMON parent device to which the operation should be

View File

@ -5252,4 +5252,13 @@ typedef struct NV2080_CTRL_INTERNAL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS {
#define NV2080_CTRL_CMD_INTERNAL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK (0x20800aabU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS_MESSAGE_ID" */
/*
* NV2080_CTRL_CMD_INTERNAL_NVLINK_REPLAY_SUPPRESSED_ERRORS
*
 * Request from CPU-RM to process suppressed errors during boot on GSP
* This command accepts no parameters.
*/
#define NV2080_CTRL_CMD_INTERNAL_NVLINK_REPLAY_SUPPRESSED_ERRORS (0x20800b01U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_2_INTERFACE_ID << 8) | 0x1" */
/* ctrl2080internal_h */

View File

@ -380,6 +380,8 @@ typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_PMGR;
#define FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID (0x20800aU)
typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_INTERNAL;
#define FINN_NV20_SUBDEVICE_0_INTERNAL_2_INTERFACE_ID (0x20800bU)
typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_INTERNAL_2;
#define FINN_NV20_SUBDEVICE_0_LPWR_INTERFACE_ID (0x208028U)
typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_LPWR;

View File

@ -675,6 +675,22 @@ ENTRY(0x2329, 0x2034, 0x10de, "NVIDIA H20-48C"),
ENTRY(0x2329, 0x2035, 0x10de, "NVIDIA H20-96C"),
ENTRY(0x2329, 0x2047, 0x10de, "NVIDIA H20-8C"),
ENTRY(0x2329, 0x2048, 0x10de, "NVIDIA H20-32C"),
ENTRY(0x232C, 0x2108, 0x10de, "NVIDIA H20X-1-18CME"),
ENTRY(0x232C, 0x2109, 0x10de, "NVIDIA H20X-1-18C"),
ENTRY(0x232C, 0x210A, 0x10de, "NVIDIA H20X-1-35C"),
ENTRY(0x232C, 0x210B, 0x10de, "NVIDIA H20X-2-35C"),
ENTRY(0x232C, 0x210C, 0x10de, "NVIDIA H20X-3-71C"),
ENTRY(0x232C, 0x210D, 0x10de, "NVIDIA H20X-4-71C"),
ENTRY(0x232C, 0x210E, 0x10de, "NVIDIA H20X-7-141C"),
ENTRY(0x232C, 0x210F, 0x10de, "NVIDIA H20X-4C"),
ENTRY(0x232C, 0x2110, 0x10de, "NVIDIA H20X-7C"),
ENTRY(0x232C, 0x2111, 0x10de, "NVIDIA H20X-8C"),
ENTRY(0x232C, 0x2112, 0x10de, "NVIDIA H20X-14C"),
ENTRY(0x232C, 0x2113, 0x10de, "NVIDIA H20X-17C"),
ENTRY(0x232C, 0x2114, 0x10de, "NVIDIA H20X-28C"),
ENTRY(0x232C, 0x2115, 0x10de, "NVIDIA H20X-35C"),
ENTRY(0x232C, 0x2116, 0x10de, "NVIDIA H20X-70C"),
ENTRY(0x232C, 0x2117, 0x10de, "NVIDIA H20X-141C"),
ENTRY(0x2330, 0x187A, 0x10de, "NVIDIA H100XM-1-10CME"),
ENTRY(0x2330, 0x187B, 0x10de, "NVIDIA H100XM-1-10C"),
ENTRY(0x2330, 0x187C, 0x10de, "NVIDIA H100XM-1-20C"),
@ -705,13 +721,13 @@ ENTRY(0x2331, 0x16DE, 0x10de, "NVIDIA H100-80C"),
ENTRY(0x2331, 0x1798, 0x10de, "NVIDIA H100-5C"),
ENTRY(0x2331, 0x17F0, 0x10de, "NVIDIA H100-1-10CME"),
ENTRY(0x2331, 0x1844, 0x10de, "NVIDIA H100-1-20C"),
ENTRY(0x2335, 0x206E, 0x10de, "NVIDIA H200X-1-17CME"),
ENTRY(0x2335, 0x206F, 0x10de, "NVIDIA H200X-1-17C"),
ENTRY(0x2335, 0x206E, 0x10de, "NVIDIA H200X-1-18CME"),
ENTRY(0x2335, 0x206F, 0x10de, "NVIDIA H200X-1-18C"),
ENTRY(0x2335, 0x2070, 0x10de, "NVIDIA H200X-1-35C"),
ENTRY(0x2335, 0x2071, 0x10de, "NVIDIA H200X-2-35C"),
ENTRY(0x2335, 0x2072, 0x10de, "NVIDIA H200X-3-70C"),
ENTRY(0x2335, 0x2073, 0x10de, "NVIDIA H200X-4-70C"),
ENTRY(0x2335, 0x2074, 0x10de, "NVIDIA H200X-7-140C"),
ENTRY(0x2335, 0x2072, 0x10de, "NVIDIA H200X-3-71C"),
ENTRY(0x2335, 0x2073, 0x10de, "NVIDIA H200X-4-71C"),
ENTRY(0x2335, 0x2074, 0x10de, "NVIDIA H200X-7-141C"),
ENTRY(0x2335, 0x2075, 0x10de, "NVIDIA H200X-4C"),
ENTRY(0x2335, 0x2076, 0x10de, "NVIDIA H200X-7C"),
ENTRY(0x2335, 0x2077, 0x10de, "NVIDIA H200X-8C"),
@ -720,7 +736,7 @@ ENTRY(0x2335, 0x2079, 0x10de, "NVIDIA H200X-17C"),
ENTRY(0x2335, 0x207A, 0x10de, "NVIDIA H200X-28C"),
ENTRY(0x2335, 0x207B, 0x10de, "NVIDIA H200X-35C"),
ENTRY(0x2335, 0x207E, 0x10de, "NVIDIA H200X-70C"),
ENTRY(0x2335, 0x207F, 0x10de, "NVIDIA H200X-140C"),
ENTRY(0x2335, 0x207F, 0x10de, "NVIDIA H200X-141C"),
ENTRY(0x2337, 0x18F2, 0x10de, "NVIDIA H100XS-1-8CME"),
ENTRY(0x2337, 0x18F3, 0x10de, "NVIDIA H100XS-1-8C"),
ENTRY(0x2337, 0x18F4, 0x10de, "NVIDIA H100XS-1-16C"),
@ -761,13 +777,13 @@ ENTRY(0x233A, 0x186B, 0x10de, "NVIDIA H800L-15C"),
ENTRY(0x233A, 0x186C, 0x10de, "NVIDIA H800L-23C"),
ENTRY(0x233A, 0x186D, 0x10de, "NVIDIA H800L-47C"),
ENTRY(0x233A, 0x186E, 0x10de, "NVIDIA H800L-94C"),
ENTRY(0x233B, 0x2081, 0x10de, "NVIDIA H200-1-17CME"),
ENTRY(0x233B, 0x2082, 0x10de, "NVIDIA H200-1-17C"),
ENTRY(0x233B, 0x2081, 0x10de, "NVIDIA H200-1-18CME"),
ENTRY(0x233B, 0x2082, 0x10de, "NVIDIA H200-1-18C"),
ENTRY(0x233B, 0x2083, 0x10de, "NVIDIA H200-1-35C"),
ENTRY(0x233B, 0x2084, 0x10de, "NVIDIA H200-2-35C"),
ENTRY(0x233B, 0x2085, 0x10de, "NVIDIA H200-3-70C"),
ENTRY(0x233B, 0x2086, 0x10de, "NVIDIA H200-4-70C"),
ENTRY(0x233B, 0x2087, 0x10de, "NVIDIA H200-7-140C"),
ENTRY(0x233B, 0x2085, 0x10de, "NVIDIA H200-3-71C"),
ENTRY(0x233B, 0x2086, 0x10de, "NVIDIA H200-4-71C"),
ENTRY(0x233B, 0x2087, 0x10de, "NVIDIA H200-7-141C"),
ENTRY(0x233B, 0x2088, 0x10de, "NVIDIA H200-4C"),
ENTRY(0x233B, 0x2089, 0x10de, "NVIDIA H200-7C"),
ENTRY(0x233B, 0x208A, 0x10de, "NVIDIA H200-8C"),
@ -776,7 +792,7 @@ ENTRY(0x233B, 0x208C, 0x10de, "NVIDIA H200-17C"),
ENTRY(0x233B, 0x208D, 0x10de, "NVIDIA H200-28C"),
ENTRY(0x233B, 0x208E, 0x10de, "NVIDIA H200-35C"),
ENTRY(0x233B, 0x208F, 0x10de, "NVIDIA H200-70C"),
ENTRY(0x233B, 0x2090, 0x10de, "NVIDIA H200-140C"),
ENTRY(0x233B, 0x2090, 0x10de, "NVIDIA H200-141C"),
ENTRY(0x2342, 0x18C2, 0x10de, "NVIDIA GH200-1-12CME"),
ENTRY(0x2342, 0x18C3, 0x10de, "NVIDIA GH200-1-12C"),
ENTRY(0x2342, 0x18C4, 0x10de, "NVIDIA GH200-1-24C"),
@ -1146,6 +1162,14 @@ ENTRY(0x27B8, 0x174A, 0x10de, "NVIDIA L4-6C"),
ENTRY(0x27B8, 0x174B, 0x10de, "NVIDIA L4-8C"),
ENTRY(0x27B8, 0x174C, 0x10de, "NVIDIA L4-12C"),
ENTRY(0x27B8, 0x174D, 0x10de, "NVIDIA L4-24C"),
ENTRY(0x2941, 0x20CB, 0x10de, "NVIDIA GB200-1-24CME"),
ENTRY(0x2941, 0x20CC, 0x10de, "NVIDIA GB200-1-24C"),
ENTRY(0x2941, 0x20CD, 0x10de, "NVIDIA GB200-1-47C"),
ENTRY(0x2941, 0x20CE, 0x10de, "NVIDIA GB200-2-47C"),
ENTRY(0x2941, 0x20CF, 0x10de, "NVIDIA GB200-3-95C"),
ENTRY(0x2941, 0x20D0, 0x10de, "NVIDIA GB200-4-95C"),
ENTRY(0x2941, 0x20D1, 0x10de, "NVIDIA GB200-7-189C"),
ENTRY(0x2941, 0x20D2, 0x10de, "NVIDIA GB200-189C"),
};
#endif /* _G_VGPU_CHIP_FLAGS_H_ */

View File

@ -19,10 +19,12 @@ static inline void _get_chip_id_for_alias_pgpu(NvU32 *dev_id, NvU32 *subdev_id)
{ 0x20FD, 0x17F8, 0x20F5, 0x0 },
{ 0x2324, 0x17A8, 0x2324, 0x17A6 },
{ 0x2329, 0x198C, 0x2329, 0x198B },
{ 0x232C, 0x2064, 0x232C, 0x2063 },
{ 0x2330, 0x16C0, 0x2330, 0x16C1 },
{ 0x2336, 0x16C2, 0x2330, 0x16C1 },
{ 0x2335, 0x18BF, 0x2335, 0x18BE },
{ 0x26BA, 0x1990, 0x26BA, 0x1957 },
{ 0x2941, 0x2046, 0x2941, 0x2045 },
};
for (NvU32 i = 0; i < (sizeof(vgpu_aliases) / sizeof(struct vgpu_alias_details)); ++i) {
@ -145,6 +147,13 @@ static const struct {
{0x232910DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1401}, // NVIDIA H20-3-48C
{0x232910DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1402}, // NVIDIA H20-4-48C
{0x232910DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1403}, // NVIDIA H20-7-96C
{0x232C10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1480}, // NVIDIA H20X-1-18CME
{0x232C10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1481}, // NVIDIA H20X-1-18C
{0x232C10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1482}, // NVIDIA H20X-1-35C
{0x232C10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_QUARTER_GPU , 1483}, // NVIDIA H20X-2-35C
{0x232C10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1484}, // NVIDIA H20X-3-71C
{0x232C10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1485}, // NVIDIA H20X-4-71C
{0x232C10DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1486}, // NVIDIA H20X-7-141C
{0x233010DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1130}, // NVIDIA H100XM-1-10CME
{0x233610DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1130}, // NVIDIA H100XM-1-10CME
{0x233010DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1131}, // NVIDIA H100XM-1-10C
@ -166,13 +175,13 @@ static const struct {
{0x233110DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 825}, // NVIDIA H100-7-80C
{0x233110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 993}, // NVIDIA H100-1-10CME
{0x233110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1059}, // NVIDIA H100-1-20C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1413}, // NVIDIA H200X-1-17CME
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1414}, // NVIDIA H200X-1-17C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1413}, // NVIDIA H200X-1-18CME
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1414}, // NVIDIA H200X-1-18C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1415}, // NVIDIA H200X-1-35C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_QUARTER_GPU , 1416}, // NVIDIA H200X-2-35C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1417}, // NVIDIA H200X-3-70C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1418}, // NVIDIA H200X-4-70C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1419}, // NVIDIA H200X-7-140C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1417}, // NVIDIA H200X-3-71C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1418}, // NVIDIA H200X-4-71C
{0x233510DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1419}, // NVIDIA H200X-7-141C
{0x233710DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1305}, // NVIDIA H100XS-1-8CME
{0x233710DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1306}, // NVIDIA H100XS-1-8C
{0x233710DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1307}, // NVIDIA H100XS-1-16C
@ -194,13 +203,13 @@ static const struct {
{0x233A10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1079}, // NVIDIA H800L-3-47C
{0x233A10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1080}, // NVIDIA H800L-4-47C
{0x233A10DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1081}, // NVIDIA H800L-7-94C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1429}, // NVIDIA H200-1-17CME
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1430}, // NVIDIA H200-1-17C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1429}, // NVIDIA H200-1-18CME
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1430}, // NVIDIA H200-1-18C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1431}, // NVIDIA H200-1-35C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_QUARTER_GPU , 1432}, // NVIDIA H200-2-35C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1433}, // NVIDIA H200-3-70C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1434}, // NVIDIA H200-4-70C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1435}, // NVIDIA H200-7-140C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1433}, // NVIDIA H200-3-71C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1434}, // NVIDIA H200-4-71C
{0x233B10DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1435}, // NVIDIA H200-7-141C
{0x234210DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1196}, // NVIDIA GH200-1-12CME
{0x234210DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1197}, // NVIDIA GH200-1-12C
{0x234210DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1198}, // NVIDIA GH200-1-24C
@ -215,6 +224,13 @@ static const struct {
{0x234810DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1454}, // NVIDIA GH200L-3-72C
{0x234810DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1455}, // NVIDIA GH200L-4-72C
{0x234810DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1456}, // NVIDIA GH200L-7-144C
{0x294110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _REQ_DEC_JPG_OFA, _ENABLE), 1445}, // NVIDIA GB200-1-24CME
{0x294110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU , 1446}, // NVIDIA GB200-1-24C
{0x294110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU , 1458}, // NVIDIA GB200-1-47C
{0x294110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_QUARTER_GPU , 1447}, // NVIDIA GB200-2-47C
{0x294110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU , 1459}, // NVIDIA GB200-3-95C
{0x294110DE, NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU , 1448}, // NVIDIA GB200-4-95C
{0x294110DE, NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU , 1460}, // NVIDIA GB200-7-189C
};
#endif // GENERATE_vgpuSmcTypeIdMappings

View File

@ -50,7 +50,8 @@ void nvEvo1SendHdmiInfoFrame(const NVDispEvoRec *pDispEvo,
const NvU32 head,
const NvEvoInfoFrameTransmitControl transmitCtrl,
const NVT_INFOFRAME_HEADER *pInfoFrameHeader,
const NvU32 infoframeSize);
const NvU32 infoframeSize,
NvBool needChecksum);
void nvEvo1DisableHdmiInfoFrame(const NVDispEvoRec *pDispEvo,
const NvU32 head,

View File

@ -34,7 +34,8 @@ void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo,
void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo,
struct NvKmsPerOpenDev *pOpenDev,
NvKmsSurfaceHandle surfaceHandle,
NvBool skipUpdate);
NvBool skipUpdate,
NvBool skipSync);
void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo,
struct NvKmsPerOpenDev *pOpenDev,
NvKmsSurfaceHandle surfaceHandle);
@ -49,6 +50,9 @@ void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceRefCnts(NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceRefCntsWithSync(NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo,
NvBool skipSync);
NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo);

View File

@ -3194,7 +3194,8 @@ typedef const struct _nv_evo_hal {
const NvU32 head,
const NvEvoInfoFrameTransmitControl transmitCtrl,
const NVT_INFOFRAME_HEADER *pInfoFrameHeader,
const NvU32 infoFrameSize);
const NvU32 infoFrameSize,
NvBool needChecksum);
void (*DisableHdmiInfoFrame)(const NVDispEvoRec *pDispEvo,
const NvU32 head,
const NvU8 nvtInfoFrameType);

View File

@ -2483,6 +2483,16 @@ struct NvKmsRegisterSurfaceParams {
struct NvKmsUnregisterSurfaceRequest {
NvKmsDeviceHandle deviceHandle;
NvKmsSurfaceHandle surfaceHandle;
/*
* Normally, when a surface is unregistered, nvkms will sync any
* outstanding flips to ensure the surface is no longer referenced by
* display hardware before being torn down.
*
* To improve performance with GSP firmware, when checking if this sync is
* necessary a trusted kernel-mode client who knows it is safe to do so
* may indicate to nvkms that the sync is unneeded.
*/
NvBool skipSync;
};
struct NvKmsUnregisterSurfaceReply {

View File

@ -2339,6 +2339,13 @@ static void DestroySurface
paramsUnreg.request.deviceHandle = device->hKmsDevice;
paramsUnreg.request.surfaceHandle = surface->hKmsHandle;
/*
     * Since we are unregistering this surface from KAPI, we know that this
     * primarily happens from nv_drm_framebuffer_destroy and access to this
* framebuffer has been externally synchronized, we are done with it.
* Because of that we do not need to synchronize this unregister.
*/
paramsUnreg.request.skipSync = NV_TRUE;
status = nvkms_ioctl_from_kapi(device->pKmsOpen,
NVKMS_IOCTL_UNREGISTER_SURFACE,

View File

@ -126,6 +126,7 @@
*/
typedef struct _NVDIFRStateEvoRec {
NVDevEvoPtr pDevEvo;
NvU32 copyEngineClass;
NvU32 copyEngineType;
/*
@ -471,11 +472,6 @@ static NvBool AllocDIFRCopyEngine(NVDIFRStateEvoPtr pDifr)
return NV_FALSE;
}
// XXX Disabled DIFR on GB20x due to bug 5026524 and bug 5002540
if (ceClass == BLACKWELL_DMA_COPY_B) {
return NV_FALSE;
}
pDifr->prefetchEngine = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
if (pDifr->prefetchEngine == 0) {
return NV_FALSE;
@ -496,6 +492,9 @@ static NvBool AllocDIFRCopyEngine(NVDIFRStateEvoPtr pDifr)
return NV_FALSE;
}
// For Ampere vs Blackwell+ differentiation later
pDifr->copyEngineClass = ceClass;
return NV_TRUE;
}
@ -523,6 +522,7 @@ static NvU32 PrefetchSingleSurface(NVDIFRStateEvoPtr pDifr,
const NvKmsSurfaceMemoryFormatInfo *finfo =
nvKmsGetSurfaceMemoryFormatInfo(pParams->surfFormat);
NvU32 componentSizes;
NvU32 dataTransferType;
NvU32 line_length_in;
NvU32 line_count;
NvU64 starttime;
@ -626,10 +626,21 @@ static NvU32 PrefetchSingleSurface(NVDIFRStateEvoPtr pDifr,
nvPushSetMethodData(p, line_count);
nvAssert(pParams->surfPitchBytes * line_count == pParams->surfSizeBytes);
if (pDifr->copyEngineClass != AMPERE_DMA_COPY_B) {
nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, NVCAB5_REQ_ATTR, 1);
nvPushSetMethodData
(p, DRF_DEF(CAB5, _REQ_ATTR, _PREFETCH_L2_CLASS, _EVICT_LAST));
dataTransferType = DRF_DEF(CAB5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _PREFETCH);
} else
{
dataTransferType = DRF_DEF(A0B5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _PIPELINED);
}
nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, NVA0B5_LAUNCH_DMA, 1);
nvPushSetMethodData
(p,
DRF_DEF(A0B5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _PIPELINED) |
dataTransferType |
DRF_DEF(A0B5, _LAUNCH_DMA, _FLUSH_ENABLE, _TRUE) |
DRF_DEF(A0B5, _LAUNCH_DMA, _SEMAPHORE_TYPE, _NONE) |
DRF_DEF(A0B5, _LAUNCH_DMA, _INTERRUPT_TYPE, _NONE) |

View File

@ -8978,7 +8978,8 @@ NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo)
nvEvoUnregisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev,
pDevEvo->fbConsoleSurfaceHandle,
TRUE /* skipUpdate */);
TRUE /* skipUpdate */,
FALSE /* skipSync */);
pDevEvo->fbConsoleSurfaceHandle = 0;
}

View File

@ -586,25 +586,38 @@ NvBool nvEvo1NvtToHdmiInfoFramePacketType(const NvU32 srcType, NvU8 *pDstType)
}
static NVHDMIPKT_TC EvoInfoFrameToHdmiLibTransmitCtrl(
NvEvoInfoFrameTransmitControl src)
NvEvoInfoFrameTransmitControl src,
NvBool needChecksum)
{
NVHDMIPKT_TC hdmiLibTransmitCtrl =
NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME;
switch (src) {
case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_SINGLE_FRAME:
return NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_SINGLE_FRAME;
hdmiLibTransmitCtrl = NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_SINGLE_FRAME;
break;
case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME:
return NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME;
hdmiLibTransmitCtrl = NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME;
break;
}
return NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME;
if (!needChecksum) {
hdmiLibTransmitCtrl &=
~DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN);
}
return hdmiLibTransmitCtrl;
}
void nvEvo1SendHdmiInfoFrame(const NVDispEvoRec *pDispEvo,
const NvU32 head,
const NvEvoInfoFrameTransmitControl transmitCtrl,
const NVT_INFOFRAME_HEADER *pInfoFrameHeader,
const NvU32 infoframeSize)
const NvU32 infoframeSize,
NvBool needChecksum)
{
NVHDMIPKT_TC hdmiLibTransmitCtrl =
EvoInfoFrameToHdmiLibTransmitCtrl(transmitCtrl);
EvoInfoFrameToHdmiLibTransmitCtrl(transmitCtrl, needChecksum);
const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
NVHDMIPKT_TYPE hdmiLibType;
@ -614,8 +627,6 @@ void nvEvo1SendHdmiInfoFrame(const NVDispEvoRec *pDispEvo,
NvU32 i;
const NvU8 *pPayload;
size_t headerSize;
NvBool needChecksum = (hdmiLibTransmitCtrl &
DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN));
/*
* The 'type' the timing library is not the type that the HDMI
@ -679,6 +690,11 @@ void nvEvo1SendHdmiInfoFrame(const NVDispEvoRec *pDispEvo,
nvkms_memcpy(&infoframe[1], &((const NvU8*) pInfoFrameHeader)[1],
headerSize - 1);
/*
     * XXX Redundant since needChecksum implies
* _HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW_EN via
* EvoInfoFrameToHdmiLibTransmitCtrl()?
*/
if (needChecksum) {
/* PB0: checksum */
checksum = 0;

View File

@ -2504,6 +2504,7 @@ static NvBool ConstructAdvancedInfoFramePacket(
const NVT_INFOFRAME_HEADER *pInfoFrameHeader,
const NvU32 infoframeSize,
const NvBool needChecksum,
const NvBool swChecksum,
NvU8 *pPacket,
const NvU32 packetLen)
{
@ -2535,7 +2536,7 @@ static NvBool ConstructAdvancedInfoFramePacket(
(const NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER *)
pInfoFrameHeader;
pPacket[1] = pExtMetadataHeader->type; /* HB1 */
pPacket[1] = pExtMetadataHeader->firstLast; /* HB1 */
pPacket[2] = pExtMetadataHeader->sequenceIndex; /* HB2 */
pPayload = (const NvU8 *)(pExtMetadataHeader + 1);
@ -2550,16 +2551,26 @@ static NvBool ConstructAdvancedInfoFramePacket(
}
pPacket[3] = 0; /* HB3, reserved */
nvkms_memcpy(&pPacket[5], pPayload, payloadLen); /* PB1~ */
pPacket[4] = 0; /* PB0: checksum */
if (needChecksum) {
NvU8 checksum = 0;
pPacket[4] = 0; /* PB0: checksum */
for (NvU32 i = 0; i < packetLen; i++) {
checksum += pPacket[i];
/*
* XXX Redundant since we always call with swChecksum=FALSE and
* _HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW_EN
*/
if (swChecksum) {
NvU8 checksum = 0;
for (NvU32 i = 0; i < packetLen; i++) {
checksum += pPacket[i];
}
pPacket[4] = ~checksum + 1;
}
pPacket[4] = ~checksum + 1;
nvkms_memcpy(&pPacket[5], pPayload, payloadLen); /* PB1~ */
} else {
nvAssert(!swChecksum);
nvkms_memcpy(&pPacket[4], pPayload, payloadLen); /* PB0~ */
}
return TRUE;
@ -2569,7 +2580,8 @@ static void SendHdmiInfoFrameCA(const NVDispEvoRec *pDispEvo,
const NvU32 head,
const NvEvoInfoFrameTransmitControl transmitCtrl,
const NVT_INFOFRAME_HEADER *pInfoFrameHeader,
const NvU32 infoFrameSize)
const NvU32 infoFrameSize,
NvBool needChecksum)
{
NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
NVHDMIPKT_TYPE hdmiLibType;
@ -2589,7 +2601,7 @@ static void SendHdmiInfoFrameCA(const NVDispEvoRec *pDispEvo,
if (!NvtToHdmiLibGenericInfoFramePktType(pInfoFrameHeader->type,
&hdmiLibType)) {
nvEvo1SendHdmiInfoFrame(pDispEvo, head, transmitCtrl, pInfoFrameHeader,
infoFrameSize);
infoFrameSize, needChecksum);
return;
}
@ -2602,12 +2614,12 @@ static void SendHdmiInfoFrameCA(const NVDispEvoRec *pDispEvo,
break;
}
advancedInfoFrame.location = INFOFRAME_CTRL_LOC_VBLANK;
advancedInfoFrame.hwChecksum = TRUE;
advancedInfoFrame.hwChecksum = needChecksum;
if (!ConstructAdvancedInfoFramePacket(pInfoFrameHeader,
infoFrameSize,
!advancedInfoFrame.hwChecksum
/* needChecksum */,
needChecksum,
FALSE /* swChecksum */,
packet,
sizeof(packet))) {
return;

View File

@ -421,8 +421,9 @@ static void SendVideoInfoFrame(const NVDispEvoRec *pDispEvo,
head,
NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME,
(NVT_INFOFRAME_HEADER *) &VideoInfoFrame,
/* header length */ sizeof(NVT_INFOFRAME_HEADER) +
/* payload length */ VideoInfoFrame.length);
(/* header length */ sizeof(NVT_INFOFRAME_HEADER) +
/* payload length */ VideoInfoFrame.length),
TRUE /* needChecksum */);
}
/*
@ -477,8 +478,9 @@ SendHDMI3DVendorSpecificInfoFrame(const NVDispEvoRec *pDispEvo,
head,
NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME,
&vendorInfoFrame.Header,
/* header length */ sizeof(vendorInfoFrame.Header) +
/* payload length */ vendorInfoFrame.Header.length);
(/* header length */ sizeof(vendorInfoFrame.Header) +
/* payload length */ vendorInfoFrame.Header.length),
TRUE /* needChecksum */);
}
static void
@ -541,8 +543,9 @@ SendHDRInfoFrame(const NVDispEvoRec *pDispEvo, const NvU32 head,
head,
transmitCtrl,
(NVT_INFOFRAME_HEADER *) &hdrInfoFrame.header,
/* header length */ sizeof(hdrInfoFrame.header) +
/* payload length */ hdrInfoFrame.header.length);
(/* header length */ sizeof(hdrInfoFrame.header) +
/* payload length */ hdrInfoFrame.header.length),
TRUE /* needChecksum */);
}
@ -1745,7 +1748,8 @@ void nvHdmiSetVRR(NVDispEvoPtr pDispEvo, NvU32 head, NvBool enable)
head,
transmitCtrl,
(NVT_INFOFRAME_HEADER *) &empInfoFrame,
sizeof(empInfoFrame));
sizeof(empInfoFrame),
FALSE /* needChecksum */);
}
/*

View File

@ -228,7 +228,8 @@ void nvHsFreeSurface(
nvEvoUnregisterSurface(pDevEvo,
pDevEvo->pNvKmsOpenDev,
pHsSurface->nvKmsHandle,
FALSE /* skipUpdate */);
FALSE /* skipUpdate */,
FALSE /* skipSync */);
}
nvFree(pHsSurface);
@ -1078,7 +1079,8 @@ static void FreeNotifiers(NVHsDeviceEvoRec *pHsDevice)
nvEvoUnregisterSurface(pDevEvo,
pDevEvo->pNvKmsOpenDev,
pNotifiers->nvKmsHandle,
FALSE /* skipUpdate */);
FALSE /* skipUpdate */,
FALSE /* skipSync */);
pNotifiers->pSurfaceEvo = NULL;
}

View File

@ -45,7 +45,8 @@ static void FreeLutSurfaceEvo(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfEvo)
nvEvoUnregisterSurface(pDevEvo,
pDevEvo->pNvKmsOpenDev,
pSurfEvo->owner.surfaceHandle,
TRUE /* skipUpdate */);
TRUE /* skipUpdate */,
FALSE /* skipSync */);
}
static NVSurfaceEvoPtr RegisterLutSurfaceEvo(NVDevEvoPtr pDevEvo, NvU32 memoryHandle)

View File

@ -4765,7 +4765,8 @@ void nvRmUnmapFbConsoleMemory(NVDevEvoPtr pDevEvo)
// Free the NVKMS surface.
nvEvoUnregisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev,
pDevEvo->fbConsoleSurfaceHandle,
TRUE /* skipUpdate */);
TRUE /* skipUpdate */,
FALSE /* skipSync */);
pDevEvo->fbConsoleSurfaceHandle = 0;
}

View File

@ -1068,7 +1068,8 @@ void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo,
void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo,
struct NvKmsPerOpenDev *pOpenDev,
NvKmsSurfaceHandle surfaceHandle,
NvBool skipUpdate)
NvBool skipUpdate,
NvBool skipSync)
{
NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
nvGetSurfaceHandlesFromOpenDev(pOpenDev);
@ -1102,7 +1103,7 @@ void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo,
/* Remove the handle from the calling client's namespace. */
nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
nvEvoDecrementSurfaceRefCntsWithSync(pDevEvo, pSurfaceEvo, skipSync);
}
void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo,
@ -1142,6 +1143,13 @@ void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo)
/*
 * Drop one reference on pSurfaceEvo, keeping the original (synchronizing)
 * behavior: delegates to nvEvoDecrementSurfaceRefCntsWithSync() with
 * skipSync = NV_FALSE, so any required display-hardware sync still happens.
 */
void nvEvoDecrementSurfaceRefCnts(NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo)
{
nvEvoDecrementSurfaceRefCntsWithSync(pDevEvo, pSurfaceEvo, NV_FALSE);
}
void nvEvoDecrementSurfaceRefCntsWithSync(NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo,
NvBool skipSync)
{
nvAssert(pSurfaceEvo->rmRefCnt >= 1);
pSurfaceEvo->rmRefCnt--;
@ -1154,7 +1162,7 @@ void nvEvoDecrementSurfaceRefCnts(NVDevEvoPtr pDevEvo,
* GLS hasn't had the opportunity to release semaphores with pending
* flips. (Bug 2050970)
*/
if (pSurfaceEvo->requireDisplayHardwareAccess) {
if (!skipSync && pSurfaceEvo->requireDisplayHardwareAccess) {
nvEvoClearSurfaceUsage(pDevEvo, pSurfaceEvo);
}

View File

@ -2720,9 +2720,16 @@ static NvBool UnregisterSurface(struct NvKmsPerOpen *pOpen,
return FALSE;
}
/* Fail the ioctl if a non-privileged client sets this */
if (pOpen->clientType != NVKMS_CLIENT_KERNEL_SPACE &&
pParams->request.skipSync) {
return FALSE;
}
nvEvoUnregisterSurface(pOpenDev->pDevEvo, pOpenDev,
pParams->request.surfaceHandle,
FALSE /* skipUpdate */);
FALSE /* skipUpdate */,
pParams->request.skipSync);
return TRUE;
}

View File

@ -47,6 +47,7 @@
#include "mem_mgr/io_vaspace.h"
#include <diagnostics/journal.h>
#include "kernel/diagnostics/xid_context.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "gpu/mem_mgr/mem_mgr.h"
#include "core/thread_state.h"
@ -1661,7 +1662,7 @@ void osFlushGpuCoherentCpuCacheRange
nv_flush_coherent_cpu_cache_range(pOsGpuInfo, cpuVirtual, size);
}
void osErrorLogV(OBJGPU *pGpu, NvU32 num, const char * pFormat, va_list arglist)
void osErrorLogV(OBJGPU *pGpu, XidContext context, const char * pFormat, va_list arglist)
{
NV_STATUS rmStatus;
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
@ -1671,7 +1672,7 @@ void osErrorLogV(OBJGPU *pGpu, NvU32 num, const char * pFormat, va_list arglist)
return;
}
rmStatus = nv_log_error(nv, num, pFormat, arglist);
rmStatus = nv_log_error(nv, context.xid, pFormat, arglist);
NV_ASSERT(rmStatus == NV_OK);
}
@ -1679,7 +1680,7 @@ void osErrorLog(OBJGPU *pGpu, NvU32 num, const char* pFormat, ...)
{
va_list arglist;
va_start(arglist, pFormat);
osErrorLogV(pGpu, num, pFormat, arglist);
osErrorLogV(pGpu, (XidContext){.xid = num}, pFormat, arglist);
va_end(arglist);
}

View File

@ -1028,14 +1028,21 @@ static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
pThis->__gpuGetRegBaseOffset__ = &gpuGetRegBaseOffset_FWCLIENT;
}
// gpuHandleSanityCheckRegReadError -- halified (2 hals) body
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
// gpuHandleSanityCheckRegReadError -- halified (3 hals) body
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__gpuHandleSanityCheckRegReadError__ = &gpuHandleSanityCheckRegReadError_GM107;
pThis->__gpuHandleSanityCheckRegReadError__ = &gpuHandleSanityCheckRegReadError_b3696a;
}
else
{
pThis->__gpuHandleSanityCheckRegReadError__ = &gpuHandleSanityCheckRegReadError_GH100;
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__gpuHandleSanityCheckRegReadError__ = &gpuHandleSanityCheckRegReadError_GM107;
}
else
{
pThis->__gpuHandleSanityCheckRegReadError__ = &gpuHandleSanityCheckRegReadError_GH100;
}
}
// gpuHandleSecFault -- halified (5 hals) body
@ -1615,7 +1622,7 @@ static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
{
pThis->__gpuGetIsCmpSku__ = &gpuGetIsCmpSku_72a2e1;
}
} // End __nvoc_init_funcTable_OBJGPU_1 with approximately 196 basic block(s).
} // End __nvoc_init_funcTable_OBJGPU_1 with approximately 197 basic block(s).
// Initialize vtable(s) for 77 virtual method(s).

View File

@ -1031,7 +1031,7 @@ struct OBJGPU {
NvU8 (*__gpuGetChipSubRev__)(struct OBJGPU * /*this*/); // halified (2 hals) body
NV_STATUS (*__gpuGetSkuInfo__)(struct OBJGPU * /*this*/, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *); // halified (2 hals) body
NV_STATUS (*__gpuGetRegBaseOffset__)(struct OBJGPU * /*this*/, NvU32, NvU32 *); // halified (2 hals) body
void (*__gpuHandleSanityCheckRegReadError__)(struct OBJGPU * /*this*/, NvU32, NvU32); // halified (2 hals) body
void (*__gpuHandleSanityCheckRegReadError__)(struct OBJGPU * /*this*/, NvU32, NvU32); // halified (3 hals) body
void (*__gpuHandleSecFault__)(struct OBJGPU * /*this*/); // halified (5 hals) body
NV_STATUS (*__gpuSanityCheckVirtRegAccess__)(struct OBJGPU * /*this*/, NvU32); // halified (3 hals) body
const GPUCHILDPRESENT * (*__gpuGetChildrenPresent__)(struct OBJGPU * /*this*/, NvU32 *); // halified (11 hals)
@ -1199,6 +1199,7 @@ struct OBJGPU {
OS_RM_CAPS *pOsRmCaps;
NvU32 halImpl;
void *hPci;
void *hPciFn1;
GpuEngineEventNotificationList *engineNonstallIntrEventNotifications[84];
NvBool bIsSOC;
NvU32 gpuInstance;
@ -1255,6 +1256,7 @@ struct OBJGPU {
NvU32 masterFromSLIConfig;
NvU32 sliStatus;
NvBool bIsRTD3Gc6D3HotTransition;
NvU32 simMode;
struct OBJOS *pOS;
struct OBJHAL *pHal;
union __nvoc_inner_struc_OBJGPU_1__ children;
@ -3492,6 +3494,10 @@ NV_STATUS gpuGetRegBaseOffset_FWCLIENT(struct OBJGPU *pGpu, NvU32 arg2, NvU32 *a
NV_STATUS gpuGetRegBaseOffset_TU102(struct OBJGPU *pGpu, NvU32 arg2, NvU32 *arg3);
/* Generated HAL no-op variant: ignore register-read sanity-check errors
 * (selected for the VF RM variant; addr/value are intentionally unused). */
static inline void gpuHandleSanityCheckRegReadError_b3696a(struct OBJGPU *pGpu, NvU32 addr, NvU32 value) {
return;
}
void gpuHandleSanityCheckRegReadError_GM107(struct OBJGPU *pGpu, NvU32 addr, NvU32 value);
void gpuHandleSanityCheckRegReadError_GH100(struct OBJGPU *pGpu, NvU32 addr, NvU32 value);

View File

@ -513,6 +513,18 @@ NV_STATUS rpcCtrlVaspaceCopyServerReservedPdes_STUB(
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// Stub handler for the CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL RPC on configurations
// where it is not implemented: every argument is ignored and the call is
// reported back to the guest as an unknown RPC function.
NV_STATUS rpcCtrlCmdGetChipletHsCreditPool_STUB(
POBJGPU pGpu,
POBJRPC pRpc,
NvHandle arg3,
NvHandle arg4,
void *pArg5
)
{
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:CTRL_GR_CTXSW_PREEMPTION_BIND - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
NV_STATUS rpcCtrlGrCtxswPreemptionBind_STUB(
POBJGPU pGpu,
@ -537,6 +549,18 @@ NV_STATUS rpcCtrlAllocPmaStream_STUB(
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:CTRL_CMD_GET_HS_CREDITS_MAPPING - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// Stub handler for the CTRL_CMD_GET_HS_CREDITS_MAPPING RPC on configurations
// where it is not implemented: every argument is ignored and the call is
// reported back to the guest as an unknown RPC function.
NV_STATUS rpcCtrlCmdGetHsCreditsMapping_STUB(
POBJGPU pGpu,
POBJRPC pRpc,
NvHandle arg3,
NvHandle arg4,
void *pArg5
)
{
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:CTRL_RELEASE_HES - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
NV_STATUS rpcCtrlReleaseHes_STUB(
POBJGPU pGpu,
@ -561,6 +585,18 @@ NV_STATUS rpcCtrlReserveHwpmLegacy_STUB(
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:CTRL_PERF_RATED_TDP_GET_STATUS - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// Stub handler for the CTRL_PERF_RATED_TDP_GET_STATUS RPC on configurations
// where it is not implemented: every argument is ignored and the call is
// reported back to the guest as an unknown RPC function.
NV_STATUS rpcCtrlPerfRatedTdpGetStatus_STUB(
POBJGPU pGpu,
POBJRPC pRpc,
NvHandle arg3,
NvHandle arg4,
void *pArg5
)
{
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
NV_STATUS rpcCtrlSubdeviceGetVgpuHeapStats_STUB(
POBJGPU pGpu,
@ -583,18 +619,6 @@ NV_STATUS rpcCtrlInternalQuiescePmaChannel_STUB(
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:CTRL_PERF_RATED_TDP_GET_STATUS - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// NOTE(review): duplicate of the rpcCtrlPerfRatedTdpGetStatus_STUB definition
// above — this appears to be the pre-move copy shown by the diff (the stub was
// relocated earlier in the generated file); verify only one copy remains.
NV_STATUS rpcCtrlPerfRatedTdpGetStatus_STUB(
POBJGPU pGpu,
POBJRPC pRpc,
NvHandle arg3,
NvHandle arg4,
void *pArg5
)
{
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:CTRL_BUS_SET_P2P_MAPPING - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
NV_STATUS rpcCtrlBusSetP2pMapping_STUB(
POBJGPU pGpu,

View File

@ -61,8 +61,8 @@ extern "C" {
//
// Virtual BAR2 mapping info is shared by tesla and fermi code
//
#if defined(NV_UNIX) && (defined(NVCPU_X86_64) || defined(NVCPU_AARCH64))
// 64-bit Unix can support many more mappings than some other operating systems:
#if ((defined(NV_UNIX) && (defined(NVCPU_X86_64) || defined(NVCPU_AARCH64))) || (defined(NVCPU_X86_64) && defined(_WIN64)))
// 64-bit Unix and 64-bit Windows can support many more mappings than some other operating systems:
#define BUS_BAR2_MAX_MAPPINGS 200
#else
#define BUS_BAR2_MAX_MAPPINGS 50

View File

@ -326,6 +326,18 @@ static void __nvoc_init_funcTable_KernelGmmu_1(KernelGmmu *pThis, RmHalspecOwner
pThis->__kgmmuInstBlkVaLimitGet__ = &kgmmuInstBlkVaLimitGet_f03539;
}
// kgmmuInstBlkMagicValueGet -- halified (2 hals) body
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0xc0000000UL) ) ||
( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000ec1UL) )) /* ChipHal: GB100 | GB102 | GB10B | GB202 | GB203 | GB205 | GB206 | GB207 */
{
pThis->__kgmmuInstBlkMagicValueGet__ = &kgmmuInstBlkMagicValueGet_GA10B;
}
// default
else
{
pThis->__kgmmuInstBlkMagicValueGet__ = &kgmmuInstBlkMagicValueGet_46f6a7;
}
// kgmmuCheckPendingInvalidates -- halified (2 hals) body
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
{
@ -1142,10 +1154,10 @@ static void __nvoc_init_funcTable_KernelGmmu_1(KernelGmmu *pThis, RmHalspecOwner
{
pThis->__kgmmuGetFakeSparseEntry__ = &kgmmuGetFakeSparseEntry_fa6e19;
}
} // End __nvoc_init_funcTable_KernelGmmu_1 with approximately 161 basic block(s).
} // End __nvoc_init_funcTable_KernelGmmu_1 with approximately 163 basic block(s).
// Initialize vtable(s) for 93 virtual method(s).
// Initialize vtable(s) for 94 virtual method(s).
void __nvoc_init_funcTable_KernelGmmu(KernelGmmu *pThis, RmHalspecOwner *pRmhalspecowner) {
// Per-class vtable definition
@ -1193,7 +1205,7 @@ void __nvoc_init_funcTable_KernelGmmu(KernelGmmu *pThis, RmHalspecOwner *pRmhals
pThis->__nvoc_base_IntrService.__nvoc_vtable = &vtable.IntrService; // (intrserv) super
pThis->__nvoc_vtable = &vtable; // (kgmmu) this
// Initialize vtable(s) with 75 per-object function pointer(s).
// Initialize vtable(s) with 76 per-object function pointer(s).
__nvoc_init_funcTable_KernelGmmu_1(pThis, pRmhalspecowner);
}

View File

@ -14,7 +14,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION &amp; AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -415,8 +415,9 @@ struct KernelGmmu {
struct IntrService *__nvoc_pbase_IntrService; // intrserv super
struct KernelGmmu *__nvoc_pbase_KernelGmmu; // kgmmu
// Vtable with 75 per-object function pointers
// Vtable with 76 per-object function pointers
NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu * /*this*/, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *); // halified (2 hals) body
NV_STATUS (*__kgmmuInstBlkMagicValueGet__)(struct KernelGmmu * /*this*/, NvU32 *, NvU32 *); // halified (2 hals) body
NV_STATUS (*__kgmmuCheckPendingInvalidates__)(OBJGPU *, struct KernelGmmu * /*this*/, RMTIMEOUT *); // halified (2 hals) body
NV_STATUS (*__kgmmuCommitTlbInvalidate__)(OBJGPU *, struct KernelGmmu * /*this*/, TLB_INVALIDATE_PARAMS *); // halified (2 hals) body
void (*__kgmmuSetPdbToInvalidate__)(OBJGPU *, struct KernelGmmu * /*this*/, TLB_INVALIDATE_PARAMS *); // halified (2 hals) body
@ -550,8 +551,9 @@ struct KernelGmmu_PRIVATE {
struct IntrService *__nvoc_pbase_IntrService; // intrserv super
struct KernelGmmu *__nvoc_pbase_KernelGmmu; // kgmmu
// Vtable with 75 per-object function pointers
// Vtable with 76 per-object function pointers
NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu * /*this*/, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *); // halified (2 hals) body
NV_STATUS (*__kgmmuInstBlkMagicValueGet__)(struct KernelGmmu * /*this*/, NvU32 *, NvU32 *); // halified (2 hals) body
NV_STATUS (*__kgmmuCheckPendingInvalidates__)(OBJGPU *, struct KernelGmmu * /*this*/, RMTIMEOUT *); // halified (2 hals) body
NV_STATUS (*__kgmmuCommitTlbInvalidate__)(OBJGPU *, struct KernelGmmu * /*this*/, TLB_INVALIDATE_PARAMS *); // halified (2 hals) body
void (*__kgmmuSetPdbToInvalidate__)(OBJGPU *, struct KernelGmmu * /*this*/, TLB_INVALIDATE_PARAMS *); // halified (2 hals) body
@ -764,6 +766,9 @@ NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32);
#define kgmmuInstBlkVaLimitGet_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuInstBlkVaLimitGet__
#define kgmmuInstBlkVaLimitGet(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
#define kgmmuInstBlkVaLimitGet_HAL(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData)
#define kgmmuInstBlkMagicValueGet_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuInstBlkMagicValueGet__
#define kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet_DISPATCH(pKernelGmmu, pOffset, pData)
#define kgmmuInstBlkMagicValueGet_HAL(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet_DISPATCH(pKernelGmmu, pOffset, pData)
#define kgmmuCheckPendingInvalidates_FNPTR(pKernelGmmu) pKernelGmmu->__kgmmuCheckPendingInvalidates__
#define kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut) kgmmuCheckPendingInvalidates_DISPATCH(pGpu, pKernelGmmu, pTimeOut)
#define kgmmuCheckPendingInvalidates_HAL(pGpu, pKernelGmmu, pTimeOut) kgmmuCheckPendingInvalidates_DISPATCH(pGpu, pKernelGmmu, pTimeOut)
@ -1050,6 +1055,10 @@ static inline NV_STATUS kgmmuInstBlkVaLimitGet_DISPATCH(struct KernelGmmu *pKern
return pKernelGmmu->__kgmmuInstBlkVaLimitGet__(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData);
}
// NVOC dispatch wrapper: forwards to the per-object halified function pointer
// installed in the KernelGmmu vtable (GA10B implementation or the
// NV_ERR_NOT_SUPPORTED default, per the chip-hal selection).
static inline NV_STATUS kgmmuInstBlkMagicValueGet_DISPATCH(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
return pKernelGmmu->__kgmmuInstBlkMagicValueGet__(pKernelGmmu, pOffset, pData);
}
// NVOC dispatch wrapper: forwards to the per-object halified function pointer
// for checking pending TLB invalidates, passing the caller's timeout through.
static inline NV_STATUS kgmmuCheckPendingInvalidates_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut) {
return pKernelGmmu->__kgmmuCheckPendingInvalidates__(pGpu, pKernelGmmu, pTimeOut);
}
@ -1434,22 +1443,6 @@ static inline NV_STATUS kgmmuInstBlkAtsGet(struct KernelGmmu *pKernelGmmu, struc
#define kgmmuInstBlkAtsGet_HAL(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData)
// NOTE(review): this whole region is the deletion side of the diff hunk — the
// commit replaces this non-halified stub + disabled-guard macro pair with a
// halified vtable entry (see the _DISPATCH wrapper and the GA10B declaration
// added elsewhere in this change); verify it is fully removed.
// Default stub (body-hash _46f6a7 == "return NV_ERR_NOT_SUPPORTED").
static inline NV_STATUS kgmmuInstBlkMagicValueGet_46f6a7(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
return NV_ERR_NOT_SUPPORTED;
}
#ifdef __nvoc_kern_gmmu_h_disabled
// When KernelGmmu is compiled out, calling the API is a precompile-time assert
// failure followed by NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kgmmuInstBlkMagicValueGet(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet_46f6a7(pKernelGmmu, pOffset, pData)
#endif //__nvoc_kern_gmmu_h_disabled
#define kgmmuInstBlkMagicValueGet_HAL(pKernelGmmu, pOffset, pData) kgmmuInstBlkMagicValueGet(pKernelGmmu, pOffset, pData)
NV_STATUS kgmmuInstBlkPageDirBaseGet_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi);
@ -1966,6 +1959,12 @@ static inline NV_STATUS kgmmuInstBlkVaLimitGet_f03539(struct KernelGmmu *pKernel
return NV_OK;
}
NV_STATUS kgmmuInstBlkMagicValueGet_GA10B(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData);
// Default HAL body (hash _46f6a7): instance-block "magic value" is not
// supported on this chip; selected for all chips other than the GB10x family
// per the generated chip-hal table.
static inline NV_STATUS kgmmuInstBlkMagicValueGet_46f6a7(struct KernelGmmu *pKernelGmmu, NvU32 *pOffset, NvU32 *pData) {
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS kgmmuCheckPendingInvalidates_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut);
NV_STATUS kgmmuCommitTlbInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams);

View File

@ -14,7 +14,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -244,7 +244,7 @@ struct KernelBif {
void (*__kbifClearDownstreamReadCounter__)(struct OBJGPU *, struct KernelBif * /*this*/); // halified (2 hals) body
NV_STATUS (*__kbifDoSecondaryBusHotReset__)(struct OBJGPU *, struct KernelBif * /*this*/); // halified (2 hals) body
// 25 PDB properties
// 26 PDB properties
NvBool PDB_PROP_KBIF_CHECK_IF_GPU_EXISTS_DEF;
NvBool PDB_PROP_KBIF_IS_MSI_ENABLED;
NvBool PDB_PROP_KBIF_IS_MSI_CACHED;
@ -270,6 +270,7 @@ struct KernelBif {
NvBool PDB_PROP_KBIF_FORCE_PCIE_CONFIG_SAVE;
NvBool PDB_PROP_KBIF_FLR_PRE_CONDITIONING_REQUIRED;
NvBool PDB_PROP_KBIF_FLR_HANDLED_BY_OS;
NvBool PDB_PROP_KBIF_WAR_5045021_ENABLED;
// Data members
NvU32 dmaCaps;
@ -344,6 +345,8 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelBif;
#define PDB_PROP_KBIF_IS_MSI_ENABLED_BASE_NAME PDB_PROP_KBIF_IS_MSI_ENABLED
#define PDB_PROP_KBIF_FORCE_PCIE_CONFIG_SAVE_BASE_CAST
#define PDB_PROP_KBIF_FORCE_PCIE_CONFIG_SAVE_BASE_NAME PDB_PROP_KBIF_FORCE_PCIE_CONFIG_SAVE
#define PDB_PROP_KBIF_WAR_5045021_ENABLED_BASE_CAST
#define PDB_PROP_KBIF_WAR_5045021_ENABLED_BASE_NAME PDB_PROP_KBIF_WAR_5045021_ENABLED
#define PDB_PROP_KBIF_PCIE_GEN4_CAPABLE_BASE_CAST
#define PDB_PROP_KBIF_PCIE_GEN4_CAPABLE_BASE_NAME PDB_PROP_KBIF_PCIE_GEN4_CAPABLE
#define PDB_PROP_KBIF_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.

View File

@ -693,6 +693,28 @@ static void __nvoc_init_funcTable_KernelNvlink_1(KernelNvlink *pThis, RmHalspecO
pThis->__knvlinkGetHshubSupportedRbmModes__ = &knvlinkGetHshubSupportedRbmModes_46f6a7;
}
// knvlinkPostSchedulingEnableCallbackRegister -- halified (2 hals) body
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0xc0000000UL) )) /* ChipHal: GB100 | GB102 */
{
pThis->__knvlinkPostSchedulingEnableCallbackRegister__ = &knvlinkPostSchedulingEnableCallbackRegister_GB100;
}
// default
else
{
pThis->__knvlinkPostSchedulingEnableCallbackRegister__ = &knvlinkPostSchedulingEnableCallbackRegister_b3696a;
}
// knvlinkPostSchedulingEnableCallbackUnregister -- halified (2 hals) body
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0xc0000000UL) )) /* ChipHal: GB100 | GB102 */
{
pThis->__knvlinkPostSchedulingEnableCallbackUnregister__ = &knvlinkPostSchedulingEnableCallbackUnregister_GB100;
}
// default
else
{
pThis->__knvlinkPostSchedulingEnableCallbackUnregister__ = &knvlinkPostSchedulingEnableCallbackUnregister_b3696a;
}
// knvlinkGetSupportedBwMode -- halified (3 hals) body
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
@ -748,10 +770,10 @@ static void __nvoc_init_funcTable_KernelNvlink_1(KernelNvlink *pThis, RmHalspecO
{
pThis->__knvlinkGetSupportedCounters__ = &knvlinkGetSupportedCounters_46f6a7;
}
} // End __nvoc_init_funcTable_KernelNvlink_1 with approximately 76 basic block(s).
} // End __nvoc_init_funcTable_KernelNvlink_1 with approximately 80 basic block(s).
// Initialize vtable(s) for 48 virtual method(s).
// Initialize vtable(s) for 50 virtual method(s).
void __nvoc_init_funcTable_KernelNvlink(KernelNvlink *pThis, RmHalspecOwner *pRmhalspecowner) {
// Per-class vtable definition
@ -790,7 +812,7 @@ void __nvoc_init_funcTable_KernelNvlink(KernelNvlink *pThis, RmHalspecOwner *pRm
pThis->__nvoc_base_OBJENGSTATE.__nvoc_vtable = &vtable.OBJENGSTATE; // (engstate) super
pThis->__nvoc_vtable = &vtable; // (knvlink) this
// Initialize vtable(s) with 34 per-object function pointer(s).
// Initialize vtable(s) with 36 per-object function pointer(s).
__nvoc_init_funcTable_KernelNvlink_1(pThis, pRmhalspecowner);
}

View File

@ -264,7 +264,7 @@ struct KernelNvlink {
struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; // engstate super
struct KernelNvlink *__nvoc_pbase_KernelNvlink; // knvlink
// Vtable with 34 per-object function pointers
// Vtable with 36 per-object function pointers
NV_STATUS (*__knvlinkSetUniqueFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink * /*this*/, NvU64); // halified (3 hals) body
void (*__knvlinkClearUniqueFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
NV_STATUS (*__knvlinkSetUniqueFabricEgmBaseAddress__)(struct OBJGPU *, struct KernelNvlink * /*this*/, NvU64); // halified (2 hals) body
@ -294,6 +294,8 @@ struct KernelNvlink {
NvBool (*__knvlinkIsBandwidthModeOff__)(struct KernelNvlink * /*this*/); // halified (2 hals) body
NvBool (*__knvlinkIsBwModeSupported__)(struct OBJGPU *, struct KernelNvlink * /*this*/, NvU8); // halified (3 hals) body
NV_STATUS (*__knvlinkGetHshubSupportedRbmModes__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
void (*__knvlinkPostSchedulingEnableCallbackRegister__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
void (*__knvlinkPostSchedulingEnableCallbackUnregister__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
NV_STATUS (*__knvlinkGetSupportedBwMode__)(struct OBJGPU *, struct KernelNvlink * /*this*/, NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS *); // halified (3 hals) body
void (*__knvlinkDirectConnectCheck__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
NvBool (*__knvlinkIsGpuReducedNvlinkConfig__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
@ -395,7 +397,7 @@ struct KernelNvlink_PRIVATE {
struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; // engstate super
struct KernelNvlink *__nvoc_pbase_KernelNvlink; // knvlink
// Vtable with 34 per-object function pointers
// Vtable with 36 per-object function pointers
NV_STATUS (*__knvlinkSetUniqueFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink * /*this*/, NvU64); // halified (3 hals) body
void (*__knvlinkClearUniqueFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
NV_STATUS (*__knvlinkSetUniqueFabricEgmBaseAddress__)(struct OBJGPU *, struct KernelNvlink * /*this*/, NvU64); // halified (2 hals) body
@ -425,6 +427,8 @@ struct KernelNvlink_PRIVATE {
NvBool (*__knvlinkIsBandwidthModeOff__)(struct KernelNvlink * /*this*/); // halified (2 hals) body
NvBool (*__knvlinkIsBwModeSupported__)(struct OBJGPU *, struct KernelNvlink * /*this*/, NvU8); // halified (3 hals) body
NV_STATUS (*__knvlinkGetHshubSupportedRbmModes__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
void (*__knvlinkPostSchedulingEnableCallbackRegister__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
void (*__knvlinkPostSchedulingEnableCallbackUnregister__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
NV_STATUS (*__knvlinkGetSupportedBwMode__)(struct OBJGPU *, struct KernelNvlink * /*this*/, NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS *); // halified (3 hals) body
void (*__knvlinkDirectConnectCheck__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
NvBool (*__knvlinkIsGpuReducedNvlinkConfig__)(struct OBJGPU *, struct KernelNvlink * /*this*/); // halified (2 hals) body
@ -703,6 +707,12 @@ NV_STATUS __nvoc_objCreate_KernelNvlink(KernelNvlink**, Dynamic*, NvU32);
#define knvlinkGetHshubSupportedRbmModes_FNPTR(pKernelNvlink) pKernelNvlink->__knvlinkGetHshubSupportedRbmModes__
#define knvlinkGetHshubSupportedRbmModes(pGpu, pKernelNvlink) knvlinkGetHshubSupportedRbmModes_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkGetHshubSupportedRbmModes_HAL(pGpu, pKernelNvlink) knvlinkGetHshubSupportedRbmModes_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkPostSchedulingEnableCallbackRegister_FNPTR(pKernelNvlink) pKernelNvlink->__knvlinkPostSchedulingEnableCallbackRegister__
#define knvlinkPostSchedulingEnableCallbackRegister(pGpu, pKernelNvlink) knvlinkPostSchedulingEnableCallbackRegister_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkPostSchedulingEnableCallbackRegister_HAL(pGpu, pKernelNvlink) knvlinkPostSchedulingEnableCallbackRegister_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkPostSchedulingEnableCallbackUnregister_FNPTR(pKernelNvlink) pKernelNvlink->__knvlinkPostSchedulingEnableCallbackUnregister__
#define knvlinkPostSchedulingEnableCallbackUnregister(pGpu, pKernelNvlink) knvlinkPostSchedulingEnableCallbackUnregister_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkPostSchedulingEnableCallbackUnregister_HAL(pGpu, pKernelNvlink) knvlinkPostSchedulingEnableCallbackUnregister_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkGetSupportedBwMode_FNPTR(pKernelNvlink) pKernelNvlink->__knvlinkGetSupportedBwMode__
#define knvlinkGetSupportedBwMode(pGpu, pKernelNvlink, pParams) knvlinkGetSupportedBwMode_DISPATCH(pGpu, pKernelNvlink, pParams)
#define knvlinkGetSupportedBwMode_HAL(pGpu, pKernelNvlink, pParams) knvlinkGetSupportedBwMode_DISPATCH(pGpu, pKernelNvlink, pParams)
@ -878,6 +888,14 @@ static inline NV_STATUS knvlinkGetHshubSupportedRbmModes_DISPATCH(struct OBJGPU
return pKernelNvlink->__knvlinkGetHshubSupportedRbmModes__(pGpu, pKernelNvlink);
}
// NVOC dispatch wrapper: forwards to the per-object halified function pointer
// (GB100 implementation or the empty _b3696a default, per chip-hal selection).
static inline void knvlinkPostSchedulingEnableCallbackRegister_DISPATCH(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
pKernelNvlink->__knvlinkPostSchedulingEnableCallbackRegister__(pGpu, pKernelNvlink);
}
// NVOC dispatch wrapper: forwards to the per-object halified function pointer
// (GB100 implementation or the empty _b3696a default, per chip-hal selection).
static inline void knvlinkPostSchedulingEnableCallbackUnregister_DISPATCH(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
pKernelNvlink->__knvlinkPostSchedulingEnableCallbackUnregister__(pGpu, pKernelNvlink);
}
// NVOC dispatch wrapper: forwards the supported-bandwidth-mode query to the
// per-object halified function pointer, passing the control params through.
static inline NV_STATUS knvlinkGetSupportedBwMode_DISPATCH(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS *pParams) {
return pKernelNvlink->__knvlinkGetSupportedBwMode__(pGpu, pKernelNvlink, pParams);
}
@ -2038,6 +2056,18 @@ static inline NV_STATUS knvlinkGetHshubSupportedRbmModes_46f6a7(struct OBJGPU *p
return NV_ERR_NOT_SUPPORTED;
}
// Default no-op HAL body (hash _b3696a == empty body): chips without the GB100
// post-scheduling-enable callback register nothing.
static inline void knvlinkPostSchedulingEnableCallbackRegister_b3696a(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
return;
}
void knvlinkPostSchedulingEnableCallbackRegister_GB100(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink);
// Default no-op HAL body (hash _b3696a == empty body): counterpart to the
// no-op register variant; nothing to unregister on non-GB100 chips.
static inline void knvlinkPostSchedulingEnableCallbackUnregister_b3696a(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
return;
}
void knvlinkPostSchedulingEnableCallbackUnregister_GB100(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink);
NV_STATUS knvlinkGetSupportedBwMode_GB100(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS *pParams);
NV_STATUS knvlinkGetSupportedBwMode_GH100(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS *pParams);

View File

@ -14,7 +14,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -42,6 +42,7 @@ extern "C" {
#ifndef KERNEL_RC_H
#define KERNEL_RC_H 1
#include "kernel/diagnostics/xid_context.h"
#include "kernel/gpu/eng_desc.h"
#include "kernel/gpu/eng_state.h"
#include "kernel/gpu/fifo/kernel_channel.h"
@ -73,6 +74,7 @@ typedef enum {
RC_NOTIFIER_SCOPE_TSG,
} RC_NOTIFIER_SCOPE;
/*!
* Kernel interface for RC (Robust Channels) and Watchdog
*/
@ -474,14 +476,14 @@ static inline NV_STATUS krcReadVirtMem(struct OBJGPU *pGpu, struct KernelRc *pKe
#define krcReadVirtMem(pGpu, pKernelRc, pKernelChannel, virtAddr, bufPtr, bufSize) krcReadVirtMem_IMPL(pGpu, pKernelRc, pKernelChannel, virtAddr, bufPtr, bufSize)
#endif //__nvoc_kernel_rc_h_disabled
void krcReportXid_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, NvU32 exceptType, const char *pMsg);
void krcReportXid_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, XidContext context, const char *pMsg);
#ifdef __nvoc_kernel_rc_h_disabled
static inline void krcReportXid(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, NvU32 exceptType, const char *pMsg) {
static inline void krcReportXid(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, XidContext context, const char *pMsg) {
NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!");
}
#else //__nvoc_kernel_rc_h_disabled
#define krcReportXid(pGpu, pKernelRc, exceptType, pMsg) krcReportXid_IMPL(pGpu, pKernelRc, exceptType, pMsg)
#define krcReportXid(pGpu, pKernelRc, context, pMsg) krcReportXid_IMPL(pGpu, pKernelRc, context, pMsg)
#endif //__nvoc_kernel_rc_h_disabled
NvBool krcTestAllowAlloc_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, NvU32 failMask);

View File

@ -866,10 +866,6 @@ typedef struct MEMORY_DESCRIPTOR
// We verified that memdesc is safe to be mapped as large pages
NvBool bForceHugePages;
// Memory handle that libos 3+ returns for dynamically mapped sysmem
NvU32 libosRegionHandle;
NvU64 baseVirtualAddress;
// Indicates granularity of mapping. Will be used to implement dynamic page sizes.
NvU32 pageArrayGranularity;

View File

@ -5404,13 +5404,20 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x28BB, 0x0000, 0x0000, "NVIDIA RTX 500 Ada Generation Laptop GPU" },
{ 0x28E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Laptop GPU" },
{ 0x28E1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" },
{ 0x28E3, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 A Laptop GPU" },
{ 0x28F8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Embedded GPU" },
{ 0x2901, 0x1999, 0x10de, "NVIDIA B200" },
{ 0x2901, 0x199b, 0x10de, "NVIDIA B200" },
{ 0x2901, 0x20da, 0x10de, "NVIDIA B200" },
{ 0x2941, 0x2046, 0x10de, "HGX GB200" },
{ 0x2941, 0x20ca, 0x10de, "HGX GB200" },
{ 0x2941, 0x20d5, 0x10de, "HGX GB200" },
{ 0x2941, 0x21c9, 0x10de, "HGX GB200" },
{ 0x2941, 0x21ca, 0x10de, "HGX GB200" },
{ 0x2B85, 0x0000, 0x0000, "NVIDIA GeForce RTX 5090" },
{ 0x2B87, 0x0000, 0x0000, "NVIDIA GeForce RTX 5090 D" },
{ 0x2C02, 0x0000, 0x0000, "NVIDIA GeForce RTX 5080" },
{ 0x2C05, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070 Ti" },
{ 0x13BD, 0x11cc, 0x10DE, "GRID M10-0B" },
{ 0x13BD, 0x11cd, 0x10DE, "GRID M10-1B" },
{ 0x13BD, 0x11ce, 0x10DE, "GRID M10-0Q" },
@ -6065,6 +6072,22 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2329, 0x2035, 0x10DE, "NVIDIA H20-96C" },
{ 0x2329, 0x2047, 0x10DE, "NVIDIA H20-8C" },
{ 0x2329, 0x2048, 0x10DE, "NVIDIA H20-32C" },
{ 0x232C, 0x2108, 0x10DE, "NVIDIA H20X-1-18CME" },
{ 0x232C, 0x2109, 0x10DE, "NVIDIA H20X-1-18C" },
{ 0x232C, 0x210a, 0x10DE, "NVIDIA H20X-1-35C" },
{ 0x232C, 0x210b, 0x10DE, "NVIDIA H20X-2-35C" },
{ 0x232C, 0x210c, 0x10DE, "NVIDIA H20X-3-71C" },
{ 0x232C, 0x210d, 0x10DE, "NVIDIA H20X-4-71C" },
{ 0x232C, 0x210e, 0x10DE, "NVIDIA H20X-7-141C" },
{ 0x232C, 0x210f, 0x10DE, "NVIDIA H20X-4C" },
{ 0x232C, 0x2110, 0x10DE, "NVIDIA H20X-7C" },
{ 0x232C, 0x2111, 0x10DE, "NVIDIA H20X-8C" },
{ 0x232C, 0x2112, 0x10DE, "NVIDIA H20X-14C" },
{ 0x232C, 0x2113, 0x10DE, "NVIDIA H20X-17C" },
{ 0x232C, 0x2114, 0x10DE, "NVIDIA H20X-28C" },
{ 0x232C, 0x2115, 0x10DE, "NVIDIA H20X-35C" },
{ 0x232C, 0x2116, 0x10DE, "NVIDIA H20X-70C" },
{ 0x232C, 0x2117, 0x10DE, "NVIDIA H20X-141C" },
{ 0x2330, 0x187a, 0x10DE, "NVIDIA H100XM-1-10CME" },
{ 0x2330, 0x187b, 0x10DE, "NVIDIA H100XM-1-10C" },
{ 0x2330, 0x187c, 0x10DE, "NVIDIA H100XM-1-20C" },
@ -6095,13 +6118,13 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2331, 0x1798, 0x10DE, "NVIDIA H100-5C" },
{ 0x2331, 0x17f0, 0x10DE, "NVIDIA H100-1-10CME" },
{ 0x2331, 0x1844, 0x10DE, "NVIDIA H100-1-20C" },
{ 0x2335, 0x206e, 0x10DE, "NVIDIA H200X-1-17CME" },
{ 0x2335, 0x206f, 0x10DE, "NVIDIA H200X-1-17C" },
{ 0x2335, 0x206e, 0x10DE, "NVIDIA H200X-1-18CME" },
{ 0x2335, 0x206f, 0x10DE, "NVIDIA H200X-1-18C" },
{ 0x2335, 0x2070, 0x10DE, "NVIDIA H200X-1-35C" },
{ 0x2335, 0x2071, 0x10DE, "NVIDIA H200X-2-35C" },
{ 0x2335, 0x2072, 0x10DE, "NVIDIA H200X-3-70C" },
{ 0x2335, 0x2073, 0x10DE, "NVIDIA H200X-4-70C" },
{ 0x2335, 0x2074, 0x10DE, "NVIDIA H200X-7-140C" },
{ 0x2335, 0x2072, 0x10DE, "NVIDIA H200X-3-71C" },
{ 0x2335, 0x2073, 0x10DE, "NVIDIA H200X-4-71C" },
{ 0x2335, 0x2074, 0x10DE, "NVIDIA H200X-7-141C" },
{ 0x2335, 0x2075, 0x10DE, "NVIDIA H200X-4C" },
{ 0x2335, 0x2076, 0x10DE, "NVIDIA H200X-7C" },
{ 0x2335, 0x2077, 0x10DE, "NVIDIA H200X-8C" },
@ -6110,7 +6133,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2335, 0x207a, 0x10DE, "NVIDIA H200X-28C" },
{ 0x2335, 0x207b, 0x10DE, "NVIDIA H200X-35C" },
{ 0x2335, 0x207e, 0x10DE, "NVIDIA H200X-70C" },
{ 0x2335, 0x207f, 0x10DE, "NVIDIA H200X-140C" },
{ 0x2335, 0x207f, 0x10DE, "NVIDIA H200X-141C" },
{ 0x2337, 0x18f2, 0x10DE, "NVIDIA H100XS-1-8CME" },
{ 0x2337, 0x18f3, 0x10DE, "NVIDIA H100XS-1-8C" },
{ 0x2337, 0x18f4, 0x10DE, "NVIDIA H100XS-1-16C" },
@ -6151,13 +6174,13 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x233A, 0x186c, 0x10DE, "NVIDIA H800L-23C" },
{ 0x233A, 0x186d, 0x10DE, "NVIDIA H800L-47C" },
{ 0x233A, 0x186e, 0x10DE, "NVIDIA H800L-94C" },
{ 0x233B, 0x2081, 0x10DE, "NVIDIA H200-1-17CME" },
{ 0x233B, 0x2082, 0x10DE, "NVIDIA H200-1-17C" },
{ 0x233B, 0x2081, 0x10DE, "NVIDIA H200-1-18CME" },
{ 0x233B, 0x2082, 0x10DE, "NVIDIA H200-1-18C" },
{ 0x233B, 0x2083, 0x10DE, "NVIDIA H200-1-35C" },
{ 0x233B, 0x2084, 0x10DE, "NVIDIA H200-2-35C" },
{ 0x233B, 0x2085, 0x10DE, "NVIDIA H200-3-70C" },
{ 0x233B, 0x2086, 0x10DE, "NVIDIA H200-4-70C" },
{ 0x233B, 0x2087, 0x10DE, "NVIDIA H200-7-140C" },
{ 0x233B, 0x2085, 0x10DE, "NVIDIA H200-3-71C" },
{ 0x233B, 0x2086, 0x10DE, "NVIDIA H200-4-71C" },
{ 0x233B, 0x2087, 0x10DE, "NVIDIA H200-7-141C" },
{ 0x233B, 0x2088, 0x10DE, "NVIDIA H200-4C" },
{ 0x233B, 0x2089, 0x10DE, "NVIDIA H200-7C" },
{ 0x233B, 0x208a, 0x10DE, "NVIDIA H200-8C" },
@ -6166,7 +6189,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x233B, 0x208d, 0x10DE, "NVIDIA H200-28C" },
{ 0x233B, 0x208e, 0x10DE, "NVIDIA H200-35C" },
{ 0x233B, 0x208f, 0x10DE, "NVIDIA H200-70C" },
{ 0x233B, 0x2090, 0x10DE, "NVIDIA H200-140C" },
{ 0x233B, 0x2090, 0x10DE, "NVIDIA H200-141C" },
{ 0x2342, 0x18c2, 0x10DE, "NVIDIA GH200-1-12CME" },
{ 0x2342, 0x18c3, 0x10DE, "NVIDIA GH200-1-12C" },
{ 0x2342, 0x18c4, 0x10DE, "NVIDIA GH200-1-24C" },
@ -6536,6 +6559,14 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x27B8, 0x174b, 0x10DE, "NVIDIA L4-8C" },
{ 0x27B8, 0x174c, 0x10DE, "NVIDIA L4-12C" },
{ 0x27B8, 0x174d, 0x10DE, "NVIDIA L4-24C" },
{ 0x2941, 0x20cb, 0x10DE, "NVIDIA GB200-1-24CME" },
{ 0x2941, 0x20cc, 0x10DE, "NVIDIA GB200-1-24C" },
{ 0x2941, 0x20cd, 0x10DE, "NVIDIA GB200-1-47C" },
{ 0x2941, 0x20ce, 0x10DE, "NVIDIA GB200-2-47C" },
{ 0x2941, 0x20cf, 0x10DE, "NVIDIA GB200-3-95C" },
{ 0x2941, 0x20d0, 0x10DE, "NVIDIA GB200-4-95C" },
{ 0x2941, 0x20d1, 0x10DE, "NVIDIA GB200-7-189C" },
{ 0x2941, 0x20d2, 0x10DE, "NVIDIA GB200-189C" },
};
#endif // G_NV_NAME_RELEASED_H

View File

@ -14,7 +14,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -52,6 +52,7 @@ extern "C" {
#include "core/core.h"
#include "containers/btree.h"
#include "ctrl/ctrl0073/ctrl0073dfp.h"
#include "kernel/diagnostics/xid_context.h"
/* ------------------------ SDK & Interface Includes ------------------------ */
#include "nvsecurityinfo.h"
@ -892,7 +893,7 @@ NV_STATUS osDereferenceObjectCount(void *pEvent);
// osErrorLogV() call, create a copy of the va_list using va_copy().
// The caller controls the lifetime of the va_list argument, and should free it using va_end.
//
void osErrorLogV(OBJGPU *pGpu, NvU32 num, const char * pFormat, va_list arglist);
void osErrorLogV(OBJGPU *pGpu, XidContext context, const char * pFormat, va_list arglist);
void osErrorLog(OBJGPU *pGpu, NvU32 num, const char* pFormat, ...);
NV_STATUS osNvifInitialize(OBJGPU *pGpu);

View File

@ -355,12 +355,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
#endif
},
{ /* [17] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdGetChipletHsCredits_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*flags=*/ 0x48u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*flags=*/ 0x248u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0115u,
/*paramSize=*/ sizeof(NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL),
@ -370,12 +370,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
#endif
},
{ /* [18] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdGetHsCreditsMapping_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*flags=*/ 0x48u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*flags=*/ 0x248u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0116u,
/*paramSize=*/ sizeof(NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS),
@ -1014,12 +1014,12 @@ static void __nvoc_init_funcTable_ProfilerBase_1(ProfilerBase *pThis, RmHalspecO
#endif
// profilerBaseCtrlCmdGetChipletHsCredits -- exported (id=0xb0cc0115)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
pThis->__profilerBaseCtrlCmdGetChipletHsCredits__ = &profilerBaseCtrlCmdGetChipletHsCredits_IMPL;
#endif
// profilerBaseCtrlCmdGetHsCreditsMapping -- exported (id=0xb0cc0116)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
pThis->__profilerBaseCtrlCmdGetHsCreditsMapping__ = &profilerBaseCtrlCmdGetHsCreditsMapping_IMPL;
#endif

View File

@ -906,7 +906,14 @@ typedef struct rpc_ctrl_pma_stream_update_get_put_v1A_14
NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14 params;
} rpc_ctrl_pma_stream_update_get_put_v1A_14;
typedef rpc_ctrl_pma_stream_update_get_put_v1A_14 rpc_ctrl_pma_stream_update_get_put_v;
// RPC wire payload (layout version 29.0B) for the NVB0CC
// PMA-stream "update get/put" control: the client/object handles
// identify the profiler object the control is issued against, and
// params carries the versioned control parameters.
typedef struct rpc_ctrl_pma_stream_update_get_put_v29_0B
{
NvHandle hClient;
NvHandle hObject;
NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v29_0B params;
} rpc_ctrl_pma_stream_update_get_put_v29_0B;
// Generic "_v" alias resolving to this (latest) layout of the message.
typedef rpc_ctrl_pma_stream_update_get_put_v29_0B rpc_ctrl_pma_stream_update_get_put_v;
typedef struct rpc_ctrl_fb_get_info_v2_v25_0A
{
@ -1228,6 +1235,24 @@ typedef struct rpc_ctrl_release_ccu_prof_v29_07
typedef rpc_ctrl_release_ccu_prof_v29_07 rpc_ctrl_release_ccu_prof_v;
// RPC wire payload (layout version 29.0A) for the NVB0CC
// "get chiplet HS credit pool" control; handles identify the target
// profiler object, params carries the versioned control parameters.
typedef struct rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A
{
NvHandle hClient;
NvHandle hObject;
NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v29_0A params;
} rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A;
// Generic "_v" alias resolving to this (latest) layout of the message.
typedef rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v;
// RPC wire payload (layout version 29.0A) for the NVB0CC
// "get HS credits pool mapping" control; handles identify the target
// profiler object, params carries the versioned control parameters.
typedef struct rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A
{
NvHandle hClient;
NvHandle hObject;
NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v29_0A params;
} rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A;
// Generic "_v" alias resolving to this (latest) layout of the message.
typedef rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A rpc_ctrl_cmd_get_hs_credits_mapping_v;
typedef struct rpc_ctrl_set_hs_credits_v21_08
{
NvHandle hClient;
@ -1436,6 +1461,7 @@ typedef struct rpc_os_error_log_v17_00
NvU32 runlistId;
NvU32 chid;
char errString[0x100];
NvU32 preemptiveRemovalPreviousXid;
} rpc_os_error_log_v17_00;
typedef rpc_os_error_log_v17_00 rpc_os_error_log_v;
@ -5626,6 +5652,43 @@ static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_ctrl_pma_stream_update_get_put_v1A_14 =
};
#endif
#ifndef SKIP_PRINT_rpc_ctrl_pma_stream_update_get_put_v29_0B
// Debug-print field descriptors for rpc_ctrl_pma_stream_update_get_put_v29_0B:
// each entry records a field's value type and byte offset so the message can be
// decoded for tracing. Field-name strings are compiled in only on DEBUG/DEVELOP
// builds; the list is terminated by a vt_end sentinel entry.
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_ctrl_pma_stream_update_get_put_v29_0B[] = {
{
.vtype = vtype_NvHandle,
.offset = NV_OFFSETOF(rpc_ctrl_pma_stream_update_get_put_v29_0B, hClient),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "hClient"
#endif
},
{
.vtype = vtype_NvHandle,
.offset = NV_OFFSETOF(rpc_ctrl_pma_stream_update_get_put_v29_0B, hObject),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "hObject"
#endif
},
{
.vtype = vtype_NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v29_0B,
.offset = NV_OFFSETOF(rpc_ctrl_pma_stream_update_get_put_v29_0B, params),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "params"
#endif
},
{
.vtype = vt_end
}
};
// Message descriptor tying the payload size to its field-descriptor table.
static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_ctrl_pma_stream_update_get_put_v29_0B = {
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rpc_ctrl_pma_stream_update_get_put",
#endif
.header_length = sizeof(rpc_ctrl_pma_stream_update_get_put_v29_0B),
.fdesc = vmiopd_fdesc_t_rpc_ctrl_pma_stream_update_get_put_v29_0B
};
#endif
#ifndef SKIP_PRINT_rpc_ctrl_fb_get_info_v2_v27_00
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_ctrl_fb_get_info_v2_v27_00[] = {
{
@ -6972,6 +7035,80 @@ static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_ctrl_release_ccu_prof_v29_07 = {
};
#endif
#ifndef SKIP_PRINT_rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A
// Debug-print field descriptors for rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A:
// value type + byte offset per field, names only on DEBUG/DEVELOP builds,
// terminated by a vt_end sentinel.
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A[] = {
{
.vtype = vtype_NvHandle,
.offset = NV_OFFSETOF(rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A, hClient),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "hClient"
#endif
},
{
.vtype = vtype_NvHandle,
.offset = NV_OFFSETOF(rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A, hObject),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "hObject"
#endif
},
{
.vtype = vtype_NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v29_0A,
.offset = NV_OFFSETOF(rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A, params),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "params"
#endif
},
{
.vtype = vt_end
}
};
// Message descriptor tying the payload size to its field-descriptor table.
static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A = {
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rpc_ctrl_cmd_get_chiplet_hs_credit_pool",
#endif
.header_length = sizeof(rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A),
.fdesc = vmiopd_fdesc_t_rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A
};
#endif
#ifndef SKIP_PRINT_rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A
// Debug-print field descriptors for rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A:
// value type + byte offset per field, names only on DEBUG/DEVELOP builds,
// terminated by a vt_end sentinel.
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A[] = {
{
.vtype = vtype_NvHandle,
.offset = NV_OFFSETOF(rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A, hClient),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "hClient"
#endif
},
{
.vtype = vtype_NvHandle,
.offset = NV_OFFSETOF(rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A, hObject),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "hObject"
#endif
},
{
.vtype = vtype_NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v29_0A,
.offset = NV_OFFSETOF(rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A, params),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "params"
#endif
},
{
.vtype = vt_end
}
};
// Message descriptor tying the payload size to its field-descriptor table.
static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A = {
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rpc_ctrl_cmd_get_hs_credits_mapping",
#endif
.header_length = sizeof(rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A),
.fdesc = vmiopd_fdesc_t_rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A
};
#endif
#ifndef SKIP_PRINT_rpc_ctrl_set_hs_credits_v21_08
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_ctrl_set_hs_credits_v21_08[] = {
{
@ -7937,6 +8074,13 @@ static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_os_error_log_v17_00[] = {
.name = "errString"
#endif
},
{
.vtype = vtype_NvU32,
.offset = NV_OFFSETOF(rpc_os_error_log_v17_00, preemptiveRemovalPreviousXid),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "preemptiveRemovalPreviousXid"
#endif
},
{
.vtype = vt_end
}
@ -9924,6 +10068,13 @@ vmiopd_mdesc_t *rpcdebugCtrlPmaStreamUpdateGetPut_v1A_14(void)
}
#endif
#ifndef SKIP_PRINT_rpc_ctrl_pma_stream_update_get_put_v29_0B
// Accessor returning the debug-print message descriptor for the v29.0B
// PMA-stream update-get-put RPC payload.
vmiopd_mdesc_t *rpcdebugCtrlPmaStreamUpdateGetPut_v29_0B(void)
{
return &vmiopd_mdesc_t_rpc_ctrl_pma_stream_update_get_put_v29_0B;
}
#endif
#ifndef SKIP_PRINT_rpc_ctrl_fb_get_info_v2_v27_00
vmiopd_mdesc_t *rpcdebugCtrlFbGetInfoV2_v27_00(void)
{
@ -10176,6 +10327,20 @@ vmiopd_mdesc_t *rpcdebugCtrlReleaseCcuProf_v29_07(void)
}
#endif
#ifndef SKIP_PRINT_rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A
// Accessor returning the debug-print message descriptor for the v29.0A
// "get chiplet HS credit pool" RPC payload.
vmiopd_mdesc_t *rpcdebugCtrlCmdGetChipletHsCreditPool_v29_0A(void)
{
return &vmiopd_mdesc_t_rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A;
}
#endif
#ifndef SKIP_PRINT_rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A
// Accessor returning the debug-print message descriptor for the v29.0A
// "get HS credits pool mapping" RPC payload.
vmiopd_mdesc_t *rpcdebugCtrlCmdGetHsCreditsMapping_v29_0A(void)
{
return &vmiopd_mdesc_t_rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A;
}
#endif
#ifndef SKIP_PRINT_rpc_ctrl_set_hs_credits_v21_08
vmiopd_mdesc_t *rpcdebugCtrlSetHsCredits_v21_08(void)
{
@ -10795,6 +10960,7 @@ typedef union rpc_generic_union {
rpc_ctrl_alloc_pma_stream_v1A_14 ctrl_alloc_pma_stream_v1A_14;
rpc_ctrl_alloc_pma_stream_v ctrl_alloc_pma_stream_v;
rpc_ctrl_pma_stream_update_get_put_v1A_14 ctrl_pma_stream_update_get_put_v1A_14;
rpc_ctrl_pma_stream_update_get_put_v29_0B ctrl_pma_stream_update_get_put_v29_0B;
rpc_ctrl_pma_stream_update_get_put_v ctrl_pma_stream_update_get_put_v;
rpc_ctrl_fb_get_info_v2_v27_00 ctrl_fb_get_info_v2_v27_00;
rpc_ctrl_fb_get_info_v2_v25_0A ctrl_fb_get_info_v2_v25_0A;
@ -10865,6 +11031,10 @@ typedef union rpc_generic_union {
rpc_ctrl_reserve_ccu_prof_v ctrl_reserve_ccu_prof_v;
rpc_ctrl_release_ccu_prof_v29_07 ctrl_release_ccu_prof_v29_07;
rpc_ctrl_release_ccu_prof_v ctrl_release_ccu_prof_v;
rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A ctrl_cmd_get_chiplet_hs_credit_pool_v29_0A;
rpc_ctrl_cmd_get_chiplet_hs_credit_pool_v ctrl_cmd_get_chiplet_hs_credit_pool_v;
rpc_ctrl_cmd_get_hs_credits_mapping_v29_0A ctrl_cmd_get_hs_credits_mapping_v29_0A;
rpc_ctrl_cmd_get_hs_credits_mapping_v ctrl_cmd_get_hs_credits_mapping_v;
rpc_ctrl_set_hs_credits_v21_08 ctrl_set_hs_credits_v21_08;
rpc_ctrl_set_hs_credits_v ctrl_set_hs_credits_v;
rpc_ctrl_pm_area_pc_sampler_v21_0B ctrl_pm_area_pc_sampler_v21_0B;

View File

@ -108,13 +108,15 @@ typedef NV_STATUS RpcCtrlSubdeviceGetLibosHeapStats(POBJGPU, POBJRPC, void*
typedef NV_STATUS RpcCtrlDbgSetExceptionMask(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlSetZbcStencilClear(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlVaspaceCopyServerReservedPdes(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlCmdGetChipletHsCreditPool(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlGrCtxswPreemptionBind(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlAllocPmaStream(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlCmdGetHsCreditsMapping(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlReleaseHes(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlReserveHwpmLegacy(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlPerfRatedTdpGetStatus(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlSubdeviceGetVgpuHeapStats(POBJGPU, POBJRPC, void*);
typedef NV_STATUS RpcCtrlInternalQuiescePmaChannel(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlPerfRatedTdpGetStatus(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlBusSetP2pMapping(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlGpuGetInfoV2(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
typedef NV_STATUS RpcCtrlGetHsCredits(POBJGPU, POBJRPC, NvHandle, NvHandle, void*);
@ -269,13 +271,15 @@ typedef struct RPC_HAL_IFACES {
RpcCtrlDbgSetExceptionMask *rpcCtrlDbgSetExceptionMask; /* CTRL_DBG_SET_EXCEPTION_MASK */
RpcCtrlSetZbcStencilClear *rpcCtrlSetZbcStencilClear; /* CTRL_SET_ZBC_STENCIL_CLEAR */
RpcCtrlVaspaceCopyServerReservedPdes *rpcCtrlVaspaceCopyServerReservedPdes; /* CTRL_VASPACE_COPY_SERVER_RESERVED_PDES */
RpcCtrlCmdGetChipletHsCreditPool *rpcCtrlCmdGetChipletHsCreditPool; /* GET_CHIPLET_HS_CREDIT_POOL */
RpcCtrlGrCtxswPreemptionBind *rpcCtrlGrCtxswPreemptionBind; /* CTRL_GR_CTXSW_PREEMPTION_BIND */
RpcCtrlAllocPmaStream *rpcCtrlAllocPmaStream; /* CTRL_ALLOC_PMA_STREAM */
RpcCtrlCmdGetHsCreditsMapping *rpcCtrlCmdGetHsCreditsMapping; /* GET_HS_CREDITS_MAPPING */
RpcCtrlReleaseHes *rpcCtrlReleaseHes; /* RELEASE_HES */
RpcCtrlReserveHwpmLegacy *rpcCtrlReserveHwpmLegacy; /* CTRL_RESERVE_HWPM_LEGACY */
RpcCtrlPerfRatedTdpGetStatus *rpcCtrlPerfRatedTdpGetStatus; /* CTRL_PERF_RATED_TDP_GET_STATUS */
RpcCtrlSubdeviceGetVgpuHeapStats *rpcCtrlSubdeviceGetVgpuHeapStats; /* CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS */
RpcCtrlInternalQuiescePmaChannel *rpcCtrlInternalQuiescePmaChannel; /* CTRL_INTERNAL_QUIESCE_PMA_CHANNEL */
RpcCtrlPerfRatedTdpGetStatus *rpcCtrlPerfRatedTdpGetStatus; /* CTRL_PERF_RATED_TDP_GET_STATUS */
RpcCtrlBusSetP2pMapping *rpcCtrlBusSetP2pMapping; /* CTRL_BUS_SET_P2P_MAPPING */
RpcCtrlGpuGetInfoV2 *rpcCtrlGpuGetInfoV2; /* CTRL_GPU_GET_INFO_V2 */
RpcCtrlGetHsCredits *rpcCtrlGetHsCredits; /* CTRL_GET_HS_CREDITS */
@ -458,20 +462,24 @@ typedef struct RPC_HAL_IFACES {
(_pRpc)->_hal.rpcCtrlSetZbcStencilClear(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlVaspaceCopyServerReservedPdes_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlVaspaceCopyServerReservedPdes(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlCmdGetChipletHsCreditPool_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlCmdGetChipletHsCreditPool(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlGrCtxswPreemptionBind_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlGrCtxswPreemptionBind(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlAllocPmaStream_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlAllocPmaStream(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlCmdGetHsCreditsMapping_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlCmdGetHsCreditsMapping(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlReleaseHes_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlReleaseHes(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlReserveHwpmLegacy_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlReserveHwpmLegacy(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlPerfRatedTdpGetStatus_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlPerfRatedTdpGetStatus(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlSubdeviceGetVgpuHeapStats_HAL(_pGpu, _pRpc, _pArg0) \
(_pRpc)->_hal.rpcCtrlSubdeviceGetVgpuHeapStats(_pGpu, _pRpc, _pArg0)
#define rpcCtrlInternalQuiescePmaChannel_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlInternalQuiescePmaChannel(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlPerfRatedTdpGetStatus_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlPerfRatedTdpGetStatus(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlBusSetP2pMapping_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \
(_pRpc)->_hal.rpcCtrlBusSetP2pMapping(_pGpu, _pRpc, _arg0, _arg1, _pArg2)
#define rpcCtrlGpuGetInfoV2_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \

View File

@ -83,6 +83,7 @@ RpcCtrlDbgSetModeErrbarDebug rpcCtrlDbgSetModeErrbarDebug_STUB; // TU10X,
// RPC:CTRL_PMA_STREAM_UPDATE_GET_PUT
RpcCtrlPmaStreamUpdateGetPut rpcCtrlPmaStreamUpdateGetPut_v1A_14;
RpcCtrlPmaStreamUpdateGetPut rpcCtrlPmaStreamUpdateGetPut_v29_0B;
RpcCtrlPmaStreamUpdateGetPut rpcCtrlPmaStreamUpdateGetPut_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// RPC:CTRL_FABRIC_MEMORY_DESCRIBE
@ -165,6 +166,10 @@ RpcCtrlSetZbcStencilClear rpcCtrlSetZbcStencilClear_STUB; // TU10X, GA
RpcCtrlVaspaceCopyServerReservedPdes rpcCtrlVaspaceCopyServerReservedPdes_v1E_04;
RpcCtrlVaspaceCopyServerReservedPdes rpcCtrlVaspaceCopyServerReservedPdes_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// RPC:CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL
RpcCtrlCmdGetChipletHsCreditPool rpcCtrlCmdGetChipletHsCreditPool_v29_0A;
RpcCtrlCmdGetChipletHsCreditPool rpcCtrlCmdGetChipletHsCreditPool_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// RPC:CTRL_GR_CTXSW_PREEMPTION_BIND
RpcCtrlGrCtxswPreemptionBind rpcCtrlGrCtxswPreemptionBind_v1A_0E;
RpcCtrlGrCtxswPreemptionBind rpcCtrlGrCtxswPreemptionBind_v28_07;
@ -174,6 +179,10 @@ RpcCtrlGrCtxswPreemptionBind rpcCtrlGrCtxswPreemptionBind_STUB; // TU10X,
RpcCtrlAllocPmaStream rpcCtrlAllocPmaStream_v1A_14;
RpcCtrlAllocPmaStream rpcCtrlAllocPmaStream_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// RPC:CTRL_CMD_GET_HS_CREDITS_MAPPING
RpcCtrlCmdGetHsCreditsMapping rpcCtrlCmdGetHsCreditsMapping_v29_0A;
RpcCtrlCmdGetHsCreditsMapping rpcCtrlCmdGetHsCreditsMapping_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// RPC:CTRL_RELEASE_HES
RpcCtrlReleaseHes rpcCtrlReleaseHes_v29_07;
RpcCtrlReleaseHes rpcCtrlReleaseHes_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
@ -182,6 +191,10 @@ RpcCtrlReleaseHes rpcCtrlReleaseHes_STUB; // TU10X, GA100, GA
RpcCtrlReserveHwpmLegacy rpcCtrlReserveHwpmLegacy_v1A_0F;
RpcCtrlReserveHwpmLegacy rpcCtrlReserveHwpmLegacy_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// RPC:CTRL_PERF_RATED_TDP_GET_STATUS
RpcCtrlPerfRatedTdpGetStatus rpcCtrlPerfRatedTdpGetStatus_v1A_1F;
RpcCtrlPerfRatedTdpGetStatus rpcCtrlPerfRatedTdpGetStatus_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// RPC:CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS
RpcCtrlSubdeviceGetVgpuHeapStats rpcCtrlSubdeviceGetVgpuHeapStats_v28_03;
RpcCtrlSubdeviceGetVgpuHeapStats rpcCtrlSubdeviceGetVgpuHeapStats_v28_06;
@ -191,10 +204,6 @@ RpcCtrlSubdeviceGetVgpuHeapStats rpcCtrlSubdeviceGetVgpuHeapStats_STUB; // TU
RpcCtrlInternalQuiescePmaChannel rpcCtrlInternalQuiescePmaChannel_v1C_08;
RpcCtrlInternalQuiescePmaChannel rpcCtrlInternalQuiescePmaChannel_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// RPC:CTRL_PERF_RATED_TDP_GET_STATUS
RpcCtrlPerfRatedTdpGetStatus rpcCtrlPerfRatedTdpGetStatus_v1A_1F;
RpcCtrlPerfRatedTdpGetStatus rpcCtrlPerfRatedTdpGetStatus_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X, GB100, GB102, GB10B, GB202, GB203, GB205, GB206, GB207
// RPC:CTRL_BUS_SET_P2P_MAPPING
RpcCtrlBusSetP2pMapping rpcCtrlBusSetP2pMapping_v21_03;
RpcCtrlBusSetP2pMapping rpcCtrlBusSetP2pMapping_v29_08;
@ -2546,6 +2555,40 @@ static void rpc_iGrp_ipVersions_Install_v29_09(IGRP_IP_VERSIONS_TABLE_INFO *pInf
#endif //
}
// No enabled chips use this variant provider
// Placeholder installer for the v29.0A RPC interface-group entry. The body is
// compiled out (#if 0); it exists only so the version-range table has a valid
// install callback for this version.
static void rpc_iGrp_ipVersions_Install_v29_0A(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
{
#if 0
POBJGPU pGpu = pInfo->pGpu;
OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic;
RPC_HAL_IFACES *pRpcHal = &pRpc->_hal;
// avoid possible unused warnings
pGpu += 0;
pRpcHal += 0;
#endif //
}
// No enabled chips use this variant provider
// Placeholder installer for the v29.0B RPC interface-group entry; body compiled
// out (#if 0), present only to satisfy the version-range table's callback slot.
static void rpc_iGrp_ipVersions_Install_v29_0B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
{
#if 0
POBJGPU pGpu = pInfo->pGpu;
OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic;
RPC_HAL_IFACES *pRpcHal = &pRpc->_hal;
// avoid possible unused warnings
pGpu += 0;
pRpcHal += 0;
#endif //
}
@ -2596,8 +2639,10 @@ static NV_STATUS rpc_iGrp_ipVersions_Wrapup(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
pRpcHal->rpcCtrlGpuMigratableOps = rpcCtrlGpuMigratableOps_v21_07;
if (IsIPVersionInRange(pRpc, 0x1A100000, 0xFFFFFFFF))
pRpcHal->rpcCtrlDbgSetModeErrbarDebug = rpcCtrlDbgSetModeErrbarDebug_v1A_10;
if (IsIPVersionInRange(pRpc, 0x1A140000, 0xFFFFFFFF))
if (IsIPVersionInRange(pRpc, 0x1A140000, 0x290AFFFF))
pRpcHal->rpcCtrlPmaStreamUpdateGetPut = rpcCtrlPmaStreamUpdateGetPut_v1A_14;
if (IsIPVersionInRange(pRpc, 0x290B0000, 0xFFFFFFFF))
pRpcHal->rpcCtrlPmaStreamUpdateGetPut = rpcCtrlPmaStreamUpdateGetPut_v29_0B;
if (IsIPVersionInRange(pRpc, 0x1E0C0000, 0xFFFFFFFF))
pRpcHal->rpcCtrlFabricMemoryDescribe = rpcCtrlFabricMemoryDescribe_v1E_0C;
if (IsIPVersionInRange(pRpc, 0x1F040000, 0xFFFFFFFF))
@ -2638,24 +2683,28 @@ static NV_STATUS rpc_iGrp_ipVersions_Wrapup(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
pRpcHal->rpcCtrlSetZbcStencilClear = rpcCtrlSetZbcStencilClear_v27_06;
if (IsIPVersionInRange(pRpc, 0x1E040000, 0xFFFFFFFF))
pRpcHal->rpcCtrlVaspaceCopyServerReservedPdes = rpcCtrlVaspaceCopyServerReservedPdes_v1E_04;
if (IsIPVersionInRange(pRpc, 0x290A0000, 0xFFFFFFFF))
pRpcHal->rpcCtrlCmdGetChipletHsCreditPool = rpcCtrlCmdGetChipletHsCreditPool_v29_0A;
if (IsIPVersionInRange(pRpc, 0x1A0E0000, 0x2806FFFF))
pRpcHal->rpcCtrlGrCtxswPreemptionBind = rpcCtrlGrCtxswPreemptionBind_v1A_0E;
if (IsIPVersionInRange(pRpc, 0x28070000, 0xFFFFFFFF))
pRpcHal->rpcCtrlGrCtxswPreemptionBind = rpcCtrlGrCtxswPreemptionBind_v28_07;
if (IsIPVersionInRange(pRpc, 0x1A140000, 0xFFFFFFFF))
pRpcHal->rpcCtrlAllocPmaStream = rpcCtrlAllocPmaStream_v1A_14;
if (IsIPVersionInRange(pRpc, 0x290A0000, 0xFFFFFFFF))
pRpcHal->rpcCtrlCmdGetHsCreditsMapping = rpcCtrlCmdGetHsCreditsMapping_v29_0A;
if (IsIPVersionInRange(pRpc, 0x29070000, 0xFFFFFFFF))
pRpcHal->rpcCtrlReleaseHes = rpcCtrlReleaseHes_v29_07;
if (IsIPVersionInRange(pRpc, 0x1A0F0000, 0xFFFFFFFF))
pRpcHal->rpcCtrlReserveHwpmLegacy = rpcCtrlReserveHwpmLegacy_v1A_0F;
if (IsIPVersionInRange(pRpc, 0x1A1F0000, 0xFFFFFFFF))
pRpcHal->rpcCtrlPerfRatedTdpGetStatus = rpcCtrlPerfRatedTdpGetStatus_v1A_1F;
if (IsIPVersionInRange(pRpc, 0x28030000, 0x2805FFFF))
pRpcHal->rpcCtrlSubdeviceGetVgpuHeapStats = rpcCtrlSubdeviceGetVgpuHeapStats_v28_03;
if (IsIPVersionInRange(pRpc, 0x28060000, 0xFFFFFFFF))
pRpcHal->rpcCtrlSubdeviceGetVgpuHeapStats = rpcCtrlSubdeviceGetVgpuHeapStats_v28_06;
if (IsIPVersionInRange(pRpc, 0x1C080000, 0xFFFFFFFF))
pRpcHal->rpcCtrlInternalQuiescePmaChannel = rpcCtrlInternalQuiescePmaChannel_v1C_08;
if (IsIPVersionInRange(pRpc, 0x1A1F0000, 0xFFFFFFFF))
pRpcHal->rpcCtrlPerfRatedTdpGetStatus = rpcCtrlPerfRatedTdpGetStatus_v1A_1F;
if (IsIPVersionInRange(pRpc, 0x21030000, 0x2907FFFF))
pRpcHal->rpcCtrlBusSetP2pMapping = rpcCtrlBusSetP2pMapping_v21_03;
if (IsIPVersionInRange(pRpc, 0x29080000, 0xFFFFFFFF))
@ -2932,13 +2981,15 @@ static NV_STATUS rpc_iGrp_ipVersions_Wrapup(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlDbgSetExceptionMask);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlSetZbcStencilClear);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlVaspaceCopyServerReservedPdes);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlCmdGetChipletHsCreditPool);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlGrCtxswPreemptionBind);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlAllocPmaStream);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlCmdGetHsCreditsMapping);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlReleaseHes);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlReserveHwpmLegacy);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlPerfRatedTdpGetStatus);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlSubdeviceGetVgpuHeapStats);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlInternalQuiescePmaChannel);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlPerfRatedTdpGetStatus);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlBusSetP2pMapping);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlGpuGetInfoV2);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlGetHsCredits);
@ -3381,6 +3432,12 @@ static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v29_09[] = {
{ 0x29090000, 0xFFFFFFFF, }, //
};
// Inclusive IP-version range selecting the v29_0A interface group
// (0x290A0000 and newer; cf. the IsIPVersionInRange() checks elsewhere).
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v29_0A[] = {
{ 0x290A0000, 0xFFFFFFFF, }, //
};
// Inclusive IP-version range selecting the v29_0B interface group
// (0x290B0000 and newer).
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v29_0B[] = {
{ 0x290B0000, 0xFFFFFFFF, }, //
};
#define _RPC_HAL_IGRP_ENTRY_INIT(v) \
{ RPC_IGRP_IP_VERSIONS_RANGES_##v, NV_ARRAY_ELEMENTS(RPC_IGRP_IP_VERSIONS_RANGES_##v), rpc_iGrp_ipVersions_Install_##v, }
@ -3499,6 +3556,8 @@ static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
_RPC_HAL_IGRP_ENTRY_INIT(v29_07), //
_RPC_HAL_IGRP_ENTRY_INIT(v29_08), //
_RPC_HAL_IGRP_ENTRY_INIT(v29_09), //
_RPC_HAL_IGRP_ENTRY_INIT(v29_0A), //
_RPC_HAL_IGRP_ENTRY_INIT(v29_0B), //
};
#undef _RPC_HAL_IGRP_ENTRY_INIT
@ -3561,13 +3620,15 @@ static void rpcHalIfacesSetup_TU102(RPC_HAL_IFACES *pRpcHal)
rpcCtrlDbgSetExceptionMask_STUB, // rpcCtrlDbgSetExceptionMask
rpcCtrlSetZbcStencilClear_STUB, // rpcCtrlSetZbcStencilClear
rpcCtrlVaspaceCopyServerReservedPdes_STUB, // rpcCtrlVaspaceCopyServerReservedPdes
rpcCtrlCmdGetChipletHsCreditPool_STUB, // rpcCtrlCmdGetChipletHsCreditPool
rpcCtrlGrCtxswPreemptionBind_STUB, // rpcCtrlGrCtxswPreemptionBind
rpcCtrlAllocPmaStream_STUB, // rpcCtrlAllocPmaStream
rpcCtrlCmdGetHsCreditsMapping_STUB, // rpcCtrlCmdGetHsCreditsMapping
rpcCtrlReleaseHes_STUB, // rpcCtrlReleaseHes
rpcCtrlReserveHwpmLegacy_STUB, // rpcCtrlReserveHwpmLegacy
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlSubdeviceGetVgpuHeapStats_STUB, // rpcCtrlSubdeviceGetVgpuHeapStats
rpcCtrlInternalQuiescePmaChannel_STUB, // rpcCtrlInternalQuiescePmaChannel
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlBusSetP2pMapping_STUB, // rpcCtrlBusSetP2pMapping
rpcCtrlGpuGetInfoV2_STUB, // rpcCtrlGpuGetInfoV2
rpcCtrlGetHsCredits_STUB, // rpcCtrlGetHsCredits
@ -3757,13 +3818,15 @@ static void rpcHalIfacesSetup_GA100(RPC_HAL_IFACES *pRpcHal)
rpcCtrlDbgSetExceptionMask_STUB, // rpcCtrlDbgSetExceptionMask
rpcCtrlSetZbcStencilClear_STUB, // rpcCtrlSetZbcStencilClear
rpcCtrlVaspaceCopyServerReservedPdes_STUB, // rpcCtrlVaspaceCopyServerReservedPdes
rpcCtrlCmdGetChipletHsCreditPool_STUB, // rpcCtrlCmdGetChipletHsCreditPool
rpcCtrlGrCtxswPreemptionBind_STUB, // rpcCtrlGrCtxswPreemptionBind
rpcCtrlAllocPmaStream_STUB, // rpcCtrlAllocPmaStream
rpcCtrlCmdGetHsCreditsMapping_STUB, // rpcCtrlCmdGetHsCreditsMapping
rpcCtrlReleaseHes_STUB, // rpcCtrlReleaseHes
rpcCtrlReserveHwpmLegacy_STUB, // rpcCtrlReserveHwpmLegacy
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlSubdeviceGetVgpuHeapStats_STUB, // rpcCtrlSubdeviceGetVgpuHeapStats
rpcCtrlInternalQuiescePmaChannel_STUB, // rpcCtrlInternalQuiescePmaChannel
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlBusSetP2pMapping_STUB, // rpcCtrlBusSetP2pMapping
rpcCtrlGpuGetInfoV2_STUB, // rpcCtrlGpuGetInfoV2
rpcCtrlGetHsCredits_STUB, // rpcCtrlGetHsCredits
@ -3965,13 +4028,15 @@ static void rpcHalIfacesSetup_AD102(RPC_HAL_IFACES *pRpcHal)
rpcCtrlDbgSetExceptionMask_STUB, // rpcCtrlDbgSetExceptionMask
rpcCtrlSetZbcStencilClear_STUB, // rpcCtrlSetZbcStencilClear
rpcCtrlVaspaceCopyServerReservedPdes_STUB, // rpcCtrlVaspaceCopyServerReservedPdes
rpcCtrlCmdGetChipletHsCreditPool_STUB, // rpcCtrlCmdGetChipletHsCreditPool
rpcCtrlGrCtxswPreemptionBind_STUB, // rpcCtrlGrCtxswPreemptionBind
rpcCtrlAllocPmaStream_STUB, // rpcCtrlAllocPmaStream
rpcCtrlCmdGetHsCreditsMapping_STUB, // rpcCtrlCmdGetHsCreditsMapping
rpcCtrlReleaseHes_STUB, // rpcCtrlReleaseHes
rpcCtrlReserveHwpmLegacy_STUB, // rpcCtrlReserveHwpmLegacy
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlSubdeviceGetVgpuHeapStats_STUB, // rpcCtrlSubdeviceGetVgpuHeapStats
rpcCtrlInternalQuiescePmaChannel_STUB, // rpcCtrlInternalQuiescePmaChannel
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlBusSetP2pMapping_STUB, // rpcCtrlBusSetP2pMapping
rpcCtrlGpuGetInfoV2_STUB, // rpcCtrlGpuGetInfoV2
rpcCtrlGetHsCredits_STUB, // rpcCtrlGetHsCredits
@ -4161,13 +4226,15 @@ static void rpcHalIfacesSetup_GH100(RPC_HAL_IFACES *pRpcHal)
rpcCtrlDbgSetExceptionMask_STUB, // rpcCtrlDbgSetExceptionMask
rpcCtrlSetZbcStencilClear_STUB, // rpcCtrlSetZbcStencilClear
rpcCtrlVaspaceCopyServerReservedPdes_STUB, // rpcCtrlVaspaceCopyServerReservedPdes
rpcCtrlCmdGetChipletHsCreditPool_STUB, // rpcCtrlCmdGetChipletHsCreditPool
rpcCtrlGrCtxswPreemptionBind_STUB, // rpcCtrlGrCtxswPreemptionBind
rpcCtrlAllocPmaStream_STUB, // rpcCtrlAllocPmaStream
rpcCtrlCmdGetHsCreditsMapping_STUB, // rpcCtrlCmdGetHsCreditsMapping
rpcCtrlReleaseHes_STUB, // rpcCtrlReleaseHes
rpcCtrlReserveHwpmLegacy_STUB, // rpcCtrlReserveHwpmLegacy
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlSubdeviceGetVgpuHeapStats_STUB, // rpcCtrlSubdeviceGetVgpuHeapStats
rpcCtrlInternalQuiescePmaChannel_STUB, // rpcCtrlInternalQuiescePmaChannel
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlBusSetP2pMapping_STUB, // rpcCtrlBusSetP2pMapping
rpcCtrlGpuGetInfoV2_STUB, // rpcCtrlGpuGetInfoV2
rpcCtrlGetHsCredits_STUB, // rpcCtrlGetHsCredits
@ -4321,13 +4388,15 @@ static void rpcHalIfacesSetup_GB100(RPC_HAL_IFACES *pRpcHal)
rpcCtrlDbgSetExceptionMask_STUB, // rpcCtrlDbgSetExceptionMask
rpcCtrlSetZbcStencilClear_STUB, // rpcCtrlSetZbcStencilClear
rpcCtrlVaspaceCopyServerReservedPdes_STUB, // rpcCtrlVaspaceCopyServerReservedPdes
rpcCtrlCmdGetChipletHsCreditPool_STUB, // rpcCtrlCmdGetChipletHsCreditPool
rpcCtrlGrCtxswPreemptionBind_STUB, // rpcCtrlGrCtxswPreemptionBind
rpcCtrlAllocPmaStream_STUB, // rpcCtrlAllocPmaStream
rpcCtrlCmdGetHsCreditsMapping_STUB, // rpcCtrlCmdGetHsCreditsMapping
rpcCtrlReleaseHes_STUB, // rpcCtrlReleaseHes
rpcCtrlReserveHwpmLegacy_STUB, // rpcCtrlReserveHwpmLegacy
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlSubdeviceGetVgpuHeapStats_STUB, // rpcCtrlSubdeviceGetVgpuHeapStats
rpcCtrlInternalQuiescePmaChannel_STUB, // rpcCtrlInternalQuiescePmaChannel
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlBusSetP2pMapping_STUB, // rpcCtrlBusSetP2pMapping
rpcCtrlGpuGetInfoV2_STUB, // rpcCtrlGpuGetInfoV2
rpcCtrlGetHsCredits_STUB, // rpcCtrlGetHsCredits
@ -4499,13 +4568,15 @@ static void rpcHalIfacesSetup_GB202(RPC_HAL_IFACES *pRpcHal)
rpcCtrlDbgSetExceptionMask_STUB, // rpcCtrlDbgSetExceptionMask
rpcCtrlSetZbcStencilClear_STUB, // rpcCtrlSetZbcStencilClear
rpcCtrlVaspaceCopyServerReservedPdes_STUB, // rpcCtrlVaspaceCopyServerReservedPdes
rpcCtrlCmdGetChipletHsCreditPool_STUB, // rpcCtrlCmdGetChipletHsCreditPool
rpcCtrlGrCtxswPreemptionBind_STUB, // rpcCtrlGrCtxswPreemptionBind
rpcCtrlAllocPmaStream_STUB, // rpcCtrlAllocPmaStream
rpcCtrlCmdGetHsCreditsMapping_STUB, // rpcCtrlCmdGetHsCreditsMapping
rpcCtrlReleaseHes_STUB, // rpcCtrlReleaseHes
rpcCtrlReserveHwpmLegacy_STUB, // rpcCtrlReserveHwpmLegacy
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlSubdeviceGetVgpuHeapStats_STUB, // rpcCtrlSubdeviceGetVgpuHeapStats
rpcCtrlInternalQuiescePmaChannel_STUB, // rpcCtrlInternalQuiescePmaChannel
rpcCtrlPerfRatedTdpGetStatus_STUB, // rpcCtrlPerfRatedTdpGetStatus
rpcCtrlBusSetP2pMapping_STUB, // rpcCtrlBusSetP2pMapping
rpcCtrlGpuGetInfoV2_STUB, // rpcCtrlGpuGetInfoV2
rpcCtrlGetHsCredits_STUB, // rpcCtrlGetHsCredits

View File

@ -2962,7 +2962,19 @@ typedef struct NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14
NvU32 pmaChannelIdx;
} NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14;
typedef NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14 NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v;
// v29.0B serialized parameter layout for the NVB0CC PMA-stream
// update-get-put control. 64-bit fields carry explicit 8-byte alignment so the
// wire layout is stable across compilers. Relative to v1A_14 this layout adds
// the trailing bOverflowStatus flag.
typedef struct NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v29_0B
{
NvU64 bytesConsumed NV_ALIGN_BYTES(8);
NvBool bUpdateAvailableBytes;
NvBool bWait;
NvU64 bytesAvailable NV_ALIGN_BYTES(8);
NvBool bReturnPut;
NvU64 putPtr NV_ALIGN_BYTES(8);
NvU32 pmaChannelIdx;
NvBool bOverflowStatus;
} NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v29_0B;
// Generic "_v" alias resolving to this (latest) parameter layout.
typedef NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v29_0B NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v;
typedef struct NV2080_CTRL_FB_INFO_v1A_15
{
@ -3678,6 +3690,41 @@ typedef struct NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07
typedef NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07 NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v;
typedef struct NVB0CC_CTRL_CREDIT_POOL_INFO_v29_0A
{
NvU16 numCredits;
NvU8 poolIndex;
NvU8 chipletType;
} NVB0CC_CTRL_CREDIT_POOL_INFO_v29_0A;
typedef NVB0CC_CTRL_CREDIT_POOL_INFO_v29_0A NVB0CC_CTRL_CREDIT_POOL_INFO_v;
typedef struct NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v29_0A
{
NVB0CC_CTRL_CREDIT_POOL_INFO_v29_0A poolInfos[NVB0CC_CREDIT_POOL_MAX_COUNT_v29_0A];
NvU32 poolInfosCount;
} NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v29_0A;
typedef NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v29_0A NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v;
typedef struct NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_MAPPING_INFO_v29_0A
{
NvU8 chipletType;
NvU8 chipletIndex;
NvU8 poolIndex;
} NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_MAPPING_INFO_v29_0A;
typedef NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_MAPPING_INFO_v29_0A NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_MAPPING_INFO_v;
typedef struct NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v29_0A
{
NvU16 numQueries;
NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_STATUS_v21_08 statusInfo;
NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_MAPPING_INFO_v29_0A queries[NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08];
} NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v29_0A;
typedef NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v29_0A NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v;
typedef struct NVB0CC_CTRL_SET_HS_CREDITS_PARAMS_v21_08
{
NvU8 pmaChannelIdx;

View File

@ -4693,6 +4693,21 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif
},
{ /* [305] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xd0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xd0u)
/*flags=*/ 0xd0u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20800b01u,
/*paramSize=*/ 0,
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors"
#endif
},
{ /* [306] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4707,7 +4722,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdSetGpfifo"
#endif
},
{ /* [306] */
{ /* [307] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4722,7 +4737,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoBindEngines"
#endif
},
{ /* [307] */
{ /* [308] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4737,7 +4752,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdSetOperationalProperties"
#endif
},
{ /* [308] */
{ /* [309] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4752,7 +4767,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetPhysicalChannelCount"
#endif
},
{ /* [309] */
{ /* [310] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x20008u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4767,7 +4782,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetInfo"
#endif
},
{ /* [310] */
{ /* [311] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4782,7 +4797,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoDisableChannels"
#endif
},
{ /* [311] */
{ /* [312] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4797,7 +4812,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetChannelMemInfo"
#endif
},
{ /* [312] */
{ /* [313] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4812,7 +4827,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetUserdLocation"
#endif
},
{ /* [313] */
{ /* [314] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4827,7 +4842,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoObjschedSwGetLog"
#endif
},
{ /* [314] */
{ /* [315] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5c040u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4842,7 +4857,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetDeviceInfoTable"
#endif
},
{ /* [315] */
{ /* [316] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x244u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4857,7 +4872,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoClearFaultedBit"
#endif
},
{ /* [316] */
{ /* [317] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x68u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4872,7 +4887,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoRunlistSetSchedPolicy"
#endif
},
{ /* [317] */
{ /* [318] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4887,7 +4902,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoUpdateChannelInfo"
#endif
},
{ /* [318] */
{ /* [319] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4902,7 +4917,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoDisableUsermodeChannels"
#endif
},
{ /* [319] */
{ /* [320] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4917,7 +4932,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoSetupVfZombieSubctxPdb"
#endif
},
{ /* [320] */
{ /* [321] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4932,7 +4947,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetAllocatedChannels"
#endif
},
{ /* [321] */
{ /* [322] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4947,7 +4962,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoDisableChannelsForKeyRotation"
#endif
},
{ /* [322] */
{ /* [323] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4962,7 +4977,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoDisableChannelsForKeyRotationV2"
#endif
},
{ /* [323] */
{ /* [324] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4977,7 +4992,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoObjschedGetState"
#endif
},
{ /* [324] */
{ /* [325] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4992,7 +5007,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoObjschedSetState"
#endif
},
{ /* [325] */
{ /* [326] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5007,7 +5022,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoObjschedGetCaps"
#endif
},
{ /* [326] */
{ /* [327] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5022,7 +5037,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetChannelGroupUniqueIdInfo"
#endif
},
{ /* [327] */
{ /* [328] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5037,7 +5052,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoQueryChannelUniqueId"
#endif
},
{ /* [328] */
{ /* [329] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5052,7 +5067,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetInfo"
#endif
},
{ /* [329] */
{ /* [330] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5067,7 +5082,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswZcullMode"
#endif
},
{ /* [330] */
{ /* [331] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5082,7 +5097,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetZcullInfo"
#endif
},
{ /* [331] */
{ /* [332] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5097,7 +5112,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswPmMode"
#endif
},
{ /* [332] */
{ /* [333] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x80348u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5112,7 +5127,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswZcullBind"
#endif
},
{ /* [333] */
{ /* [334] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5127,7 +5142,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswPmBind"
#endif
},
{ /* [334] */
{ /* [335] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5142,7 +5157,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrSetGpcTileMap"
#endif
},
{ /* [335] */
{ /* [336] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5157,7 +5172,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswSmpcMode"
#endif
},
{ /* [336] */
{ /* [337] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5172,7 +5187,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetSmToGpcTpcMappings"
#endif
},
{ /* [337] */
{ /* [338] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x348u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5187,7 +5202,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrSetCtxswPreemptionMode"
#endif
},
{ /* [338] */
{ /* [339] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5202,7 +5217,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswPreemptionBind"
#endif
},
{ /* [339] */
{ /* [340] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5217,7 +5232,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrPcSamplingMode"
#endif
},
{ /* [340] */
{ /* [341] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5232,7 +5247,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetROPInfo"
#endif
},
{ /* [341] */
{ /* [342] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5247,7 +5262,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCtxswStats"
#endif
},
{ /* [342] */
{ /* [343] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x18u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5262,7 +5277,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferSize"
#endif
},
{ /* [343] */
{ /* [344] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8000u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5277,7 +5292,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferInfo"
#endif
},
{ /* [344] */
{ /* [345] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5292,7 +5307,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetGlobalSmOrder"
#endif
},
{ /* [345] */
{ /* [346] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5307,7 +5322,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCurrentResidentChannel"
#endif
},
{ /* [346] */
{ /* [347] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5322,7 +5337,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetVatAlarmData"
#endif
},
{ /* [347] */
{ /* [348] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5337,7 +5352,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetAttributeBufferSize"
#endif
},
{ /* [348] */
{ /* [349] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5352,7 +5367,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGfxPoolQuerySize"
#endif
},
{ /* [349] */
{ /* [350] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5367,7 +5382,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGfxPoolInitialize"
#endif
},
{ /* [350] */
{ /* [351] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5382,7 +5397,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGfxPoolAddSlots"
#endif
},
{ /* [351] */
{ /* [352] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5397,7 +5412,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGfxPoolRemoveSlots"
#endif
},
{ /* [352] */
{ /* [353] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10au)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5412,7 +5427,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCapsV2"
#endif
},
{ /* [353] */
{ /* [354] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5427,7 +5442,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetInfoV2"
#endif
},
{ /* [354] */
{ /* [355] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5442,7 +5457,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetGpcMask"
#endif
},
{ /* [355] */
{ /* [356] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5457,7 +5472,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetTpcMask"
#endif
},
{ /* [356] */
{ /* [357] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5472,7 +5487,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrSetTpcPartitionMode"
#endif
},
{ /* [357] */
{ /* [358] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5487,7 +5502,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetEngineContextProperties"
#endif
},
{ /* [358] */
{ /* [359] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5502,7 +5517,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetSmIssueRateModifier"
#endif
},
{ /* [359] */
{ /* [360] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5517,7 +5532,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrFecsBindEvtbufForUid"
#endif
},
{ /* [360] */
{ /* [361] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x14u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5532,7 +5547,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetPhysGpcMask"
#endif
},
{ /* [361] */
{ /* [362] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5547,7 +5562,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetPpcMask"
#endif
},
{ /* [362] */
{ /* [363] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5562,7 +5577,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetNumTpcsForGpc"
#endif
},
{ /* [363] */
{ /* [364] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5577,7 +5592,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCtxswModes"
#endif
},
{ /* [364] */
{ /* [365] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5592,7 +5607,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetGpcTileMap"
#endif
},
{ /* [365] */
{ /* [366] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x18u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5607,7 +5622,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetZcullMask"
#endif
},
{ /* [366] */
{ /* [367] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x918u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5622,7 +5637,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2"
#endif
},
{ /* [367] */
{ /* [368] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5637,7 +5652,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetGfxGpcAndTpcInfo"
#endif
},
{ /* [368] */
{ /* [369] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5652,7 +5667,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswSetupBind"
#endif
},
{ /* [369] */
{ /* [370] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5667,7 +5682,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetInfoV2"
#endif
},
{ /* [370] */
{ /* [371] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5682,7 +5697,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetCalibrationLockFailed"
#endif
},
{ /* [371] */
{ /* [372] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5697,7 +5712,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbFlushGpuCache"
#endif
},
{ /* [372] */
{ /* [373] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5712,7 +5727,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetBar1Offset"
#endif
},
{ /* [373] */
{ /* [374] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5727,7 +5742,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbIsKind"
#endif
},
{ /* [374] */
{ /* [375] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40148u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5742,7 +5757,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetGpuCacheInfo"
#endif
},
{ /* [375] */
{ /* [376] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5757,7 +5772,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetFBRegionInfo"
#endif
},
{ /* [376] */
{ /* [377] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5772,7 +5787,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetOfflinedPages"
#endif
},
{ /* [377] */
{ /* [378] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40158u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5787,7 +5802,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetLTCInfoForFBP"
#endif
},
{ /* [378] */
{ /* [379] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5802,7 +5817,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbCBCOp"
#endif
},
{ /* [379] */
{ /* [380] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5817,7 +5832,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetCtagsForCbcEviction"
#endif
},
{ /* [380] */
{ /* [381] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5832,7 +5847,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetupVprRegion"
#endif
},
{ /* [381] */
{ /* [382] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5847,7 +5862,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetCliManagedOfflinedPages"
#endif
},
{ /* [382] */
{ /* [383] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5862,7 +5877,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetCompBitCopyConstructInfo"
#endif
},
{ /* [383] */
{ /* [384] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5877,7 +5892,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetRrd"
#endif
},
{ /* [384] */
{ /* [385] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5892,7 +5907,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetReadLimit"
#endif
},
{ /* [385] */
{ /* [386] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5907,7 +5922,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetWriteLimit"
#endif
},
{ /* [386] */
{ /* [387] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5922,7 +5937,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbPatchPbrForMining"
#endif
},
{ /* [387] */
{ /* [388] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x18u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5937,7 +5952,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetMemAlignment"
#endif
},
{ /* [388] */
{ /* [389] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x58u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5952,7 +5967,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetRemappedRows"
#endif
},
{ /* [389] */
{ /* [390] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5967,7 +5982,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetFsInfo"
#endif
},
{ /* [390] */
{ /* [391] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x58u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5982,7 +5997,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetRowRemapperHistogram"
#endif
},
{ /* [391] */
{ /* [392] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5997,7 +6012,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetDynamicOfflinedPages"
#endif
},
{ /* [392] */
{ /* [393] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6012,7 +6027,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbUpdateNumaStatus"
#endif
},
{ /* [393] */
{ /* [394] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6027,7 +6042,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetNumaInfo"
#endif
},
{ /* [394] */
{ /* [395] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x509u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6042,7 +6057,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGbGetSemaphoreSurfaceLayout"
#endif
},
{ /* [395] */
{ /* [396] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100008u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6057,7 +6072,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGmmuCommitTlbInvalidate"
#endif
},
{ /* [396] */
{ /* [397] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6072,7 +6087,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetStaticBar1Info"
#endif
},
{ /* [397] */
{ /* [398] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6087,7 +6102,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysQueryDramEncryptionPendingConfiguration"
#endif
},
{ /* [398] */
{ /* [399] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40044u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6102,7 +6117,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysSetDramEncryptionConfiguration"
#endif
},
{ /* [399] */
{ /* [400] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6117,7 +6132,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetStatus"
#endif
},
{ /* [400] */
{ /* [401] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6132,7 +6147,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysQueryDramEncryptionInforomSupport"
#endif
},
{ /* [401] */
{ /* [402] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6147,7 +6162,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysQueryDramEncryptionStatus"
#endif
},
{ /* [402] */
{ /* [403] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50bu)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6162,7 +6177,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcGetArchInfo"
#endif
},
{ /* [403] */
{ /* [404] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6177,7 +6192,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcServiceInterrupts"
#endif
},
{ /* [404] */
{ /* [405] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6192,7 +6207,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcGetManufacturer"
#endif
},
{ /* [405] */
{ /* [406] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6207,7 +6222,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcChangeReplayableFaultOwnership"
#endif
},
{ /* [406] */
{ /* [407] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6222,7 +6237,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcGetEngineNotificationIntrVectors"
#endif
},
{ /* [407] */
{ /* [408] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6237,7 +6252,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcGetStaticIntrTable"
#endif
},
{ /* [408] */
{ /* [409] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x518u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6252,7 +6267,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPciInfo"
#endif
},
{ /* [409] */
{ /* [410] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x518u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6267,7 +6282,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPciBarInfo"
#endif
},
{ /* [410] */
{ /* [411] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6282,7 +6297,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetPcieLinkWidth"
#endif
},
{ /* [411] */
{ /* [412] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6297,7 +6312,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetPcieSpeed"
#endif
},
{ /* [412] */
{ /* [413] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6312,7 +6327,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusServiceGpuMultifunctionState"
#endif
},
{ /* [413] */
{ /* [414] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6327,7 +6342,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPexCounters"
#endif
},
{ /* [414] */
{ /* [415] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6342,7 +6357,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusClearPexCounters"
#endif
},
{ /* [415] */
{ /* [416] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6357,7 +6372,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusFreezePexCounters"
#endif
},
{ /* [416] */
{ /* [417] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6372,7 +6387,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPexLaneCounters"
#endif
},
{ /* [417] */
{ /* [418] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6387,7 +6402,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPcieLtrLatency"
#endif
},
{ /* [418] */
{ /* [419] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6402,7 +6417,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetPcieLtrLatency"
#endif
},
{ /* [419] */
{ /* [420] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6417,7 +6432,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPexUtilCounters"
#endif
},
{ /* [420] */
{ /* [421] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6432,7 +6447,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusClearPexUtilCounters"
#endif
},
{ /* [421] */
{ /* [422] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6447,7 +6462,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetBFD"
#endif
},
{ /* [422] */
{ /* [423] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6462,7 +6477,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetAspmDisableFlags"
#endif
},
{ /* [423] */
{ /* [424] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x20118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6477,7 +6492,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetInfoV2"
#endif
},
{ /* [424] */
{ /* [425] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6492,7 +6507,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusControlPublicAspmBits"
#endif
},
{ /* [425] */
{ /* [426] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6507,7 +6522,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetNvlinkPeerIdMask"
#endif
},
{ /* [426] */
{ /* [427] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6522,7 +6537,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetEomParameters"
#endif
},
{ /* [427] */
{ /* [428] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6537,7 +6552,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetUphyDlnCfgSpace"
#endif
},
{ /* [428] */
{ /* [429] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6552,7 +6567,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetEomStatus"
#endif
},
{ /* [429] */
{ /* [430] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6567,7 +6582,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPcieReqAtomicsCaps"
#endif
},
{ /* [430] */
{ /* [431] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6582,7 +6597,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics"
#endif
},
{ /* [431] */
{ /* [432] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6597,7 +6612,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetC2CInfo"
#endif
},
{ /* [432] */
{ /* [433] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1000u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6612,7 +6627,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSysmemAccess"
#endif
},
{ /* [433] */
{ /* [434] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50040u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6627,7 +6642,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetP2pMapping"
#endif
},
{ /* [434] */
{ /* [435] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50040u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6642,7 +6657,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusUnsetP2pMapping"
#endif
},
{ /* [435] */
{ /* [436] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40448u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6657,7 +6672,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPcieCplAtomicsCaps"
#endif
},
{ /* [436] */
{ /* [437] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x318u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6672,7 +6687,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKPerfBoost"
#endif
},
{ /* [437] */
{ /* [438] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6687,7 +6702,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfGetLevelInfo_V2"
#endif
},
{ /* [438] */
{ /* [439] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6702,7 +6717,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfGetPowerstate"
#endif
},
{ /* [439] */
{ /* [440] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6717,7 +6732,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfSetPowerstate"
#endif
},
{ /* [440] */
{ /* [441] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6732,7 +6747,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfNotifyVideoevent"
#endif
},
{ /* [441] */
{ /* [442] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6747,7 +6762,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfGetCurrentPstate"
#endif
},
{ /* [442] */
{ /* [443] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6762,7 +6777,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfRatedTdpGetControl"
#endif
},
{ /* [443] */
{ /* [444] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6777,7 +6792,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfRatedTdpSetControl"
#endif
},
{ /* [444] */
{ /* [445] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40048u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6792,7 +6807,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfGetVideoEnginePerfmonSample"
#endif
},
{ /* [445] */
{ /* [446] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6807,7 +6822,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfSetAuxPowerState"
#endif
},
{ /* [446] */
{ /* [447] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x18u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6822,7 +6837,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfReservePerfmonHw"
#endif
},
{ /* [447] */
{ /* [448] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40008u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6837,7 +6852,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2"
#endif
},
{ /* [448] */
{ /* [449] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6852,7 +6867,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcReadVirtualMem"
#endif
},
{ /* [449] */
{ /* [450] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6867,7 +6882,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcGetErrorCount"
#endif
},
{ /* [450] */
{ /* [451] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6882,7 +6897,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcSetCleanErrorHistory"
#endif
},
{ /* [451] */
{ /* [452] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6897,7 +6912,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcGetWatchdogInfo"
#endif
},
{ /* [452] */
{ /* [453] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x18u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6912,7 +6927,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcDisableWatchdog"
#endif
},
{ /* [453] */
{ /* [454] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x18u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6927,7 +6942,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcEnableWatchdog"
#endif
},
{ /* [454] */
{ /* [455] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x18u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6942,7 +6957,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcReleaseWatchdogRequests"
#endif
},
{ /* [455] */
{ /* [456] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40154u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6957,7 +6972,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdSetRcRecovery"
#endif
},
{ /* [456] */
{ /* [457] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40154u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6972,7 +6987,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetRcRecovery"
#endif
},
{ /* [457] */
{ /* [458] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x18u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6987,7 +7002,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcSoftDisableWatchdog"
#endif
},
{ /* [458] */
{ /* [459] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7002,7 +7017,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdSetRcInfo"
#endif
},
{ /* [459] */
{ /* [460] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7017,7 +7032,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetRcInfo"
#endif
},
{ /* [460] */
{ /* [461] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7032,7 +7047,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcGetErrorV2"
#endif
},
{ /* [461] */
{ /* [462] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7047,7 +7062,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGpioProgramDirection"
#endif
},
{ /* [462] */
{ /* [463] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7062,7 +7077,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGpioProgramOutput"
#endif
},
{ /* [463] */
{ /* [464] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7077,7 +7092,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGpioReadInput"
#endif
},
{ /* [464] */
{ /* [465] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7092,7 +7107,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGpioActivateHwFunction"
#endif
},
{ /* [465] */
{ /* [466] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7107,7 +7122,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvdGetDumpSize"
#endif
},
{ /* [466] */
{ /* [467] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7122,7 +7137,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvdGetDump"
#endif
},
{ /* [467] */
{ /* [468] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7137,7 +7152,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvdGetNocatJournalRpt"
#endif
},
{ /* [468] */
{ /* [469] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7152,7 +7167,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvdSetNocatJournalData"
#endif
},
{ /* [469] */
{ /* [470] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7167,7 +7182,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvdInsertNocatJournalRecord"
#endif
},
{ /* [470] */
{ /* [471] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7182,7 +7197,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDmaInvalidateTLB"
#endif
},
{ /* [471] */
{ /* [472] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7197,7 +7212,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDmaGetInfo"
#endif
},
{ /* [472] */
{ /* [473] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x158u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7212,7 +7227,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPmgrGetModuleInfo"
#endif
},
{ /* [473] */
{ /* [474] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7227,7 +7242,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGpuProcessPostGc6ExitTasks"
#endif
},
{ /* [474] */
{ /* [475] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7242,7 +7257,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGc6Entry"
#endif
},
{ /* [475] */
{ /* [476] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7257,7 +7272,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGc6Exit"
#endif
},
{ /* [476] */
{ /* [477] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7272,7 +7287,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdLpwrDifrCtrl"
#endif
},
{ /* [477] */
{ /* [478] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7287,7 +7302,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdLpwrDifrPrefetchResponse"
#endif
},
{ /* [478] */
{ /* [479] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7302,7 +7317,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetCaps"
#endif
},
{ /* [479] */
{ /* [480] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x20349u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7317,7 +7332,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetCePceMask"
#endif
},
{ /* [480] */
{ /* [481] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7332,7 +7347,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetCapsV2"
#endif
},
{ /* [481] */
{ /* [482] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7347,7 +7362,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeUpdatePceLceMappings"
#endif
},
{ /* [482] */
{ /* [483] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7362,7 +7377,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeUpdateClassDB"
#endif
},
{ /* [483] */
{ /* [484] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x301d0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7377,7 +7392,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetPhysicalCaps"
#endif
},
{ /* [484] */
{ /* [485] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c040u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7392,7 +7407,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetFaultMethodBufferSize"
#endif
},
{ /* [485] */
{ /* [486] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4c0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7407,7 +7422,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetHubPceMask"
#endif
},
{ /* [486] */
{ /* [487] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7422,7 +7437,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetAllCaps"
#endif
},
{ /* [487] */
{ /* [488] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x101d0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7437,7 +7452,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetAllPhysicalCaps"
#endif
},
{ /* [488] */
{ /* [489] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x145u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7452,7 +7467,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetLceShimInfo"
#endif
},
{ /* [489] */
{ /* [490] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7467,7 +7482,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeUpdatePceLceMappingsV2"
#endif
},
{ /* [490] */
{ /* [491] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7482,7 +7497,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetHubPceMaskV2"
#endif
},
{ /* [491] */
{ /* [492] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100c0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7497,7 +7512,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetPceConfigForLceType"
#endif
},
{ /* [492] */
{ /* [493] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x154u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7512,7 +7527,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetDecompLceMask"
#endif
},
{ /* [493] */
{ /* [494] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x154u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7527,7 +7542,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeIsDecompLceEnabled"
#endif
},
{ /* [494] */
{ /* [495] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7542,7 +7557,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetNvlinkCaps"
#endif
},
{ /* [495] */
{ /* [496] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7557,7 +7572,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetNvlinkStatus"
#endif
},
{ /* [496] */
{ /* [497] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7572,7 +7587,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetNvlinkErrInfo"
#endif
},
{ /* [497] */
{ /* [498] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7587,7 +7602,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetNvlinkCounters"
#endif
},
{ /* [498] */
{ /* [499] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7602,7 +7617,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdClearNvlinkCounters"
#endif
},
{ /* [499] */
{ /* [500] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7617,7 +7632,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts"
#endif
},
{ /* [500] */
{ /* [501] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7632,7 +7647,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSetupEom"
#endif
},
{ /* [501] */
{ /* [502] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7647,7 +7662,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetPowerState"
#endif
},
{ /* [502] */
{ /* [503] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7662,7 +7677,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinGetLinkFomValues"
#endif
},
{ /* [503] */
{ /* [504] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7677,7 +7692,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetNvlinkEccErrors"
#endif
},
{ /* [504] */
{ /* [505] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7692,7 +7707,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkReadTpCounters"
#endif
},
{ /* [505] */
{ /* [506] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7707,7 +7722,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetLpCounters"
#endif
},
{ /* [506] */
{ /* [507] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7722,7 +7737,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSetLoopbackMode"
#endif
},
{ /* [507] */
{ /* [508] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7737,7 +7752,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetRefreshCounters"
#endif
},
{ /* [508] */
{ /* [509] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7752,7 +7767,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkClearRefreshCounters"
#endif
},
{ /* [509] */
{ /* [510] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7767,7 +7782,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr"
#endif
},
{ /* [510] */
{ /* [511] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10041u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7782,7 +7797,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo"
#endif
},
{ /* [511] */
{ /* [512] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7797,7 +7812,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkEnableLinks"
#endif
},
{ /* [512] */
{ /* [513] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7812,7 +7827,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkProcessInitDisabledLinks"
#endif
},
{ /* [513] */
{ /* [514] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7827,7 +7842,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkEomControl"
#endif
},
{ /* [514] */
{ /* [515] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7842,7 +7857,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSetL1Threshold"
#endif
},
{ /* [515] */
{ /* [516] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7857,7 +7872,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetL1Threshold"
#endif
},
{ /* [516] */
{ /* [517] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10250u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7872,7 +7887,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkInbandSendData"
#endif
},
{ /* [517] */
{ /* [518] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7887,7 +7902,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkIsGpuDegraded"
#endif
},
{ /* [518] */
{ /* [519] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7902,7 +7917,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkDirectConnectCheck"
#endif
},
{ /* [519] */
{ /* [520] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7917,7 +7932,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPostFaultUp"
#endif
},
{ /* [520] */
{ /* [521] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7932,7 +7947,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetPortEvents"
#endif
},
{ /* [521] */
{ /* [522] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7947,7 +7962,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdIsNvlinkReducedConfig"
#endif
},
{ /* [522] */
{ /* [523] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7962,7 +7977,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPAOS"
#endif
},
{ /* [523] */
{ /* [524] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7977,7 +7992,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetNvlinkCountersV2"
#endif
},
{ /* [524] */
{ /* [525] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7992,7 +8007,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdClearNvlinkCountersV2"
#endif
},
{ /* [525] */
{ /* [526] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8007,7 +8022,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkClearLpCounters"
#endif
},
{ /* [526] */
{ /* [527] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8022,7 +8037,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPLTC"
#endif
},
{ /* [527] */
{ /* [528] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8037,7 +8052,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPPLM"
#endif
},
{ /* [528] */
{ /* [529] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8052,7 +8067,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPPSLC"
#endif
},
{ /* [529] */
{ /* [530] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8067,7 +8082,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMCAM"
#endif
},
{ /* [530] */
{ /* [531] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8082,7 +8097,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTECR"
#endif
},
{ /* [531] */
{ /* [532] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8097,7 +8112,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTEWE"
#endif
},
{ /* [532] */
{ /* [533] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8112,7 +8127,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTSDE"
#endif
},
{ /* [533] */
{ /* [534] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8127,7 +8142,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTCAP"
#endif
},
{ /* [534] */
{ /* [535] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8142,7 +8157,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPMTU"
#endif
},
{ /* [535] */
{ /* [536] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8157,7 +8172,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPMLP"
#endif
},
{ /* [536] */
{ /* [537] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8172,7 +8187,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessGHPKT"
#endif
},
{ /* [537] */
{ /* [538] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8187,7 +8202,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPDDR"
#endif
},
{ /* [538] */
{ /* [539] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8202,7 +8217,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPPTT"
#endif
},
{ /* [539] */
{ /* [540] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8217,7 +8232,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPPCNT"
#endif
},
{ /* [540] */
{ /* [541] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8232,7 +8247,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMGIR"
#endif
},
{ /* [541] */
{ /* [542] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8247,7 +8262,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPPAOS"
#endif
},
{ /* [542] */
{ /* [543] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8262,7 +8277,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPPHCR"
#endif
},
{ /* [543] */
{ /* [544] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8277,7 +8292,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessSLTP"
#endif
},
{ /* [544] */
{ /* [545] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8292,7 +8307,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPGUID"
#endif
},
{ /* [545] */
{ /* [546] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8307,7 +8322,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPPRT"
#endif
},
{ /* [546] */
{ /* [547] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8322,7 +8337,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPTYS"
#endif
},
{ /* [547] */
{ /* [548] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8337,7 +8352,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessSLRG"
#endif
},
{ /* [548] */
{ /* [549] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8352,7 +8367,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPMAOS"
#endif
},
{ /* [549] */
{ /* [550] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8367,7 +8382,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPPLR"
#endif
},
{ /* [550] */
{ /* [551] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8382,7 +8397,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetSupportedCounters"
#endif
},
{ /* [551] */
{ /* [552] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8397,7 +8412,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMORD"
#endif
},
{ /* [552] */
{ /* [553] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8412,7 +8427,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTRC_CAP"
#endif
},
{ /* [553] */
{ /* [554] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8427,7 +8442,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTRC_CONF"
#endif
},
{ /* [554] */
{ /* [555] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8442,7 +8457,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTRC_CTRL"
#endif
},
{ /* [555] */
{ /* [556] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8457,7 +8472,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTEIM"
#endif
},
{ /* [556] */
{ /* [557] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8472,7 +8487,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTIE"
#endif
},
{ /* [557] */
{ /* [558] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8487,7 +8502,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTIM"
#endif
},
{ /* [558] */
{ /* [559] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8502,7 +8517,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMPSCR"
#endif
},
{ /* [559] */
{ /* [560] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8517,7 +8532,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMTSR"
#endif
},
{ /* [560] */
{ /* [561] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8532,7 +8547,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPPSLS"
#endif
},
{ /* [561] */
{ /* [562] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8547,7 +8562,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessMLPC"
#endif
},
{ /* [562] */
{ /* [563] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8562,7 +8577,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPRMAccessPLIB"
#endif
},
{ /* [563] */
{ /* [564] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8577,7 +8592,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdSetNvlinkHwErrorInjectSettings"
#endif
},
{ /* [564] */
{ /* [565] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8592,7 +8607,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetNvlinkHwErrorInjectSettings"
#endif
},
{ /* [565] */
{ /* [566] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8607,7 +8622,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetPlatformInfo"
#endif
},
{ /* [566] */
{ /* [567] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8622,7 +8637,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetSupportedBWMode"
#endif
},
{ /* [567] */
{ /* [568] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8637,7 +8652,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSetBWMode"
#endif
},
{ /* [568] */
{ /* [569] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8652,7 +8667,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetBWMode"
#endif
},
{ /* [569] */
{ /* [570] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8667,7 +8682,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetLocalDeviceInfo"
#endif
},
{ /* [570] */
{ /* [571] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8682,7 +8697,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkInjectSWError"
#endif
},
{ /* [571] */
{ /* [572] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8697,7 +8712,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPostLazyErrorRecovery"
#endif
},
{ /* [572] */
{ /* [573] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8712,7 +8727,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkConfigureL1Toggle"
#endif
},
{ /* [573] */
{ /* [574] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8727,7 +8742,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlNvlinkGetL1Toggle"
#endif
},
{ /* [574] */
{ /* [575] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8742,7 +8757,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetDmemUsage"
#endif
},
{ /* [575] */
{ /* [576] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8757,7 +8772,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetEngineArch"
#endif
},
{ /* [576] */
{ /* [577] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8772,7 +8787,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerQueueInfo"
#endif
},
{ /* [577] */
{ /* [578] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8787,7 +8802,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlGet"
#endif
},
{ /* [578] */
{ /* [579] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8802,7 +8817,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlSet"
#endif
},
{ /* [579] */
{ /* [580] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8817,7 +8832,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferInfo"
#endif
},
{ /* [580] */
{ /* [581] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8832,7 +8847,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferSize"
#endif
},
{ /* [581] */
{ /* [582] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8847,7 +8862,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEccGetClientExposedCounters"
#endif
},
{ /* [582] */
{ /* [583] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8862,7 +8877,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEccGetVolatileCounts"
#endif
},
{ /* [583] */
{ /* [584] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8877,7 +8892,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaRange"
#endif
},
{ /* [584] */
{ /* [585] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10244u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8892,7 +8907,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaSetupInstanceMemBlock"
#endif
},
{ /* [585] */
{ /* [586] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8907,7 +8922,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetRange"
#endif
},
{ /* [586] */
{ /* [587] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8922,7 +8937,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetFabricMemStats"
#endif
},
{ /* [587] */
{ /* [588] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40549u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8937,7 +8952,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGspGetFeatures"
#endif
},
{ /* [588] */
{ /* [589] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8952,7 +8967,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGspGetRmHeapStats"
#endif
},
{ /* [589] */
{ /* [590] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8967,7 +8982,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGpuGetVgpuHeapStats"
#endif
},
{ /* [590] */
{ /* [591] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8982,7 +8997,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdLibosGetHeapStats"
#endif
},
{ /* [591] */
{ /* [592] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -8997,7 +9012,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGrmgrGetGrFsInfo"
#endif
},
{ /* [592] */
{ /* [593] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9012,7 +9027,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt"
#endif
},
{ /* [593] */
{ /* [594] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9027,7 +9042,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff"
#endif
},
{ /* [594] */
{ /* [595] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9042,7 +9057,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower"
#endif
},
{ /* [595] */
{ /* [596] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xbu)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9057,7 +9072,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixVidmemPersistenceStatus"
#endif
},
{ /* [596] */
{ /* [597] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9072,7 +9087,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixUpdateTgpStatus"
#endif
},
{ /* [597] */
{ /* [598] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9087,7 +9102,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalBootloadGspVgpuPluginTask"
#endif
},
{ /* [598] */
{ /* [599] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9102,7 +9117,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalShutdownGspVgpuPluginTask"
#endif
},
{ /* [599] */
{ /* [600] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9117,7 +9132,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalPgpuAddVgpuType"
#endif
},
{ /* [600] */
{ /* [601] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9132,7 +9147,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalEnumerateVgpuPerPgpu"
#endif
},
{ /* [601] */
{ /* [602] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9147,7 +9162,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalClearGuestVmInfo"
#endif
},
{ /* [602] */
{ /* [603] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9162,7 +9177,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetVgpuFbUsage"
#endif
},
{ /* [603] */
{ /* [604] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1d0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9177,7 +9192,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuEncoderCapacity"
#endif
},
{ /* [604] */
{ /* [605] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9192,7 +9207,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalCleanupGspVgpuPluginResources"
#endif
},
{ /* [605] */
{ /* [606] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9207,7 +9222,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuFsEncoding"
#endif
},
{ /* [606] */
{ /* [607] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9222,7 +9237,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuMigrationSupport"
#endif
},
{ /* [607] */
{ /* [608] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9237,7 +9252,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuMgrConfig"
#endif
},
{ /* [608] */
{ /* [609] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9252,7 +9267,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalFreeStates"
#endif
},
{ /* [609] */
{ /* [610] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9267,7 +9282,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetFrameRateLimiterStatus"
#endif
},
{ /* [610] */
{ /* [611] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9282,7 +9297,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuHeterogeneousMode"
#endif
},
{ /* [611] */
{ /* [612] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x158u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9297,7 +9312,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetAvailableHshubMask"
#endif
},
{ /* [612] */
{ /* [613] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x158u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9312,7 +9327,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlSetEcThrottleMode"
#endif
},
{ /* [613] */
{ /* [614] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -9566,7 +9581,7 @@ NV_STATUS __nvoc_up_thunk_Notifier_subdeviceGetOrAllocNotifShare(struct Subdevic
const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice =
{
/*numEntries=*/ 614,
/*numEntries=*/ 615,
/*pExportEntries=*/ __nvoc_exported_method_def_Subdevice
};
@ -12474,6 +12489,11 @@ static void __nvoc_init_funcTable_Subdevice_3(Subdevice *pThis, RmHalspecOwner *
pThis->__subdeviceCtrlCmdInternalNvlinkProgramBufferready__ = &subdeviceCtrlCmdInternalNvlinkProgramBufferready_IMPL;
#endif
// subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors -- exported (id=0x20800b01)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xd0u)
pThis->__subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors__ = &subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors_IMPL;
#endif
// subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig -- exported (id=0x20800a78)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xd0u)
pThis->__subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig__ = &subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig_IMPL;
@ -12938,10 +12958,10 @@ static void __nvoc_init_funcTable_Subdevice_3(Subdevice *pThis, RmHalspecOwner *
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
pThis->__subdeviceSpdmRetrieveTranscript__ = &subdeviceSpdmRetrieveTranscript_IMPL;
#endif
} // End __nvoc_init_funcTable_Subdevice_3 with approximately 136 basic block(s).
} // End __nvoc_init_funcTable_Subdevice_3 with approximately 137 basic block(s).
// Initialize vtable(s) for 644 virtual method(s).
// Initialize vtable(s) for 645 virtual method(s).
void __nvoc_init_funcTable_Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
// Per-class vtable definition
@ -13060,7 +13080,7 @@ void __nvoc_init_funcTable_Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspe
pThis->__nvoc_base_Notifier.__nvoc_vtable = &vtable.Notifier; // (notify) super
pThis->__nvoc_vtable = &vtable; // (subdevice) this
// Initialize vtable(s) with 614 per-object function pointer(s).
// Initialize vtable(s) with 615 per-object function pointer(s).
// To reduce stack pressure with some unoptimized builds, the logic is distributed among 3 functions.
__nvoc_init_funcTable_Subdevice_1(pThis, pRmhalspecowner);
__nvoc_init_funcTable_Subdevice_2(pThis, pRmhalspecowner);

View File

@ -148,7 +148,7 @@ struct Subdevice {
struct Notifier *__nvoc_pbase_Notifier; // notify super
struct Subdevice *__nvoc_pbase_Subdevice; // subdevice
// Vtable with 614 per-object function pointers
// Vtable with 615 per-object function pointers
NV_STATUS (*__subdeviceCtrlCmdBiosGetInfoV2__)(struct Subdevice * /*this*/, NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS *); // halified (2 hals) exported (id=0x20800810) body
NV_STATUS (*__subdeviceCtrlCmdBiosGetNbsiV2__)(struct Subdevice * /*this*/, NV2080_CTRL_BIOS_GET_NBSI_V2_PARAMS *); // exported (id=0x2080080e)
NV_STATUS (*__subdeviceCtrlCmdBiosGetSKUInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *); // halified (2 hals) exported (id=0x20800808)
@ -673,6 +673,7 @@ struct Subdevice {
NV_STATUS (*__subdeviceCtrlCmdInternalNvlinkGetAliEnabled__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_NVLINK_GET_ALI_ENABLED_PARAMS *); // exported (id=0x20800a29)
NV_STATUS (*__subdeviceCtrlCmdInternalNvlinkSaveRestoreHshubState__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS *); // exported (id=0x20800a62)
NV_STATUS (*__subdeviceCtrlCmdInternalNvlinkProgramBufferready__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_BUFFERREADY_PARAMS *); // exported (id=0x20800a64)
NV_STATUS (*__subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors__)(struct Subdevice * /*this*/); // exported (id=0x20800b01)
NV_STATUS (*__subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS *); // exported (id=0x20800a78)
NV_STATUS (*__subdeviceCtrlCmdInternalNvlinkUpdatePeerLinkMask__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS *); // exported (id=0x20800a7d)
NV_STATUS (*__subdeviceCtrlCmdInternalNvlinkUpdateLinkConnection__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS *); // exported (id=0x20800a82)
@ -1968,6 +1969,8 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdInternalNvlinkSaveRestoreHshubState(pSubdevice, pParams) subdeviceCtrlCmdInternalNvlinkSaveRestoreHshubState_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalNvlinkProgramBufferready_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalNvlinkProgramBufferready__
#define subdeviceCtrlCmdInternalNvlinkProgramBufferready(pSubdevice, pParams) subdeviceCtrlCmdInternalNvlinkProgramBufferready_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors__
#define subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors(pSubdevice) subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors_DISPATCH(pSubdevice)
#define subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig__
#define subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig(pSubdevice, pParams) subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalNvlinkUpdatePeerLinkMask_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalNvlinkUpdatePeerLinkMask__
@ -4313,6 +4316,10 @@ static inline NV_STATUS subdeviceCtrlCmdInternalNvlinkProgramBufferready_DISPATC
return pSubdevice->__subdeviceCtrlCmdInternalNvlinkProgramBufferready__(pSubdevice, pParams);
}
static inline NV_STATUS subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors_DISPATCH(struct Subdevice *pSubdevice) {
return pSubdevice->__subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors__(pSubdevice);
}
static inline NV_STATUS subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig__(pSubdevice, pParams);
}
@ -6047,6 +6054,8 @@ NV_STATUS subdeviceCtrlCmdInternalNvlinkSaveRestoreHshubState_IMPL(struct Subdev
NV_STATUS subdeviceCtrlCmdInternalNvlinkProgramBufferready_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_BUFFERREADY_PARAMS *pParams);
NV_STATUS subdeviceCtrlCmdInternalNvlinkReplaySuppressedErrors_IMPL(struct Subdevice *pSubdevice);
NV_STATUS subdeviceCtrlCmdInternalNvlinkUpdateCurrentConfig_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS *pParams);
NV_STATUS subdeviceCtrlCmdInternalNvlinkUpdatePeerLinkMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS *pParams);

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2001-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2001-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -38,6 +38,9 @@ extern "C" {
#include "utils/nvprintf.h"
#include "nvlog/nvlog.h"
// TODO Bug 5078337: Move these away from kernel/core
#include "kernel/diagnostics/xid_context.h"
#define DBG_FILE_LINE_FUNCTION NV_FILE_STR, __LINE__, NV_FUNCTION_STR
/**
@ -235,6 +238,10 @@ void nvDbgDumpBufferBytes(void *pBuffer, NvU32 length);
#define DBG_VAL_PTR(p)
#endif
//
// TODO Bug 5078337: Move these away from kernel/core and rename to indicate
// that they emit XIDs
//
#define NV_ERROR_LOG(pGpu, num, fmt, ...) \
nvErrorLog_va((void*)pGpu, num, fmt, ##__VA_ARGS__); \
NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, LEVEL_ERROR, \
@ -245,10 +252,9 @@ void nvDbgDumpBufferBytes(void *pBuffer, NvU32 length);
NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, LEVEL_ERROR, \
NV_PRINTF_ADD_PREFIX(fmt), ##__VA_ARGS__)
void nvErrorLog(void *pVoid, NvU32 num, const char *pFormat, va_list arglist);
void nvErrorLog(void *pVoid, XidContext context, const char *pFormat, va_list arglist);
void nvErrorLog_va(void * pGpu, NvU32 num, const char * pFormat, ...);
void nvErrorLog2(void *pVoid, NvU32 num, NvBool oobLogging, const char *pFormat, va_list arglist);
void nvErrorLog2_va(void * pGpu, NvU32 num, NvBool oobLogging, const char * pFormat, ...);
void nvErrorLog2_va(void * pGpu, XidContext context, NvBool oobLogging, const char * pFormat, ...);
#ifdef __cplusplus
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -156,6 +156,11 @@ typedef struct THREAD_STATE_DB
//
#define TIMEOUT_WDDM_POWER_TRANSITION_INTERVAL_MS 9800
//
// Thread state timeout for DPC or ISR handling
//
#define TIMEOUT_DPC_ISR_INTERVAL_MS 500
//
// Thread State flags used for threadStateInitSetupFlags
//
@ -213,6 +218,7 @@ NV_STATUS threadStateResetTimeout(OBJGPU *pGpu);
void threadStateLogTimeout(OBJGPU *pGpu, NvU64 funcAddr, NvU32 lineNum);
void threadStateYieldCpuIfNecessary(OBJGPU *pGpu, NvBool bQuiet);
void threadStateSetTimeoutOverride(THREAD_STATE_NODE *, NvU64);
void threadStateSetTimeoutSingleOverride(THREAD_STATE_NODE *, NvU64);
NV_STATUS threadStateEnqueueCallbackOnFree(THREAD_STATE_NODE *pThreadNode,
THREAD_STATE_FREE_CALLBACK *pCallback);

View File

@ -0,0 +1,64 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef XID_CONTEXT_H
#define XID_CONTEXT_H 1

#include "nvtypes.h"

//!
//! Root cause information to print in specific cases.
//!
//! Some Xid strings must be kept "stable", so this information is
//! only printed in certain cases where we can break the stability, or where
//! that particular Xid string was not stable.
//!
//! It is always okay to pass an all-zero struct, i.e. (RcRootCause){ 0 },
//! meaning "no root cause information available".
//!
typedef struct
{
    //!
    //! If nonzero, print this Xid number as the root cause of the current
    //! ROBUST_CHANNEL_PREEMPTIVE_REMOVAL.
    //!
    NvU32 preemptiveRemovalPreviousXid;
} RcRootCause;

//!
//! Xid number and context information about an Xid passed to KernelRM.
//!
//! This struct is NOT stable. The Xid printing mechanism must take care that
//! Xid strings which must remain stable continue to do so even if some
//! information changes here.
//!
typedef struct
{
    //! Xid number.
    NvU32 xid;

    //! Additional root cause information valid only for certain Xids.
    RcRootCause rootCause;
} XidContext;

#endif // XID_CONTEXT_H

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -216,6 +216,7 @@ typedef struct GspSystemInfo
NvBool bEnableDynamicGranularityPageArrays;
NvBool bClockBoostSupported;
NvBool bRouteDispIntrsToCPU;
NvU64 hostPageSize;
} GspSystemInfo;

Some files were not shown because too many files have changed in this diff Show More