525.47.22

This commit is contained in:
russellcnv 2023-04-27 14:28:07 -07:00
parent db2866126e
commit 986b3fd1e9
No known key found for this signature in database
GPG Key ID: 0F15D664965FBC5A
124 changed files with 3306 additions and 1191 deletions

View File

@ -2,6 +2,28 @@
## Release 525 Entries
### [525.89.02] 2023-02-08
### [525.85.12] 2023-01-30
### [525.85.05] 2023-01-19
#### Fixed
- Fix build problems with Clang 15.0, [#377](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/377) by @ptr1337
### [525.78.01] 2023-01-05
### [525.60.13] 2022-12-05
### [525.60.11] 2022-11-28
#### Fixed
- Fixed nvenc compatibility with usermode clients [#104](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/104)
### [525.53] 2022-11-10
#### Changed
- GSP firmware is now distributed as multiple firmware files: this release has `gsp_tu10x.bin` and `gsp_ad10x.bin` replacing `gsp.bin` from previous releases.
@ -10,7 +32,6 @@
#### Fixed
- Fix build problems with Clang 15.0, [#377](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/377) by @ptr1337
- Add support for IBT (indirect branch tracking) on supported platforms, [#256](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/256) by @rnd-ash
- Return EINVAL when failing to allocate memory, [#280](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/280) by @YusufKhan-gamedev
- Fix various typos in nvidia/src/kernel, [#16](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/16) by @alexisgeoffrey

View File

@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 525.47.18.
version 525.47.22.
## How to Build
@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
525.47.18 driver release. This can be achieved by installing
525.47.22 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@ -167,7 +167,7 @@ for the target kernel.
## Compatible GPUs
The open-gpu-kernel-modules can be used on any Turing or later GPU
(see the table below). However, in the 525.47.18 release,
(see the table below). However, in the 525.47.22 release,
GeForce and Workstation support is still considered alpha-quality.
To enable use of the open kernel modules on GeForce and Workstation GPUs,
@ -175,7 +175,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
parameter to 1. For more details, see the NVIDIA GPU driver end user
README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/525.47.18/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/525.47.22/README/kernel_open.html
In the below table, if three IDs are listed, the first is the PCI Device
ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@ -720,9 +720,13 @@ Subsystem Device ID.
| NVIDIA A10 | 2236 10DE 1482 |
| NVIDIA A10G | 2237 10DE 152F |
| NVIDIA A10M | 2238 10DE 1677 |
| NVIDIA H800 PCIe | 2322 10DE 17A4 |
| NVIDIA H800 | 2324 10DE 17A6 |
| NVIDIA H800 | 2324 10DE 17A8 |
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C0 |
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 |
| NVIDIA H100 PCIe | 2331 10DE 1626 |
| NVIDIA H100 | 2339 10DE 17FC |
| NVIDIA GeForce RTX 3060 Ti | 2414 |
| NVIDIA GeForce RTX 3080 Ti Laptop GPU | 2420 |
| NVIDIA RTX A5500 Laptop GPU | 2438 |
@ -750,6 +754,7 @@ Subsystem Device ID.
| NVIDIA RTX A3000 12GB Laptop GPU | 24B9 |
| NVIDIA RTX A4500 Laptop GPU | 24BA |
| NVIDIA RTX A3000 12GB Laptop GPU | 24BB |
| NVIDIA GeForce RTX 3060 | 24C7 |
| NVIDIA GeForce RTX 3060 Ti | 24C9 |
| NVIDIA GeForce RTX 3080 Laptop GPU | 24DC |
| NVIDIA GeForce RTX 3070 Laptop GPU | 24DD |
@ -795,6 +800,8 @@ Subsystem Device ID.
| NVIDIA RTX A1000 Laptop GPU | 25B9 |
| NVIDIA RTX A2000 8GB Laptop GPU | 25BA |
| NVIDIA RTX A500 Laptop GPU | 25BB |
| NVIDIA RTX A1000 6GB Laptop GPU | 25BC |
| NVIDIA RTX A500 Laptop GPU | 25BD |
| NVIDIA GeForce RTX 3050 Ti Laptop GPU | 25E0 |
| NVIDIA GeForce RTX 3050 Laptop GPU | 25E2 |
| NVIDIA GeForce RTX 3050 Laptop GPU | 25E5 |
@ -809,15 +816,28 @@ Subsystem Device ID.
| NVIDIA RTX 6000 Ada Generation | 26B1 10DE 16A1 |
| NVIDIA RTX 6000 Ada Generation | 26B1 17AA 16A1 |
| NVIDIA L40 | 26B5 10DE 169D |
| NVIDIA L40 | 26B5 10DE 17DA |
| NVIDIA GeForce RTX 4080 | 2704 |
| NVIDIA GeForce RTX 4090 Laptop GPU | 2717 |
| NVIDIA RTX 5000 Ada Generation Laptop GPU | 2730 |
| NVIDIA GeForce RTX 4090 Laptop GPU | 2757 |
| NVIDIA GeForce RTX 4070 Ti | 2782 |
| NVIDIA GeForce RTX 4070 | 2786 |
| NVIDIA GeForce RTX 4080 Laptop GPU | 27A0 |
| NVIDIA RTX 4000 SFF Ada Generation | 27B0 1028 16FA |
| NVIDIA RTX 4000 SFF Ada Generation | 27B0 103C 16FA |
| NVIDIA RTX 4000 SFF Ada Generation | 27B0 10DE 16FA |
| NVIDIA RTX 4000 SFF Ada Generation | 27B0 17AA 16FA |
| NVIDIA L4 | 27B8 10DE 16CA |
| NVIDIA L4 | 27B8 10DE 16EE |
| NVIDIA RTX 4000 Ada Generation Laptop GPU | 27BA |
| NVIDIA RTX 3500 Ada Generation Laptop GPU | 27BB |
| NVIDIA GeForce RTX 4080 Laptop GPU | 27E0 |
| NVIDIA GeForce RTX 4070 Laptop GPU | 2820 |
| NVIDIA RTX 3000 Ada Generation Laptop GPU | 2838 |
| NVIDIA GeForce RTX 4070 Laptop GPU | 2860 |
| NVIDIA GeForce RTX 4060 Laptop GPU | 28A0 |
| NVIDIA GeForce RTX 4050 Laptop GPU | 28A1 |
| NVIDIA RTX 2000 Ada Generation Laptop GPU | 28B8 |
| NVIDIA GeForce RTX 4060 Laptop GPU | 28E0 |
| NVIDIA GeForce RTX 4050 Laptop GPU | 28E1 |

View File

@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.47.18\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.47.22\"
EXTRA_CFLAGS += -Wno-unused-function

View File

@ -261,4 +261,22 @@ static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm)
#endif
}
/*
 * Set the given bits in vma->vm_flags.
 *
 * Since kernel commit bc292ab00f6c ("mm: introduce vma->vm_flags wrapper
 * functions"), vm_area_struct exposes vm_flags as const and writers must go
 * through vm_flags_set(); the NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS conftest
 * detects that kernel. Older kernels are handled by direct assignment.
 */
static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
#if !defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
    vma->vm_flags |= flags;
#else
    vm_flags_set(vma, flags);
#endif
}
/*
 * Clear the given bits in vma->vm_flags.
 *
 * Counterpart of nv_vm_flags_set(): on kernels where vm_area_struct declares
 * vm_flags const (NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS), the only legal
 * writer is the vm_flags_clear() accessor; otherwise the flags are masked
 * off directly.
 */
static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
#if !defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
    vma->vm_flags &= ~flags;
#else
    vm_flags_clear(vma, flags);
#endif
}
#endif // __NV_MM_H__

View File

@ -942,6 +942,23 @@ compile_test() {
compile_check_conftest "$CODE" "NV_VFIO_MIGRATION_OPS_PRESENT" "" "types"
;;
vfio_migration_ops_has_migration_get_data_size)
#
# Determine if vfio_migration_ops struct has .migration_get_data_size field.
#
# Added by commit in 4e016f969529f ("vfio: Add an option to get migration
# data size") in v6.2 kernel.
#
CODE="
#include <linux/pci.h>
#include <linux/vfio.h>
int conftest_mdev_vfio_migration_ops_has_migration_get_data_size(void) {
return offsetof(struct vfio_migration_ops, migration_get_data_size);
}"
compile_check_conftest "$CODE" "NV_VFIO_MIGRATION_OPS_HAS_MIGRATION_GET_DATA_SIZE" "" "types"
;;
mdev_parent)
#
# Determine if the struct mdev_parent type is present.
@ -5475,6 +5492,49 @@ compile_test() {
compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID" "" "types"
;;
vm_area_struct_has_const_vm_flags)
#
# Determine if the 'vm_area_struct' structure has
# const 'vm_flags'.
#
# A union of '__vm_flags' and 'const vm_flags' was added
# by commit bc292ab00f6c ("mm: introduce vma->vm_flags
# wrapper functions") in mm-stable branch (2023-02-09)
# of the akpm/mm maintainer tree.
#
CODE="
#include <linux/mm_types.h>
int conftest_vm_area_struct_has_const_vm_flags(void) {
return offsetof(struct vm_area_struct, __vm_flags);
}"
compile_check_conftest "$CODE" "NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS" "" "types"
;;
drm_driver_has_dumb_destroy)
#
# Determine if the 'drm_driver' structure has a 'dumb_destroy'
# function pointer.
#
# Removed by commit 96a7b60f6ddb2 ("drm: remove dumb_destroy
# callback") in v6.3 linux-next (2023-02-10).
#
CODE="
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
int conftest_drm_driver_has_dumb_destroy(void) {
return offsetof(struct drm_driver, dumb_destroy);
}"
compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_DUMB_DESTROY" "" "types"
;;
# When adding a new conftest entry, please use the correct format for
# specifying the relevant upstream Linux kernel commit.
#

View File

@ -921,7 +921,9 @@ static void nv_drm_update_drm_driver_features(void)
nv_drm_driver.dumb_create = nv_drm_dumb_create;
nv_drm_driver.dumb_map_offset = nv_drm_dumb_map_offset;
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
nv_drm_driver.dumb_destroy = nv_drm_dumb_destroy;
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
}

View File

@ -201,7 +201,7 @@ static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
nv_dev,
"Cannot create sg_table for NvKmsKapiMemory 0x%p",
nv_gem->pMemory);
return NULL;
return ERR_PTR(-ENOMEM);
}
sg_table = nv_drm_prime_pages_to_sg(nv_dev->dev,
@ -583,11 +583,13 @@ int nv_drm_dumb_map_offset(struct drm_file *file,
return ret;
}
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
/*
 * DRM dumb-buffer destroy callback. Only compiled in (and wired into
 * nv_drm_driver) when the kernel's struct drm_driver still has a
 * .dumb_destroy hook — see the NV_DRM_DRIVER_HAS_DUMB_DESTROY conftest;
 * the hook was removed upstream by commit 96a7b60f6ddb2.
 *
 * Deleting the GEM handle drops this file's reference to the buffer.
 * Returns 0 on success or a negative errno from drm_gem_handle_delete().
 */
int nv_drm_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle)
{
return drm_gem_handle_delete(file, handle);
}
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
#endif

View File

@ -97,9 +97,11 @@ int nv_drm_dumb_map_offset(struct drm_file *file,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
int nv_drm_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
struct drm_gem_object *nv_drm_gem_nvkms_prime_import(
struct drm_device *dev,

View File

@ -92,9 +92,9 @@ static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem,
return -EINVAL;
}
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags &= ~VM_IO;
vma->vm_flags |= VM_MIXEDMAP;
nv_vm_flags_clear(vma, VM_PFNMAP);
nv_vm_flags_clear(vma, VM_IO);
nv_vm_flags_set(vma, VM_MIXEDMAP);
return 0;
}

View File

@ -299,7 +299,7 @@ int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
ret = -EINVAL;
goto done;
}
vma->vm_flags &= ~VM_MAYWRITE;
nv_vm_flags_clear(vma, VM_MAYWRITE);
}
#endif

View File

@ -124,3 +124,5 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -94,11 +94,10 @@ struct nvidia_p2p_params {
} nvidia_p2p_params_t;
/*
* Capability flag for users to detect
* Macro for users to detect
* driver support for persistent pages.
*/
extern int nvidia_p2p_cap_persistent_pages;
#define NVIDIA_P2P_CAP_PERSISTENT_PAGES
#define NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
/*
* This API is not supported.
@ -173,11 +172,6 @@ struct nvidia_p2p_page_table {
* A pointer to the function to be invoked when the pages
* underlying the virtual address range are freed
* implicitly.
* If NULL, persistent pages will be returned.
* This means the pages underlying the range of GPU virtual memory
* will persist until explicitly freed by nvidia_p2p_put_pages().
* Persistent GPU memory mappings are not supported on PowerPC,
* MIG-enabled devices and vGPU.
* @param[in] data
* A non-NULL opaque pointer to private data to be passed to the
* callback function.
@ -190,12 +184,48 @@ struct nvidia_p2p_page_table {
* insufficient resources were available to complete the operation.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address,
int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address, uint64_t length,
struct nvidia_p2p_page_table **page_table,
void (*free_callback)(void *data), void *data);
/*
* @brief
* Pin and make the pages underlying a range of GPU virtual memory
* accessible to a third-party device. The pages will persist until
* explicitly freed by nvidia_p2p_put_pages_persistent().
*
* Persistent GPU memory mappings are not supported on PowerPC,
* MIG-enabled devices and vGPU.
*
* This API only supports pinned, GPU-resident memory, such as that provided
* by cudaMalloc().
*
* This API may sleep.
*
* @param[in] virtual_address
* The start address in the specified virtual address space.
* Address must be aligned to the 64KB boundary.
* @param[in] length
* The length of the requested P2P mapping.
* Length must be a multiple of 64KB.
* @param[out] page_table
* A pointer to an array of structures with P2P PTEs.
* @param[in] flags
* Must be set to zero for now.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -ENOMEM if the driver failed to allocate memory or if
* insufficient resources were available to complete the operation.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_get_pages_persistent(uint64_t virtual_address,
uint64_t length,
struct nvidia_p2p_page_table **page_table,
void (*free_callback)(void *data),
void *data);
uint32_t flags);
#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003
@ -268,6 +298,8 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
* Release a set of pages previously made accessible to
* a third-party device.
*
* This API may sleep.
*
* @param[in] p2p_token
* A token that uniquely identifies the P2P mapping.
* @param[in] va_space
@ -282,10 +314,33 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address,
int nvidia_p2p_put_pages(uint64_t p2p_token,
uint32_t va_space, uint64_t virtual_address,
struct nvidia_p2p_page_table *page_table);
/*
* @brief
* Release a set of persistent pages previously made accessible to
* a third-party device.
*
* This API may sleep.
*
* @param[in] virtual_address
* The start address in the specified virtual address space.
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
* @param[in] flags
* Must be set to zero for now.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_put_pages_persistent(uint64_t virtual_address,
struct nvidia_p2p_page_table *page_table,
uint32_t flags);
/*
* @brief
* Free a third-party P2P page table. (This function is a no-op.)

View File

@ -284,8 +284,9 @@ out:
return 0;
}
static void nv_mem_put_pages(struct sg_table *sg_head, void *context)
static void nv_mem_put_pages_common(int nc,
struct sg_table *sg_head,
void *context)
{
int ret = 0;
struct nv_mem_context *nv_mem_context =
@ -302,8 +303,13 @@ static void nv_mem_put_pages(struct sg_table *sg_head, void *context)
if (nv_mem_context->callback_task == current)
return;
ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
nv_mem_context->page_table);
if (nc) {
ret = nvidia_p2p_put_pages_persistent(nv_mem_context->page_virt_start,
nv_mem_context->page_table, 0);
} else {
ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
nv_mem_context->page_table);
}
#ifdef _DEBUG_ONLY_
/* Here we expect an error in real life cases that should be ignored - not printed.
@ -318,6 +324,16 @@ static void nv_mem_put_pages(struct sg_table *sg_head, void *context)
return;
}
/*
 * put_pages hook for the default peer-memory client. nc == 0 makes
 * nv_mem_put_pages_common() release the pinning via nvidia_p2p_put_pages()
 * (the non-persistent path).
 */
static void nv_mem_put_pages(struct sg_table *sg_head, void *context)
{
nv_mem_put_pages_common(0, sg_head, context);
}
/*
 * put_pages hook for the "_nc" peer-memory client (the client that uses
 * persistent pages). nc == 1 makes nv_mem_put_pages_common() release the
 * pinning via nvidia_p2p_put_pages_persistent().
 */
static void nv_mem_put_pages_nc(struct sg_table *sg_head, void *context)
{
nv_mem_put_pages_common(1, sg_head, context);
}
static void nv_mem_release(void *context)
{
struct nv_mem_context *nv_mem_context =
@ -396,8 +412,9 @@ static int nv_mem_get_pages_nc(unsigned long addr,
nv_mem_context->core_context = core_context;
nv_mem_context->page_size = GPU_PAGE_SIZE;
ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size,
&nv_mem_context->page_table, NULL, NULL);
ret = nvidia_p2p_get_pages_persistent(nv_mem_context->page_virt_start,
nv_mem_context->mapped_size,
&nv_mem_context->page_table, 0);
if (ret < 0) {
peer_err("error %d while calling nvidia_p2p_get_pages() with NULL callback\n", ret);
return ret;
@ -407,13 +424,13 @@ static int nv_mem_get_pages_nc(unsigned long addr,
}
static struct peer_memory_client nv_mem_client_nc = {
.acquire = nv_mem_acquire,
.get_pages = nv_mem_get_pages_nc,
.dma_map = nv_dma_map,
.dma_unmap = nv_dma_unmap,
.put_pages = nv_mem_put_pages,
.get_page_size = nv_mem_get_page_size,
.release = nv_mem_release,
.acquire = nv_mem_acquire,
.get_pages = nv_mem_get_pages_nc,
.dma_map = nv_dma_map,
.dma_unmap = nv_dma_unmap,
.put_pages = nv_mem_put_pages_nc,
.get_page_size = nv_mem_get_page_size,
.release = nv_mem_release,
};
#endif /* NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT */
@ -477,9 +494,6 @@ static int __init nv_mem_client_init(void)
}
// The nc client enables support for persistent pages.
// Thanks to this check, nvidia-peermem requires the new symbol from nvidia.ko, which
// prevents users to unintentionally load this module with unsupported nvidia.ko.
BUG_ON(!nvidia_p2p_cap_persistent_pages);
strcpy(nv_mem_client_nc.name, DRV_NAME "_nc");
strcpy(nv_mem_client_nc.version, DRV_VERSION);
reg_handle_nc = ib_register_peer_memory_client(&nv_mem_client_nc, NULL);

View File

@ -101,5 +101,6 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_vma_added_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += make_device_exclusive_range
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg

View File

@ -618,7 +618,7 @@ static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
// Using VM_DONTCOPY would be nice, but madvise(MADV_DOFORK) can reset that
// so we have to handle vm_open on fork anyway. We could disable MADV_DOFORK
// with VM_IO, but that causes other mapping issues.
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
nv_vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
vma->vm_ops = &uvm_vm_ops_managed;

View File

@ -153,7 +153,6 @@ done:
static NV_STATUS test_unexpected_completed_values(uvm_va_space_t *va_space)
{
NV_STATUS status;
uvm_gpu_t *gpu;
for_each_va_space_gpu(gpu, va_space) {

View File

@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2022 NVIDIA Corporation
Copyright (c) 2015-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@ -102,7 +102,7 @@ static NV_STATUS phys_mem_allocate_sysmem(uvm_page_tree_t *tree, NvLength size,
NvU64 dma_addr;
unsigned long flags = __GFP_ZERO;
uvm_memcg_context_t memcg_context;
uvm_va_space_t *va_space;
uvm_va_space_t *va_space = NULL;
struct mm_struct *mm = NULL;
if (tree->type == UVM_PAGE_TREE_TYPE_USER && tree->gpu_va_space && UVM_CGROUP_ACCOUNTING_SUPPORTED()) {

View File

@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2022 NVIDIA Corporation
Copyright (c) 2022-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@ -53,7 +53,7 @@ typedef enum
// The VA range is determined from either the uvm_va_range_t for managed
// allocations or the uvm_va_policy_node_t for HMM allocations.
//
typedef struct uvm_va_policy_struct
struct uvm_va_policy_struct
{
// Read duplication policy for this VA range (unset, enabled, or disabled).
uvm_read_duplication_policy_t read_duplication;
@ -66,7 +66,7 @@ typedef struct uvm_va_policy_struct
// their page tables updated to access the (possibly remote) pages.
uvm_processor_mask_t accessed_by;
} uvm_va_policy_t;
};
// Policy nodes are used for storing policies in HMM va_blocks.
// The va_block lock protects the tree so that invalidation callbacks can

View File

@ -452,7 +452,7 @@ static int nvidia_mmap_numa(
}
// Needed for the linux kernel for mapping compound pages
vma->vm_flags |= VM_MIXEDMAP;
nv_vm_flags_set(vma, VM_MIXEDMAP);
for (i = 0, addr = mmap_context->page_array[0]; i < pages;
addr = mmap_context->page_array[++i], start += PAGE_SIZE)
@ -596,7 +596,7 @@ int nvidia_mmap_helper(
}
up(&nvl->mmap_lock);
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND;
nv_vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND);
}
else
{
@ -663,15 +663,15 @@ int nvidia_mmap_helper(
NV_PRINT_AT(NV_DBG_MEMINFO, at);
vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED);
vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
nv_vm_flags_set(vma, VM_IO | VM_LOCKED | VM_RESERVED);
nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
}
if ((prot & NV_PROTECT_WRITEABLE) == 0)
{
vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot);
vma->vm_flags &= ~VM_WRITE;
vma->vm_flags &= ~VM_MAYWRITE;
nv_vm_flags_clear(vma, VM_WRITE);
nv_vm_flags_clear(vma, VM_MAYWRITE);
}
vma->vm_ops = &nv_vm_ops;

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -31,6 +31,11 @@
#include "nv-p2p.h"
#include "rmp2pdefines.h"
typedef enum nv_p2p_page_table_type {
NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT = 0,
NV_P2P_PAGE_TABLE_TYPE_PERSISTENT,
} nv_p2p_page_table_type_t;
typedef struct nv_p2p_dma_mapping {
struct list_head list_node;
struct nvidia_p2p_dma_mapping *dma_mapping;
@ -44,13 +49,9 @@ typedef struct nv_p2p_mem_info {
struct list_head list_head;
struct semaphore lock;
} dma_mapping_list;
NvBool bPersistent;
void *private;
} nv_p2p_mem_info_t;
int nvidia_p2p_cap_persistent_pages = 1;
EXPORT_SYMBOL(nvidia_p2p_cap_persistent_pages);
// declared and created in nv.c
extern void *nvidia_p2p_page_t_cache;
@ -238,6 +239,7 @@ static void nv_p2p_free_page_table(
}
static NV_STATUS nv_p2p_put_pages(
nv_p2p_page_table_type_t pt_type,
nvidia_stack_t * sp,
uint64_t p2p_token,
uint32_t va_space,
@ -246,9 +248,6 @@ static NV_STATUS nv_p2p_put_pages(
)
{
NV_STATUS status;
struct nv_p2p_mem_info *mem_info = NULL;
mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table);
/*
* rm_p2p_put_pages returns NV_OK if the page_table was found and
@ -258,8 +257,15 @@ static NV_STATUS nv_p2p_put_pages(
* rm_p2p_put_pages returns NV_ERR_OBJECT_NOT_FOUND if the page_table
* was already unlinked.
*/
if (mem_info->bPersistent)
if (pt_type == NV_P2P_PAGE_TABLE_TYPE_PERSISTENT)
{
struct nv_p2p_mem_info *mem_info = NULL;
/*
* It is safe to access persistent page_table as there is no async
* callback which can free it unlike non-persistent page_table.
*/
mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table);
status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table);
}
else
@ -273,7 +279,8 @@ static NV_STATUS nv_p2p_put_pages(
nv_p2p_free_page_table(*page_table);
*page_table = NULL;
}
else if (!mem_info->bPersistent && (status == NV_ERR_OBJECT_NOT_FOUND))
else if ((pt_type == NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT) &&
(status == NV_ERR_OBJECT_NOT_FOUND))
{
status = NV_OK;
*page_table = NULL;
@ -327,7 +334,8 @@ static void nv_p2p_mem_info_free_callback(void *data)
nv_p2p_free_platform_data(&mem_info->page_table);
}
int nvidia_p2p_get_pages(
static int nv_p2p_get_pages(
nv_p2p_page_table_type_t pt_type,
uint64_t p2p_token,
uint32_t va_space,
uint64_t virtual_address,
@ -376,9 +384,10 @@ int nvidia_p2p_get_pages(
*page_table = &(mem_info->page_table);
mem_info->bPersistent = (free_callback == NULL);
//asign length to temporary variable since do_div macro does in-place division
/*
* assign length to temporary variable since do_div macro does in-place
* division
*/
temp_length = length;
do_div(temp_length, page_size);
page_count = temp_length;
@ -405,7 +414,7 @@ int nvidia_p2p_get_pages(
goto failed;
}
if (mem_info->bPersistent)
if (pt_type == NV_P2P_PAGE_TABLE_TYPE_PERSISTENT)
{
void *gpu_info = NULL;
@ -415,12 +424,15 @@ int nvidia_p2p_get_pages(
goto failed;
}
status = rm_p2p_get_gpu_info(sp, virtual_address, length, &gpu_uuid, &gpu_info);
status = rm_p2p_get_gpu_info(sp, virtual_address, length,
&gpu_uuid, &gpu_info);
if (status != NV_OK)
{
goto failed;
}
(*page_table)->gpu_uuid = gpu_uuid;
rc = nvidia_dev_get_uuid(gpu_uuid, sp);
if (rc != 0)
{
@ -432,8 +444,10 @@ int nvidia_p2p_get_pages(
bGetUuid = NV_TRUE;
status = rm_p2p_get_pages_persistent(sp, virtual_address, length, &mem_info->private,
physical_addresses, &entries, *page_table, gpu_info);
status = rm_p2p_get_pages_persistent(sp, virtual_address, length,
&mem_info->private,
physical_addresses, &entries,
*page_table, gpu_info);
if (status != NV_OK)
{
goto failed;
@ -449,10 +463,11 @@ int nvidia_p2p_get_pages(
{
goto failed;
}
(*page_table)->gpu_uuid = gpu_uuid;
}
bGetPages = NV_TRUE;
(*page_table)->gpu_uuid = gpu_uuid;
status = os_alloc_mem((void *)&(*page_table)->pages,
(entries * sizeof(page)));
@ -516,10 +531,12 @@ failed:
{
os_free_mem(physical_addresses);
}
if (wreqmb_h != NULL)
{
os_free_mem(wreqmb_h);
}
if (rreqmb_h != NULL)
{
os_free_mem(rreqmb_h);
@ -527,7 +544,7 @@ failed:
if (bGetPages)
{
(void)nv_p2p_put_pages(sp, p2p_token, va_space,
(void)nv_p2p_put_pages(pt_type, sp, p2p_token, va_space,
virtual_address, page_table);
}
@ -546,8 +563,45 @@ failed:
return nvidia_p2p_map_status(status);
}
/*
 * Exported non-persistent P2P pinning entry point.
 *
 * A free_callback is mandatory here: callers that want callback-free
 * (persistent) pinning must use nvidia_p2p_get_pages_persistent() instead.
 * Forwards to the common nv_p2p_get_pages() with the NON_PERSISTENT
 * page-table type.
 *
 * Returns 0 on success, -EINVAL if free_callback is NULL, or the mapped
 * error from nv_p2p_get_pages().
 */
int nvidia_p2p_get_pages(
    uint64_t p2p_token,
    uint32_t va_space,
    uint64_t virtual_address,
    uint64_t length,
    struct nvidia_p2p_page_table **page_table,
    void (*free_callback)(void * data),
    void *data
)
{
    int rc = -EINVAL;

    if (free_callback != NULL)
    {
        rc = nv_p2p_get_pages(NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT,
                              p2p_token, va_space, virtual_address,
                              length, page_table, free_callback, data);
    }

    return rc;
}
EXPORT_SYMBOL(nvidia_p2p_get_pages);
/*
 * Exported persistent P2P pinning entry point.
 *
 * The pinning has no p2p_token/va_space and no free_callback; pages remain
 * pinned until nvidia_p2p_put_pages_persistent(). flags is reserved and
 * must be zero for now.
 *
 * Returns 0 on success, -EINVAL for a non-zero flags value, or the mapped
 * error from nv_p2p_get_pages().
 */
int nvidia_p2p_get_pages_persistent(
    uint64_t virtual_address,
    uint64_t length,
    struct nvidia_p2p_page_table **page_table,
    uint32_t flags
)
{
    int rc = -EINVAL;

    if (flags == 0)
    {
        rc = nv_p2p_get_pages(NV_P2P_PAGE_TABLE_TYPE_PERSISTENT, 0, 0,
                              virtual_address, length, page_table,
                              NULL, NULL);
    }

    return rc;
}
EXPORT_SYMBOL(nvidia_p2p_get_pages_persistent);
/*
* This function is a no-op, but is left in place (for now), in order to allow
* third-party callers to build and run without errors or warnings. This is OK,
@ -568,15 +622,14 @@ int nvidia_p2p_put_pages(
struct nvidia_p2p_page_table *page_table
)
{
struct nv_p2p_mem_info *mem_info = NULL;
NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0};
NV_STATUS status;
nvidia_stack_t *sp = NULL;
int rc = 0;
os_mem_copy(uuid, page_table->gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN);
mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);
if (page_table == NULL)
{
return 0;
}
rc = nv_kmem_cache_alloc_stack(&sp);
if (rc != 0)
@ -584,21 +637,56 @@ int nvidia_p2p_put_pages(
return -ENOMEM;
}
status = nv_p2p_put_pages(sp, p2p_token, va_space,
status = nv_p2p_put_pages(NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT,
sp, p2p_token, va_space,
virtual_address, &page_table);
if (mem_info->bPersistent)
{
nvidia_dev_put_uuid(uuid, sp);
}
nv_kmem_cache_free_stack(sp);
return nvidia_p2p_map_status(status);
}
EXPORT_SYMBOL(nvidia_p2p_put_pages);
/*
 * Release a persistent P2P pinning created by
 * nvidia_p2p_get_pages_persistent().
 *
 * The GPU UUID is copied out of the page table before the put, because the
 * put tears the page table down; the saved copy is then used to drop the
 * per-device reference taken at get time. flags is reserved and must be
 * zero for now.
 *
 * Returns 0 on success (a NULL page_table is a no-op), -EINVAL for a
 * non-zero flags value, -ENOMEM if no stack could be allocated, or the
 * mapped status from nv_p2p_put_pages().
 */
int nvidia_p2p_put_pages_persistent(
    uint64_t virtual_address,
    struct nvidia_p2p_page_table *page_table,
    uint32_t flags
)
{
    NvU8 saved_uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0};
    NV_STATUS put_status;
    nvidia_stack_t *stack = NULL;
    int rc = 0;

    if (flags != 0)
    {
        return -EINVAL;
    }

    if (page_table == NULL)
    {
        return 0;
    }

    rc = nv_kmem_cache_alloc_stack(&stack);
    if (rc != 0)
    {
        return -ENOMEM;
    }

    /* Save the UUID now: the put below frees the page table. */
    os_mem_copy(saved_uuid, page_table->gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN);

    put_status = nv_p2p_put_pages(NV_P2P_PAGE_TABLE_TYPE_PERSISTENT,
                                  stack, 0, 0, virtual_address, &page_table);

    nvidia_dev_put_uuid(saved_uuid, stack);

    nv_kmem_cache_free_stack(stack);

    return nvidia_p2p_map_status(put_status);
}
EXPORT_SYMBOL(nvidia_p2p_put_pages_persistent);
int nvidia_p2p_dma_map_pages(
struct pci_dev *peer,
struct nvidia_p2p_page_table *page_table,

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -94,11 +94,10 @@ struct nvidia_p2p_params {
} nvidia_p2p_params_t;
/*
* Capability flag for users to detect
* Macro for users to detect
* driver support for persistent pages.
*/
extern int nvidia_p2p_cap_persistent_pages;
#define NVIDIA_P2P_CAP_PERSISTENT_PAGES
#define NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
/*
* This API is not supported.
@ -173,11 +172,6 @@ struct nvidia_p2p_page_table {
* A pointer to the function to be invoked when the pages
* underlying the virtual address range are freed
* implicitly.
* If NULL, persistent pages will be returned.
* This means the pages underlying the range of GPU virtual memory
* will persist until explicitly freed by nvidia_p2p_put_pages().
* Persistent GPU memory mappings are not supported on PowerPC,
* MIG-enabled devices and vGPU.
* @param[in] data
* A non-NULL opaque pointer to private data to be passed to the
* callback function.
@ -190,12 +184,48 @@ struct nvidia_p2p_page_table {
* insufficient resources were available to complete the operation.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address,
int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address, uint64_t length,
struct nvidia_p2p_page_table **page_table,
void (*free_callback)(void *data), void *data);
/*
* @brief
* Pin and make the pages underlying a range of GPU virtual memory
* accessible to a third-party device. The pages will persist until
* explicitly freed by nvidia_p2p_put_pages_persistent().
*
* Persistent GPU memory mappings are not supported on PowerPC,
* MIG-enabled devices and vGPU.
*
* This API only supports pinned, GPU-resident memory, such as that provided
* by cudaMalloc().
*
* This API may sleep.
*
* @param[in] virtual_address
* The start address in the specified virtual address space.
* Address must be aligned to the 64KB boundary.
* @param[in] length
* The length of the requested P2P mapping.
* Length must be a multiple of 64KB.
* @param[out] page_table
* A pointer to an array of structures with P2P PTEs.
* @param[in] flags
* Must be set to zero for now.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -ENOMEM if the driver failed to allocate memory or if
* insufficient resources were available to complete the operation.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_get_pages_persistent(uint64_t virtual_address,
uint64_t length,
struct nvidia_p2p_page_table **page_table,
void (*free_callback)(void *data),
void *data);
uint32_t flags);
#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003
@ -268,6 +298,8 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
* Release a set of pages previously made accessible to
* a third-party device.
*
* This API may sleep.
*
* @param[in] p2p_token
* A token that uniquely identifies the P2P mapping.
* @param[in] va_space
@ -282,10 +314,33 @@ int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address,
int nvidia_p2p_put_pages(uint64_t p2p_token,
uint32_t va_space, uint64_t virtual_address,
struct nvidia_p2p_page_table *page_table);
/*
* @brief
* Release a set of persistent pages previously made accessible to
* a third-party device.
*
* This API may sleep.
*
* @param[in] virtual_address
* The start address in the specified virtual address space.
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
* @param[in] flags
* Must be set to zero for now.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_put_pages_persistent(uint64_t virtual_address,
struct nvidia_p2p_page_table *page_table,
uint32_t flags);
/*
* @brief
* Free a third-party P2P page table. (This function is a no-op.)

View File

@ -165,7 +165,7 @@ NvBool nv_ats_supported = NVCPU_IS_PPC64LE
/* nvos_ functions.. do not take a state device parameter */
static int nvos_count_devices(void);
static nv_alloc_t *nvos_create_alloc(struct device *, int);
static nv_alloc_t *nvos_create_alloc(struct device *, NvU64);
static int nvos_free_alloc(nv_alloc_t *);
/***
@ -280,11 +280,12 @@ void nv_sev_init(
static
nv_alloc_t *nvos_create_alloc(
struct device *dev,
int num_pages
NvU64 num_pages
)
{
nv_alloc_t *at;
unsigned int pt_size, i;
nv_alloc_t *at;
NvU64 pt_size;
unsigned int i;
NV_KZALLOC(at, sizeof(nv_alloc_t));
if (at == NULL)
@ -295,6 +296,24 @@ nv_alloc_t *nvos_create_alloc(
at->dev = dev;
pt_size = num_pages * sizeof(nvidia_pte_t *);
//
// Check for multiplication overflow and check whether num_pages value can fit in at->num_pages.
//
if ((num_pages != 0) && ((pt_size / num_pages) != sizeof(nvidia_pte_t*)))
{
nv_printf(NV_DBG_ERRORS, "NVRM: Invalid page table allocation - Number of pages exceeds max value.\n");
NV_KFREE(at, sizeof(nv_alloc_t));
return NULL;
}
at->num_pages = num_pages;
if (at->num_pages != num_pages)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Invalid page table allocation - requested size overflows.\n");
NV_KFREE(at, sizeof(nv_alloc_t));
return NULL;
}
if (os_alloc_mem((void **)&at->page_table, pt_size) != NV_OK)
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n");
@ -303,7 +322,6 @@ nv_alloc_t *nvos_create_alloc(
}
memset(at->page_table, 0, pt_size);
at->num_pages = num_pages;
NV_ATOMIC_SET(at->usage_count, 0);
for (i = 0; i < at->num_pages; i++)

View File

@ -230,6 +230,7 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += num_registered_fb
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_driver_has_driver_managed_dma
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build

View File

@ -213,6 +213,10 @@ namespace DisplayPort
virtual NvBool isDSCSupported() = 0;
virtual NvBool isDSCDecompressionSupported() = 0;
virtual NvBool isDSCPassThroughSupported() = 0;
virtual DscCaps getDscCaps() = 0;
//

View File

@ -335,6 +335,14 @@ namespace DisplayPort
//
bool bPowerDownPhyBeforeD3;
//
// Reset the MSTM_CTRL registers on Synaptics branch device irrespective of
// IRQ VECTOR register having stale message. Synaptics device needs to reset
// the topology before issue of new LAM message if previous LAM was not finished
// bug 3928070
//
bool bForceClearPendingMsg;
void sharedInit();
ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink);

View File

@ -447,6 +447,7 @@ namespace DisplayPort
bool getFECSupport();
NvBool isDSCPassThroughSupported();
NvBool isDSCSupported();
NvBool isDSCDecompressionSupported();
NvBool isDSCPossible();
bool isFECSupported();
bool readAndParseDSCCaps();

View File

@ -5539,7 +5539,8 @@ void ConnectorImpl::notifyLongPulse(bool statusConnected)
if (existingDev && existingDev->isFakedMuxDevice() && !bIsMuxOnDgpu)
{
DP_LOG((" NotifyLongPulse ignored as mux is not pointing to dGPU and there is a faked device"));
DP_LOG((" NotifyLongPulse ignored as mux is not pointing to dGPU and there is a faked device. Marking detect complete"));
sink->notifyDetectComplete();
return;
}
@ -5755,7 +5756,7 @@ void ConnectorImpl::notifyLongPulseInternal(bool statusConnected)
discoveryManager = new DiscoveryManager(messageManager, this, timer, hal);
// Check and clear if any pending message here
if (hal->clearPendingMsg())
if (hal->clearPendingMsg() || bForceClearPendingMsg)
{
DP_LOG(("DP> Stale MSG found: set branch to D3 and back to D0..."));
if (hal->isAtLeastVersion(1, 4))
@ -6513,6 +6514,7 @@ void ConnectorImpl::createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize)
// Initialize DSC state
newDev->dscCaps.bDSCSupported = true;
newDev->dscCaps.bDSCDecompressionSupported = true;
newDev->parseDscCaps(buffer, bufferSize);
dpMemCopy(newDev->rawDscCaps, buffer, DP_MIN(bufferSize, 16));
newDev->bDSCPossible = true;
@ -6797,6 +6799,7 @@ bool ConnectorImpl::updatePsrLinkState(bool bTrainLink)
{
// Bug 3438892 If the panel is turned off the reciever on its side,
// force panel link on by writting 600 = 1
this->hal->setDirtyLinkStatus(true);
if (this->isLinkLost())
{
hal->setPowerState(PowerStateD0);
@ -6961,5 +6964,6 @@ void ConnectorImpl::configInit()
bNoFallbackInPostLQA = 0;
LT2FecLatencyMs = 0;
bDscCapBasedOnParent = false;
bForceClearPendingMsg = false;
}

View File

@ -1508,7 +1508,11 @@ NvBool DeviceImpl::getDSCSupport()
{
if (FLD_TEST_DRF(_DPCD14, _DSC_SUPPORT, _DSC_SUPPORT, _YES, byte))
{
dscCaps.bDSCSupported = true;
dscCaps.bDSCDecompressionSupported = true;
}
if (FLD_TEST_DRF(_DPCD20, _DSC_SUPPORT, _PASS_THROUGH_SUPPORT, _YES, byte))
{
dscCaps.bDSCPassThroughSupported = true;
}
}
@ -1517,6 +1521,11 @@ NvBool DeviceImpl::getDSCSupport()
DP_LOG(("DP-DEV> DSC Support AUX READ failed for %s!", address.toString(sb)));
}
if (dscCaps.bDSCDecompressionSupported || dscCaps.bDSCPassThroughSupported)
{
dscCaps.bDSCSupported = true;
}
return dscCaps.bDSCSupported;
}
@ -1636,6 +1645,11 @@ NvBool DeviceImpl::isDSCSupported()
return dscCaps.bDSCSupported;
}
NvBool DeviceImpl::isDSCDecompressionSupported()
{
return dscCaps.bDSCDecompressionSupported;
}
NvBool DeviceImpl::isDSCPassThroughSupported()
{
return dscCaps.bDSCPassThroughSupported;
@ -1974,7 +1988,7 @@ void DeviceImpl::setDscDecompressionDevice(bool bDscCapBasedOnParent)
this->devDoingDscDecompression = this;
this->bDSCPossible = true;
}
else if (this->parent->isDSCSupported())
else if (this->parent->isDSCDecompressionSupported())
{
//
// This condition takes care of DSC capable sink devices
@ -1987,12 +2001,15 @@ void DeviceImpl::setDscDecompressionDevice(bool bDscCapBasedOnParent)
}
else
{
// This condition takes care of branch device capable of DSC.
this->devDoingDscDecompression = this;
this->bDSCPossible = true;
if (this->isDSCDecompressionSupported())
{
// This condition takes care of branch device capable of DSC decoding.
this->devDoingDscDecompression = this;
this->bDSCPossible = true;
}
}
}
else if (this->parent && this->parent->isDSCSupported())
}
else if (this->parent && this->parent->isDSCDecompressionSupported())
{
//
// This condition takes care of sink devices not capable of DSC
@ -2005,7 +2022,7 @@ void DeviceImpl::setDscDecompressionDevice(bool bDscCapBasedOnParent)
}
else
{
if (this->isDSCSupported())
if (this->isDSCDecompressionSupported())
{
this->bDSCPossible = true;
this->devDoingDscDecompression = this;

View File

@ -84,6 +84,17 @@ void ConnectorImpl::applyOuiWARs()
//
LT2FecLatencyMs = 57;
//
// This is to reset the MSTM control bit on the branch device. On this
// device, if continuous LAM message are sent very close then IRQ vector
// will fail to see stale/pending message and will not reset the MSTM_CTRL
// register. Currently making this specific to linux so as to have minimum
// effect on windows. Later proper fix for this will be generic.
//
#if defined(NV_UNIX)
bForceClearPendingMsg = true;
#endif
if (bDscMstCapBug3143315)
{
//

View File

@ -240,6 +240,7 @@ typedef enum
typedef struct DscCaps
{
NvBool bDSCSupported;
NvBool bDSCDecompressionSupported;
NvBool bDSCPassThroughSupported;
unsigned versionMajor, versionMinor;
unsigned rcBufferBlockSize;

View File

@ -43,18 +43,18 @@
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/VK526_25-165"
#define NV_BUILD_CHANGELIST_NUM (32673984)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/VK526_25-169"
#define NV_BUILD_CHANGELIST_NUM (32777847)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r525/VK526_25-165"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32673984)
#define NV_BUILD_NAME "rel/gpu_drv/r525/VK526_25-169"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32777847)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "VK526_25-24"
#define NV_BUILD_CHANGELIST_NUM (32673984)
#define NV_BUILD_BRANCH_VERSION "VK526_25-28"
#define NV_BUILD_CHANGELIST_NUM (32777696)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "531.54"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32673984)
#define NV_BUILD_NAME "531.83"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32777696)
#define NV_BUILD_BRANCH_BASE_VERSION R525
#endif
// End buildmeister python edited section

View File

@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "525.47.18"
#define NV_VERSION_STRING "525.47.22"
#else

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -24,6 +24,11 @@
#ifndef __gh100_dev_fsp_addendum_h__
#define __gh100_dev_fsp_addendum_h__
#define NV_GFW_FSP_UCODE_VERSION NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3(1)
#define NV_GFW_FSP_UCODE_VERSION_FULL 11:0
#define NV_GFW_FSP_UCODE_VERSION_MAJOR 11:8
#define NV_GFW_FSP_UCODE_VERSION_MINOR 7:0
//
// RM uses channel 0 for FSP EMEM on GH100.
//

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -64,5 +64,9 @@
#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2__DEVICE_MAP 0x00000016 /* */
#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2_VAL 31:0 /* RWIVF */
#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2_VAL_INIT 0x00000000 /* RWI-V */
#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3(i) (0x008f0330+(i)*4) /* RW-4A */
#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3__SIZE_1 4 /* */
#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3_VAL 31:0 /* RWIVF */
#define NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_3_VAL_INIT 0x00000000 /* RWI-V */
#endif // __gh100_dev_fsp_pri_h__

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -24,6 +24,14 @@
#ifndef __gh100_dev_gc6_island_h__
#define __gh100_dev_gc6_island_h__
#define NV_PGC6_SCI_SEC_TIMER_TIME_0 0x00118f54 /* RW-4R */
#define NV_PGC6_SCI_SEC_TIMER_TIME_0_NSEC 31:5 /* RWEUF */
#define NV_PGC6_SCI_SEC_TIMER_TIME_0_NSEC_ZERO 0x00000000 /* RWE-V */
#define NV_PGC6_SCI_SEC_TIMER_TIME_1 0x00118f58 /* RW-4R */
#define NV_PGC6_SCI_SEC_TIMER_TIME_1_NSEC 28:0 /* RWEUF */
#define NV_PGC6_SCI_SEC_TIMER_TIME_1_NSEC_ZERO 0x00000000 /* RWE-V */
#define NV_PGC6_SCI_SYS_TIMER_OFFSET_0 0x00118df4 /* RW-4R */
#define NV_PGC6_SCI_SYS_TIMER_OFFSET_0_UPDATE 0:0 /* RWEVF */
#define NV_PGC6_SCI_SYS_TIMER_OFFSET_0_UPDATE_DONE 0x00000000 /* R-E-V */

View File

@ -315,7 +315,8 @@ NvHdmi_QueryFRLConfig(NvHdmiPkt_Handle libHandle,
}
// if there is no FRL capability reported fail this call
if (pSinkCaps->linkMaxFRLRate == HDMI_FRL_DATA_RATE_NONE)
if ((pSrcCaps->linkMaxFRLRate == HDMI_FRL_DATA_RATE_NONE) ||
(pSinkCaps->linkMaxFRLRate == HDMI_FRL_DATA_RATE_NONE))
{
return NVHDMIPKT_FAIL;
}

View File

@ -2098,8 +2098,8 @@ NvU32 NvTiming_EDIDValidationMask(NvU8 *pEdid, NvU32 length, NvBool bIsStrongVal
// validate DTD blocks
pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pExt[((EIA861EXTENSION *)pExt)->offset];
while (pDTD->wDTPixelClock != 0 &&
(NvU8 *)pDTD - pExt < (int)sizeof(EIA861EXTENSION))
while ((pDTD->wDTPixelClock != 0) &&
(((NvU8 *)pDTD - pExt + sizeof(DETAILEDTIMINGDESCRIPTOR)) < ((NvU8)sizeof(EIA861EXTENSION) - 1)))
{
if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, NULL) != NVT_STATUS_SUCCESS)
{
@ -2342,8 +2342,8 @@ NvU32 NvTiming_EDIDStrongValidationMask(NvU8 *pEdid, NvU32 length)
// validate DTD blocks
pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pExt[((EIA861EXTENSION *)pExt)->offset];
while (pDTD->wDTPixelClock != 0 &&
(NvU8 *)pDTD - pExt < (int)sizeof(EIA861EXTENSION))
while ((pDTD->wDTPixelClock != 0) &&
(((NvU8 *)pDTD - pExt + sizeof(DETAILEDTIMINGDESCRIPTOR)) < ((NvU8)sizeof(EIA861EXTENSION) -1)))
{
if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, NULL) != NVT_STATUS_SUCCESS)
ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD);

View File

@ -397,7 +397,7 @@ void parse861ExtDetailedTiming(NvU8 *pEdidExt,
// Get all detailed timings in CEA ext block
pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pEdidExt[pEIA861->offset];
while((NvU8 *)pDTD < (pEdidExt + sizeof(EDIDV1STRUC)) && // Check that we're not going beyond this extension block.
while((NvU8 *)pDTD + sizeof(DETAILEDTIMINGDESCRIPTOR) < (pEdidExt + sizeof(EDIDV1STRUC) - 1) &&
pDTD->wDTPixelClock != 0)
{
NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
@ -1237,6 +1237,12 @@ NVT_STATUS get861ExtInfo(NvU8 *p, NvU32 size, NVT_EDID_CEA861_INFO *p861info)
return NVT_STATUS_ERR;
}
// DTD offset sanity check
if (p[2] >= 1 && p[2] <= 3)
{
return NVT_STATUS_ERR;
}
// don't do anything further if p861info is NULL
if (p861info == NULL)
{
@ -1299,6 +1305,11 @@ NVT_STATUS parseCta861DataBlockInfo(NvU8 *p,
tag = NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(p[i]);
payload = NVT_CEA861_GET_SHORT_DESCRIPTOR_SIZE(p[i]);
/*don't allow data colleciton totally size larger than [127 - 5 (tag, revision, offset, describing native video format, checksum)]*/
if ((i + payload > size) || (i + payload > 122))
{
return NVT_STATUS_ERR;
}
// move the pointer to the payload section or extended Tag Code
i++;

View File

@ -74,14 +74,23 @@ enum
/*!
* Read VRs
* Needed to be in sync with chips_a defines
*/
RM_SOE_CORE_CMD_GET_VOLTAGE_VALUES,
/*!
* Init PLM2 protected registers
*/
RM_SOE_CORE_CMD_INIT_L2_STATE
RM_SOE_CORE_CMD_INIT_L2_STATE,
/*!
* Read Power
*/
RM_SOE_CORE_CMD_GET_POWER_VALUES,
/*!
* Set NPORT interrupts
*/
RM_SOE_CORE_CMD_SET_NPORT_INTRS,
};
// Timeout for SOE reset callback function
@ -153,6 +162,18 @@ typedef struct
NvU8 cmdType;
} RM_SOE_CORE_CMD_L2_STATE;
typedef struct
{
NvU8 cmdType;
} RM_SOE_CORE_CMD_GET_POWER;
typedef struct
{
NvU8 cmdType;
NvU32 nport;
NvBool bEnable;
} RM_SOE_CORE_CMD_NPORT_INTRS;
typedef union
{
NvU8 cmdType;
@ -164,9 +185,10 @@ typedef union
RM_SOE_CORE_CMD_NPORT_TPROD_STATE nportTprodState;
RM_SOE_CORE_CMD_GET_VOLTAGE getVoltage;
RM_SOE_CORE_CMD_L2_STATE l2State;
RM_SOE_CORE_CMD_GET_POWER getPower;
RM_SOE_CORE_CMD_NPORT_INTRS nportIntrs;
} RM_SOE_CORE_CMD;
typedef struct
{
NvU8 msgType;
@ -176,9 +198,19 @@ typedef struct
NvU32 hvdd_mv;
} RM_SOE_CORE_MSG_GET_VOLTAGE;
typedef struct
{
NvU8 msgType;
NvU8 flcnStatus;
NvU32 vdd_w;
NvU32 dvdd_w;
NvU32 hvdd_w;
} RM_SOE_CORE_MSG_GET_POWER;
typedef union
{
NvU8 msgType;
RM_SOE_CORE_MSG_GET_VOLTAGE getVoltage;
RM_SOE_CORE_MSG_GET_POWER getPower;
} RM_SOE_CORE_MSG;
#endif // _SOECORE_H_

View File

@ -751,6 +751,19 @@ typedef struct
NvU32 hvdd_mv;
} NVSWITCH_CTRL_GET_VOLTAGE_PARAMS;
/*
* CTRL_NVSWITCH_GET_POWER
*
* Zero(0) indicates that a measurement is not available
* on the current platform.
*/
typedef struct
{
NvU32 vdd_w;
NvU32 dvdd_w;
NvU32 hvdd_w;
} NVSWITCH_GET_POWER_PARAMS;
/*
* CTRL_NVSWITCH_GET_ERRORS
*
@ -3534,6 +3547,15 @@ typedef struct
#define NVSWITCH_CTRL_I2C_MESSAGE_LENGTH_MAX 256
typedef enum
{
NVSWITCH_I2C_ACQUIRER_NONE = 0,
NVSWITCH_I2C_ACQUIRER_UNKNOWN,
NVSWITCH_I2C_ACQUIRER_IOCTL, // e.g. MODS
NVSWITCH_I2C_ACQUIRER_EXTERNAL, // e.g. Linux Direct
} NVSWITCH_I2C_ACQUIRER;
/*
* CTRL_NVSWITCH_I2C_INDEXED
*
@ -3816,8 +3838,9 @@ typedef struct
#define CTRL_NVSWITCH_CLEAR_COUNTERS 0x51
#define CTRL_NVSWITCH_SET_NVLINK_ERROR_THRESHOLD 0x52
#define CTRL_NVSWITCH_GET_NVLINK_ERROR_THRESHOLD 0x53
#define CTRL_NVSWITCH_GET_VOLTAGE 0x55
#define CTRL_NVSWITCH_GET_BOARD_PART_NUMBER 0x54
#define CTRL_NVSWITCH_GET_VOLTAGE 0x54
#define CTRL_NVSWITCH_GET_BOARD_PART_NUMBER 0x55
#define CTRL_NVSWITCH_GET_POWER 0x56
#ifdef __cplusplus
}

View File

@ -224,7 +224,8 @@
_op(NvlStatus, nvswitch_ctrl_set_nvlink_error_threshold, (nvswitch_device *device, NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_nvlink_error_threshold, (nvswitch_device *device, NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams), _arch) \
_op(NvlStatus, nvswitch_ctrl_therm_read_voltage, (nvswitch_device *device, NVSWITCH_CTRL_GET_VOLTAGE_PARAMS *info), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_board_part_number, (nvswitch_device *device, NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p), _arch)
_op(NvlStatus, nvswitch_ctrl_therm_read_power, (nvswitch_device *device, NVSWITCH_GET_POWER_PARAMS *info), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_board_part_number, (nvswitch_device *device, NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p), _arch) \
#define NVSWITCH_HAL_FUNCTION_LIST_LS10(_op, _arch) \
_op(NvlStatus, nvswitch_launch_ALI, (nvswitch_device *device), _arch) \

View File

@ -62,4 +62,11 @@ nvswitch_ctrl_therm_read_voltage_lr10
NVSWITCH_CTRL_GET_VOLTAGE_PARAMS *info
);
NvlStatus
nvswitch_ctrl_therm_read_power_lr10
(
nvswitch_device *device,
NVSWITCH_GET_POWER_PARAMS *info
);
#endif //_THERM_LR10_H_

View File

@ -497,8 +497,8 @@ typedef struct
NV_NPORT_PORTSTAT_LS10(_block, _reg, _idx, ), _data); \
}
#define NVSWITCH_DEFERRED_LINK_STATE_CHECK_INTERVAL_NS (10 * NVSWITCH_INTERVAL_1SEC_IN_NS)
#define NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS (10 * NVSWITCH_INTERVAL_1MSEC_IN_NS)
#define NVSWITCH_DEFERRED_LINK_STATE_CHECK_INTERVAL_NS (12 * NVSWITCH_INTERVAL_1SEC_IN_NS)
#define NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS (12 * NVSWITCH_INTERVAL_1MSEC_IN_NS)
// Struct used for passing around error masks in error handling functions
typedef struct
@ -792,7 +792,6 @@ typedef const struct
#define nvswitch_ctrl_get_info_ls10 nvswitch_ctrl_get_info_lr10
#define nvswitch_ctrl_set_switch_port_config_ls10 nvswitch_ctrl_set_switch_port_config_lr10
#define nvswitch_ctrl_get_fom_values_ls10 nvswitch_ctrl_get_fom_values_lr10
#define nvswitch_ctrl_get_throughput_counters_ls10 nvswitch_ctrl_get_throughput_counters_lr10
#define nvswitch_save_nvlink_seed_data_from_minion_to_inforom_ls10 nvswitch_save_nvlink_seed_data_from_minion_to_inforom_lr10
@ -868,7 +867,6 @@ NvlStatus nvswitch_ctrl_get_nvlink_status_ls10(nvswitch_device *device, NVSWITCH
NvlStatus nvswitch_ctrl_get_info_lr10(nvswitch_device *device, NVSWITCH_GET_INFO *p);
NvlStatus nvswitch_ctrl_set_switch_port_config_lr10(nvswitch_device *device, NVSWITCH_SET_SWITCH_PORT_CONFIG *p);
NvlStatus nvswitch_ctrl_get_fom_values_lr10(nvswitch_device *device, NVSWITCH_GET_FOM_VALUES_PARAMS *p);
NvlStatus nvswitch_ctrl_get_throughput_counters_lr10(nvswitch_device *device, NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p);
void nvswitch_save_nvlink_seed_data_from_minion_to_inforom_lr10(nvswitch_device *device, NvU32 linkId);
void nvswitch_store_seed_data_from_inforom_to_corelib_lr10(nvswitch_device *device);
@ -978,6 +976,7 @@ void nvswitch_link_disable_interrupts_ls10(nvswitch_device *device, NvU32 l
void nvswitch_execute_unilateral_link_shutdown_ls10(nvlink_link *link);
void nvswitch_init_dlpl_interrupts_ls10(nvlink_link *link);
void nvswitch_set_dlpl_interrupts_ls10(nvlink_link *link);
NvlStatus nvswitch_reset_and_drain_links_ls10(nvswitch_device *device, NvU64 link_mask);
void nvswitch_service_minion_all_links_ls10(nvswitch_device *device);
@ -997,9 +996,6 @@ NvlStatus nvswitch_launch_ALI_ls10(nvswitch_device *device);
NvlStatus nvswitch_ctrl_set_mc_rid_table_ls10(nvswitch_device *device, NVSWITCH_SET_MC_RID_TABLE_PARAMS *p);
NvlStatus nvswitch_ctrl_get_mc_rid_table_ls10(nvswitch_device *device, NVSWITCH_GET_MC_RID_TABLE_PARAMS *p);
void nvswitch_init_dlpl_interrupts_ls10(nvlink_link *link);
NvlStatus nvswitch_reset_and_drain_links_ls10(nvswitch_device *device, NvU64 link_mask);
void nvswitch_service_minion_all_links_ls10(nvswitch_device *device);
NvBool nvswitch_is_inforom_supported_ls10(nvswitch_device *device);

View File

@ -24,6 +24,21 @@
#ifndef _MINION_NVLINK_DEFINES_PUBLIC_H_
#define _MINION_NVLINK_DEFINES_PUBLIC_H_
//PAD REG READ API (Bug 2643883)
#define NV_MINION_UCODE_READUPHYPAD_ADDR 11:0
#define NV_MINION_UCODE_READUPHYPAD_LANE 15:12
//FIELD FOR DEBUG_MISC_i DATA REGISTERS
#define NV_MINION_DEBUG_MISC_0_LINK_STATE 7:0
#define NV_MINION_DEBUG_MISC_0_ISR_ID 15:8
#define NV_MINION_DEBUG_MISC_0_OTHER_DATA 31:16
// Recal values checks
#define NV_MINION_UCODE_L1_EXIT_MARGIN 100
#define NV_MINION_UCODE_L1_EXIT_MAX 200
#define NV_MINION_UCODE_RECOVERY_TIME 250
#define NV_MINION_UCODE_PEQ_TIME 96
// SUBCODES for DLCMD FAULT (uses DLCMDFAULR code) - dlCmdFault() - NVLINK_LINK_INT
typedef enum _MINION_STATUS
{
@ -31,4 +46,6 @@ typedef enum _MINION_STATUS
MINION_ALARM_BUSY = 80,
} MINION_STATUS;
#define LINKSTATUS_EMERGENCY_SHUTDOWN 0x29
#define LINKSTATUS_INITPHASE1 0x24
#endif // _MINION_NVLINK_DEFINES_PUBLIC_H_

View File

@ -45,5 +45,5 @@ NvlStatus nvswitch_soe_register_event_callbacks_ls10(nvswitch_device *device);
NvlStatus nvswitch_soe_restore_nport_state_ls10(nvswitch_device *device, NvU32 nport);
NvlStatus nvswitch_soe_issue_nport_reset_ls10(nvswitch_device *device, NvU32 nport);
void nvswitch_soe_init_l2_state_ls10(nvswitch_device *device);
NvlStatus nvswitch_soe_set_nport_interrupts_ls10(nvswitch_device *device, NvU32 nport, NvBool bEnable);
#endif //_SOE_LS10_H_

View File

@ -56,4 +56,11 @@ nvswitch_ctrl_therm_read_voltage_ls10
nvswitch_device *device,
NVSWITCH_CTRL_GET_VOLTAGE_PARAMS *info
);
NvlStatus
nvswitch_ctrl_therm_read_power_ls10
(
nvswitch_device *device,
NVSWITCH_GET_POWER_PARAMS *info
);
#endif //_THERM_LS10_H_

View File

@ -164,16 +164,6 @@ typedef struct
NvBool bBlockProtocol;
} NVSWITCH_I2C_HW_CMD, *PNVSWITCH_I2C_HW_CMD;
typedef enum
{
NVSWITCH_I2C_ACQUIRER_NONE = 0,
NVSWITCH_I2C_ACQUIRER_UNKNOWN,
NVSWITCH_I2C_ACQUIRER_IOCTL, // e.g. MODS
NVSWITCH_I2C_ACQUIRER_EXTERNAL, // e.g. Linux Direct
} NVSWITCH_I2C_ACQUIRER;
typedef enum {
i2cProfile_Standard,
i2cProfile_Fast,

View File

@ -590,7 +590,7 @@ const NvU32 soe_ucode_data_lr10_dbg[] = {
0xb3070bf4, 0xf80a00a4, 0xf8020a02, 0xffb9e400, 0xf4020a0f, 0x7cd9451b, 0x98000029, 0x903e049f,
0xf9180045, 0x00903308, 0x09f91828, 0x21009033, 0x5802f958, 0x93f003fe, 0x0c94b600, 0x08f4b9a6,
0xffe9e40f, 0x0c94b6ff, 0x0df4b9a6, 0x04ff980a, 0xd200f4b3, 0x350acfa0, 0x30f400f8, 0x05dcdff8,
0x52f90000, 0xa4b2ffbf, 0xfe0149fe, 0x99900142, 0xb2b0b21c, 0xa0d5b2c3, 0x0a14bd9f, 0x18229035,
0x52f90000, 0xa4b2ffbf, 0xfe0149fe, 0x99900142, 0xb2b0b21c, 0xa0d5b2c3, 0xbd350a9f, 0x18229014,
0x0046513e, 0x0bb24ab2, 0x2db2040c, 0x0028d77e, 0x8900ad33, 0xb329bf00, 0x900e0094, 0x00902011,
0x46513e04, 0x019eb900, 0x8904e9fd, 0xffff0000, 0xfcf094e9, 0x01f6f00b, 0x00ff00d9, 0x94e9ffff,
0xf00bbcf0, 0xf0d901b6, 0xfff0f0f0, 0xccf094e9, 0x01c6f00b, 0xccccccd9, 0x94e9ffcc, 0xf00bdcf0,
@ -2269,8 +2269,8 @@ const NvU32 soe_ucode_data_lr10_dbg[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xb22438cf, 0xcfd90bc8, 0xf23ebc55, 0x2e5c0e40,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x0d4a5d7d, 0x9c31ffb3, 0x95bc604f, 0x40cc834d,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xb63be2f4, 0x80eae4c6, 0xf2d546fa, 0xb745274e,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x226d94df, 0xdb2e0eeb, 0xd11c2f47, 0x7666acd9,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,

View File

@ -590,7 +590,7 @@ const NvU32 soe_ucode_data_lr10_prd[] = {
0xb3070bf4, 0xf80a00a4, 0xf8020a02, 0xffb9e400, 0xf4020a0f, 0x7cd9451b, 0x98000029, 0x903e049f,
0xf9180045, 0x00903308, 0x09f91828, 0x21009033, 0x5802f958, 0x93f003fe, 0x0c94b600, 0x08f4b9a6,
0xffe9e40f, 0x0c94b6ff, 0x0df4b9a6, 0x04ff980a, 0xd200f4b3, 0x350acfa0, 0x30f400f8, 0x05dcdff8,
0x52f90000, 0xa4b2ffbf, 0xfe0149fe, 0x99900142, 0xb2b0b21c, 0xa0d5b2c3, 0x0a14bd9f, 0x18229035,
0x52f90000, 0xa4b2ffbf, 0xfe0149fe, 0x99900142, 0xb2b0b21c, 0xa0d5b2c3, 0xbd350a9f, 0x18229014,
0x0046513e, 0x0bb24ab2, 0x2db2040c, 0x0028d77e, 0x8900ad33, 0xb329bf00, 0x900e0094, 0x00902011,
0x46513e04, 0x019eb900, 0x8904e9fd, 0xffff0000, 0xfcf094e9, 0x01f6f00b, 0x00ff00d9, 0x94e9ffff,
0xf00bbcf0, 0xf0d901b6, 0xfff0f0f0, 0xccf094e9, 0x01c6f00b, 0xccccccd9, 0x94e9ffcc, 0xf00bdcf0,
@ -2269,8 +2269,8 @@ const NvU32 soe_ucode_data_lr10_prd[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xb22438cf, 0xcfd90bc8, 0xf23ebc55, 0x2e5c0e40,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x0d4a5d7d, 0x9c31ffb3, 0x95bc604f, 0x40cc834d,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xb63be2f4, 0x80eae4c6, 0xf2d546fa, 0xb745274e,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x226d94df, 0xdb2e0eeb, 0xd11c2f47, 0x7666acd9,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,

View File

@ -3855,7 +3855,7 @@ nvswitch_initialize_device_state_lr10
}
else
{
NVSWITCH_PRINT(device, ERROR,
NVSWITCH_PRINT(device, WARN,
"%s: Skipping SPI init.\n",
__FUNCTION__);
}
@ -3874,7 +3874,7 @@ nvswitch_initialize_device_state_lr10
}
else
{
NVSWITCH_PRINT(device, ERROR,
NVSWITCH_PRINT(device, WARN,
"%s: Skipping SMBPBI init.\n",
__FUNCTION__);
}
@ -4579,17 +4579,6 @@ _nvswitch_get_info_revision_minor_ext
return (DRF_VAL(_PSMC, _BOOT_42, _MINOR_EXTENDED_REVISION, val));
}
static NvU32
_nvswitch_get_info_voltage
(
nvswitch_device *device
)
{
NvU32 voltage = 0;
return voltage;
}
static NvBool
_nvswitch_inforom_nvl_supported
(
@ -4769,7 +4758,7 @@ nvswitch_ctrl_get_info_lr10
p->info[i] = device->switch_pll.vco_freq_khz;
break;
case NVSWITCH_GET_INFO_INDEX_VOLTAGE_MVOLT:
p->info[i] = _nvswitch_get_info_voltage(device);
retval = -NVL_ERR_NOT_SUPPORTED;
break;
case NVSWITCH_GET_INFO_INDEX_PHYSICAL_ID:
p->info[i] = nvswitch_read_physical_id(device);
@ -6413,13 +6402,6 @@ nvswitch_ctrl_get_fom_values_lr10
return -NVL_BAD_ARGS;
}
if (nvswitch_is_link_in_reset(device, link))
{
NVSWITCH_PRINT(device, ERROR, "%s: link #%d is in reset\n",
__FUNCTION__, p->linkId);
return -NVL_ERR_INVALID_STATE;
}
status = nvswitch_minion_get_dl_status(device, p->linkId,
NV_NVLSTAT_TR16, 0, &statData);
p->figureOfMeritValues[0] = (NvU16) (statData & 0xFFFF);

View File

@ -314,3 +314,13 @@ nvswitch_ctrl_therm_read_voltage_lr10
return -NVL_ERR_NOT_SUPPORTED;
}
NvlStatus
nvswitch_ctrl_therm_read_power_lr10
(
nvswitch_device *device,
NVSWITCH_GET_POWER_PARAMS *info
)
{
return -NVL_ERR_NOT_SUPPORTED;
}

View File

@ -258,7 +258,7 @@ _nvswitch_initialize_route_interrupts
DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _EXTMCRID_ECC_LIMIT_ERR, _ENABLE) |
DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _RAM_ECC_LIMIT_ERR, _ENABLE) |
DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _INVALID_MCRID_ERR, _ENABLE);
// NOTE: _MC_TRIGGER_ERR is debug-use only
// NOTE: _MC_TRIGGER_ERR is debug-use only
}
static void
@ -456,8 +456,8 @@ _nvswitch_initialize_nport_interrupts_ls10
nvswitch_device *device
)
{
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
NvU32 val;
val =
@ -516,7 +516,7 @@ _nvswitch_initialize_nxbar_interrupts_ls10
DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_BURST_GT_9_DATA_VC, 1) |
DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _EGRESS_CDT_PARITY_ERROR, 1);
// Moving this L2 register access to SOE. Refer bug #3747687
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
NVSWITCH_BCAST_WR32_LS10(device, NXBAR, _NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, report_fatal);
#endif // 0
@ -532,7 +532,7 @@ _nvswitch_initialize_nxbar_interrupts_ls10
* IRQMASK is used to read in mask of interrupts
* IRQDEST is used to read in enabled interrupts that are routed to the HOST
*
* IRQSTAT & IRQMASK gives the pending interrupting on this minion
* IRQSTAT & IRQMASK gives the pending interrupting on this minion
*
* @param[in] device MINION on this device
* @param[in] instance MINION instance
@ -561,7 +561,7 @@ nvswitch_minion_service_falcon_interrupts_ls10
return -NVL_NOT_FOUND;
}
unhandled = pending;
unhandled = pending;
bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _WDTMR, 1);
if (nvswitch_test_flags(pending, bit))
@ -761,7 +761,7 @@ _nvswitch_service_priv_ring_ls10
if (pending != 0)
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_PRIV_ERROR,
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_PRIV_ERROR,
"Fatal, Unexpected PRI error\n");
NVSWITCH_LOG_FATAL_DATA(device, _HW, _HW_HOST_PRIV_ERROR, 2, 0, NV_FALSE, &pending);
@ -821,7 +821,7 @@ _nvswitch_collect_nport_error_info_ls10
}
while (register_start <= register_end);
*idx = i;
return NVL_SUCCESS;
}
@ -2177,7 +2177,7 @@ _nvswitch_service_ingress_nonfatal_ls10_err_status_1:
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_FIRST_1,
report.raw_first & report.mask);
}
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_STATUS_0, pending_0);
NVSWITCH_ENG_WR32(device, NPORT, , link, _INGRESS, _ERR_STATUS_1, pending_1);
@ -4131,13 +4131,13 @@ _nvswitch_service_npg_fatal_ls10
NvU32 link;
pending = NVSWITCH_ENG_RD32(device, NPG, , npg, _NPG, _NPG_INTERRUPT_STATUS);
if (pending == 0)
{
return -NVL_NOT_FOUND;
}
mask =
mask =
DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _FATAL) |
DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _FATAL) |
DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _FATAL) |
@ -4234,7 +4234,7 @@ _nvswitch_service_npg_nonfatal_ls10
return -NVL_NOT_FOUND;
}
mask =
mask =
DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _NONFATAL) |
DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _NONFATAL) |
DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _NONFATAL) |
@ -4286,10 +4286,11 @@ static NvlStatus
_nvswitch_service_nvldl_fatal_ls10
(
nvswitch_device *device,
NvU32 nvlipt_instance
NvU32 nvlipt_instance,
NvU64 intrLinkMask
)
{
NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask, runtimeErrorMask = 0;
NvU64 enabledLinkMask, localLinkMask, localIntrLinkMask, runtimeErrorMask = 0;
NvU32 i;
nvlink_link *link;
NvU32 clocksMask = NVSWITCH_PER_LINK_CLOCK_SET(RXCLK) | NVSWITCH_PER_LINK_CLOCK_SET(TXCLK);
@ -4297,11 +4298,22 @@ _nvswitch_service_nvldl_fatal_ls10
NVSWITCH_LINK_TRAINING_ERROR_INFO linkTrainingErrorInfo = { 0 };
NVSWITCH_LINK_RUNTIME_ERROR_INFO linkRuntimeErrorInfo = { 0 };
//
// The passed in interruptLinkMask should contain a link that is part of the
// given nvlipt instance
//
enabledLinkMask = nvswitch_get_enabled_link_mask(device);
localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance);
localEnabledLinkMask = enabledLinkMask & localLinkMask;
localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask;
FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask)
if (localIntrLinkMask == 0)
{
NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask)
{
link = nvswitch_get_link(device, i);
if (link == NULL)
@ -4343,7 +4355,7 @@ _nvswitch_service_nvldl_fatal_ls10
NVSWITCH_PRINT(device, ERROR,
"%s: NVLDL[0x%x, 0x%llx]: Unable to send Runtime Error bitmask: 0x%llx,\n",
__FUNCTION__,
nvlipt_instance, localLinkMask,
nvlipt_instance, localIntrLinkMask,
runtimeErrorMask);
}
@ -4869,19 +4881,31 @@ NvlStatus
_nvswitch_service_nvltlc_fatal_ls10
(
nvswitch_device *device,
NvU32 nvlipt_instance
NvU32 nvlipt_instance,
NvU64 intrLinkMask
)
{
NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask;
NvU64 enabledLinkMask, localLinkMask, localIntrLinkMask;
NvU32 i;
nvlink_link *link;
NvlStatus status = -NVL_MORE_PROCESSING_REQUIRED;
//
// The passed in interruptLinkMask should contain a link that is part of the
// given nvlipt instance
//
enabledLinkMask = nvswitch_get_enabled_link_mask(device);
localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance);
localEnabledLinkMask = enabledLinkMask & localLinkMask;
localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask;
FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask)
if (localIntrLinkMask == 0)
{
NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask)
{
link = nvswitch_get_link(device, i);
if (link == NULL)
@ -5314,6 +5338,12 @@ _nvswitch_emit_link_errors_nvldl_fatal_link_ls10
{
NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_UP, "LTSSM Fault Up", NV_FALSE);
}
bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1);
if (nvswitch_test_flags(pending, bit))
{
NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_DOWN, "LTSSM Fault Down", NV_FALSE);
}
}
static void
@ -5342,6 +5372,12 @@ _nvswitch_emit_link_errors_nvldl_nonfatal_link_ls10
nvswitch_configure_error_rate_threshold_interrupt_ls10(nvlink, NV_FALSE);
NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_SHORT_ERROR_RATE, "RX Short Error Rate");
}
bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_CRC_COUNTER, 1);
if (nvswitch_test_flags(pending, bit))
{
NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_CRC_COUNTER, "RX CRC Error Rate");
}
}
static void
@ -5437,11 +5473,11 @@ _nvswitch_deferred_link_state_check_ls10
void *fn_args
)
{
NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS *pErrorReportParams =
NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS *pErrorReportParams =
(NVSWITCH_DEFERRED_ERROR_REPORTING_ARGS*)fn_args;
NvU32 nvlipt_instance = pErrorReportParams->nvlipt_instance;
NvU32 link = pErrorReportParams->link;
ls10_device *chip_device;
ls10_device *chip_device;
nvlink_link *pLink;
NvU64 linkState;
@ -5532,13 +5568,13 @@ _nvswitch_deferred_link_errors_check_ls10
NvU32 nvlipt_instance = pErrorReportParams->nvlipt_instance;
NvU32 link = pErrorReportParams->link;
ls10_device *chip_device;
NvU32 pending, bit;
NvU32 pending;
chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
pending = chip_device->deferredLinkErrors[link].fatalIntrMask.dl;
bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1);
if (nvswitch_test_flags(pending, bit))
if (FLD_TEST_DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1U, pending) ||
FLD_TEST_DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1U, pending) )
{
nvswitch_create_deferred_link_state_check_task_ls10(device, nvlipt_instance, link);
}
@ -5581,10 +5617,10 @@ _nvswitch_create_deferred_link_errors_task_ls10
pErrorReportParams->nvlipt_instance = nvlipt_instance;
pErrorReportParams->link = link;
status = nvswitch_task_create_args(device, (void*)pErrorReportParams,
status = nvswitch_task_create_args(device, (void*)pErrorReportParams,
&_nvswitch_deferred_link_errors_check_ls10,
NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS,
NVSWITCH_TASK_TYPE_FLAGS_RUN_ONCE |
NVSWITCH_TASK_TYPE_FLAGS_RUN_ONCE |
NVSWITCH_TASK_TYPE_FLAGS_VOID_PTR_ARGS);
}
@ -5645,7 +5681,7 @@ _nvswitch_service_nvldl_nonfatal_link_ls10
if (nvswitch_test_flags(pending, bit))
{
chip_device->deferredLinkErrors[link].nonFatalIntrMask.dl |= bit;
_nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link);
_nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link);
nvswitch_clear_flags(&unhandled, bit);
}
@ -5666,7 +5702,9 @@ _nvswitch_service_nvldl_nonfatal_link_ls10
bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_CRC_COUNTER, 1);
if (nvswitch_test_flags(pending, bit))
{
NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_CRC_COUNTER, "RX CRC Counter");
chip_device->deferredLinkErrors[link].nonFatalIntrMask.dl |= bit;
_nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link);
nvswitch_clear_flags(&unhandled, bit);
//
@ -5698,20 +5736,33 @@ static NvlStatus
_nvswitch_service_nvldl_nonfatal_ls10
(
nvswitch_device *device,
NvU32 nvlipt_instance
NvU32 nvlipt_instance,
NvU64 intrLinkMask
)
{
NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask;
NvU64 localLinkMask, enabledLinkMask, localIntrLinkMask;
NvU32 i;
nvlink_link *link;
NvlStatus status;
NvlStatus return_status = -NVL_NOT_FOUND;
NvU32 clocksMask = NVSWITCH_PER_LINK_CLOCK_SET(RXCLK) | NVSWITCH_PER_LINK_CLOCK_SET(TXCLK);
enabledLinkMask = nvswitch_get_enabled_link_mask(device);
localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance);
localEnabledLinkMask = enabledLinkMask & localLinkMask;
FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask)
//
// The passed in interruptLinkMask should contain a link that is part of the
// given nvlipt instance
//
localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance);
enabledLinkMask = nvswitch_get_enabled_link_mask(device);
localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask;
if (localIntrLinkMask == 0)
{
NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask)
{
link = nvswitch_get_link(device, i);
if (link == NULL)
@ -6084,20 +6135,28 @@ static NvlStatus
_nvswitch_service_nvltlc_nonfatal_ls10
(
nvswitch_device *device,
NvU32 nvlipt_instance
NvU32 nvlipt_instance,
NvU64 intrLinkMask
)
{
NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask;
NvU64 localLinkMask, enabledLinkMask, localIntrLinkMask;
NvU32 i;
nvlink_link *link;
NvlStatus status;
NvlStatus return_status = NVL_SUCCESS;
enabledLinkMask = nvswitch_get_enabled_link_mask(device);
localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(nvlipt_instance);
localEnabledLinkMask = enabledLinkMask & localLinkMask;
enabledLinkMask = nvswitch_get_enabled_link_mask(device);
localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask;
FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask)
if (localIntrLinkMask == 0)
{
NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask)
{
link = nvswitch_get_link(device, i);
if (link == NULL)
@ -6199,6 +6258,16 @@ _nvswitch_service_nvlipt_lnk_status_ls10
nvswitch_corelib_training_complete_ls10(link);
nvswitch_init_buffer_ready(device, link, NV_TRUE);
}
else if (mode == NVLINK_LINKSTATE_FAULT)
{
//
// If we are here then a previous state transition caused
// the link to FAULT as there is no TL Link state requests
// that explicitly transitions a link to fault. If that is the
// case, set the DL interrupts so any errors can be handled
//
nvswitch_set_dlpl_interrupts_ls10(link);
}
}
NVSWITCH_UNHANDLED_CHECK(device, unhandled);
@ -6225,7 +6294,7 @@ _nvswitch_service_nvlipt_lnk_nonfatal_ls10
{
ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
nvlink_link *link_info = nvswitch_get_link(device, link);
NvU32 lnkStateRequest, lnkStateStatus;
NvU32 lnkStateRequest, linkState;
NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
NvU32 pending, bit, unhandled;
@ -6253,26 +6322,21 @@ _nvswitch_service_nvlipt_lnk_nonfatal_ls10
if (nvswitch_test_flags(pending, bit))
{
//
// Read back LINK_STATE_REQUESTS and LINK_STATE_STATUS registers
// If request == ACTIVE, LINK_STATE_STATUS == ACTIVE_PENDING, request == ERROR
// and there is a pending FAULT_UP interrupt then redo reset_and_drain since the
// last try failed
//
// Read back LINK_STATE_REQUESTS and TOP_LINK_STATE registers
// If request == ACTIVE and TOP_LINK_STATE == FAULT there is a pending
// fault on training so re-run reset_and_drain
// Mark that the defered link error mechanism as seeing a reset_and_train re-try so
// the deferred task needs to re-create itself instead of continuing with the linkstate
// checks
//
lnkStateStatus = NVSWITCH_LINK_RD32_LS10(device, link_info->linkNumber, NVLIPT_LNK,
_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS);
linkState = NVSWITCH_LINK_RD32_LS10(device, link_info->linkNumber, NVLDL,
_NVLDL, _TOP_LINK_STATE);
lnkStateRequest = NVSWITCH_LINK_RD32_LS10(device, link_info->linkNumber,
NVLIPT_LNK , _NVLIPT_LNK , _CTRL_LINK_STATE_REQUEST);
if(FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _REQUEST, _ACTIVE, lnkStateRequest) &&
!(FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _STATUS, _REQUEST_SUCCESSFUL, lnkStateRequest) ||
FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _STATUS, _INIT, lnkStateRequest))&&
FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS, _CURRENTLINKSTATE, _ACTIVE_PENDING, lnkStateStatus) &&
DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1) & chip_device->deferredLinkErrors[link].fatalIntrMask.dl)
linkState == NV_NVLDL_TOP_LINK_STATE_STATE_FAULT)
{
chip_device->deferredLinkErrors[link].bResetAndDrainRetry = NV_TRUE;
device->hal.nvswitch_reset_and_drain_links(device, NVBIT64(link));
@ -6335,77 +6399,81 @@ static NvlStatus
_nvswitch_service_nvlipt_link_nonfatal_ls10
(
nvswitch_device *device,
NvU32 instance
NvU32 instance,
NvU64 intrLinkMask
)
{
NvU32 i, globalLink, bit, intrLink;
NvU32 interruptingLinks = 0;
NvU32 lnkStatusChangeLinks = 0;
NvlStatus status;
NvU64 link_enable_mask;
link_enable_mask = ((NvU64)device->regkeys.link_enable_mask2 << 32 |
(NvU64)device->regkeys.link_enable_mask);
for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
NvU32 i, intrLink;
NvU64 localLinkMask, enabledLinkMask, localIntrLinkMask;
NvU64 interruptingLinks = 0;
NvU64 lnkStatusChangeLinks = 0;
NvlStatus status = NVL_SUCCESS;
NvlStatus retStatus = NVL_SUCCESS;
//
// The passed in interruptLinkMask should contain a link that is part of the
// given nvlipt instance
//
localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(instance);
enabledLinkMask = nvswitch_get_enabled_link_mask(device);
localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask;
if (localIntrLinkMask == 0)
{
globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
if ((NVBIT64(globalLink) & link_enable_mask) == 0)
NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask)
{
if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != instance)
{
continue;
NVSWITCH_ASSERT(0);
break;
}
intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0);
intrLink = NVSWITCH_LINK_RD32(device, i, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0);
if(intrLink)
{
interruptingLinks |= NVBIT(i);
interruptingLinks |= NVBIT64(i);
}
intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _INTR_STATUS);
intrLink = NVSWITCH_LINK_RD32(device, i, NVLIPT_LNK, _NVLIPT_LNK, _INTR_STATUS);
if(intrLink)
{
lnkStatusChangeLinks |= NVBIT(i);
lnkStatusChangeLinks |= NVBIT64(i);
}
}
FOR_EACH_INDEX_IN_MASK_END;
if(lnkStatusChangeLinks)
{
for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
{
bit = NVBIT(i);
globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
if (nvswitch_test_flags(lnkStatusChangeLinks, bit))
{
if( _nvswitch_service_nvlipt_lnk_status_ls10(device, instance, globalLink) != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, WARN, "%s: Could not process nvlipt link status interrupt. Continuing. LinkId %d\n",
__FUNCTION__, globalLink);
}
}
}
}
if(interruptingLinks)
FOR_EACH_INDEX_IN_MASK(64, i, lnkStatusChangeLinks)
{
for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
if(_nvswitch_service_nvlipt_lnk_status_ls10(device, instance, i) != NVL_SUCCESS)
{
bit = NVBIT(i);
globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
if (nvswitch_test_flags(interruptingLinks, bit))
{
status = _nvswitch_service_nvlipt_lnk_nonfatal_ls10(device, instance, globalLink);
if (status != NVL_SUCCESS && status != -NVL_NOT_FOUND)
{
return -NVL_MORE_PROCESSING_REQUIRED;
}
}
NVSWITCH_PRINT(device, WARN, "%s: Could not process nvlipt link status interrupt. Continuing. LinkId %d\n",
__FUNCTION__, i);
}
return NVL_SUCCESS;
}
else
FOR_EACH_INDEX_IN_MASK_END;
FOR_EACH_INDEX_IN_MASK(64, i, interruptingLinks)
{
return -NVL_NOT_FOUND;
status = _nvswitch_service_nvlipt_lnk_nonfatal_ls10(device, instance, i);
if (status != NVL_SUCCESS && status != -NVL_NOT_FOUND)
{
retStatus = -NVL_MORE_PROCESSING_REQUIRED;
}
}
FOR_EACH_INDEX_IN_MASK_END;
return retStatus;
}
@ -6431,7 +6499,7 @@ _nvswitch_service_minion_fatal_ls10
return -NVL_NOT_FOUND;
}
unhandled = pending;
unhandled = pending;
bit = DRF_NUM(_MINION, _MINION_INTR, _FALCON_STALL, 0x1);
if (nvswitch_test_flags(pending, bit))
@ -6478,11 +6546,27 @@ _nvswitch_service_nvlw_nonfatal_ls10
)
{
NvlStatus status[3];
NvU32 reg;
NvU64 intrLinkMask = 0;
// TODO: @achaudhry invert handling so nvlipt_lnk is first
status[0] = _nvswitch_service_nvldl_nonfatal_ls10(device, instance);
status[1] = _nvswitch_service_nvltlc_nonfatal_ls10(device, instance);
status[2] = _nvswitch_service_nvlipt_link_nonfatal_ls10(device, instance);
reg = NVSWITCH_ENG_RD32_LS10(device, NVLW, instance, _NVLW, _TOP_INTR_1_STATUS);
intrLinkMask = DRF_VAL(_NVLW, _TOP_INTR_1_STATUS, _LINK, reg);
//
// Shift the mask of interrupting links from the local to the
// NVLW instance to a global mask
//
intrLinkMask = intrLinkMask << (NVSWITCH_LINKS_PER_NVLW_LS10*instance);
// If there is no pending link interrupts then there is nothing to service
if (intrLinkMask == 0)
{
return NVL_SUCCESS;
}
status[0] = _nvswitch_service_nvldl_nonfatal_ls10(device, instance, intrLinkMask);
status[1] = _nvswitch_service_nvltlc_nonfatal_ls10(device, instance, intrLinkMask);
status[2] = _nvswitch_service_nvlipt_link_nonfatal_ls10(device, instance, intrLinkMask);
if ((status[0] != NVL_SUCCESS) && (status[0] != -NVL_NOT_FOUND) &&
(status[1] != NVL_SUCCESS) && (status[1] != -NVL_NOT_FOUND) &&
@ -6588,45 +6672,44 @@ static NvlStatus
_nvswitch_service_nvlipt_link_fatal_ls10
(
nvswitch_device *device,
NvU32 instance
NvU32 instance,
NvU64 intrLinkMask
)
{
NvU32 i, globalLink, bit, intrLink;
NvU32 interruptingLinks = 0;
NvU32 i, intrLink;
NvU64 localLinkMask, enabledLinkMask, localIntrLinkMask;
NvlStatus status = NVL_SUCCESS;
//read in error status of current link
for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
//
// The passed in interruptLinkMask should contain a link that is part of the
// given nvlipt instance
//
localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64_LS10(instance);
enabledLinkMask = nvswitch_get_enabled_link_mask(device);
localIntrLinkMask = localLinkMask & intrLinkMask & enabledLinkMask;
if (localIntrLinkMask == 0)
{
globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0);
if(intrLink)
{
interruptingLinks |= NVBIT(i);
}
NVSWITCH_PRINT(device, ERROR, "%s: Bad link mask provided for link interrupt servicing!\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
return -NVL_BAD_ARGS;
}
if(interruptingLinks)
// read in error status of current link
FOR_EACH_INDEX_IN_MASK(64, i, localIntrLinkMask)
{
for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
intrLink = NVSWITCH_LINK_RD32(device, i, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0);
if (intrLink != 0)
{
bit = NVBIT(i);
globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
if (nvswitch_test_flags(interruptingLinks, bit))
if( _nvswitch_service_nvlipt_lnk_fatal_ls10(device, instance, i) != NVL_SUCCESS)
{
if( _nvswitch_service_nvlipt_lnk_fatal_ls10(device, instance, globalLink) != NVL_SUCCESS)
{
return -NVL_MORE_PROCESSING_REQUIRED;
}
status = -NVL_MORE_PROCESSING_REQUIRED;
}
}
return NVL_SUCCESS;
}
else
{
return -NVL_NOT_FOUND;
}
FOR_EACH_INDEX_IN_MASK_END;
return status;
}
static NvlStatus
@ -6637,14 +6720,39 @@ _nvswitch_service_nvlw_fatal_ls10
)
{
NvlStatus status[6];
NvU64 intrLinkMask = 0;
NvU32 reg;
reg = NVSWITCH_ENG_RD32_LS10(device, NVLW, instance, _NVLW, _TOP_INTR_0_STATUS);
intrLinkMask = DRF_VAL(_NVLW, _TOP_INTR_0_STATUS, _LINK, reg);
//
// Shift the mask of interrupting links from the local to the
// NVLW instance to a global mask
//
intrLinkMask = intrLinkMask << (NVSWITCH_LINKS_PER_NVLW_LS10*instance);
status[0] = device->hal.nvswitch_service_minion_link(device, instance);
status[1] = _nvswitch_service_nvldl_fatal_ls10(device, instance);
status[2] = _nvswitch_service_nvltlc_fatal_ls10(device, instance);
status[3] = _nvswitch_service_minion_fatal_ls10(device, instance);
status[4] = _nvswitch_service_nvlipt_common_fatal_ls10(device, instance);
status[5] = _nvswitch_service_nvlipt_link_fatal_ls10(device, instance);
status[1] = _nvswitch_service_minion_fatal_ls10(device, instance);
status[2] = _nvswitch_service_nvlipt_common_fatal_ls10(device, instance);
//
// If there is a pending link interrupt on this nvlw instance then service
// those interrupts in the handlers below. Otherwise, mark the status's
// as success as there is nothing to service
//
if (intrLinkMask != 0)
{
status[3] = _nvswitch_service_nvldl_fatal_ls10(device, instance, intrLinkMask);
status[4] = _nvswitch_service_nvltlc_fatal_ls10(device, instance, intrLinkMask);
status[5] = _nvswitch_service_nvlipt_link_fatal_ls10(device, instance, intrLinkMask);
}
else
{
status[3] = NVL_SUCCESS;
status[4] = NVL_SUCCESS;
status[5] = NVL_SUCCESS;
}
if (status[0] != NVL_SUCCESS && status[0] != -NVL_NOT_FOUND &&
status[1] != NVL_SUCCESS && status[1] != -NVL_NOT_FOUND &&
@ -7068,7 +7176,8 @@ nvswitch_service_nvldl_fatal_link_ls10
{
ls10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LS10(device);
NvU32 pending, bit, unhandled;
NvBool bSkipIntrClear = NV_FALSE;
NvU32 dlDeferredIntrLinkMask = 0;
NvBool bRequireResetAndDrain = NV_FALSE;
NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
@ -7119,13 +7228,6 @@ nvswitch_service_nvldl_fatal_link_ls10
nvswitch_clear_flags(&unhandled, bit);
}
bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1);
if (nvswitch_test_flags(pending, bit))
{
NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_DOWN, "LTSSM Fault Down", NV_FALSE);
nvswitch_clear_flags(&unhandled, bit);
}
bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_PROTOCOL, 1);
if (nvswitch_test_flags(pending, bit))
{
@ -7155,22 +7257,19 @@ nvswitch_service_nvldl_fatal_link_ls10
}
//
// Note: LTSSM_FAULT_UP must be the last interrupt serviced in the NVLDL
// Note: LTSSM_FAULT_{UP/DOWN} must be the last interrupt serviced in the NVLDL
// Fatal tree. The last step of handling this interrupt is going into the
// reset_and_drain flow for the given link which will shutdown and reset
// the link. The reset portion will also wipe away any link state including
// pending DL interrupts. In order to log all error before wiping that state,
// service all other interrupts before this one
//
bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1);
bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1);
if (nvswitch_test_flags(pending, bit))
{
chip_device->deferredLinkErrors[link].fatalIntrMask.dl |= bit;
_nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link);
dlDeferredIntrLinkMask |= bit;
nvswitch_clear_flags(&unhandled, bit);
device->hal.nvswitch_reset_and_drain_links(device, NVBIT64(link));
//
// Since reset and drain will reset the link, including clearing
@ -7178,7 +7277,46 @@ nvswitch_service_nvldl_fatal_link_ls10
// where link clocks will not be on after reset and drain so there
// maybe PRI errors on writing to the register
//
bSkipIntrClear = NV_TRUE;
bRequireResetAndDrain = NV_TRUE;
}
bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1);
if (nvswitch_test_flags(pending, bit))
{
dlDeferredIntrLinkMask |= bit;
nvswitch_clear_flags(&unhandled, bit);
//
// Since reset and drain will reset the link, including clearing
// pending interrupts, skip the clear write below. There are cases
// where link clocks will not be on after reset and drain so there
// maybe PRI errors on writing to the register
//
bRequireResetAndDrain = NV_TRUE;
}
if (bRequireResetAndDrain)
{
//
// If there is a link state callback enabled for this link then
// we hit a consecutive FAULT_UP error. set bResetAndDrainRetry
// so the current callback on completion can create a new
// callback to retry the link state check to account for the added
// delay caused by taking a 2nd fault and having to re-train
//
// If there is no callback enabled then set the error mask
// and create the link errors deferred task.
//
if (chip_device->deferredLinkErrors[link].bLinkStateCallBackEnabled)
{
chip_device->deferredLinkErrors[link].bResetAndDrainRetry = NV_TRUE;
}
else
{
chip_device->deferredLinkErrors[link].fatalIntrMask.dl = dlDeferredIntrLinkMask;
_nvswitch_create_deferred_link_errors_task_ls10(device, nvlipt_instance, link);
}
device->hal.nvswitch_reset_and_drain_links(device, NVBIT64(link));
}
NVSWITCH_UNHANDLED_CHECK(device, unhandled);
@ -7190,7 +7328,7 @@ nvswitch_service_nvldl_fatal_link_ls10
report.raw_enable ^ pending);
}
if (!bSkipIntrClear)
if (!bRequireResetAndDrain)
{
NVSWITCH_LINK_WR32(device, link, NVLDL, _NVLDL_TOP, _INTR, pending);
}
@ -7244,7 +7382,7 @@ nvswitch_service_minion_link_ls10
}
unhandled = pending;
FOR_EACH_INDEX_IN_MASK(32, localLinkIdx, pending)
{
link = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + localLinkIdx;
@ -7308,7 +7446,7 @@ nvswitch_service_minion_link_ls10
case NV_MINION_NVLINK_LINK_INTR_CODE_NEGOTIATION_CONFIG_ERR:
NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link Negotiation Config Err Interrupt", NV_FALSE);
break;
case NV_MINION_NVLINK_LINK_INTR_CODE_BADINIT:
case NV_MINION_NVLINK_LINK_INTR_CODE_BADINIT:
NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link BADINIT interrupt", NV_FALSE);
break;
case NV_MINION_NVLINK_LINK_INTR_CODE_PMFAIL:

View File

@ -1230,12 +1230,29 @@ nvswitch_init_dlpl_interrupts_ls10
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR, 0xffffffff);
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_SW2, 0xffffffff);
// Set the interrupt bits
nvswitch_set_dlpl_interrupts_ls10(link);
// Setup error rate thresholds
nvswitch_set_error_rate_threshold_ls10(link, NV_TRUE);
nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_TRUE);
}
void
nvswitch_set_dlpl_interrupts_ls10
(
nvlink_link *link
)
{
nvswitch_device *device = link->dev->pDevInfo;
NvU32 linkNumber = link->linkNumber;
// Stall tree routes to INTR_A which is connected to NVLIPT fatal tree
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_STALL_EN,
DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_REPLAY, _DISABLE) |
DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_RECOVERY_SHORT, _DISABLE) |
DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _LTSSM_FAULT_UP, _ENABLE) |
DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _LTSSM_FAULT_DOWN, _ENABLE) |
DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_FAULT_RAM, _ENABLE) |
DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_FAULT_INTERFACE, _ENABLE) |
DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_FAULT_SUBLINK_CHANGE, _DISABLE) |
@ -1262,9 +1279,6 @@ nvswitch_init_dlpl_interrupts_ls10
DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_CRC_COUNTER, _ENABLE) |
DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _LTSSM_PROTOCOL, _DISABLE) |
DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _MINION_REQUEST, _DISABLE));
nvswitch_set_error_rate_threshold_ls10(link, NV_TRUE);
nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_TRUE);
}
static NvU32

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -1103,10 +1103,17 @@ nvswitch_link_disable_interrupts_ls10
instance = link / NVSWITCH_LINKS_PER_NVLIPT_LS10;
localLinkIdx = link % NVSWITCH_LINKS_PER_NVLIPT_LS10;
NVSWITCH_NPORT_WR32_LS10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x0) |
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x0) |
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x0));
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_set_nport_interrupts_ls10(device, link, NV_FALSE);
}
else
{
NVSWITCH_NPORT_WR32_LS10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x0) |
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x0) |
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x0));
}
NVSWITCH_ENG_WR32(device, NVLW, , instance, _NVLW, _LINK_INTR_0_MASK(localLinkIdx),
DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _FATAL, 0x0) |
@ -1138,31 +1145,38 @@ _nvswitch_link_reset_interrupts_ls10
NvU32 eng_instance = link / NVSWITCH_LINKS_PER_NVLIPT_LS10;
NvU32 localLinkNum = link % NVSWITCH_LINKS_PER_NVLIPT_LS10;
NVSWITCH_NPORT_WR32_LS10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x1) |
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x1) |
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x1));
if (nvswitch_is_soe_supported(device))
{
nvswitch_soe_set_nport_interrupts_ls10(device, link, NV_TRUE);
}
else
{
NVSWITCH_NPORT_WR32_LS10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT,
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x1) |
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x1) |
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x1));
}
NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_0_MASK(localLinkNum),
DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _FATAL, 0x1) |
DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _NONFATAL, 0x0) |
DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0) |
DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x1) |
DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1, 0x0));
NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_0_MASK(localLinkNum),
DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _FATAL, 0x1) |
DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _NONFATAL, 0x0) |
DRF_NUM(_NVLW, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0) |
DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x1) |
DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1, 0x0));
NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_1_MASK(localLinkNum),
DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _FATAL, 0x0) |
DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _NONFATAL, 0x1) |
DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _CORRECTABLE, 0x1) |
DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x0) |
DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1, 0x1));
NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_1_MASK(localLinkNum),
DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _FATAL, 0x0) |
DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _NONFATAL, 0x1) |
DRF_NUM(_NVLW, _LINK_INTR_1_MASK, _CORRECTABLE, 0x1) |
DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR0, 0x0) |
DRF_NUM(_NVLW_LINK, _INTR_0_MASK, _INTR1, 0x1));
NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_2_MASK(localLinkNum),
DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _FATAL, 0x0) |
DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _NONFATAL, 0x0) |
DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0) |
DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR0, 0x0) |
DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR1, 0x0));
NVSWITCH_ENG_WR32(device, NVLW, , eng_instance, _NVLW, _LINK_INTR_2_MASK(localLinkNum),
DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _FATAL, 0x0) |
DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _NONFATAL, 0x0) |
DRF_NUM(_NVLW, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0) |
DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR0, 0x0) |
DRF_NUM(_NVLW_LINK, _INTR_2_MASK, _INTR1, 0x0));
// NVLIPT_LNK
regval = NVSWITCH_LINK_RD32_LS10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _INTR_CONTROL_LINK);
@ -1357,6 +1371,10 @@ nvswitch_reset_and_drain_links_ls10
NvU32 link_state;
NvU32 stat_data;
NvU32 link_intr_subcode;
NvBool bKeepPolling;
NvBool bIsLinkInEmergencyShutdown;
NvBool bAreDlClocksOn;
NVSWITCH_TIMEOUT timeout;
if (link_mask == 0)
{
@ -1425,10 +1443,9 @@ nvswitch_reset_and_drain_links_ls10
if (status != NVL_SUCCESS)
{
nvswitch_destroy_link(link_info);
return status;
}
return -NVL_ERR_INVALID_STATE;
continue;
}
//
@ -1438,10 +1455,42 @@ nvswitch_reset_and_drain_links_ls10
//
// Step 3.0 :
// Prior to starting port reset, perform unilateral shutdown on the
// LS10 side of the link, in case the links are not shutdown.
// Prior to starting port reset, ensure the links is in emergency shutdown
//
nvswitch_execute_unilateral_link_shutdown_ls10(link_info);
bIsLinkInEmergencyShutdown = NV_FALSE;
nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
do
{
bKeepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
status = nvswitch_minion_get_dl_status(device, link_info->linkNumber,
NV_NVLSTAT_UC01, 0, &stat_data);
if (status != NVL_SUCCESS)
{
continue;
}
link_state = DRF_VAL(_NVLSTAT, _UC01, _LINK_STATE, stat_data);
bIsLinkInEmergencyShutdown = (link_state == LINKSTATUS_EMERGENCY_SHUTDOWN) ?
NV_TRUE:NV_FALSE;
if (bIsLinkInEmergencyShutdown == NV_TRUE)
{
break;
}
}
while(bKeepPolling);
if (bIsLinkInEmergencyShutdown == NV_FALSE)
{
NVSWITCH_PRINT(device, ERROR,
"%s: link %d failed to enter emergency shutdown\n",
__FUNCTION__, link);
continue;
}
nvswitch_corelib_clear_link_state_ls10(link_info);
//
@ -1452,7 +1501,7 @@ nvswitch_reset_and_drain_links_ls10
// DEBUG_CLEAR (0x144) register
// - Assert NPortWarmReset[i] using the WARMRESET (0x140) register
//
// nvswitch_soe_issue_nport_reset_ls10(device, link);
nvswitch_soe_issue_nport_reset_ls10(device, link);
//
// Step 5.0 : Issue Minion request to perform the link reset sequence
@ -1483,6 +1532,10 @@ nvswitch_reset_and_drain_links_ls10
{
link_intr_subcode = DRF_VAL(_NVLSTAT, _MN00, _LINK_INTR_SUBCODE, stat_data);
}
else
{
continue;
}
if ((link_state == NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_MINION_REQUEST_FAIL) &&
(link_intr_subcode == MINION_ALARM_BUSY))
@ -1515,9 +1568,8 @@ nvswitch_reset_and_drain_links_ls10
if (status != NVL_SUCCESS)
{
nvswitch_destroy_link(link_info);
return status;
}
return status;
continue;
}
//
@ -1527,7 +1579,7 @@ nvswitch_reset_and_drain_links_ls10
// - Assert NPORT INITIALIZATION and program the state tracking RAMS
// - Restore NPORT state after reset
//
// nvswitch_soe_restore_nport_state_ls10(device, link);
nvswitch_soe_restore_nport_state_ls10(device, link);
// Step 7.0 : Re-program the routing table for DBEs
@ -1538,12 +1590,15 @@ nvswitch_reset_and_drain_links_ls10
status = nvlink_lib_register_link(device->nvlink_device, link_info);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to register link: 0x%x with the corelib\n",
__FUNCTION__, link);
nvswitch_destroy_link(link_info);
return status;
continue;
}
//
// Launch ALI training to re-initialize and train the links
// Step 9.0: Launch ALI training to re-initialize and train the links
// nvswitch_launch_ALI_link_training(device, link_info);
//
// Request active, but don't block. FM will come back and check
@ -1558,7 +1613,44 @@ nvswitch_reset_and_drain_links_ls10
NVSWITCH_PRINT(device, ERROR,
"%s: TL link state request to active for ALI failed for link: 0x%x\n",
__FUNCTION__, link);
continue;
}
bAreDlClocksOn = NV_FALSE;
nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
do
{
bKeepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
status = nvswitch_minion_get_dl_status(device, link_info->linkNumber,
NV_NVLSTAT_UC01, 0, &stat_data);
if (status != NVL_SUCCESS)
{
continue;
}
link_state = DRF_VAL(_NVLSTAT, _UC01, _LINK_STATE, stat_data);
bAreDlClocksOn = (link_state != LINKSTATUS_INITPHASE1) ?
NV_TRUE:NV_FALSE;
if (bAreDlClocksOn == NV_TRUE)
{
break;
}
}
while(bKeepPolling);
if (!bAreDlClocksOn)
{
NVSWITCH_PRINT(device, ERROR,
"%s: link: 0x%x doesn't have the TX/RX clocks on, skipping setting DL interrupts!\n",
__FUNCTION__, link);
continue;
}
nvswitch_set_dlpl_interrupts_ls10(link_info);
}
FOR_EACH_INDEX_IN_MASK_END;
@ -2647,6 +2739,46 @@ nvswitch_get_num_links_ls10
return NVSWITCH_NUM_LINKS_LS10;
}
//
// nvswitch_ctrl_get_fom_values_ls10
//
// Reads the figure-of-merit (FOM) values for the requested link from the
// MINION NV_NVLSTAT_TR16/TR17 DL status registers; each register packs two
// 16-bit FOM values (low/high halves) into p->figureOfMeritValues[0..3].
// p->numLanes is filled from the current sublink width.
//
// Returns -NVL_BAD_ARGS for an invalid link ID, -NVL_ERR_INVALID_STATE if
// the link is in reset, or the MINION DL-status query status otherwise.
//
NvlStatus
nvswitch_ctrl_get_fom_values_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_FOM_VALUES_PARAMS *p
)
{
    NvlStatus status;
    NvU32     statData;
    nvlink_link *link;

    link = nvswitch_get_link(device, p->linkId);
    if (link == NULL)
    {
        NVSWITCH_PRINT(device, ERROR, "%s: link #%d invalid\n",
            __FUNCTION__, p->linkId);
        return -NVL_BAD_ARGS;
    }

    if (nvswitch_is_link_in_reset(device, link))
    {
        NVSWITCH_PRINT(device, ERROR, "%s: link #%d is in reset\n",
            __FUNCTION__, p->linkId);
        return -NVL_ERR_INVALID_STATE;
    }

    //
    // Bug fix: the original code ignored the status of the TR16 read (it
    // was silently overwritten by the TR17 read), so FOM values could be
    // populated from an uninitialized/stale statData. Fail fast instead.
    //
    status = nvswitch_minion_get_dl_status(device, p->linkId,
                                           NV_NVLSTAT_TR16, 0, &statData);
    if (status != NVL_SUCCESS)
    {
        return status;
    }
    p->figureOfMeritValues[0] = (NvU16) (statData & 0xFFFF);
    p->figureOfMeritValues[1] = (NvU16) ((statData >> 16) & 0xFFFF);

    status = nvswitch_minion_get_dl_status(device, p->linkId,
                                           NV_NVLSTAT_TR17, 0, &statData);
    if (status != NVL_SUCCESS)
    {
        return status;
    }
    p->figureOfMeritValues[2] = (NvU16) (statData & 0xFFFF);
    p->figureOfMeritValues[3] = (NvU16) ((statData >> 16) & 0xFFFF);

    p->numLanes = nvswitch_get_sublink_width(device, p->linkId);

    return status;
}
void
nvswitch_set_fatal_error_ls10
@ -5338,7 +5470,7 @@ nvswitch_ctrl_get_board_part_number_ls10
if (!pInforom->OBD.bValid)
{
NVSWITCH_PRINT(device, ERROR, "OBD data is not available\n");
return -NVL_ERR_GENERIC;
return -NVL_ERR_NOT_SUPPORTED;
}
pOBDObj = &pInforom->OBD.object.v2;

View File

@ -301,7 +301,12 @@ nvswitch_ctrl_i2c_indexed_ls10
}
return nvswitch_ctrl_i2c_indexed_lr10(device, pParams);
}
if (pParams->port == NVSWITCH_I2C_PORT_I2CA)
{
pParams->flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _SPEED_MODE, _100KHZ, pParams->flags);
}
if (pI2c->soeI2CSupported)
{
return soeI2CAccess_HAL(device, pParams);

View File

@ -400,7 +400,7 @@ nvswitch_soe_init_l2_state_ls10
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_CORE;
cmd.hdr.size = sizeof(cmd);
cmd.hdr.size = RM_SOE_CMD_SIZE(CORE, L2_STATE);
pL2State = &cmd.cmd.core.l2State;
pL2State->cmdType = RM_SOE_CORE_CMD_INIT_L2_STATE;
@ -420,6 +420,65 @@ nvswitch_soe_init_l2_state_ls10
}
}
/*
 * @Brief : Enable/Disable NPORT interrupts
 *
 * Sends a blocking CORE SET_NPORT_INTRS command to the SOE falcon.
 *
 * @param[in] device
 * @param[in] nport
 * @param[in] bEnable  NV_TRUE to enable, NV_FALSE to disable
 *
 * @return NVL_SUCCESS on success, -NVL_ERR_INVALID_STATE if SOE is not
 *         supported, -NVL_ERR_GENERIC if the command could not be posted.
 */
NvlStatus
nvswitch_soe_set_nport_interrupts_ls10
(
    nvswitch_device *device,
    NvU32           nport,
    NvBool          bEnable
)
{
    RM_FLCN_CMD_SOE cmd;
    RM_SOE_CORE_CMD_NPORT_INTRS *pIntrCmd;
    NVSWITCH_TIMEOUT timeout;
    NvU32 seqDesc = 0;
    NV_STATUS status;

    if (!nvswitch_is_soe_supported(device))
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: SOE is not supported\n",
            __FUNCTION__);
        return -NVL_ERR_INVALID_STATE;
    }

    /* Build the CORE unit SET_NPORT_INTRS command. */
    nvswitch_os_memset(&cmd, 0, sizeof(cmd));
    cmd.hdr.unitId = RM_SOE_UNIT_CORE;
    cmd.hdr.size   = RM_SOE_CMD_SIZE(CORE, NPORT_INTRS);

    pIntrCmd = &cmd.cmd.core.nportIntrs;
    pIntrCmd->cmdType = RM_SOE_CORE_CMD_SET_NPORT_INTRS;
    pIntrCmd->nport   = nport;
    pIntrCmd->bEnable = bEnable;

    /* Post the command and block (up to 5 ms) for queue acceptance. */
    nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout);
    status = flcnQueueCmdPostBlocking(device, device->pSoe->pFlcn,
                                      (PRM_FLCN_CMD)&cmd,
                                      NULL,   // pMsg
                                      NULL,   // pPayload
                                      SOE_RM_CMDQ_LOG_ID,
                                      &seqDesc,
                                      &timeout);
    if (status != NV_OK)
    {
        NVSWITCH_PRINT(device, ERROR,
            "%s: Failed to send SET_NPORT_INTRS command to SOE, status 0x%x\n",
            __FUNCTION__, status);
        return -NVL_ERR_GENERIC;
    }

    return NVL_SUCCESS;
}
/*
* @Brief : Init sequence for SOE FSP RISCV image
*
@ -480,14 +539,6 @@ nvswitch_init_soe_ls10
return status;
}
//
// Set TRACEPC to stack mode for better ucode trace
// In Vulcan CR firmware, this is set to reduced mode in the SOE's manifest
//
data = flcnRiscvRegRead_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL);
data = FLD_SET_DRF(_PRISCV, _RISCV_TRACECTL, _MODE, _STACK, data);
flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL, data);
// Sanity the command and message queues as a final check
if (_nvswitch_soe_send_test_cmd(device) != NV_OK)
{

View File

@ -460,7 +460,7 @@ nvswitch_therm_soe_callback_ls10
}
//
// nvswitch_therm_read_voltage
// nvswitch_ctrl_therm_read_voltage
//
// Temperature and voltage are only available on SKUs which have thermal and
// voltage sensors.
@ -543,3 +543,86 @@ nvswitch_ctrl_therm_read_voltage_ls10
return NVL_SUCCESS;
}
//
// nvswitch_ctrl_therm_read_power
//
// Power is only available on SKUs which have thermal and
// voltage sensors.
//
// Sends a blocking CORE GET_POWER command to the SOE falcon and, on
// success, copies the vdd/dvdd/hvdd power values from the reply message
// into pParams. pParams is zeroed before the query so callers never see
// stale values on failure.
//
NvlStatus
nvswitch_ctrl_therm_read_power_ls10
(
    nvswitch_device *device,
    NVSWITCH_GET_POWER_PARAMS *pParams
)
{
    FLCN *pFlcn;
    NvU32 cmdSeqDesc;
    NV_STATUS status;
    NvU8 flcnStatus;
    RM_FLCN_CMD_SOE cmd;
    RM_FLCN_MSG_SOE msg;
    RM_SOE_CORE_CMD_GET_POWER *pGetPowerCmd;
    NVSWITCH_TIMEOUT timeout;

    // Power readings come from the SOE; without it there is nothing to query.
    if (!nvswitch_is_soe_supported(device))
    {
        return -NVL_ERR_NOT_SUPPORTED;
    }

    if (pParams == NULL)
    {
        return -NVL_BAD_ARGS;
    }

    pFlcn = device->pSoe->pFlcn;

    nvswitch_os_memset(pParams, 0, sizeof(NVSWITCH_GET_POWER_PARAMS));
    nvswitch_os_memset(&cmd, 0, sizeof(RM_FLCN_CMD_SOE));
    nvswitch_os_memset(&msg, 0, sizeof(RM_FLCN_MSG_SOE));

    // Build the CORE GET_POWER command and size the expected reply message.
    cmd.hdr.unitId = RM_SOE_UNIT_CORE;
    cmd.hdr.size = RM_SOE_CMD_SIZE(CORE, GET_POWER);
    msg.hdr.unitId = RM_SOE_UNIT_CORE;
    msg.hdr.size = RM_SOE_MSG_SIZE(CORE, GET_POWER);

    pGetPowerCmd = &cmd.cmd.core.getPower;
    pGetPowerCmd->cmdType = RM_SOE_CORE_CMD_GET_POWER_VALUES;

    cmdSeqDesc = 0;

    // Block for up to 5 seconds waiting for the SOE reply.
    nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 5, &timeout);
    status = flcnQueueCmdPostBlocking(device, pFlcn,
                                (PRM_FLCN_CMD)&cmd,
                                (PRM_FLCN_MSG)&msg, // pMsg
                                NULL, // pPayload
                                SOE_RM_CMDQ_LOG_ID,
                                &cmdSeqDesc,
                                &timeout);
    if (status != NV_OK)
    {
        NVSWITCH_PRINT(device, ERROR, "%s: Failed to read power 0x%x\n",
                        __FUNCTION__, status);
        return -NVL_ERR_INVALID_STATE;
    }

    // Transport succeeded; now inspect the status the SOE itself reported
    // inside the reply payload.
    flcnStatus = msg.msg.core.getPower.flcnStatus;
    if (flcnStatus != FLCN_OK)
    {
        if (flcnStatus == FLCN_ERR_MORE_PROCESSING_REQUIRED)
        {
            return -NVL_MORE_PROCESSING_REQUIRED;
        }
        else
        {
            return -NVL_ERR_GENERIC;
        }
    }

    // Copy the power values out of the reply. The "_w" suffix presumably
    // means watts -- TODO confirm against the SOE interface definition.
    pParams->vdd_w = msg.msg.core.getPower.vdd_w;
    pParams->dvdd_w = msg.msg.core.getPower.dvdd_w;
    pParams->hvdd_w = msg.msg.core.getPower.hvdd_w;

    return NVL_SUCCESS;
}

View File

@ -3253,13 +3253,26 @@ _nvswitch_ctrl_get_board_part_number
NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p
)
{
if (!nvswitch_is_inforom_supported(device))
if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
{
NVSWITCH_PRINT(device, ERROR, "InfoROM is not supported\n");
return -NVL_ERR_NOT_SUPPORTED;
}
NVSWITCH_PRINT(device, INFO,
"%s: Skipping retrieval of board part number on FSF\n",
__FUNCTION__);
return device->hal.nvswitch_ctrl_get_board_part_number(device, p);
nvswitch_os_memset(p, 0, sizeof(NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR));
return NVL_SUCCESS;
}
else
{
if (!nvswitch_is_inforom_supported(device))
{
NVSWITCH_PRINT(device, ERROR, "InfoROM is not supported\n");
return -NVL_ERR_NOT_SUPPORTED;
}
return device->hal.nvswitch_ctrl_get_board_part_number(device, p);
}
}
static NvlStatus
@ -4732,6 +4745,16 @@ _nvswitch_ctrl_therm_read_voltage
return device->hal.nvswitch_ctrl_therm_read_voltage(device, info);
}
// Thin ioctl dispatcher: forwards CTRL_NVSWITCH_GET_POWER to the per-chip
// HAL implementation (e.g. nvswitch_ctrl_therm_read_power_ls10 on LS10).
static NvlStatus
_nvswitch_ctrl_therm_read_power
(
    nvswitch_device *device,
    NVSWITCH_GET_POWER_PARAMS *info
)
{
    return device->hal.nvswitch_ctrl_therm_read_power(device, info);
}
NvlStatus
nvswitch_lib_ctrl
(
@ -5071,6 +5094,9 @@ nvswitch_lib_ctrl
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_VOLTAGE,
_nvswitch_ctrl_therm_read_voltage,
NVSWITCH_CTRL_GET_VOLTAGE_PARAMS);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_POWER,
_nvswitch_ctrl_therm_read_power,
NVSWITCH_GET_POWER_PARAMS);
default:
nvswitch_os_print(NVSWITCH_DBG_LEVEL_INFO, "unknown ioctl %x\n", cmd);

View File

@ -90,8 +90,10 @@ nvswitch_smbpbi_post_init
if (status == NVL_SUCCESS)
{
#if defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS)
nvswitch_lib_smbpbi_log_sxid(device, NVSWITCH_ERR_NO_ERROR,
"NVSWITCH SMBPBI server is online.");
#endif // defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS)
NVSWITCH_PRINT(device, INFO, "%s: SMBPBI POST INIT completed\n", __FUNCTION__);
}

View File

@ -3701,6 +3701,9 @@ typedef struct NV2080_CTRL_GPU_GET_GFID_PARAMS {
* bEnable [IN]
* - Set to NV_TRUE if the GPU partition has been activated.
* - Set to NV_FALSE if the GPU partition will be deactivated.
* fabricPartitionId [IN]
 *     - Set the fabric manager partition ID during partition activation.
* - Ignored during partition deactivation.
*
* Possible status values returned are:
* NV_OK
@ -3716,6 +3719,7 @@ typedef struct NV2080_CTRL_GPU_GET_GFID_PARAMS {
typedef struct NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS {
NvU32 gfid;
NvBool bEnable;
NvU32 fabricPartitionId;
} NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS;
/*!
@ -4112,4 +4116,24 @@ typedef NV2080_CTRL_GPU_MIGRATABLE_OPS_CMN_PARAMS NV2080_CTRL_GPU_MIGRATABLE_OPS
#define NV2080_CTRL_GPU_MIGRATABLE_OPS_VGPU_PARAMS_MESSAGE_ID (0xA8U)
typedef NV2080_CTRL_GPU_MIGRATABLE_OPS_CMN_PARAMS NV2080_CTRL_GPU_MIGRATABLE_OPS_VGPU_PARAMS;
/*
* NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2
*
* This command returns NVENC software sessions information for the associate GPU.
* This command is similar to NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO but doesn't have
* embedded pointers.
*
* Check NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO for detailed information.
*/
#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS_MESSAGE_ID (0xA9U)
typedef struct NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS {
NvU32 sessionInfoTblEntry;
NV2080_CTRL_NVENC_SW_SESSION_INFO sessionInfoTbl[NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES];
} NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS;
#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2 (0x208001a9U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS_MESSAGE_ID" */
/* _ctrl2080gpu_h_ */

View File

@ -76,4 +76,67 @@ typedef struct NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS {
NvBool bMemoryProfilingPermitted;
} NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS;
/*!
* NVB0CC_CTRL_CMD_INTERNAL_FREE_PMA_STREAM
*
* Internal logic for PMA Stream Free
*/
#define NVB0CC_CTRL_CMD_INTERNAL_FREE_PMA_STREAM (0xb0cc0206) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS_MESSAGE_ID" */
#define NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS_MESSAGE_ID (0x6U)
typedef struct NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS {
/*!
* [in] The PMA channel index associated with a given PMA stream.
*/
NvU32 pmaChannelIdx;
} NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS;
/*!
* NVB0CC_CTRL_CMD_INTERNAL_GET_MAX_PMAS
*
* Get the maximum number of PMA channels
*/
#define NVB0CC_CTRL_CMD_INTERNAL_GET_MAX_PMAS (0xb0cc0207) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS_MESSAGE_ID" */
#define NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS_MESSAGE_ID (0x7U)
typedef struct NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS {
/*!
* [out] Max number of PMA channels
*/
NvU32 maxPmaChannels;
} NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS;
/*!
* NVB0CC_CTRL_CMD_INTERNAL_BIND_PM_RESOURCES
*
* Internally bind PM resources.
*/
#define NVB0CC_CTRL_CMD_INTERNAL_BIND_PM_RESOURCES (0xb0cc0208) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | 0x8" */
/*!
* NVB0CC_CTRL_CMD_INTERNAL_UNBIND_PM_RESOURCES
*
* Internally unbind PM resources.
*/
#define NVB0CC_CTRL_CMD_INTERNAL_UNBIND_PM_RESOURCES (0xb0cc0209) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | 0x9" */
/*!
* NVB0CC_CTRL_CMD_INTERNAL_RESERVE_HWPM_LEGACY
*
* Reserve legacy HWPM resources
*/
#define NVB0CC_CTRL_CMD_INTERNAL_RESERVE_HWPM_LEGACY (0xb0cc020a) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS_MESSAGE_ID" */
#define NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS_MESSAGE_ID (0xaU)
typedef struct NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS {
/*!
* [in] Enable ctxsw for HWPM.
*/
NvBool ctxsw;
} NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS;
/* _ctrlb0ccinternal_h_ */

View File

@ -481,6 +481,7 @@ static inline void HsIncrementNextIndex(
}
static inline void HsChangeSurfaceFlipRefCount(
NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo,
NvBool increase)
{
@ -488,7 +489,7 @@ static inline void HsChangeSurfaceFlipRefCount(
if (increase) {
nvEvoIncrementSurfaceRefCnts(pSurfaceEvo);
} else {
nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
}
}
}

View File

@ -69,8 +69,6 @@ NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev(
struct NvKmsPerOpenDev *pOpenDev);
const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst(
const struct NvKmsPerOpenDev *pOpenDev);
NVDevEvoPtr nvGetDevEvoFromOpenDev(
const struct NvKmsPerOpenDev *pOpenDev);
void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32);

View File

@ -47,7 +47,8 @@ void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceRefCnts(NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo);
NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo);

View File

@ -118,7 +118,7 @@ SetCursorImage(NVDispEvoPtr pDispEvo,
}
if (pSurfaceEvoOld) {
nvEvoDecrementSurfaceRefCnts(pSurfaceEvoOld);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvoOld);
}
pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo = pSurfaceEvoNew;

View File

@ -2368,7 +2368,7 @@ static void ChangeSurfaceFlipRefCount(
if (increase) {
nvEvoIncrementSurfaceRefCnts(pSurfaceEvo);
} else {
nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
}
}
}

View File

@ -1835,16 +1835,21 @@ static void HsConfigInitFlipQueue(
}
static void HsConfigUpdateSurfaceRefCount(
NVDevEvoPtr pDevEvo,
const NVHsChannelConfig *pChannelConfig,
NvBool increase)
{
HsChangeSurfaceFlipRefCount(pChannelConfig->warpMesh.pSurface, increase);
HsChangeSurfaceFlipRefCount(
pDevEvo, pChannelConfig->warpMesh.pSurface, increase);
HsChangeSurfaceFlipRefCount(pChannelConfig->pBlendTexSurface, increase);
HsChangeSurfaceFlipRefCount(
pDevEvo, pChannelConfig->pBlendTexSurface, increase);
HsChangeSurfaceFlipRefCount(pChannelConfig->pOffsetTexSurface, increase);
HsChangeSurfaceFlipRefCount(
pDevEvo, pChannelConfig->pOffsetTexSurface, increase);
HsChangeSurfaceFlipRefCount(pChannelConfig->cursor.pSurfaceEvo, increase);
HsChangeSurfaceFlipRefCount(
pDevEvo, pChannelConfig->cursor.pSurfaceEvo, increase);
}
/*!
@ -2444,6 +2449,7 @@ void nvHsConfigStart(
*/
if (pHsConfigOneHead->pHsChannel != NULL) {
HsConfigUpdateSurfaceRefCount(
pDevEvo,
&pHsConfigOneHead->channelConfig,
TRUE /* increase */);
}
@ -2454,6 +2460,7 @@ void nvHsConfigStart(
*/
if (pDispEvo->pHsChannel[apiHead] != NULL) {
HsConfigUpdateSurfaceRefCount(
pDevEvo,
&pDispEvo->pHsChannel[apiHead]->config,
FALSE /* increase */);
}

View File

@ -197,6 +197,8 @@ static void HsIoctlSetCursorImage(
NVHsChannelEvoRec *pHsChannel,
NVSurfaceEvoRec *pSurfaceEvo)
{
NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
/*
* Increment the refcnt of the new surface, and
* decrement the refcnt of the old surface.
@ -206,10 +208,10 @@ static void HsIoctlSetCursorImage(
*/
HsChangeSurfaceFlipRefCount(
pSurfaceEvo, TRUE /* increase */);
pDevEvo, pSurfaceEvo, TRUE /* increase */);
HsChangeSurfaceFlipRefCount(
pHsChannel->config.cursor.pSurfaceEvo, FALSE /* increase */);
pDevEvo, pHsChannel->config.cursor.pSurfaceEvo, FALSE /* increase */);
pHsChannel->config.cursor.pSurfaceEvo = pSurfaceEvo;

View File

@ -549,24 +549,25 @@ static NvBool HsFlipQueueEntryIsReady(
* Update the reference count of all the surfaces described in the pHwState.
*/
static void HsUpdateFlipQueueEntrySurfaceRefCount(
NVDevEvoPtr pDevEvo,
const NVFlipChannelEvoHwState *pHwState,
NvBool increase)
{
HsChangeSurfaceFlipRefCount(
pHwState->pSurfaceEvo[NVKMS_LEFT], increase);
pDevEvo, pHwState->pSurfaceEvo[NVKMS_LEFT], increase);
HsChangeSurfaceFlipRefCount(
pHwState->pSurfaceEvo[NVKMS_RIGHT], increase);
pDevEvo, pHwState->pSurfaceEvo[NVKMS_RIGHT], increase);
HsChangeSurfaceFlipRefCount(
pHwState->completionNotifier.surface.pSurfaceEvo, increase);
pDevEvo, pHwState->completionNotifier.surface.pSurfaceEvo, increase);
if (!pHwState->syncObject.usingSyncpt) {
HsChangeSurfaceFlipRefCount(
pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, increase);
pDevEvo, pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, increase);
HsChangeSurfaceFlipRefCount(
pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, increase);
pDevEvo, pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, increase);
}
}
@ -605,7 +606,7 @@ static void HsReleaseFlipQueueEntry(
* HeadSurface no longer needs to read from the surfaces in pHwState;
* decrement their reference counts.
*/
HsUpdateFlipQueueEntrySurfaceRefCount(pHwState, FALSE);
HsUpdateFlipQueueEntrySurfaceRefCount(pDevEvo, pHwState, FALSE);
}
/*!
@ -687,6 +688,7 @@ void nvHsPushFlipQueueEntry(
const NvU8 layer,
const NVFlipChannelEvoHwState *pHwState)
{
NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
NVHsChannelFlipQueueEntry *pEntry = nvCalloc(1, sizeof(*pEntry));
@ -703,7 +705,7 @@ void nvHsPushFlipQueueEntry(
/* Increment the ref counts on the surfaces in the flip queue entry. */
HsUpdateFlipQueueEntrySurfaceRefCount(&pEntry->hwState, TRUE);
HsUpdateFlipQueueEntrySurfaceRefCount(pDevEvo, &pEntry->hwState, TRUE);
/* "Fast forward" through existing flip queue entries that are ready. */
@ -2092,6 +2094,17 @@ static NvBool HsCanOmitNonSgHsUpdate(NVHsChannelEvoPtr pHsChannel)
const NVSwapGroupRec *pHeadSwapGroup =
pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead];
/*
* When fullscreen swapgroup flipping, updating
* non-swapgroup content at vblank is unnecessary and
* dangerous, since it results in releasing client
* semaphores before their contents have actually been
* displayed.
*/
if (pHsChannel->swapGroupFlipping) {
return NV_TRUE;
}
/*
* In the case of a fullscreen swapgroup, we can generally omit updating
* the headsurface entirely upon vblank as long as the client is
@ -2251,8 +2264,11 @@ static void HsVBlankCallback(NVDispEvoRec *pDispEvo,
*/
/*
* When fullscreen swapgroup flipping, we don't need to update
* non-swapgroup content at vblank.
* When fullscreen swapgroup flipping, updating
* non-swapgroup content at vblank is unnecessary and
* dangerous, since it results in releasing client
* semaphores before their contents have actually been
* displayed.
*/
if (!pHsChannel->swapGroupFlipping) {
nvHsNextFrame(pHsDevice, pHsChannel,

View File

@ -992,7 +992,7 @@ void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo,
nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
if (isOwner) {
nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
} else {
nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo);
}
@ -1037,7 +1037,7 @@ void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo,
/* Remove the handle from the calling client's namespace. */
nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
}
void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo,
@ -1075,15 +1075,13 @@ void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo)
pSurfaceEvo->structRefCnt++;
}
void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo)
void nvEvoDecrementSurfaceRefCnts(NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo)
{
nvAssert(pSurfaceEvo->rmRefCnt >= 1);
pSurfaceEvo->rmRefCnt--;
if (pSurfaceEvo->rmRefCnt == 0) {
NVDevEvoPtr pDevEvo =
nvGetDevEvoFromOpenDev(pSurfaceEvo->owner.pOpenDev);
/*
* Don't sync if this surface was registered as not requiring display
* hardware access, to WAR timeouts that result from OGL unregistering
@ -1288,7 +1286,7 @@ void nvEvoUnregisterDeferredRequestFifo(
pDeferredRequestFifo->fifo,
0);
nvEvoDecrementSurfaceRefCnts(pDeferredRequestFifo->pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pDeferredRequestFifo->pSurfaceEvo);
nvFree(pDeferredRequestFifo);
}

View File

@ -5568,13 +5568,6 @@ NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo)
}
#endif
NVDevEvoPtr nvGetDevEvoFromOpenDev(
const struct NvKmsPerOpenDev *pOpenDev)
{
nvAssert(pOpenDev != NULL);
return pOpenDev->pDevEvo;
}
const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev(
const struct NvKmsPerOpenDev *pOpenDev)
{

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 200-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 200-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -468,6 +468,9 @@
#define ESC_4000_G4_DEVID 0xA1C1
#define ESC_4000_G4_SSDEVID 0x871E
// Lenovo Tomcat Workstation
#define LENOVO_TOMCAT_DEVID 0x1B81
#define LENOVO_TOMCAT_SSDEVID 0x104e
// NVIDIA C51
#define NVIDIA_C51_DEVICE_ID_MIN 0x2F0

View File

@ -923,16 +923,23 @@ NV_STATUS osAllocPagesInternal(
if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE)))
nv->force_dma32_alloc = NV_TRUE;
status = nv_alloc_pages(
NV_GET_NV_STATE(pGpu),
NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
memdescGetContiguity(pMemDesc, AT_CPU),
memdescGetCpuCacheAttrib(pMemDesc),
pSys->getProperty(pSys,
PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
unencrypted,
memdescGetPteArray(pMemDesc, AT_CPU),
&pMemData);
if (NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount) > NV_U32_MAX)
{
status = NV_ERR_INVALID_LIMIT;
}
else
{
status = nv_alloc_pages(
NV_GET_NV_STATE(pGpu),
NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
memdescGetContiguity(pMemDesc, AT_CPU),
memdescGetCpuCacheAttrib(pMemDesc),
pSys->getProperty(pSys,
PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
unencrypted,
memdescGetPteArray(pMemDesc, AT_CPU),
&pMemData);
}
if (nv && nv->force_dma32_alloc)
nv->force_dma32_alloc = NV_FALSE;
@ -942,7 +949,7 @@ NV_STATUS osAllocPagesInternal(
{
return status;
}
//
// If the OS layer doesn't think in RM page size, we need to inflate the
// PTE array into RM pages.

View File

@ -167,12 +167,25 @@ const NvU8 * RmGetGpuUuidRaw(
)
{
NV_STATUS rmStatus;
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
OBJGPU *pGpu = NULL;
NvU32 gidFlags;
NvBool isApiLockTaken = NV_FALSE;
if (pNv->nv_uuid_cache.valid)
goto done;
return pNv->nv_uuid_cache.uuid;
if (!rmapiLockIsOwner())
{
rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU);
if (rmStatus != NV_OK)
{
return NULL;
}
isApiLockTaken = NV_TRUE;
}
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
//
// PBI is not present in simulation and the loop inside
@ -193,7 +206,7 @@ const NvU8 * RmGetGpuUuidRaw(
rmStatus = gpumgrSetUuid(pNv->gpu_id, pNv->nv_uuid_cache.uuid);
if (rmStatus != NV_OK)
{
return NULL;
goto err;
}
pNv->nv_uuid_cache.valid = NV_TRUE;
@ -209,45 +222,35 @@ const NvU8 * RmGetGpuUuidRaw(
gidFlags = DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1)
| DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_FORMAT,_BINARY);
if (!rmapiLockIsOwner())
{
rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU);
if (rmStatus != NV_OK)
{
return NULL;
}
isApiLockTaken = NV_TRUE;
}
if (pGpu == NULL)
{
if (isApiLockTaken == NV_TRUE)
{
rmapiLockRelease();
}
return NULL;
}
if (!pGpu)
goto err;
rmStatus = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags);
if (isApiLockTaken == NV_TRUE)
{
rmapiLockRelease();
}
if (rmStatus != NV_OK)
return NULL;
goto err;
if (!pGpu->gpuUuid.isInitialized)
return NULL;
goto err;
// copy the uuid from the OBJGPU uuid cache
os_mem_copy(pNv->nv_uuid_cache.uuid, pGpu->gpuUuid.uuid, GPU_UUID_LEN);
pNv->nv_uuid_cache.valid = NV_TRUE;
done:
if (isApiLockTaken)
{
rmapiLockRelease();
}
return pNv->nv_uuid_cache.uuid;
err:
if (isApiLockTaken)
{
rmapiLockRelease();
}
return NULL;
}
static NV_STATUS RmGpuUuidRawToString(

View File

@ -220,6 +220,7 @@ void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) {
pThis->setProperty(pThis, PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK, ((NvBool)(0 == 0)));
}
pThis->setProperty(pThis, PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE, ((NvBool)(0 == 0)));
pThis->setProperty(pThis, PDB_PROP_GPU_SRIOV_HEAVY_FORCE_INVALIDATE_ALL_PDBS_WAR_BUG3896322, ((NvBool)(0 != 0)));
pThis->boardId = ~0;
@ -313,6 +314,17 @@ void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) {
{
pThis->bSriovCapable = ((NvBool)(0 == 0));
}
// Hal field -- bEnableBar1SparseForFillPteMemUnmap
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
{
pThis->bEnableBar1SparseForFillPteMemUnmap = ((NvBool)(0 == 0));
}
// default
else
{
pThis->bEnableBar1SparseForFillPteMemUnmap = ((NvBool)(0 != 0));
}
}
NV_STATUS __nvoc_ctor_Object(Object* );

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -733,6 +733,7 @@ typedef struct
PSRIOV_P2P_INFO pP2PInfo;
NvBool bP2PAllocated;
NvU32 maxP2pGfid;
NvU32 p2pFabricPartitionId;
} _GPU_SRIOV_STATE;
// Max # of instances for GPU children
@ -842,6 +843,7 @@ struct OBJGPU {
NvBool PDB_PROP_GPU_IN_HIBERNATE;
NvBool PDB_PROP_GPU_IN_PM_CODEPATH;
NvBool PDB_PROP_GPU_IN_PM_RESUME_CODEPATH;
NvBool PDB_PROP_GPU_PM_RESUME_WAR_BUG_3936017_ENABLED;
NvBool PDB_PROP_GPU_STATE_INITIALIZED;
NvBool PDB_PROP_GPU_EMULATION;
NvBool PDB_PROP_GPU_PRIMARY_DEVICE;
@ -917,6 +919,7 @@ struct OBJGPU {
NvBool PDB_PROP_GPU_IS_MXM_3X;
NvBool PDB_PROP_GPU_GSYNC_III_ATTACHED;
NvBool PDB_PROP_GPU_QSYNC_II_ATTACHED;
NvBool PDB_PROP_GPU_SRIOV_HEAVY_FORCE_INVALIDATE_ALL_PDBS_WAR_BUG3896322;
OS_GPU_INFO *pOsGpuInfo;
OS_RM_CAPS *pOsRmCaps;
NvU32 halImpl;
@ -1105,6 +1108,7 @@ struct OBJGPU {
NvU8 fabricProbeSlowdownThreshold;
NvBool bVgpuGspPluginOffloadEnabled;
NvBool bSriovCapable;
NvBool bEnableBar1SparseForFillPteMemUnmap;
};
#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
@ -1236,6 +1240,8 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU;
#define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR03_PRESENT
#define PDB_PROP_GPU_IS_GEMINI_BASE_CAST
#define PDB_PROP_GPU_IS_GEMINI_BASE_NAME PDB_PROP_GPU_IS_GEMINI
#define PDB_PROP_GPU_SRIOV_HEAVY_FORCE_INVALIDATE_ALL_PDBS_WAR_BUG3896322_BASE_CAST
#define PDB_PROP_GPU_SRIOV_HEAVY_FORCE_INVALIDATE_ALL_PDBS_WAR_BUG3896322_BASE_NAME PDB_PROP_GPU_SRIOV_HEAVY_FORCE_INVALIDATE_ALL_PDBS_WAR_BUG3896322
#define PDB_PROP_GPU_STATE_INITIALIZED_BASE_CAST
#define PDB_PROP_GPU_STATE_INITIALIZED_BASE_NAME PDB_PROP_GPU_STATE_INITIALIZED
#define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_CAST
@ -1260,12 +1266,14 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU;
#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED
#define PDB_PROP_GPU_ZERO_FB_BASE_CAST
#define PDB_PROP_GPU_ZERO_FB_BASE_NAME PDB_PROP_GPU_ZERO_FB
#define PDB_PROP_GPU_PM_RESUME_WAR_BUG_3936017_ENABLED_BASE_CAST
#define PDB_PROP_GPU_PM_RESUME_WAR_BUG_3936017_ENABLED_BASE_NAME PDB_PROP_GPU_PM_RESUME_WAR_BUG_3936017_ENABLED
#define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_CAST
#define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_NAME PDB_PROP_GPU_SWRL_GRANULAR_LOCKING
#define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_CAST
#define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_NAME PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK
#define PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_CAST
#define PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_IGPU
#define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_CAST
#define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_NAME PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK
#define PDB_PROP_GPU_ATS_SUPPORTED_BASE_CAST
#define PDB_PROP_GPU_ATS_SUPPORTED_BASE_NAME PDB_PROP_GPU_ATS_SUPPORTED
#define PDB_PROP_GPU_EMULATION_BASE_CAST
@ -3795,6 +3803,7 @@ NV_STATUS gpuGetByHandle(struct RsClient *pClient, NvHandle hResource, NvBool *p
#define IS_GFID_VF(gfid) (((NvU32)(gfid)) != GPU_GFID_PF)
// Invalid P2P GFID
#define INVALID_P2P_GFID (0xFFFFFFFF)
#define INVALID_FABRIC_PARTITION_ID (0xFFFFFFFF)
//
// Generates GPU child accessor macros (i.e.: GPU_GET_{ENG})

View File

@ -550,8 +550,9 @@ NV_STATUS gpioWritePinHwEnum_MISSING(
return NV_ERR_NOT_SUPPORTED;
}
// GPIO:hal:CHECK_PROTECTION - GPIO disabled
NV_STATUS gpioCheckProtection_MISSING(
// GPIO:hal:OUTPUT_CNTL_CHECK_PROTECTION - GPIO disabled
NV_STATUS gpioOutputCntlCheckProtection_MISSING(
POBJGPU pGpu,
POBJGPIO pGpio,
NvU32 gpioPin,
NvBool *pbIsProtected
@ -560,6 +561,17 @@ NV_STATUS gpioCheckProtection_MISSING(
return NV_ERR_NOT_SUPPORTED;
}
// GPIO:hal:INPUT_CNTL_CHECK_PROTECTION - GPIO disabled
//
// "_MISSING" stub installed when the GPIO engine is disabled for this
// configuration: unconditionally reports NV_ERR_NOT_SUPPORTED and leaves
// *pbIsProtected untouched.
NV_STATUS gpioInputCntlCheckProtection_MISSING
(
    POBJGPU pGpu,
    POBJGPIO pGpio,
    NvU32 inputHwEnum,
    NvBool *pbIsProtected
)
{
    return NV_ERR_NOT_SUPPORTED;
}
// GPIO:hal:READ_INPUT - GPIO disabled
NV_STATUS gpioReadInput_FWCLIENT(
POBJGPIO pGpio,

View File

@ -98,6 +98,10 @@ static NV_STATUS __nvoc_thunk_KernelBus_engstateStateUnload(OBJGPU *pGpu, struct
return kbusStateUnload(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), flags);
}
// NVOC-generated thunk: converts the OBJENGSTATE base pointer back to the
// containing KernelBus (by subtracting the RTTI base-class offset) and
// forwards StatePostUnload to the KernelBus implementation.
static NV_STATUS __nvoc_thunk_KernelBus_engstateStatePostUnload(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, NvU32 arg0) {
    return kbusStatePostUnload(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}

// NVOC-generated thunk: same base-to-derived pointer adjustment, forwarding
// StateDestroy to the KernelBus implementation.
static void __nvoc_thunk_KernelBus_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus) {
    kbusStateDestroy(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset));
}
@ -106,10 +110,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState(POBJGPU pGpu
return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStatePostUnload(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStateInitUnlocked(POBJGPU pGpu, struct KernelBus *pEngstate) {
return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset));
}
@ -311,6 +311,12 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__kbusStateUnload__ = &kbusStateUnload_GM107;
}
// Hal function -- kbusStatePostUnload
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
pThis->__kbusStatePostUnload__ = &kbusStatePostUnload_56cd7a;
}
// Hal function -- kbusStateDestroy
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
{
@ -844,12 +850,12 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelBus_engstateStateUnload;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostUnload__ = &__nvoc_thunk_KernelBus_engstateStatePostUnload;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelBus_engstateStateDestroy;
pThis->__kbusReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState;
pThis->__kbusStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kbusStatePostUnload;
pThis->__kbusStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kbusStateInitUnlocked;
pThis->__kbusInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kbusInitMissing;

View File

@ -216,6 +216,7 @@ struct __nvoc_inner_struc_KernelBus_2__ {
NvU32 pageTblSize;
NvU32 pageDirInit;
NvU32 pageTblInit;
NvU32 cpuVisiblePgTblSize;
};
struct __nvoc_inner_struc_KernelBus_3__ {
@ -302,6 +303,7 @@ struct KernelBus {
NV_STATUS (*__kbusStatePostLoad__)(OBJGPU *, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStatePreUnload__)(OBJGPU *, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStateUnload__)(OBJGPU *, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStatePostUnload__)(OBJGPU *, struct KernelBus *, NvU32);
void (*__kbusStateDestroy__)(OBJGPU *, struct KernelBus *);
NV_STATUS (*__kbusTeardownBar2CpuAperture__)(OBJGPU *, struct KernelBus *, NvU32);
void (*__kbusGetP2PMailboxAttributes__)(OBJGPU *, struct KernelBus *, NvU32 *, NvU32 *, NvU32 *);
@ -349,7 +351,6 @@ struct KernelBus {
void (*__kbusUnmapCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, PMEMORY_DESCRIPTOR);
void (*__kbusTeardownCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, NvBool);
NV_STATUS (*__kbusReconcileTunableState__)(POBJGPU, struct KernelBus *, void *);
NV_STATUS (*__kbusStatePostUnload__)(POBJGPU, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStateInitUnlocked__)(POBJGPU, struct KernelBus *);
void (*__kbusInitMissing__)(POBJGPU, struct KernelBus *);
NV_STATUS (*__kbusStatePreInitUnlocked__)(POBJGPU, struct KernelBus *);
@ -462,6 +463,8 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusStatePreUnload_HAL(pGpu, pKernelBus, arg0) kbusStatePreUnload_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStateUnload(pGpu, pKernelBus, flags) kbusStateUnload_DISPATCH(pGpu, pKernelBus, flags)
#define kbusStateUnload_HAL(pGpu, pKernelBus, flags) kbusStateUnload_DISPATCH(pGpu, pKernelBus, flags)
#define kbusStatePostUnload(pGpu, pKernelBus, arg0) kbusStatePostUnload_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStatePostUnload_HAL(pGpu, pKernelBus, arg0) kbusStatePostUnload_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStateDestroy(pGpu, pKernelBus) kbusStateDestroy_DISPATCH(pGpu, pKernelBus)
#define kbusStateDestroy_HAL(pGpu, pKernelBus) kbusStateDestroy_DISPATCH(pGpu, pKernelBus)
#define kbusTeardownBar2CpuAperture(pGpu, pKernelBus, gfid) kbusTeardownBar2CpuAperture_DISPATCH(pGpu, pKernelBus, gfid)
@ -557,7 +560,6 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusTeardownCoherentCpuMapping(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusTeardownCoherentCpuMapping_HAL(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusReconcileTunableState(pGpu, pEngstate, pTunableState) kbusReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kbusStatePostUnload(pGpu, pEngstate, arg0) kbusStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kbusStateInitUnlocked(pGpu, pEngstate) kbusStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kbusInitMissing(pGpu, pEngstate) kbusInitMissing_DISPATCH(pGpu, pEngstate)
#define kbusStatePreInitUnlocked(pGpu, pEngstate) kbusStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
@ -1090,6 +1092,24 @@ static inline void kbusDestroyPeerAccess(OBJGPU *pGpu, struct KernelBus *pKernel
#define kbusDestroyPeerAccess_HAL(pGpu, pKernelBus, peerNum) kbusDestroyPeerAccess(pGpu, pKernelBus, peerNum)
NvU32 kbusGetNvlinkPeerId_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pPeerGpu);
static inline NvU32 kbusGetNvlinkPeerId_c732fb(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pPeerGpu) {
return 4294967295U;
}
#ifdef __nvoc_kern_bus_h_disabled
static inline NvU32 kbusGetNvlinkPeerId(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pPeerGpu) {
NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!");
return 0;
}
#else //__nvoc_kern_bus_h_disabled
#define kbusGetNvlinkPeerId(pGpu, pKernelBus, pPeerGpu) kbusGetNvlinkPeerId_c732fb(pGpu, pKernelBus, pPeerGpu)
#endif //__nvoc_kern_bus_h_disabled
#define kbusGetNvlinkPeerId_HAL(pGpu, pKernelBus, pPeerGpu) kbusGetNvlinkPeerId(pGpu, pKernelBus, pPeerGpu)
NvU32 kbusGetPeerIdFromTable_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 locPeerIdx, NvU32 remPeerIdx);
@ -1637,6 +1657,14 @@ static inline NV_STATUS kbusStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelBus
return pKernelBus->__kbusStateUnload__(pGpu, pKernelBus, flags);
}
static inline NV_STATUS kbusStatePostUnload_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) {
return NV_OK;
}
static inline NV_STATUS kbusStatePostUnload_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) {
return pKernelBus->__kbusStatePostUnload__(pGpu, pKernelBus, arg0);
}
void kbusStateDestroy_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus);
static inline void kbusStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus) {
@ -2093,10 +2121,6 @@ static inline NV_STATUS kbusReconcileTunableState_DISPATCH(POBJGPU pGpu, struct
return pEngstate->__kbusReconcileTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NV_STATUS kbusStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return pEngstate->__kbusStatePostUnload__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS kbusStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate) {
return pEngstate->__kbusStateInitUnlocked__(pGpu, pEngstate);
}

View File

@ -441,6 +441,17 @@ static void __nvoc_init_funcTable_KernelFsp_1(KernelFsp *pThis, RmHalspecOwner *
pThis->__kfspCheckGspSecureScratch__ = &kfspCheckGspSecureScratch_491d52;
}
// Hal function -- kfspRequiresBug3957833WAR
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__kfspRequiresBug3957833WAR__ = &kfspRequiresBug3957833WAR_GH100;
}
// default
else
{
pThis->__kfspRequiresBug3957833WAR__ = &kfspRequiresBug3957833WAR_491d52;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelFsp_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelFsp_engstateStateDestroy;

View File

@ -166,6 +166,7 @@ struct KernelFsp {
NV_STATUS (*__kfspErrorCode2NvStatusMap__)(struct OBJGPU *, struct KernelFsp *, NvU32);
NvU64 (*__kfspGetExtraReservedMemorySize__)(struct OBJGPU *, struct KernelFsp *);
NvBool (*__kfspCheckGspSecureScratch__)(struct OBJGPU *, struct KernelFsp *);
NvBool (*__kfspRequiresBug3957833WAR__)(struct OBJGPU *, struct KernelFsp *);
NV_STATUS (*__kfspReconcileTunableState__)(POBJGPU, struct KernelFsp *, void *);
NV_STATUS (*__kfspStateLoad__)(POBJGPU, struct KernelFsp *, NvU32);
NV_STATUS (*__kfspStateUnload__)(POBJGPU, struct KernelFsp *, NvU32);
@ -293,6 +294,8 @@ NV_STATUS __nvoc_objCreate_KernelFsp(KernelFsp**, Dynamic*, NvU32);
#define kfspGetExtraReservedMemorySize_HAL(pGpu, pKernelFsp) kfspGetExtraReservedMemorySize_DISPATCH(pGpu, pKernelFsp)
#define kfspCheckGspSecureScratch(pGpu, pKernelFsp) kfspCheckGspSecureScratch_DISPATCH(pGpu, pKernelFsp)
#define kfspCheckGspSecureScratch_HAL(pGpu, pKernelFsp) kfspCheckGspSecureScratch_DISPATCH(pGpu, pKernelFsp)
#define kfspRequiresBug3957833WAR(pGpu, pKernelFsp) kfspRequiresBug3957833WAR_DISPATCH(pGpu, pKernelFsp)
#define kfspRequiresBug3957833WAR_HAL(pGpu, pKernelFsp) kfspRequiresBug3957833WAR_DISPATCH(pGpu, pKernelFsp)
#define kfspReconcileTunableState(pGpu, pEngstate, pTunableState) kfspReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kfspStateLoad(pGpu, pEngstate, arg0) kfspStateLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kfspStateUnload(pGpu, pEngstate, arg0) kfspStateUnload_DISPATCH(pGpu, pEngstate, arg0)
@ -585,6 +588,16 @@ static inline NvBool kfspCheckGspSecureScratch_DISPATCH(struct OBJGPU *pGpu, str
return pKernelFsp->__kfspCheckGspSecureScratch__(pGpu, pKernelFsp);
}
NvBool kfspRequiresBug3957833WAR_GH100(struct OBJGPU *pGpu, struct KernelFsp *pKernelFsp);
static inline NvBool kfspRequiresBug3957833WAR_491d52(struct OBJGPU *pGpu, struct KernelFsp *pKernelFsp) {
return ((NvBool)(0 != 0));
}
static inline NvBool kfspRequiresBug3957833WAR_DISPATCH(struct OBJGPU *pGpu, struct KernelFsp *pKernelFsp) {
return pKernelFsp->__kfspRequiresBug3957833WAR__(pGpu, pKernelFsp);
}
static inline NV_STATUS kfspReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelFsp *pEngstate, void *pTunableState) {
return pEngstate->__kfspReconcileTunableState__(pGpu, pEngstate, pTunableState);
}

View File

@ -326,11 +326,11 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis
// Hal function -- kmemsysProgramSysmemFlushBuffer
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | AD102 | AD103 | AD104 | AD106 | AD107 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */
{
pThis->__kmemsysProgramSysmemFlushBuffer__ = &kmemsysProgramSysmemFlushBuffer_GM107;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__kmemsysProgramSysmemFlushBuffer__ = &kmemsysProgramSysmemFlushBuffer_GA100;
}

View File

@ -904,6 +904,17 @@ static inline void kgrctxFreeAssociatedCtxBuffers(struct OBJGPU *arg0, struct Ke
#define kgrctxFreeAssociatedCtxBuffers(arg0, arg1) kgrctxFreeAssociatedCtxBuffers_IMPL(arg0, arg1)
#endif //__nvoc_kernel_graphics_context_h_disabled
NvBool kgrctxIsFinalGlobalBufMapRefDuped_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelChannel *arg2, GR_GLOBALCTX_BUFFER bufId);
#ifdef __nvoc_kernel_graphics_context_h_disabled
static inline NvBool kgrctxIsFinalGlobalBufMapRefDuped(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelChannel *arg2, GR_GLOBALCTX_BUFFER bufId) {
NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!");
return NV_FALSE;
}
#else //__nvoc_kernel_graphics_context_h_disabled
#define kgrctxIsFinalGlobalBufMapRefDuped(arg0, arg1, arg2, bufId) kgrctxIsFinalGlobalBufMapRefDuped_IMPL(arg0, arg1, arg2, bufId)
#endif //__nvoc_kernel_graphics_context_h_disabled
#undef PRIVATE_FIELD

View File

@ -679,18 +679,18 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
// Hal function -- kgspGetWprHeapSize
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x100007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GH100 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */
{
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_5661b8;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */
{
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_15390a;
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_e3e8a1;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f00000UL) )) /* ChipHal: AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_AD102;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_cffea5;
}
}
// Hal function -- kgspInitVgpuPartitionLogging

View File

@ -828,16 +828,16 @@ static inline const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_DISPATCH
return pKernelGsp->__kgspGetBinArchiveBooterUnloadUcode__(pKernelGsp);
}
static inline NvU64 kgspGetWprHeapSize_5661b8(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return 64 * 1024 * 1024;
}
static inline NvU64 kgspGetWprHeapSize_15390a(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return 80 * 1024 * 1024;
static inline NvU64 kgspGetWprHeapSize_e3e8a1(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return 96 * 1024 * 1024;
}
NvU64 kgspGetWprHeapSize_AD102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
static inline NvU64 kgspGetWprHeapSize_cffea5(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return 104 * 1024 * 1024;
}
static inline NvU64 kgspGetWprHeapSize_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return pKernelGsp->__kgspGetWprHeapSize__(pGpu, pKernelGsp);
}

View File

@ -344,6 +344,9 @@ typedef struct MEMORY_DESCRIPTOR
// Serve as a head node in a list of submemdescs
MEMORY_DESCRIPTOR_LIST *pSubMemDescList;
// Reserved for RM exclusive use
NvBool bRmExclusiveUse;
// If strung in a intrusive linked list
ListNode node;
@ -650,6 +653,8 @@ PMEMORY_DESCRIPTOR memdescGetRootMemDesc(PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pRo
void memdescSetCustomHeap(PMEMORY_DESCRIPTOR);
NvBool memdescGetCustomHeap(PMEMORY_DESCRIPTOR);
NvBool memdescAcquireRmExclusiveUse(MEMORY_DESCRIPTOR *pMemDesc);
/*!
* @brief Get PTE kind
*

View File

@ -884,9 +884,13 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2236, 0x1482, 0x10de, "NVIDIA A10" },
{ 0x2237, 0x152f, 0x10de, "NVIDIA A10G" },
{ 0x2238, 0x1677, 0x10de, "NVIDIA A10M" },
{ 0x2322, 0x17a4, 0x10de, "NVIDIA H800 PCIe" },
{ 0x2324, 0x17a6, 0x10de, "NVIDIA H800" },
{ 0x2324, 0x17a8, 0x10de, "NVIDIA H800" },
{ 0x2330, 0x16c0, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
{ 0x2339, 0x17fc, 0x10de, "NVIDIA H100" },
{ 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
{ 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" },
@ -914,6 +918,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x24B9, 0x0000, 0x0000, "NVIDIA RTX A3000 12GB Laptop GPU" },
{ 0x24BA, 0x0000, 0x0000, "NVIDIA RTX A4500 Laptop GPU" },
{ 0x24BB, 0x0000, 0x0000, "NVIDIA RTX A3000 12GB Laptop GPU" },
{ 0x24C7, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" },
{ 0x24C9, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x24DC, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Laptop GPU" },
{ 0x24DD, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Laptop GPU" },
@ -959,6 +964,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25B9, 0x0000, 0x0000, "NVIDIA RTX A1000 Laptop GPU" },
{ 0x25BA, 0x0000, 0x0000, "NVIDIA RTX A2000 8GB Laptop GPU" },
{ 0x25BB, 0x0000, 0x0000, "NVIDIA RTX A500 Laptop GPU" },
{ 0x25BC, 0x0000, 0x0000, "NVIDIA RTX A1000 6GB Laptop GPU" },
{ 0x25BD, 0x0000, 0x0000, "NVIDIA RTX A500 Laptop GPU" },
{ 0x25E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
{ 0x25E2, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
{ 0x25E5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
@ -973,16 +980,29 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B1, 0x16a1, 0x10de, "NVIDIA RTX 6000 Ada Generation" },
{ 0x26B1, 0x16a1, 0x17aa, "NVIDIA RTX 6000 Ada Generation" },
{ 0x26B5, 0x169d, 0x10de, "NVIDIA L40" },
{ 0x26B5, 0x17da, 0x10de, "NVIDIA L40" },
{ 0x2704, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080" },
{ 0x2717, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
{ 0x2730, 0x0000, 0x0000, "NVIDIA RTX 5000 Ada Generation Laptop GPU" },
{ 0x2757, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
{ 0x2782, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Ti" },
{ 0x2786, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070" },
{ 0x27A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" },
{ 0x27B0, 0x16fa, 0x1028, "NVIDIA RTX 4000 SFF Ada Generation" },
{ 0x27B0, 0x16fa, 0x103c, "NVIDIA RTX 4000 SFF Ada Generation" },
{ 0x27B0, 0x16fa, 0x10de, "NVIDIA RTX 4000 SFF Ada Generation" },
{ 0x27B0, 0x16fa, 0x17aa, "NVIDIA RTX 4000 SFF Ada Generation" },
{ 0x27B8, 0x16ca, 0x10de, "NVIDIA L4" },
{ 0x27B8, 0x16ee, 0x10de, "NVIDIA L4" },
{ 0x27BA, 0x0000, 0x0000, "NVIDIA RTX 4000 Ada Generation Laptop GPU" },
{ 0x27BB, 0x0000, 0x0000, "NVIDIA RTX 3500 Ada Generation Laptop GPU" },
{ 0x27E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" },
{ 0x2820, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" },
{ 0x2838, 0x0000, 0x0000, "NVIDIA RTX 3000 Ada Generation Laptop GPU" },
{ 0x2860, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" },
{ 0x28A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Laptop GPU" },
{ 0x28A1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" },
{ 0x28B8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Laptop GPU" },
{ 0x28E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Laptop GPU" },
{ 0x28E1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" },
{ 0x13BD, 0x11cc, 0x10DE, "GRID M10-0B" },
@ -1717,20 +1737,20 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2238, 0x16b8, 0x10DE, "NVIDIA A10M-10C" },
{ 0x2238, 0x16b9, 0x10DE, "NVIDIA A10M-20C" },
{ 0x2238, 0x16e6, 0x10DE, "NVIDIA A10M-1" },
{ 0x2322, 0x17e2, 0x10DE, "NVIDIA GPU-2322-17E2" },
{ 0x2322, 0x17e3, 0x10DE, "NVIDIA GPU-2322-17E3" },
{ 0x2322, 0x17e4, 0x10DE, "NVIDIA GPU-2322-17E4" },
{ 0x2322, 0x17e5, 0x10DE, "NVIDIA GPU-2322-17E5" },
{ 0x2322, 0x17e6, 0x10DE, "NVIDIA GPU-2322-17E6" },
{ 0x2322, 0x17e7, 0x10DE, "NVIDIA GPU-2322-17E7" },
{ 0x2322, 0x17e8, 0x10DE, "NVIDIA GPU-2322-17E8" },
{ 0x2322, 0x17e9, 0x10DE, "NVIDIA GPU-2322-17E9" },
{ 0x2322, 0x17ea, 0x10DE, "NVIDIA GPU-2322-17EA" },
{ 0x2322, 0x17eb, 0x10DE, "NVIDIA GPU-2322-17EB" },
{ 0x2322, 0x17ec, 0x10DE, "NVIDIA GPU-2322-17EC" },
{ 0x2322, 0x17ed, 0x10DE, "NVIDIA GPU-2322-17ED" },
{ 0x2322, 0x17ee, 0x10DE, "NVIDIA GPU-2322-17EE" },
{ 0x2322, 0x17ef, 0x10DE, "NVIDIA GPU-2322-17EF" },
{ 0x2322, 0x17e2, 0x10DE, "NVIDIA H800-1-10CME" },
{ 0x2322, 0x17e3, 0x10DE, "NVIDIA H800-1-10C" },
{ 0x2322, 0x17e4, 0x10DE, "NVIDIA H800-2-20C" },
{ 0x2322, 0x17e5, 0x10DE, "NVIDIA H800-3-40C" },
{ 0x2322, 0x17e6, 0x10DE, "NVIDIA H800-4-40C" },
{ 0x2322, 0x17e7, 0x10DE, "NVIDIA H800-7-80C" },
{ 0x2322, 0x17e8, 0x10DE, "NVIDIA H800-4C" },
{ 0x2322, 0x17e9, 0x10DE, "NVIDIA H800-5C" },
{ 0x2322, 0x17ea, 0x10DE, "NVIDIA H800-8C" },
{ 0x2322, 0x17eb, 0x10DE, "NVIDIA H800-10C" },
{ 0x2322, 0x17ec, 0x10DE, "NVIDIA H800-16C" },
{ 0x2322, 0x17ed, 0x10DE, "NVIDIA H800-20C" },
{ 0x2322, 0x17ee, 0x10DE, "NVIDIA H800-40C" },
{ 0x2322, 0x17ef, 0x10DE, "NVIDIA H800-80C" },
{ 0x2331, 0x16d3, 0x10DE, "NVIDIA H100-1-10C" },
{ 0x2331, 0x16d4, 0x10DE, "NVIDIA H100-2-20C" },
{ 0x2331, 0x16d5, 0x10DE, "NVIDIA H100-3-40C" },

View File

@ -239,6 +239,7 @@ struct OBJTMR {
NvBool bAlarmIntrEnabled;
PENG_INFO_LINK_NODE infoList;
struct OBJREFCNT *pGrTickFreqRefcnt;
NvU64 sysTimerOffsetNs;
};
#ifndef __NVOC_CLASS_OBJTMR_TYPEDEF__

View File

@ -192,12 +192,12 @@ static NvBool __nvoc_thunk_RmResource_profilerBaseAccessCallback(struct Profiler
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ProfilerBase[] =
{
{ /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReserveHwpmLegacy_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
/*flags=*/ 0x2010u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0101u,
/*paramSize=*/ sizeof(NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS),
@ -267,12 +267,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
#endif
},
{ /* [5] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdFreePmaStream_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0106u,
/*paramSize=*/ sizeof(NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS),
@ -282,12 +282,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
#endif
},
{ /* [6] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdBindPmResources_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
/*flags=*/ 0x2010u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0107u,
/*paramSize=*/ 0,
@ -297,12 +297,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
#endif
},
{ /* [7] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdUnbindPmResources_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
/*flags=*/ 0x2010u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0108u,
/*paramSize=*/ 0,
@ -327,12 +327,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
#endif
},
{ /* [9] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdExecRegops_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*flags=*/ 0x2210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc010au,
/*paramSize=*/ sizeof(NVB0CC_CTRL_EXEC_REG_OPS_PARAMS),
@ -444,6 +444,81 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
/*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "profilerBaseCtrlCmdInternalPermissionsInit"
#endif
},
{ /* [17] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalFreePmaStream_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*flags=*/ 0x610u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0206u,
/*paramSize=*/ sizeof(NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "profilerBaseCtrlCmdInternalFreePmaStream"
#endif
},
{ /* [18] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalGetMaxPmas_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*flags=*/ 0x610u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0207u,
/*paramSize=*/ sizeof(NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "profilerBaseCtrlCmdInternalGetMaxPmas"
#endif
},
{ /* [19] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalBindPmResources_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*flags=*/ 0x610u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0208u,
/*paramSize=*/ 0,
/*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "profilerBaseCtrlCmdInternalBindPmResources"
#endif
},
{ /* [20] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalUnbindPmResources_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*flags=*/ 0x610u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc0209u,
/*paramSize=*/ 0,
/*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "profilerBaseCtrlCmdInternalUnbindPmResources"
#endif
},
{ /* [21] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalReserveHwpmLegacy_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*flags=*/ 0x610u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc020au,
/*paramSize=*/ sizeof(NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "profilerBaseCtrlCmdInternalReserveHwpmLegacy"
#endif
},
@ -451,7 +526,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
const struct NVOC_EXPORT_INFO __nvoc_export_info_ProfilerBase =
{
/*numEntries=*/ 17,
/*numEntries=*/ 22,
/*pExportEntries=*/ __nvoc_exported_method_def_ProfilerBase
};
@ -498,10 +573,14 @@ static void __nvoc_init_funcTable_ProfilerBase_1(ProfilerBase *pThis, RmHalspecO
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
pThis->__profilerBaseCtrlCmdReserveHwpmLegacy__ = &profilerBaseCtrlCmdReserveHwpmLegacy_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
pThis->__profilerBaseCtrlCmdInternalReserveHwpmLegacy__ = &profilerBaseCtrlCmdInternalReserveHwpmLegacy_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__profilerBaseCtrlCmdReleaseHwpmLegacy__ = &profilerBaseCtrlCmdReleaseHwpmLegacy_IMPL;
#endif
@ -518,23 +597,39 @@ static void __nvoc_init_funcTable_ProfilerBase_1(ProfilerBase *pThis, RmHalspecO
pThis->__profilerBaseCtrlCmdAllocPmaStream__ = &profilerBaseCtrlCmdAllocPmaStream_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__profilerBaseCtrlCmdFreePmaStream__ = &profilerBaseCtrlCmdFreePmaStream_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
pThis->__profilerBaseCtrlCmdInternalFreePmaStream__ = &profilerBaseCtrlCmdInternalFreePmaStream_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
pThis->__profilerBaseCtrlCmdInternalGetMaxPmas__ = &profilerBaseCtrlCmdInternalGetMaxPmas_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
pThis->__profilerBaseCtrlCmdBindPmResources__ = &profilerBaseCtrlCmdBindPmResources_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
pThis->__profilerBaseCtrlCmdUnbindPmResources__ = &profilerBaseCtrlCmdUnbindPmResources_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
pThis->__profilerBaseCtrlCmdInternalBindPmResources__ = &profilerBaseCtrlCmdInternalBindPmResources_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
pThis->__profilerBaseCtrlCmdInternalUnbindPmResources__ = &profilerBaseCtrlCmdInternalUnbindPmResources_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__profilerBaseCtrlCmdPmaStreamUpdateGetPut__ = &profilerBaseCtrlCmdPmaStreamUpdateGetPut_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
pThis->__profilerBaseCtrlCmdExecRegops__ = &profilerBaseCtrlCmdExecRegops_IMPL;
#endif

View File

@ -67,13 +67,18 @@ struct ProfilerBase {
struct GpuResource *__nvoc_pbase_GpuResource;
struct ProfilerBase *__nvoc_pbase_ProfilerBase;
NV_STATUS (*__profilerBaseCtrlCmdReserveHwpmLegacy__)(struct ProfilerBase *, NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *);
NV_STATUS (*__profilerBaseCtrlCmdInternalReserveHwpmLegacy__)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS *);
NV_STATUS (*__profilerBaseCtrlCmdReleaseHwpmLegacy__)(struct ProfilerBase *);
NV_STATUS (*__profilerBaseCtrlCmdReservePmAreaSmpc__)(struct ProfilerBase *, NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS *);
NV_STATUS (*__profilerBaseCtrlCmdReleasePmAreaSmpc__)(struct ProfilerBase *);
NV_STATUS (*__profilerBaseCtrlCmdAllocPmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *);
NV_STATUS (*__profilerBaseCtrlCmdFreePmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *);
NV_STATUS (*__profilerBaseCtrlCmdInternalFreePmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS *);
NV_STATUS (*__profilerBaseCtrlCmdInternalGetMaxPmas__)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS *);
NV_STATUS (*__profilerBaseCtrlCmdBindPmResources__)(struct ProfilerBase *);
NV_STATUS (*__profilerBaseCtrlCmdUnbindPmResources__)(struct ProfilerBase *);
NV_STATUS (*__profilerBaseCtrlCmdInternalBindPmResources__)(struct ProfilerBase *);
NV_STATUS (*__profilerBaseCtrlCmdInternalUnbindPmResources__)(struct ProfilerBase *);
NV_STATUS (*__profilerBaseCtrlCmdPmaStreamUpdateGetPut__)(struct ProfilerBase *, NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *);
NV_STATUS (*__profilerBaseCtrlCmdExecRegops__)(struct ProfilerBase *, NVB0CC_CTRL_EXEC_REG_OPS_PARAMS *);
NV_STATUS (*__profilerBaseCtrlCmdInternalAllocPmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *);
@ -106,6 +111,13 @@ struct ProfilerBase {
NV_STATUS (*__profilerBaseControlLookup__)(struct ProfilerBase *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
NV_STATUS (*__profilerBaseMap__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
NvBool (*__profilerBaseAccessCallback__)(struct ProfilerBase *, struct RsClient *, void *, RsAccessRight);
NvU32 maxPmaChannels;
NvU32 pmaVchIdx;
NvBool bLegacyHwpm;
struct RsResourceRef **ppBytesAvailable;
struct RsResourceRef **ppStreamBuffers;
struct RsResourceRef *pBoundCntBuf;
struct RsResourceRef *pBoundPmaBuf;
};
#ifndef __NVOC_CLASS_ProfilerBase_TYPEDEF__
@ -137,13 +149,18 @@ NV_STATUS __nvoc_objCreate_ProfilerBase(ProfilerBase**, Dynamic*, NvU32, struct
__nvoc_objCreate_ProfilerBase((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
// Convenience wrappers for the NVB0CC profiler control commands: each macro
// forwards its arguments to the matching *_DISPATCH inline, which in turn
// calls through the per-object function-pointer slot of the same name.
// (NVOC-generated; keep these in sync with the __profilerBaseCtrlCmd*__
// members of struct ProfilerBase.)
#define profilerBaseCtrlCmdReserveHwpmLegacy(pProfiler, pParams) profilerBaseCtrlCmdReserveHwpmLegacy_DISPATCH(pProfiler, pParams)
#define profilerBaseCtrlCmdInternalReserveHwpmLegacy(pProfiler, pParams) profilerBaseCtrlCmdInternalReserveHwpmLegacy_DISPATCH(pProfiler, pParams)
#define profilerBaseCtrlCmdReleaseHwpmLegacy(pProfiler) profilerBaseCtrlCmdReleaseHwpmLegacy_DISPATCH(pProfiler)
#define profilerBaseCtrlCmdReservePmAreaSmpc(pProfiler, pParams) profilerBaseCtrlCmdReservePmAreaSmpc_DISPATCH(pProfiler, pParams)
#define profilerBaseCtrlCmdReleasePmAreaSmpc(pProfiler) profilerBaseCtrlCmdReleasePmAreaSmpc_DISPATCH(pProfiler)
#define profilerBaseCtrlCmdAllocPmaStream(pProfiler, pParams) profilerBaseCtrlCmdAllocPmaStream_DISPATCH(pProfiler, pParams)
#define profilerBaseCtrlCmdFreePmaStream(pProfiler, pParams) profilerBaseCtrlCmdFreePmaStream_DISPATCH(pProfiler, pParams)
#define profilerBaseCtrlCmdInternalFreePmaStream(pProfiler, pParams) profilerBaseCtrlCmdInternalFreePmaStream_DISPATCH(pProfiler, pParams)
#define profilerBaseCtrlCmdInternalGetMaxPmas(pProfiler, pParams) profilerBaseCtrlCmdInternalGetMaxPmas_DISPATCH(pProfiler, pParams)
#define profilerBaseCtrlCmdBindPmResources(pProfiler) profilerBaseCtrlCmdBindPmResources_DISPATCH(pProfiler)
#define profilerBaseCtrlCmdUnbindPmResources(pProfiler) profilerBaseCtrlCmdUnbindPmResources_DISPATCH(pProfiler)
#define profilerBaseCtrlCmdInternalBindPmResources(pProfiler) profilerBaseCtrlCmdInternalBindPmResources_DISPATCH(pProfiler)
#define profilerBaseCtrlCmdInternalUnbindPmResources(pProfiler) profilerBaseCtrlCmdInternalUnbindPmResources_DISPATCH(pProfiler)
#define profilerBaseCtrlCmdPmaStreamUpdateGetPut(pProfiler, pParams) profilerBaseCtrlCmdPmaStreamUpdateGetPut_DISPATCH(pProfiler, pParams)
#define profilerBaseCtrlCmdExecRegops(pProfiler, pParams) profilerBaseCtrlCmdExecRegops_DISPATCH(pProfiler, pParams)
#define profilerBaseCtrlCmdInternalAllocPmaStream(pProfiler, pParams) profilerBaseCtrlCmdInternalAllocPmaStream_DISPATCH(pProfiler, pParams)
@ -213,6 +230,12 @@ static inline NV_STATUS profilerBaseCtrlCmdReserveHwpmLegacy_DISPATCH(struct Pro
return pProfiler->__profilerBaseCtrlCmdReserveHwpmLegacy__(pProfiler, pParams);
}
NV_STATUS profilerBaseCtrlCmdInternalReserveHwpmLegacy_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS *pParams);
/* Dispatch helper: invoke the object's installed handler for the
 * INTERNAL_RESERVE_HWPM_LEGACY control command and propagate its status. */
static inline NV_STATUS profilerBaseCtrlCmdInternalReserveHwpmLegacy_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS *pParams) {
    NV_STATUS (*pfnHandler)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_RESERVE_HWPM_LEGACY_PARAMS *);
    pfnHandler = pProfiler->__profilerBaseCtrlCmdInternalReserveHwpmLegacy__;
    return (*pfnHandler)(pProfiler, pParams);
}
NV_STATUS profilerBaseCtrlCmdReleaseHwpmLegacy_IMPL(struct ProfilerBase *pProfiler);
static inline NV_STATUS profilerBaseCtrlCmdReleaseHwpmLegacy_DISPATCH(struct ProfilerBase *pProfiler) {
@ -243,6 +266,18 @@ static inline NV_STATUS profilerBaseCtrlCmdFreePmaStream_DISPATCH(struct Profile
return pProfiler->__profilerBaseCtrlCmdFreePmaStream__(pProfiler, pParams);
}
NV_STATUS profilerBaseCtrlCmdInternalFreePmaStream_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS *pParams);
/* Dispatch helper: invoke the object's installed handler for the
 * INTERNAL_FREE_PMA_STREAM control command and propagate its status. */
static inline NV_STATUS profilerBaseCtrlCmdInternalFreePmaStream_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS *pParams) {
    NV_STATUS (*pfnHandler)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_FREE_PMA_STREAM_PARAMS *);
    pfnHandler = pProfiler->__profilerBaseCtrlCmdInternalFreePmaStream__;
    return (*pfnHandler)(pProfiler, pParams);
}
NV_STATUS profilerBaseCtrlCmdInternalGetMaxPmas_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS *pParams);
/* Dispatch helper: invoke the object's installed handler for the
 * INTERNAL_GET_MAX_PMAS control command and propagate its status. */
static inline NV_STATUS profilerBaseCtrlCmdInternalGetMaxPmas_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS *pParams) {
    NV_STATUS (*pfnHandler)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_GET_MAX_PMAS_PARAMS *);
    pfnHandler = pProfiler->__profilerBaseCtrlCmdInternalGetMaxPmas__;
    return (*pfnHandler)(pProfiler, pParams);
}
NV_STATUS profilerBaseCtrlCmdBindPmResources_IMPL(struct ProfilerBase *pProfiler);
static inline NV_STATUS profilerBaseCtrlCmdBindPmResources_DISPATCH(struct ProfilerBase *pProfiler) {
@ -255,6 +290,18 @@ static inline NV_STATUS profilerBaseCtrlCmdUnbindPmResources_DISPATCH(struct Pro
return pProfiler->__profilerBaseCtrlCmdUnbindPmResources__(pProfiler);
}
NV_STATUS profilerBaseCtrlCmdInternalBindPmResources_IMPL(struct ProfilerBase *pProfiler);
static inline NV_STATUS profilerBaseCtrlCmdInternalBindPmResources_DISPATCH(struct ProfilerBase *pProfiler) {
return pProfiler->__profilerBaseCtrlCmdInternalBindPmResources__(pProfiler);
}
NV_STATUS profilerBaseCtrlCmdInternalUnbindPmResources_IMPL(struct ProfilerBase *pProfiler);
static inline NV_STATUS profilerBaseCtrlCmdInternalUnbindPmResources_DISPATCH(struct ProfilerBase *pProfiler) {
return pProfiler->__profilerBaseCtrlCmdInternalUnbindPmResources__(pProfiler);
}
NV_STATUS profilerBaseCtrlCmdPmaStreamUpdateGetPut_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams);
static inline NV_STATUS profilerBaseCtrlCmdPmaStreamUpdateGetPut_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams) {

View File

@ -1415,6 +1415,21 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif
},
{ /* [79] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x208001a9u,
/*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2"
#endif
},
{ /* [80] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1429,7 +1444,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEventSetNotification"
#endif
},
{ /* [80] */
{ /* [81] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1444,7 +1459,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEventSetTrigger"
#endif
},
{ /* [81] */
{ /* [82] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1459,7 +1474,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEventSetMemoryNotifies"
#endif
},
{ /* [82] */
{ /* [83] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1474,7 +1489,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEventSetSemaphoreMemory"
#endif
},
{ /* [83] */
{ /* [84] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1489,7 +1504,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEventSetSemaMemValidation"
#endif
},
{ /* [84] */
{ /* [85] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1504,7 +1519,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEventSetTriggerFifo"
#endif
},
{ /* [85] */
{ /* [86] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1519,7 +1534,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdTimerSchedule"
#endif
},
{ /* [86] */
{ /* [87] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1534,7 +1549,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdTimerCancel"
#endif
},
{ /* [87] */
{ /* [88] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1549,7 +1564,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdTimerGetTime"
#endif
},
{ /* [88] */
{ /* [89] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1564,7 +1579,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdTimerGetRegisterOffset"
#endif
},
{ /* [89] */
{ /* [90] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1579,7 +1594,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo"
#endif
},
{ /* [90] */
{ /* [91] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1594,7 +1609,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdTimerSetGrTickFreq"
#endif
},
{ /* [91] */
{ /* [92] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1609,7 +1624,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdI2cReadBuffer"
#endif
},
{ /* [92] */
{ /* [93] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1624,7 +1639,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdI2cWriteBuffer"
#endif
},
{ /* [93] */
{ /* [94] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1639,7 +1654,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdI2cReadReg"
#endif
},
{ /* [94] */
{ /* [95] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1654,7 +1669,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdI2cWriteReg"
#endif
},
{ /* [95] */
{ /* [96] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1669,7 +1684,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBiosGetSKUInfo"
#endif
},
{ /* [96] */
{ /* [97] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1684,7 +1699,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBiosGetPostTime"
#endif
},
{ /* [97] */
{ /* [98] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1699,7 +1714,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBiosGetUefiSupport"
#endif
},
{ /* [98] */
{ /* [99] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1714,7 +1729,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBiosGetNbsiV2"
#endif
},
{ /* [99] */
{ /* [100] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1729,7 +1744,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBiosGetInfoV2"
#endif
},
{ /* [100] */
{ /* [101] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1744,7 +1759,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDisplayGetStaticInfo"
#endif
},
{ /* [101] */
{ /* [102] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1759,7 +1774,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysGetStaticConfig"
#endif
},
{ /* [102] */
{ /* [103] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1774,7 +1789,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer"
#endif
},
{ /* [103] */
{ /* [104] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1789,7 +1804,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer"
#endif
},
{ /* [104] */
{ /* [105] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1804,7 +1819,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetCaps"
#endif
},
{ /* [105] */
{ /* [106] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1819,7 +1834,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer"
#endif
},
{ /* [106] */
{ /* [107] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1834,7 +1849,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder"
#endif
},
{ /* [107] */
{ /* [108] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1849,7 +1864,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMsencGetCaps"
#endif
},
{ /* [108] */
{ /* [109] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1864,7 +1879,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks"
#endif
},
{ /* [109] */
{ /* [110] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x80000u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1879,7 +1894,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferPtes"
#endif
},
{ /* [110] */
{ /* [111] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1894,7 +1909,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize"
#endif
},
{ /* [111] */
{ /* [112] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1909,7 +1924,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetInfo"
#endif
},
{ /* [112] */
{ /* [113] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1924,7 +1939,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetZcullInfo"
#endif
},
{ /* [113] */
{ /* [114] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1939,7 +1954,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetRopInfo"
#endif
},
{ /* [114] */
{ /* [115] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1954,7 +1969,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetPpcMasks"
#endif
},
{ /* [115] */
{ /* [116] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1969,7 +1984,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo"
#endif
},
{ /* [116] */
{ /* [117] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1984,7 +1999,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier"
#endif
},
{ /* [117] */
{ /* [118] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1999,7 +2014,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGetChipInfo"
#endif
},
{ /* [118] */
{ /* [119] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2014,7 +2029,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize"
#endif
},
{ /* [119] */
{ /* [120] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2029,7 +2044,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines"
#endif
},
{ /* [120] */
{ /* [121] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2044,7 +2059,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGetDeviceInfoTable"
#endif
},
{ /* [121] */
{ /* [122] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2059,7 +2074,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGetUserRegisterAccessMap"
#endif
},
{ /* [122] */
{ /* [123] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2074,7 +2089,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGetConstructedFalconInfo"
#endif
},
{ /* [123] */
{ /* [124] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2089,7 +2104,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetPdbProperties"
#endif
},
{ /* [124] */
{ /* [125] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2104,7 +2119,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDisplayWriteInstMem"
#endif
},
{ /* [125] */
{ /* [126] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2119,7 +2134,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalRecoverAllComputeContexts"
#endif
},
{ /* [126] */
{ /* [127] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2134,7 +2149,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDisplayGetIpVersion"
#endif
},
{ /* [127] */
{ /* [128] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2149,7 +2164,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGetSmcMode"
#endif
},
{ /* [128] */
{ /* [129] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2164,7 +2179,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDisplaySetupRgLineIntr"
#endif
},
{ /* [129] */
{ /* [130] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2179,7 +2194,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysSetPartitionableMem"
#endif
},
{ /* [130] */
{ /* [131] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2194,7 +2209,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers"
#endif
},
{ /* [131] */
{ /* [132] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2209,7 +2224,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDisplaySetImportedImpData"
#endif
},
{ /* [132] */
{ /* [133] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2224,7 +2239,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDisplaySetChannelPushbuffer"
#endif
},
{ /* [133] */
{ /* [134] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2239,7 +2254,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGmmuGetStaticInfo"
#endif
},
{ /* [134] */
{ /* [135] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2254,7 +2269,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetHeapReservationSize"
#endif
},
{ /* [135] */
{ /* [136] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2269,7 +2284,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdIntrGetKernelTable"
#endif
},
{ /* [136] */
{ /* [137] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2284,7 +2299,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDisplayGetDisplayMask"
#endif
},
{ /* [137] */
{ /* [138] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2299,7 +2314,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalFifoGetNumChannels"
#endif
},
{ /* [138] */
{ /* [139] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2314,7 +2329,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles"
#endif
},
{ /* [139] */
{ /* [140] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2329,7 +2344,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines"
#endif
},
{ /* [140] */
{ /* [141] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2344,7 +2359,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges"
#endif
},
{ /* [141] */
{ /* [142] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2359,7 +2374,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKMemSysGetMIGMemoryConfig"
#endif
},
{ /* [142] */
{ /* [143] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2374,7 +2389,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetZbcReferenced"
#endif
},
{ /* [143] */
{ /* [144] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2389,7 +2404,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalRcWatchdogTimeout"
#endif
},
{ /* [144] */
{ /* [145] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2404,7 +2419,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable"
#endif
},
{ /* [145] */
{ /* [146] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2419,7 +2434,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysL2InvalidateEvict"
#endif
},
{ /* [146] */
{ /* [147] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2434,7 +2449,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches"
#endif
},
{ /* [147] */
{ /* [148] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2449,7 +2464,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysDisableNvlinkPeers"
#endif
},
{ /* [148] */
{ /* [149] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2464,7 +2479,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMemSysProgramRawCompressionMode"
#endif
},
{ /* [149] */
{ /* [150] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2479,7 +2494,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalBusFlushWithSysmembar"
#endif
},
{ /* [150] */
{ /* [151] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2494,7 +2509,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal"
#endif
},
{ /* [151] */
{ /* [152] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2509,7 +2524,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote"
#endif
},
{ /* [152] */
{ /* [153] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2524,7 +2539,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalBusDestroyP2pMailbox"
#endif
},
{ /* [153] */
{ /* [154] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2539,7 +2554,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalBusCreateC2cPeerMapping"
#endif
},
{ /* [154] */
{ /* [155] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2554,7 +2569,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping"
#endif
},
{ /* [155] */
{ /* [156] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2569,7 +2584,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfCudaLimitDisable"
#endif
},
{ /* [156] */
{ /* [157] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2584,7 +2599,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPmgrUnsetDynamicBoostLimit"
#endif
},
{ /* [157] */
{ /* [158] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2599,7 +2614,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfOptpCliClear"
#endif
},
{ /* [158] */
{ /* [159] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2614,7 +2629,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl"
#endif
},
{ /* [159] */
{ /* [160] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2629,7 +2644,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits"
#endif
},
{ /* [160] */
{ /* [161] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2644,7 +2659,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo"
#endif
},
{ /* [161] */
{ /* [162] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2659,7 +2674,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfGetAuxPowerState"
#endif
},
{ /* [162] */
{ /* [163] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2674,7 +2689,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdHshubPeerConnConfig"
#endif
},
{ /* [163] */
{ /* [164] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2689,7 +2704,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdHshubFirstLinkPeerId"
#endif
},
{ /* [164] */
{ /* [165] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2704,7 +2719,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdHshubGetHshubIdForLinks"
#endif
},
{ /* [165] */
{ /* [166] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2719,7 +2734,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdHshubGetNumUnits"
#endif
},
{ /* [166] */
{ /* [167] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2734,7 +2749,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdHshubNextHshubId"
#endif
},
{ /* [167] */
{ /* [168] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2749,7 +2764,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck"
#endif
},
{ /* [168] */
{ /* [169] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2764,7 +2779,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet"
#endif
},
{ /* [169] */
{ /* [170] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2779,7 +2794,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfBoostSet_2x"
#endif
},
{ /* [170] */
{ /* [171] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2794,7 +2809,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer"
#endif
},
{ /* [171] */
{ /* [172] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2809,7 +2824,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer"
#endif
},
{ /* [172] */
{ /* [173] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2824,7 +2839,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer"
#endif
},
{ /* [173] */
{ /* [174] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2839,7 +2854,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer"
#endif
},
{ /* [174] */
{ /* [175] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2854,7 +2869,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGmmuCopyReservedSplitGVASpacePdesServer"
#endif
},
{ /* [175] */
{ /* [176] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2869,7 +2884,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfBoostSet_3x"
#endif
},
{ /* [176] */
{ /* [177] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2884,7 +2899,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfBoostClear_3x"
#endif
},
{ /* [177] */
{ /* [178] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2899,7 +2914,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance"
#endif
},
{ /* [178] */
{ /* [179] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2914,7 +2929,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance"
#endif
},
{ /* [179] */
{ /* [180] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2929,7 +2944,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBifGetStaticInfo"
#endif
},
{ /* [180] */
{ /* [181] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2944,7 +2959,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr"
#endif
},
{ /* [181] */
{ /* [182] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2959,7 +2974,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr"
#endif
},
{ /* [182] */
{ /* [183] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2974,7 +2989,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBifGetAspmL1Flags"
#endif
},
{ /* [183] */
{ /* [184] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -2989,7 +3004,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount"
#endif
},
{ /* [184] */
{ /* [185] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3004,7 +3019,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCcuMap"
#endif
},
{ /* [185] */
{ /* [186] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3019,7 +3034,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCcuUnmap"
#endif
},
{ /* [186] */
{ /* [187] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3034,7 +3049,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalSetP2pCaps"
#endif
},
{ /* [187] */
{ /* [188] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3049,7 +3064,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalRemoveP2pCaps"
#endif
},
{ /* [188] */
{ /* [189] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3064,7 +3079,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGetPcieP2pCaps"
#endif
},
{ /* [189] */
{ /* [190] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3079,7 +3094,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBifSetPcieRo"
#endif
},
{ /* [190] */
{ /* [191] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3094,7 +3109,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetComputeInstanceProfiles"
#endif
},
{ /* [191] */
{ /* [192] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3109,7 +3124,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCcuSetStreamState"
#endif
},
{ /* [192] */
{ /* [193] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3124,7 +3139,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalInitGpuIntr"
#endif
},
{ /* [193] */
{ /* [194] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3139,7 +3154,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGsyncOptimizeTiming"
#endif
},
{ /* [194] */
{ /* [195] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3154,7 +3169,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGsyncGetDisplayIds"
#endif
},
{ /* [195] */
{ /* [196] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3169,7 +3184,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGsyncSetStereoSync"
#endif
},
{ /* [196] */
{ /* [197] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3184,7 +3199,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalFbsrInit"
#endif
},
{ /* [197] */
{ /* [198] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3199,7 +3214,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalFbsrSendRegionInfo"
#endif
},
{ /* [198] */
{ /* [199] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3214,7 +3229,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGsyncGetVactiveLines"
#endif
},
{ /* [199] */
{ /* [200] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3229,7 +3244,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalMemmgrGetVgpuHostRmReservedFb"
#endif
},
{ /* [200] */
{ /* [201] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3244,7 +3259,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalPostInitBrightcStateLoad"
#endif
},
{ /* [201] */
{ /* [202] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3259,7 +3274,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalNvlinkGetNumActiveLinksPerIoctrl"
#endif
},
{ /* [202] */
{ /* [203] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3274,7 +3289,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalNvlinkGetTotalNumLinksPerIoctrl"
#endif
},
{ /* [203] */
{ /* [204] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3289,7 +3304,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGsyncIsDisplayIdValid"
#endif
},
{ /* [204] */
{ /* [205] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3304,7 +3319,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGsyncSetOrRestoreGpioRasterSync"
#endif
},
{ /* [205] */
{ /* [206] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3319,7 +3334,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdInternalGetCoherentFbApertureSize"
#endif
},
{ /* [206] */
{ /* [207] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3334,7 +3349,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdSetGpfifo"
#endif
},
{ /* [207] */
{ /* [208] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3349,7 +3364,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoBindEngines"
#endif
},
{ /* [208] */
{ /* [209] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3364,7 +3379,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdSetOperationalProperties"
#endif
},
{ /* [209] */
{ /* [210] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3379,7 +3394,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetPhysicalChannelCount"
#endif
},
{ /* [210] */
{ /* [211] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3394,7 +3409,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetInfo"
#endif
},
{ /* [211] */
{ /* [212] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3409,7 +3424,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoDisableChannels"
#endif
},
{ /* [212] */
{ /* [213] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3424,7 +3439,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetChannelMemInfo"
#endif
},
{ /* [213] */
{ /* [214] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3439,7 +3454,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetUserdLocation"
#endif
},
{ /* [214] */
{ /* [215] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c2200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3454,7 +3469,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetDeviceInfoTable"
#endif
},
{ /* [215] */
{ /* [216] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3469,7 +3484,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoClearFaultedBit"
#endif
},
{ /* [216] */
{ /* [217] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2310u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3484,7 +3499,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoRunlistSetSchedPolicy"
#endif
},
{ /* [217] */
{ /* [218] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3499,7 +3514,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoUpdateChannelInfo"
#endif
},
{ /* [218] */
{ /* [219] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3514,7 +3529,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoDisableUsermodeChannels"
#endif
},
{ /* [219] */
{ /* [220] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3529,7 +3544,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoSetupVfZombieSubctxPdb"
#endif
},
{ /* [220] */
{ /* [221] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3544,7 +3559,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFifoGetAllocatedChannels"
#endif
},
{ /* [221] */
{ /* [222] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3559,7 +3574,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetInfo"
#endif
},
{ /* [222] */
{ /* [223] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3574,7 +3589,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswZcullMode"
#endif
},
{ /* [223] */
{ /* [224] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3589,7 +3604,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetZcullInfo"
#endif
},
{ /* [224] */
{ /* [225] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3604,7 +3619,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswPmMode"
#endif
},
{ /* [225] */
{ /* [226] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3619,7 +3634,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswZcullBind"
#endif
},
{ /* [226] */
{ /* [227] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3634,7 +3649,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswPmBind"
#endif
},
{ /* [227] */
{ /* [228] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3649,7 +3664,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrSetGpcTileMap"
#endif
},
{ /* [228] */
{ /* [229] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3664,7 +3679,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswSmpcMode"
#endif
},
{ /* [229] */
{ /* [230] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3679,7 +3694,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetSmToGpcTpcMappings"
#endif
},
{ /* [230] */
{ /* [231] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3694,7 +3709,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrSetCtxswPreemptionMode"
#endif
},
{ /* [231] */
{ /* [232] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3709,7 +3724,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrCtxswPreemptionBind"
#endif
},
{ /* [232] */
{ /* [233] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3724,7 +3739,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrPcSamplingMode"
#endif
},
{ /* [233] */
{ /* [234] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3739,7 +3754,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetROPInfo"
#endif
},
{ /* [234] */
{ /* [235] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3754,7 +3769,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCtxswStats"
#endif
},
{ /* [235] */
{ /* [236] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3769,7 +3784,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferSize"
#endif
},
{ /* [236] */
{ /* [237] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x80000u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3784,7 +3799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferInfo"
#endif
},
{ /* [237] */
{ /* [238] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3799,7 +3814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetGlobalSmOrder"
#endif
},
{ /* [238] */
{ /* [239] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3814,7 +3829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCurrentResidentChannel"
#endif
},
{ /* [239] */
{ /* [240] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3829,7 +3844,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetVatAlarmData"
#endif
},
{ /* [240] */
{ /* [241] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3844,7 +3859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetAttributeBufferSize"
#endif
},
{ /* [241] */
{ /* [242] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3859,7 +3874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGfxPoolQuerySize"
#endif
},
{ /* [242] */
{ /* [243] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3874,7 +3889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGfxPoolInitialize"
#endif
},
{ /* [243] */
{ /* [244] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3889,7 +3904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGfxPoolAddSlots"
#endif
},
{ /* [244] */
{ /* [245] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3904,7 +3919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGfxPoolRemoveSlots"
#endif
},
{ /* [245] */
{ /* [246] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3919,7 +3934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCapsV2"
#endif
},
{ /* [246] */
{ /* [247] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3934,7 +3949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetInfoV2"
#endif
},
{ /* [247] */
{ /* [248] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3949,7 +3964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetGpcMask"
#endif
},
{ /* [248] */
{ /* [249] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3964,7 +3979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetTpcMask"
#endif
},
{ /* [249] */
{ /* [250] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3979,7 +3994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrSetTpcPartitionMode"
#endif
},
{ /* [250] */
{ /* [251] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -3994,7 +4009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetEngineContextProperties"
#endif
},
{ /* [251] */
{ /* [252] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4009,7 +4024,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetSmIssueRateModifier"
#endif
},
{ /* [252] */
{ /* [253] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4024,7 +4039,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrFecsBindEvtbufForUid"
#endif
},
{ /* [253] */
{ /* [254] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4039,7 +4054,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetPhysGpcMask"
#endif
},
{ /* [254] */
{ /* [255] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4054,7 +4069,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetPpcMask"
#endif
},
{ /* [255] */
{ /* [256] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4069,7 +4084,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetNumTpcsForGpc"
#endif
},
{ /* [256] */
{ /* [257] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4084,7 +4099,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetCtxswModes"
#endif
},
{ /* [257] */
{ /* [258] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4099,7 +4114,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetGpcTileMap"
#endif
},
{ /* [258] */
{ /* [259] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4114,7 +4129,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetZcullMask"
#endif
},
{ /* [259] */
{ /* [260] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8010u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4129,7 +4144,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2"
#endif
},
{ /* [260] */
{ /* [261] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4144,7 +4159,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKGrGetGfxGpcAndTpcInfo"
#endif
},
{ /* [261] */
{ /* [262] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4159,7 +4174,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetInfo"
#endif
},
{ /* [262] */
{ /* [263] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4174,7 +4189,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetInfoV2"
#endif
},
{ /* [263] */
{ /* [264] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4189,7 +4204,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetCarveoutAddressInfo"
#endif
},
{ /* [264] */
{ /* [265] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4204,7 +4219,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetCalibrationLockFailed"
#endif
},
{ /* [265] */
{ /* [266] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4219,7 +4234,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbFlushGpuCache"
#endif
},
{ /* [266] */
{ /* [267] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4234,7 +4249,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetGpuCacheAllocPolicy"
#endif
},
{ /* [267] */
{ /* [268] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4249,7 +4264,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetBar1Offset"
#endif
},
{ /* [268] */
{ /* [269] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4264,7 +4279,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetGpuCacheAllocPolicy"
#endif
},
{ /* [269] */
{ /* [270] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4279,7 +4294,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbIsKind"
#endif
},
{ /* [270] */
{ /* [271] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4294,7 +4309,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetGpuCacheInfo"
#endif
},
{ /* [271] */
{ /* [272] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4309,7 +4324,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2"
#endif
},
{ /* [272] */
{ /* [273] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4324,7 +4339,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2"
#endif
},
{ /* [273] */
{ /* [274] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4339,7 +4354,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetFBRegionInfo"
#endif
},
{ /* [274] */
{ /* [275] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4354,7 +4369,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetOfflinedPages"
#endif
},
{ /* [275] */
{ /* [276] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4369,7 +4384,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetLTCInfoForFBP"
#endif
},
{ /* [276] */
{ /* [277] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4384,7 +4399,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbCBCOp"
#endif
},
{ /* [277] */
{ /* [278] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4399,7 +4414,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetCtagsForCbcEviction"
#endif
},
{ /* [278] */
{ /* [279] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4414,7 +4429,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetupVprRegion"
#endif
},
{ /* [279] */
{ /* [280] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4429,7 +4444,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetCliManagedOfflinedPages"
#endif
},
{ /* [280] */
{ /* [281] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4444,7 +4459,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetCompBitCopyConstructInfo"
#endif
},
{ /* [281] */
{ /* [282] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4459,7 +4474,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetRrd"
#endif
},
{ /* [282] */
{ /* [283] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4474,7 +4489,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetReadLimit"
#endif
},
{ /* [283] */
{ /* [284] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4489,7 +4504,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbSetWriteLimit"
#endif
},
{ /* [284] */
{ /* [285] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4504,7 +4519,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbPatchPbrForMining"
#endif
},
{ /* [285] */
{ /* [286] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4519,7 +4534,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetMemAlignment"
#endif
},
{ /* [286] */
{ /* [287] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4534,7 +4549,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetRemappedRows"
#endif
},
{ /* [287] */
{ /* [288] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4549,7 +4564,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetFsInfo"
#endif
},
{ /* [288] */
{ /* [289] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4564,7 +4579,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetRowRemapperHistogram"
#endif
},
{ /* [289] */
{ /* [290] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4579,7 +4594,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetDynamicOfflinedPages"
#endif
},
{ /* [290] */
{ /* [291] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4594,7 +4609,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbUpdateNumaStatus"
#endif
},
{ /* [291] */
{ /* [292] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4609,7 +4624,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFbGetNumaInfo"
#endif
},
{ /* [292] */
{ /* [293] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4624,7 +4639,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcGetArchInfo"
#endif
},
{ /* [293] */
{ /* [294] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4639,7 +4654,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcServiceInterrupts"
#endif
},
{ /* [294] */
{ /* [295] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4654,7 +4669,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcGetManufacturer"
#endif
},
{ /* [295] */
{ /* [296] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4669,7 +4684,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcQueryHostclkSlowdownStatus"
#endif
},
{ /* [296] */
{ /* [297] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4684,7 +4699,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcSetHostclkSlowdownStatus"
#endif
},
{ /* [297] */
{ /* [298] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4699,7 +4714,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdMcChangeReplayableFaultOwnership"
#endif
},
{ /* [298] */
{ /* [299] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4714,7 +4729,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPciInfo"
#endif
},
{ /* [299] */
{ /* [300] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4729,7 +4744,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetInfo"
#endif
},
{ /* [300] */
{ /* [301] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4744,7 +4759,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPciBarInfo"
#endif
},
{ /* [301] */
{ /* [302] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4759,7 +4774,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetPcieLinkWidth"
#endif
},
{ /* [302] */
{ /* [303] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4774,7 +4789,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetPcieSpeed"
#endif
},
{ /* [303] */
{ /* [304] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4789,7 +4804,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed"
#endif
},
{ /* [304] */
{ /* [305] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4804,7 +4819,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed"
#endif
},
{ /* [305] */
{ /* [306] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4819,7 +4834,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusHWBCGetUpstreamBAR0"
#endif
},
{ /* [306] */
{ /* [307] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4834,7 +4849,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusServiceGpuMultifunctionState"
#endif
},
{ /* [307] */
{ /* [308] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4849,7 +4864,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPexCounters"
#endif
},
{ /* [308] */
{ /* [309] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4864,7 +4879,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusClearPexCounters"
#endif
},
{ /* [309] */
{ /* [310] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4879,7 +4894,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusFreezePexCounters"
#endif
},
{ /* [310] */
{ /* [311] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4894,7 +4909,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPexLaneCounters"
#endif
},
{ /* [311] */
{ /* [312] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4909,7 +4924,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPcieLtrLatency"
#endif
},
{ /* [312] */
{ /* [313] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4924,7 +4939,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetPcieLtrLatency"
#endif
},
{ /* [313] */
{ /* [314] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4939,7 +4954,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPexUtilCounters"
#endif
},
{ /* [314] */
{ /* [315] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4954,7 +4969,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusClearPexUtilCounters"
#endif
},
{ /* [315] */
{ /* [316] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4969,7 +4984,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetBFD"
#endif
},
{ /* [316] */
{ /* [317] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4984,7 +4999,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetAspmDisableFlags"
#endif
},
{ /* [317] */
{ /* [318] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -4999,7 +5014,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetInfoV2"
#endif
},
{ /* [318] */
{ /* [319] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5014,7 +5029,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusControlPublicAspmBits"
#endif
},
{ /* [319] */
{ /* [320] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5029,7 +5044,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetNvlinkPeerIdMask"
#endif
},
{ /* [320] */
{ /* [321] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5044,7 +5059,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetEomParameters"
#endif
},
{ /* [321] */
{ /* [322] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5059,7 +5074,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetUphyDlnCfgSpace"
#endif
},
{ /* [322] */
{ /* [323] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5074,7 +5089,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetEomStatus"
#endif
},
{ /* [323] */
{ /* [324] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5089,7 +5104,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPcieReqAtomicsCaps"
#endif
},
{ /* [324] */
{ /* [325] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5104,7 +5119,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics"
#endif
},
{ /* [325] */
{ /* [326] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5119,7 +5134,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetC2CInfo"
#endif
},
{ /* [326] */
{ /* [327] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5134,7 +5149,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSysmemAccess"
#endif
},
{ /* [327] */
{ /* [328] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5149,7 +5164,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetC2CErrorInfo"
#endif
},
{ /* [328] */
{ /* [329] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5164,7 +5179,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusSetP2pMapping"
#endif
},
{ /* [329] */
{ /* [330] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5179,7 +5194,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusUnsetP2pMapping"
#endif
},
{ /* [330] */
{ /* [331] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5194,7 +5209,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdKPerfBoost"
#endif
},
{ /* [331] */
{ /* [332] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5209,7 +5224,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfSetPowerstate"
#endif
},
{ /* [332] */
{ /* [333] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5224,7 +5239,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfRatedTdpGetControl"
#endif
},
{ /* [333] */
{ /* [334] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5239,7 +5254,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfRatedTdpSetControl"
#endif
},
{ /* [334] */
{ /* [335] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5254,7 +5269,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfSetAuxPowerState"
#endif
},
{ /* [335] */
{ /* [336] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5269,7 +5284,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfReservePerfmonHw"
#endif
},
{ /* [336] */
{ /* [337] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5284,7 +5299,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2"
#endif
},
{ /* [337] */
{ /* [338] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5299,7 +5314,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcReadVirtualMem"
#endif
},
{ /* [338] */
{ /* [339] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5314,7 +5329,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcGetErrorCount"
#endif
},
{ /* [339] */
{ /* [340] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5329,7 +5344,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcSetCleanErrorHistory"
#endif
},
{ /* [340] */
{ /* [341] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5344,7 +5359,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcGetWatchdogInfo"
#endif
},
{ /* [341] */
{ /* [342] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5359,7 +5374,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcDisableWatchdog"
#endif
},
{ /* [342] */
{ /* [343] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5374,7 +5389,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcEnableWatchdog"
#endif
},
{ /* [343] */
{ /* [344] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5389,7 +5404,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcReleaseWatchdogRequests"
#endif
},
{ /* [344] */
{ /* [345] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5404,7 +5419,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdSetRcRecovery"
#endif
},
{ /* [345] */
{ /* [346] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5419,7 +5434,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetRcRecovery"
#endif
},
{ /* [346] */
{ /* [347] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5434,7 +5449,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcSoftDisableWatchdog"
#endif
},
{ /* [347] */
{ /* [348] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5449,7 +5464,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdSetRcInfo"
#endif
},
{ /* [348] */
{ /* [349] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5464,7 +5479,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetRcInfo"
#endif
},
{ /* [349] */
{ /* [350] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5479,7 +5494,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdRcGetErrorV2"
#endif
},
{ /* [350] */
{ /* [351] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5494,7 +5509,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvdGetDumpSize"
#endif
},
{ /* [351] */
{ /* [352] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5509,7 +5524,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvdGetDump"
#endif
},
{ /* [352] */
{ /* [353] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5524,7 +5539,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvdGetNocatJournalRpt"
#endif
},
{ /* [353] */
{ /* [354] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5539,7 +5554,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvdSetNocatJournalData"
#endif
},
{ /* [354] */
{ /* [355] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5554,7 +5569,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDmaInvalidateTLB"
#endif
},
{ /* [355] */
{ /* [356] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5569,7 +5584,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdDmaGetInfo"
#endif
},
{ /* [356] */
{ /* [357] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5584,7 +5599,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPmgrGetModuleInfo"
#endif
},
{ /* [357] */
{ /* [358] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5599,7 +5614,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdLpwrDifrCtrl"
#endif
},
{ /* [358] */
{ /* [359] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5614,7 +5629,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdLpwrDifrPrefetchResponse"
#endif
},
{ /* [359] */
{ /* [360] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5629,7 +5644,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetCaps"
#endif
},
{ /* [360] */
{ /* [361] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5644,7 +5659,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetCePceMask"
#endif
},
{ /* [361] */
{ /* [362] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5659,7 +5674,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetCapsV2"
#endif
},
{ /* [362] */
{ /* [363] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5674,7 +5689,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeUpdatePceLceMappings"
#endif
},
{ /* [363] */
{ /* [364] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5689,7 +5704,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeUpdateClassDB"
#endif
},
{ /* [364] */
{ /* [365] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100e40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5704,7 +5719,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetPhysicalCaps"
#endif
},
{ /* [365] */
{ /* [366] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c0200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5719,7 +5734,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetFaultMethodBufferSize"
#endif
},
{ /* [366] */
{ /* [367] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5734,7 +5749,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetHubPceMask"
#endif
},
{ /* [367] */
{ /* [368] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5749,7 +5764,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetAllCaps"
#endif
},
{ /* [368] */
{ /* [369] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe40u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5764,7 +5779,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdCeGetAllPhysicalCaps"
#endif
},
{ /* [369] */
{ /* [370] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5779,7 +5794,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetNvlinkCaps"
#endif
},
{ /* [370] */
{ /* [371] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5794,7 +5809,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetNvlinkStatus"
#endif
},
{ /* [371] */
{ /* [372] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5809,7 +5824,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdBusGetNvlinkErrInfo"
#endif
},
{ /* [372] */
{ /* [373] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5824,7 +5839,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetNvlinkCounters"
#endif
},
{ /* [373] */
{ /* [374] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5839,7 +5854,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdClearNvlinkCounters"
#endif
},
{ /* [374] */
{ /* [375] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5854,7 +5869,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts"
#endif
},
{ /* [375] */
{ /* [376] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5869,7 +5884,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSetupEom"
#endif
},
{ /* [376] */
{ /* [377] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5884,7 +5899,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetPowerState"
#endif
},
{ /* [377] */
{ /* [378] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5899,7 +5914,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinGetLinkFomValues"
#endif
},
{ /* [378] */
{ /* [379] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5914,7 +5929,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetNvlinkEccErrors"
#endif
},
{ /* [379] */
{ /* [380] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5929,7 +5944,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkReadTpCounters"
#endif
},
{ /* [380] */
{ /* [381] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5944,7 +5959,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkEnableNvlinkPeer"
#endif
},
{ /* [381] */
{ /* [382] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5959,7 +5974,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetLpCounters"
#endif
},
{ /* [382] */
{ /* [383] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5974,7 +5989,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkCoreCallback"
#endif
},
{ /* [383] */
{ /* [384] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -5989,7 +6004,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetAliEnabled"
#endif
},
{ /* [384] */
{ /* [385] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6004,7 +6019,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid"
#endif
},
{ /* [385] */
{ /* [386] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6019,7 +6034,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkUpdateHshubMux"
#endif
},
{ /* [386] */
{ /* [387] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6034,7 +6049,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer"
#endif
},
{ /* [387] */
{ /* [388] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6049,7 +6064,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer"
#endif
},
{ /* [388] */
{ /* [389] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6064,7 +6079,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkRemoveNvlinkMapping"
#endif
},
{ /* [389] */
{ /* [390] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6079,7 +6094,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSaveRestoreHshubState"
#endif
},
{ /* [390] */
{ /* [391] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6094,7 +6109,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkProgramBufferready"
#endif
},
{ /* [391] */
{ /* [392] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6109,7 +6124,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkUpdateCurrentConfig"
#endif
},
{ /* [392] */
{ /* [393] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6124,7 +6139,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSetLoopbackMode"
#endif
},
{ /* [393] */
{ /* [394] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6139,7 +6154,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkUpdatePeerLinkMask"
#endif
},
{ /* [394] */
{ /* [395] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6154,7 +6169,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkUpdateLinkConnection"
#endif
},
{ /* [395] */
{ /* [396] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6169,7 +6184,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkEnableLinksPostTopology"
#endif
},
{ /* [396] */
{ /* [397] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6184,7 +6199,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPreLinkTrainAli"
#endif
},
{ /* [397] */
{ /* [398] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6199,7 +6214,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetRefreshCounters"
#endif
},
{ /* [398] */
{ /* [399] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6214,7 +6229,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkClearRefreshCounters"
#endif
},
{ /* [399] */
{ /* [400] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6229,7 +6244,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet"
#endif
},
{ /* [400] */
{ /* [401] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6244,7 +6259,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkLinkTrainAli"
#endif
},
{ /* [401] */
{ /* [402] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6259,7 +6274,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo"
#endif
},
{ /* [402] */
{ /* [403] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6274,7 +6289,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo"
#endif
},
{ /* [403] */
{ /* [404] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6289,7 +6304,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkProgramLinkSpeed"
#endif
},
{ /* [404] */
{ /* [405] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6304,7 +6319,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkAreLinksTrained"
#endif
},
{ /* [405] */
{ /* [406] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6319,7 +6334,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkResetLinks"
#endif
},
{ /* [406] */
{ /* [407] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6334,7 +6349,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkDisableDlInterrupts"
#endif
},
{ /* [407] */
{ /* [408] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6349,7 +6364,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetLinkAndClockInfo"
#endif
},
{ /* [408] */
{ /* [409] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6364,7 +6379,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSetupNvlinkSysmem"
#endif
},
{ /* [409] */
{ /* [410] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6379,7 +6394,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkProcessForcedConfigs"
#endif
},
{ /* [410] */
{ /* [411] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6394,7 +6409,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSyncLaneShutdownProps"
#endif
},
{ /* [411] */
{ /* [412] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6409,7 +6424,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts"
#endif
},
{ /* [412] */
{ /* [413] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6424,7 +6439,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask"
#endif
},
{ /* [413] */
{ /* [414] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6439,7 +6454,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr"
#endif
},
{ /* [414] */
{ /* [415] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100201u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6454,7 +6469,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo"
#endif
},
{ /* [415] */
{ /* [416] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6469,7 +6484,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkEnableLinks"
#endif
},
{ /* [416] */
{ /* [417] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6484,7 +6499,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkProcessInitDisabledLinks"
#endif
},
{ /* [417] */
{ /* [418] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6499,7 +6514,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkEomControl"
#endif
},
{ /* [418] */
{ /* [419] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6514,7 +6529,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkSetL1Threshold"
#endif
},
{ /* [419] */
{ /* [420] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6529,7 +6544,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkGetL1Threshold"
#endif
},
{ /* [420] */
{ /* [421] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1240u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6544,7 +6559,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkInbandSendData"
#endif
},
{ /* [421] */
{ /* [422] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6559,7 +6574,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkDirectConnectCheck"
#endif
},
{ /* [422] */
{ /* [423] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6574,7 +6589,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdNvlinkPostFaultUp"
#endif
},
{ /* [423] */
{ /* [424] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6589,7 +6604,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetDmemUsage"
#endif
},
{ /* [424] */
{ /* [425] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6604,7 +6619,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetEngineArch"
#endif
},
{ /* [425] */
{ /* [426] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6619,7 +6634,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerQueueInfo"
#endif
},
{ /* [426] */
{ /* [427] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6634,7 +6649,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlGet"
#endif
},
{ /* [427] */
{ /* [428] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6649,7 +6664,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlSet"
#endif
},
{ /* [428] */
{ /* [429] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6664,7 +6679,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferInfo"
#endif
},
{ /* [429] */
{ /* [430] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6679,7 +6694,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferSize"
#endif
},
{ /* [430] */
{ /* [431] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6694,7 +6709,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEccGetClientExposedCounters"
#endif
},
{ /* [431] */
{ /* [432] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6709,7 +6724,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaRange"
#endif
},
{ /* [432] */
{ /* [433] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6724,7 +6739,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaSetupInstanceMemBlock"
#endif
},
{ /* [433] */
{ /* [434] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6739,7 +6754,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetRange"
#endif
},
{ /* [434] */
{ /* [435] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6754,7 +6769,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetFabricMemStats"
#endif
},
{ /* [435] */
{ /* [436] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6769,7 +6784,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGspGetFeatures"
#endif
},
{ /* [436] */
{ /* [437] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6784,7 +6799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGrmgrGetGrFsInfo"
#endif
},
{ /* [437] */
{ /* [438] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6799,7 +6814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt"
#endif
},
{ /* [438] */
{ /* [439] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6814,7 +6829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff"
#endif
},
{ /* [439] */
{ /* [440] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6829,7 +6844,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower"
#endif
},
{ /* [440] */
{ /* [441] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6844,7 +6859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixVidmemPersistenceStatus"
#endif
},
{ /* [441] */
{ /* [442] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6859,7 +6874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixUpdateTgpStatus"
#endif
},
{ /* [442] */
{ /* [443] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6874,7 +6889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalBootloadGspVgpuPluginTask"
#endif
},
{ /* [443] */
{ /* [444] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6889,7 +6904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalShutdownGspVgpuPluginTask"
#endif
},
{ /* [444] */
{ /* [445] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6904,7 +6919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalPgpuAddVgpuType"
#endif
},
{ /* [445] */
{ /* [446] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6919,7 +6934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalEnumerateVgpuPerPgpu"
#endif
},
{ /* [446] */
{ /* [447] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6934,7 +6949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalClearGuestVmInfo"
#endif
},
{ /* [447] */
{ /* [448] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6949,7 +6964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetVgpuFbUsage"
#endif
},
{ /* [448] */
{ /* [449] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6964,7 +6979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuEncoderCapacity"
#endif
},
{ /* [449] */
{ /* [450] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6979,7 +6994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalCleanupGspVgpuPluginResources"
#endif
},
{ /* [450] */
{ /* [451] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6994,7 +7009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuFsEncoding"
#endif
},
{ /* [451] */
{ /* [452] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7009,7 +7024,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuMigrationSupport"
#endif
},
{ /* [452] */
{ /* [453] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7024,7 +7039,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuMgrConfig"
#endif
},
{ /* [453] */
{ /* [454] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7039,7 +7054,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetAvailableHshubMask"
#endif
},
{ /* [454] */
{ /* [455] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7059,7 +7074,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice =
{
/*numEntries=*/ 455,
/*numEntries=*/ 456,
/*pExportEntries=*/ __nvoc_exported_method_def_Subdevice
};
@ -8063,6 +8078,10 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
pThis->__subdeviceCtrlCmdGpuGetNvencSwSessionInfo__ = &subdeviceCtrlCmdGpuGetNvencSwSessionInfo_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2__ = &subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__subdeviceCtrlCmdGpuGetNvfbcSwSessionStats__ = &subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_IMPL;
#endif
@ -8166,10 +8185,6 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
pThis->__subdeviceCtrlCmdGpuHandleGpuSR__ = &subdeviceCtrlCmdGpuHandleGpuSR_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u)
pThis->__subdeviceCtrlCmdGpuSetComputeModeRules__ = &subdeviceCtrlCmdGpuSetComputeModeRules_IMPL;
#endif
}
static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
@ -8180,6 +8195,10 @@ static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u)
pThis->__subdeviceCtrlCmdGpuSetComputeModeRules__ = &subdeviceCtrlCmdGpuSetComputeModeRules_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuQueryComputeModeRules__ = &subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL;
#endif

View File

@ -353,6 +353,7 @@ struct Subdevice {
NV_STATUS (*__subdeviceCtrlCmdGpuGetEncoderCapacity__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuGetNvencSwSessionStats__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuGetNvencSwSessionInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuGetNvfbcSwSessionStats__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuSetFabricAddr__)(struct Subdevice *, NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS *);
@ -898,6 +899,7 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdGpuGetEncoderCapacity(pSubdevice, pEncoderCapacityParams) subdeviceCtrlCmdGpuGetEncoderCapacity_DISPATCH(pSubdevice, pEncoderCapacityParams)
#define subdeviceCtrlCmdGpuGetNvencSwSessionStats(pSubdevice, pParams) subdeviceCtrlCmdGpuGetNvencSwSessionStats_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGpuGetNvencSwSessionInfo(pSubdevice, pParams) subdeviceCtrlCmdGpuGetNvencSwSessionInfo_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2(pSubdevice, pParams) subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGpuGetNvfbcSwSessionStats(pSubdevice, params) subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_DISPATCH(pSubdevice, params)
#define subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo(pSubdevice, params) subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo_DISPATCH(pSubdevice, params)
#define subdeviceCtrlCmdGpuSetFabricAddr(pSubdevice, pParams) subdeviceCtrlCmdGpuSetFabricAddr_DISPATCH(pSubdevice, pParams)
@ -2568,6 +2570,12 @@ static inline NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionInfo_DISPATCH(struct
return pSubdevice->__subdeviceCtrlCmdGpuGetNvencSwSessionInfo__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdGpuGetNvencSwSessionInfoV2__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *params);
static inline NV_STATUS subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *params) {

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -322,6 +322,16 @@ typedef struct SYS_STATIC_CONFIG
NvBool bOsSevEnabled;
} SYS_STATIC_CONFIG;
// CPU vendor identifiers recorded during CPU identification.
// NOTE(review): presumably stored in the NvU8 'vendor' field of the CPU
// info structure declared below ("Vendor CPU_VENDOR") — confirm at the
// site that performs CPU detection.
typedef enum
{
    CPU_VENDOR_UNKNOWN = 0,   // vendor could not be determined
    CPU_VENDOR_INTEL,
    CPU_VENDOR_AMD,
    CPU_VENDOR_WINCHIP,
    CPU_VENDOR_CYRIX,
    CPU_VENDOR_TRANSM         // presumably Transmeta (name truncated) — TODO confirm
} CPU_VENDOR;
typedef struct
{
NvBool bInitialized; // Set to true once we id the CPU
@ -340,6 +350,7 @@ typedef struct
// filled in if CPU has embedded name
NvU32 family; // Vendor defined Family/extended Family
NvU32 model; // Vendor defined Model/extended Model
NvU8 vendor; // Vendor CPU_VENDOR
NvU32 coresOnDie; // # of cores on the die (0 if unknown)
NvU32 platformID; // Chip package type
NvU8 stepping; // Silicon stepping

View File

@ -153,6 +153,7 @@ typedef struct GspSystemInfo
BUSINFO FHBBusInfo;
BUSINFO chipsetIDInfo;
ACPI_METHOD_DATA acpiMethodData;
NvU64 sysTimerOffsetNs;
} GspSystemInfo;

View File

@ -52,6 +52,7 @@
#include "virtualization/hypervisor/hypervisor.h"
#include "finn_rm_api.h"
#include "os/os.h"
#include "objtmr.h"
#define SDK_ALL_CLASSES_INCLUDE_FULL_HEADER
#include "g_allclasses.h"
@ -362,7 +363,8 @@ static NV_STATUS _issueRpcLarge
pBuf8 = (NvU8 *)pBuffer;
remainingSize = bufSize;
entryLength = NV_MIN(bufSize, pRpc->maxRpcSize);
entryLength = NV_MIN(bufSize, vgpu_rpc_message_header_v->length);
NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength <= pRpc->maxRpcSize, NV_ERR_INVALID_STATE);
if (((NvU8 *)vgpu_rpc_message_header_v != pBuf8) && bBidirectional)
portMemCopy(pBuf8, entryLength, vgpu_rpc_message_header_v, entryLength);
@ -393,8 +395,11 @@ static NV_STATUS _issueRpcLarge
NV_ASSERT(0);
return nvStatus;
}
entryLength = vgpu_rpc_message_header_v->length - sizeof(rpc_message_header_v);
entryLength = vgpu_rpc_message_header_v->length;
NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength <= pRpc->maxRpcSize, NV_ERR_INVALID_STATE);
NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength >= sizeof(rpc_message_header_v), NV_ERR_INVALID_STATE);
entryLength -= sizeof(rpc_message_header_v);
if (entryLength > remainingSize)
entryLength = remainingSize;
@ -1312,6 +1317,9 @@ NV_STATUS rpcGspSetSystemInfo_v17_00
rpcInfo->bUpstreamL1PorMobileOnly = pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY);
rpcInfo->upstreamAddressValid = pGpu->gpuClData.upstreamPort.addr.valid;
OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
rpcInfo->sysTimerOffsetNs = pTmr->sysTimerOffsetNs;
status = _issueRpcAsync(pGpu, pRpc);
}

View File

@ -563,12 +563,11 @@ _rmGpuLocksAcquire(NvU32 gpuMask, NvU32 flags, NvU32 module, void *ra, NvU32 *pG
// If we own a higher order lock than one of the needed ones, we are
// violating the locking order and need to do a conditional acquire
// clz32(0) == ctz(0) == 32:
// owned=0b00110000, needed=0b00001100: (4 < (32-28)), bCond=FALSE
// owned=0b00110010, needed=0b00001100: (1 < (32-28)), bCond=TRUE
// owned=0b00010000, needed=0b11000011: (4 < (32-24)), bCond=TRUE
// owned=0b00000000, needed=0b00001100: (32 < (32-28)), bCond=FALSE
// owned=0b00000001, needed=0b00000000: (0 < (32-32)), bCond=FALSE
if (portUtilCountTrailingZeros32(ownedMask) < (32-portUtilCountLeadingZeros32(gpuMask)))
// owned=0b00001100, needed=0b00110000: ((32-28) > 4), bCond=FALSE
// owned=0b00001100, needed=0b00110010: ((32-28) > 1), bCond=TRUE
// owned=0b11000011, needed=0b00010000: ((32-24) > 4), bCond=TRUE
// owned=0b00000000, needed=0b00000001: ((32-32) > 0), bCond=FALSE
if ((32-portUtilCountLeadingZeros32(ownedMask)) > portUtilCountTrailingZeros32(gpuMask))
{
bCondAcquireCheck = NV_TRUE;
}

View File

@ -1375,3 +1375,36 @@ kbusGetFlaRange_GA100
return NV_OK;
}
/*!
 * @brief Returns the NVLink-specific peer number from pGpu (Local) to pGpuPeer.
 * Used only by VF.
 *
 * The peer ID is derived from the per-peer NVLink peer-number mask kept in
 * pKernelBus->p2p: the index of the lowest set bit in the mask for the remote
 * GPU's instance is the peer number.
 *
 * @param[in] pGpu        Local GPU
 * @param[in] pKernelBus  Local KernelBus (holds the NVLink peer-number masks)
 * @param[in] pGpuPeer    Remote GPU
 *
 * @returns NvU32 bus peer number (lowest set bit index of the mask), or
 *          BUS_INVALID_PEER if no NVLink P2P mapping exists to pGpuPeer.
 *          NOTE(review): the log message suggests the caller falls back to
 *          PCIe P2P on BUS_INVALID_PEER — confirm at call sites.
 */
NvU32
kbusGetNvlinkPeerId_GA100
(
    OBJGPU *pGpu,
    KernelBus *pKernelBus,
    OBJGPU *pGpuPeer
)
{
    NvU32 gpuPeerInst = gpuGetInstance(pGpuPeer);
    // Bitmask of NVLink peer numbers assigned for the remote GPU instance;
    // zero means NVLink P2P was never set up between the two GPUs.
    NvU32 peerId = pKernelBus->p2p.busNvlinkPeerNumberMask[gpuPeerInst];
    if (peerId == 0)
    {
        NV_PRINTF(LEVEL_INFO,
                  "NVLINK P2P not set up between GPU%u and GPU%u, checking for PCIe P2P...\n",
                  gpuGetInstance(pGpu), gpuPeerInst);
        return BUS_INVALID_PEER;
    }
    // Reduces peerId in place to the index of its lowest set bit.
    LOWESTBITIDX_32(peerId);
    return peerId;
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -475,16 +475,12 @@ kbusStateUnload_GM107
kbusUnlinkP2P_HAL(pGpu, pKernelBus);
}
if (flags & GPU_STATE_FLAGS_PRESERVING)
if ((flags & GPU_STATE_FLAGS_PRESERVING) && !IS_VIRTUAL_WITH_SRIOV(pGpu))
{
if (!IS_GPU_GC6_STATE_ENTERING(pGpu))
{
status = kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, GPU_GFID_PF);
if (!IS_VIRTUAL_WITH_SRIOV(pGpu))
{
// Do not use BAR2 physical mode for bootstrapping BAR2 across S/R.
pKernelBus->bUsePhysicalBar2InitPagetable = NV_FALSE;
}
pKernelBus->bUsePhysicalBar2InitPagetable = NV_FALSE;
}
}
else
@ -1439,6 +1435,9 @@ kbusSetupBar2GpuVaSpace_GM107
status = mmuWalkReserveEntries(pWalk, pLevelFmt, pKernelBus->bar2[gfid].cpuVisibleBase,
pKernelBus->bar2[gfid].cpuVisibleLimit, NV_FALSE);
NV_ASSERT_OR_GOTO(NV_OK == status, cleanup);
pKernelBus->bar2[gfid].cpuVisiblePgTblSize = pKernelBus->bar2[gfid].pageTblInit * pKernelBus->bar2[gfid].pageTblSize;
status = mmuWalkSparsify(pWalk, pKernelBus->bar2[gfid].cpuVisibleBase, pKernelBus->bar2[gfid].cpuVisibleLimit, NV_TRUE);
NV_ASSERT_OR_GOTO(NV_OK == status, cleanup);
}

View File

@ -1243,11 +1243,3 @@ kbusIsGpuP2pAlive_IMPL
return (pKernelBus->totalP2pObjectsAliveRefCount > 0);
}
/**
* @brief Setup VF BAR2 during hibernate resume
*
* @param[in] pGpu
* @param[in] pKernelBus
* @param[in] flags
*/

View File

@ -167,6 +167,11 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
{
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get display mask from input buffer
// display mask is 4 byte long and available at byte 1
@ -181,27 +186,55 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
// get acpi id
acpiId = pfmFindAcpiId(pPfm, pGpu, displayMask);
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_MXMX(pGpu, acpiId, pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUON:
{
if (inOutDataSize < sizeof(NvU32))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_GPUON(pGpu, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUOFF:
{
if (inOutDataSize < sizeof(NvU32))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_GPUOFF(pGpu, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUSTA:
{
if (inOutDataSize < sizeof(NvU32))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_GPUSTA(pGpu, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
// acpi id is 4 byte long and available at byte 4
@ -213,11 +246,18 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS_DISP_MASK_OFFSET,
sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_MXDS(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
// acpi id is 4 byte long and available at byte 4
@ -229,11 +269,18 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS_DISP_MASK_OFFSET,
sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_MXDS(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
// acpi id is 4 byte long and available at byte 4
@ -245,31 +292,53 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM_DISP_MASK_OFFSET,
sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_MXDM(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
// get acpi id from input buffer
portMemCopy(&acpiId,
sizeof(NvU32),
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID_DISP_MASK_OFFSET,
sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_MXID(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
portMemCopy(&acpiId,
sizeof(NvU32),
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST_DISP_MASK_OFFSET,
sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_LRST(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
portMemCopy(&acpiId,
sizeof(NvU32),
((NvU8*) pInOutData) + NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID_DISP_MASK_OFFSET,
@ -283,6 +352,12 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
// acpi id is 4 byte long and available at byte 4
@ -297,11 +372,17 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
// get acpi id
acpiId = pfmFindAcpiId(pPfm, pGpu, displayMask);
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_MXMX(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS:
{
if (inOutDataSize < (NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS_DISP_MASK_OFFSET + sizeof(NvU32)))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
//
// get acpi id from input buffer
// acpi id is 4 byte long and available at byte 4
@ -316,19 +397,35 @@ dispcmnCtrlCmdSystemExecuteAcpiMethod_IMPL
// get acpi id
acpiId = pfmFindAcpiId(pPfm, pGpu, displayMask);
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_DOS(pGpu, acpiId, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_ROM:
{
NvU32 *pBuffer = (NvU32*) pInOutData;
if ((inOutDataSize < (2 * sizeof(NvU32))) || (inOutDataSize < pBuffer[1]))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
outDataSize = pBuffer[1];
outStatus = pGpu->pOS->osCallACPI_NVHG_ROM(pGpu, (NvU32*) pInOutData, (NvU32*) pInOutData);
break;
}
case NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DCS:
{
if (inOutDataSize < sizeof(NvU32))
{
outStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
// get display mask from input buffer
portMemCopy(&acpiId, sizeof(NvU32), pInOutData, sizeof(NvU32));
outDataSize = sizeof(NvU32);
outStatus = pOS->osCallACPI_NVHG_DCS(pGpu, acpiId, (NvU32*) pInOutData);
break;
}

Some files were not shown because too many files have changed in this diff Show More