535.146.02

Bernhard Stoeckner 2023-12-07 15:09:52 +01:00
parent e573018659
commit 7165299dee
No known key found for this signature in database
GPG Key ID: 7D23DC2750FAC2E1
77 changed files with 965 additions and 362 deletions

View File

@ -2,6 +2,8 @@
## Release 535 Entries
### [535.146.02] 2023-12-07
### [535.129.03] 2023-10-31
### [535.113.01] 2023-09-21

View File

@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 535.129.03.
version 535.146.02.
## How to Build
@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
535.129.03 driver release. This can be achieved by installing
535.146.02 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@ -180,7 +180,7 @@ software applications.
## Compatible GPUs
The open-gpu-kernel-modules can be used on any Turing or later GPU
(see the table below). However, in the 535.129.03 release,
(see the table below). However, in the 535.146.02 release,
GeForce and Workstation support is still considered alpha-quality.
To enable use of the open kernel modules on GeForce and Workstation GPUs,
@ -188,7 +188,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
parameter to 1. For more details, see the NVIDIA GPU driver end user
README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/535.129.03/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/535.146.02/README/kernel_open.html
In the below table, if three IDs are listed, the first is the PCI Device
ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@ -750,8 +750,8 @@ Subsystem Device ID.
| NVIDIA H100 PCIe | 2331 10DE 1626 |
| NVIDIA H100 | 2339 10DE 17FC |
| NVIDIA H800 NVL | 233A 10DE 183A |
| GH200 120GB | 2342 10DE 16EB |
| GH200 480GB | 2342 10DE 1809 |
| NVIDIA GH200 120GB | 2342 10DE 16EB |
| NVIDIA GH200 480GB | 2342 10DE 1809 |
| NVIDIA GeForce RTX 3060 Ti | 2414 |
| NVIDIA GeForce RTX 3080 Ti Laptop GPU | 2420 |
| NVIDIA RTX A5500 Laptop GPU | 2438 |
@ -844,6 +844,7 @@ Subsystem Device ID.
| NVIDIA RTX 5000 Ada Generation | 26B2 103C 17FA |
| NVIDIA RTX 5000 Ada Generation | 26B2 10DE 17FA |
| NVIDIA RTX 5000 Ada Generation | 26B2 17AA 17FA |
| NVIDIA RTX 5880 Ada Generation | 26B3 10DE 1934 |
| NVIDIA L40 | 26B5 10DE 169D |
| NVIDIA L40 | 26B5 10DE 17DA |
| NVIDIA L40S | 26B9 10DE 1851 |

View File

@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.129.03\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.146.02\"
ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X)

View File

@ -2067,4 +2067,6 @@ typedef enum
#include <linux/clk-provider.h>
#endif
#define NV_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL_GPL(symbol)
#endif /* _NV_LINUX_H_ */
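For orientation: the NV_EXPORT_SYMBOL macro added in this hunk is what the nv-p2p.c hunk further down switches every export to. A minimal sketch of the effect, assuming only the definition shown above:

    /* Sketch: with the definition from this hunk, */
    NV_EXPORT_SYMBOL(nvidia_p2p_get_pages);
    /* preprocesses to the GPL-only export */
    EXPORT_SYMBOL_GPL(nvidia_p2p_get_pages);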

View File

@ -924,6 +924,7 @@ NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *
NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *);
void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_msix_allowed (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);

View File

@ -6282,6 +6282,21 @@ compile_test() {
compile_check_conftest "$CODE" "NV_MEMORY_FAILURE_MF_SW_SIMULATED_DEFINED" "" "types"
;;
crypto_tfm_ctx_aligned)
# Determine if 'crypto_tfm_ctx_aligned' is defined.
#
# Removed by commit 25c74a39e0f6 ("crypto: hmac - remove unnecessary
# alignment logic") in v6.7.
#
CODE="
#include <crypto/algapi.h>
void conftest_crypto_tfm_ctx_aligned(void) {
(void)crypto_tfm_ctx_aligned();
}"
compile_check_conftest "$CODE" "NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT" "" "functions"
;;
crypto)
#
# Determine if we support various crypto functions.
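For context, each conftest case compiles its probe against the target kernel's headers, and on success compile_check_conftest emits the named define into the generated conftest headers. A hedged sketch of how the new define is consumed (mirroring the hash hunk later in this commit):

    #ifdef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
        /* pre-6.7 kernels: the helper still exists */
        char *ctx = crypto_tfm_ctx_aligned(&tfm->base);
    #else
        /* v6.7+: removed by commit 25c74a39e0f6; fall back to crypto_shash_ctx() */
        char *ctx = crypto_shash_ctx(tfm);
    #endif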

View File

@ -243,6 +243,15 @@ static int __nv_drm_nvkms_gem_obj_init(
NvU64 *pages = NULL;
NvU32 numPages = 0;
if ((size % PAGE_SIZE) != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"NvKmsKapiMemory 0x%p size should be in a multiple of page size to "
"create a gem object",
pMemory);
return -EINVAL;
}
nv_nvkms_memory->pPhysicalAddress = NULL;
nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
nv_nvkms_memory->physically_mapped = false;

View File

@ -68,6 +68,9 @@ module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400);
static bool disable_vrr_memclk_switch = false;
module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400);
static bool opportunistic_display_sync = true;
module_param_named(opportunistic_display_sync, opportunistic_display_sync, bool, 0400);
/* These parameters are used for fault injection tests. Normally the defaults
* should be used. */
MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
@ -99,6 +102,11 @@ NvBool nvkms_disable_vrr_memclk_switch(void)
return disable_vrr_memclk_switch;
}
NvBool nvkms_opportunistic_display_sync(void)
{
return opportunistic_display_sync;
}
#define NVKMS_SYNCPT_STUBS_NEEDED
/*************************************************************************
@ -200,10 +208,24 @@ static inline int nvkms_read_trylock_pm_lock(void)
static inline void nvkms_read_lock_pm_lock(void)
{
if ((current->flags & PF_NOFREEZE)) {
/*
* Non-freezable tasks (i.e. kthreads in this case) don't have to worry
* about being frozen during system suspend, but do need to block so
* that the CPU can go idle during s2idle. Do a normal uninterruptible
* blocking wait for the PM lock.
*/
down_read(&nvkms_pm_lock);
} else {
/*
* For freezable tasks, make sure we give the kernel an opportunity to
* freeze if taking the PM lock fails.
*/
while (!down_read_trylock(&nvkms_pm_lock)) {
try_to_freeze();
cond_resched();
}
}
}
static inline void nvkms_read_unlock_pm_lock(void)
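A note on the failure mode the freezable branch above avoids, assuming standard Linux freezer semantics:

    /*
     * During suspend the freezer signals every freezable task and waits for
     * each to park itself via try_to_freeze(). A freezable task blocked
     * uninterruptibly in down_read() can never reach try_to_freeze(), so the
     * freezer would time out and abort the suspend. Spinning on
     * down_read_trylock() with try_to_freeze()/cond_resched() in the loop
     * keeps the task able to honor a freeze request while it waits.
     */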

View File

@ -99,6 +99,7 @@ typedef struct {
NvBool nvkms_output_rounding_fix(void);
NvBool nvkms_disable_vrr_memclk_switch(void);
NvBool nvkms_opportunistic_display_sync(void);
void nvkms_call_rm (void *ops);
void* nvkms_alloc (size_t size,

View File

@ -1,8 +1,13 @@
/* SPDX-License-Identifier: Linux-OpenIB */
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
@ -43,7 +48,9 @@
MODULE_AUTHOR("Yishai Hadas");
MODULE_DESCRIPTION("NVIDIA GPU memory plug-in");
MODULE_LICENSE("Linux-OpenIB");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
enum {
NV_MEM_PEERDIRECT_SUPPORT_DEFAULT = 0,

View File

@ -362,7 +362,8 @@ static NV_STATUS push_cancel_on_gpu(uvm_gpu_t *gpu,
"Cancel targeting instance_ptr {0x%llx:%s}\n",
instance_ptr.address,
uvm_aperture_string(instance_ptr.aperture));
} else {
}
else {
status = uvm_push_begin_acquire(gpu->channel_manager,
UVM_CHANNEL_TYPE_MEMOPS,
&replayable_faults->replay_tracker,
@ -1679,7 +1680,8 @@ static NV_STATUS service_fault_batch_ats_sub_vma(uvm_gpu_va_space_t *gpu_va_spac
if (access_type <= UVM_FAULT_ACCESS_TYPE_READ) {
cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL;
}
else if (access_type >= UVM_FAULT_ACCESS_TYPE_WRITE) {
else {
UVM_ASSERT(access_type >= UVM_FAULT_ACCESS_TYPE_WRITE);
if (uvm_fault_access_type_mask_test(current_entry->access_type_mask, UVM_FAULT_ACCESS_TYPE_READ) &&
!uvm_page_mask_test(reads_serviced_mask, page_index))
cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL;

View File

@ -10748,7 +10748,7 @@ NV_STATUS uvm_va_block_check_logical_permissions(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_processor_id_t processor_id,
uvm_page_index_t page_index,
uvm_fault_type_t access_type,
uvm_fault_access_type_t access_type,
bool allow_migration)
{
uvm_va_range_t *va_range = va_block->va_range;

View File

@ -1000,7 +1000,7 @@ NV_STATUS uvm_va_block_check_logical_permissions(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_processor_id_t processor_id,
uvm_page_index_t page_index,
uvm_fault_type_t access_type,
uvm_fault_access_type_t access_type,
bool allow_migration);
// API for access privilege revocation

View File

@ -23,10 +23,16 @@
#include "internal_crypt_lib.h"
#ifdef USE_LKCA
#ifndef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
#include <crypto/internal/hash.h>
#endif
#endif
void *lkca_hash_new(const char* alg_name)
{
#ifndef USE_LKCA
return false;
return NULL;
#else
//XXX: can we reuse crypto_shash part and just allocate desc
struct crypto_shash *alg;
@ -87,9 +93,24 @@ bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src)
struct crypto_shash *src_tfm = src->tfm;
struct crypto_shash *dst_tfm = dst->tfm;
int ss = crypto_shash_statesize(dst_tfm);
#ifdef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
char *src_ipad = crypto_tfm_ctx_aligned(&src_tfm->base);
char *dst_ipad = crypto_tfm_ctx_aligned(&dst_tfm->base);
int ss = crypto_shash_statesize(dst_tfm);
#else
int ctx_size = crypto_shash_alg(dst_tfm)->base.cra_ctxsize;
char *src_ipad = crypto_shash_ctx(src_tfm);
char *dst_ipad = crypto_shash_ctx(dst_tfm);
/*
* Actual struct definition is hidden, so I assume data we need is at
* the end. In 6.0 the struct has a pointer to crypto_shash followed by:
* 'u8 ipad[statesize];', then 'u8 opad[statesize];'
*/
src_ipad += ctx_size - 2 * ss;
dst_ipad += ctx_size - 2 * ss;
#endif
memcpy(dst_ipad, src_ipad, crypto_shash_blocksize(src->tfm));
memcpy(dst_ipad + ss, src_ipad + ss, crypto_shash_blocksize(src->tfm));
crypto_shash_clear_flags(dst->tfm, CRYPTO_TFM_NEED_KEY);
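The offset arithmetic in the fallback branch encodes an assumption about the kernel's private HMAC context layout; a sketch of that assumed layout (the real struct lives in crypto/hmac.c and is not exported):

    /*
     * Assumed layout, per the in-code comment above (not a public API):
     *
     *     struct hmac_ctx {
     *         struct crypto_shash *hash;
     *         u8 ipad[statesize];
     *         u8 opad[statesize];
     *     };
     *
     * so ipad starts at cra_ctxsize - 2 * statesize from the context base and
     * opad follows at ipad + statesize -- exactly the offsets the two
     * memcpy() calls above rely on.
     */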

View File

@ -316,14 +316,14 @@ int nvidia_p2p_init_mapping(
return -ENOTSUPP;
}
EXPORT_SYMBOL(nvidia_p2p_init_mapping);
NV_EXPORT_SYMBOL(nvidia_p2p_init_mapping);
int nvidia_p2p_destroy_mapping(uint64_t p2p_token)
{
return -ENOTSUPP;
}
EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);
NV_EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);
static void nv_p2p_mem_info_free_callback(void *data)
{
@ -587,7 +587,7 @@ int nvidia_p2p_get_pages(
p2p_token, va_space, virtual_address,
length, page_table, free_callback, data);
}
EXPORT_SYMBOL(nvidia_p2p_get_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_get_pages);
int nvidia_p2p_get_pages_persistent(
uint64_t virtual_address,
@ -605,7 +605,7 @@ int nvidia_p2p_get_pages_persistent(
virtual_address, length, page_table,
NULL, NULL);
}
EXPORT_SYMBOL(nvidia_p2p_get_pages_persistent);
NV_EXPORT_SYMBOL(nvidia_p2p_get_pages_persistent);
/*
* This function is a no-op, but is left in place (for now), in order to allow
@ -618,7 +618,7 @@ int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
return 0;
}
EXPORT_SYMBOL(nvidia_p2p_free_page_table);
NV_EXPORT_SYMBOL(nvidia_p2p_free_page_table);
int nvidia_p2p_put_pages(
uint64_t p2p_token,
@ -650,7 +650,7 @@ int nvidia_p2p_put_pages(
return nvidia_p2p_map_status(status);
}
EXPORT_SYMBOL(nvidia_p2p_put_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_put_pages);
int nvidia_p2p_put_pages_persistent(
uint64_t virtual_address,
@ -690,7 +690,7 @@ int nvidia_p2p_put_pages_persistent(
return nvidia_p2p_map_status(status);
}
EXPORT_SYMBOL(nvidia_p2p_put_pages_persistent);
NV_EXPORT_SYMBOL(nvidia_p2p_put_pages_persistent);
int nvidia_p2p_dma_map_pages(
struct pci_dev *peer,
@ -805,7 +805,7 @@ failed:
return nvidia_p2p_map_status(status);
}
EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);
int nvidia_p2p_dma_unmap_pages(
struct pci_dev *peer,
@ -845,7 +845,7 @@ int nvidia_p2p_dma_unmap_pages(
return 0;
}
EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);
NV_EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);
/*
* This function is a no-op, but is left in place (for now), in order to allow
@ -860,7 +860,7 @@ int nvidia_p2p_free_dma_mapping(
return 0;
}
EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
NV_EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
int nvidia_p2p_register_rsync_driver(
nvidia_p2p_rsync_driver_t *driver,
@ -889,7 +889,7 @@ int nvidia_p2p_register_rsync_driver(
driver->wait_for_rsync, data);
}
EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);
NV_EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);
void nvidia_p2p_unregister_rsync_driver(
nvidia_p2p_rsync_driver_t *driver,
@ -921,7 +921,7 @@ void nvidia_p2p_unregister_rsync_driver(
driver->wait_for_rsync, data);
}
EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);
NV_EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);
int nvidia_p2p_get_rsync_registers(
nvidia_p2p_rsync_reg_info_t **reg_info
@ -1014,7 +1014,7 @@ int nvidia_p2p_get_rsync_registers(
return 0;
}
EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);
NV_EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);
void nvidia_p2p_put_rsync_registers(
nvidia_p2p_rsync_reg_info_t *reg_info
@ -1046,4 +1046,4 @@ void nvidia_p2p_put_rsync_registers(
os_free_mem(reg_info);
}
EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);
NV_EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);

View File

@ -1224,12 +1224,11 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
rm_read_registry_dword(sp, nv, NV_REG_ENABLE_MSI, &msi_config);
if (msi_config == 1)
{
if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSIX))
if (nvl->pci_dev->msix_cap && rm_is_msix_allowed(sp, nv))
{
nv_init_msix(nv);
}
if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSI) &&
!(nv->flags & NV_FLAG_USES_MSIX))
if (nvl->pci_dev->msi_cap && !(nv->flags & NV_FLAG_USES_MSIX))
{
nv_init_msi(nv);
}
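The rewrite above replaces capability-list walks with the offsets struct pci_dev caches at enumeration time. A minimal sketch of the equivalence (the helper name is hypothetical; the fields are upstream struct pci_dev members):

    /*
     * pdev->msi_cap / pdev->msix_cap hold the config-space offset of the
     * capability, or 0 if absent, so a non-zero test is equivalent to
     * pci_find_capability(pdev, PCI_CAP_ID_MSIX) != 0 without re-walking
     * the capability list.
     */
    static bool nv_dev_has_msix(struct pci_dev *pdev)
    {
        return pdev->msix_cap != 0;
    }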

View File

@ -195,6 +195,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_FUNCTION_COMPILE_TESTS += crypto_tfm_ctx_aligned
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active

View File

@ -1360,7 +1360,7 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
if (dev->pconCaps.maxHdmiLinkBandwidthGbps != 0)
{
NvU64 requiredBW = (NvU64)(modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth);
NvU64 availableBw = (NvU64)(dev->pconCaps.maxHdmiLinkBandwidthGbps * 1000000000);
NvU64 availableBw = (NvU64)(dev->pconCaps.maxHdmiLinkBandwidthGbps * (NvU64)1000000000);
if (requiredBW > availableBw)
{
compoundQueryResult = false;
@ -1375,10 +1375,10 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
else if (dev->pconCaps.maxTmdsClkRate != 0)
{
NvU64 maxTmdsClkRateU64 = (NvU64)(dev->pconCaps.maxTmdsClkRate);
NvU64 requireBw = (NvU64)(modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth);
NvU64 requiredBw = (NvU64)(modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth);
if (modesetParams.colorFormat == dpColorFormat_YCbCr420)
{
if (maxTmdsClkRateU64 < ((requireBw/24)/2))
if (maxTmdsClkRateU64 < ((requiredBw/24)/2))
{
compoundQueryResult = false;
return false;
@ -1386,7 +1386,7 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
}
else
{
if (maxTmdsClkRateU64 < (requireBw/24))
if (maxTmdsClkRateU64 < (requiredBw/24))
{
compoundQueryResult = false;
return false;
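Both casts fixed in this file address the same 32-bit overflow pattern; a worked illustration, assuming maxHdmiLinkBandwidthGbps is a 32-bit integer as the surrounding code suggests:

    /* 1000000000 is an int literal, so without a 64-bit operand the multiply
     * is performed in 32-bit arithmetic and wraps before being widened: */
    NvU32 gbps  = 48;                               /* hypothetical FRL link   */
    NvU64 wrong = (NvU64)(gbps * 1000000000);       /* 32-bit product wraps    */
    NvU64 right = (NvU64)gbps * (NvU64)1000000000;  /* 48000000000 as intended */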

View File

@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r537_68
#define NV_BUILD_BRANCH r537_94
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r537_68
#define NV_PUBLIC_BRANCH r537_94
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/r537_68-335"
#define NV_BUILD_CHANGELIST_NUM (33430121)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/r537_94-386"
#define NV_BUILD_CHANGELIST_NUM (33606179)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r535/r537_68-335"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33430121)
#define NV_BUILD_NAME "rel/gpu_drv/r535/r537_94-386"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33606179)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r537_68-2"
#define NV_BUILD_CHANGELIST_NUM (33425293)
#define NV_BUILD_BRANCH_VERSION "r537_94-2"
#define NV_BUILD_CHANGELIST_NUM (33602158)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "537.70"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33425293)
#define NV_BUILD_NAME "537.99"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33602158)
#define NV_BUILD_BRANCH_BASE_VERSION R535
#endif
// End buildmeister python edited section

View File

@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "535.129.03"
#define NV_VERSION_STRING "535.146.02"
#else

View File

@ -39,48 +39,63 @@ extern "C" {
#endif //NV_UNIX
#endif //!__cplusplus
// Surprise removal capable TB3 and TB2 BUS Device ID
#define BUS_DEVICE_ID_TB3_ALPINE_RIDGE_01 0x1578
#define BUS_DEVICE_ID_TB3_02 0x1576
#define BUS_DEVICE_ID_TB3_03 0x15C0
#define BUS_DEVICE_ID_TB3_04 0x15D3
#define BUS_DEVICE_ID_TB3_05 0x15DA
#define BUS_DEVICE_ID_TB3_06 0x15EA
#define BUS_DEVICE_ID_TB3_07 0x15E7
#define BUS_DEVICE_ID_TB3_08 0x15EF
#define BUS_DEVICE_ID_TB3_09 0x1133
#define BUS_DEVICE_ID_TB3_10 0x1136
#define PARENT_EGPU_BUS_DEVICE_43 0x57A4
#define PARENT_EGPU_BUS_DEVICE_42 0x5786
#define PARENT_EGPU_BUS_DEVICE_41 0x1578
#define PARENT_EGPU_BUS_DEVICE_40 0x1576
#define PARENT_EGPU_BUS_DEVICE_39 0x15C0
#define PARENT_EGPU_BUS_DEVICE_38 0x15D3
#define PARENT_EGPU_BUS_DEVICE_37 0x15DA
#define PARENT_EGPU_BUS_DEVICE_36 0x15EA
#define PARENT_EGPU_BUS_DEVICE_35 0x15E7
#define PARENT_EGPU_BUS_DEVICE_34 0x15EF
#define PARENT_EGPU_BUS_DEVICE_33 0x1133
#define PARENT_EGPU_BUS_DEVICE_32 0x1136
// IceLake-U TB3 device IDs. The TB3 controllers below are integrated into the CPU.
#define BUS_DEVICE_ID_ICELAKE_TB3_01 0x8A1D
#define BUS_DEVICE_ID_ICELAKE_TB3_02 0x8A1F
#define BUS_DEVICE_ID_ICELAKE_TB3_03 0x8A21
#define BUS_DEVICE_ID_ICELAKE_TB3_04 0x8A23
#define BUS_DEVICE_ID_ICELAKE_TB3_05 0x8A0D
#define BUS_DEVICE_ID_ICELAKE_TB3_06 0x8A17
#define PARENT_EGPU_BUS_DEVICE_31 0x8A1D
#define PARENT_EGPU_BUS_DEVICE_30 0x8A1F
#define PARENT_EGPU_BUS_DEVICE_29 0x8A21
#define PARENT_EGPU_BUS_DEVICE_28 0x8A23
#define PARENT_EGPU_BUS_DEVICE_27 0x8A0D
#define PARENT_EGPU_BUS_DEVICE_26 0x8A17
// TigerLake Thunderbolt device ids.
#define BUS_DEVICE_ID_TIGERLAKE_TB3_01 0x9A1B
#define BUS_DEVICE_ID_TIGERLAKE_TB3_02 0x9A1D
#define BUS_DEVICE_ID_TIGERLAKE_TB3_03 0x9A1F
#define BUS_DEVICE_ID_TIGERLAKE_TB3_04 0x9A21
#define BUS_DEVICE_ID_TIGERLAKE_TB3_05 0x9A23
#define BUS_DEVICE_ID_TIGERLAKE_TB3_06 0x9A25
#define BUS_DEVICE_ID_TIGERLAKE_TB3_07 0x9A27
#define BUS_DEVICE_ID_TIGERLAKE_TB3_08 0x9A29
#define BUS_DEVICE_ID_TIGERLAKE_TB3_09 0x9A2B
#define BUS_DEVICE_ID_TIGERLAKE_TB3_10 0x9A2D
//#define BUS_DEVICE_ID_TB2_FALCON_RIDGE_DSL5520_01 0X156C // obsolete
#define BUS_DEVICE_ID_TB2_FALCON_RIDGE_DSL5520_02 0X156D
#define BUS_DEVICE_ID_TB2_03 0x157E
#define BUS_DEVICE_ID_TB2_04 0x156B
#define BUS_DEVICE_ID_TB2_05 0x1567
#define BUS_DEVICE_ID_TB2_06 0x1569
//#define BUS_DEVICE_ID_TB2_07 0x1548 // obsolete
#define BUS_DEVICE_ID_TB2_08 0x151B
#define BUS_DEVICE_ID_TB2_09 0x1549
#define BUS_DEVICE_ID_TB2_10 0x1513
#define PARENT_EGPU_BUS_DEVICE_25 0x9A1B
#define PARENT_EGPU_BUS_DEVICE_24 0x9A1D
#define PARENT_EGPU_BUS_DEVICE_23 0x9A1F
#define PARENT_EGPU_BUS_DEVICE_22 0x9A21
#define PARENT_EGPU_BUS_DEVICE_21 0x9A23
#define PARENT_EGPU_BUS_DEVICE_20 0x9A25
#define PARENT_EGPU_BUS_DEVICE_19 0x9A27
#define PARENT_EGPU_BUS_DEVICE_18 0x9A29
#define PARENT_EGPU_BUS_DEVICE_17 0x9A2B
#define PARENT_EGPU_BUS_DEVICE_16 0x9A2D
#define PARENT_EGPU_BUS_DEVICE_15 0x7EB2
#define PARENT_EGPU_BUS_DEVICE_14 0x7EC2
#define PARENT_EGPU_BUS_DEVICE_13 0x7EC3
#define PARENT_EGPU_BUS_DEVICE_12 0x7EB4
#define PARENT_EGPU_BUS_DEVICE_11 0x7EC4
#define PARENT_EGPU_BUS_DEVICE_10 0x7EB5
#define PARENT_EGPU_BUS_DEVICE_09 0x7EC5
#define PARENT_EGPU_BUS_DEVICE_08 0x7EC6
#define PARENT_EGPU_BUS_DEVICE_07 0x7EC7
#define PARENT_EGPU_BUS_DEVICE_06 0xA73E
#define PARENT_EGPU_BUS_DEVICE_05 0xA76D
#define PARENT_EGPU_BUS_DEVICE_04 0x466E
#define PARENT_EGPU_BUS_DEVICE_03 0x463F
#define PARENT_EGPU_BUS_DEVICE_02 0x462F
#define PARENT_EGPU_BUS_DEVICE_01 0x461F
#define PARENT_EGPU_BUS_DEVICE_02_08 0X156D
#define PARENT_EGPU_BUS_DEVICE_02_07 0x157E
#define PARENT_EGPU_BUS_DEVICE_02_06 0x156B
#define PARENT_EGPU_BUS_DEVICE_02_05 0x1567
#define PARENT_EGPU_BUS_DEVICE_02_04 0x1569
#define PARENT_EGPU_BUS_DEVICE_02_03 0x151B
#define PARENT_EGPU_BUS_DEVICE_02_02 0x1549
#define PARENT_EGPU_BUS_DEVICE_02_01 0x1513
//*****************************************************************************
// Function: isTB3DeviceID
@ -103,33 +118,51 @@ extern "C" {
EGPU_INLINE NvBool isTB3DeviceID(NvU16 deviceID)
{
NvU32 index;
NvU16 tb3DeviceIDList[]={ BUS_DEVICE_ID_TB3_ALPINE_RIDGE_01,
BUS_DEVICE_ID_TB3_02,
BUS_DEVICE_ID_TB3_03,
BUS_DEVICE_ID_TB3_04,
BUS_DEVICE_ID_TB3_05,
BUS_DEVICE_ID_TB3_06,
BUS_DEVICE_ID_TB3_07,
BUS_DEVICE_ID_TB3_08,
BUS_DEVICE_ID_TB3_09,
BUS_DEVICE_ID_TB3_10,
BUS_DEVICE_ID_ICELAKE_TB3_01,
BUS_DEVICE_ID_ICELAKE_TB3_02,
BUS_DEVICE_ID_ICELAKE_TB3_03,
BUS_DEVICE_ID_ICELAKE_TB3_04,
BUS_DEVICE_ID_ICELAKE_TB3_05,
BUS_DEVICE_ID_ICELAKE_TB3_06,
BUS_DEVICE_ID_TIGERLAKE_TB3_01,
BUS_DEVICE_ID_TIGERLAKE_TB3_02,
BUS_DEVICE_ID_TIGERLAKE_TB3_03,
BUS_DEVICE_ID_TIGERLAKE_TB3_04,
BUS_DEVICE_ID_TIGERLAKE_TB3_05,
BUS_DEVICE_ID_TIGERLAKE_TB3_06,
BUS_DEVICE_ID_TIGERLAKE_TB3_07,
BUS_DEVICE_ID_TIGERLAKE_TB3_08,
BUS_DEVICE_ID_TIGERLAKE_TB3_09,
BUS_DEVICE_ID_TIGERLAKE_TB3_10
NvU16 tb3DeviceIDList[]={ PARENT_EGPU_BUS_DEVICE_01,
PARENT_EGPU_BUS_DEVICE_02,
PARENT_EGPU_BUS_DEVICE_03,
PARENT_EGPU_BUS_DEVICE_04,
PARENT_EGPU_BUS_DEVICE_05,
PARENT_EGPU_BUS_DEVICE_06,
PARENT_EGPU_BUS_DEVICE_07,
PARENT_EGPU_BUS_DEVICE_08,
PARENT_EGPU_BUS_DEVICE_09,
PARENT_EGPU_BUS_DEVICE_10,
PARENT_EGPU_BUS_DEVICE_11,
PARENT_EGPU_BUS_DEVICE_12,
PARENT_EGPU_BUS_DEVICE_13,
PARENT_EGPU_BUS_DEVICE_14,
PARENT_EGPU_BUS_DEVICE_15,
PARENT_EGPU_BUS_DEVICE_16,
PARENT_EGPU_BUS_DEVICE_17,
PARENT_EGPU_BUS_DEVICE_18,
PARENT_EGPU_BUS_DEVICE_19,
PARENT_EGPU_BUS_DEVICE_20,
PARENT_EGPU_BUS_DEVICE_21,
PARENT_EGPU_BUS_DEVICE_22,
PARENT_EGPU_BUS_DEVICE_23,
PARENT_EGPU_BUS_DEVICE_24,
PARENT_EGPU_BUS_DEVICE_25,
PARENT_EGPU_BUS_DEVICE_26,
PARENT_EGPU_BUS_DEVICE_27,
PARENT_EGPU_BUS_DEVICE_28,
PARENT_EGPU_BUS_DEVICE_29,
PARENT_EGPU_BUS_DEVICE_30,
PARENT_EGPU_BUS_DEVICE_31,
PARENT_EGPU_BUS_DEVICE_32,
PARENT_EGPU_BUS_DEVICE_33,
PARENT_EGPU_BUS_DEVICE_34,
PARENT_EGPU_BUS_DEVICE_35,
PARENT_EGPU_BUS_DEVICE_36,
PARENT_EGPU_BUS_DEVICE_37,
PARENT_EGPU_BUS_DEVICE_38,
PARENT_EGPU_BUS_DEVICE_39,
PARENT_EGPU_BUS_DEVICE_40,
PARENT_EGPU_BUS_DEVICE_41,
PARENT_EGPU_BUS_DEVICE_42,
PARENT_EGPU_BUS_DEVICE_43
};
for (index = 0; index < (sizeof(tb3DeviceIDList)/sizeof(NvU16)); index++)
{
if (deviceID == tb3DeviceIDList[index])
@ -161,11 +194,14 @@ EGPU_INLINE NvBool isTB3DeviceID(NvU16 deviceID)
EGPU_INLINE NvBool isTB2DeviceID(NvU16 deviceID)
{
NvU32 index;
NvU16 tb2DeviceIDList[]={ BUS_DEVICE_ID_TB2_FALCON_RIDGE_DSL5520_02,
BUS_DEVICE_ID_TB2_03, BUS_DEVICE_ID_TB2_04,
BUS_DEVICE_ID_TB2_05, BUS_DEVICE_ID_TB2_06,
BUS_DEVICE_ID_TB2_08, BUS_DEVICE_ID_TB2_09,
BUS_DEVICE_ID_TB2_10
NvU16 tb2DeviceIDList[]={ PARENT_EGPU_BUS_DEVICE_02_01,
PARENT_EGPU_BUS_DEVICE_02_02,
PARENT_EGPU_BUS_DEVICE_02_03,
PARENT_EGPU_BUS_DEVICE_02_04,
PARENT_EGPU_BUS_DEVICE_02_05,
PARENT_EGPU_BUS_DEVICE_02_06,
PARENT_EGPU_BUS_DEVICE_02_07,
PARENT_EGPU_BUS_DEVICE_02_08
};
for (index = 0; index < (sizeof(tb2DeviceIDList)/sizeof(NvU16)); index++)
{

View File

@ -24,6 +24,7 @@
#ifndef __tu102_dev_gc6_island_h__
#define __tu102_dev_gc6_island_h__
#define NV_PGC6 0x118fff:0x118000 /* RW--D */
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK 0x00118128 /* RW-4R */
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION 3:0 /* RWIVF */
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */

View File

@ -28,6 +28,10 @@
#define NV_XVE_MSIX_CAP_HDR_ENABLE 31:31 /* RWIVF */
#define NV_XVE_MSIX_CAP_HDR_ENABLE_ENABLED 0x00000001 /* RW--V */
#define NV_XVE_MSIX_CAP_HDR_ENABLE_DISABLED 0x00000000 /* RWI-V */
#define NV_XVE_PRIV_MISC_1 0x0000041C /* RW-4R */
#define NV_XVE_PRIV_MISC_1_CYA_HIDE_MSIX_CAP 29:29 /* RWCVF */
#define NV_XVE_PRIV_MISC_1_CYA_HIDE_MSIX_CAP_TRUE 0x00000001 /* RW--V */
#define NV_XVE_PRIV_MISC_1_CYA_HIDE_MSIX_CAP_FALSE 0x00000000 /* RWC-V */
#define NV_XVE_SRIOV_CAP_HDR3 0x00000BD8 /* R--4R */
#define NV_XVE_SRIOV_CAP_HDR3_TOTAL_VFS 31:16 /* R-EVF */
#define NV_XVE_SRIOV_CAP_HDR5 0x00000BE0 /* R--4R */
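A hedged sketch of testing the new CYA bit with the standard RM DRF accessors (regVal is a hypothetical read of NV_XVE_PRIV_MISC_1):

    if (FLD_TEST_DRF(_XVE, _PRIV_MISC_1, _CYA_HIDE_MSIX_CAP, _TRUE, regVal))
    {
        /* MSI-X capability is hidden for this device; do not enable MSI-X. */
    }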

View File

@ -227,7 +227,36 @@ typedef struct NV0080_CTRL_FB_GET_CAPS_V2_PARAMS {
NvU8 capsTbl[NV0080_CTRL_FB_CAPS_TBL_SIZE];
} NV0080_CTRL_FB_GET_CAPS_V2_PARAMS;
/**
* NV0080_CTRL_CMD_FB_SET_DEFAULT_VIDMEM_PHYSICALITY
*
* When clients allocate video memory specifying _DEFAULT (0) for
* NVOS32_ATTR_PHYSICALITY, RM will usually allocate contiguous memory.
* Clients can change that behavior with this command so that _DEFAULT maps to
* another value.
*
* The expectation is that clients currently implicitly rely on the default,
* but can be incrementally updated to explicitly specify _CONTIGUOUS where
* necessary and change the default for their allocations to _NONCONTIGUOUS or
* _ALLOW_NONCONTIGUOUS.
*
* In the future RM may be updated to globally default to _NONCONTIGUOUS or
* _ALLOW_NONCONTIGUOUS, and at that point this can be removed.
*/
#define NV0080_CTRL_CMD_FB_SET_DEFAULT_VIDMEM_PHYSICALITY (0x801308) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS_MESSAGE_ID" */
#define NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS_MESSAGE_ID (0x8U)
typedef struct NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS {
NvU32 value;
} NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS;
typedef enum NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY {
NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_DEFAULT = 0,
NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_NONCONTIGUOUS = 1,
NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_CONTIGUOUS = 2,
NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_ALLOW_NONCONTIGUOUS = 3,
} NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY;
/* _ctrl0080fb_h_ */
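A hedged usage sketch of the new control from the kernel-side RM API (the pRmApi/handle plumbing is assumed; the command and parameter struct are from this hunk):

    NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS params = { 0 };
    params.value = NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_ALLOW_NONCONTIGUOUS;
    status = pRmApi->Control(pRmApi, hClient, hDevice,
                             NV0080_CTRL_CMD_FB_SET_DEFAULT_VIDMEM_PHYSICALITY,
                             &params, sizeof(params));
    /* Afterwards, vidmem allocations that pass NVOS32_ATTR_PHYSICALITY
     * _DEFAULT are treated as _ALLOW_NONCONTIGUOUS for this device. */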

View File

@ -446,9 +446,13 @@
#define FOXCONN_EINSTEIN_64_DEVID 0xA1C1
#define FOXCONN_EINSTEIN_64_SSDEVID 0x7270
// Lenovo Tomcat Workstation
// Lenovo Tomcat/Falcon/Hornet Workstations
#define LENOVO_TOMCAT_DEVID 0x1B81
#define LENOVO_TOMCAT_SSDEVID 0x104e
#define LENOVO_FALCON_DEVID 0x7A8A
#define LENOVO_FALCON_SSDEVID 0x1055
#define LENOVO_HORNET_DEVID 0x7A8A
#define LENOVO_HORNET_SSDEVID 0x1056
// NVIDIA C51
#define NVIDIA_C51_DEVICE_ID_MIN 0x2F0

View File

@ -477,10 +477,10 @@ static int libos_printf_a(
# if defined(NVRM)
if (logDecode->curLineBufPtr == logDecode->lineBuffer)
{
// Prefix every line with NVRM GPUn Ucode-task: filename(lineNumber):
// Prefix every line with NVRM: GPUn Ucode-task: filename(lineNumber):
snprintf(
logDecode->curLineBufPtr, LIBOS_LOG_LINE_BUFFER_SIZE - 1,
"NVRM GPU%u %s-%s: %s(%u): ", pRec->log->gpuInstance,
NV_PRINTF_ADD_PREFIX("GPU%u %s-%s: %s(%u): "), pRec->log->gpuInstance,
logDecode->sourceName, pRec->log->taskPrefix, filename, pRec->meta->lineNumber);
logDecode->curLineBufPtr += portStringLength(logDecode->curLineBufPtr);
}

View File

@ -182,6 +182,8 @@ NvBool nvDowngradeColorSpaceAndBpc(
NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo,
NVHwModeTimingsEvoPtr pTimings,
enum NvKmsDpyAttributeCurrentColorSpaceValue *pColorSpace,
enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
const NvBool b2Heads1Or,
NVDscInfoEvoRec *pDscInfo,
const struct NvKmsModeValidationParams *pParams);

View File

@ -481,6 +481,7 @@ static inline void HsIncrementNextIndex(
}
static inline void HsChangeSurfaceFlipRefCount(
NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo,
NvBool increase)
{
@ -488,7 +489,7 @@ static inline void HsChangeSurfaceFlipRefCount(
if (increase) {
nvEvoIncrementSurfaceRefCnts(pSurfaceEvo);
} else {
nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
}
}
}

View File

@ -68,8 +68,6 @@ NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev(
struct NvKmsPerOpenDev *pOpenDev);
const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst(
const struct NvKmsPerOpenDev *pOpenDev);
NVDevEvoPtr nvGetDevEvoFromOpenDev(
const struct NvKmsPerOpenDev *pOpenDev);
void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32);

View File

@ -47,7 +47,8 @@ void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo);
void nvEvoDecrementSurfaceRefCnts(NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo);
NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo);

View File

@ -99,6 +99,7 @@ typedef struct {
NvBool nvkms_output_rounding_fix(void);
NvBool nvkms_disable_vrr_memclk_switch(void);
NvBool nvkms_opportunistic_display_sync(void);
void nvkms_call_rm (void *ops);
void* nvkms_alloc (size_t size,

View File

@ -118,7 +118,7 @@ SetCursorImageOneHead(NVDispEvoPtr pDispEvo,
}
if (pSurfaceEvoOld) {
nvEvoDecrementSurfaceRefCnts(pSurfaceEvoOld);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvoOld);
}
pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo = pSurfaceEvoNew;

View File

@ -1433,7 +1433,13 @@ static void UnlockRasterLockGroup(NVDevEvoPtr pDevEvo) {
NVDispEvoPtr pDispEvo = topo->pDispEvoOrder[i];
NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
NvU32 sd = pDispEvo->displayOwner;
NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
NVEvoSubDevPtr pEvoSubDev;
if (pDevEvo->gpus == NULL) {
continue;
}
pEvoSubDev = &pDevEvo->gpus[sd];
/* Initialize the assembly state */
SyncEvoLockState();
@ -1669,7 +1675,8 @@ void nvEvoLockStateSetMergeMode(NVDispEvoPtr pDispEvo)
/*
* FinishModesetOneTopology() - Set up raster lock between GPUs, if applicable,
* for one RasterLockTopology. Called in a loop from nvFinishModesetEvo().
* unless disabled via kernel module parameter, for one RasterLockTopology.
* Called in a loop from nvFinishModesetEvo().
*/
static void FinishModesetOneTopology(RasterLockTopology *topo)
@ -1686,6 +1693,11 @@ static void FinishModesetOneTopology(RasterLockTopology *topo)
NVDevEvoPtr pDevEvoFlipLockGroup = NULL;
NvBool mergeModeInUse = FALSE;
if (!nvkms_opportunistic_display_sync()) {
/* If opportunistic display sync is disabled, do not attempt rasterlock. */
return;
}
/*
* First, look for devices with VRR enabled. If we find any, go into the
* special VRR framelock mode.
@ -6197,13 +6209,15 @@ NvBool nvDowngradeColorSpaceAndBpc(
NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo,
NVHwModeTimingsEvoPtr pTimings,
enum NvKmsDpyAttributeCurrentColorSpaceValue *pColorSpace,
enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
const NvBool b2Heads1Or,
NVDscInfoEvoRec *pDscInfo,
const struct NvKmsModeValidationParams *pParams)
{
NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace;
enum NvKmsDpyAttributeColorBpcValue colorBpc;
enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace = *pColorSpace;
enum NvKmsDpyAttributeColorBpcValue colorBpc = *pColorBpc;
enum NvKmsDpyAttributeColorRangeValue colorRange;
const NVColorFormatInfoRec supportedColorFormats =
nvGetColorFormatInfo(pDpyEvo);
@ -6218,14 +6232,6 @@ NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo,
return TRUE;
}
if (pTimings->yuv420Mode != NV_YUV420_MODE_NONE) {
colorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420;
colorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
} else if (!nvGetDefaultColorSpace(&supportedColorFormats, &colorSpace,
&colorBpc)) {
return FALSE;
}
if (colorSpace != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) {
colorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
} else {
@ -6251,6 +6257,8 @@ NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo,
return FALSE;
}
*pColorSpace = colorSpace;
*pColorBpc = colorBpc;
return TRUE;
}

View File

@ -1825,16 +1825,21 @@ static void HsConfigInitFlipQueue(
}
static void HsConfigUpdateSurfaceRefCount(
NVDevEvoPtr pDevEvo,
const NVHsChannelConfig *pChannelConfig,
NvBool increase)
{
HsChangeSurfaceFlipRefCount(pChannelConfig->warpMesh.pSurface, increase);
HsChangeSurfaceFlipRefCount(
pDevEvo, pChannelConfig->warpMesh.pSurface, increase);
HsChangeSurfaceFlipRefCount(pChannelConfig->pBlendTexSurface, increase);
HsChangeSurfaceFlipRefCount(
pDevEvo, pChannelConfig->pBlendTexSurface, increase);
HsChangeSurfaceFlipRefCount(pChannelConfig->pOffsetTexSurface, increase);
HsChangeSurfaceFlipRefCount(
pDevEvo, pChannelConfig->pOffsetTexSurface, increase);
HsChangeSurfaceFlipRefCount(pChannelConfig->cursor.pSurfaceEvo, increase);
HsChangeSurfaceFlipRefCount(
pDevEvo, pChannelConfig->cursor.pSurfaceEvo, increase);
}
/*!
@ -2246,6 +2251,7 @@ void nvHsConfigStart(
*/
if (pHsConfigOneHead->pHsChannel != NULL) {
HsConfigUpdateSurfaceRefCount(
pDevEvo,
&pHsConfigOneHead->channelConfig,
TRUE /* increase */);
}
@ -2256,6 +2262,7 @@ void nvHsConfigStart(
*/
if (pDispEvo->pHsChannel[apiHead] != NULL) {
HsConfigUpdateSurfaceRefCount(
pDevEvo,
&pDispEvo->pHsChannel[apiHead]->config,
FALSE /* increase */);
}

View File

@ -169,6 +169,8 @@ static void HsIoctlSetCursorImage(
NVHsChannelEvoRec *pHsChannel,
NVSurfaceEvoRec *pSurfaceEvo)
{
NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
/*
* Increment the refcnt of the new surface, and
* decrement the refcnt of the old surface.
@ -178,10 +180,10 @@ static void HsIoctlSetCursorImage(
*/
HsChangeSurfaceFlipRefCount(
pSurfaceEvo, TRUE /* increase */);
pDevEvo, pSurfaceEvo, TRUE /* increase */);
HsChangeSurfaceFlipRefCount(
pHsChannel->config.cursor.pSurfaceEvo, FALSE /* increase */);
pDevEvo, pHsChannel->config.cursor.pSurfaceEvo, FALSE /* increase */);
pHsChannel->config.cursor.pSurfaceEvo = pSurfaceEvo;

View File

@ -549,21 +549,22 @@ static NvBool HsFlipQueueEntryIsReady(
* Update the reference count of all the surfaces described in the pFlipState.
*/
static void HsUpdateFlipQueueEntrySurfaceRefCount(
NVDevEvoPtr pDevEvo,
const NVHsLayerRequestedFlipState *pFlipState,
NvBool increase)
{
HsChangeSurfaceFlipRefCount(
pFlipState->pSurfaceEvo[NVKMS_LEFT], increase);
pDevEvo, pFlipState->pSurfaceEvo[NVKMS_LEFT], increase);
HsChangeSurfaceFlipRefCount(
pFlipState->pSurfaceEvo[NVKMS_RIGHT], increase);
pDevEvo, pFlipState->pSurfaceEvo[NVKMS_RIGHT], increase);
if (!pFlipState->syncObject.usingSyncpt) {
HsChangeSurfaceFlipRefCount(
pFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, increase);
pDevEvo, pFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, increase);
HsChangeSurfaceFlipRefCount(
pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, increase);
pDevEvo, pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, increase);
}
}
@ -602,7 +603,7 @@ static void HsReleaseFlipQueueEntry(
* HeadSurface no longer needs to read from the surfaces in pFlipState;
* decrement their reference counts.
*/
HsUpdateFlipQueueEntrySurfaceRefCount(pFlipState, FALSE);
HsUpdateFlipQueueEntrySurfaceRefCount(pDevEvo, pFlipState, FALSE);
}
/*!
@ -684,6 +685,7 @@ void nvHsPushFlipQueueEntry(
const NvU8 layer,
const NVHsLayerRequestedFlipState *pFlipState)
{
NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
NVHsChannelFlipQueueEntry *pEntry = nvCalloc(1, sizeof(*pEntry));
@ -700,7 +702,7 @@ void nvHsPushFlipQueueEntry(
/* Increment the ref counts on the surfaces in the flip queue entry. */
HsUpdateFlipQueueEntrySurfaceRefCount(&pEntry->hwState, TRUE);
HsUpdateFlipQueueEntrySurfaceRefCount(pDevEvo, &pEntry->hwState, TRUE);
/* "Fast forward" through existing flip queue entries that are ready. */
@ -722,7 +724,7 @@ void nvHsPushFlipQueueEntry(
* If this function returns TRUE, it is the caller's responsibility to
* eventually call
*
* HsUpdateFlipQueueEntrySurfaceRefCount(pFlipState, FALSE)
* HsUpdateFlipQueueEntrySurfaceRefCount(pDevEvo, pFlipState, FALSE)
*
* for the returned pFlipState.
*

View File

@ -1796,7 +1796,7 @@ static void ChangeSurfaceFlipRefCount(
if (increase) {
nvEvoIncrementSurfaceRefCnts(pSurfaceEvo);
} else {
nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
}
}
}

View File

@ -1645,6 +1645,10 @@ static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo,
NvU32 head;
NvBool ret = FALSE;
const NVColorFormatInfoRec supportedColorFormats = nvGetColorFormatInfo(pDpyEvo);
enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace;
enum NvKmsDpyAttributeColorBpcValue colorBpc;
if (modeName[0] == '\0') {
nvBuildModeName(pModeTimings->hVisible, pModeTimings->vVisible,
localModeName, sizeof(localModeName));
@ -1701,6 +1705,16 @@ static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo,
b2Heads1Or = nvEvoUse2Heads1OR(pDpyEvo, pTimingsEvo, pParams);
if (pTimingsEvo->yuv420Mode != NV_YUV420_MODE_NONE) {
colorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420;
colorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
} else if (!nvGetDefaultColorSpace(&supportedColorFormats, &colorSpace,
&colorBpc)) {
LogModeValidationEnd(pDispEvo, pInfoString,
"Failed to get default color space and Bpc");
goto done;
}
if (nvDpyIsHdmiEvo(pDpyEvo)) {
if (!nvHdmiFrlQueryConfig(pDpyEvo,
&pKmsMode->timings,
@ -1714,8 +1728,8 @@ static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo,
goto done;
}
} else {
if (!nvDPValidateModeEvo(pDpyEvo, pTimingsEvo, b2Heads1Or, pDscInfo,
pParams)) {
if (!nvDPValidateModeEvo(pDpyEvo, pTimingsEvo, &colorSpace, &colorBpc,
b2Heads1Or, pDscInfo, pParams)) {
LogModeValidationEnd(pDispEvo,
pInfoString, "DP Bandwidth check failed");
goto done;
@ -1734,14 +1748,7 @@ static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo,
/* Run the raster timings through IMP checking. */
{
const NVColorFormatInfoRec colorFormats =
nvGetColorFormatInfo(pDpyEvo);
enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace;
enum NvKmsDpyAttributeColorBpcValue colorBpc;
if (!nvGetDefaultColorSpace(&colorFormats, &colorSpace, &colorBpc) ||
!nvConstructHwModeTimingsImpCheckEvo(pDpyEvo->pConnectorEvo,
if (!nvConstructHwModeTimingsImpCheckEvo(pDpyEvo->pConnectorEvo,
pTimingsEvo,
(pDscInfo->type !=
NV_DSC_INFO_EVO_TYPE_DISABLED),
@ -1756,7 +1763,6 @@ static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo,
"GPU extended capability check failed");
goto done;
}
}
nvAssert(impOutNumHeads > 0);

View File

@ -958,7 +958,7 @@ void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo,
nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
if (isOwner) {
nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
} else {
nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo);
}
@ -1003,7 +1003,7 @@ void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo,
/* Remove the handle from the calling client's namespace. */
nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
}
void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo,
@ -1041,15 +1041,13 @@ void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo)
pSurfaceEvo->structRefCnt++;
}
void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo)
void nvEvoDecrementSurfaceRefCnts(NVDevEvoPtr pDevEvo,
NVSurfaceEvoPtr pSurfaceEvo)
{
nvAssert(pSurfaceEvo->rmRefCnt >= 1);
pSurfaceEvo->rmRefCnt--;
if (pSurfaceEvo->rmRefCnt == 0) {
NVDevEvoPtr pDevEvo =
nvGetDevEvoFromOpenDev(pSurfaceEvo->owner.pOpenDev);
/*
* Don't sync if this surface was registered as not requiring display
* hardware access, to WAR timeouts that result from OGL unregistering
@ -1224,7 +1222,7 @@ void nvEvoUnregisterDeferredRequestFifo(
pDeferredRequestFifo->fifo,
0);
nvEvoDecrementSurfaceRefCnts(pDeferredRequestFifo->pSurfaceEvo);
nvEvoDecrementSurfaceRefCnts(pDevEvo, pDeferredRequestFifo->pSurfaceEvo);
nvFree(pDeferredRequestFifo);
}

View File

@ -5917,13 +5917,6 @@ NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo)
}
#endif
NVDevEvoPtr nvGetDevEvoFromOpenDev(
const struct NvKmsPerOpenDev *pOpenDev)
{
nvAssert(pOpenDev != NULL);
return pOpenDev->pDevEvo;
}
const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev(
const struct NvKmsPerOpenDev *pOpenDev)
{

View File

@ -358,6 +358,7 @@ typedef struct
*/
NvU64 s0ix_gcoff_max_fb_size;
NvU32 pmc_boot_1;
NvU32 pmc_boot_42;
} nv_priv_t;

View File

@ -924,6 +924,7 @@ NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *
NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *);
void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_msix_allowed (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);

View File

@ -1160,6 +1160,7 @@ NvBool RmInitPrivateState(
nv_priv_t *nvp;
NvU32 gpuId;
NvU32 pmc_boot_0 = 0;
NvU32 pmc_boot_1 = 0;
NvU32 pmc_boot_42 = 0;
NV_SET_NV_PRIV(pNv, NULL);
@ -1177,6 +1178,7 @@ NvBool RmInitPrivateState(
}
pmc_boot_0 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_0);
pmc_boot_1 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_1);
pmc_boot_42 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_42);
os_unmap_kernel_space(pNv->regs->map_u, os_page_size);
@ -1216,6 +1218,7 @@ NvBool RmInitPrivateState(
os_mem_set(nvp, 0, sizeof(*nvp));
nvp->status = NV_ERR_INVALID_STATE;
nvp->pmc_boot_0 = pmc_boot_0;
nvp->pmc_boot_1 = pmc_boot_1;
nvp->pmc_boot_42 = pmc_boot_42;
NV_SET_NV_PRIV(pNv, nvp);
@ -1234,7 +1237,7 @@ void RmClearPrivateState(
nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS];
nv_dynamic_power_t dynamicPowerCopy;
NvU32 x = 0;
NvU32 pmc_boot_0, pmc_boot_42;
NvU32 pmc_boot_0, pmc_boot_1, pmc_boot_42;
//
// Do not clear private state after GPU resets, it is used while
@ -1252,6 +1255,7 @@ void RmClearPrivateState(
pRegistryCopy = nvp->pRegistry;
dynamicPowerCopy = nvp->dynamic_power;
pmc_boot_0 = nvp->pmc_boot_0;
pmc_boot_1 = nvp->pmc_boot_1;
pmc_boot_42 = nvp->pmc_boot_42;
for (x = 0; x < MAX_I2C_ADAPTERS; x++)
@ -1267,6 +1271,7 @@ void RmClearPrivateState(
nvp->pRegistry = pRegistryCopy;
nvp->dynamic_power = dynamicPowerCopy;
nvp->pmc_boot_0 = pmc_boot_0;
nvp->pmc_boot_1 = pmc_boot_1;
nvp->pmc_boot_42 = pmc_boot_42;
for (x = 0; x < MAX_I2C_ADAPTERS; x++)

View File

@ -706,3 +706,27 @@ NV_STATUS rm_gpu_handle_mmu_faults(
return status;
}
NvBool NV_API_CALL rm_is_msix_allowed(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
nv_priv_t *pNvp = NV_GET_NV_PRIV(nv);
THREAD_STATE_NODE threadState;
void *fp;
NvBool ret = NV_FALSE;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK)
{
ret = gpumgrIsDeviceMsixAllowed(nv->regs->cpu_address,
pNvp->pmc_boot_1, pNvp->pmc_boot_42);
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return ret;
}

View File

@ -23,6 +23,7 @@
--undefined=rm_isr
--undefined=rm_isr_bh
--undefined=rm_isr_bh_unlocked
--undefined=rm_is_msix_allowed
--undefined=rm_perform_version_check
--undefined=rm_power_management
--undefined=rm_stop_user_channels

View File

@ -590,6 +590,21 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
#endif
},
{ /* [26] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdSetDefaultVidmemPhysicality_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*flags=*/ 0x11u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x801308u,
/*paramSize=*/ sizeof(NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdSetDefaultVidmemPhysicality"
#endif
},
{ /* [27] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -604,7 +619,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdHostGetCaps"
#endif
},
{ /* [27] */
{ /* [28] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -619,7 +634,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdHostGetCapsV2"
#endif
},
{ /* [28] */
{ /* [29] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -634,7 +649,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetCaps"
#endif
},
{ /* [29] */
{ /* [30] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -649,7 +664,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStartSelectedChannels"
#endif
},
{ /* [30] */
{ /* [31] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -664,7 +679,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetEngineContextProperties"
#endif
},
{ /* [31] */
{ /* [32] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -679,7 +694,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetChannelList"
#endif
},
{ /* [32] */
{ /* [33] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -694,7 +709,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetLatencyBufferSize"
#endif
},
{ /* [33] */
{ /* [34] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -709,7 +724,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoSetChannelProperties"
#endif
},
{ /* [34] */
{ /* [35] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -724,7 +739,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStopRunlist"
#endif
},
{ /* [35] */
{ /* [36] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -739,7 +754,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStartRunlist"
#endif
},
{ /* [36] */
{ /* [37] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -754,7 +769,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetCapsV2"
#endif
},
{ /* [37] */
{ /* [38] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -769,7 +784,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoIdleChannels"
#endif
},
{ /* [38] */
{ /* [39] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -784,7 +799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetPteInfo"
#endif
},
{ /* [39] */
{ /* [40] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -799,7 +814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaFlush"
#endif
},
{ /* [40] */
{ /* [41] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -814,7 +829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaAdvSchedGetVaCaps"
#endif
},
{ /* [41] */
{ /* [42] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -829,7 +844,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetPdeInfo"
#endif
},
{ /* [42] */
{ /* [43] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -844,7 +859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetPteInfo"
#endif
},
{ /* [43] */
{ /* [44] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -859,7 +874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaInvalidateTLB"
#endif
},
{ /* [44] */
{ /* [45] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -874,7 +889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetCaps"
#endif
},
{ /* [45] */
{ /* [46] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -889,7 +904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetVASpaceSize"
#endif
},
{ /* [46] */
{ /* [47] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -904,7 +919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUpdatePde2"
#endif
},
{ /* [47] */
{ /* [48] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -919,7 +934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaEnablePrivilegedRange"
#endif
},
{ /* [48] */
{ /* [49] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c0000u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -934,7 +949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetDefaultVASpace"
#endif
},
{ /* [49] */
{ /* [50] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -949,7 +964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetPageDirectory"
#endif
},
{ /* [50] */
{ /* [51] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -964,7 +979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUnsetPageDirectory"
#endif
},
{ /* [51] */
{ /* [52] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -979,7 +994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdMsencGetCaps"
#endif
},
{ /* [52] */
{ /* [53] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -994,7 +1009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBspGetCapsV2"
#endif
},
{ /* [53] */
{ /* [54] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1009,7 +1024,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTSwitch"
#endif
},
{ /* [54] */
{ /* [55] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1024,7 +1039,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo"
#endif
},
{ /* [55] */
{ /* [56] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1039,7 +1054,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdNvjpgGetCapsV2"
#endif
},
{ /* [56] */
{ /* [57] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1054,7 +1069,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfCudaLimitDisable"
#endif
},
{ /* [57] */
{ /* [58] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1069,7 +1084,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount"
#endif
},
{ /* [58] */
{ /* [59] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1089,7 +1104,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
const struct NVOC_EXPORT_INFO __nvoc_export_info_Device =
{
/*numEntries=*/ 59,
/*numEntries=*/ 60,
/*pExportEntries=*/ __nvoc_exported_method_def_Device
};
@ -1234,6 +1249,10 @@ static void __nvoc_init_funcTable_Device_1(Device *pThis) {
pThis->__deviceCtrlCmdFbGetCapsV2__ = &deviceCtrlCmdFbGetCapsV2_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__deviceCtrlCmdSetDefaultVidmemPhysicality__ = &deviceCtrlCmdSetDefaultVidmemPhysicality_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
pThis->__deviceCtrlCmdFifoGetCaps__ = &deviceCtrlCmdFifoGetCaps_IMPL;
#endif

View File

@ -109,6 +109,7 @@ struct Device {
NV_STATUS (*__deviceCtrlCmdFbGetCompbitStoreInfo__)(struct Device *, NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFbGetCaps__)(struct Device *, NV0080_CTRL_FB_GET_CAPS_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFbGetCapsV2__)(struct Device *, NV0080_CTRL_FB_GET_CAPS_V2_PARAMS *);
NV_STATUS (*__deviceCtrlCmdSetDefaultVidmemPhysicality__)(struct Device *, NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFifoGetCaps__)(struct Device *, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFifoGetCapsV2__)(struct Device *, NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFifoStartSelectedChannels__)(struct Device *, NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *);
@ -182,6 +183,7 @@ struct Device {
NvU64 vaLimitInternal;
NvU64 vaSize;
NvU32 vaMode;
NvU32 defaultVidmemPhysicalityOverride;
struct KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
};
@ -241,6 +243,7 @@ NV_STATUS __nvoc_objCreate_Device(Device**, Dynamic*, NvU32, struct CALL_CONTEXT
#define deviceCtrlCmdFbGetCompbitStoreInfo(pDevice, pCompbitStoreParams) deviceCtrlCmdFbGetCompbitStoreInfo_DISPATCH(pDevice, pCompbitStoreParams)
#define deviceCtrlCmdFbGetCaps(pDevice, pFbCapsParams) deviceCtrlCmdFbGetCaps_DISPATCH(pDevice, pFbCapsParams)
#define deviceCtrlCmdFbGetCapsV2(pDevice, pFbCapsParams) deviceCtrlCmdFbGetCapsV2_DISPATCH(pDevice, pFbCapsParams)
#define deviceCtrlCmdSetDefaultVidmemPhysicality(pDevice, pParams) deviceCtrlCmdSetDefaultVidmemPhysicality_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdFifoGetCaps(pDevice, pFifoCapsParams) deviceCtrlCmdFifoGetCaps_DISPATCH(pDevice, pFifoCapsParams)
#define deviceCtrlCmdFifoGetCapsV2(pDevice, pFifoCapsParams) deviceCtrlCmdFifoGetCapsV2_DISPATCH(pDevice, pFifoCapsParams)
#define deviceCtrlCmdFifoStartSelectedChannels(pDevice, pStartSel) deviceCtrlCmdFifoStartSelectedChannels_DISPATCH(pDevice, pStartSel)
@ -465,6 +468,12 @@ static inline NV_STATUS deviceCtrlCmdFbGetCapsV2_DISPATCH(struct Device *pDevice
return pDevice->__deviceCtrlCmdFbGetCapsV2__(pDevice, pFbCapsParams);
}
NV_STATUS deviceCtrlCmdSetDefaultVidmemPhysicality_IMPL(struct Device *pDevice, NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdSetDefaultVidmemPhysicality_DISPATCH(struct Device *pDevice, NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdSetDefaultVidmemPhysicality__(pDevice, pParams);
}
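//
// Note: as with the other control methods above, the _DISPATCH wrapper just
// forwards through the per-object function pointer installed by
// __nvoc_init_funcTable_Device_1(), which is how NVOC routes each exported
// method to its _IMPL (or a HAL-specific override) at run time.
//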
NV_STATUS deviceCtrlCmdFifoGetCaps_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *pFifoCapsParams);
static inline NV_STATUS deviceCtrlCmdFifoGetCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *pFifoCapsParams) {

View File

@ -529,6 +529,7 @@ void gpumgrSetGpuRelease(void);
NvU8 gpumgrGetGpuBridgeType(void);
NvBool gpumgrAreAllGpusInOffloadMode(void);
NvBool gpumgrIsSafeToReadGpuInfo(void);
NvBool gpumgrIsDeviceMsixAllowed(RmPhysAddr bar0BaseAddr, NvU32 pmcBoot1, NvU32 pmcBoot42);
//
// gpumgrIsSubDeviceCountOne

View File

@ -765,6 +765,17 @@ static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
{
pThis->__gpuClearEccCounts__ = &gpuClearEccCounts_ac1694;
}
// Hal function -- gpuWaitForGfwBootComplete
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__gpuWaitForGfwBootComplete__ = &gpuWaitForGfwBootComplete_TU102;
}
// default
else
{
pThis->__gpuWaitForGfwBootComplete__ = &gpuWaitForGfwBootComplete_5baef9;
}
}
void __nvoc_init_funcTable_OBJGPU(OBJGPU *pThis) {

View File

@ -898,6 +898,7 @@ struct OBJGPU {
NvBool (*__gpuIsCtxBufAllocInPmaSupported__)(struct OBJGPU *);
void (*__gpuCheckEccCounts__)(struct OBJGPU *);
NV_STATUS (*__gpuClearEccCounts__)(struct OBJGPU *);
NV_STATUS (*__gpuWaitForGfwBootComplete__)(struct OBJGPU *);
NvBool PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED;
NvBool bVideoLinkDisabled;
GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel;
@ -1476,6 +1477,8 @@ NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32,
#define gpuCheckEccCounts_HAL(pGpu) gpuCheckEccCounts_DISPATCH(pGpu)
#define gpuClearEccCounts(pGpu) gpuClearEccCounts_DISPATCH(pGpu)
#define gpuClearEccCounts_HAL(pGpu) gpuClearEccCounts_DISPATCH(pGpu)
#define gpuWaitForGfwBootComplete(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
#define gpuWaitForGfwBootComplete_HAL(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
static inline NV_STATUS gpuConstructPhysical_56cd7a(struct OBJGPU *pGpu) {
return NV_OK;
}
@ -3228,6 +3231,16 @@ static inline NV_STATUS gpuClearEccCounts_DISPATCH(struct OBJGPU *pGpu) {
return pGpu->__gpuClearEccCounts__(pGpu);
}
NV_STATUS gpuWaitForGfwBootComplete_TU102(struct OBJGPU *pGpu);
static inline NV_STATUS gpuWaitForGfwBootComplete_5baef9(struct OBJGPU *pGpu) {
NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}
static inline NV_STATUS gpuWaitForGfwBootComplete_DISPATCH(struct OBJGPU *pGpu) {
return pGpu->__gpuWaitForGfwBootComplete__(pGpu);
}
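//
// Note: gpuWaitForGfwBootComplete_5baef9 is the NV_ERR_NOT_SUPPORTED stub
// that __nvoc_init_funcTable_OBJGPU_1() installs as the default, so
// gpuWaitForGfwBootComplete_HAL() simply fails on chips outside the
// TU10x/GA10x/AD10x list rather than polling registers that may not exist.
//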
static inline PENGDESCRIPTOR gpuGetInitEngineDescriptors(struct OBJGPU *pGpu) {
return pGpu->engineOrder.pEngineInitDescriptors;
}
@ -4707,6 +4720,13 @@ VGPU_STATIC_INFO *gpuGetStaticInfo(struct OBJGPU *pGpu);
GspStaticConfigInfo *gpuGetGspStaticInfo(struct OBJGPU *pGpu);
#define GPU_GET_GSP_STATIC_INFO(pGpu) gpuGetGspStaticInfo(pGpu)
//
// This function needs to be callable before OBJGPU has been created. The HAL
// infrastructure can't be used for that case, so it has been added manually.
// It will be invoked directly by gpumgrIsDeviceMsixAllowed().
//
NvBool gpuIsMsixAllowed_TU102(RmPhysAddr bar0BaseAddr);
#endif // _OBJGPU_H_
#ifdef __cplusplus

View File

@ -527,6 +527,17 @@ static inline void krcWatchdogCallbackVblankRecovery(struct OBJGPU *pGpu, struct
#define krcWatchdogCallbackVblankRecovery(pGpu, pKernelRc) krcWatchdogCallbackVblankRecovery_IMPL(pGpu, pKernelRc)
#endif //__nvoc_kernel_rc_h_disabled
NV_STATUS krcWatchdogGetClientHandle_IMPL(struct KernelRc *arg0, NvHandle *arg1);
#ifdef __nvoc_kernel_rc_h_disabled
static inline NV_STATUS krcWatchdogGetClientHandle(struct KernelRc *arg0, NvHandle *arg1) {
NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_rc_h_disabled
#define krcWatchdogGetClientHandle(arg0, arg1) krcWatchdogGetClientHandle_IMPL(arg0, arg1)
#endif //__nvoc_kernel_rc_h_disabled
#undef PRIVATE_FIELD

View File

@ -901,8 +901,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
{ 0x2339, 0x17fc, 0x10de, "NVIDIA H100" },
{ 0x233A, 0x183a, 0x10de, "NVIDIA H800 NVL" },
{ 0x2342, 0x16eb, 0x10de, "GH200 120GB" },
{ 0x2342, 0x1809, 0x10de, "GH200 480GB" },
{ 0x2342, 0x16eb, 0x10de, "NVIDIA GH200 120GB" },
{ 0x2342, 0x1809, 0x10de, "NVIDIA GH200 480GB" },
{ 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
{ 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" },
@ -995,6 +995,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B2, 0x17fa, 0x103c, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B2, 0x17fa, 0x10de, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B2, 0x17fa, 0x17aa, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B3, 0x1934, 0x10de, "NVIDIA RTX 5880 Ada Generation" },
{ 0x26B5, 0x169d, 0x10de, "NVIDIA L40" },
{ 0x26B5, 0x17da, 0x10de, "NVIDIA L40" },
{ 0x26B9, 0x1851, 0x10de, "NVIDIA L40S" },
@ -2026,8 +2027,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B9, 0x189c, 0x10DE, "NVIDIA L40S-16A" },
{ 0x26B9, 0x189d, 0x10DE, "NVIDIA L40S-24A" },
{ 0x26B9, 0x189e, 0x10DE, "NVIDIA L40S-48A" },
{ 0x26B9, 0x189f, 0x10DE, "GeForce RTX 3050" },
{ 0x26B9, 0x18a0, 0x10DE, "GeForce RTX 3060" },
{ 0x26B9, 0x189f, 0x10DE, "NVIDIA GeForce RTX 3050" },
{ 0x26B9, 0x18a0, 0x10DE, "NVIDIA GeForce RTX 3060" },
{ 0x26B9, 0x18a1, 0x10DE, "NVIDIA L40S-1" },
{ 0x26B9, 0x18a2, 0x10DE, "NVIDIA L40S-2" },
{ 0x26B9, 0x18a3, 0x10DE, "NVIDIA L40S-3" },

View File

@ -1753,12 +1753,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif
},
{ /* [101] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBiosGetSKUInfo_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u)
/*flags=*/ 0x212u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20800808u,
/*paramSize=*/ sizeof(NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS),
@ -5668,12 +5668,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif
},
{ /* [362] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
/*flags=*/ 0x6210u,
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_DISPATCH,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u)
/*flags=*/ 0x4210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20801829u,
/*paramSize=*/ sizeof(NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS),
@ -7825,7 +7825,7 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
pThis->__subdeviceCtrlCmdBiosGetNbsiV2__ = &subdeviceCtrlCmdBiosGetNbsiV2_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u)
pThis->__subdeviceCtrlCmdBiosGetSKUInfo__ = &subdeviceCtrlCmdBiosGetSKUInfo_IMPL;
#endif
@ -7969,9 +7969,8 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
pThis->__subdeviceCtrlCmdBusGetEomStatus__ = &subdeviceCtrlCmdBusGetEomStatus_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
pThis->__subdeviceCtrlCmdBusGetPcieReqAtomicsCaps__ = &subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_IMPL;
#endif
// Hal function -- subdeviceCtrlCmdBusGetPcieReqAtomicsCaps
pThis->__subdeviceCtrlCmdBusGetPcieReqAtomicsCaps__ = &subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_92bfc3;
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
pThis->__subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics__ = &subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics_IMPL;

View File

@ -747,6 +747,7 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdBusGetUphyDlnCfgSpace(pSubdevice, pParams) subdeviceCtrlCmdBusGetUphyDlnCfgSpace_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetEomStatus(pSubdevice, pParams) subdeviceCtrlCmdBusGetEomStatus_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetPcieReqAtomicsCaps(pSubdevice, pParams) subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_HAL(pSubdevice, pParams) subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics(pSubdevice, pParams) subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetC2CInfo(pSubdevice, pParams) subdeviceCtrlCmdBusGetC2CInfo_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetC2CErrorInfo(pSubdevice, pParams) subdeviceCtrlCmdBusGetC2CErrorInfo_DISPATCH(pSubdevice, pParams)
@ -1487,7 +1488,10 @@ static inline NV_STATUS subdeviceCtrlCmdBusGetEomStatus_DISPATCH(struct Subdevic
return pSubdevice->__subdeviceCtrlCmdBusGetEomStatus__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_92bfc3(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS *pParams) {
NV_ASSERT_PRECOMP(0);
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdBusGetPcieReqAtomicsCaps__(pSubdevice, pParams);

View File

@ -79,6 +79,7 @@ typedef struct {
NvNotification *notifiers[NV_MAX_SUBDEVICES];
NvNotification *errorContext;
NvNotification *notifierToken;
NvBool bHandleValid;
} KernelWatchdog;

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -22,6 +22,7 @@
*/
#include "core/core.h"
#include "core/locks.h"
#include "os/os.h"
#include "gpu/gpu.h"
#include "vgpu/vgpu_version.h"

View File

@ -1283,7 +1283,7 @@ done:
// RmMsgPrefix - Add the RmMsg prefix to the passed in string, returning
// the length of the formatted string.
//
// Format: "NVRM file linenum function timestamp: "
// Format: "NVRM: file linenum function timestamp: "
//
NvU32
RmMsgPrefix
@ -1306,7 +1306,8 @@ RmMsgPrefix
{
portStringCopy(str + len, totalLen - len, NV_PRINTF_PREFIX, sizeof(NV_PRINTF_PREFIX));
len += sizeof(NV_PRINTF_PREFIX) - 1;
space = " ";
portStringCopy(str + len, totalLen - len, NV_PRINTF_PREFIX_SEPARATOR, sizeof(NV_PRINTF_PREFIX_SEPARATOR));
len += sizeof(NV_PRINTF_PREFIX_SEPARATOR) - 1;
}
if (prefix & NVRM_MSG_PREFIX_FILE)

View File

@ -27,9 +27,14 @@
#include "gpu/mem_sys/kern_mem_sys.h"
#include "gpu/bus/kern_bus.h"
#include "gpu/bif/kernel_bif.h"
#include "gpu/mem_mgr/rm_page_size.h"
#include "nverror.h"
#include "jt.h"
#include "published/turing/tu102/dev_nv_xve.h"
#include "published/turing/tu102/dev_gc6_island.h"
#include "published/turing/tu102/dev_gc6_island_addendum.h"
/*!
* @brief Returns SR-IOV capabilities
*
@ -320,3 +325,176 @@ gpuClearEccCounts_TU102
return NV_OK;
}
//
// This function checks the GFW boot completion status by reading the
// NV_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_COMPLETED bits and
// returns NV_TRUE if GFW boot has completed.
//
// Either pGpu or pgc6VirtAddr must be non-NULL.
// This function needs to be callable in the early init code-path, where
// OBJGPU has not been created yet. For that case, the NV_PGC6 base address
// will be mapped and pgc6VirtAddr will contain its virtual address.
// If pgc6VirtAddr is not NULL, the register is read with MEM_RD32;
// otherwise GPU_REG_RD32 is used.
//
// The current GFW boot progress value is returned in gfwBootProgressVal.
//
static NvBool
_gpuIsGfwBootCompleted_TU102
(
OBJGPU *pGpu,
NvU8 *pgc6VirtAddr,
NvU32 *gfwBootProgressVal
)
{
NvU32 regVal;
if (pgc6VirtAddr != NULL)
{
regVal = MEM_RD32(pgc6VirtAddr +
(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK -
DEVICE_BASE(NV_PGC6)));
}
else
{
regVal = GPU_REG_RD32(pGpu, NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK);
}
//
// Before reading the actual GFW_BOOT status register,
// we want to check that FWSEC has lowered its PLM first.
// If it has not, then boot has obviously not completed.
//
if (!FLD_TEST_DRF(_PGC6, _AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK,
_READ_PROTECTION_LEVEL0, _ENABLE, regVal))
{
*gfwBootProgressVal = 0x0;
return NV_FALSE;
}
if (pgc6VirtAddr != NULL)
{
regVal = MEM_RD32(pgc6VirtAddr +
(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT -
DEVICE_BASE(NV_PGC6)));
}
else
{
regVal = GPU_REG_RD32(pGpu, NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT);
}
*gfwBootProgressVal = DRF_VAL(_PGC6, _AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT,
_PROGRESS, regVal);
return FLD_TEST_DRF(_PGC6, _AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT,
_PROGRESS, _COMPLETED, regVal);
}
#define FWSECLIC_PROG_START_TIMEOUT 50000 // 50ms
#define FWSECLIC_PROG_COMPLETE_TIMEOUT 2000000 // 2s
#define GPU_GFW_BOOT_COMPLETION_TIMEOUT_US (FWSECLIC_PROG_START_TIMEOUT + \
FWSECLIC_PROG_COMPLETE_TIMEOUT)
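//
// Worked budget: 50,000 us + 2,000,000 us = 2,050,000 us (~2.05 s) total for
// GFW boot to both start and run to completion.
//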
NV_STATUS
gpuWaitForGfwBootComplete_TU102
(
OBJGPU *pGpu
)
{
NvU32 timeoutUs = GPU_GFW_BOOT_COMPLETION_TIMEOUT_US;
NvU32 gfwBootProgressVal = 0;
RMTIMEOUT timeout;
NV_STATUS status = NV_OK;
// Use the OS timer since the GPU timer is not ready yet
gpuSetTimeout(pGpu, gpuScaleTimeout(pGpu, timeoutUs), &timeout,
GPU_TIMEOUT_FLAGS_OSTIMER);
while (status == NV_OK)
{
if (_gpuIsGfwBootCompleted_TU102(pGpu, NULL, &gfwBootProgressVal))
{
return NV_OK;
}
status = gpuCheckTimeout(pGpu, &timeout);
}
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW_BOOT: (progress 0x%x)\n",
gfwBootProgressVal);
return status;
}
//
// Workaround for Bug 3809777.
//
// This function is not created through the HAL infrastructure. It needs to
// be callable before OBJGPU has been created, and the HAL infrastructure
// can't be used for that case, so it has been added manually. It will be
// invoked directly by gpumgrIsDeviceMsixAllowed() after checking the GPU
// architecture.
//
// When the driver runs inside a guest in pass-through mode, check whether
// MSI-X is enabled by reading NV_XVE_PRIV_MISC_1_CYA_HIDE_MSIX_CAP. Devinit
// can disable the MSI-X capability, if so configured. The hypervisor issues
// a reset before launching the VM; after the reset, the MSI-X capability
// remains visible for some time until devinit (which runs in the background)
// hides it. During that window the hypervisor can assume the MSI-X
// capability is present and configure the guest GPU PCIe device instance
// with MSI-X. When the GPU later tries to use MSI-X interrupts, no
// interrupt will be triggered. To identify this case, wait for GPU devinit
// completion and then check that the MSI-X capability is not hidden.
//
NvBool gpuIsMsixAllowed_TU102
(
RmPhysAddr bar0BaseAddr
)
{
NvU8 *vAddr;
NvU32 regVal;
NvU32 timeUs = 0;
NvU32 gfwBootProgressVal = 0;
NvBool bGfwBootCompleted = NV_FALSE;
ct_assert(DRF_SIZE(NV_PGC6) <= RM_PAGE_SIZE);
vAddr = osMapKernelSpace(bar0BaseAddr + DEVICE_BASE(NV_PGC6),
RM_PAGE_SIZE, NV_MEMORY_UNCACHED,
NV_PROTECT_READABLE);
if (vAddr == NULL)
{
return NV_FALSE;
}
while (timeUs < GPU_GFW_BOOT_COMPLETION_TIMEOUT_US)
{
bGfwBootCompleted = _gpuIsGfwBootCompleted_TU102(NULL, vAddr, &gfwBootProgressVal);
if (bGfwBootCompleted)
{
break;
}
osDelayUs(1000);
timeUs += 1000;
}
osUnmapKernelSpace(vAddr, RM_PAGE_SIZE);
if (!bGfwBootCompleted)
{
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW_BOOT: (progress 0x%x)\n",
gfwBootProgressVal);
return NV_FALSE;
}
vAddr = osMapKernelSpace(bar0BaseAddr + DEVICE_BASE(NV_PCFG) +
NV_XVE_PRIV_MISC_1, 4, NV_MEMORY_UNCACHED,
NV_PROTECT_READABLE);
if (vAddr == NULL)
{
return NV_FALSE;
}
regVal = MEM_RD32(vAddr);
osUnmapKernelSpace(vAddr, 4);
return FLD_TEST_DRF(_XVE, _PRIV_MISC_1, _CYA_HIDE_MSIX_CAP, _FALSE, regVal);
}

View File

@ -660,7 +660,8 @@ _class5080DeferredApiV2
callContext.secInfo.pProcessToken = (void *)(NvU64) gfid;
}
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(rmStatus,
resservSwapTlsCallContext(&pOldContext, &callContext), cleanup);
rmStatus = serverControl_Prologue(&g_resServ, &rmCtrlParams, &access, &releaseFlags);
@ -680,7 +681,7 @@ _class5080DeferredApiV2
}
}
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
rmStatus = serverControl_Epilogue(&g_resServ, &rmCtrlParams, access, &releaseFlags, rmStatus);
}

View File

@ -28,6 +28,7 @@
#include "kernel/gpu/mem_mgr/mem_mgr.h"
#include "kernel/gpu/gr/kernel_graphics.h"
#include "kernel/gpu/falcon/kernel_falcon.h"
#include "kernel/gpu/rc/kernel_rc.h"
#include "kernel/gpu/conf_compute/conf_compute.h"
@ -241,6 +242,16 @@ kchangrpapiConstruct_IMPL
}
if (!RMCFG_FEATURE_PLATFORM_GSP)
{
NvHandle hRcWatchdog;
//
// WAR for Bug 4217716 - Force allocations made on behalf of the watchdog
// client to the RM reserved heap. This avoids a constant memory allocation
// appearing due to the ctxBufPool reservation out of PMA.
//
rmStatus = krcWatchdogGetClientHandle(GPU_GET_KERNEL_RC(pGpu), &hRcWatchdog);
if ((rmStatus != NV_OK) || (pParams->hClient != hRcWatchdog))
{
NV_ASSERT_OK_OR_GOTO(rmStatus,
ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pCtxBufPool),
@ -250,6 +261,11 @@ kchangrpapiConstruct_IMPL
ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pChannelBufPool),
failed);
}
else
{
NV_PRINTF(LEVEL_INFO, "Skipping ctxBufPoolInit for RC watchdog\n");
}
}
NV_ASSERT_OK_OR_GOTO(rmStatus,
kchangrpInit(pGpu, pKernelChannelGroup, pVAS, gfid),

View File

@ -618,7 +618,7 @@ static NV_STATUS _gpuRmApiControl
callCtx.pControlParams = &rmCtrlParams;
callCtx.pLockInfo = rmCtrlParams.pLockInfo;
resservSwapTlsCallContext(&oldCtx, &callCtx);
NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&oldCtx, &callCtx));
if (pEntry->paramSize == 0)
{
@ -629,7 +629,7 @@ static NV_STATUS _gpuRmApiControl
status = ((NV_STATUS(*)(void*,void*))pEntry->pFunc)(pGpu->pCachedSubdevice, pParams);
}
resservRestoreTlsCallContext(oldCtx);
NV_ASSERT_OK(resservRestoreTlsCallContext(oldCtx));
}
else
{

View File

@ -381,6 +381,7 @@ gpuresControl_IMPL
RS_RES_CONTROL_PARAMS_INTERNAL *pParams
)
{
NV_ASSERT_OR_RETURN(pGpuResource->pGpu != NULL, NV_ERR_INVALID_STATE);
gpuresControlSetup(pParams, pGpuResource);
return resControl_IMPL(staticCast(pGpuResource, RsResource),

View File

@ -68,8 +68,7 @@ kgspConfigureFalcon_GA102
//
// No CrashCat queue when CC is enabled, as it's not encrypted.
// Don't bother enabling the host-side decoding either, as CrashCat
// currently only supports sysmem queue reporting on GA10x+.
// Don't bother enabling the host-side decoding either.
//
if (pCC == NULL || !pCC->getProperty(pCC, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED))
{
@ -77,7 +76,6 @@ kgspConfigureFalcon_GA102
falconConfig.crashcatEngConfig.bEnable = NV_TRUE;
falconConfig.crashcatEngConfig.pName = MAKE_NV_PRINTF_STR("GSP");
falconConfig.crashcatEngConfig.errorId = GSP_ERROR;
falconConfig.crashcatEngConfig.allocQueueSize = RM_PAGE_SIZE;
}
kflcnConfigureEngine(pGpu, staticCast(pKernelGsp, KernelFalcon), &falconConfig);

View File

@ -956,9 +956,6 @@ kgspIsWpr2Up_TU102
return (wpr2HiVal != 0);
}
#define FWSECLIC_PROG_START_TIMEOUT 50000 // 50ms
#define FWSECLIC_PROG_COMPLETE_TIMEOUT 2000000 // 2s
NV_STATUS
kgspWaitForGfwBootOk_TU102
(
@ -966,50 +963,15 @@ kgspWaitForGfwBootOk_TU102
KernelGsp *pKernelGsp
)
{
NvU32 timeoutUs = FWSECLIC_PROG_START_TIMEOUT + FWSECLIC_PROG_COMPLETE_TIMEOUT;
RMTIMEOUT timeout;
NV_STATUS status = NV_OK;
// Use the OS timer since the GPU timer is not ready yet
gpuSetTimeout(pGpu, gpuScaleTimeout(pGpu, timeoutUs), &timeout,
GPU_TIMEOUT_FLAGS_OSTIMER);
while (status == NV_OK)
status = gpuWaitForGfwBootComplete_HAL(pGpu);
if (status != NV_OK)
{
//
// Before reading the actual GFW_BOOT status register,
// we want to check that FWSEC has lowered its PLM first.
// If not then obviously it has not completed.
//
if (GPU_FLD_TEST_DRF_DEF(pGpu,
_PGC6,
_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK,
_READ_PROTECTION_LEVEL0,
_ENABLE)
)
{
if (GPU_FLD_TEST_DRF_DEF(pGpu,
_PGC6,
_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT,
_PROGRESS,
_COMPLETED)
)
{
return NV_OK;
}
}
status = gpuCheckTimeout(pGpu, &timeout);
}
// The wait failed if we reach here (as above loop returns upon success).
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW_BOOT: 0x%x (progress 0x%x, VBIOS version %s)\n",
status, GPU_REG_RD_DRF(pGpu,
_PGC6,
_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT,
_PROGRESS),
pKernelGsp->vbiosVersionStr);
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW boot complete: 0x%x VBIOS version %s\n",
status, pKernelGsp->vbiosVersionStr);
NV_PRINTF(LEVEL_ERROR, "(the GPU may be in a bad state and may need to be reset)\n");
}
return status;
}

View File

@ -1409,12 +1409,20 @@ _tsDiffToDuration
duration /= tsFreqUs;
// 999999us then 1000ms
if (duration >= 1000000)
{
duration /= 1000;
*pDurationUnitsChar = 'm';
}
// 9999ms then 10s
if (duration >= 10000)
{
duration /= 1000;
*pDurationUnitsChar = ' '; // so caller can always just append 's'
}
return duration;
}
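//
// Worked example (values chosen for illustration): a diff of 12,345,678 us
// is >= 1,000,000, so it becomes 12,345 with units 'm' (ms); that is still
// >= 10,000, so it becomes 12 with units ' ', and the caller's appended 's'
// prints it as seconds.
//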
@ -1467,7 +1475,7 @@ _kgspLogRpcHistoryEntry
duration = _tsDiffToDuration(duration, &durationUnitsChar);
NV_ERROR_LOG_DATA(pGpu, errorNum,
" %c%-4d %-4d %-21.21s 0x%016llx 0x%016llx 0x%016llx 0x%016llx %6lld%cs %c\n",
" %c%-4d %-4d %-21.21s 0x%016llx 0x%016llx 0x%016llx 0x%016llx %6llu%cs %c\n",
((historyIndex == 0) ? ' ' : '-'),
historyIndex,
pEntry->function,
@ -1556,23 +1564,32 @@ _kgspLogXid119
NvU32 expectedFunc
)
{
NvU32 historyEntry = pRpc->rpcHistoryCurrent;
RpcHistoryEntry *pHistoryEntry = &pRpc->rpcHistory[pRpc->rpcHistoryCurrent];
NvU64 ts_end = osGetTimestamp();
NvU64 duration;
char durationUnitsChar;
if (pRpc->timeoutCount == 1)
{
NV_PRINTF(LEVEL_ERROR,
"********************************* GSP Failure **********************************\n");
"********************************* GSP Timeout **********************************\n");
NV_PRINTF(LEVEL_ERROR,
"Note: Please also check logs above.\n");
}
NV_ASSERT(expectedFunc == pRpc->rpcHistory[historyEntry].function);
NV_ASSERT(expectedFunc == pHistoryEntry->function);
NV_ASSERT(ts_end > pHistoryEntry->ts_start);
duration = _tsDiffToDuration(ts_end - pHistoryEntry->ts_start, &durationUnitsChar);
NV_ERROR_LOG(pGpu, GSP_RPC_TIMEOUT,
"Timeout waiting for RPC from GPU%d GSP! Expected function %d (%s) (0x%x 0x%x).",
"Timeout after %llus of waiting for RPC response from GPU%d GSP! Expected function %d (%s) (0x%x 0x%x).",
(durationUnitsChar == 'm' ? duration / 1000 : duration),
gpuGetInstance(pGpu),
expectedFunc,
_getRpcName(expectedFunc),
pRpc->rpcHistory[historyEntry].data[0],
pRpc->rpcHistory[historyEntry].data[1]);
pHistoryEntry->data[0],
pHistoryEntry->data[1]);
if (pRpc->timeoutCount == 1)
{

View File

@ -1579,7 +1579,7 @@ intrServiceStallList_IMPL
}
}
resservSwapTlsCallContext(&pOldContext, NULL);
NV_ASSERT_OK_OR_ELSE(status, resservSwapTlsCallContext(&pOldContext, NULL), return);
// prevent the isr from coming in
_intrEnterCriticalSection(pGpu, pIntr, &intrMaskCtx);
@ -1608,7 +1608,7 @@ done:
// allow the isr to come in.
_intrExitCriticalSection(pGpu, pIntr, &intrMaskCtx);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
}

View File

@ -36,6 +36,7 @@
#include "gpu/mem_mgr/ce_utils.h"
#include "gpu/subdevice/subdevice.h"
#include "kernel/gpu/mem_mgr/ce_utils_sizes.h"
#include "vgpu/rpc_headers.h"
#include "class/clb0b5.h" // MAXWELL_DMA_COPY_A
#include "class/clc0b5.h" // PASCAL_DMA_COPY_A
@ -91,8 +92,21 @@ ceutilsConstruct_IMPL
status = serverGetClientUnderLock(&g_resServ, pChannel->hClient, &pChannel->pRsClient);
NV_ASSERT_OR_GOTO(status == NV_OK, free_client);
status = clientSetHandleGenerator(staticCast(pClient, RsClient), 1U, ~0U - 1U);
NV_ASSERT_OR_GOTO(status == NV_OK, free_client);
if (IS_VIRTUAL(pGpu))
{
NV_ASSERT_OK_OR_GOTO(
status,
clientSetHandleGenerator(staticCast(pClient, RsClient), RS_UNIQUE_HANDLE_BASE,
RS_UNIQUE_HANDLE_RANGE/2 - VGPU_RESERVED_HANDLE_RANGE),
free_client);
}
else
{
NV_ASSERT_OK_OR_GOTO(
status,
clientSetHandleGenerator(staticCast(pClient, RsClient), 1U, ~0U - 1U),
free_client);
}
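//
// Note: on vGPU the generator is confined to RS_UNIQUE_HANDLE_BASE with a
// reduced range (RS_UNIQUE_HANDLE_RANGE/2 - VGPU_RESERVED_HANDLE_RANGE),
// presumably so locally generated handles cannot collide with handles
// reserved for the vGPU plugin; bare metal keeps the full 1..~0U-1 range.
//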
pChannel->bClientAllocated = NV_TRUE;
pChannel->pGpu = pGpu;

View File

@ -249,6 +249,44 @@ deviceCtrlCmdFbGetCapsV2_IMPL
return rmStatus;
}
//
// deviceCtrlCmdSetDefaultVidmemPhysicality
//
// Lock Requirements:
// Assert that the API lock is held on entry
//
NV_STATUS
deviceCtrlCmdSetDefaultVidmemPhysicality_IMPL
(
Device *pDevice,
NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS *pParams
)
{
LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner());
NvU32 override;
switch (pParams->value)
{
case NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_DEFAULT:
override = NVOS32_ATTR_PHYSICALITY_DEFAULT;
break;
case NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_CONTIGUOUS:
override = NVOS32_ATTR_PHYSICALITY_CONTIGUOUS;
break;
case NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_NONCONTIGUOUS:
override = NVOS32_ATTR_PHYSICALITY_NONCONTIGUOUS;
break;
case NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_ALLOW_NONCONTIGUOUS:
override = NVOS32_ATTR_PHYSICALITY_ALLOW_NONCONTIGUOUS;
break;
default:
return NV_ERR_INVALID_ARGUMENT;
}
pDevice->defaultVidmemPhysicalityOverride = override;
return NV_OK;
}
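//
// Usage sketch (hypothetical client code; the control command name
// NV0080_CTRL_CMD_FB_SET_DEFAULT_VIDMEM_PHYSICALITY and the pRmApi/handle
// plumbing are assumed here, not taken from this change):
//
//   NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS params = {0};
//   params.value = NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_CONTIGUOUS;
//   status = pRmApi->Control(pRmApi, hClient, hDevice,
//                            NV0080_CTRL_CMD_FB_SET_DEFAULT_VIDMEM_PHYSICALITY,
//                            &params, sizeof(params));
//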
//
// subdeviceCtrlCmdFbGetBar1Offset
//

View File

@ -52,7 +52,6 @@
#include "deprecated/rmapi_deprecated.h"
#include "nvRmReg.h"
//
// Watchdog object ids
//
@ -107,7 +106,6 @@
#define SUBDEVICE_MASK_ALL DRF_MASK(NV906F_DMA_SET_SUBDEVICE_MASK_VALUE)
NV_STATUS
krcWatchdogChangeState_IMPL
(
@ -402,7 +400,7 @@ krcWatchdogShutdown_IMPL
//
// Make sure to clear any old watchdog data this also clears
// WATCHDOG_FLAGS_INITIALIZED
// WATCHDOG_FLAGS_INITIALIZED, bHandleValid, and hClient
//
portMemSet(&pKernelRc->watchdog, 0, sizeof pKernelRc->watchdog);
portMemSet(&pKernelRc->watchdogChannelInfo, 0,
@ -519,6 +517,8 @@ krcWatchdogInit_IMPL
status = NV_ERR_NO_MEMORY;
goto error;
}
pKernelRc->watchdog.hClient = hClient;
pKernelRc->watchdog.bHandleValid = NV_TRUE;
}
if (bAcquireLock)
@ -1178,6 +1178,7 @@ error:
if (status != NV_OK)
{
pRmApi->Free(pRmApi, hClient, hClient);
pKernelRc->watchdog.bHandleValid = NV_FALSE;
}
portMemFree(pParams);
@ -1417,4 +1418,11 @@ krcWatchdogWriteNotifierToGpfifo_IMPL
SLI_LOOP_END;
}
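//
// Returns the watchdog's RM client handle, or NV_ERR_INVALID_STATE if the
// watchdog client has not been allocated yet (bHandleValid not set).
//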
NV_STATUS krcWatchdogGetClientHandle(KernelRc *pKernelRc, NvHandle *phClient)
{
if (!pKernelRc->watchdog.bHandleValid)
return NV_ERR_INVALID_STATE;
*phClient = pKernelRc->watchdog.hClient;
return NV_OK;
}

View File

@ -3893,3 +3893,41 @@ NvBool gpumgrIsSafeToReadGpuInfo(void)
//
return rmapiLockIsOwner() || (rmGpuLocksGetOwnedMask() != 0);
}
//
// Workaround for Bug 3809777. This is a HW bug in Ampere and Ada GPUs: on
// these GPUs, after a device reset, CRS (Configuration Request Retry
// Status) is released without waiting for GFW boot completion. The MSI-X
// capability in the config space may be inconsistent while GFW boot is in
// progress, so this function checks whether MSI-X is allowed.
// For Hopper and above, CRS is released only after GFW boot completion, so
// the WAR is not needed.
// The bug is exposed only when the GPU is running inside a guest in
// pass-through mode.
//
NvBool gpumgrIsDeviceMsixAllowed
(
RmPhysAddr bar0BaseAddr,
NvU32 pmcBoot1,
NvU32 pmcBoot42
)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
NvU32 chipArch;
if ((hypervisorGetHypervisorType(pHypervisor) == OS_HYPERVISOR_UNKNOWN) ||
!FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _REAL, pmcBoot1))
{
return NV_TRUE;
}
chipArch = DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, pmcBoot42);
if ((chipArch != NV_PMC_BOOT_42_ARCHITECTURE_AD100) &&
(chipArch != NV_PMC_BOOT_42_ARCHITECTURE_GA100))
{
return NV_TRUE;
}
return gpuIsMsixAllowed_TU102(bar0BaseAddr);
}
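//
// Usage sketch (hypothetical early-boot caller; at this point OBJGPU does
// not exist, so only BAR0/PMC_BOOT values read from the device are passed):
//
//   if (gpumgrIsDeviceMsixAllowed(bar0BaseAddr, pmcBoot1, pmcBoot42))
//   {
//       // safe to expose/enable MSI-X for this device
//   }
//   else
//   {
//       // fall back to MSI or INTx
//   }
//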

View File

@ -562,6 +562,14 @@ vidmemConstruct_IMPL
goto done;
}
if (FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _DEFAULT, pAllocData->attr))
{
pAllocData->attr =
FLD_SET_DRF_NUM(OS32, _ATTR, _PHYSICALITY,
pDevice->defaultVidmemPhysicalityOverride,
pAllocData->attr);
}
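//
// Example: after deviceCtrlCmdSetDefaultVidmemPhysicality() has set the
// override to NVOS32_ATTR_PHYSICALITY_CONTIGUOUS, an allocation that asks
// for _PHYSICALITY_DEFAULT is rewritten here to request contiguous vidmem.
//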
NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, stdmemValidateParams(pGpu, hClient, pAllocData));
NV_CHECK_OR_RETURN(LEVEL_WARNING,
DRF_VAL(OS32, _ATTR, _LOCATION, pAllocData->attr) == NVOS32_ATTR_LOCATION_VIDMEM &&

View File

@ -913,7 +913,8 @@ serverAllocResourceUnderLock
callContext.pLockInfo = pRmAllocParams->pLockInfo;
callContext.secInfo = *pRmAllocParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
NV_RM_RPC_ALLOC_OBJECT(pGpu,
pRmAllocParams->hClient,
pRmAllocParams->hParent,
@ -922,7 +923,7 @@ serverAllocResourceUnderLock
pRmAllocParams->pAllocParams,
pRmAllocParams->paramsSize,
status);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
goto done;

View File

@ -4148,9 +4148,10 @@ cliresCtrlCmdClientShareObject_IMPL
callContext.pResourceRef = pObjectRef;
callContext.secInfo = pCallContext->secInfo;
resservSwapTlsCallContext(&pOldCallContext, &callContext);
NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldCallContext, &callContext));
status = clientShareResource(pClient, pObjectRef, pSharePolicy, &callContext);
resservRestoreTlsCallContext(pOldCallContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldCallContext));
if (status != NV_OK)
return status;

View File

@ -3446,8 +3446,6 @@ NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
(memdescGetAddressSpace(pAdjustedMemDesc) == ADDR_FABRIC_MC) ||
(memdescGetAddressSpace(pAdjustedMemDesc) == ADDR_FABRIC_V2))
{
KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMappingGpu);
isPeerSupported = NV_TRUE;
pPeerGpu = pAdjustedMemDesc->pGpu;
peerId = BUS_INVALID_PEER;
@ -3462,12 +3460,23 @@ NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
if (pPeerGpu != NULL)
{
if (IS_VIRTUAL_WITH_SRIOV(pMappingGpu) &&
!gpuIsWarBug200577889SriovHeavyEnabled(pMappingGpu))
{
peerId = kbusGetNvlinkPeerId_HAL(pMappingGpu,
GPU_GET_KERNEL_BUS(pMappingGpu),
pPeerGpu);
}
else
{
KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMappingGpu);
if ((pKernelNvlink != NULL) &&
knvlinkIsNvlinkP2pSupported(pMappingGpu, pKernelNvlink, pPeerGpu))
{
peerId = kbusGetPeerId_HAL(pMappingGpu, GPU_GET_KERNEL_BUS(pMappingGpu), pPeerGpu);
}
}
}
else
{
peerId = kbusGetNvSwitchPeerId_HAL(pMappingGpu,

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -523,7 +523,7 @@ clientCopyResource_IMPL
callContext.secInfo = *pParams->pSecInfo;
callContext.pLockInfo = pParams->pLockInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldContext, &callContext));
//
// Kernel clients are allowed to dup anything, unless they request otherwise.
@ -560,7 +560,7 @@ clientCopyResource_IMPL
}
}
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
return status;
@ -668,9 +668,11 @@ _clientAllocResourceHelper
}
callContext.secInfo = *pParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), fail);
status = resservResourceFactory(pServer->pAllocator, &callContext, pParams, &pResource);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
goto fail;
@ -724,6 +726,8 @@ _clientAllocResourceHelper
fail:
if (pResource != NULL)
{
NV_STATUS callContextStatus;
RS_RES_FREE_PARAMS_INTERNAL params;
pOldContext = NULL;
@ -738,11 +742,20 @@ fail:
callContext.pResourceRef = pResourceRef;
callContext.pLockInfo = pParams->pLockInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
callContextStatus = resservSwapTlsCallContext(&pOldContext, &callContext);
if (callContextStatus == NV_OK)
{
resSetFreeParams(pResource, &callContext, &params);
objDelete(pResource);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
}
else
{
NV_PRINTF(LEVEL_ERROR, "Failed to set call context! Error: 0x%x\n",
callContextStatus);
}
}
if (pResourceRef != NULL)
@ -798,7 +811,9 @@ clientFreeResource_IMPL
if (pParams->pSecInfo != NULL)
callContext.secInfo = *pParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
resSetFreeParams(pResource, &callContext, pParams);
resPreDestruct(pResource);
@ -825,7 +840,7 @@ clientFreeResource_IMPL
pResourceRef->pResource = NULL;
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
done:
if (!pParams->bInvalidateOnly)
@ -872,9 +887,10 @@ clientUnmapMemory_IMPL
if (pSecInfo != NULL)
callContext.secInfo = *pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldContext, &callContext));
status = resUnmap(pResourceRef->pResource, &callContext, pCpuMapping);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
{

View File

@ -1344,9 +1344,11 @@ serverControl
}
pLockInfo->pContextRef = pResourceRef->pParentRef;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
status = resControl(pResourceRef->pResource, &callContext, pParams);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
done:
@ -1521,7 +1523,8 @@ _serverShareResourceAccessClient
callContext.pResourceRef = pResourceRef;
callContext.secInfo = *pParams->pSecInfo;
callContext.pLockInfo = pParams->pLockInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
if (hClientOwner == hClientTarget)
{
@ -1544,7 +1547,7 @@ _serverShareResourceAccessClient
goto restore_context;
restore_context:
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
// NV_PRINTF(LEVEL_INFO, "hClientOwner %x: Shared hResource: %x with hClientTarget: %x\n",
// hClientOwner, pParams->hResource, hClientTarget);
@ -1631,9 +1634,11 @@ serverShareResourceAccess
callContext.secInfo = *pParams->pSecInfo;
callContext.pLockInfo = pParams->pLockInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
status = clientShareResource(pClient, pResourceRef, pParams->pSharePolicy, &callContext);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
goto done;
@ -1735,9 +1740,11 @@ serverMap
if (pParams->pSecInfo != NULL)
callContext.secInfo = *pParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
status = resMap(pResource, &callContext, pParams, pCpuMapping);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
goto done;
@ -1910,7 +1917,9 @@ serverInterMap
if (pParams->pSecInfo != NULL)
callContext.secInfo = *pParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
bRestoreCallContext = NV_TRUE;
status = refAddInterMapping(pMapperRef, pMappableRef, pContextRef, &pMapping);
@ -1934,7 +1943,7 @@ done:
serverInterMap_Epilogue(pServer, pParams, &releaseFlags);
if (bRestoreCallContext)
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
{
@ -2024,7 +2033,9 @@ serverInterUnmap
if (pLockInfo->pContextRef == NULL)
pLockInfo->pContextRef = pContextRef;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
bRestoreCallContext = NV_TRUE;
status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags);
@ -2045,7 +2056,7 @@ done:
serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags);
if (bRestoreCallContext)
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
_serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags);
serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags);

View File

@ -1,4 +1,4 @@
NVIDIA_VERSION = 535.129.03
NVIDIA_VERSION = 535.146.02
# This file.
VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))