535.43.08

commit 18b7303c54
parent eb5c7665a1
@@ -2,6 +2,8 @@
 
 ## Release 535 Entries
 
+### [535.43.08] 2023-08-17
+
 ### [535.43.02] 2023-05-30
 
 #### Fixed
README.md (19 changed lines)
@@ -1,7 +1,7 @@
 # NVIDIA Linux Open GPU Kernel Module Source
 
 This is the source release of the NVIDIA Linux open GPU kernel modules,
-version 535.43.02.
+version 535.43.08.
 
 
 ## How to Build
@@ -17,7 +17,7 @@ as root:
 
 Note that the kernel modules built here must be used with GSP
 firmware and user-space NVIDIA GPU driver components from a corresponding
-535.43.02 driver release. This can be achieved by installing
+535.43.08 driver release. This can be achieved by installing
 the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
 option. E.g.,
 
@@ -180,7 +180,7 @@ software applications.
 ## Compatible GPUs
 
 The open-gpu-kernel-modules can be used on any Turing or later GPU
-(see the table below). However, in the 535.43.02 release,
+(see the table below). However, in the 535.43.08 release,
 GeForce and Workstation support is still considered alpha-quality.
 
 To enable use of the open kernel modules on GeForce and Workstation GPUs,
@@ -188,7 +188,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
 parameter to 1. For more details, see the NVIDIA GPU driver end user
 README here:
 
-https://us.download.nvidia.com/XFree86/Linux-x86_64/535.43.02/README/kernel_open.html
+https://us.download.nvidia.com/XFree86/Linux-x86_64/535.43.08/README/kernel_open.html
 
 In the below table, if three IDs are listed, the first is the PCI Device
 ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@@ -664,6 +664,7 @@ Subsystem Device ID.
 | NVIDIA A100 80GB PCIe                      | 20B5 10DE 1642 |
 | NVIDIA PG506-232                           | 20B6 10DE 1492 |
 | NVIDIA A30                                 | 20B7 10DE 1532 |
+| NVIDIA A30                                 | 20B7 10DE 1804 |
 | NVIDIA A100-PCIE-40GB                      | 20F1 10DE 145F |
 | NVIDIA A800-SXM4-80GB                      | 20F3 10DE 179B |
 | NVIDIA A800-SXM4-80GB                      | 20F3 10DE 179C |
@@ -828,6 +829,10 @@ Subsystem Device ID.
 | NVIDIA RTX 6000 Ada Generation             | 26B1 103C 16A1 |
 | NVIDIA RTX 6000 Ada Generation             | 26B1 10DE 16A1 |
 | NVIDIA RTX 6000 Ada Generation             | 26B1 17AA 16A1 |
+| NVIDIA RTX 5000 Ada Generation             | 26B2 1028 17FA |
+| NVIDIA RTX 5000 Ada Generation             | 26B2 103C 17FA |
+| NVIDIA RTX 5000 Ada Generation             | 26B2 10DE 17FA |
+| NVIDIA RTX 5000 Ada Generation             | 26B2 17AA 17FA |
 | NVIDIA L40                                 | 26B5 10DE 169D |
 | NVIDIA L40                                 | 26B5 10DE 17DA |
 | NVIDIA GeForce RTX 4080                    | 2704 |
@@ -841,15 +846,21 @@ Subsystem Device ID.
 | NVIDIA RTX 4000 SFF Ada Generation         | 27B0 103C 16FA |
 | NVIDIA RTX 4000 SFF Ada Generation         | 27B0 10DE 16FA |
 | NVIDIA RTX 4000 SFF Ada Generation         | 27B0 17AA 16FA |
+| NVIDIA RTX 4000 Ada Generation             | 27B2 1028 181B |
+| NVIDIA RTX 4000 Ada Generation             | 27B2 103C 181B |
+| NVIDIA RTX 4000 Ada Generation             | 27B2 10DE 181B |
+| NVIDIA RTX 4000 Ada Generation             | 27B2 17AA 181B |
 | NVIDIA L4                                  | 27B8 10DE 16CA |
 | NVIDIA L4                                  | 27B8 10DE 16EE |
 | NVIDIA RTX 4000 Ada Generation Laptop GPU  | 27BA |
 | NVIDIA RTX 3500 Ada Generation Laptop GPU  | 27BB |
 | NVIDIA GeForce RTX 4080 Laptop GPU         | 27E0 |
 | NVIDIA GeForce RTX 4060 Ti                 | 2803 |
+| NVIDIA GeForce RTX 4060 Ti                 | 2805 |
 | NVIDIA GeForce RTX 4070 Laptop GPU         | 2820 |
 | NVIDIA RTX 3000 Ada Generation Laptop GPU  | 2838 |
 | NVIDIA GeForce RTX 4070 Laptop GPU         | 2860 |
+| NVIDIA GeForce RTX 4060                    | 2882 |
 | NVIDIA GeForce RTX 4060 Laptop GPU         | 28A0 |
 | NVIDIA GeForce RTX 4050 Laptop GPU         | 28A1 |
 | NVIDIA RTX 2000 Ada Generation Laptop GPU  | 28B8 |
@@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
 EXTRA_CFLAGS += -I$(src)
 EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
 EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
-EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.43.02\"
+EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.43.08\"
 
 ifneq ($(SYSSRCHOST1X),)
  EXTRA_CFLAGS += -I$(SYSSRCHOST1X)
@@ -275,6 +275,7 @@ NV_HEADER_PRESENCE_TESTS = \
   asm/opal-api.h \
   sound/hdaudio.h \
   asm/pgtable_types.h \
+  asm/page.h \
   linux/stringhash.h \
   linux/dma-map-ops.h \
   rdma/peer_mem.h \
@@ -300,7 +301,9 @@ NV_HEADER_PRESENCE_TESTS = \
   linux/vfio_pci_core.h \
   linux/mdev.h \
   soc/tegra/bpmp-abi.h \
-  soc/tegra/bpmp.h
+  soc/tegra/bpmp.h \
+  linux/cc_platform.h \
+  asm/cpufeature.h
 
 # Filename to store the define for the header in $(1); this is only consumed by
 # the rule below that concatenates all of these together.
@@ -211,6 +211,7 @@
 #include <linux/highmem.h>
 
 #include <linux/nodemask.h>
+#include <linux/memory.h>
 
 #include <linux/workqueue.h>        /* workqueue */
 #include "nv-kthread-q.h"           /* kthread based queue */
@@ -510,7 +511,11 @@ static inline void nv_vfree(void *ptr, NvU64 size)
 
 static inline void *nv_ioremap(NvU64 phys, NvU64 size)
 {
+#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_DRIVER_HARDENED_PRESENT)
+    void *ptr = ioremap_driver_hardened(phys, size);
+#else
     void *ptr = ioremap(phys, size);
+#endif
     if (ptr)
         NV_MEMDBG_ADD(ptr, size);
     return ptr;
@@ -523,11 +528,11 @@ static inline void *nv_ioremap_nocache(NvU64 phys, NvU64 size)
 
 static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size)
 {
-#if defined(NV_IOREMAP_CACHE_PRESENT)
-    void *ptr = ioremap_cache(phys, size);
-    if (ptr)
-        NV_MEMDBG_ADD(ptr, size);
-    return ptr;
+    void *ptr = NULL;
+#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_CACHE_SHARED_PRESENT)
+    ptr = ioremap_cache_shared(phys, size);
+#elif defined(NV_IOREMAP_CACHE_PRESENT)
+    ptr = ioremap_cache(phys, size);
 #elif defined(NVCPU_PPC64LE)
 //
 // ioremap_cache() has been only implemented correctly for ppc64le with
@@ -542,25 +547,32 @@ static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size)
 // (commit 40f1ce7fb7e8, kernel 3.0+) and that covers all kernels we
 // support on power.
 //
-    void *ptr = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL));
-    if (ptr)
-        NV_MEMDBG_ADD(ptr, size);
-    return ptr;
+    ptr = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL));
 #else
     return nv_ioremap(phys, size);
 #endif
+
+    if (ptr)
+        NV_MEMDBG_ADD(ptr, size);
+
+    return ptr;
 }
 
 static inline void *nv_ioremap_wc(NvU64 phys, NvU64 size)
 {
-#if defined(NV_IOREMAP_WC_PRESENT)
-    void *ptr = ioremap_wc(phys, size);
-    if (ptr)
-        NV_MEMDBG_ADD(ptr, size);
-    return ptr;
+    void *ptr = NULL;
+#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_DRIVER_HARDENED_WC_PRESENT)
+    ptr = ioremap_driver_hardened_wc(phys, size);
+#elif defined(NV_IOREMAP_WC_PRESENT)
+    ptr = ioremap_wc(phys, size);
 #else
     return nv_ioremap_nocache(phys, size);
 #endif
+
+    if (ptr)
+        NV_MEMDBG_ADD(ptr, size);
+
+    return ptr;
 }
 
 static inline void nv_iounmap(void *ptr, NvU64 size)
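All three mappers above follow the same shape: on an Intel TDX guest, prefer the hardened (shared) ioremap variant when conftest has detected it; otherwise fall back to the stock kernel mapper; and record the mapping with NV_MEMDBG_ADD only on success. Below is a minimal userspace model of that selection logic; the *_stub functions and the two compile-time knobs are invented stand-ins for the kernel APIs and the conftest-generated defines.

    #include <stdio.h>
    #include <stdint.h>

    /* Stubs standing in for kernel mappers; the real code calls ioremap()
     * or ioremap_driver_hardened(). */
    static void *ioremap_stub(uint64_t phys, uint64_t size)
    { (void)phys; (void)size; return (void *)0x1000; }
    static void *ioremap_hardened_stub(uint64_t phys, uint64_t size)
    { (void)phys; (void)size; return (void *)0x2000; }

    /* These knobs play the role of CONFIG_INTEL_TDX_GUEST and the
     * conftest-generated NV_IOREMAP_DRIVER_HARDENED_PRESENT define. */
    #define TDX_GUEST 1
    #define HARDENED_PRESENT 1

    static void *nv_ioremap_model(uint64_t phys, uint64_t size)
    {
    #if TDX_GUEST && HARDENED_PRESENT
        void *ptr = ioremap_hardened_stub(phys, size);
    #else
        void *ptr = ioremap_stub(phys, size);
    #endif
        if (ptr) /* book-keeping happens only on success, as above */
            printf("mapped %llu bytes at %p\n", (unsigned long long)size, ptr);
        return ptr;
    }

    int main(void)
    {
        nv_ioremap_model(0xfe000000, 4096);
        return 0;
    }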
@@ -633,37 +645,24 @@ static NvBool nv_numa_node_has_memory(int node_id)
         free_pages(ptr, order);                        \
     }
 
-extern NvU64 nv_shared_gpa_boundary;
+static inline pgprot_t nv_sme_clr(pgprot_t prot)
+{
+#if defined(__sme_clr)
+    return __pgprot(__sme_clr(pgprot_val(prot)));
+#else
+    return prot;
+#endif // __sme_clr
+}
 
 static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot, NvU32 extra)
 {
     pgprot_t prot = __pgprot(pgprot_val(vm_prot) | extra);
-#if defined(CONFIG_AMD_MEM_ENCRYPT) && defined(NV_PGPROT_DECRYPTED_PRESENT)
-    /*
-     * When AMD memory encryption is enabled, device memory mappings with the
-     * C-bit set read as 0xFF, so ensure the bit is cleared for user mappings.
-     *
-     * If cc_mkdec() is present, then pgprot_decrypted() can't be used.
-     */
-#if defined(NV_CC_MKDEC_PRESENT)
-    if (nv_shared_gpa_boundary != 0)
-    {
-        /*
-         * By design, a VM using vTOM doesn't see the SEV setting and
-         * for AMD with vTOM, *set* means decrypted.
-         */
-        prot = __pgprot(nv_shared_gpa_boundary | (pgprot_val(vm_prot)));
-    }
-    else
-    {
-        prot = __pgprot(__sme_clr(pgprot_val(vm_prot)));
-    }
-#else
-    prot = pgprot_decrypted(prot);
-#endif
-#endif
 
-    return prot;
+#if defined(pgprot_decrypted)
+    return pgprot_decrypted(prot);
+#else
+    return nv_sme_clr(prot);
+#endif // pgprot_decrypted
 }
 
 #if defined(PAGE_KERNEL_NOENC)
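The rework above collapses the AMD-specific vTOM/cc_mkdec() handling into one rule: if the kernel provides pgprot_decrypted(), use it; otherwise strip the memory-encryption bit via the new nv_sme_clr() helper. A compilable userspace sketch of that decision follows, under the illustrative assumption that bit 47 plays the role of the AMD C-bit; both helper functions here are simplified stand-ins, not the kernel implementations.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: pretend bit 47 is the "encrypted" C-bit. */
    #define SME_C_BIT (1ULL << 47)

    static uint64_t sme_clr(uint64_t prot)           { return prot & ~SME_C_BIT; }
    static uint64_t pgprot_decrypted_model(uint64_t p) { return p & ~SME_C_BIT; }

    /* Stand-in for "the kernel defines pgprot_decrypted()". */
    #define HAVE_PGPROT_DECRYPTED 1

    static uint64_t nv_adjust_pgprot_model(uint64_t vm_prot, uint64_t extra)
    {
        uint64_t prot = vm_prot | extra;
    #if HAVE_PGPROT_DECRYPTED
        return pgprot_decrypted_model(prot); /* kernel helper, if detected */
    #else
        return sme_clr(prot);                /* fallback: clear the C-bit */
    #endif
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)nv_adjust_pgprot_model(SME_C_BIT | 0x73, 0x8));
        return 0;
    }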
@@ -1323,7 +1322,7 @@ nv_dma_maps_swiotlb(struct device *dev)
      * SEV memory encryption") forces SWIOTLB to be enabled when AMD SEV
      * is active in all cases.
      */
-    if (os_sev_enabled)
+    if (os_cc_enabled)
        swiotlb_in_use = NV_TRUE;
 #endif
 
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,12 +36,21 @@ typedef int vm_fault_t;
 * pin_user_pages() was added by commit eddb1c228f7951d399240
 * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6-rc1 (2020-01-30)
 *
+ * Removed vmas parameter from pin_user_pages() by commit 40896a02751
+ * ("mm/gup: remove vmas parameter from pin_user_pages()")
+ * in linux-next, expected in v6.5-rc1 (2023-05-17)
+ *
 */
 
 #include <linux/mm.h>
 #include <linux/sched.h>
 #if defined(NV_PIN_USER_PAGES_PRESENT)
+    #if defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS)
     #define NV_PIN_USER_PAGES pin_user_pages
+    #else
+    #define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages, vmas) \
+        pin_user_pages(start, nr_pages, gup_flags, pages)
+    #endif // NV_PIN_USER_PAGES_HAS_ARGS_VMAS
     #define NV_UNPIN_USER_PAGE unpin_user_page
 #else
     #define NV_PIN_USER_PAGES NV_GET_USER_PAGES
@@ -64,11 +73,18 @@ typedef int vm_fault_t;
 * commit 8e50b8b07f462ab4b91bc1491b1c91bd75e4ad40 which cherry-picked the
 * replacement of the write and force parameters with gup_flags
 *
+ * Removed vmas parameter from get_user_pages() by commit 7bbf9c8c99
+ * ("mm/gup: remove unused vmas parameter from get_user_pages()")
+ * in linux-next, expected in v6.5-rc1 (2023-05-17)
+ *
 */
 
 #if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
+    #define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
+        get_user_pages(start, nr_pages, flags, pages)
+#elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
    #define NV_GET_USER_PAGES get_user_pages
-#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS)
+#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS)
    #define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
        get_user_pages(current, current->mm, start, nr_pages, flags, pages, vmas)
 #else
@@ -81,13 +97,13 @@ typedef int vm_fault_t;
        int write = flags & FOLL_WRITE;
        int force = flags & FOLL_FORCE;
 
-#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE)
+#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS)
        return get_user_pages(start, nr_pages, write, force, pages, vmas);
 #else
-        // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE
+        // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
        return get_user_pages(current, current->mm, start, nr_pages, write,
                              force, pages, vmas);
-#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE
+#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS
    }
 #endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS
 
@@ -100,15 +116,22 @@ typedef int vm_fault_t;
 * 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
 * in v5.9-rc1 (2020-08-11). *
 *
+ * Removed unused vmas parameter from pin_user_pages_remote() by commit
+ * 83bcc2e132("mm/gup: remove unused vmas parameter from pin_user_pages_remote()")
+ * in linux-next, expected in v6.5-rc1 (2023-05-14)
+ *
 */
 
 #if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT)
-    #if defined (NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK)
+    #if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS)
        #define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
            pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
-    #else
+    #elif defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS)
        #define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
-    #endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK
+    #else
+       #define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
+           pin_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
+    #endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS
 #else
    #define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE
 #endif // NV_PIN_USER_PAGES_REMOTE_PRESENT
@@ -135,22 +158,30 @@ typedef int vm_fault_t;
 * commit 64019a2e467a ("mm/gup: remove task_struct pointer for
 * all gup code") in v5.9-rc1 (2020-08-11).
 *
+ * Removed vmas parameter from get_user_pages_remote() by commit a4bde14d549
+ * ("mm/gup: remove vmas parameter from get_user_pages_remote()")
+ * in linux-next, expected in v6.5-rc1 (2023-05-14)
+ *
 */
 
 #if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
    #if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
+        #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
+            get_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
+
+    #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS)
        #define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
 
-    #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED)
+    #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS)
        #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
            get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
 
-    #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS)
+    #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS)
        #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
            get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas)
 
    #else
-        // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE
+        // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS
        static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long nr_pages,
@@ -167,7 +198,7 @@ typedef int vm_fault_t;
        }
    #endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED
 #else
-    #if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE)
+    #if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS)
    static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long nr_pages,
@@ -185,7 +216,7 @@ typedef int vm_fault_t;
    #else
    #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
        get_user_pages(NULL, mm, start, nr_pages, flags, pages, vmas)
-    #endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE
+    #endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
 #endif // NV_GET_USER_PAGES_REMOTE_PRESENT
 
 /*
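The net effect of all the wrappers above is that every call site keeps the old five-argument shape and the macro decides whether the vmas argument still reaches the kernel (it was dropped upstream in v6.5). Below is a compilable userspace model of that idea; pin_user_pages_new() and the KERNEL_HAS_VMAS_ARG knob are invented stand-ins for the real kernel symbol and the conftest result.

    #include <stdio.h>

    /* Stand-in for the v6.5+ kernel function, which lost its vmas arg. */
    static long pin_user_pages_new(unsigned long start, unsigned long nr,
                                   unsigned int flags, void **pages)
    {
        (void)start; (void)flags; (void)pages;
        return (long)nr; /* pretend every page was pinned */
    }

    #define KERNEL_HAS_VMAS_ARG 0

    #if KERNEL_HAS_VMAS_ARG
    #define NV_PIN_USER_PAGES(start, nr, flags, pages, vmas) \
        pin_user_pages_old(start, nr, flags, pages, vmas)
    #else
    #define NV_PIN_USER_PAGES(start, nr, flags, pages, vmas) \
        pin_user_pages_new(start, nr, flags, pages)
    #endif

    int main(void)
    {
        void *pages[4];
        /* Call sites always pass vmas (NULL here); the macro drops it. */
        long n = NV_PIN_USER_PAGES(0x7f0000000000UL, 4, 0, pages, NULL);
        printf("pinned %ld pages\n", n);
        return 0;
    }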
@@ -510,6 +510,12 @@ struct nv_file_private_t
    nv_file_private_t *ctl_nvfp;
    void *ctl_nvfp_priv;
    NvU32 register_or_refcount;
+
+    //
+    // True if a client or an event was ever allocated on this fd.
+    // If false, RMAPI cleanup is skipped.
+    //
+    NvBool bCleanupRmapi;
 };
 
 // Forward define the gpu ops structures
@@ -959,6 +965,8 @@ NV_STATUS  NV_API_CALL  rm_perform_version_check       (nvidia_stack_t *, void *, NvU
 
 void       NV_API_CALL  rm_power_source_change_event        (nvidia_stack_t *, NvU32);
 
+void       NV_API_CALL  rm_request_dnotifier_state          (nvidia_stack_t *, nv_state_t *);
+
 void       NV_API_CALL  rm_disable_gpu_state_persistence    (nvidia_stack_t *sp, nv_state_t *);
 NV_STATUS  NV_API_CALL  rm_p2p_init_mapping       (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
 NV_STATUS  NV_API_CALL  rm_p2p_destroy_mapping    (nvidia_stack_t *, NvU64);
@@ -1455,12 +1455,12 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
    concurrently with the same UvmCslContext parameter in different threads. The caller must
    guarantee this exclusion.
 
-    * nvUvmInterfaceCslLogDeviceEncryption
+    * nvUvmInterfaceCslRotateIv
    * nvUvmInterfaceCslEncrypt
    * nvUvmInterfaceCslDecrypt
    * nvUvmInterfaceCslSign
    * nvUvmInterfaceCslQueryMessagePool
    * nvUvmInterfaceCslIncrementIv
 */
 
 /*******************************************************************************
@@ -1495,62 +1495,17 @@ NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
 */
 void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext);
 
-
-/*******************************************************************************
-   nvUvmInterfaceCslLogDeviceEncryption
-
-   Returns an IV that can be later used in the nvUvmInterfaceCslEncrypt
-   method. The IV contains a "freshness bit" which value is set by this method
-   and subsequently dirtied by nvUvmInterfaceCslEncrypt to prevent
-   non-malicious reuse of the IV.
-
-   See "CSL Interface and Locking" for locking requirements.
-   This function does not perform dynamic memory allocation.
-
-   Arguments:
-       uvmCslContext[IN/OUT] - The CSL context.
-       encryptIv[OUT] - Parameter that is stored before a successful
-                        device encryption. It is used as an input to
-                        nvUvmInterfaceCslEncrypt.
-
-   Error codes:
-       NV_ERR_INSUFFICIENT_RESOURCES - New IV would cause a counter to overflow.
-*/
-NV_STATUS nvUvmInterfaceCslAcquireEncryptionIv(UvmCslContext *uvmCslContext,
-                                               UvmCslIv *encryptIv);
-
-/*******************************************************************************
-   nvUvmInterfaceCslLogDeviceEncryption
-
-   Logs and checks information about device encryption.
-
-   See "CSL Interface and Locking" for locking requirements.
-   This function does not perform dynamic memory allocation.
-
-   Arguments:
-       uvmCslContext[IN/OUT] - The CSL context.
-       decryptIv[OUT] - Parameter that is stored before a successful
-                        device encryption. It is used as an input to
-                        nvUvmInterfaceCslDecrypt.
-
-   Error codes:
-       NV_ERR_INSUFFICIENT_RESOURCES - The device encryption would cause a counter
-                                       to overflow.
-*/
-NV_STATUS nvUvmInterfaceCslLogDeviceEncryption(UvmCslContext *uvmCslContext,
-                                               UvmCslIv *decryptIv);
-
 /*******************************************************************************
    nvUvmInterfaceCslRotateIv
 
-   Rotates the IV for a given channel and direction.
+   Rotates the IV for a given channel and operation.
 
    This function will rotate the IV on both the CPU and the GPU.
    Outstanding messages that have been encrypted by the GPU should first be
-   decrypted before calling this function with direction equal to
-   UVM_CSL_DIR_GPU_TO_CPU. Similiarly, outstanding messages that have been
+   decrypted before calling this function with operation equal to
+   UVM_CSL_OPERATION_DECRYPT. Similarly, outstanding messages that have been
    encrypted by the CPU should first be decrypted before calling this function
-   with direction equal to UVM_CSL_DIR_CPU_TO_GPU. For a given direction
+   with operation equal to UVM_CSL_OPERATION_ENCRYPT. For a given operation
    the channel must be idle before calling this function. This function can be
    called regardless of the value of the IV's message counter.
@@ -1559,17 +1514,17 @@ NV_STATUS nvUvmInterfaceCslLogDeviceEncryption(UvmCslContext *uvmCslContext,
 
    Arguments:
        uvmCslContext[IN/OUT] - The CSL context.
-       direction[IN] - Either
-                       - UVM_CSL_DIR_CPU_TO_GPU
-                       - UVM_CSL_DIR_GPU_TO_CPU
+       operation[IN] - Either
+                       - UVM_CSL_OPERATION_ENCRYPT
+                       - UVM_CSL_OPERATION_DECRYPT
 
    Error codes:
        NV_ERR_INSUFFICIENT_RESOURCES - The rotate operation would cause a counter
                                        to overflow.
-       NV_ERR_INVALID_ARGUMENT - Invalid value for direction.
+       NV_ERR_INVALID_ARGUMENT - Invalid value for operation.
 */
 NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
-                                    UvmCslDirection direction);
+                                    UvmCslOperation operation);
 
 /*******************************************************************************
    nvUvmInterfaceCslEncrypt
@@ -1580,7 +1535,7 @@ NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
    this function produces undefined behavior. Performance is typically
    maximized when the input and output buffers are 16-byte aligned. This is
    natural alignment for AES block.
-   The encryptIV can be obtained from nvUvmInterfaceCslAcquireEncryptionIv.
+   The encryptIV can be obtained from nvUvmInterfaceCslIncrementIv.
    However, it is optional. If it is NULL, the next IV in line will be used.
 
    See "CSL Interface and Locking" for locking requirements.
@@ -1623,12 +1578,18 @@ NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
 
    Arguments:
        uvmCslContext[IN/OUT] - The CSL context.
-       bufferSize[IN] - Size of the input and output buffers in
-                        units of bytes. Value can range from 1 byte
-                        to (2^32) - 1 bytes.
-       decryptIv[IN] - Parameter given by nvUvmInterfaceCslLogDeviceEncryption.
+       bufferSize[IN] - Size of the input and output buffers in units of bytes.
+                        Value can range from 1 byte to (2^32) - 1 bytes.
+       decryptIv[IN] - IV used to decrypt the ciphertext. Its value can either be given by
+                       nvUvmInterfaceCslIncrementIv, or, if NULL, the CSL context's
+                       internal counter is used.
        inputBuffer[IN] - Address of ciphertext input buffer.
        outputBuffer[OUT] - Address of plaintext output buffer.
+       addAuthData[IN] - Address of the plaintext additional authenticated data used to
+                         calculate the authentication tag. Can be NULL.
+       addAuthDataSize[IN] - Size of the additional authenticated data in units of bytes.
+                             Value can range from 1 byte to (2^32) - 1 bytes.
+                             This parameter is ignored if addAuthData is NULL.
        authTagBuffer[IN] - Address of authentication tag buffer.
                            Its size is UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES.
@@ -1643,6 +1604,8 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
                                   NvU8 const *inputBuffer,
                                   UvmCslIv const *decryptIv,
                                   NvU8 *outputBuffer,
+                                   NvU8 const *addAuthData,
+                                   NvU32 addAuthDataSize,
                                   NvU8 const *authTagBuffer);
 
 /*******************************************************************************
@@ -1673,7 +1636,6 @@ NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
                                NvU8 const *inputBuffer,
                                NvU8 *authTagBuffer);
 
-
 /*******************************************************************************
    nvUvmInterfaceCslQueryMessagePool
 
@@ -1684,14 +1646,45 @@ NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
 
    Arguments:
        uvmCslContext[IN/OUT] - The CSL context.
-       direction[IN] - Either UVM_CSL_DIR_CPU_TO_GPU or UVM_CSL_DIR_GPU_TO_CPU.
+       operation[IN] - Either UVM_CSL_OPERATION_ENCRYPT or UVM_CSL_OPERATION_DECRYPT.
        messageNum[OUT] - Number of messages left before overflow.
 
    Error codes:
-       NV_ERR_INVALID_ARGUMENT - The value of the direction parameter is illegal.
+       NV_ERR_INVALID_ARGUMENT - The value of the operation parameter is illegal.
 */
 NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
-                                            UvmCslDirection direction,
+                                            UvmCslOperation operation,
                                            NvU64 *messageNum);
 
+/*******************************************************************************
+   nvUvmInterfaceCslIncrementIv
+
+   Increments the message counter by the specified amount.
+
+   If iv is non-NULL then the incremented value is returned.
+   If operation is UVM_CSL_OPERATION_ENCRYPT then the returned IV's "freshness" bit is set and
+   can be used in nvUvmInterfaceCslEncrypt. If operation is UVM_CSL_OPERATION_DECRYPT then
+   the returned IV can be used in nvUvmInterfaceCslDecrypt.
+
+   See "CSL Interface and Locking" for locking requirements.
+   This function does not perform dynamic memory allocation.
+
+   Arguments:
+       uvmCslContext[IN/OUT] - The CSL context.
+       operation[IN] - Either
+                       - UVM_CSL_OPERATION_ENCRYPT
+                       - UVM_CSL_OPERATION_DECRYPT
+       increment[IN] - The amount by which the IV is incremented. Can be 0.
+       iv[out] - If non-NULL, a buffer to store the incremented IV.
+
+   Error codes:
+       NV_ERR_INVALID_ARGUMENT - The value of the operation parameter is illegal.
+       NV_ERR_INSUFFICIENT_RESOURCES - Incrementing the message counter would result
+                                       in an overflow.
+*/
+NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
+                                       UvmCslOperation operation,
+                                       NvU64 increment,
+                                       UvmCslIv *iv);
+
 #endif // _NV_UVM_INTERFACE_H_
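The rename from UvmCslDirection to UvmCslOperation recasts this API around what the caller is doing (encrypt vs. decrypt) rather than which way data flows. A loose, hypothetical sketch of the resulting call pattern follows; the types, functions, and counter behavior below are simplified stand-ins for illustration, not the real RM implementation.

    #include <stdio.h>

    /* Simplified stand-ins for the real UVM/RM definitions. */
    typedef enum { UVM_CSL_OPERATION_ENCRYPT, UVM_CSL_OPERATION_DECRYPT } UvmCslOperation;
    typedef struct { unsigned long long msgCounter[2]; } CslContext;

    static int cslRotateIv(CslContext *ctx, UvmCslOperation op)
    {
        ctx->msgCounter[op] = 0; /* model: rotation restarts the counter */
        return 0;
    }

    static int cslIncrementIv(CslContext *ctx, UvmCslOperation op,
                              unsigned long long increment)
    {
        ctx->msgCounter[op] += increment; /* reserve IVs ahead of use */
        return 0;
    }

    int main(void)
    {
        CslContext ctx = {0};
        cslIncrementIv(&ctx, UVM_CSL_OPERATION_ENCRYPT, 8);
        cslRotateIv(&ctx, UVM_CSL_OPERATION_DECRYPT);
        printf("enc=%llu dec=%llu\n", ctx.msgCounter[0], ctx.msgCounter[1]);
        return 0;
    }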
@@ -286,6 +286,7 @@ typedef struct UvmGpuChannelInfo_tag
    // so a channel can be controlled via another channel (SEC2 or WLC/LCIC)
    NvU64 gpFifoGpuVa;
    NvU64 gpPutGpuVa;
+    NvU64 gpGetGpuVa;
    // GPU VA of work submission offset is needed in Confidential Computing
    // so CE channels can ring doorbell of other channels as required for
    // WLC/LCIC work submission
@@ -320,10 +321,6 @@ typedef struct UvmGpuChannelAllocParams_tag
    // The next two fields store UVM_BUFFER_LOCATION values
    NvU32 gpFifoLoc;
    NvU32 gpPutLoc;
-
-    // Allocate the channel as secure. This flag should only be set when
-    // Confidential Compute is enabled.
-    NvBool secure;
 } UvmGpuChannelAllocParams;
 
 typedef struct UvmGpuPagingChannelAllocParams_tag
@@ -367,9 +364,6 @@ typedef struct
    // True if the CE can be used for P2P transactions
    NvBool p2p:1;
 
-    // True if the CE supports encryption
-    NvBool secure:1;
-
    // Mask of physical CEs assigned to this LCE
    //
    // The value returned by RM for this field may change when a GPU is
@@ -851,6 +845,14 @@ typedef union UvmFaultMetadataPacket_tag
    NvU8 _padding[32];
 } UvmFaultMetadataPacket;
 
+// This struct shall not be accessed nor modified directly by UVM as it is
+// entirely managed by the RM layer
+typedef struct UvmCslContext_tag
+{
+    struct ccslContext_t *ctx;
+    void *nvidia_stack;
+} UvmCslContext;
+
 typedef struct UvmGpuFaultInfo_tag
 {
    struct
@@ -908,6 +910,10 @@ typedef struct UvmGpuFaultInfo_tag
        // Confidential Computing is disabled.
        UvmFaultMetadataPacket *bufferMetadata;
 
+        // CSL context used for performing decryption of replayable faults when
+        // Confidential Computing is enabled.
+        UvmCslContext cslCtx;
+
        // Indicates whether UVM owns the replayable fault buffer.
        // The value of this field is always NV_TRUE When Confidential Computing
        // is disabled.
@@ -1046,24 +1052,16 @@ typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo;
 typedef UvmGpuPagingChannelAllocParams gpuPagingChannelAllocParams;
 typedef UvmPmaAllocationOptions gpuPmaAllocationOptions;
 
-// This struct shall not be accessed nor modified directly by UVM as it is
-// entirely managed by the RM layer
-typedef struct UvmCslContext_tag
-{
-    struct ccslContext_t *ctx;
-    void *nvidia_stack;
-} UvmCslContext;
-
 typedef struct UvmCslIv
 {
    NvU8 iv[12];
    NvU8 fresh;
 } UvmCslIv;
 
-typedef enum UvmCslDirection
+typedef enum UvmCslOperation
 {
-    UVM_CSL_DIR_CPU_TO_GPU,
-    UVM_CSL_DIR_GPU_TO_CPU
-} UvmCslDirection;
+    UVM_CSL_OPERATION_ENCRYPT,
+    UVM_CSL_OPERATION_DECRYPT
+} UvmCslOperation;
 
 #endif // _NV_UVM_TYPES_H_
@@ -214,8 +214,8 @@ NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address);
 extern NvU32 os_page_size;
 extern NvU64 os_page_mask;
 extern NvU8 os_page_shift;
-extern NvU32 os_sev_status;
-extern NvBool os_sev_enabled;
+extern NvBool os_cc_enabled;
+extern NvBool os_cc_tdx_enabled;
 extern NvBool os_dma_buf_enabled;
 
 /*
@@ -103,13 +103,12 @@ NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, n
 
 NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t);
 NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *);
-NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_device_encryption(nvidia_stack_t *, struct ccslContext_t *, NvU8 *);
+NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8);
-NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_acquire_encryption_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8 *);
 NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *);
 NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8*, NvU8 *, NvU8 *);
-NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU8 *, NvU8 const *);
+NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU8 *, NvU8 const *, NvU32, NvU8 const *);
 NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *);
 NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64 *);
 NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64, NvU8 *);
 
 #endif
@@ -445,6 +445,9 @@ compile_test() {
            #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT)
            #include <asm/pgtable_types.h>
            #endif
+            #if defined(NV_ASM_PAGE_H_PRESENT)
+            #include <asm/page.h>
+            #endif
            #include <asm/set_memory.h>
            #else
            #include <asm/cacheflush.h>
@@ -467,6 +470,9 @@ compile_test() {
            #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT)
            #include <asm/pgtable_types.h>
            #endif
+            #if defined(NV_ASM_PAGE_H_PRESENT)
+            #include <asm/page.h>
+            #endif
            #include <asm/set_memory.h>
            #else
            #include <asm/cacheflush.h>
@@ -524,6 +530,9 @@ compile_test() {
            #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT)
            #include <asm/pgtable_types.h>
            #endif
+            #if defined(NV_ASM_PAGE_H_PRESENT)
+            #include <asm/page.h>
+            #endif
            #include <asm/set_memory.h>
            #else
            #include <asm/cacheflush.h>
@@ -551,6 +560,9 @@ compile_test() {
            #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT)
            #include <asm/pgtable_types.h>
            #endif
+            #if defined(NV_ASM_PAGE_H_PRESENT)
+            #include <asm/page.h>
+            #endif
            #include <asm/set_memory.h>
            #else
            #include <asm/cacheflush.h>
@@ -695,6 +707,50 @@ compile_test() {
            compile_check_conftest "$CODE" "NV_IOREMAP_WC_PRESENT" "" "functions"
        ;;
 
+        ioremap_driver_hardened)
+            #
+            # Determine if the ioremap_driver_hardened() function is present.
+            # It does not exist on all architectures.
+            # TODO: Update the commit ID once the API is upstreamed.
+            #
+            CODE="
+            #include <asm/io.h>
+            void conftest_ioremap_driver_hardened(void) {
+                ioremap_driver_hardened();
+            }"
+
+            compile_check_conftest "$CODE" "NV_IOREMAP_DRIVER_HARDENED_PRESENT" "" "functions"
+        ;;
+
+        ioremap_driver_hardened_wc)
+            #
+            # Determine if the ioremap_driver_hardened_wc() function is present.
+            # It does not exist on all architectures.
+            # TODO: Update the commit ID once the API is upstreamed.
+            #
+            CODE="
+            #include <asm/io.h>
+            void conftest_ioremap_driver_hardened_wc(void) {
+                ioremap_driver_hardened_wc();
+            }"
+
+            compile_check_conftest "$CODE" "NV_IOREMAP_DRIVER_HARDENED_WC_PRESENT" "" "functions"
+        ;;
+
+        ioremap_cache_shared)
+            #
+            # Determine if the ioremap_cache_shared() function is present.
+            # It does not exist on all architectures.
+            # TODO: Update the commit ID once the API is upstreamed.
+            #
+            CODE="
+            #include <asm/io.h>
+            void conftest_ioremap_cache_shared(void) {
+                ioremap_cache_shared();
+            }"
+
+            compile_check_conftest "$CODE" "NV_IOREMAP_CACHE_SHARED_PRESENT" "" "functions"
+        ;;
+
        dom0_kernel_present)
            # Add config parameter if running on DOM0.
            if [ -n "$VGX_BUILD" ]; then
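Each conftest above follows the same recipe: emit a tiny translation unit, try to compile it against the kernel headers, and turn the presence or absence of the resulting object file into a #define. For signature probes, the trick is redeclaration: the probe restates the function with the candidate prototype, and a conflicting declaration in the real headers fails the build. A self-contained illustration of the redeclaration idea against an ordinary userspace header - memcpy here is just a convenient, well-known symbol chosen for the example:

    #include <string.h>

    /* Matching redeclaration: this file compiles cleanly, so a configure
     * script would conclude that memcpy() has this exact signature. Had we
     * written a different parameter list, the compiler would report a
     * conflicting declaration and produce no object file - which the script
     * reads as "this variant is absent". */
    void *memcpy(void *dst, const void *src, size_t n);

    int probe_ok(void) { return 1; }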
@@ -919,6 +975,21 @@ compile_test() {
            compile_check_conftest "$CODE" "NV_VFIO_MIGRATION_OPS_PRESENT" "" "types"
        ;;
 
+        vfio_precopy_info)
+            #
+            # Determine if vfio_precopy_info struct is present or not
+            #
+            # Added by commit 4db52602a6074 ("vfio: Extend the device migration
+            # protocol with PRE_COPY" in v6.2
+            #
+            CODE="
+            #include <linux/vfio.h>
+            struct vfio_precopy_info precopy_info;
+            "
+
+            compile_check_conftest "$CODE" "NV_VFIO_PRECOPY_INFO_PRESENT" "" "types"
+        ;;
+
        vfio_log_ops)
            #
            # Determine if vfio_log_ops struct is present or not
@@ -1120,6 +1191,23 @@ compile_test() {
            compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_DMA_UNMAP" "" "types"
        ;;
 
+        vfio_device_ops_has_bind_iommufd)
+            #
+            # Determine if 'vfio_device_ops' struct has 'bind_iommufd' field.
+            #
+            # Added by commit a4d1f91db5021 ("vfio-iommufd: Support iommufd
+            # for physical VFIO devices") in v6.2
+            #
+            CODE="
+            #include <linux/pci.h>
+            #include <linux/vfio.h>
+            int conftest_vfio_device_ops_has_bind_iommufd(void) {
+                return offsetof(struct vfio_device_ops, bind_iommufd);
+            }"
+
+            compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_BIND_IOMMUFD" "" "types"
+        ;;
+
        pci_irq_vector_helpers)
            #
            # Determine if pci_alloc_irq_vectors(), pci_free_irq_vectors()
@@ -2395,6 +2483,10 @@ compile_test() {
            # commit 768ae309a961 ("mm: replace get_user_pages() write/force
            # parameters with gup_flags") in v4.9 (2016-10-13)
            #
+            # Removed vmas parameter from get_user_pages() by commit 7bbf9c8c99
+            # ("mm/gup: remove unused vmas parameter from get_user_pages()")
+            # in linux-next, expected in v6.5-rc1
+            #
            # linux-4.4.168 cherry-picked commit 768ae309a961 without
            # c12d2da56d0e which is covered in Conftest #3.
            #
@@ -2404,22 +2496,28 @@ compile_test() {
            # passing conftest's
            #
            set_get_user_pages_defines () {
-                if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" ]; then
-                    echo "#define NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" | append_conftest "functions"
+                if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS" ]; then
+                    echo "#define NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS" | append_conftest "functions"
                else
-                    echo "#undef NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" | append_conftest "functions"
+                    echo "#undef NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS" | append_conftest "functions"
                fi
 
-                if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" ]; then
-                    echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions"
+                if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS" ]; then
+                    echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS" | append_conftest "functions"
                else
-                    echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions"
+                    echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS" | append_conftest "functions"
                fi
 
-                if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" ]; then
-                    echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" | append_conftest "functions"
+                if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS" ]; then
+                    echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS" | append_conftest "functions"
                else
-                    echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" | append_conftest "functions"
+                    echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS" | append_conftest "functions"
                fi
 
+                if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS" ]; then
+                    echo "#define NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS" | append_conftest "functions"
+                else
+                    echo "#undef NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS" | append_conftest "functions"
+                fi
+
                if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_FLAGS" ]; then
@@ -2427,6 +2525,7 @@ compile_test() {
                else
                    echo "#undef NV_GET_USER_PAGES_HAS_ARGS_FLAGS" | append_conftest "functions"
                fi
+
            }
 
            # Conftest #1: Check if get_user_pages accepts 6 arguments.
@@ -2447,14 +2546,15 @@ compile_test() {
            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
            rm -f conftest$$.c
            if [ -f conftest$$.o ]; then
-                set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE"
+                set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS"
                rm -f conftest$$.o
                return
            fi
 
            # Conftest #2: Check if get_user_pages has gup_flags instead of
            # write and force parameters. And that gup doesn't accept a
-            # task_struct and mm_struct as its first arguments.
+            # task_struct and mm_struct as its first arguments. get_user_pages
+            # has vm_area_struct as its last argument.
            # Return if available.
            # Fall through to conftest #3 on failure.
 
@@ -2472,16 +2572,17 @@ compile_test() {
            rm -f conftest$$.c
 
            if [ -f conftest$$.o ]; then
-                set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_FLAGS"
+                set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS"
                rm -f conftest$$.o
                return
            fi
 
            # Conftest #3: Check if get_user_pages has gup_flags instead of
-            # write and force parameters AND that gup has task_struct and
-            # mm_struct as its first arguments.
+            # write and force parameters. The gup has task_struct and
+            # mm_struct as its first arguments. get_user_pages
+            # has vm_area_struct as its last argument.
            # Return if available.
-            # Fall through to default case if absent.
+            # Fall through to conftest #4 on failure.
 
            echo "$CONFTEST_PREAMBLE
            #include <linux/mm.h>
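Because set_get_user_pages_defines() defines exactly one NV_GET_USER_PAGES_HAS_ARGS_* macro and #undefs all the others, consumers can select a wrapper with a plain #elif chain and no risk of two variants matching at once. A compilable model of that consumption pattern - the hard-coded define below stands in for the conftest output:

    #include <stdio.h>

    /* Stand-in for the single macro the conftest emits. */
    #define NV_GET_USER_PAGES_HAS_ARGS_FLAGS 1

    #if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
    static const char *variant = "flags, no vmas (v6.5+ shape)";
    #elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
    static const char *variant = "flags + vmas";
    #else
    static const char *variant = "legacy write/force";
    #endif

    int main(void) { printf("gup variant: %s\n", variant); return 0; }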
@ -2499,12 +2600,35 @@ compile_test() {
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS"
|
||||
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS"
|
||||
rm -f conftest$$.o
|
||||
return
|
||||
fi
|
||||
|
||||
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE"
|
||||
# Conftest #4: gup doesn't accept a task_struct and mm_struct as
|
||||
# its first arguments. check if get_user_pages() does not take
|
||||
# vmas argument.
|
||||
# Fall through to default case otherwise.
|
||||
|
||||
echo "$CONFTEST_PREAMBLE
|
||||
#include <linux/mm.h>
|
||||
long get_user_pages(unsigned long start,
|
||||
unsigned long nr_pages,
|
||||
unsigned int gup_flags,
|
||||
struct page **pages) {
|
||||
return 0;
|
||||
}" > conftest$$.c
|
||||
|
||||
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_FLAGS"
|
||||
rm -f conftest$$.o
|
||||
return
|
||||
fi
|
||||
|
||||
set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS"
|
||||
|
||||
return
|
||||
;;
|
||||
@ -2531,6 +2655,10 @@ compile_test() {
|
||||
# commit 64019a2e467a ("mm/gup: remove task_struct pointer for
|
||||
# all gup code") in v5.9-rc1 (2020-08-11).
|
||||
#
|
||||
# Removed vmas parameter from get_user_pages_remote() by commit
|
||||
# a4bde14d549 ("mm/gup: remove vmas parameter from get_user_pages_remote()")
|
||||
# in linux-next, expected in v6.5-rc1
|
||||
#
|
||||
|
||||
#
|
||||
# This function sets the NV_GET_USER_PAGES_REMOTE_* macros as per
|
||||
@ -2543,22 +2671,28 @@ compile_test() {
|
||||
echo "#define NV_GET_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" ]; then
|
||||
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions"
|
||||
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS" ]; then
|
||||
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS" | append_conftest "functions"
|
||||
else
|
||||
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions"
|
||||
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" ]; then
|
||||
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" | append_conftest "functions"
|
||||
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS" ]; then
|
||||
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS" | append_conftest "functions"
|
||||
else
|
||||
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" | append_conftest "functions"
|
||||
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" ]; then
|
||||
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" | append_conftest "functions"
|
||||
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS" ]; then
|
||||
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS" | append_conftest "functions"
|
||||
else
|
||||
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" | append_conftest "functions"
|
||||
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS" ]; then
|
||||
echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS" | append_conftest "functions"
|
||||
else
|
||||
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED" ]; then
|
||||
@ -2566,6 +2700,7 @@ compile_test() {
|
||||
else
|
||||
echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
# conftest #1: check if get_user_pages_remote() is available
|
||||
@ -2588,8 +2723,8 @@ compile_test() {
|
||||
fi
|
||||
|
||||
#
|
||||
# conftest #2: check if get_user_pages_remote() has write and
|
||||
# force arguments. Return if these arguments are present
|
||||
# conftest #2: check if get_user_pages_remote() has write, force
|
||||
# and vmas arguments. Return if these arguments are present
|
||||
# Fall through to conftest #3 if these args are absent.
|
||||
#
|
||||
echo "$CONFTEST_PREAMBLE
|
||||
@ -2609,14 +2744,14 @@ compile_test() {
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE"
|
||||
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS"
|
||||
rm -f conftest$$.o
|
||||
return
|
||||
fi
|
||||
|
||||
#
|
||||
# conftest #3: check if get_user_pages_remote() has gpu_flags
|
||||
# arguments. Return if these arguments are present
|
||||
# conftest #3: check if get_user_pages_remote() has gpu_flags and
|
||||
# vmas arguments. Return if these arguments are present
|
||||
# Fall through to conftest #4 if these args are absent.
|
||||
#
|
||||
echo "$CONFTEST_PREAMBLE
|
||||
@ -2635,13 +2770,14 @@ compile_test() {
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS"
|
||||
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS"
|
||||
rm -f conftest$$.o
|
||||
return
|
||||
fi
|
||||
|
||||
#
|
||||
# conftest #4: check if get_user_pages_remote() has locked argument
|
||||
# conftest #4: check if get_user_pages_remote() has locked and
|
||||
# vmas argument
|
||||
# Return if these arguments are present. Fall through to conftest #5
|
||||
# if these args are absent.
|
||||
#
|
||||
@ -2662,7 +2798,7 @@ compile_test() {
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED"
|
||||
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS"
|
||||
rm -f conftest$$.o
|
||||
return
|
||||
fi
|
||||
@ -2686,10 +2822,34 @@ compile_test() {
|
||||
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS"
|
||||
rm -f conftest$$.o
|
||||
fi
|
||||
|
||||
#
|
||||
# conftest #6: check if get_user_pages_remote() does not take
|
||||
# vmas argument.
|
||||
#
|
||||
echo "$CONFTEST_PREAMBLE
|
||||
#include <linux/mm.h>
|
||||
long get_user_pages_remote(struct mm_struct *mm,
|
||||
unsigned long start,
|
||||
unsigned long nr_pages,
|
||||
unsigned int gup_flags,
|
||||
struct page **pages,
|
||||
int *locked) {
|
||||
return 0;
|
||||
}" > conftest$$.c
|
||||
|
||||
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED"
|
||||
rm -f conftest$$.o
|
||||
fi
|
||||
|
||||
;;
|
||||
|
||||
pin_user_pages)
|
||||
@ -2701,17 +2861,65 @@ compile_test() {
|
||||
# pin_user_pages() was added by commit eddb1c228f7951d399240
|
||||
# ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in
|
||||
# v5.6-rc1 (2020-01-30)
|
||||
#
|
||||
# Removed vmas parameter from pin_user_pages() by commit
|
||||
# 40896a02751("mm/gup: remove vmas parameter from pin_user_pages()")
|
||||
# in linux-next, expected in v6.5-rc1
|
||||
|
||||
set_pin_user_pages_defines () {
|
||||
if [ "$1" = "" ]; then
|
||||
echo "#undef NV_PIN_USER_PAGES_PRESENT" | append_conftest "functions"
|
||||
else
|
||||
echo "#define NV_PIN_USER_PAGES_PRESENT" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
if [ "$1" = "NV_PIN_USER_PAGES_HAS_ARGS_VMAS" ]; then
|
||||
echo "#define NV_PIN_USER_PAGES_HAS_ARGS_VMAS" | append_conftest "functions"
|
||||
else
|
||||
echo "#undef NV_PIN_USER_PAGES_HAS_ARGS_VMAS" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
# conftest #1: check if pin_user_pages() is available
|
||||
# return if not available.
|
||||
# Fall through to conftest #2 if it is present
|
||||
#
|
||||
CODE="
|
||||
echo "$CONFTEST_PREAMBLE
|
||||
#include <linux/mm.h>
|
||||
void conftest_pin_user_pages(void) {
|
||||
pin_user_pages();
|
||||
}"
|
||||
}" > conftest$$.c
|
||||
|
||||
compile_check_conftest "$CODE" "NV_PIN_USER_PAGES_PRESENT" "" "functions"
|
||||
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_pin_user_pages_defines ""
|
||||
rm -f conftest$$.o
|
||||
return
|
||||
fi
|
||||
|
||||
# conftest #2: Check if pin_user_pages() has vmas argument
|
||||
echo "$CONFTEST_PREAMBLE
|
||||
#include <linux/mm.h>
|
||||
long pin_user_pages(unsigned long start,
|
||||
unsigned long nr_pages,
|
||||
unsigned int gup_flags,
|
||||
struct page **pages,
|
||||
struct vm_area_struct **vmas) {
|
||||
return 0;
|
||||
}" > conftest$$.c
|
||||
|
||||
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_pin_user_pages_defines "NV_PIN_USER_PAGES_HAS_ARGS_VMAS"
|
||||
rm -f conftest$$.o
|
||||
else
|
||||
set_pin_user_pages_defines "NV_PIN_USER_PAGES_PRESENT"
|
||||
fi
|
||||
;;
|
||||
|
||||
pin_user_pages_remote)
|
||||
@ -2724,6 +2932,10 @@ compile_test() {
|
||||
# pin_user_pages_remote() removed 'tsk' parameter by
|
||||
# commit 64019a2e467a ("mm/gup: remove task_struct pointer for
|
||||
# all gup code") in v5.9-rc1 (2020-08-11).
|
||||
#
|
||||
# Removed unused vmas parameter from pin_user_pages_remote() by
|
||||
# commit 83bcc2e132 ("mm/gup: remove unused vmas parameter from
|
||||
# pin_user_pages_remote()") in linux-next, expected in v6.5-rc1
|
||||
|
||||
#
|
||||
# This function sets the NV_PIN_USER_PAGES_REMOTE_* macros as per
|
||||
@ -2736,10 +2948,16 @@ compile_test() {
|
||||
echo "#define NV_PIN_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
if [ "$1" = "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK" ]; then
|
||||
echo "#define NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK" | append_conftest "functions"
|
||||
if [ "$1" = "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" ]; then
|
||||
echo "#define NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" | append_conftest "functions"
|
||||
else
|
||||
echo "#undef NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK" | append_conftest "functions"
|
||||
echo "#undef NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" | append_conftest "functions"
|
||||
fi
|
||||
|
||||
if [ "$1" = "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" ]; then
|
||||
echo "#define NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" | append_conftest "functions"
|
||||
else
|
||||
echo "#undef NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" | append_conftest "functions"
|
||||
fi
|
||||
}
|
||||
|
||||
@ -2762,7 +2980,11 @@ compile_test() {
|
||||
return
|
||||
fi
|
||||
|
||||
# conftest #2: Check if pin_user_pages_remote() has tsk argument
|
||||
# conftest #2: Check if pin_user_pages_remote() has tsk and
|
||||
# vmas argument
|
||||
# Return if these arguments are present else fall through to
|
||||
# conftest #3
|
||||
|
||||
echo "$CONFTEST_PREAMBLE
|
||||
#include <linux/mm.h>
|
||||
long pin_user_pages_remote(struct task_struct *tsk,
|
||||
@ -2780,11 +3002,34 @@ compile_test() {
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK"
|
||||
set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS"
|
||||
rm -f conftest$$.o
|
||||
return
|
||||
fi
|
||||
|
||||
# conftest #3: Check if pin_user_pages_remote() has vmas argument
|
||||
echo "$CONFTEST_PREAMBLE
|
||||
#include <linux/mm.h>
|
||||
long pin_user_pages_remote(struct mm_struct *mm,
|
||||
unsigned long start,
|
||||
unsigned long nr_pages,
|
||||
unsigned int gup_flags,
|
||||
struct page **pages,
|
||||
struct vm_area_struct **vmas,
|
||||
int *locked) {
|
||||
return 0;
|
||||
}" > conftest$$.c
|
||||
|
||||
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
|
||||
rm -f conftest$$.c
|
||||
|
||||
if [ -f conftest$$.o ]; then
|
||||
set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS"
|
||||
rm -f conftest$$.o
|
||||
else
|
||||
set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_PRESENT"
|
||||
fi
|
||||
|
||||
;;
|
||||
|
||||
vfio_pin_pages_has_vfio_device_arg)
|
||||
@ -4699,40 +4944,22 @@ compile_test() {
|
||||
compile_check_conftest "$CODE" "NV_PCI_CHANNEL_STATE_PRESENT" "" "types"
|
||||
;;
|
||||
|
||||
pgprot_decrypted)
|
||||
cc_platform_has)
|
||||
#
|
||||
# Determine if the macro 'pgprot_decrypted()' is present.
|
||||
# Determine if 'cc_platform_has()' is present.
|
||||
#
|
||||
# Added by commit 21729f81ce8a ("x86/mm: Provide general kernel
|
||||
# support for memory encryption") in v4.14 (2017-07-18)
|
||||
# Added by commit aa5a461171f9 ("x86/sev: Add an x86 version of
|
||||
# cc_platform_has()") in v5.15.3 (2021-10-04)
|
||||
CODE="
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
void conftest_pgprot_decrypted(void)
|
||||
if(pgprot_decrypted()) {}
|
||||
}"
|
||||
|
||||
compile_check_conftest "$CODE" "NV_PGPROT_DECRYPTED_PRESENT" "" "functions"
|
||||
|
||||
;;
|
||||
|
||||
cc_mkdec)
|
||||
#
|
||||
# Determine if cc_mkdec() is present.
|
||||
#
|
||||
# cc_mkdec() by commit b577f542f93c ("x86/coco: Add API to handle
|
||||
# encryption mask) in v5.18-rc1 (2022-02-22).
|
||||
#
|
||||
CODE="
|
||||
#if defined(NV_ASM_COCO_H_PRESENT)
|
||||
#include <asm/coco.h>
|
||||
#if defined(NV_LINUX_CC_PLATFORM_H_PRESENT)
|
||||
#include <linux/cc_platform.h>
|
||||
#endif
|
||||
|
||||
void conftest_cc_mkdec(void) {
|
||||
cc_mkdec();
|
||||
void conftest_cc_platfrom_has(void) {
|
||||
cc_platform_has();
|
||||
}"
|
||||
|
||||
compile_check_conftest "$CODE" "NV_CC_MKDEC_PRESENT" "" "functions"
|
||||
compile_check_conftest "$CODE" "NV_CC_PLATFORM_PRESENT" "" "functions"
|
||||
;;
|
||||
|
||||
drm_prime_pages_to_sg_has_drm_device_arg)
|
||||
@ -6052,6 +6279,68 @@ compile_test() {
|
||||
compile_check_conftest "$CODE" "NV_MEMORY_FAILURE_MF_SW_SIMULATED_DEFINED" "" "types"
|
||||
;;
|
||||
|
||||
crypto)
|
||||
#
|
||||
# Determine if we support various crypto functions.
|
||||
# This test is not complete and may return false positive.
|
||||
#
|
||||
CODE="
|
||||
#include <crypto/akcipher.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/ecc_curve.h>
|
||||
#include <crypto/ecdh.h>
|
||||
#include <crypto/hash.h>
|
||||
#include <crypto/internal/ecc.h>
|
||||
#include <crypto/kpp.h>
|
||||
#include <crypto/public_key.h>
|
||||
#include <crypto/sm3.h>
|
||||
#include <keys/asymmetric-type.h>
|
||||
#include <linux/crypto.h>
|
||||
void conftest_crypto(void) {
|
||||
struct shash_desc sd;
|
||||
struct crypto_shash cs;
|
||||
(void)crypto_shash_tfm_digest;
|
||||
}"
|
||||
|
||||
compile_check_conftest "$CODE" "NV_CRYPTO_PRESENT" "" "symbols"
|
||||
;;

        mempolicy_has_unified_nodes)
            #
            # Determine if the 'mempolicy' structure has
            # nodes union.
            #
            # nodes field was added by commit 269fbe72cd ("mm/mempolicy:
            # use unified 'nodes' for bind/interleave/prefer policies") in
            # v5.14 (2021-06-30).
            #
            CODE="
            #include <linux/mempolicy.h>
            int conftest_mempolicy_has_unified_nodes(void) {
                return offsetof(struct mempolicy, nodes);
            }"

            compile_check_conftest "$CODE" "NV_MEMPOLICY_HAS_UNIFIED_NODES" "" "types"
        ;;

        mempolicy_has_home_node)
            #
            # Determine if the 'mempolicy' structure has
            # home_node field.
            #
            # home_node field was added by commit c6018b4b254
            # ("mm/mempolicy: add set_mempolicy_home_node syscall") in v5.17
            # (2022-01-14).
            #
            CODE="
            #include <linux/mempolicy.h>
            int conftest_mempolicy_has_home_node(void) {
                return offsetof(struct mempolicy, home_node);
            }"

            compile_check_conftest "$CODE" "NV_MEMPOLICY_HAS_HOME_NODE" "" "types"
        ;;
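A hedged sketch (not from this driver) of how these two mempolicy defines let common code read VMA policy fields across kernel versions; the helper name is illustrative.

#include <linux/mempolicy.h>
#include <linux/nodemask.h>
#include <linux/numa.h>

// Returns the policy's preferred node, favoring the explicit home node on
// kernels that have one, then the first node of the unified nodemask.
static int nv_policy_preferred_node(struct mempolicy *pol)
{
#if defined(NV_MEMPOLICY_HAS_HOME_NODE)
    if (pol->home_node != NUMA_NO_NODE)
        return pol->home_node;
#endif
#if defined(NV_MEMPOLICY_HAS_UNIFIED_NODES)
    if (!nodes_empty(pol->nodes))
        return first_node(pol->nodes);
#endif
    return NUMA_NO_NODE;
}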

    # When adding a new conftest entry, please use the correct format for
    # specifying the relevant upstream Linux kernel commit.
    #
@@ -6385,8 +6674,8 @@ case "$5" in
        if [ "$VFIO_IOMMU_PRESENT" != "0" ] && [ "$KVM_PRESENT" != "0" ] ; then

            # On x86_64, vGPU requires MDEV framework to be present.
            # On aarch64, vGPU requires vfio-pci-core framework to be present.
            if ([ "$ARCH" = "arm64" ] && [ "$VFIO_PCI_CORE_PRESENT" != "0" ]) ||
            # On aarch64, vGPU requires MDEV or vfio-pci-core framework to be present.
            if ([ "$ARCH" = "arm64" ] && ([ "$VFIO_MDEV_PRESENT" != "0" ] || [ "$VFIO_PCI_CORE_PRESENT" != "0" ])) ||
               ([ "$ARCH" = "x86_64" ] && [ "$VFIO_MDEV_PRESENT" != "0" ]); then
                exit 0
            fi
@@ -6398,8 +6687,8 @@ case "$5" in
                echo "CONFIG_VFIO_IOMMU_TYPE1";
            fi

            if [ "$ARCH" = "arm64" ] && [ "$VFIO_PCI_CORE_PRESENT" = "0" ]; then
                echo "CONFIG_VFIO_PCI_CORE";
            if [ "$ARCH" = "arm64" ] && [ "$VFIO_MDEV_PRESENT" = "0" ] && [ "$VFIO_PCI_CORE_PRESENT" = "0" ]; then
                echo "either CONFIG_VFIO_MDEV or CONFIG_VFIO_PCI_CORE";
            fi

            if [ "$ARCH" = "x86_64" ] && [ "$VFIO_MDEV_PRESENT" = "0" ]; then

@@ -1367,8 +1367,23 @@ static struct drm_driver nv_drm_driver = {
    .ioctls = nv_drm_ioctls,
    .num_ioctls = ARRAY_SIZE(nv_drm_ioctls),

    /*
     * linux-next commit 71a7974ac701 ("drm/prime: Unexport helpers for fd/handle
     * conversion") unexports drm_gem_prime_handle_to_fd() and
     * drm_gem_prime_fd_to_handle().
     *
     * Prior linux-next commit 6b85aa68d9d5 ("drm: Enable PRIME import/export for
     * all drivers") made these helpers the default when .prime_handle_to_fd /
     * .prime_fd_to_handle are unspecified, so it's fine to just skip specifying
     * them if the helpers aren't present.
     */
#if NV_IS_EXPORT_SYMBOL_PRESENT_drm_gem_prime_handle_to_fd
    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
#endif
#if NV_IS_EXPORT_SYMBOL_PRESENT_drm_gem_prime_fd_to_handle
    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
#endif

    .gem_prime_import = nv_drm_gem_prime_import,
    .gem_prime_import_sg_table = nv_drm_gem_prime_import_sg_table,

@@ -179,6 +179,7 @@ static inline int nv_drm_gem_handle_create(struct drm_file *filp,
    return drm_gem_handle_create(filp, &nv_gem->base, handle);
}

#if defined(NV_DRM_FENCE_AVAILABLE)
static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
@@ -187,6 +188,7 @@ static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem
    return nv_gem->base.dma_buf ? nv_gem->base.dma_buf->resv : &nv_gem->resv;
#endif
}
#endif

void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
                            struct nv_drm_gem_object *nv_gem,

@@ -54,6 +54,8 @@ NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd

NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
@@ -65,6 +65,9 @@
static bool output_rounding_fix = true;
module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400);

static bool disable_vrr_memclk_switch = false;
module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400);

/* These parameters are used for fault injection tests. Normally the defaults
 * should be used. */
MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
@@ -91,6 +94,11 @@ NvBool nvkms_output_rounding_fix(void)
    return output_rounding_fix;
}

NvBool nvkms_disable_vrr_memclk_switch(void)
{
    return disable_vrr_memclk_switch;
}

#define NVKMS_SYNCPT_STUBS_NEEDED

/*************************************************************************

@@ -98,6 +98,8 @@ typedef struct {

NvBool nvkms_output_rounding_fix(void);

NvBool nvkms_disable_vrr_memclk_switch(void);

void nvkms_call_rm (void *ops);
void* nvkms_alloc  (size_t size,
                    NvBool zero);

@@ -108,5 +108,7 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_device_range
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_mm_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_pt_regs_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_unified_nodes
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node

NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg

@@ -121,6 +121,8 @@ bool uvm_hal_ampere_ce_memcopy_is_valid_c6b5(uvm_push_t *push, uvm_gpu_address_t
        return true;

    if (uvm_channel_is_proxy(push->channel)) {
        uvm_pushbuffer_t *pushbuffer;

        if (dst.is_virtual) {
            UVM_ERR_PRINT("Destination address of memcopy must be physical, not virtual\n");
            return false;
@@ -142,7 +144,8 @@ bool uvm_hal_ampere_ce_memcopy_is_valid_c6b5(uvm_push_t *push, uvm_gpu_address_t
            return false;
        }

        push_begin_gpu_va = uvm_pushbuffer_get_gpu_va_for_push(push->channel->pool->manager->pushbuffer, push);
        pushbuffer = uvm_channel_get_pushbuffer(push->channel);
        push_begin_gpu_va = uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push);

        if ((src.address < push_begin_gpu_va) || (src.address >= push_begin_gpu_va + uvm_push_get_size(push))) {
            UVM_ERR_PRINT("Source address of memcopy must point to pushbuffer\n");
@@ -177,10 +180,13 @@ bool uvm_hal_ampere_ce_memcopy_is_valid_c6b5(uvm_push_t *push, uvm_gpu_address_t
// irrespective of the virtualization mode.
void uvm_hal_ampere_ce_memcopy_patch_src_c6b5(uvm_push_t *push, uvm_gpu_address_t *src)
{
    uvm_pushbuffer_t *pushbuffer;

    if (!uvm_channel_is_proxy(push->channel))
        return;

    src->address -= uvm_pushbuffer_get_gpu_va_for_push(push->channel->pool->manager->pushbuffer, push);
    pushbuffer = uvm_channel_get_pushbuffer(push->channel);
    src->address -= uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push);
}
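This change repeatedly replaces channel->pool->manager->... pointer chains with helper calls. A hedged sketch of the accessors' assumed shape follows; their real definitions live in the channel headers, not in this diff.

// Assumed shape only; names with a _sketch suffix are illustrative.
static uvm_pushbuffer_t *uvm_channel_get_pushbuffer_sketch(uvm_channel_t *channel)
{
    return channel->pool->manager->pushbuffer;
}

static uvm_gpu_t *uvm_channel_get_gpu_sketch(uvm_channel_t *channel)
{
    return channel->pool->manager->gpu;
}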

bool uvm_hal_ampere_ce_memset_is_valid_c6b5(uvm_push_t *push,

@@ -24,6 +24,7 @@
#include "uvm_va_range.h"
#include "uvm_ats_faults.h"
#include "uvm_migrate_pageable.h"
#include <linux/mempolicy.h>

// TODO: Bug 2103669: Implement a real prefetching policy and remove or adapt
// these experimental parameters. These are intended to help guide that policy.
@@ -79,7 +80,7 @@ static NV_STATUS service_ats_faults(uvm_gpu_va_space_t *gpu_va_space,
                                    NvU64 start,
                                    size_t length,
                                    uvm_fault_access_type_t access_type,
                                    uvm_fault_client_type_t client_type)
                                    uvm_ats_fault_context_t *ats_context)
{
    uvm_va_space_t *va_space = gpu_va_space->va_space;
    struct mm_struct *mm = va_space->va_space_mm.mm;
@@ -95,17 +96,18 @@ static NV_STATUS service_ats_faults(uvm_gpu_va_space_t *gpu_va_space,
    // 2) guest physical -> host physical
    //
    // The overall ATS translation will fault if either of those translations is
    // invalid. The get_user_pages() call above handles translation #1, but not
    // #2. We don't know if we're running as a guest, but in case we are we can
    // force that translation to be valid by touching the guest physical address
    // from the CPU. If the translation is not valid then the access will cause
    // a hypervisor fault. Note that dma_map_page() can't establish mappings
    // used by GPU ATS SVA translations. GPU accesses to host physical addresses
    // obtained as a result of the address translation request use the CPU
    // address space instead of the IOMMU address space since the translated
    // host physical address isn't necessarily an IOMMU address. The only way to
    // establish guest physical to host physical mapping in the CPU address
    // space is to touch the page from the CPU.
    // invalid. The pin_user_pages() call within the uvm_migrate_pageable() call
    // below handles translation #1, but not #2. We don't know if we're running
    // as a guest, but in case we are we can force that translation to be valid
    // by touching the guest physical address from the CPU. If the translation
    // is not valid then the access will cause a hypervisor fault. Note that
    // dma_map_page() can't establish mappings used by GPU ATS SVA translations.
    // GPU accesses to host physical addresses obtained as a result of the
    // address translation request use the CPU address space instead of the
    // IOMMU address space since the translated host physical address isn't
    // necessarily an IOMMU address. The only way to establish guest physical to
    // host physical mapping in the CPU address space is to touch the page from
    // the CPU.
    //
    // We assume that the hypervisor mappings are all VM_PFNMAP, VM_SHARED, and
    // VM_WRITE, meaning that the mappings are all granted write access on any
@@ -118,18 +120,24 @@ static NV_STATUS service_ats_faults(uvm_gpu_va_space_t *gpu_va_space,
    {
        .va_space = va_space,
        .mm = mm,
        .dst_id = gpu_va_space->gpu->parent->id,
        .dst_node_id = -1,
        .dst_id = ats_context->residency_id,
        .dst_node_id = ats_context->residency_node,
        .populate_permissions = write ? UVM_POPULATE_PERMISSIONS_WRITE : UVM_POPULATE_PERMISSIONS_ANY,
        .touch = true,
        .skip_mapped = true,
        .populate_on_cpu_alloc_failures = true,
        .user_space_start = &user_space_start,
        .user_space_length = &user_space_length,
    };

    UVM_ASSERT(uvm_ats_can_service_faults(gpu_va_space, mm));

    expand_fault_region(vma, start, length, client_type, &uvm_migrate_args.start, &uvm_migrate_args.length);
    expand_fault_region(vma,
                        start,
                        length,
                        ats_context->client_type,
                        &uvm_migrate_args.start,
                        &uvm_migrate_args.length);

    // We are trying to use migrate_vma API in the kernel (if it exists) to
    // populate and map the faulting region on the GPU. We want to do this only
@@ -165,6 +173,58 @@ static void flush_tlb_write_faults(uvm_gpu_va_space_t *gpu_va_space,
    uvm_tlb_batch_invalidate(&ats_invalidate->write_faults_tlb_batch, addr, size, PAGE_SIZE, UVM_MEMBAR_NONE);
}

static void ats_batch_select_residency(uvm_gpu_va_space_t *gpu_va_space,
                                       struct vm_area_struct *vma,
                                       uvm_ats_fault_context_t *ats_context)
{
    uvm_gpu_t *gpu = gpu_va_space->gpu;
    int residency = uvm_gpu_numa_node(gpu);

#if defined(NV_MEMPOLICY_HAS_UNIFIED_NODES)
    struct mempolicy *vma_policy = vma_policy(vma);
    unsigned short mode;

    if (!vma_policy)
        goto done;

    mode = vma_policy->mode;

    if ((mode == MPOL_BIND) || (mode == MPOL_PREFERRED_MANY) || (mode == MPOL_PREFERRED)) {
        int home_node = NUMA_NO_NODE;

#if defined(NV_MEMPOLICY_HAS_HOME_NODE)
        if ((mode != MPOL_PREFERRED) && (vma_policy->home_node != NUMA_NO_NODE))
            home_node = vma_policy->home_node;
#endif

        // Prefer home_node if set. Otherwise, prefer the faulting GPU if it's
        // in the list of preferred nodes, else prefer the closest_cpu_numa_node
        // to the GPU if closest_cpu_numa_node is in the list of preferred
        // nodes. Fall back to the faulting GPU if all else fails.
        if (home_node != NUMA_NO_NODE) {
            residency = home_node;
        }
        else if (!node_isset(residency, vma_policy->nodes)) {
            int closest_cpu_numa_node = gpu->parent->closest_cpu_numa_node;

            if ((closest_cpu_numa_node != NUMA_NO_NODE) && node_isset(closest_cpu_numa_node, vma_policy->nodes))
                residency = gpu->parent->closest_cpu_numa_node;
            else
                residency = first_node(vma_policy->nodes);
        }
    }

    // Update gpu if residency is not the faulting gpu.
    if (residency != uvm_gpu_numa_node(gpu))
        gpu = uvm_va_space_find_gpu_with_memory_node_id(gpu_va_space->va_space, residency);

done:
#endif

    ats_context->residency_id = gpu ? gpu->parent->id : UVM_ID_CPU;
    ats_context->residency_node = residency;
}
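// Worked example (hypothetical numbers): with MPOL_BIND over nodes {2,3}, a
// faulting GPU whose memory is NUMA node 5, and closest_cpu_numa_node == 2,
// home_node is unset, node_isset(5, {2,3}) fails, and node 2 is in the mask,
// so residency becomes 2. Since node 2 is not the faulting GPU's node, the
// lookup above maps it back to the GPU owning node 2, or to the CPU
// (UVM_ID_CPU) when no GPU owns it.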

NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
                                 struct vm_area_struct *vma,
                                 NvU64 base,
@@ -205,6 +265,8 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
        uvm_page_mask_zero(write_fault_mask);
    }

    ats_batch_select_residency(gpu_va_space, vma, ats_context);

    for_each_va_block_subregion_in_mask(subregion, write_fault_mask, region) {
        NvU64 start = base + (subregion.first * PAGE_SIZE);
        size_t length = uvm_va_block_region_num_pages(subregion) * PAGE_SIZE;
@@ -215,7 +277,7 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
        UVM_ASSERT(start >= vma->vm_start);
        UVM_ASSERT((start + length) <= vma->vm_end);

        status = service_ats_faults(gpu_va_space, vma, start, length, access_type, client_type);
        status = service_ats_faults(gpu_va_space, vma, start, length, access_type, ats_context);
        if (status != NV_OK)
            return status;

@@ -244,11 +306,12 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
    for_each_va_block_subregion_in_mask(subregion, read_fault_mask, region) {
        NvU64 start = base + (subregion.first * PAGE_SIZE);
        size_t length = uvm_va_block_region_num_pages(subregion) * PAGE_SIZE;
        uvm_fault_access_type_t access_type = UVM_FAULT_ACCESS_TYPE_READ;

        UVM_ASSERT(start >= vma->vm_start);
        UVM_ASSERT((start + length) <= vma->vm_end);

        status = service_ats_faults(gpu_va_space, vma, start, length, UVM_FAULT_ACCESS_TYPE_READ, client_type);
        status = service_ats_faults(gpu_va_space, vma, start, length, access_type, ats_context);
        if (status != NV_OK)
            return status;

@@ -338,11 +338,6 @@ static NV_STATUS test_memcpy_and_memset_inner(uvm_gpu_t *gpu,
        return NV_OK;
    }

    if (!gpu->parent->ce_hal->memcopy_is_valid(&push, dst, src)) {
        TEST_NV_CHECK_RET(uvm_push_end_and_wait(&push));
        return NV_OK;
    }

    // The input virtual addresses exist in UVM's internal address space, not
    // the proxy address space
    if (uvm_channel_is_proxy(push.channel)) {
@@ -401,7 +396,7 @@ static NV_STATUS test_memcpy_and_memset_inner(uvm_gpu_t *gpu,
static NV_STATUS test_memcpy_and_memset(uvm_gpu_t *gpu)
{
    NV_STATUS status = NV_OK;
    bool is_proxy_va_space;
    bool is_proxy_va_space = false;
    uvm_gpu_address_t gpu_verif_addr;
    void *cpu_verif_addr;
    uvm_mem_t *verif_mem = NULL;
@@ -437,6 +432,34 @@ static NV_STATUS test_memcpy_and_memset(uvm_gpu_t *gpu)
        }
    }

    // Virtual address (in UVM's internal address space) backed by sysmem
    TEST_NV_CHECK_GOTO(uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_SYS, size, 0, &sys_rm_mem), done);
    gpu_addresses[0] = uvm_rm_mem_get_gpu_va(sys_rm_mem, gpu, is_proxy_va_space);

    if (uvm_conf_computing_mode_enabled(gpu)) {
        for (i = 0; i < iterations; ++i) {
            for (s = 0; s < ARRAY_SIZE(element_sizes); s++) {
                TEST_NV_CHECK_GOTO(test_memcpy_and_memset_inner(gpu,
                                                                gpu_addresses[0],
                                                                gpu_addresses[0],
                                                                size,
                                                                element_sizes[s],
                                                                gpu_verif_addr,
                                                                cpu_verif_addr,
                                                                i),
                                   done);

            }
        }

        // Because gpu_verif_addr is in sysmem, when the Confidential
        // Computing feature is enabled, only the previous cases are valid.
        // TODO: Bug 3839176: the test is partially waived on Confidential
        // Computing because it assumes that the GPU can access system memory
        // without using encryption.
        goto done;
    }

    // Using a page size equal to the allocation size ensures that the UVM
    // memories about to be allocated are physically contiguous. And since the
    // size is a valid GPU page size, the memories can be virtually mapped on
@@ -448,37 +471,22 @@ static NV_STATUS test_memcpy_and_memset(uvm_gpu_t *gpu)
    // Physical address in sysmem
    TEST_NV_CHECK_GOTO(uvm_mem_alloc(&mem_params, &sys_uvm_mem), done);
    TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_phys(sys_uvm_mem, gpu), done);
    gpu_addresses[0] = uvm_mem_gpu_address_physical(sys_uvm_mem, gpu, 0, size);
    gpu_addresses[1] = uvm_mem_gpu_address_physical(sys_uvm_mem, gpu, 0, size);

    // Physical address in vidmem
    mem_params.backing_gpu = gpu;
    TEST_NV_CHECK_GOTO(uvm_mem_alloc(&mem_params, &gpu_uvm_mem), done);
    gpu_addresses[1] = uvm_mem_gpu_address_physical(gpu_uvm_mem, gpu, 0, size);
    gpu_addresses[2] = uvm_mem_gpu_address_physical(gpu_uvm_mem, gpu, 0, size);

    // Virtual address (in UVM's internal address space) backed by vidmem
    TEST_NV_CHECK_GOTO(uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_GPU, size, 0, &gpu_rm_mem), done);
    is_proxy_va_space = false;
    gpu_addresses[2] = uvm_rm_mem_get_gpu_va(gpu_rm_mem, gpu, is_proxy_va_space);
    gpu_addresses[3] = uvm_rm_mem_get_gpu_va(gpu_rm_mem, gpu, is_proxy_va_space);

    // Virtual address (in UVM's internal address space) backed by sysmem
    TEST_NV_CHECK_GOTO(uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_SYS, size, 0, &sys_rm_mem), done);
    gpu_addresses[3] = uvm_rm_mem_get_gpu_va(sys_rm_mem, gpu, is_proxy_va_space);

    for (i = 0; i < iterations; ++i) {
        for (j = 0; j < ARRAY_SIZE(gpu_addresses); ++j) {
            for (k = 0; k < ARRAY_SIZE(gpu_addresses); ++k) {
                for (s = 0; s < ARRAY_SIZE(element_sizes); s++) {
                    // Because gpu_verif_addr is in sysmem, when the Confidential
                    // Computing feature is enabled, only the following cases are
                    // valid.
                    //
                    // TODO: Bug 3839176: the test is partially waived on
                    // Confidential Computing because it assumes that the GPU can
                    // access system memory without using encryption.
                    if (uvm_conf_computing_mode_enabled(gpu) &&
                        !(gpu_addresses[k].is_unprotected && gpu_addresses[j].is_unprotected)) {
                        continue;
                    }
                    TEST_NV_CHECK_GOTO(test_memcpy_and_memset_inner(gpu,
                                                                    gpu_addresses[k],
                                                                    gpu_addresses[j],
@@ -752,7 +760,7 @@ static NV_STATUS alloc_vidmem_protected(uvm_gpu_t *gpu, uvm_mem_t **mem, size_t

    *mem = NULL;

    TEST_NV_CHECK_RET(uvm_mem_alloc_vidmem_protected(size, gpu, mem));
    TEST_NV_CHECK_RET(uvm_mem_alloc_vidmem(size, gpu, mem));
    TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(*mem, gpu), err);
    TEST_NV_CHECK_GOTO(zero_vidmem(*mem), err);

@@ -152,7 +152,7 @@ static NvU32 uvm_channel_update_progress_with_max(uvm_channel_t *channel,
            break;

        if (entry->type == UVM_GPFIFO_ENTRY_TYPE_NORMAL) {
            uvm_pushbuffer_mark_completed(channel->pool->manager->pushbuffer, entry);
            uvm_pushbuffer_mark_completed(channel, entry);
            list_add_tail(&entry->push_info->available_list_node, &channel->available_push_infos);
        }

@@ -272,19 +272,26 @@ static bool try_claim_channel(uvm_channel_t *channel, NvU32 num_gpfifo_entries)

static void unlock_channel_for_push(uvm_channel_t *channel)
{
    if (uvm_channel_is_secure(channel)) {
        NvU32 index = uvm_channel_index_in_pool(channel);
    NvU32 index;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

    if (!uvm_conf_computing_mode_enabled(gpu))
        return;

    index = uvm_channel_index_in_pool(channel);

    uvm_channel_pool_assert_locked(channel->pool);
    UVM_ASSERT(test_bit(index, channel->pool->push_locks));

    __clear_bit(index, channel->pool->push_locks);
    uvm_up_out_of_order(&channel->pool->push_sem);
    }
}

static bool is_channel_locked_for_push(uvm_channel_t *channel)
{
    if (uvm_channel_is_secure(channel))
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

    if (uvm_conf_computing_mode_enabled(gpu))
        return test_bit(uvm_channel_index_in_pool(channel), channel->pool->push_locks);

    // For CE and proxy channels, we always return that the channel is locked,
@@ -295,25 +302,25 @@ static bool is_channel_locked_for_push(uvm_channel_t *channel)

static void lock_channel_for_push(uvm_channel_t *channel)
{
    if (uvm_channel_is_secure(channel)) {
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);
    NvU32 index = uvm_channel_index_in_pool(channel);

    UVM_ASSERT(uvm_conf_computing_mode_enabled(gpu));
    uvm_channel_pool_assert_locked(channel->pool);

    UVM_ASSERT(!test_bit(index, channel->pool->push_locks));

    __set_bit(index, channel->pool->push_locks);
    }
}

static bool test_claim_and_lock_channel(uvm_channel_t *channel, NvU32 num_gpfifo_entries)
{
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);
    NvU32 index = uvm_channel_index_in_pool(channel);

    UVM_ASSERT(uvm_conf_computing_mode_enabled(gpu));
    uvm_channel_pool_assert_locked(channel->pool);

    if (uvm_channel_is_secure(channel) &&
        !test_bit(index, channel->pool->push_locks) &&
        try_claim_channel_locked(channel, num_gpfifo_entries)) {
    if (!test_bit(index, channel->pool->push_locks) && try_claim_channel_locked(channel, num_gpfifo_entries)) {
        lock_channel_for_push(channel);
        return true;
    }
@@ -321,57 +328,15 @@ static bool test_claim_and_lock_channel(uvm_channel_t *channel, NvU32 num_gpfifo
    return false;
}
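// Note on the locking contract assumed above: pool->push_locks is a bitmap
// with one bit per channel in the pool. The bit is set by
// lock_channel_for_push() and cleared by unlock_channel_for_push(), i.e. it
// is held for the lifetime of a single push, and pool->push_sem bounds how
// many channels may be locked concurrently.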

// Reserve a channel in the specified CE pool
static NV_STATUS channel_reserve_in_ce_pool(uvm_channel_pool_t *pool, uvm_channel_t **channel_out)
{
    uvm_channel_t *channel;
    uvm_spin_loop_t spin;

    UVM_ASSERT(pool);
    UVM_ASSERT(uvm_channel_pool_is_ce(pool));

    uvm_for_each_channel_in_pool(channel, pool) {
        // TODO: Bug 1764953: Prefer idle/less busy channels
        if (try_claim_channel(channel, 1)) {
            *channel_out = channel;
            return NV_OK;
        }
    }

    uvm_spin_loop_init(&spin);
    while (1) {
        uvm_for_each_channel_in_pool(channel, pool) {
            NV_STATUS status;

            uvm_channel_update_progress(channel);

            if (try_claim_channel(channel, 1)) {
                *channel_out = channel;

                return NV_OK;
            }

            status = uvm_channel_check_errors(channel);
            if (status != NV_OK)
                return status;

            UVM_SPIN_LOOP(&spin);
        }
    }

    UVM_ASSERT_MSG(0, "Cannot get here?!\n");
    return NV_ERR_GENERIC;
}

// Reserve a channel in the specified secure pool
static NV_STATUS channel_reserve_in_secure_pool(uvm_channel_pool_t *pool, uvm_channel_t **channel_out)
// Reserve a channel in the specified pool. The channel is locked until the push
// ends
static NV_STATUS channel_reserve_and_lock_in_pool(uvm_channel_pool_t *pool, uvm_channel_t **channel_out)
{
    uvm_channel_t *channel;
    uvm_spin_loop_t spin;
    NvU32 index;

    UVM_ASSERT(pool);
    UVM_ASSERT(pool->secure);
    UVM_ASSERT(uvm_conf_computing_mode_enabled(pool->manager->gpu));

    // This semaphore is uvm_up() in unlock_channel_for_push() as part of the
@@ -426,6 +391,51 @@ done:
    return NV_OK;
}

// Reserve a channel in the specified pool
static NV_STATUS channel_reserve_in_pool(uvm_channel_pool_t *pool, uvm_channel_t **channel_out)
{
    uvm_channel_t *channel;
    uvm_spin_loop_t spin;

    UVM_ASSERT(pool);

    if (uvm_conf_computing_mode_enabled(pool->manager->gpu))
        return channel_reserve_and_lock_in_pool(pool, channel_out);

    uvm_for_each_channel_in_pool(channel, pool) {
        // TODO: Bug 1764953: Prefer idle/less busy channels
        if (try_claim_channel(channel, 1)) {
            *channel_out = channel;
            return NV_OK;
        }
    }

    uvm_spin_loop_init(&spin);
    while (1) {
        uvm_for_each_channel_in_pool(channel, pool) {
            NV_STATUS status;

            uvm_channel_update_progress(channel);

            if (try_claim_channel(channel, 1)) {
                *channel_out = channel;

                return NV_OK;
            }

            status = uvm_channel_check_errors(channel);
            if (status != NV_OK)
                return status;

            UVM_SPIN_LOOP(&spin);
        }
    }

    UVM_ASSERT_MSG(0, "Cannot get here?!\n");

    return NV_ERR_GENERIC;
}

NV_STATUS uvm_channel_reserve_type(uvm_channel_manager_t *manager, uvm_channel_type_t type, uvm_channel_t **channel_out)
{
    uvm_channel_pool_t *pool = manager->pool_to_use.default_for_type[type];
@@ -433,10 +443,7 @@ NV_STATUS uvm_channel_reserve_type(uvm_channel_manager_t *manager, uvm_channel_t
    UVM_ASSERT(pool != NULL);
    UVM_ASSERT(type < UVM_CHANNEL_TYPE_COUNT);

    if (pool->secure)
        return channel_reserve_in_secure_pool(pool, channel_out);

    return channel_reserve_in_ce_pool(pool, channel_out);
    return channel_reserve_in_pool(pool, channel_out);
}

NV_STATUS uvm_channel_reserve_gpu_to_gpu(uvm_channel_manager_t *manager,
@@ -452,10 +459,7 @@ NV_STATUS uvm_channel_reserve_gpu_to_gpu(uvm_channel_manager_t *manager,

    UVM_ASSERT(pool->pool_type == UVM_CHANNEL_POOL_TYPE_CE);

    if (pool->secure)
        return channel_reserve_in_secure_pool(pool, channel_out);

    return channel_reserve_in_ce_pool(pool, channel_out);
    return channel_reserve_in_pool(pool, channel_out);
}

NV_STATUS uvm_channel_manager_wait(uvm_channel_manager_t *manager)
@@ -491,7 +495,7 @@ static NvU32 channel_get_available_push_info_index(uvm_channel_t *channel)
    return push_info - channel->push_infos;
}

static void channel_semaphore_gpu_encrypt_payload(uvm_push_t *push, uvm_channel_t *channel, NvU64 semaphore_va)
static void channel_semaphore_gpu_encrypt_payload(uvm_push_t *push, NvU64 semaphore_va)
{
    NvU32 iv_index;
    uvm_gpu_address_t notifier_gpu_va;
@@ -499,12 +503,14 @@ static void channel_semaphore_gpu_encrypt_payload(uvm_push_t *push, uvm_channel_
    uvm_gpu_address_t semaphore_gpu_va;
    uvm_gpu_address_t encrypted_payload_gpu_va;
    uvm_gpu_t *gpu = push->gpu;
    uvm_channel_t *channel = push->channel;
    uvm_gpu_semaphore_t *semaphore = &channel->tracking_sem.semaphore;
    UvmCslIv *iv_cpu_addr = semaphore->conf_computing.ivs;
    NvU32 payload_size = sizeof(*semaphore->payload);
    NvU32 *last_pushed_notifier = &semaphore->conf_computing.last_pushed_notifier;

    UVM_ASSERT(uvm_channel_is_secure_ce(channel));
    UVM_ASSERT(uvm_conf_computing_mode_enabled(gpu));
    UVM_ASSERT(uvm_channel_is_ce(channel));

    encrypted_payload_gpu_va = uvm_rm_mem_get_gpu_va(semaphore->conf_computing.encrypted_payload, gpu, false);
    notifier_gpu_va = uvm_rm_mem_get_gpu_va(semaphore->conf_computing.notifier, gpu, false);
@@ -538,19 +544,21 @@ NV_STATUS uvm_channel_begin_push(uvm_channel_t *channel, uvm_push_t *push)
{
    NV_STATUS status;
    uvm_channel_manager_t *manager;
    uvm_gpu_t *gpu;

    UVM_ASSERT(channel);
    UVM_ASSERT(push);

    manager = channel->pool->manager;

    gpu = uvm_channel_get_gpu(channel);

    // Only SEC2, and WLC with a fixed schedule set up, can use direct push
    // submission. All other cases (including WLC pre-schedule) need to
    // reserve a launch channel that will be used to submit this push
    // indirectly.
    if (uvm_conf_computing_mode_enabled(uvm_channel_get_gpu(channel)) &&
        !(uvm_channel_is_wlc(channel) && uvm_channel_manager_is_wlc_ready(manager)) &&
        !uvm_channel_is_sec2(channel)) {
    if (uvm_conf_computing_mode_enabled(gpu) && uvm_channel_is_ce(channel) &&
        !(uvm_channel_is_wlc(channel) && uvm_channel_manager_is_wlc_ready(manager))) {
        uvm_channel_type_t indirect_channel_type = uvm_channel_manager_is_wlc_ready(manager) ?
                                                   UVM_CHANNEL_TYPE_WLC :
                                                   UVM_CHANNEL_TYPE_SEC2;
@@ -559,9 +567,9 @@ NV_STATUS uvm_channel_begin_push(uvm_channel_t *channel, uvm_push_t *push)
        return status;
    }

    // For secure channels, the channel's lock should have been acquired in
    // uvm_channel_reserve() or channel_reserve_in_secure_pool() before
    // reaching here.
    // When the Confidential Computing feature is enabled, the channel's lock
    // should have already been acquired in uvm_channel_reserve() or
    // channel_reserve_and_lock_in_pool().
    UVM_ASSERT(is_channel_locked_for_push(channel));

    push->channel = channel;
@@ -586,9 +594,8 @@ static void internal_channel_submit_work(uvm_push_t *push, NvU32 push_size, NvU3
    NvU64 *gpfifo_entry;
    NvU64 pushbuffer_va;
    uvm_channel_t *channel = push->channel;
    uvm_channel_manager_t *channel_manager = channel->pool->manager;
    uvm_pushbuffer_t *pushbuffer = channel_manager->pushbuffer;
    uvm_gpu_t *gpu = channel_manager->gpu;
    uvm_pushbuffer_t *pushbuffer = uvm_channel_get_pushbuffer(channel);
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

    BUILD_BUG_ON(sizeof(*gpfifo_entry) != NVB06F_GP_ENTRY__SIZE);
    UVM_ASSERT(!uvm_channel_is_proxy(channel));
@@ -644,12 +651,11 @@ static void proxy_channel_submit_work(uvm_push_t *push, NvU32 push_size)
static void do_semaphore_release(uvm_push_t *push, NvU64 semaphore_va, NvU32 new_payload)
{
    uvm_gpu_t *gpu = uvm_push_get_gpu(push);

    if (uvm_channel_is_ce(push->channel))
        gpu->parent->ce_hal->semaphore_release(push, semaphore_va, new_payload);
    else if (uvm_channel_is_sec2(push->channel))
        gpu->parent->sec2_hal->semaphore_release(push, semaphore_va, new_payload);
    else
        UVM_ASSERT_MSG(0, "Semaphore release on an unsupported channel.\n");
        gpu->parent->sec2_hal->semaphore_release(push, semaphore_va, new_payload);
}

static void uvm_channel_tracking_semaphore_release(uvm_push_t *push, NvU64 semaphore_va, NvU32 new_payload)
@@ -668,8 +674,8 @@ static void uvm_channel_tracking_semaphore_release(uvm_push_t *push, NvU64 semap
    // needs to be scheduled to get an encrypted shadow copy in unprotected
    // sysmem. This allows UVM to later decrypt it and observe the new
    // semaphore value.
    if (uvm_channel_is_secure_ce(push->channel))
        channel_semaphore_gpu_encrypt_payload(push, push->channel, semaphore_va);
    if (uvm_conf_computing_mode_enabled(push->gpu) && uvm_channel_is_ce(push->channel))
        channel_semaphore_gpu_encrypt_payload(push, semaphore_va);
}

static uvm_channel_t *get_paired_channel(uvm_channel_t *channel)
@@ -746,15 +752,12 @@ static void internal_channel_submit_work_wlc(uvm_push_t *push)
    wmb();

    // Ring the WLC doorbell to start processing the above push
    UVM_GPU_WRITE_ONCE(*wlc_channel->channel_info.workSubmissionOffset,
                       wlc_channel->channel_info.workSubmissionToken);
    UVM_GPU_WRITE_ONCE(*wlc_channel->channel_info.workSubmissionOffset, wlc_channel->channel_info.workSubmissionToken);
}

static NV_STATUS internal_channel_submit_work_indirect(uvm_push_t *push,
                                                       NvU32 old_cpu_put,
                                                       NvU32 new_gpu_put)
static void internal_channel_submit_work_indirect_wlc(uvm_push_t *push, NvU32 old_cpu_put, NvU32 new_gpu_put)
{
    uvm_pushbuffer_t *pushbuffer = push->channel->pool->manager->pushbuffer;
    uvm_pushbuffer_t *pushbuffer = uvm_channel_get_pushbuffer(push->channel);
    uvm_gpu_t *gpu = uvm_push_get_gpu(push);

    uvm_push_t indirect_push;
@@ -765,10 +768,207 @@ static NV_STATUS internal_channel_submit_work_indirect(uvm_push_t *push,
    NvU64 push_enc_gpu = uvm_pushbuffer_get_unprotected_gpu_va_for_push(pushbuffer, push);
    void *push_enc_auth_tag;
    uvm_gpu_address_t push_enc_auth_tag_gpu;
    NvU64 gpfifo_gpu = push->channel->channel_info.gpFifoGpuVa + old_cpu_put * sizeof(gpfifo_entry);
    NvU64 gpfifo_gpu_va = push->channel->channel_info.gpFifoGpuVa + old_cpu_put * sizeof(gpfifo_entry);

    UVM_ASSERT(uvm_channel_is_ce(push->channel));
    UVM_ASSERT(uvm_channel_is_wlc(push->launch_channel));

    // WLC submissions are done under channel lock, so there should be no
    // contention to get the right submission order.
    UVM_ASSERT(push->channel->conf_computing.gpu_put == old_cpu_put);

    // This can never stall or return an error. WLC launch after WLC channels
    // are initialized uses private static pushbuffer space, so it neither
    // needs the general PB space nor counts towards max concurrent pushes.
    status = uvm_push_begin_on_reserved_channel(push->launch_channel,
                                                &indirect_push,
                                                "Worklaunch to '%s' via '%s'",
                                                push->channel->name,
                                                push->launch_channel->name);
    UVM_ASSERT(status == NV_OK);


    // Move over the pushbuffer data
    // WLC channels use a static preallocated space for launch auth tags
    push_enc_auth_tag = indirect_push.channel->conf_computing.launch_auth_tag_cpu;
    push_enc_auth_tag_gpu = uvm_gpu_address_virtual(indirect_push.channel->conf_computing.launch_auth_tag_gpu_va);

    uvm_conf_computing_cpu_encrypt(indirect_push.channel,
                                   push_enc_cpu,
                                   push->begin,
                                   NULL,
                                   uvm_push_get_size(push),
                                   push_enc_auth_tag);

    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);

    gpu->parent->ce_hal->decrypt(&indirect_push,
                                 uvm_gpu_address_virtual(uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push)),
                                 uvm_gpu_address_virtual(push_enc_gpu),
                                 uvm_push_get_size(push),
                                 push_enc_auth_tag_gpu);

    gpu->parent->host_hal->set_gpfifo_entry(&gpfifo_entry,
                                            uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push),
                                            uvm_push_get_size(push),
                                            UVM_GPFIFO_SYNC_PROCEED);

    gpu->parent->ce_hal->memset_8(&indirect_push,
                                  uvm_gpu_address_virtual(gpfifo_gpu_va),
                                  gpfifo_entry,
                                  sizeof(gpfifo_entry));

    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU);
    do_semaphore_release(&indirect_push, push->channel->channel_info.gpPutGpuVa, new_gpu_put);

    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU);
    do_semaphore_release(&indirect_push,
                         push->channel->channel_info.workSubmissionOffsetGpuVa,
                         push->channel->channel_info.workSubmissionToken);

    // Ignore the return value of push_wait. It can only fail with a channel
    // error, which will be detected when waiting for the primary push.
    (void)uvm_push_end_and_wait(&indirect_push);

    push->channel->conf_computing.gpu_put = new_gpu_put;
}
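// Net effect of the sequence above: the WLC launch channel CE-decrypts the
// client push into the protected pushbuffer, writes the GPFIFO entry via
// memset_8, advances GPPUT, and rings the doorbell -- all on the GPU, since
// the CPU cannot write protected vidmem directly under Confidential
// Computing.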

static void update_gpput_via_sec2(uvm_push_t *sec2_push, uvm_channel_t *channel, NvU32 new_gpu_put)
{
    uvm_gpu_t *gpu = uvm_push_get_gpu(sec2_push);
    void *gpput_auth_tag_cpu, *gpput_enc_cpu;
    uvm_gpu_address_t gpput_auth_tag_gpu, gpput_enc_gpu;
    NvU32 gpput_scratchpad[UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT / sizeof(new_gpu_put)];

    UVM_ASSERT(uvm_channel_is_sec2(sec2_push->channel));

    gpput_enc_cpu = uvm_push_get_single_inline_buffer(sec2_push,
                                                      UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT,
                                                      UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT,
                                                      &gpput_enc_gpu);
    gpput_auth_tag_cpu = uvm_push_get_single_inline_buffer(sec2_push,
                                                           UVM_CONF_COMPUTING_AUTH_TAG_SIZE,
                                                           UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT,
                                                           &gpput_auth_tag_gpu);

    // Update GPPUT. The update needs a 4B write to a specific offset; however,
    // we can only do 16B-aligned decrypt writes. A poison value is written to
    // all other locations; it is ignored in most locations and overwritten by
    // HW for the GPGET location.
    memset(gpput_scratchpad, 0, sizeof(gpput_scratchpad));
    UVM_ASSERT(sizeof(*gpput_scratchpad) == sizeof(new_gpu_put));
    gpput_scratchpad[(channel->channel_info.gpPutGpuVa % UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT) /
                     sizeof(*gpput_scratchpad)] = new_gpu_put;

    // Set the value of GPGET to be the same as GPPUT. It will be overwritten
    // by HW the next time the GET value changes. UVM never reads GPGET.
    // However, RM does read it when freeing a channel. When this function
    // is called from 'channel_manager_stop_wlc' we set the value of GPGET
    // to the same value as GPPUT. A mismatch between these two values makes
    // RM wait for any "pending" tasks, leading to significant delays in the
    // channel teardown sequence.
    UVM_ASSERT(channel->channel_info.gpPutGpuVa / UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT ==
               channel->channel_info.gpGetGpuVa / UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT);
    gpput_scratchpad[(channel->channel_info.gpGetGpuVa % UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT) /
                     sizeof(*gpput_scratchpad)] = new_gpu_put;

    uvm_conf_computing_cpu_encrypt(sec2_push->channel,
                                   gpput_enc_cpu,
                                   gpput_scratchpad,
                                   NULL,
                                   sizeof(gpput_scratchpad),
                                   gpput_auth_tag_cpu);
    gpu->parent->sec2_hal->decrypt(sec2_push,
                                   UVM_ALIGN_DOWN(channel->channel_info.gpPutGpuVa,
                                                  UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT),
                                   gpput_enc_gpu.address,
                                   sizeof(gpput_scratchpad),
                                   gpput_auth_tag_gpu.address);
}
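// Worked example (illustrative, assuming both alignment constants are 16):
// if gpPutGpuVa ends in 0x8, the 16B-aligned decrypt covers offsets
// 0x0-0xF, so new_gpu_put lands at scratchpad index 0x8 / 4 == 2; the GPGET
// slot, asserted above to share the same 16B line, is patched the same way.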

static void set_gpfifo_via_sec2(uvm_push_t *sec2_push, uvm_channel_t *channel, NvU32 put, NvU64 value)
{
    uvm_gpu_t *gpu = uvm_push_get_gpu(sec2_push);
    void *gpfifo_auth_tag_cpu, *gpfifo_enc_cpu;
    uvm_gpu_address_t gpfifo_auth_tag_gpu, gpfifo_enc_gpu;
    NvU64 gpfifo_gpu = channel->channel_info.gpFifoGpuVa + put * sizeof(value);
    NvU64 gpfifo_scratchpad[2];

    UVM_ASSERT(uvm_channel_is_sec2(sec2_push->channel));

    gpfifo_enc_cpu = uvm_push_get_single_inline_buffer(sec2_push,
                                                       sizeof(gpfifo_scratchpad),
                                                       UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT,
                                                       &gpfifo_enc_gpu);
    gpfifo_auth_tag_cpu = uvm_push_get_single_inline_buffer(sec2_push,
                                                            UVM_CONF_COMPUTING_AUTH_TAG_SIZE,
                                                            UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT,
                                                            &gpfifo_auth_tag_gpu);

    if (IS_ALIGNED(gpfifo_gpu, UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT)) {
        gpfifo_scratchpad[0] = value;

        // Set the value of the odd entry to noop.
        // It will be overwritten when the next entry is submitted.
        gpu->parent->host_hal->set_gpfifo_noop(&gpfifo_scratchpad[1]);
    }
    else {
        uvm_gpfifo_entry_t *previous_gpfifo;

        UVM_ASSERT(put > 0);

        previous_gpfifo = &channel->gpfifo_entries[put - 1];

        if (previous_gpfifo->type == UVM_GPFIFO_ENTRY_TYPE_CONTROL) {
            gpfifo_scratchpad[0] = previous_gpfifo->control_value;
        }
        else {
            uvm_pushbuffer_t *pushbuffer = uvm_channel_get_pushbuffer(channel);
            NvU64 prev_pb_va = uvm_pushbuffer_get_gpu_va_base(pushbuffer) + previous_gpfifo->pushbuffer_offset;

            // Reconstruct the previous gpfifo entry. UVM_GPFIFO_SYNC_WAIT is
            // used only in the static WLC schedule.
            // Overwriting the previous entry with the same value doesn't hurt,
            // whether the previous entry has been processed or not.
            gpu->parent->host_hal->set_gpfifo_entry(&gpfifo_scratchpad[0],
                                                    prev_pb_va,
                                                    previous_gpfifo->pushbuffer_size,
                                                    UVM_GPFIFO_SYNC_PROCEED);
        }

        gpfifo_scratchpad[1] = value;
    }

    uvm_conf_computing_cpu_encrypt(sec2_push->channel,
                                   gpfifo_enc_cpu,
                                   gpfifo_scratchpad,
                                   NULL,
                                   sizeof(gpfifo_scratchpad),
                                   gpfifo_auth_tag_cpu);
    gpu->parent->sec2_hal->decrypt(sec2_push,
                                   UVM_ALIGN_DOWN(gpfifo_gpu, UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT),
                                   gpfifo_enc_gpu.address,
                                   sizeof(gpfifo_scratchpad),
                                   gpfifo_auth_tag_gpu.address);
}
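// Worked example (illustrative): with 8B GPFIFO entries and 16B decrypt
// granularity, put == 5 is odd, so the pair {entries[4], entries[5]} is
// rewritten: entries[4] is reconstructed from the tracked gpfifo_entries[4]
// metadata and entries[5] receives the new value.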

static NV_STATUS internal_channel_submit_work_indirect_sec2(uvm_push_t *push, NvU32 old_cpu_put, NvU32 new_gpu_put)
{
    uvm_pushbuffer_t *pushbuffer = uvm_channel_get_pushbuffer(push->channel);
    uvm_gpu_t *gpu = uvm_push_get_gpu(push);

    uvm_push_t indirect_push;
    NV_STATUS status;
    NvU64 gpfifo_entry;

    void *push_enc_cpu = uvm_pushbuffer_get_unprotected_cpu_va_for_push(pushbuffer, push);
    NvU64 push_enc_gpu = uvm_pushbuffer_get_unprotected_gpu_va_for_push(pushbuffer, push);
    void *push_auth_tag_cpu;
    uvm_gpu_address_t push_auth_tag_gpu;
    uvm_spin_loop_t spin;

    UVM_ASSERT(!uvm_channel_is_sec2(push->channel));
    UVM_ASSERT(uvm_channel_is_ce(push->channel));
    UVM_ASSERT(uvm_channel_is_sec2(push->launch_channel));

    // If old_cpu_put is not equal to the last gpu put, other pushes are
    // pending that need to be submitted. Those pushes' submission will update
@@ -790,60 +990,36 @@ static NV_STATUS internal_channel_submit_work_indirect(


    // Move over the pushbuffer data
    if (uvm_channel_is_sec2(indirect_push.channel)) {
        push_enc_auth_tag = uvm_push_get_single_inline_buffer(&indirect_push,
    push_auth_tag_cpu = uvm_push_get_single_inline_buffer(&indirect_push,
                                                           UVM_CONF_COMPUTING_AUTH_TAG_SIZE,
                                                           UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT,
                                                           &push_enc_auth_tag_gpu);
    }
    else {
        // Auth tags cannot be in protected vidmem.
        // WLC channels use a static preallocated space for launch auth tags
        push_enc_auth_tag = indirect_push.channel->conf_computing.launch_auth_tag_cpu;
        push_enc_auth_tag_gpu = uvm_gpu_address_virtual(indirect_push.channel->conf_computing.launch_auth_tag_gpu_va);
    }
                                                           &push_auth_tag_gpu);

    uvm_conf_computing_cpu_encrypt(indirect_push.channel,
                                   push_enc_cpu,
                                   push->begin,
                                   NULL,
                                   uvm_push_get_size(push),
                                   push_enc_auth_tag);
                                   push_auth_tag_cpu);

    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);

    if (uvm_channel_is_sec2(indirect_push.channel)) {
        gpu->parent->sec2_hal->decrypt(&indirect_push,
                                       uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push),
                                       push_enc_gpu,
                                       uvm_push_get_size(push),
                                       push_enc_auth_tag_gpu.address);
    }
    else {
        gpu->parent->ce_hal->decrypt(&indirect_push,
                                     uvm_gpu_address_virtual(uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push)),
                                     uvm_gpu_address_virtual(push_enc_gpu),
                                     uvm_push_get_size(push),
                                     push_enc_auth_tag_gpu);
    }
                                   push_auth_tag_gpu.address);

    gpu->parent->host_hal->set_gpfifo_entry(&gpfifo_entry,
                                            uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push),
                                            uvm_push_get_size(push),
                                            UVM_GPFIFO_SYNC_PROCEED);

    // TODO: Bug 2649842: RFE - Investigate using a 64-bit semaphore.
    // SEC2 needs encrypt/decrypt to be 16B aligned, but GPFIFO entries are
    // only 8B. Use 2x semaphore release to set the values directly.
    // We could use a single 64-bit release if it were available.
    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);
    do_semaphore_release(&indirect_push, gpfifo_gpu, NvU64_LO32(gpfifo_entry));
    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);
    do_semaphore_release(&indirect_push, gpfifo_gpu + 4, NvU64_HI32(gpfifo_entry));

    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU);
    do_semaphore_release(&indirect_push, push->channel->channel_info.gpPutGpuVa, new_gpu_put);
    set_gpfifo_via_sec2(&indirect_push, push->channel, old_cpu_put, gpfifo_entry);
    update_gpput_via_sec2(&indirect_push, push->channel, new_gpu_put);

    // Ring the doorbell
    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU);
    do_semaphore_release(&indirect_push,
                         push->channel->channel_info.workSubmissionOffsetGpuVa,
@@ -858,6 +1034,57 @@ static NV_STATUS internal_channel_submit_work_indirect(
    return status;
}

// When the Confidential Computing feature is enabled, the CPU is unable to
// access and read the pushbuffer. This is because it is located in the CPR of
// vidmem in this configuration. This function allows UVM to retrieve the
// content of the pushbuffer in an encrypted form for later decryption, hence
// simulating the original access pattern, e.g., reading timestamp semaphores.
// See also: decrypt_push().
static void encrypt_push(uvm_push_t *push)
{
    NvU64 push_protected_gpu_va;
    NvU64 push_unprotected_gpu_va;
    uvm_gpu_address_t auth_tag_gpu_va;
    uvm_channel_t *channel = push->channel;
    uvm_push_crypto_bundle_t *crypto_bundle;
    uvm_gpu_t *gpu = uvm_push_get_gpu(push);
    NvU32 push_size = uvm_push_get_size(push);
    uvm_push_info_t *push_info = uvm_push_info_from_push(push);
    uvm_pushbuffer_t *pushbuffer = uvm_channel_get_pushbuffer(channel);
    unsigned auth_tag_offset = UVM_CONF_COMPUTING_AUTH_TAG_SIZE * push->push_info_index;

    if (!uvm_conf_computing_mode_enabled(gpu))
        return;

    if (!push_info->on_complete)
        return;

    if (!uvm_channel_is_ce(channel))
        return;

    if (push_size == 0)
        return;

    UVM_ASSERT(!uvm_channel_is_wlc(channel));
    UVM_ASSERT(!uvm_channel_is_lcic(channel));
    UVM_ASSERT(channel->conf_computing.push_crypto_bundles != NULL);

    crypto_bundle = channel->conf_computing.push_crypto_bundles + push->push_info_index;
    auth_tag_gpu_va = uvm_rm_mem_get_gpu_va(channel->conf_computing.push_crypto_bundle_auth_tags, gpu, false);
    auth_tag_gpu_va.address += auth_tag_offset;

    crypto_bundle->push_size = push_size;
    push_protected_gpu_va = uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push);
    push_unprotected_gpu_va = uvm_pushbuffer_get_unprotected_gpu_va_for_push(pushbuffer, push);

    uvm_conf_computing_log_gpu_encryption(channel, &crypto_bundle->iv);
    gpu->parent->ce_hal->encrypt(push,
                                 uvm_gpu_address_virtual_unprotected(push_unprotected_gpu_va),
                                 uvm_gpu_address_virtual(push_protected_gpu_va),
                                 push_size,
                                 auth_tag_gpu_va);
}
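// The inverse path (decrypt_push(), referenced above but not part of this
// hunk) would presumably CPU-decrypt crypto_bundle->push_size bytes using
// crypto_bundle->iv once the push completes, restoring the plaintext view a
// non-CC configuration reads directly.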

void uvm_channel_end_push(uvm_push_t *push)
{
    uvm_channel_t *channel = push->channel;
@@ -870,10 +1097,13 @@ void uvm_channel_end_push(uvm_push_t *push)
    NvU32 push_size;
    NvU32 cpu_put;
    NvU32 new_cpu_put;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);
    bool needs_sec2_work_submit = false;

    channel_pool_lock(channel->pool);

    encrypt_push(push);

    new_tracking_value = ++channel->tracking_sem.queued_value;
    new_payload = (NvU32)new_tracking_value;

@@ -882,7 +1112,7 @@ void uvm_channel_end_push(uvm_push_t *push)

    if (uvm_channel_is_wlc(channel) && uvm_channel_manager_is_wlc_ready(channel_manager)) {
        uvm_channel_t *paired_lcic = wlc_get_paired_lcic(channel);
        uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

        gpu->parent->ce_hal->semaphore_reduction_inc(push,
                                                     paired_lcic->channel_info.gpPutGpuVa,
                                                     paired_lcic->num_gpfifo_entries - 1);
@@ -896,7 +1126,7 @@ void uvm_channel_end_push(uvm_push_t *push)
            // pushes. However, direct pushes to WLC can be smaller than this
            // size. This is used e.g. by indirect submission of control
            // gpfifo entries.
            channel_manager->gpu->parent->host_hal->noop(push, UVM_MAX_WLC_PUSH_SIZE - uvm_push_get_size(push));
            gpu->parent->host_hal->noop(push, UVM_MAX_WLC_PUSH_SIZE - uvm_push_get_size(push));
        }
    }

@@ -914,7 +1144,7 @@ void uvm_channel_end_push(uvm_push_t *push)
    // Indirect submission via SEC2/WLC needs pushes to be aligned for
    // encryption/decryption. The pushbuffer_size of this push
    // influences the starting address of the next push.
    if (uvm_conf_computing_mode_enabled(uvm_channel_get_gpu(channel)))
    if (uvm_conf_computing_mode_enabled(gpu))
        entry->pushbuffer_size = UVM_ALIGN_UP(push_size, UVM_CONF_COMPUTING_BUF_ALIGNMENT);
    entry->push_info = &channel->push_infos[push->push_info_index];
    entry->type = UVM_GPFIFO_ENTRY_TYPE_NORMAL;
@@ -928,16 +1158,13 @@ void uvm_channel_end_push(uvm_push_t *push)
    else if (uvm_channel_is_wlc(channel) && uvm_channel_manager_is_wlc_ready(channel_manager)) {
        internal_channel_submit_work_wlc(push);
    }
    else if (uvm_conf_computing_mode_enabled(channel_manager->gpu) && !uvm_channel_is_sec2(channel)) {
    else if (uvm_conf_computing_mode_enabled(gpu) && uvm_channel_is_ce(channel)) {
        if (uvm_channel_manager_is_wlc_ready(channel_manager)) {
            NV_STATUS status = internal_channel_submit_work_indirect(push, cpu_put, new_cpu_put);

            // This codepath should only be used during initialization and thus
            // should NEVER return an error.
            UVM_ASSERT(status == NV_OK);
            internal_channel_submit_work_indirect_wlc(push, cpu_put, new_cpu_put);
        }
        else {
            // submitting via SEC2 starts a push, postpone until this push is ended
            // Submitting via SEC2 starts a push, so postpone until this push
            // is ended
            needs_sec2_work_submit = true;
        }
    }
@@ -963,7 +1190,7 @@ void uvm_channel_end_push(uvm_push_t *push)
    wmb();

    if (needs_sec2_work_submit) {
        NV_STATUS status = internal_channel_submit_work_indirect(push, cpu_put, new_cpu_put);
        NV_STATUS status = internal_channel_submit_work_indirect_sec2(push, cpu_put, new_cpu_put);

        // This codepath should only be used during initialization and thus
        // should NEVER return an error.
@@ -976,12 +1203,13 @@ void uvm_channel_end_push(uvm_push_t *push)

static void submit_ctrl_gpfifo(uvm_channel_t *channel, uvm_gpfifo_entry_t *entry, NvU32 new_cpu_put)
{
    uvm_gpu_t *gpu = channel->pool->manager->gpu;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);
    NvU32 cpu_put = channel->cpu_put;
    NvU64 *gpfifo_entry;

    UVM_ASSERT(entry == &channel->gpfifo_entries[cpu_put]);
    if (uvm_conf_computing_mode_enabled(gpu) && !uvm_channel_is_sec2(channel))

    if (uvm_conf_computing_mode_enabled(gpu) && uvm_channel_is_ce(channel))
        return;

    gpfifo_entry = (NvU64*)channel->channel_info.gpFifoEntries + cpu_put;
@@ -1007,9 +1235,8 @@ static NV_STATUS submit_ctrl_gpfifo_indirect(uvm_channel_t *channel,
    uvm_channel_type_t indirect_channel_type = uvm_channel_manager_is_wlc_ready(channel->pool->manager) ?
                                               UVM_CHANNEL_TYPE_WLC :
                                               UVM_CHANNEL_TYPE_SEC2;
    NvU64 gpfifo_gpu = channel->channel_info.gpFifoGpuVa + (old_cpu_put * sizeof(entry->control_value));

    UVM_ASSERT(!uvm_channel_is_sec2(channel));
    UVM_ASSERT(uvm_channel_is_ce(channel));

    // If the old_cpu_put is not equal to the last gpu put,
    // another push (or pushes) is pending that needs to be submitted.
@@ -1026,17 +1253,26 @@ static NV_STATUS submit_ctrl_gpfifo_indirect(uvm_channel_t *channel,
    if (status != NV_OK)
        return status;

    // TODO: Bug 2649842: RFE - Investigate using a 64-bit semaphore.
    // SEC2 needs encrypt/decrypt to be 16B aligned, but GPFIFO entries are
    // only 8B. Use 2x semaphore release to set the values directly.
    // One 64-bit semaphore release can be used instead once implemented.
    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);
    do_semaphore_release(&indirect_push, gpfifo_gpu, NvU64_LO32(entry->control_value));
    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);
    do_semaphore_release(&indirect_push, gpfifo_gpu + 4, NvU64_HI32(entry->control_value));
    if (uvm_channel_is_sec2(indirect_push.channel)) {
        set_gpfifo_via_sec2(&indirect_push, channel, old_cpu_put, entry->control_value);
        update_gpput_via_sec2(&indirect_push, channel, new_gpu_put);
    } else {
        uvm_gpu_t *gpu = uvm_push_get_gpu(&indirect_push);
        NvU64 gpfifo_gpu_va = channel->channel_info.gpFifoGpuVa + (old_cpu_put * sizeof(entry->control_value));

        gpu->parent->ce_hal->memset_8(&indirect_push,
                                      uvm_gpu_address_virtual(gpfifo_gpu_va),
                                      entry->control_value,
                                      sizeof(entry->control_value));

        uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU);
        do_semaphore_release(&indirect_push, channel->channel_info.gpPutGpuVa, new_gpu_put);
    }

    uvm_push_set_flag(&indirect_push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU);
    do_semaphore_release(&indirect_push,
                         channel->channel_info.workSubmissionOffsetGpuVa,
                         channel->channel_info.workSubmissionToken);

    status = uvm_push_end_and_wait(&indirect_push);
    if (status != NV_OK)
@@ -1044,9 +1280,6 @@ static NV_STATUS submit_ctrl_gpfifo_indirect(uvm_channel_t *channel,

    channel->conf_computing.gpu_put = new_gpu_put;

    // The above SEC2 work transferred everything
    // Ring the doorbell
    UVM_GPU_WRITE_ONCE(*channel->channel_info.workSubmissionOffset, channel->channel_info.workSubmissionToken);
    return NV_OK;
}
||||
|
||||
@ -1059,6 +1292,7 @@ static void write_ctrl_gpfifo(uvm_channel_t *channel, NvU64 ctrl_fifo_entry_valu
    NvU32 cpu_put;
    NvU32 new_cpu_put;
    bool needs_indirect_submit = false;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

    channel_pool_lock(channel->pool);

@ -1081,7 +1315,7 @@ static void write_ctrl_gpfifo(uvm_channel_t *channel, NvU64 ctrl_fifo_entry_valu
    --channel->current_gpfifo_count;

    submit_ctrl_gpfifo(channel, entry, new_cpu_put);
    if (uvm_conf_computing_mode_enabled(channel->pool->manager->gpu) && !uvm_channel_is_sec2(channel))
    if (uvm_conf_computing_mode_enabled(gpu) && uvm_channel_is_ce(channel))
        needs_indirect_submit = true;

    channel->cpu_put = new_cpu_put;
@ -1154,16 +1388,15 @@ NV_STATUS uvm_channel_write_ctrl_gpfifo(uvm_channel_t *channel, NvU64 ctrl_fifo_
    return NV_OK;
}

static NV_STATUS uvm_channel_reserve_secure(uvm_channel_t *channel, NvU32 num_gpfifo_entries)
static NV_STATUS channel_reserve_and_lock(uvm_channel_t *channel, NvU32 num_gpfifo_entries)
{
    uvm_spin_loop_t spin;
    NV_STATUS status = NV_OK;
    uvm_channel_pool_t *pool = channel->pool;

    // This semaphore is uvm_up() in unlock_channel_for_push() as part of the
    // uvm_channel_end_push() routine. Note that, unlike in
    // channel_reserve_in_secure_pool, we cannot pick an unlocked channel from
    // the secure pool, even when there is one available and *channel is locked.
    // channel_reserve_and_lock_in_pool, we cannot pick an unlocked channel from
    // the pool, even when there is one available and *channel is locked.
    // Not a concern given that uvm_channel_reserve() is not the common case for
    // channel reservation; it is only used for channel initialization, GPFIFO
    // control work submission, and testing.
@ -1178,6 +1411,8 @@ static NV_STATUS uvm_channel_reserve_secure(uvm_channel_t *channel, NvU32 num_gp

    uvm_spin_loop_init(&spin);
    while (1) {
        NV_STATUS status;

        uvm_channel_update_progress(channel);

        channel_pool_lock(pool);

@ -1205,9 +1440,10 @@ NV_STATUS uvm_channel_reserve(uvm_channel_t *channel, NvU32 num_gpfifo_entries)
{
    NV_STATUS status = NV_OK;
    uvm_spin_loop_t spin;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

    if (uvm_channel_is_secure(channel))
        return uvm_channel_reserve_secure(channel, num_gpfifo_entries);
    if (uvm_conf_computing_mode_enabled(gpu))
        return channel_reserve_and_lock(channel, num_gpfifo_entries);

    if (try_claim_channel(channel, num_gpfifo_entries))
        return NV_OK;
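channel_reserve_and_lock() pairs the pool's counting semaphore with a spin-and-claim loop under the pool lock. A toy standalone model of that shape, using pthread and POSIX semaphore stand-ins for the UVM primitives (all names here are hypothetical):

#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdbool.h>

struct toy_channel {
    pthread_mutex_t *pool_lock;   // shared pool lock
    sem_t *push_sem;              // pool-wide counting semaphore
    bool locked;                  // channel is locked for an ongoing push
    unsigned free_entries;        // available GPFIFO entries
};

// Toy shape of channel_reserve_and_lock(): take the pool's counting semaphore
// first, then spin until this specific channel is unlocked and has room.
static int toy_reserve_and_lock(struct toy_channel *ch, unsigned num_entries)
{
    sem_wait(ch->push_sem);       // released again by the end-of-push path

    for (;;) {
        pthread_mutex_lock(ch->pool_lock);
        if (!ch->locked && ch->free_entries >= num_entries) {
            ch->free_entries -= num_entries;
            ch->locked = true;    // unlocked later when the push completes
            pthread_mutex_unlock(ch->pool_lock);
            return 0;
        }
        pthread_mutex_unlock(ch->pool_lock);
        sched_yield();            // stands in for the uvm spin-loop iteration
    }
}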
@ -1347,8 +1583,10 @@ NvU64 uvm_channel_update_completed_value(uvm_channel_t *channel)
static NV_STATUS csl_init(uvm_channel_t *channel)
{
    NV_STATUS status;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

    UVM_ASSERT(uvm_conf_computing_mode_enabled(gpu));

    UVM_ASSERT(uvm_channel_is_secure(channel));
    uvm_mutex_init(&channel->csl.ctx_lock, UVM_LOCK_ORDER_LEAF);

    status = uvm_rm_locked_call(nvUvmInterfaceCslInitContext(&channel->csl.ctx, channel->handle));

@ -1358,7 +1596,7 @@ static NV_STATUS csl_init(uvm_channel_t *channel)
    else {
        UVM_DBG_PRINT("nvUvmInterfaceCslInitContext() failed: %s, GPU %s\n",
                      nvstatusToString(status),
                      uvm_gpu_name(channel->pool->manager->gpu));
                      uvm_gpu_name(gpu));
    }

    return status;

@ -1378,16 +1616,23 @@ static void csl_destroy(uvm_channel_t *channel)

static void free_conf_computing_buffers(uvm_channel_t *channel)
{
    UVM_ASSERT(uvm_channel_is_secure_ce(channel));
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

    UVM_ASSERT(uvm_conf_computing_mode_enabled(gpu));
    UVM_ASSERT(uvm_channel_is_ce(channel));

    uvm_rm_mem_free(channel->conf_computing.static_pb_protected_vidmem);
    uvm_rm_mem_free(channel->conf_computing.static_pb_unprotected_sysmem);
    uvm_rm_mem_free(channel->conf_computing.static_notifier_unprotected_sysmem);
    uvm_rm_mem_free(channel->conf_computing.push_crypto_bundle_auth_tags);
    uvm_kvfree(channel->conf_computing.static_pb_protected_sysmem);
    uvm_kvfree(channel->conf_computing.push_crypto_bundles);
    channel->conf_computing.static_pb_protected_vidmem = NULL;
    channel->conf_computing.static_pb_unprotected_sysmem = NULL;
    channel->conf_computing.static_notifier_unprotected_sysmem = NULL;
    channel->conf_computing.push_crypto_bundle_auth_tags = NULL;
    channel->conf_computing.static_pb_protected_sysmem = NULL;
    channel->conf_computing.push_crypto_bundles = NULL;

    uvm_rm_mem_free(channel->tracking_sem.semaphore.conf_computing.encrypted_payload);
    uvm_rm_mem_free(channel->tracking_sem.semaphore.conf_computing.notifier);

@ -1402,10 +1647,12 @@ static void free_conf_computing_buffers(uvm_channel_t *channel)
static NV_STATUS alloc_conf_computing_buffers_semaphore(uvm_channel_t *channel)
{
    uvm_gpu_semaphore_t *semaphore = &channel->tracking_sem.semaphore;
    uvm_gpu_t *gpu = channel->pool->manager->gpu;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);
    NV_STATUS status;

    UVM_ASSERT(uvm_channel_is_secure_ce(channel));
    UVM_ASSERT(uvm_conf_computing_mode_enabled(gpu));
    UVM_ASSERT(uvm_channel_is_ce(channel));

    status = uvm_rm_mem_alloc_and_map_cpu(gpu,
                                          UVM_RM_MEM_TYPE_SYS,
                                          sizeof(semaphore->conf_computing.last_pushed_notifier),

@ -1444,18 +1691,22 @@ static NV_STATUS alloc_conf_computing_buffers_semaphore(uvm_channel_t *channel)

static NV_STATUS alloc_conf_computing_buffers_wlc(uvm_channel_t *channel)
{
    uvm_gpu_t *gpu = channel->pool->manager->gpu;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);
    size_t aligned_wlc_push_size = UVM_ALIGN_UP(UVM_MAX_WLC_PUSH_SIZE, UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT);
    NV_STATUS status = uvm_rm_mem_alloc_and_map_cpu(gpu,
                                                    UVM_RM_MEM_TYPE_SYS,
                                                    UVM_MAX_WLC_PUSH_SIZE + UVM_CONF_COMPUTING_AUTH_TAG_SIZE * 2,
                                                    aligned_wlc_push_size + UVM_CONF_COMPUTING_AUTH_TAG_SIZE * 2,
                                                    PAGE_SIZE,
                                                    &channel->conf_computing.static_pb_unprotected_sysmem);
    if (status != NV_OK)
        return status;

    // Both pushes will be targets for SEC2 decrypt operations and have to
    // be aligned for SEC2. The first push location will also be a target
    // for CE decrypt operation and has to be aligned for CE decrypt.
    status = uvm_rm_mem_alloc(gpu,
                              UVM_RM_MEM_TYPE_GPU,
                              UVM_MAX_WLC_PUSH_SIZE * 2,
                              UVM_ALIGN_UP(UVM_MAX_WLC_PUSH_SIZE, UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT) * 2,
                              UVM_CONF_COMPUTING_BUF_ALIGNMENT,
                              &channel->conf_computing.static_pb_protected_vidmem);
    if (status != NV_OK)
@ -1464,16 +1715,16 @@ static NV_STATUS alloc_conf_computing_buffers_wlc(uvm_channel_t *channel)
    channel->conf_computing.static_pb_unprotected_sysmem_cpu =
        uvm_rm_mem_get_cpu_va(channel->conf_computing.static_pb_unprotected_sysmem);
    channel->conf_computing.static_pb_unprotected_sysmem_auth_tag_cpu =
        (char*)channel->conf_computing.static_pb_unprotected_sysmem_cpu + UVM_MAX_WLC_PUSH_SIZE;
        (char*)channel->conf_computing.static_pb_unprotected_sysmem_cpu + aligned_wlc_push_size;

    // The location below is only used for launch pushes but reuses
    // the same sysmem allocation
    channel->conf_computing.launch_auth_tag_cpu =
        (char*)channel->conf_computing.static_pb_unprotected_sysmem_cpu +
        UVM_MAX_WLC_PUSH_SIZE + UVM_CONF_COMPUTING_AUTH_TAG_SIZE;
        aligned_wlc_push_size + UVM_CONF_COMPUTING_AUTH_TAG_SIZE;
    channel->conf_computing.launch_auth_tag_gpu_va =
        uvm_rm_mem_get_gpu_uvm_va(channel->conf_computing.static_pb_unprotected_sysmem, gpu) +
        UVM_MAX_WLC_PUSH_SIZE + UVM_CONF_COMPUTING_AUTH_TAG_SIZE;
        aligned_wlc_push_size + UVM_CONF_COMPUTING_AUTH_TAG_SIZE;

    channel->conf_computing.static_pb_protected_sysmem = uvm_kvmalloc(UVM_MAX_WLC_PUSH_SIZE + UVM_PAGE_SIZE_4K);
    if (!channel->conf_computing.static_pb_protected_sysmem)
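The aligned_wlc_push_size change above packs the unprotected sysmem buffer as an aligned push region followed by two auth tags. A small self-checking sketch of the same align-up arithmetic, with assumed stand-in sizes:

#include <assert.h>
#include <stddef.h>

// Toy layout mirroring the WLC sysmem allocation above: the push region is
// padded up to the auth-tag alignment so that both tags placed after it start
// aligned. The sizes are illustrative, not the driver's real constants.
#define TOY_ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
    size_t push_size = 360;    // stand-in for UVM_MAX_WLC_PUSH_SIZE
    size_t tag_align = 16;     // stand-in for UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT
    size_t tag_size = 32;      // stand-in for UVM_CONF_COMPUTING_AUTH_TAG_SIZE

    size_t aligned_push = TOY_ALIGN_UP(push_size, tag_align);   // 368
    size_t auth_tag_offset = aligned_push;                      // first tag
    size_t launch_tag_offset = aligned_push + tag_size;         // second tag

    assert(auth_tag_offset % tag_align == 0);
    assert(launch_tag_offset % tag_align == 0);
    return 0;
}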
@ -1484,7 +1735,7 @@ static NV_STATUS alloc_conf_computing_buffers_wlc(uvm_channel_t *channel)

static NV_STATUS alloc_conf_computing_buffers_lcic(uvm_channel_t *channel)
{
    uvm_gpu_t *gpu = channel->pool->manager->gpu;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);
    const size_t notifier_size = sizeof(*channel->conf_computing.static_notifier_entry_unprotected_sysmem_cpu);
    NV_STATUS status = uvm_rm_mem_alloc_and_map_cpu(gpu,
                                                    UVM_RM_MEM_TYPE_SYS,

@ -1519,21 +1770,44 @@ static NV_STATUS alloc_conf_computing_buffers_lcic(uvm_channel_t *channel)
static NV_STATUS alloc_conf_computing_buffers(uvm_channel_t *channel)
{
    NV_STATUS status;
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

    UVM_ASSERT(uvm_conf_computing_mode_enabled(gpu));
    UVM_ASSERT(uvm_channel_is_ce(channel));

    status = alloc_conf_computing_buffers_semaphore(channel);
    if (status != NV_OK)
        return status;

    if (uvm_channel_is_wlc(channel))
    if (uvm_channel_is_wlc(channel)) {
        status = alloc_conf_computing_buffers_wlc(channel);
    else if (uvm_channel_is_lcic(channel))
    }
    else if (uvm_channel_is_lcic(channel)) {
        status = alloc_conf_computing_buffers_lcic(channel);
    }
    else {
        void *push_crypto_bundles = uvm_kvmalloc_zero(sizeof(*channel->conf_computing.push_crypto_bundles) *
                                                      channel->num_gpfifo_entries);

        if (push_crypto_bundles == NULL)
            return NV_ERR_NO_MEMORY;

        channel->conf_computing.push_crypto_bundles = push_crypto_bundles;

        status = uvm_rm_mem_alloc_and_map_cpu(gpu,
                                              UVM_RM_MEM_TYPE_SYS,
                                              channel->num_gpfifo_entries * UVM_CONF_COMPUTING_AUTH_TAG_SIZE,
                                              UVM_CONF_COMPUTING_BUF_ALIGNMENT,
                                              &channel->conf_computing.push_crypto_bundle_auth_tags);
    }

    return status;
}

static void channel_destroy(uvm_channel_pool_t *pool, uvm_channel_t *channel)
{
    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);

    UVM_ASSERT(pool->num_channels > 0);

    if (channel->tracking_sem.queued_value > 0) {

@ -1557,9 +1831,10 @@ static void channel_destroy(uvm_channel_pool_t *pool, uvm_channel_t *channel)

    uvm_kvfree(channel->gpfifo_entries);

    if (uvm_channel_is_secure(channel)) {
    if (uvm_conf_computing_mode_enabled(gpu)) {
        csl_destroy(channel);
        if (uvm_channel_is_secure_ce(channel))

        if (uvm_channel_is_ce(channel))
            free_conf_computing_buffers(channel);
    }

@ -1646,8 +1921,6 @@ static NV_STATUS internal_channel_create(uvm_channel_t *channel)
        channel_alloc_params.gpPutLoc = UVM_BUFFER_LOCATION_SYS;
    }

    channel_alloc_params.secure = channel->pool->secure;

    status = uvm_rm_locked_call(nvUvmInterfaceChannelAllocate(channel_get_tsg(channel),
                                                              &channel_alloc_params,
                                                              &channel->handle,

@ -1669,8 +1942,7 @@ static NV_STATUS internal_channel_create(uvm_channel_t *channel)
                  channel_info->hwChannelId,
                  uvm_channel_is_sec2(channel) ? "SEC2" :
                  uvm_channel_is_wlc(channel) ? "WLC" :
                  uvm_channel_is_lcic(channel) ? "LCIC" :
                  uvm_channel_is_secure(channel) ? "CE (secure)" : "CE",
                  uvm_channel_is_lcic(channel) ? "LCIC" : "CE",
                  channel->pool->engine_index);

    return NV_OK;

@ -1722,7 +1994,7 @@ static NV_STATUS channel_create(uvm_channel_pool_t *pool, uvm_channel_t *channel
    channel->tools.pending_event_count = 0;
    INIT_LIST_HEAD(&channel->tools.channel_list_node);

    if (uvm_conf_computing_mode_enabled(gpu) && !uvm_channel_is_sec2(channel))
    if (uvm_conf_computing_mode_enabled(gpu) && uvm_channel_is_ce(channel))
        semaphore_pool = gpu->secure_semaphore_pool;

    status = uvm_gpu_tracking_semaphore_alloc(semaphore_pool, &channel->tracking_sem);

@ -1748,7 +2020,7 @@ static NV_STATUS channel_create(uvm_channel_pool_t *pool, uvm_channel_t *channel
        goto error;
    }

    if (uvm_channel_is_secure(channel)) {
    if (uvm_conf_computing_mode_enabled(gpu)) {
        status = csl_init(channel);
        if (status != NV_OK)
            goto error;

@ -1816,7 +2088,7 @@ static NV_STATUS channel_init(uvm_channel_t *channel)

    if (uvm_gpu_has_pushbuffer_segments(gpu)) {
        NvU64 gpfifo_entry;
        uvm_pushbuffer_t *pushbuffer = channel->pool->manager->pushbuffer;
        uvm_pushbuffer_t *pushbuffer = uvm_channel_get_pushbuffer(channel);
        NvU64 pb_base = uvm_pushbuffer_get_gpu_va_base(pushbuffer);

        if (uvm_channel_is_sec2(channel))

@ -1836,10 +2108,8 @@ static NV_STATUS channel_init(uvm_channel_t *channel)

    if (uvm_channel_is_ce(channel))
        gpu->parent->ce_hal->init(&push);
    else if (uvm_channel_is_sec2(channel))
        gpu->parent->sec2_hal->init(&push);
    else
        UVM_ASSERT_MSG(0, "Unknown channel type!");
        gpu->parent->sec2_hal->init(&push);

    gpu->parent->host_hal->init(&push);

@ -1894,11 +2164,6 @@ static unsigned channel_pool_type_num_tsgs(uvm_channel_pool_type_t pool_type)
    return 1;
}

static bool pool_type_is_valid(uvm_channel_pool_type_t pool_type)
{
    return (is_power_of_2(pool_type) && (pool_type < UVM_CHANNEL_POOL_TYPE_MASK));
}

static UVM_GPU_CHANNEL_ENGINE_TYPE pool_type_to_engine_type(uvm_channel_pool_type_t pool_type)
{
    if (pool_type == UVM_CHANNEL_POOL_TYPE_SEC2)

@ -1970,7 +2235,7 @@ static NV_STATUS channel_pool_add(uvm_channel_manager_t *channel_manager,
    unsigned num_tsgs;
    uvm_channel_pool_t *pool;

    UVM_ASSERT(pool_type_is_valid(pool_type));
    UVM_ASSERT(uvm_pool_type_is_valid(pool_type));

    pool = channel_manager->channel_pools + channel_manager->num_channel_pools;
    channel_manager->num_channel_pools++;
@ -2001,10 +2266,10 @@ static NV_STATUS channel_pool_add(uvm_channel_manager_t *channel_manager,
    num_channels = channel_pool_type_num_channels(pool_type);
    UVM_ASSERT(num_channels <= UVM_CHANNEL_MAX_NUM_CHANNELS_PER_POOL);

    if (pool->secure) {
    if (uvm_conf_computing_mode_enabled(channel_manager->gpu)) {
        // Use a different lock order for SEC2 and WLC channels.
        // This allows reserving a SEC2 or WLC channel for indirect work
        // submission while holding a reservation for a secure channel.
        // submission while holding a reservation for a channel.
        uvm_lock_order_t order = uvm_channel_pool_is_sec2(pool) ? UVM_LOCK_ORDER_CSL_SEC2_PUSH :
                                 (uvm_channel_pool_is_wlc(pool) ? UVM_LOCK_ORDER_CSL_WLC_PUSH :
                                  UVM_LOCK_ORDER_CSL_PUSH);
@ -2038,23 +2303,6 @@ static NV_STATUS channel_pool_add(uvm_channel_manager_t *channel_manager,
    return status;
}

static NV_STATUS channel_pool_add_secure(uvm_channel_manager_t *channel_manager,
                                         uvm_channel_pool_type_t pool_type,
                                         unsigned engine_index,
                                         uvm_channel_pool_t **pool_out)
{
    uvm_channel_pool_t *pool = channel_manager->channel_pools + channel_manager->num_channel_pools;

    pool->secure = true;
    return channel_pool_add(channel_manager, pool_type, engine_index, pool_out);
}

bool uvm_channel_type_requires_secure_pool(uvm_gpu_t *gpu, uvm_channel_type_t channel_type)
{
    // For now, all channels are secure channels
    return true;
}

static bool ce_usable_for_channel_type(uvm_channel_type_t type, const UvmGpuCopyEngineCaps *cap)
{
    if (!cap->supported || cap->grce)

@ -2202,13 +2450,6 @@ static NV_STATUS pick_ce_for_channel_type(uvm_channel_manager_t *manager,
        if (!ce_usable_for_channel_type(type, cap))
            continue;

        if (uvm_conf_computing_mode_is_hcc(manager->gpu)) {
            // All usable CEs are secure
            UVM_ASSERT(cap->secure);

            // Multi-PCE LCEs are disallowed
            UVM_ASSERT(hweight32(cap->cePceMask) == 1);
        }
        __set_bit(i, manager->ce_mask);

        if (best_ce == UVM_COPY_ENGINE_COUNT_MAX) {

@ -2264,7 +2505,7 @@ out:
    return status;
}

// Return the non-secure pool corresponding to the given CE index
// Return the pool corresponding to the given CE index
//
// This function cannot be used to access the proxy pool in SR-IOV heavy.
static uvm_channel_pool_t *channel_manager_ce_pool(uvm_channel_manager_t *manager, NvU32 ce)

@ -2444,7 +2685,15 @@ static void init_channel_manager_conf(uvm_channel_manager_t *manager)
    // access through the bus, because no cache coherence message is exchanged.
    if (uvm_gpu_is_coherent(gpu->parent)) {
        manager->conf.gpfifo_loc = UVM_BUFFER_LOCATION_SYS;
        manager->conf.gpput_loc = UVM_BUFFER_LOCATION_SYS;

        // On GPUs with limited ESCHED addressing range, e.g., Volta on P9, RM
        // cannot guarantee that USERD/GPPUT physical address is accessible by
        // ESCHED. We set GPPUT location to vidmem where physical addresses are
        // all accessible by ESCHED. We use the max_host_va as a proxy for the
        // PA limitation, since all architectures with 40b VA limits also have
        // 40b PA limits.
        manager->conf.gpput_loc = gpu->parent->max_host_va == (1ull << 40) ? UVM_BUFFER_LOCATION_VID :
                                                                             UVM_BUFFER_LOCATION_SYS;
    }
    else {
        // By default we place GPFIFO and GPPUT on vidmem as it potentially has

@ -2467,24 +2716,17 @@ static void init_channel_manager_conf(uvm_channel_manager_t *manager)
static unsigned channel_manager_get_max_pools(uvm_channel_manager_t *manager)
{
    unsigned num_channel_pools;
    unsigned num_used_ce = bitmap_weight(manager->ce_mask, UVM_COPY_ENGINE_COUNT_MAX);

    // Create one CE channel pool per usable CE
    num_channel_pools = num_used_ce;
    num_channel_pools = bitmap_weight(manager->ce_mask, UVM_COPY_ENGINE_COUNT_MAX);

    // CE proxy channel pool.
    if (uvm_gpu_uses_proxy_channel_pool(manager->gpu))
        num_channel_pools++;

    if (uvm_conf_computing_mode_enabled(manager->gpu)) {

        // Create one CE secure channel pool per usable CE
        if (uvm_conf_computing_mode_is_hcc(manager->gpu))
            num_channel_pools += num_used_ce;

        // SEC2 pool, WLC pool, LCIC pool
        if (uvm_conf_computing_mode_enabled(manager->gpu))
            num_channel_pools += 3;
    }

    return num_channel_pools;
}
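After this simplification the pool count is a straight sum. A toy recomputation of channel_manager_get_max_pools() under assumed inputs:

#include <assert.h>

// Toy recount of channel_manager_get_max_pools() after this change: one pool
// per usable CE, plus an optional proxy pool, plus SEC2/WLC/LCIC when the
// Confidential Computing mode is enabled. Inputs are illustrative.
static unsigned toy_max_pools(unsigned num_used_ce, int uses_proxy, int conf_computing)
{
    unsigned pools = num_used_ce;   // one CE pool per usable copy engine

    if (uses_proxy)
        pools++;                    // SR-IOV heavy proxy pool

    if (conf_computing)
        pools += 3;                 // SEC2 + WLC + LCIC

    return pools;
}

int main(void)
{
    assert(toy_max_pools(4, 0, 1) == 7);   // 4 CE pools + SEC2 + WLC + LCIC
    assert(toy_max_pools(2, 1, 0) == 3);   // 2 CE pools + proxy
    return 0;
}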
@ -2516,38 +2758,6 @@ static NV_STATUS channel_manager_create_ce_pools(uvm_channel_manager_t *manager,
    return NV_OK;
}

static NV_STATUS channel_manager_create_ce_secure_pools(uvm_channel_manager_t *manager, unsigned *preferred_ce)
{
    unsigned ce;

    if (!uvm_conf_computing_mode_is_hcc(manager->gpu))
        return NV_OK;

    for_each_set_bit(ce, manager->ce_mask, UVM_COPY_ENGINE_COUNT_MAX) {
        NV_STATUS status;
        unsigned type;
        uvm_channel_pool_t *pool = NULL;

        status = channel_pool_add_secure(manager, UVM_CHANNEL_POOL_TYPE_CE, ce, &pool);
        if (status != NV_OK)
            return status;

        for (type = 0; type < UVM_CHANNEL_TYPE_CE_COUNT; type++) {
            unsigned preferred = preferred_ce[type];

            if (preferred != ce)
                continue;

            if (uvm_channel_type_requires_secure_pool(manager->gpu, type)) {
                UVM_ASSERT(manager->pool_to_use.default_for_type[type] == NULL);
                manager->pool_to_use.default_for_type[type] = pool;
            }
        }
    }

    return NV_OK;
}

static NV_STATUS setup_wlc_schedule(uvm_channel_t *wlc)
{
    uvm_gpu_t *gpu = uvm_channel_get_gpu(wlc);

@ -2576,7 +2786,7 @@ static NV_STATUS setup_wlc_schedule(uvm_channel_t *wlc)
    // "decrypt_push" represents WLC decrypt push, constructed using fake_push.
    // Copied to wlc_pb_base + UVM_MAX_WLC_PUSH_SIZE, as the second of the two
    // pushes that make the WLC fixed schedule.
    NvU64 decrypt_push_protected_gpu = protected_vidmem + UVM_MAX_WLC_PUSH_SIZE;
    NvU64 decrypt_push_protected_gpu = UVM_ALIGN_UP(protected_vidmem + UVM_MAX_WLC_PUSH_SIZE, UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT);
    NvU64 decrypt_push_unprotected_gpu = unprotected_sysmem_gpu + gpfifo_size;
    void *decrypt_push_unprotected_cpu = (char*)gpfifo_unprotected_cpu + gpfifo_size;

@ -2587,7 +2797,7 @@ static NV_STATUS setup_wlc_schedule(uvm_channel_t *wlc)
    BUILD_BUG_ON(sizeof(*wlc_gpfifo_entries) != sizeof(*wlc->channel_info.gpFifoEntries));

    UVM_ASSERT(uvm_channel_is_wlc(wlc));
    UVM_ASSERT(tag_offset == UVM_MAX_WLC_PUSH_SIZE);
    UVM_ASSERT(tag_offset == UVM_ALIGN_UP(UVM_MAX_WLC_PUSH_SIZE, UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT));

    // The WLC schedule consists of two parts, so the number of entries needs
    // to be even. This also guarantees that the size is 16B aligned.
@ -2692,11 +2902,9 @@ static NV_STATUS setup_wlc_schedule(uvm_channel_t *wlc)

    // Prime the WLC by setting "PUT" two steps ahead. Reuse the current
    // cpu_put value that was used during channel initialization.
    // Don't update wlc->cpu_put, it will be used to track
    // submitted pushes as any other channel.
    do_semaphore_release(&sec2_push,
                         wlc->channel_info.gpPutGpuVa,
                         (wlc->cpu_put + 2) % wlc->num_gpfifo_entries);
    // Don't update wlc->cpu_put, it will be used to track submitted pushes
    // as any other channel.
    update_gpput_via_sec2(&sec2_push, wlc, (wlc->cpu_put + 2) % wlc->num_gpfifo_entries);

    status = uvm_push_end_and_wait(&sec2_push);

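The priming above parks PUT two entries ahead of cpu_put, modulo the ring size. A tiny self-checking sketch of that arithmetic:

#include <assert.h>

// Toy check of the WLC priming arithmetic: PUT lands two entries ahead of
// the current cpu_put, wrapping around the ring. The ring size here is
// illustrative; the real one comes from num_gpfifo_entries.
int main(void)
{
    unsigned num_gpfifo_entries = 4;   // must be even for the WLC schedule
    unsigned cpu_put = 3;

    unsigned primed_put = (cpu_put + 2) % num_gpfifo_entries;
    assert(primed_put == 1);           // wraps past the end of the ring
    return 0;
}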
@ -2877,6 +3085,64 @@ static NV_STATUS channel_manager_setup_wlc_lcic(uvm_channel_pool_t *wlc_pool, uv
    return NV_OK;
}

static NV_STATUS channel_manager_create_conf_computing_pools(uvm_channel_manager_t *manager, unsigned *preferred_ce)
{
    NV_STATUS status;
    unsigned wlc_lcic_ce_index;
    uvm_channel_pool_t *sec2_pool = NULL;
    uvm_channel_pool_t *wlc_pool = NULL;
    uvm_channel_pool_t *lcic_pool = NULL;

    if (!uvm_conf_computing_mode_enabled(manager->gpu))
        return NV_OK;

    status = uvm_rm_mem_alloc(manager->gpu,
                              UVM_RM_MEM_TYPE_SYS,
                              sizeof(UvmCslIv),
                              UVM_CONF_COMPUTING_BUF_ALIGNMENT,
                              &manager->gpu->conf_computing.iv_rm_mem);
    if (status != NV_OK)
        return status;

    // Create the SEC2 pool. This needs to be done first, because the
    // initialization of other channels needs SEC2.
    status = channel_pool_add(manager, UVM_CHANNEL_POOL_TYPE_SEC2, 0, &sec2_pool);
    if (status != NV_OK)
        return status;

    manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_SEC2] = sec2_pool;

    // Use the same CE as CPU TO GPU channels for WLC/LCIC.
    // Both need to use the same engine for the fixed schedule to work.
    // TODO: Bug 3981928: [hcc][uvm] Optimize parameters of WLC/LCIC secure
    // work launch
    // Find a metric to select the best CE to use
    wlc_lcic_ce_index = preferred_ce[UVM_CHANNEL_TYPE_CPU_TO_GPU];

    // Create WLC/LCIC pools. This should be done early, because CE channels
    // use them for secure launch. The WLC pool must be created before the LCIC.
    status = channel_pool_add(manager, UVM_CHANNEL_POOL_TYPE_WLC, wlc_lcic_ce_index, &wlc_pool);
    if (status != NV_OK)
        return status;

    manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_WLC] = wlc_pool;

    status = channel_pool_add(manager, UVM_CHANNEL_POOL_TYPE_LCIC, wlc_lcic_ce_index, &lcic_pool);
    if (status != NV_OK)
        return status;

    status = channel_manager_setup_wlc_lcic(wlc_pool, lcic_pool);
    if (status != NV_OK)
        return status;

    // The LCIC pool must be assigned after the call to
    // channel_manager_setup_wlc_lcic(). Its assignment indicates that WLC and
    // LCIC channels are ready to be used for secure work submission.
    manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_LCIC] = lcic_pool;

    return NV_OK;
}

static NV_STATUS channel_manager_create_pools(uvm_channel_manager_t *manager)
{
    NV_STATUS status;
@ -2897,62 +3163,11 @@ static NV_STATUS channel_manager_create_pools(uvm_channel_manager_t *manager)
    if (!manager->channel_pools)
        return NV_ERR_NO_MEMORY;

    if (uvm_conf_computing_mode_enabled(manager->gpu)) {
        uvm_channel_pool_t *sec2_pool = NULL;
        uvm_channel_pool_t *wlc_pool = NULL;
        uvm_channel_pool_t *lcic_pool = NULL;
        unsigned wlc_lcic_ce_index;

        status = uvm_rm_mem_alloc(manager->gpu,
                                  UVM_RM_MEM_TYPE_SYS,
                                  sizeof(UvmCslIv),
                                  UVM_CONF_COMPUTING_BUF_ALIGNMENT,
                                  &manager->gpu->conf_computing.iv_rm_mem);

    status = channel_manager_create_conf_computing_pools(manager, preferred_ce);
        if (status != NV_OK)
            return status;

        // Create SEC2 pool. This needs to be done first, initialization of
        // other channels needs SEC2.
        status = channel_pool_add_secure(manager, UVM_CHANNEL_POOL_TYPE_SEC2, 0, &sec2_pool);
        if (status != NV_OK)
            return status;

        manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_SEC2] = sec2_pool;

        // Use the same CE as CPU TO GPU channels for WLC/LCIC
        // Both need to use the same engine for the fixed schedule to work.
        // TODO: Bug 3981928: [hcc][uvm] Optimize parameters of WLC/LCIC secure
        // work launch
        // Find a metric to select the best CE to use
        wlc_lcic_ce_index = preferred_ce[UVM_CHANNEL_TYPE_CPU_TO_GPU];

        // Create WLC/LCIC pools. This should be done early, CE channels use
        // them for secure launch. The WLC pool must be created before the LCIC.
        status = channel_pool_add_secure(manager, UVM_CHANNEL_POOL_TYPE_WLC, wlc_lcic_ce_index, &wlc_pool);
        if (status != NV_OK)
            return status;

        manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_WLC] = wlc_pool;

        status = channel_pool_add_secure(manager, UVM_CHANNEL_POOL_TYPE_LCIC, wlc_lcic_ce_index, &lcic_pool);
        if (status != NV_OK)
            return status;

        status = channel_manager_setup_wlc_lcic(wlc_pool, lcic_pool);
        if (status != NV_OK)
            return status;

        // The LCIC pool must be assigned after the call to
        // channel_manager_setup_wlc_lcic(). It determines WLC and LCIC channels
        // are ready to be used for secure work submission.
        manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_LCIC] = lcic_pool;
        status = channel_manager_create_ce_secure_pools(manager, preferred_ce);
    }
    else {
        status = channel_manager_create_ce_pools(manager, preferred_ce);
    }

    if (status != NV_OK)
        return status;
@ -3048,9 +3263,7 @@ static void channel_manager_stop_wlc(uvm_channel_manager_t *manager)
        // Every gpfifo entry advances the gpu put of WLC by two, so the
        // current value is (cpu_put * 2) % num_gpfifo_entries, and it is
        // ahead of the get pointer by 2.
        do_semaphore_release(&push,
                             channel->channel_info.gpPutGpuVa,
                             (channel->cpu_put * 2 - 2) % channel->num_gpfifo_entries);
        update_gpput_via_sec2(&push, channel, (channel->cpu_put * 2 - 2) % channel->num_gpfifo_entries);
    }

    status = uvm_push_end_and_wait(&push);
@ -104,16 +104,14 @@ typedef enum
    // ----------------------------------
    // Channel type with fixed schedules

    // Work Launch Channel (WLC) is a specialized channel
    // for launching work on other channels when
    // Confidential Computing is enabled.
    // It is paired with LCIC (below)
    // Work Launch Channel (WLC) is a specialized channel for launching work on
    // other channels when the Confidential Computing feature is enabled. It is
    // paired with LCIC (below)
    UVM_CHANNEL_TYPE_WLC,

    // Launch Confirmation Indicator Channel (LCIC) is a
    // specialized channel with fixed schedule. It gets
    // triggered by executing WLC work, and makes sure that
    // WLC get/put pointers are up-to-date.
    // Launch Confirmation Indicator Channel (LCIC) is a specialized channel
    // with fixed schedule. It gets triggered by executing WLC work, and makes
    // sure that WLC get/put pointers are up-to-date.
    UVM_CHANNEL_TYPE_LCIC,

    UVM_CHANNEL_TYPE_COUNT,
@ -242,11 +240,9 @@ typedef struct
    DECLARE_BITMAP(push_locks, UVM_CHANNEL_MAX_NUM_CHANNELS_PER_POOL);

    // Counting semaphore for available and unlocked channels; it must be
    // acquired before submitting work to a secure channel.
    // acquired before submitting work to a channel when the Confidential
    // Computing feature is enabled.
    uvm_semaphore_t push_sem;

    // See uvm_channel_is_secure() documentation.
    bool secure;
} uvm_channel_pool_t;

struct uvm_channel_struct
@ -304,8 +300,9 @@ struct uvm_channel_struct
    // its internal operation and each push may modify this state.
    uvm_mutex_t push_lock;

    // Every secure channel has cryptographic state in HW, which is
    // mirrored here for CPU-side operations.
    // When the Confidential Computing feature is enabled, every channel has
    // cryptographic state in HW, which is mirrored here for CPU-side
    // operations.
    UvmCslContext ctx;
    bool is_ctx_initialized;

@ -355,6 +352,13 @@ struct uvm_channel_struct
    // Encryption auth tags have to be located in unprotected sysmem.
    void *launch_auth_tag_cpu;
    NvU64 launch_auth_tag_gpu_va;

    // Used to decrypt the push back to protected sysmem.
    // This happens when profilers register callbacks for migration data.
    uvm_push_crypto_bundle_t *push_crypto_bundles;

    // Accompanying authentication tags for the crypto bundles
    uvm_rm_mem_t *push_crypto_bundle_auth_tags;
} conf_computing;

// RM channel information
@ -452,46 +456,28 @@ struct uvm_channel_manager_struct
// Create a channel manager for the GPU
NV_STATUS uvm_channel_manager_create(uvm_gpu_t *gpu, uvm_channel_manager_t **manager_out);

static bool uvm_channel_pool_is_ce(uvm_channel_pool_t *pool);

// A channel is secure if it has HW encryption capabilities.
//
// Secure channels are treated differently in the UVM driver. Each secure
// channel has a unique CSL context associated with it, has relatively
// restrictive reservation policies (in comparison with non-secure channels),
// it is requested to be allocated differently by RM, etc.
static bool uvm_channel_pool_is_secure(uvm_channel_pool_t *pool)
static bool uvm_pool_type_is_valid(uvm_channel_pool_type_t pool_type)
{
    return pool->secure;
}

static bool uvm_channel_is_secure(uvm_channel_t *channel)
{
    return uvm_channel_pool_is_secure(channel->pool);
    return (is_power_of_2(pool_type) && (pool_type < UVM_CHANNEL_POOL_TYPE_MASK));
}

static bool uvm_channel_pool_is_sec2(uvm_channel_pool_t *pool)
{
    UVM_ASSERT(pool->pool_type < UVM_CHANNEL_POOL_TYPE_MASK);
    UVM_ASSERT(uvm_pool_type_is_valid(pool->pool_type));

    return (pool->pool_type == UVM_CHANNEL_POOL_TYPE_SEC2);
}

static bool uvm_channel_pool_is_secure_ce(uvm_channel_pool_t *pool)
{
    return uvm_channel_pool_is_secure(pool) && uvm_channel_pool_is_ce(pool);
}

static bool uvm_channel_pool_is_wlc(uvm_channel_pool_t *pool)
{
    UVM_ASSERT(pool->pool_type < UVM_CHANNEL_POOL_TYPE_MASK);
    UVM_ASSERT(uvm_pool_type_is_valid(pool->pool_type));

    return (pool->pool_type == UVM_CHANNEL_POOL_TYPE_WLC);
}

static bool uvm_channel_pool_is_lcic(uvm_channel_pool_t *pool)
{
    UVM_ASSERT(pool->pool_type < UVM_CHANNEL_POOL_TYPE_MASK);
    UVM_ASSERT(uvm_pool_type_is_valid(pool->pool_type));

    return (pool->pool_type == UVM_CHANNEL_POOL_TYPE_LCIC);
}

@ -501,11 +487,6 @@ static bool uvm_channel_is_sec2(uvm_channel_t *channel)
    return uvm_channel_pool_is_sec2(channel->pool);
}

static bool uvm_channel_is_secure_ce(uvm_channel_t *channel)
{
    return uvm_channel_pool_is_secure_ce(channel->pool);
}

static bool uvm_channel_is_wlc(uvm_channel_t *channel)
{
    return uvm_channel_pool_is_wlc(channel->pool);

@ -516,12 +497,9 @@ static bool uvm_channel_is_lcic(uvm_channel_t *channel)
    return uvm_channel_pool_is_lcic(channel->pool);
}

bool uvm_channel_type_requires_secure_pool(uvm_gpu_t *gpu, uvm_channel_type_t channel_type);
NV_STATUS uvm_channel_secure_init(uvm_gpu_t *gpu, uvm_channel_t *channel);

static bool uvm_channel_pool_is_proxy(uvm_channel_pool_t *pool)
{
    UVM_ASSERT(pool->pool_type < UVM_CHANNEL_POOL_TYPE_MASK);
    UVM_ASSERT(uvm_pool_type_is_valid(pool->pool_type));

    return pool->pool_type == UVM_CHANNEL_POOL_TYPE_CE_PROXY;
}

@ -533,11 +511,7 @@ static bool uvm_channel_is_proxy(uvm_channel_t *channel)

static bool uvm_channel_pool_is_ce(uvm_channel_pool_t *pool)
{
    UVM_ASSERT(pool->pool_type < UVM_CHANNEL_POOL_TYPE_MASK);
    if (uvm_channel_pool_is_wlc(pool) || uvm_channel_pool_is_lcic(pool))
        return true;

    return (pool->pool_type == UVM_CHANNEL_POOL_TYPE_CE) || uvm_channel_pool_is_proxy(pool);
    return !uvm_channel_pool_is_sec2(pool);
}

static bool uvm_channel_is_ce(uvm_channel_t *channel)
@ -679,6 +653,11 @@ static uvm_gpu_t *uvm_channel_get_gpu(uvm_channel_t *channel)
    return channel->pool->manager->gpu;
}

static uvm_pushbuffer_t *uvm_channel_get_pushbuffer(uvm_channel_t *channel)
{
    return channel->pool->manager->pushbuffer;
}

// Index of a channel within the owning pool
static unsigned uvm_channel_index_in_pool(const uvm_channel_t *channel)
{
@ -681,9 +681,10 @@ done:
}

// The following test is inspired by uvm_push_test.c:test_concurrent_pushes.
// This test verifies that concurrent pushes using the same secure channel pool
// select different channels.
NV_STATUS test_secure_channel_selection(uvm_va_space_t *va_space)
// This test verifies that concurrent pushes using the same channel pool
// select different channels, when the Confidential Computing feature is
// enabled.
NV_STATUS test_conf_computing_channel_selection(uvm_va_space_t *va_space)
{
    NV_STATUS status = NV_OK;
    uvm_channel_pool_t *pool;

@ -703,9 +704,6 @@ NV_STATUS test_secure_channel_selection(uvm_va_space_t *va_space)
    uvm_channel_type_t channel_type;

    for (channel_type = 0; channel_type < UVM_CHANNEL_TYPE_COUNT; channel_type++) {
        if (!uvm_channel_type_requires_secure_pool(gpu, channel_type))
            continue;

        pool = gpu->channel_manager->pool_to_use.default_for_type[channel_type];
        TEST_CHECK_RET(pool != NULL);

@ -997,7 +995,7 @@ NV_STATUS uvm_test_channel_sanity(UVM_TEST_CHANNEL_SANITY_PARAMS *params, struct
    if (status != NV_OK)
        goto done;

    status = test_secure_channel_selection(va_space);
    status = test_conf_computing_channel_selection(va_space);
    if (status != NV_OK)
        goto done;

@ -26,6 +26,7 @@
#include "uvm_conf_computing.h"
#include "uvm_kvmalloc.h"
#include "uvm_gpu.h"
#include "uvm_hal.h"
#include "uvm_mem.h"
#include "uvm_processors.h"
#include "uvm_tracker.h"

@ -60,8 +61,7 @@ NV_STATUS uvm_conf_computing_init_parent_gpu(const uvm_parent_gpu_t *parent)

    uvm_assert_mutex_locked(&g_uvm_global.global_lock);

    // TODO: Bug 2844714.
    // Since we have no routine to traverse parent gpus,
    // TODO: Bug 2844714: since we have no routine to traverse parent GPUs,
    // find first child GPU and get its parent.
    first = uvm_global_processor_mask_find_first_gpu(&g_uvm_global.retained_gpus);
    if (!first)
@ -378,11 +378,12 @@ void uvm_conf_computing_log_gpu_encryption(uvm_channel_t *channel, UvmCslIv *iv)
    NV_STATUS status;

    uvm_mutex_lock(&channel->csl.ctx_lock);
    status = nvUvmInterfaceCslLogDeviceEncryption(&channel->csl.ctx, iv);
    status = nvUvmInterfaceCslIncrementIv(&channel->csl.ctx, UVM_CSL_OPERATION_DECRYPT, 1, iv);
    uvm_mutex_unlock(&channel->csl.ctx_lock);

    // nvUvmInterfaceLogDeviceEncryption fails when a 64-bit encryption counter
    // overflows. This is not supposed to happen on CC.
    // TODO: Bug 4014720: If nvUvmInterfaceCslIncrementIv returns with
    // NV_ERR_INSUFFICIENT_RESOURCES then the IV needs to be rotated via
    // nvUvmInterfaceCslRotateIv.
    UVM_ASSERT(status == NV_OK);
}

@ -391,11 +392,12 @@ void uvm_conf_computing_acquire_encryption_iv(uvm_channel_t *channel, UvmCslIv *
    NV_STATUS status;

    uvm_mutex_lock(&channel->csl.ctx_lock);
    status = nvUvmInterfaceCslAcquireEncryptionIv(&channel->csl.ctx, iv);
    status = nvUvmInterfaceCslIncrementIv(&channel->csl.ctx, UVM_CSL_OPERATION_ENCRYPT, 1, iv);
    uvm_mutex_unlock(&channel->csl.ctx_lock);

    // nvUvmInterfaceLogDeviceEncryption fails when a 64-bit encryption counter
    // overflows. This is not supposed to happen on CC.
    // TODO: Bug 4014720: If nvUvmInterfaceCslIncrementIv returns with
    // NV_ERR_INSUFFICIENT_RESOURCES then the IV needs to be rotated via
    // nvUvmInterfaceCslRotateIv.
    UVM_ASSERT(status == NV_OK);
}

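Both functions above now advance the CSL IV by one per operation instead of logging the encryption. A toy model of that counter discipline (the return convention here is an assumption, not the RM API):

#include <assert.h>
#include <stdint.h>

// Toy model of the CSL IV bookkeeping: the CPU side keeps a counter that must
// advance once per GPU-side operation so that later CPU encrypts and decrypts
// use matching IVs. A real context must also handle counter exhaustion (see
// the Bug 4014720 TODO above).
struct toy_csl_ctx {
    uint64_t iv_counter;
};

static uint64_t toy_increment_iv(struct toy_csl_ctx *ctx, uint64_t count)
{
    uint64_t iv = ctx->iv_counter;   // IV snapshot handed back to the caller
    ctx->iv_counter += count;        // stays in lockstep with the GPU side
    return iv;
}

int main(void)
{
    struct toy_csl_ctx ctx = { .iv_counter = 7 };

    assert(toy_increment_iv(&ctx, 1) == 7);
    assert(ctx.iv_counter == 8);
    return 0;
}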
@ -439,8 +441,58 @@ NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel,
                                      (const NvU8 *) src_cipher,
                                      src_iv,
                                      (NvU8 *) dst_plain,
                                      NULL,
                                      0,
                                      (const NvU8 *) auth_tag_buffer);
    uvm_mutex_unlock(&channel->csl.ctx_lock);

    return status;
}

NV_STATUS uvm_conf_computing_fault_decrypt(uvm_parent_gpu_t *parent_gpu,
                                           void *dst_plain,
                                           const void *src_cipher,
                                           const void *auth_tag_buffer,
                                           NvU8 valid)
{
    NV_STATUS status;

    // There is no dedicated lock for the CSL context associated with replayable
    // faults. The mutual exclusion required by the RM CSL API is enforced by
    // relying on the GPU replayable service lock (ISR lock), since fault
    // decryption is invoked as part of fault servicing.
    UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock));

    UVM_ASSERT(!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu));

    status = nvUvmInterfaceCslDecrypt(&parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx,
                                      parent_gpu->fault_buffer_hal->entry_size(parent_gpu),
                                      (const NvU8 *) src_cipher,
                                      NULL,
                                      (NvU8 *) dst_plain,
                                      &valid,
                                      sizeof(valid),
                                      (const NvU8 *) auth_tag_buffer);

    if (status != NV_OK)
        UVM_ERR_PRINT("nvUvmInterfaceCslDecrypt() failed: %s, GPU %s\n", nvstatusToString(status), parent_gpu->name);

    return status;
}

void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu, NvU64 increment)
{
    NV_STATUS status;

    // See comment in uvm_conf_computing_fault_decrypt
    UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock));

    UVM_ASSERT(!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu));

    status = nvUvmInterfaceCslIncrementIv(&parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx,
                                          UVM_CSL_OPERATION_DECRYPT,
                                          increment,
                                          NULL);

    UVM_ASSERT(status == NV_OK);
}
@ -42,9 +42,11 @@
// Use sizeof(UvmCslIv) to refer to the IV size.
#define UVM_CONF_COMPUTING_IV_ALIGNMENT 16

// SEC2 decrypt operation buffers are required to be 16-bytes aligned. CE
// encrypt/decrypt can be unaligned if the buffer lies in a single 32B segment.
// Otherwise, they need to be 32B aligned.
// SEC2 decrypt operation buffers are required to be 16-bytes aligned.
#define UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT 16

// CE encrypt/decrypt can be unaligned if the entire buffer lies in a single
// 32B segment. Otherwise, it needs to be 32B aligned.
#define UVM_CONF_COMPUTING_BUF_ALIGNMENT 32

#define UVM_CONF_COMPUTING_DMA_BUFFER_SIZE UVM_VA_BLOCK_SIZE
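A small self-checking sketch of the single-32B-segment rule stated in the comment above (my helper, not a driver API):

#include <assert.h>
#include <stdint.h>

// Toy check for the CE alignment rule: a buffer may be unaligned only if it
// does not cross a 32B boundary, i.e. its first and last byte share a single
// 32-byte segment.
static int fits_in_one_32b_segment(uint64_t addr, uint64_t size)
{
    return size != 0 && (addr / 32) == ((addr + size - 1) / 32);
}

int main(void)
{
    assert(fits_in_one_32b_segment(40, 8));    // bytes 40..47: one segment
    assert(!fits_in_one_32b_segment(40, 32));  // bytes 40..71: crosses 64
    return 0;
}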
@ -175,4 +177,28 @@ NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel,
                                         const UvmCslIv *src_iv,
                                         size_t size,
                                         const void *auth_tag_buffer);

// CPU decryption of a single replayable fault, encrypted by GSP-RM.
//
// Replayable fault decryption depends not only on the encrypted fault contents
// and the authentication tag, but also on the plaintext valid bit associated
// with the fault.
//
// When decrypting data previously encrypted by the Copy Engine, use
// uvm_conf_computing_cpu_decrypt instead.
//
// Locking: this function must be invoked while holding the replayable ISR lock.
NV_STATUS uvm_conf_computing_fault_decrypt(uvm_parent_gpu_t *parent_gpu,
                                           void *dst_plain,
                                           const void *src_cipher,
                                           const void *auth_tag_buffer,
                                           NvU8 valid);

// Increment the CPU-side decrypt IV of the CSL context associated with
// replayable faults. The function is a no-op if the given increment is zero.
//
// The IV associated with a fault CSL context is a 64-bit counter.
//
// Locking: this function must be invoked while holding the replayable ISR lock.
void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu, NvU64 increment);
#endif // __UVM_CONF_COMPUTING_H__
@ -50,6 +50,7 @@ typedef struct uvm_channel_struct uvm_channel_t;
typedef struct uvm_user_channel_struct uvm_user_channel_t;
typedef struct uvm_push_struct uvm_push_t;
typedef struct uvm_push_info_struct uvm_push_info_t;
typedef struct uvm_push_crypto_bundle_struct uvm_push_crypto_bundle_t;
typedef struct uvm_push_acquire_info_struct uvm_push_acquire_info_t;
typedef struct uvm_pushbuffer_struct uvm_pushbuffer_t;
typedef struct uvm_gpfifo_entry_struct uvm_gpfifo_entry_t;

@ -198,6 +198,12 @@ typedef struct

    // Client type of the service requestor.
    uvm_fault_client_type_t client_type;

    // New residency ID of the faulting region.
    uvm_processor_id_t residency_id;

    // New residency NUMA node ID of the faulting region.
    int residency_node;
} uvm_ats_fault_context_t;

struct uvm_fault_service_batch_context_struct
@ -177,31 +177,34 @@ bool uvm_gpu_non_replayable_faults_pending(uvm_parent_gpu_t *parent_gpu)
    return has_pending_faults == NV_TRUE;
}

static NvU32 fetch_non_replayable_fault_buffer_entries(uvm_gpu_t *gpu)
static NV_STATUS fetch_non_replayable_fault_buffer_entries(uvm_parent_gpu_t *parent_gpu, NvU32 *cached_faults)
{
    NV_STATUS status;
    NvU32 i = 0;
    NvU32 cached_faults = 0;
    uvm_fault_buffer_entry_t *fault_cache;
    NvU32 entry_size = gpu->parent->fault_buffer_hal->entry_size(gpu->parent);
    uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable;
    NvU32 i;
    NvU32 entry_size = parent_gpu->fault_buffer_hal->entry_size(parent_gpu);
    uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &parent_gpu->fault_buffer_info.non_replayable;
    char *current_hw_entry = (char *)non_replayable_faults->shadow_buffer_copy;
    uvm_fault_buffer_entry_t *fault_entry = non_replayable_faults->fault_cache;

    fault_cache = non_replayable_faults->fault_cache;
    UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.non_replayable_faults.service_lock));
    UVM_ASSERT(parent_gpu->non_replayable_faults_supported);

    UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.non_replayable_faults.service_lock));
    UVM_ASSERT(gpu->parent->non_replayable_faults_supported);
    status = nvUvmInterfaceGetNonReplayableFaults(&parent_gpu->fault_buffer_info.rm_info,
                                                  current_hw_entry,
                                                  cached_faults);

    status = nvUvmInterfaceGetNonReplayableFaults(&gpu->parent->fault_buffer_info.rm_info,
                                                  non_replayable_faults->shadow_buffer_copy,
                                                  &cached_faults);
    UVM_ASSERT(status == NV_OK);
    if (status != NV_OK) {
        UVM_ERR_PRINT("nvUvmInterfaceGetNonReplayableFaults() failed: %s, GPU %s\n",
                      nvstatusToString(status),
                      parent_gpu->name);

        uvm_global_set_fatal_error(status);
        return status;
    }

    // Parse all faults
    for (i = 0; i < cached_faults; ++i) {
        uvm_fault_buffer_entry_t *fault_entry = &non_replayable_faults->fault_cache[i];

        gpu->parent->fault_buffer_hal->parse_non_replayable_entry(gpu->parent, current_hw_entry, fault_entry);
    for (i = 0; i < *cached_faults; ++i) {
        parent_gpu->fault_buffer_hal->parse_non_replayable_entry(parent_gpu, current_hw_entry, fault_entry);

        // The GPU aligns the fault addresses to 4k, but all of our tracking is
        // done in PAGE_SIZE chunks which might be larger.

@ -226,9 +229,10 @@ static NvU32 fetch_non_replayable_fault_buffer_entries(uvm_gpu_t *gpu)
        }

        current_hw_entry += entry_size;
        fault_entry++;
    }

    return cached_faults;
    return NV_OK;
}

// In SRIOV, the UVM (guest) driver does not have access to the privileged
@ -705,21 +709,28 @@ exit_no_channel:
    uvm_va_space_up_read(va_space);
    uvm_va_space_mm_release_unlock(va_space, mm);

    if (status != NV_OK)
        UVM_DBG_PRINT("Error servicing non-replayable faults on GPU: %s\n", uvm_gpu_name(gpu));

    return status;
}

void uvm_gpu_service_non_replayable_fault_buffer(uvm_gpu_t *gpu)
{
    NV_STATUS status = NV_OK;
    NvU32 cached_faults;

    // If this handler is modified to handle fewer than all of the outstanding
    // faults, then special handling will need to be added to uvm_suspend()
    // to guarantee that fault processing has completed before control is
    // returned to the RM.
    while ((cached_faults = fetch_non_replayable_fault_buffer_entries(gpu)) > 0) {
    do {
        NV_STATUS status;
        NvU32 i;

        status = fetch_non_replayable_fault_buffer_entries(gpu->parent, &cached_faults);
        if (status != NV_OK)
            return;

        // Unlike replayable faults, we do not batch up and preprocess
        // non-replayable faults, since getting multiple faults on the same
        // memory region is not very likely
@ -728,10 +739,7 @@ void uvm_gpu_service_non_replayable_fault_buffer(uvm_gpu_t *gpu)
        for (i = 0; i < cached_faults; ++i) {
            status = service_fault(gpu, &gpu->parent->fault_buffer_info.non_replayable.fault_cache[i]);
            if (status != NV_OK)
                break;
                return;
        }
    }

    if (status != NV_OK)
        UVM_DBG_PRINT("Error servicing non-replayable faults on GPU: %s\n", uvm_gpu_name(gpu));
    } while (cached_faults > 0);
}
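The rewritten loop above drains the buffer with a do/while so a failed fetch can stop servicing early. A toy standalone model of that control flow, with stand-in helpers:

#include <stdio.h>

// Stand-ins for fetch_non_replayable_fault_buffer_entries() and
// service_fault(); they simulate two non-empty batches and then an empty one.
static int toy_fetch(unsigned *cached)
{
    static const unsigned batches[] = {2, 1, 0};
    static unsigned n;

    *cached = batches[n < 2 ? n : 2];
    n++;
    return 0;   // 0 stands in for NV_OK
}

static int toy_service_one(unsigned i)
{
    printf("servicing fault %u\n", i);
    return 0;
}

// Toy shape of the reworked service loop: fetch can now fail, and servicing
// repeats until a fetch returns zero faults.
static void toy_service_non_replayable(void)
{
    unsigned cached;

    do {
        if (toy_fetch(&cached) != 0)
            return;                    // fetch reports errors now

        for (unsigned i = 0; i < cached; i++) {
            if (toy_service_one(i) != 0)
                return;                // bail out on the first failure
        }
    } while (cached > 0);              // drain until the buffer is empty
}

int main(void)
{
    toy_service_non_replayable();
    return 0;
}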
@ -486,7 +486,9 @@ static NV_STATUS cancel_fault_precise_va(uvm_gpu_t *gpu,
    return status;
}

static NV_STATUS push_replay_on_gpu(uvm_gpu_t *gpu, uvm_fault_replay_type_t type, uvm_fault_service_batch_context_t *batch_context)
static NV_STATUS push_replay_on_gpu(uvm_gpu_t *gpu,
                                    uvm_fault_replay_type_t type,
                                    uvm_fault_service_batch_context_t *batch_context)
{
    NV_STATUS status;
    uvm_push_t push;
@ -572,6 +574,19 @@ static NV_STATUS hw_fault_buffer_flush_locked(uvm_parent_gpu_t *parent_gpu)
    return status;
}

static void fault_buffer_skip_replayable_entry(uvm_parent_gpu_t *parent_gpu, NvU32 index)
{
    UVM_ASSERT(parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, index));

    // Flushed faults are never decrypted, but the decryption IV associated
    // with replayable faults still requires manual adjustment so it is kept
    // in sync with the encryption IV on the GSP-RM side.
    if (!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu))
        uvm_conf_computing_fault_increment_decrypt_iv(parent_gpu, 1);

    parent_gpu->fault_buffer_hal->entry_clear_valid(parent_gpu, index);
}

static NV_STATUS fault_buffer_flush_locked(uvm_gpu_t *gpu,
                                           uvm_gpu_buffer_flush_mode_t flush_mode,
                                           uvm_fault_replay_type_t fault_replay,
@ -610,7 +625,7 @@ static NV_STATUS fault_buffer_flush_locked(uvm_gpu_t *gpu,
        // Wait until valid bit is set
        UVM_SPIN_WHILE(!parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, get), &spin);

        parent_gpu->fault_buffer_hal->entry_clear_valid(parent_gpu, get);
        fault_buffer_skip_replayable_entry(parent_gpu, get);
        ++get;
        if (get == replayable_faults->max_faults)
            get = 0;

@ -785,7 +800,7 @@ static bool fetch_fault_buffer_try_merge_entry(uvm_fault_buffer_entry_t *current
// This optimization cannot be performed during fault cancel on Pascal GPUs
// (fetch_mode == FAULT_FETCH_MODE_ALL) since we need accurate tracking of all
// the faults in each uTLB in order to guarantee precise fault attribution.
static void fetch_fault_buffer_entries(uvm_gpu_t *gpu,
static NV_STATUS fetch_fault_buffer_entries(uvm_gpu_t *gpu,
                                            uvm_fault_service_batch_context_t *batch_context,
                                            fault_fetch_mode_t fetch_mode)
{

@ -796,6 +811,7 @@ static void fetch_fault_buffer_entries(uvm_gpu_t *gpu,
    NvU32 utlb_id;
    uvm_fault_buffer_entry_t *fault_cache;
    uvm_spin_loop_t spin;
    NV_STATUS status = NV_OK;
    uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable;
    const bool in_pascal_cancel_path = (!gpu->parent->fault_cancel_va_supported && fetch_mode == FAULT_FETCH_MODE_ALL);
    const bool may_filter = uvm_perf_fault_coalesce && !in_pascal_cancel_path;

@ -851,7 +867,9 @@ static void fetch_fault_buffer_entries(uvm_gpu_t *gpu,
        smp_mb__after_atomic();

        // Got valid bit set. Let's cache.
        gpu->parent->fault_buffer_hal->parse_entry(gpu->parent, get, current_entry);
        status = gpu->parent->fault_buffer_hal->parse_replayable_entry(gpu->parent, get, current_entry);
        if (status != NV_OK)
            goto done;

        // The GPU aligns the fault addresses to 4k, but all of our tracking is
        // done in PAGE_SIZE chunks which might be larger.

@ -918,6 +936,8 @@ done:

    batch_context->num_cached_faults = fault_index;
    batch_context->num_coalesced_faults = num_coalesced_faults;

    return status;
}

// Sort comparator for pointers to fault buffer entries that sorts by

@ -2475,7 +2495,10 @@ static NV_STATUS cancel_faults_precise_tlb(uvm_gpu_t *gpu, uvm_fault_service_bat
        batch_context->has_throttled_faults = false;

        // 5) Fetch all faults from buffer
        fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_ALL);
        status = fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_ALL);
        if (status != NV_OK)
            break;

        ++batch_context->batch_id;

        UVM_ASSERT(batch_context->num_cached_faults == batch_context->num_coalesced_faults);

@ -2612,7 +2635,10 @@ void uvm_gpu_service_replayable_faults(uvm_gpu_t *gpu)
        batch_context->has_fatal_faults = false;
        batch_context->has_throttled_faults = false;

        fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_BATCH_READY);
        status = fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_BATCH_READY);
        if (status != NV_OK)
            break;

        if (batch_context->num_cached_faults == 0)
            break;

|
@ -579,8 +579,10 @@ static void uvm_gpu_semaphore_encrypted_payload_update(uvm_channel_t *channel, u
|
||||
void *auth_tag_cpu_addr = uvm_rm_mem_get_cpu_va(semaphore->conf_computing.auth_tag);
|
||||
NvU32 *gpu_notifier_cpu_addr = (NvU32 *)uvm_rm_mem_get_cpu_va(semaphore->conf_computing.notifier);
|
||||
NvU32 *payload_cpu_addr = (NvU32 *)uvm_rm_mem_get_cpu_va(semaphore->conf_computing.encrypted_payload);
|
||||
uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);
|
||||
|
||||
UVM_ASSERT(uvm_channel_is_secure_ce(channel));
|
||||
UVM_ASSERT(uvm_conf_computing_mode_enabled(gpu));
|
||||
UVM_ASSERT(uvm_channel_is_ce(channel));
|
||||
|
||||
last_observed_notifier = semaphore->conf_computing.last_observed_notifier;
|
||||
gpu_notifier = UVM_READ_ONCE(*gpu_notifier_cpu_addr);
|
||||
|
@ -91,9 +91,9 @@ struct uvm_gpu_tracking_semaphore_struct
// Create a semaphore pool for a GPU.
NV_STATUS uvm_gpu_semaphore_pool_create(uvm_gpu_t *gpu, uvm_gpu_semaphore_pool_t **pool_out);

// When the Confidential Computing feature is enabled, pools associated with
// secure CE channels are allocated in the CPR of vidmem and as such have
// all the associated access restrictions. Because of this, they're called
// When the Confidential Computing feature is enabled, semaphore pools
// associated with CE channels are allocated in the CPR of vidmem and as such
// have all the associated access restrictions. Because of this, they're called
// secure pools, and secure semaphores are allocated out of said secure pools.
NV_STATUS uvm_gpu_semaphore_secure_pool_create(uvm_gpu_t *gpu, uvm_gpu_semaphore_pool_t **pool_out);

@ -373,7 +373,7 @@ static uvm_hal_class_ops_t fault_buffer_table[] =
|
||||
.read_get = uvm_hal_maxwell_fault_buffer_read_get_unsupported,
|
||||
.write_get = uvm_hal_maxwell_fault_buffer_write_get_unsupported,
|
||||
.get_ve_id = uvm_hal_maxwell_fault_buffer_get_ve_id_unsupported,
|
||||
.parse_entry = uvm_hal_maxwell_fault_buffer_parse_entry_unsupported,
|
||||
.parse_replayable_entry = uvm_hal_maxwell_fault_buffer_parse_replayable_entry_unsupported,
|
||||
.entry_is_valid = uvm_hal_maxwell_fault_buffer_entry_is_valid_unsupported,
|
||||
.entry_clear_valid = uvm_hal_maxwell_fault_buffer_entry_clear_valid_unsupported,
|
||||
.entry_size = uvm_hal_maxwell_fault_buffer_entry_size_unsupported,
|
||||
@ -396,7 +396,7 @@ static uvm_hal_class_ops_t fault_buffer_table[] =
|
||||
.read_put = uvm_hal_pascal_fault_buffer_read_put,
|
||||
.read_get = uvm_hal_pascal_fault_buffer_read_get,
|
||||
.write_get = uvm_hal_pascal_fault_buffer_write_get,
|
||||
.parse_entry = uvm_hal_pascal_fault_buffer_parse_entry,
|
||||
.parse_replayable_entry = uvm_hal_pascal_fault_buffer_parse_replayable_entry,
|
||||
.entry_is_valid = uvm_hal_pascal_fault_buffer_entry_is_valid,
|
||||
.entry_clear_valid = uvm_hal_pascal_fault_buffer_entry_clear_valid,
|
||||
.entry_size = uvm_hal_pascal_fault_buffer_entry_size,
|
||||
@@ -411,7 +411,7 @@ static uvm_hal_class_ops_t fault_buffer_table[] =
             .read_get = uvm_hal_volta_fault_buffer_read_get,
             .write_get = uvm_hal_volta_fault_buffer_write_get,
             .get_ve_id = uvm_hal_volta_fault_buffer_get_ve_id,
-            .parse_entry = uvm_hal_volta_fault_buffer_parse_entry,
+            .parse_replayable_entry = uvm_hal_volta_fault_buffer_parse_replayable_entry,
             .parse_non_replayable_entry = uvm_hal_volta_fault_buffer_parse_non_replayable_entry,
             .get_fault_type = uvm_hal_volta_fault_buffer_get_fault_type,
         }

@@ -485,11 +485,24 @@ typedef NvU32 (*uvm_hal_fault_buffer_read_get_t)(uvm_parent_gpu_t *parent_gpu);
 typedef void (*uvm_hal_fault_buffer_write_get_t)(uvm_parent_gpu_t *parent_gpu, NvU32 get);
 typedef NvU8 (*uvm_hal_fault_buffer_get_ve_id_t)(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type);

-// Parse the entry on the given buffer index. This also clears the valid bit of
-// the entry in the buffer.
-typedef void (*uvm_hal_fault_buffer_parse_entry_t)(uvm_parent_gpu_t *gpu,
+// Parse the replayable entry at the given buffer index. This also clears the
+// valid bit of the entry in the buffer.
+typedef NV_STATUS (*uvm_hal_fault_buffer_parse_replayable_entry_t)(uvm_parent_gpu_t *gpu,
+                                                                   NvU32 index,
+                                                                   uvm_fault_buffer_entry_t *buffer_entry);
+
+NV_STATUS uvm_hal_maxwell_fault_buffer_parse_replayable_entry_unsupported(uvm_parent_gpu_t *parent_gpu,
+                                                                          NvU32 index,
+                                                                          uvm_fault_buffer_entry_t *buffer_entry);
+
+NV_STATUS uvm_hal_pascal_fault_buffer_parse_replayable_entry(uvm_parent_gpu_t *parent_gpu,
+                                                             NvU32 index,
+                                                             uvm_fault_buffer_entry_t *buffer_entry);
+
+NV_STATUS uvm_hal_volta_fault_buffer_parse_replayable_entry(uvm_parent_gpu_t *parent_gpu,
+                                                            NvU32 index,
+                                                            uvm_fault_buffer_entry_t *buffer_entry);
+
 typedef bool (*uvm_hal_fault_buffer_entry_is_valid_t)(uvm_parent_gpu_t *parent_gpu, NvU32 index);
 typedef void (*uvm_hal_fault_buffer_entry_clear_valid_t)(uvm_parent_gpu_t *parent_gpu, NvU32 index);
 typedef NvU32 (*uvm_hal_fault_buffer_entry_size_t)(uvm_parent_gpu_t *parent_gpu);
@@ -508,9 +521,6 @@ NvU32 uvm_hal_maxwell_fault_buffer_read_put_unsupported(uvm_parent_gpu_t *parent
 NvU32 uvm_hal_maxwell_fault_buffer_read_get_unsupported(uvm_parent_gpu_t *parent_gpu);
 void uvm_hal_maxwell_fault_buffer_write_get_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index);
 NvU8 uvm_hal_maxwell_fault_buffer_get_ve_id_unsupported(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type);
-void uvm_hal_maxwell_fault_buffer_parse_entry_unsupported(uvm_parent_gpu_t *parent_gpu,
-                                                          NvU32 index,
-                                                          uvm_fault_buffer_entry_t *buffer_entry);
 uvm_fault_type_t uvm_hal_maxwell_fault_buffer_get_fault_type_unsupported(const NvU32 *fault_entry);

 void uvm_hal_pascal_enable_replayable_faults(uvm_parent_gpu_t *parent_gpu);
@@ -519,18 +529,14 @@ void uvm_hal_pascal_clear_replayable_faults(uvm_parent_gpu_t *parent_gpu, NvU32
 NvU32 uvm_hal_pascal_fault_buffer_read_put(uvm_parent_gpu_t *parent_gpu);
 NvU32 uvm_hal_pascal_fault_buffer_read_get(uvm_parent_gpu_t *parent_gpu);
 void uvm_hal_pascal_fault_buffer_write_get(uvm_parent_gpu_t *parent_gpu, NvU32 index);
-void uvm_hal_pascal_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,
-                                             NvU32 index,
-                                             uvm_fault_buffer_entry_t *buffer_entry);
-
 uvm_fault_type_t uvm_hal_pascal_fault_buffer_get_fault_type(const NvU32 *fault_entry);

 NvU32 uvm_hal_volta_fault_buffer_read_put(uvm_parent_gpu_t *parent_gpu);
 NvU32 uvm_hal_volta_fault_buffer_read_get(uvm_parent_gpu_t *parent_gpu);
 void uvm_hal_volta_fault_buffer_write_get(uvm_parent_gpu_t *parent_gpu, NvU32 index);
 NvU8 uvm_hal_volta_fault_buffer_get_ve_id(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type);
-void uvm_hal_volta_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,
-                                            NvU32 index,
-                                            uvm_fault_buffer_entry_t *buffer_entry);
-
 uvm_fault_type_t uvm_hal_volta_fault_buffer_get_fault_type(const NvU32 *fault_entry);

 void uvm_hal_turing_disable_replayable_faults(uvm_parent_gpu_t *parent_gpu);
@@ -772,7 +778,7 @@ struct uvm_fault_buffer_hal_struct
     uvm_hal_fault_buffer_read_get_t read_get;
     uvm_hal_fault_buffer_write_get_t write_get;
     uvm_hal_fault_buffer_get_ve_id_t get_ve_id;
-    uvm_hal_fault_buffer_parse_entry_t parse_entry;
+    uvm_hal_fault_buffer_parse_replayable_entry_t parse_replayable_entry;
     uvm_hal_fault_buffer_entry_is_valid_t entry_is_valid;
     uvm_hal_fault_buffer_entry_clear_valid_t entry_clear_valid;
     uvm_hal_fault_buffer_entry_size_t entry_size;

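The table and struct hunks above rename one slot of a per-architecture ops table. For readers unfamiliar with the pattern, here is a compilable toy of the same function-pointer dispatch; the types and names are illustrative, not the driver's:

    #include <stdio.h>

    typedef int NV_STATUS;
    #define NV_OK 0

    typedef NV_STATUS (*parse_replayable_entry_t)(int index);

    typedef struct {
        parse_replayable_entry_t parse_replayable_entry;
    } fault_buffer_hal_t;

    static NV_STATUS volta_parse_replayable_entry(int index)
    {
        printf("parsing replayable fault entry %d\n", index);
        return NV_OK;
    }

    int main(void)
    {
        // One table entry per GPU architecture, chosen at init time.
        fault_buffer_hal_t hal = { .parse_replayable_entry = volta_parse_replayable_entry };

        // Callers dispatch through the table, much like
        // parent_gpu->fault_buffer_hal->parse_replayable_entry(...).
        return hal.parse_replayable_entry(0);
    }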
@@ -128,6 +128,13 @@ static uvm_gpu_address_t uvm_gpu_address_virtual(NvU64 va)
     return address;
 }

+static uvm_gpu_address_t uvm_gpu_address_virtual_unprotected(NvU64 va)
+{
+    uvm_gpu_address_t address = uvm_gpu_address_virtual(va);
+    address.is_unprotected = true;
+    return address;
+}
+
 // Create a physical GPU address
 static uvm_gpu_address_t uvm_gpu_address_physical(uvm_aperture_t aperture, NvU64 pa)
 {

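uvm_gpu_address_virtual_unprotected() is a thin wrapper that sets a flag on the address struct. A hedged usage fragment; only the two helpers appear in this diff, and the commented call site is hypothetical:

    // Sketch: tag a destination as unprotected sysmem so an encrypt/decrypt
    // path can tell the protected and unprotected sides apart.
    uvm_gpu_address_t dst = uvm_gpu_address_virtual_unprotected(dst_gpu_va);
    uvm_gpu_address_t src = uvm_gpu_address_virtual(src_gpu_va);
    // gpu->parent->ce_hal->encrypt(push, dst, src, size, auth_tag);  // hypothetical call site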
@@ -2575,7 +2575,7 @@ static NV_STATUS dmamap_src_sysmem_pages(uvm_va_block_t *va_block,
             continue;
         }

-        if (folio_test_swapcache(page_folio(src_page))) {
+        if (PageSwapCache(src_page)) {
             // TODO: Bug 4050579: Remove this when swap cached pages can be
             // migrated.
             if (service_context) {

@@ -61,7 +61,11 @@ void uvm_hal_hopper_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
     // GH180.
     parent_gpu->ce_phys_vidmem_write_supported = !uvm_gpu_is_coherent(parent_gpu);

-    parent_gpu->peer_copy_mode = g_uvm_global.peer_copy_mode;
+    // TODO: Bug 4174553: [HGX-SkinnyJoe][GH180] channel errors discussion/debug
+    // portion for the uvm tests became nonresponsive after
+    // some time and then failed even after reboot
+    parent_gpu->peer_copy_mode = uvm_gpu_is_coherent(parent_gpu) ?
+                                 UVM_GPU_PEER_COPY_MODE_VIRTUAL : g_uvm_global.peer_copy_mode;

     // All GR context buffers may be mapped to 57b wide VAs. All "compute" units
     // accessing GR context buffers support the 57-bit VA range.

@@ -491,7 +491,6 @@ void uvm_hal_hopper_ce_encrypt(uvm_push_t *push,
     uvm_gpu_t *gpu = uvm_push_get_gpu(push);

     UVM_ASSERT(uvm_conf_computing_mode_is_hcc(gpu));
-    UVM_ASSERT(uvm_push_is_fake(push) || uvm_channel_is_secure(push->channel));
     UVM_ASSERT(IS_ALIGNED(auth_tag.address, UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT));

     if (!src.is_virtual)
@@ -540,7 +539,6 @@ void uvm_hal_hopper_ce_decrypt(uvm_push_t *push,
     uvm_gpu_t *gpu = uvm_push_get_gpu(push);

     UVM_ASSERT(uvm_conf_computing_mode_is_hcc(gpu));
-    UVM_ASSERT(!push->channel || uvm_channel_is_secure(push->channel));
     UVM_ASSERT(IS_ALIGNED(auth_tag.address, UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT));

     // The addressing mode (and aperture, if applicable) of the source and

@@ -166,6 +166,7 @@ void uvm_hal_hopper_sec2_decrypt(uvm_push_t *push, NvU64 dst_va, NvU64 src_va, N
     NvU32 *csl_sign_init = push->next;

     // Check that the provided alignment matches HW
     BUILD_BUG_ON(UVM_CONF_COMPUTING_SEC2_BUF_ALIGNMENT != (1 << HWSHIFT(CBA2, DECRYPT_COPY_DST_ADDR_LO, DATA)));
-    BUILD_BUG_ON(UVM_CONF_COMPUTING_BUF_ALIGNMENT < (1 << HWSHIFT(CBA2, DECRYPT_COPY_DST_ADDR_LO, DATA)));
+    BUILD_BUG_ON(UVM_CONF_COMPUTING_BUF_ALIGNMENT % (1 << HWSHIFT(CBA2, DECRYPT_COPY_DST_ADDR_LO, DATA)) != 0);

@@ -153,6 +153,10 @@ static inline const struct cpumask *uvm_cpumask_of_node(int node)
 #define VM_MIXEDMAP 0x00000000
 #endif

+#if !defined(MPOL_PREFERRED_MANY)
+#define MPOL_PREFERRED_MANY 5
+#endif
+
 //
 // printk.h already defined pr_fmt, so we have to redefine it so the pr_*
 // routines pick up our version

@@ -279,13 +279,14 @@
 //   Operations not allowed while holding the lock:
 //   - GPU memory allocation which can evict memory (would require nesting
 //     block locks)
 //
 // - GPU DMA Allocation pool lock (gpu->conf_computing.dma_buffer_pool.lock)
 //   Order: UVM_LOCK_ORDER_CONF_COMPUTING_DMA_BUFFER_POOL
+//   Condition: The Confidential Computing feature is enabled
 //   Exclusive lock (mutex)
 //
 //   Protects:
 //   - Protect the state of the uvm_conf_computing_dma_buffer_pool_t
 //     when the Confidential Computing feature is enabled on the system.
 //
 // - Chunk mapping lock (gpu->root_chunk_mappings.bitlocks and
 //   gpu->sysmem_mappings.bitlock)
@@ -321,22 +322,25 @@
 //   Operations not allowed while holding this lock
 //   - GPU memory allocation which can evict
 //
-// - Secure channel CSL channel pool semaphore
+// - CE channel CSL channel pool semaphore
 //   Order: UVM_LOCK_ORDER_CSL_PUSH
-//   Semaphore per SEC2 channel pool
+//   Condition: The Confidential Computing feature is enabled
+//   Semaphore per CE channel pool
 //
-//   The semaphore controls concurrent pushes to secure channels. Secure work
-//   submission depends on channel availability in GPFIFO entries (as in any
-//   other channel type) but also on channel locking. Each secure channel has a
-//   lock to enforce ordering of pushes. The channel's CSL lock is taken on
-//   channel reservation until uvm_push_end. Secure channels are stateful
-//   channels and the CSL lock protects their CSL state/context.
+//   The semaphore controls concurrent pushes to CE channels that are not WLC
+//   channels. Secure work submission depends on channel availability in
+//   GPFIFO entries (as in any other channel type) but also on channel
+//   locking. Each channel has a lock to enforce ordering of pushes. The
+//   channel's CSL lock is taken on channel reservation until uvm_push_end.
+//   When the Confidential Computing feature is enabled, channels are
+//   stateful, and the CSL lock protects their CSL state/context.
 //
 //   Operations allowed while holding this lock
-//   - Pushing work to CE secure channels
+//   - Pushing work to CE channels (except for WLC channels)
 //
 // - WLC CSL channel pool semaphore
 //   Order: UVM_LOCK_ORDER_CSL_WLC_PUSH
+//   Condition: The Confidential Computing feature is enabled
 //   Semaphore per WLC channel pool
 //
 //   The semaphore controls concurrent pushes to WLC channels. WLC work
@@ -346,8 +350,8 @@
 //   channel reservation until uvm_push_end. SEC2 channels are stateful
 //   channels and the CSL lock protects their CSL state/context.
 //
-//   This lock ORDER is different and sits below generic secure channel CSL
-//   lock and above SEC2 CSL lock. This reflects the dual nature of WLC
+//   This lock ORDER is different and sits below the generic channel CSL
+//   lock and above the SEC2 CSL lock. This reflects the dual nature of WLC
 //   channels; they use SEC2 indirect work launch during initialization,
 //   and after their schedule is initialized they provide indirect launch
 //   functionality to other CE channels.
@@ -357,6 +361,7 @@
 //
 // - SEC2 CSL channel pool semaphore
 //   Order: UVM_LOCK_ORDER_SEC2_CSL_PUSH
+//   Condition: The Confidential Computing feature is enabled
 //   Semaphore per SEC2 channel pool
 //
 //   The semaphore controls concurrent pushes to SEC2 channels. SEC2 work
@@ -366,9 +371,9 @@
 //   channel reservation until uvm_push_end. SEC2 channels are stateful
 //   channels and the CSL lock protects their CSL state/context.
 //
-//   This lock ORDER is different and lower than the generic secure channel
-//   lock to allow secure work submission to use a SEC2 channel to submit
-//   work before releasing the CSL lock of the originating secure channel.
+//   This lock ORDER is different and lower than UVM_LOCK_ORDER_CSL_PUSH
+//   to allow secure work submission to use a SEC2 channel to submit
+//   work before releasing the CSL lock of the originating channel.
 //
 //   Operations allowed while holding this lock
 //   - Pushing work to SEC2 channels
@@ -408,16 +413,18 @@
 //
 // - WLC Channel lock
 //   Order: UVM_LOCK_ORDER_WLC_CHANNEL
+//   Condition: The Confidential Computing feature is enabled
 //   Spinlock (uvm_spinlock_t)
 //
 //   Lock protecting the state of WLC channels in a channel pool. This lock
-//   is separate from the above generic channel lock to allow for indirect
-//   worklaunch pushes while holding the main channel lock.
-//   (WLC pushes don't need any of the pushbuffer locks described above)
+//   is separate from the generic channel lock (UVM_LOCK_ORDER_CHANNEL)
+//   to allow for indirect worklaunch pushes while holding the main channel
+//   lock (WLC pushes don't need any of the pushbuffer locks described
+//   above)
 //
 // - Tools global VA space list lock (g_tools_va_space_list_lock)
 //   Order: UVM_LOCK_ORDER_TOOLS_VA_SPACE_LIST
-//   Reader/writer lock (rw_sempahore)
+//   Reader/writer lock (rw_semaphore)
 //
 //   This lock protects the list of VA spaces used when broadcasting
 //   UVM profiling events.
@@ -437,9 +444,10 @@
 //
 // - Tracking semaphores
 //   Order: UVM_LOCK_ORDER_SECURE_SEMAPHORE
-//   When the Confidential Computing feature is enabled, CE semaphores are
-//   encrypted, and require to take the CSL lock (UVM_LOCK_ORDER_LEAF) to
-//   decrypt the payload.
+//   Condition: The Confidential Computing feature is enabled
+//
+//   CE semaphore payloads are encrypted, and require to take the CSL lock
+//   (UVM_LOCK_ORDER_LEAF) to decrypt the payload.
 //
 // - Leaf locks
 //   Order: UVM_LOCK_ORDER_LEAF
@@ -68,11 +68,12 @@ uvm_fault_type_t uvm_hal_maxwell_fault_buffer_get_fault_type_unsupported(const N
     return UVM_FAULT_TYPE_COUNT;
 }

-void uvm_hal_maxwell_fault_buffer_parse_entry_unsupported(uvm_parent_gpu_t *parent_gpu,
+NV_STATUS uvm_hal_maxwell_fault_buffer_parse_replayable_entry_unsupported(uvm_parent_gpu_t *parent_gpu,
                                                                           NvU32 index,
                                                                           uvm_fault_buffer_entry_t *buffer_entry)
 {
     UVM_ASSERT_MSG(false, "fault_buffer_parse_entry is not supported on GPU: %s.\n", parent_gpu->name);
+    return NV_ERR_NOT_SUPPORTED;
 }

 bool uvm_hal_maxwell_fault_buffer_entry_is_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index)

@@ -392,12 +392,6 @@ static NV_STATUS uvm_mem_alloc_vidmem(NvU64 size, uvm_gpu_t *gpu, uvm_mem_t **me
     return uvm_mem_alloc(&params, mem_out);
 }

-// Helper for allocating protected vidmem with the default page size
-static NV_STATUS uvm_mem_alloc_vidmem_protected(NvU64 size, uvm_gpu_t *gpu, uvm_mem_t **mem_out)
-{
-    return uvm_mem_alloc_vidmem(size, gpu, mem_out);
-}
-
 // Helper for allocating sysmem and mapping it on the CPU
 static NV_STATUS uvm_mem_alloc_sysmem_and_map_cpu_kernel(NvU64 size, struct mm_struct *mm, uvm_mem_t **mem_out)
 {

@@ -134,6 +134,22 @@ static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block,
     // first map operation
     uvm_page_mask_complement(&va_block_context->caller_page_mask, &va_block->maybe_mapped_pages);

+    if (uvm_va_block_is_hmm(va_block) && !UVM_ID_IS_CPU(dest_id)) {
+        // Do not map pages that are already resident on the CPU. This is in
+        // order to avoid breaking system-wide atomic operations on HMM. HMM's
+        // implementation of system-wide atomic operations involves restricting
+        // mappings to one processor (CPU or a GPU) at a time. If we were to
+        // grant a GPU a mapping to system memory, this gets into trouble
+        // because, on the CPU side, Linux can silently upgrade PTE permissions
+        // (move from read-only, to read-write, without any MMU notifiers
+        // firing), thus breaking the model by allowing simultaneous read-write
+        // access from two separate processors. To avoid that, just don't map
+        // such pages at all, when migrating.
+        uvm_page_mask_andnot(&va_block_context->caller_page_mask,
+                             &va_block_context->caller_page_mask,
+                             uvm_va_block_resident_mask_get(va_block, UVM_ID_CPU));
+    }
+
     // Only map those pages that are not mapped anywhere else (likely due
     // to a first touch or a migration). We pass
     // UvmEventMapRemoteCauseInvalid since the destination processor of a
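The net effect of the two mask operations above is caller_page_mask = ~maybe_mapped_pages & ~cpu_resident_pages. A compilable toy of the same bitmask algebra, with uint32_t standing in for UVM's page masks:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        // Toy stand-ins for UVM's page masks: one bit per page in the block.
        uint32_t maybe_mapped = 0x0F;   // pages 0-3 already have some mapping
        uint32_t cpu_resident = 0x30;   // pages 4-5 are resident on the CPU

        uint32_t caller_mask = ~maybe_mapped;   // uvm_page_mask_complement()
        caller_mask &= ~cpu_resident;           // uvm_page_mask_andnot()

        printf("pages eligible for the new mapping: 0x%08x\n", caller_mask);
        return 0;
    }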
@@ -953,6 +969,7 @@ NV_STATUS uvm_api_migrate(UVM_MIGRATE_PARAMS *params, struct file *filp)
         .populate_permissions = UVM_POPULATE_PERMISSIONS_INHERIT,
         .touch = false,
         .skip_mapped = false,
+        .populate_on_cpu_alloc_failures = false,
         .user_space_start = &params->userSpaceStart,
         .user_space_length = &params->userSpaceLength,
     };

@@ -507,6 +507,22 @@ static NV_STATUS migrate_vma_copy_pages(struct vm_area_struct *vma,
     return NV_OK;
 }

+void migrate_vma_cleanup_pages(unsigned long *dst, unsigned long npages)
+{
+    unsigned long i;
+
+    for (i = 0; i < npages; i++) {
+        struct page *dst_page = migrate_pfn_to_page(dst[i]);
+
+        if (!dst_page)
+            continue;
+
+        unlock_page(dst_page);
+        __free_page(dst_page);
+        dst[i] = 0;
+    }
+}
+
 void uvm_migrate_vma_alloc_and_copy(struct migrate_vma *args, migrate_vma_state_t *state)
 {
     struct vm_area_struct *vma = args->vma;
@@ -531,6 +547,10 @@ void uvm_migrate_vma_alloc_and_copy(struct migrate_vma *args, migrate_vma_state_

     if (state->status == NV_OK)
         state->status = tracker_status;
+
+    // Mark all pages as not migrating if we're failing
+    if (state->status != NV_OK)
+        migrate_vma_cleanup_pages(args->dst, state->num_pages);
 }

 void uvm_migrate_vma_alloc_and_copy_helper(struct vm_area_struct *vma,
@@ -802,7 +822,7 @@ static NV_STATUS migrate_pageable_vma_region(struct vm_area_struct *vma,
         // If the destination is the CPU, signal user-space to retry with a
         // different node. Otherwise, just try to populate anywhere in the
         // system
-        if (UVM_ID_IS_CPU(uvm_migrate_args->dst_id)) {
+        if (UVM_ID_IS_CPU(uvm_migrate_args->dst_id) && !uvm_migrate_args->populate_on_cpu_alloc_failures) {
             *next_addr = start + find_first_bit(state->scratch2_mask, num_pages) * PAGE_SIZE;
             return NV_ERR_MORE_PROCESSING_REQUIRED;
         }
@@ -961,13 +981,10 @@ NV_STATUS uvm_migrate_pageable(uvm_migrate_args_t *uvm_migrate_args)
         // We only check that dst_node_id is a valid node in the system and it
         // doesn't correspond to a GPU node. This is fine because
         // alloc_pages_node will clamp the allocation to
-        // cpuset_current_mems_allowed, and uvm_migrate_pageable is only called
-        // from process context (uvm_migrate) when dst_id is CPU. UVM bottom
-        // half never calls uvm_migrate_pageable when dst_id is CPU. So, assert
-        // that we're in a user thread. However, this would need to change if we
-        // wanted to call this function from a bottom half with CPU dst_id.
-        UVM_ASSERT(!(current->flags & PF_KTHREAD));
-
+        // cpuset_current_mems_allowed when uvm_migrate_pageable is called from
+        // process context (uvm_migrate) when dst_id is CPU. UVM bottom half
+        // calls uvm_migrate_pageable with CPU dst_id only when the VMA memory
+        // policy is set to dst_node_id and dst_node_id is not NUMA_NO_NODE.
         if (!nv_numa_node_has_memory(dst_node_id) ||
             uvm_va_space_find_gpu_with_memory_node_id(va_space, dst_node_id) != NULL)
             return NV_ERR_INVALID_ARGUMENT;

@@ -43,6 +43,7 @@ typedef struct
     uvm_populate_permissions_t populate_permissions;
     bool touch : 1;
     bool skip_mapped : 1;
+    bool populate_on_cpu_alloc_failures : 1;
     NvU64 *user_space_start;
     NvU64 *user_space_length;
 } uvm_migrate_args_t;

@@ -214,7 +214,7 @@ static UvmFaultMetadataPacket *get_fault_buffer_entry_metadata(uvm_parent_gpu_t
     return fault_entry_metadata + index;
 }

-void uvm_hal_pascal_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,
+NV_STATUS uvm_hal_pascal_fault_buffer_parse_replayable_entry(uvm_parent_gpu_t *parent_gpu,
                                                              NvU32 index,
                                                              uvm_fault_buffer_entry_t *buffer_entry)
 {
@@ -280,6 +280,8 @@ void uvm_hal_pascal_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,

     // Automatically clear valid bit for the entry in the fault buffer
     uvm_hal_pascal_fault_buffer_entry_clear_valid(parent_gpu, index);
+
+    return NV_OK;
 }

 bool uvm_hal_pascal_fault_buffer_entry_is_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index)

@@ -1455,7 +1455,18 @@ static uvm_perf_thrashing_hint_t get_hint_for_migration_thrashing(va_space_thras
     hint.type = UVM_PERF_THRASHING_HINT_TYPE_NONE;

     closest_resident_id = uvm_va_block_page_get_closest_resident(va_block, page_index, requester);
+    if (uvm_va_block_is_hmm(va_block)) {
+        // HMM pages always start out resident on the CPU but may not be
+        // recorded in the va_block state because hmm_range_fault() or
+        // similar functions haven't been called to get an accurate snapshot
+        // of the Linux state. We can assume pages are CPU resident for the
+        // purpose of deciding where to migrate to reduce thrashing.
+        if (UVM_ID_IS_INVALID(closest_resident_id))
+            closest_resident_id = UVM_ID_CPU;
+    }
+    else {
         UVM_ASSERT(UVM_ID_IS_VALID(closest_resident_id));
+    }

     if (thrashing_processors_can_access(va_space, page_thrashing, preferred_location)) {
         // The logic in uvm_va_block_select_residency chooses the preferred

@@ -391,11 +391,13 @@ uvm_gpu_address_t uvm_push_inline_data_end(uvm_push_inline_data_t *data)
         inline_data_address = (NvU64) (uintptr_t)(push->next + 1);
     }
     else {
+        uvm_pushbuffer_t *pushbuffer = uvm_channel_get_pushbuffer(channel);
+
         // Offset of the inlined data within the push.
         inline_data_address = (push->next - push->begin + 1) * UVM_METHOD_SIZE;

         // Add GPU VA of the push begin
-        inline_data_address += uvm_pushbuffer_get_gpu_va_for_push(channel->pool->manager->pushbuffer, push);
+        inline_data_address += uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push);
     }

     // This will place a noop right before the inline data that was written.
@@ -438,10 +440,8 @@ NvU64 *uvm_push_timestamp(uvm_push_t *push)

     if (uvm_channel_is_ce(push->channel))
         gpu->parent->ce_hal->semaphore_timestamp(push, address.address);
-    else if (uvm_channel_is_sec2(push->channel))
-        gpu->parent->sec2_hal->semaphore_timestamp(push, address.address);
     else
-        UVM_ASSERT_MSG(0, "Semaphore release timestamp on an unsupported channel.\n");
+        gpu->parent->sec2_hal->semaphore_timestamp(push, address.address);

     return timestamp;
 }

@@ -64,6 +64,14 @@ typedef enum
     UVM_PUSH_FLAG_COUNT,
 } uvm_push_flag_t;

+struct uvm_push_crypto_bundle_struct {
+    // Initialization vector used to decrypt the push
+    UvmCslIv iv;
+
+    // Size of the pushbuffer that is encrypted/decrypted
+    NvU32 push_size;
+};
+
 struct uvm_push_struct
 {
     // Location of the first method of the push
@@ -369,11 +377,6 @@ static bool uvm_push_has_space(uvm_push_t *push, NvU32 free_space)
 NV_STATUS uvm_push_begin_fake(uvm_gpu_t *gpu, uvm_push_t *push);
 void uvm_push_end_fake(uvm_push_t *push);

-static bool uvm_push_is_fake(uvm_push_t *push)
-{
-    return !push->channel;
-}
-
 // Begin an inline data fragment in the push
 //
 // The inline data will be ignored by the GPU, but can be referenced from

@@ -40,10 +40,9 @@

 static NvU32 get_push_begin_size(uvm_channel_t *channel)
 {
-    if (uvm_channel_is_sec2(channel)) {
+    // SEC2 channels allocate CSL signature buffer at the beginning.
+    if (uvm_channel_is_sec2(channel))
         return UVM_CONF_COMPUTING_SIGN_BUF_MAX_SIZE + UVM_METHOD_SIZE;
-    }

     return 0;
 }
@@ -51,10 +50,14 @@ static NvU32 get_push_begin_size(uvm_channel_t *channel)
 // This is the storage required by a semaphore release.
 static NvU32 get_push_end_min_size(uvm_channel_t *channel)
 {
+    uvm_gpu_t *gpu = uvm_channel_get_gpu(channel);
+
+    if (uvm_conf_computing_mode_enabled(gpu)) {
+        if (uvm_channel_is_ce(channel)) {
-    if (uvm_channel_is_wlc(channel)) {
-            // Space (in bytes) used by uvm_push_end() on a Secure CE channel.
-            // Note that Secure CE semaphore release pushes two memset and one
+            // Space (in bytes) used by uvm_push_end() on a CE channel when
+            // the Confidential Computing feature is enabled.
+            //
+            // Note that CE semaphore release pushes two memset and one
             // encryption method on top of the regular release.
             // Memset size
             // -------------
@@ -75,43 +78,44 @@ static NvU32 get_push_end_min_size(uvm_channel_t *channel)
             //
             // TOTAL : 144 Bytes

             if (uvm_channel_is_wlc(channel)) {
                 // Same as CE + LCIC GPPut update + LCIC doorbell
                 return 24 + 144 + 24 + 24;
             }
-            else if (uvm_channel_is_secure_ce(channel)) {
+
             return 24 + 144;
-            }
-            // Space (in bytes) used by uvm_push_end() on a CE channel.
-            return 24;
         }
-        else if (uvm_channel_is_sec2(channel)) {
+
+        UVM_ASSERT(uvm_channel_is_sec2(channel));
+
         // A perfectly aligned inline buffer in SEC2 semaphore release.
         // We add UVM_METHOD_SIZE because of the NOP method to reserve
         // UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES (the inline buffer.)
         return 48 + UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES + UVM_METHOD_SIZE;
     }

-    return 0;
+    UVM_ASSERT(uvm_channel_is_ce(channel));
+
+    // Space (in bytes) used by uvm_push_end() on a CE channel.
+    return 24;
 }

 static NvU32 get_push_end_max_size(uvm_channel_t *channel)
 {
-    if (uvm_channel_is_ce(channel)) {
-        if (uvm_channel_is_wlc(channel)) {
     // WLC pushes are always padded to UVM_MAX_WLC_PUSH_SIZE
+    if (uvm_channel_is_wlc(channel))
         return UVM_MAX_WLC_PUSH_SIZE;
-        }
-        // Space (in bytes) used by uvm_push_end() on a CE channel.
-        return get_push_end_min_size(channel);
-    }
-    else if (uvm_channel_is_sec2(channel)) {

     // Space (in bytes) used by uvm_push_end() on a SEC2 channel.
     // Note that SEC2 semaphore release uses an inline buffer with alignment
     // requirements. This is the "worst" case semaphore_release storage.
+    if (uvm_channel_is_sec2(channel))
         return 48 + UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES + UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT;
-    }

-    return 0;
+    UVM_ASSERT(uvm_channel_is_ce(channel));
+
+    // Space (in bytes) used by uvm_push_end() on a CE channel.
+    return get_push_end_min_size(channel);
 }

 static NV_STATUS test_push_end_size(uvm_va_space_t *va_space)
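Worked sizes from the function above: a WLC channel's minimum uvm_push_end() footprint is 24 + 144 + 24 + 24 = 216 bytes (the plain CE release, the Confidential Computing appendix, the LCIC GPPut update, and the LCIC doorbell); a non-WLC CE channel under Confidential Computing needs 24 + 144 = 168 bytes; and without Confidential Computing a CE release is just 24 bytes.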
@@ -294,10 +298,19 @@ static NV_STATUS test_concurrent_pushes(uvm_va_space_t *va_space)
 {
     NV_STATUS status = NV_OK;
     uvm_gpu_t *gpu;
+    NvU32 i;
     uvm_push_t *pushes;
-    uvm_tracker_t tracker = UVM_TRACKER_INIT();
-    uvm_channel_type_t channel_type = UVM_CHANNEL_TYPE_GPU_INTERNAL;
+    uvm_tracker_t tracker;
+
+    // When the Confidential Computing feature is enabled, a channel reserved at
+    // the start of a push cannot be reserved again until that push ends. The
+    // test is waived, because the number of pushes it starts per pool exceeds
+    // the number of channels in the pool, so it would block indefinitely.
+    gpu = uvm_va_space_find_first_gpu(va_space);
+
+    if ((gpu != NULL) && uvm_conf_computing_mode_enabled(gpu))
+        return NV_OK;
+
+    uvm_tracker_init(&tracker);

     // As noted above, this test does unsafe things that would be detected by
     // lock tracking, opt-out.
@@ -310,16 +323,11 @@ static NV_STATUS test_concurrent_pushes(uvm_va_space_t *va_space)
     }

     for_each_va_space_gpu(gpu, va_space) {
-        NvU32 i;
-
-        // A secure channels reserved at the start of a push cannot be reserved
-        // again until that push ends. The test would block indefinitely
-        // if secure pools are not skipped, because the number of pushes started
-        // per pool exceeds the number of channels in the pool.
-        if (uvm_channel_type_requires_secure_pool(gpu, channel_type))
-            goto done;
         for (i = 0; i < UVM_PUSH_MAX_CONCURRENT_PUSHES; ++i) {
             uvm_push_t *push = &pushes[i];
-            status = uvm_push_begin(gpu->channel_manager, channel_type, push, "concurrent push %u", i);
+            status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, push, "concurrent push %u", i);
             TEST_CHECK_GOTO(status == NV_OK, done);
         }
         for (i = 0; i < UVM_PUSH_MAX_CONCURRENT_PUSHES; ++i) {
@@ -776,15 +784,6 @@ static NV_STATUS test_timestamp_on_gpu(uvm_gpu_t *gpu)
     NvU32 i;
     NvU64 last_stamp = 0;

-    // TODO: Bug 3988992: [UVM][HCC] RFE - Support encrypted semaphore for secure CE channels
-    // This test is waived when Confidential Computing is enabled because it
-    // assumes that CPU can directly read the result of a semaphore timestamp
-    // operation. Instead the operation needs to be follower up by an encrypt
-    // -decrypt trip to be accessible to CPU. This will be cleaner and simpler
-    // once encrypted semaphores are available.
-    if (uvm_conf_computing_mode_enabled(gpu))
-        return NV_OK;
-
     for (i = 0; i < 10; ++i) {
         status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "Releasing a timestamp");
         if (status != NV_OK)

@@ -449,21 +449,68 @@ static uvm_pushbuffer_chunk_t *gpfifo_to_chunk(uvm_pushbuffer_t *pushbuffer, uvm
     return chunk;
 }

-void uvm_pushbuffer_mark_completed(uvm_pushbuffer_t *pushbuffer, uvm_gpfifo_entry_t *gpfifo)
+static void decrypt_push(uvm_channel_t *channel, uvm_gpfifo_entry_t *gpfifo)
+{
+    NV_STATUS status;
+    NvU32 auth_tag_offset;
+    void *auth_tag_cpu_va;
+    void *push_protected_cpu_va;
+    void *push_unprotected_cpu_va;
+    NvU32 pushbuffer_offset = gpfifo->pushbuffer_offset;
+    NvU32 push_info_index = gpfifo->push_info - channel->push_infos;
+    uvm_pushbuffer_t *pushbuffer = uvm_channel_get_pushbuffer(channel);
+    uvm_push_crypto_bundle_t *crypto_bundle = channel->conf_computing.push_crypto_bundles + push_info_index;
+
+    if (channel->conf_computing.push_crypto_bundles == NULL)
+        return;
+
+    // When the crypto bundle is used, the push size cannot be zero
+    if (crypto_bundle->push_size == 0)
+        return;
+
+    UVM_ASSERT(!uvm_channel_is_wlc(channel));
+    UVM_ASSERT(!uvm_channel_is_lcic(channel));
+
+    push_protected_cpu_va = (char *)get_base_cpu_va(pushbuffer) + pushbuffer_offset;
+    push_unprotected_cpu_va = (char *)uvm_rm_mem_get_cpu_va(pushbuffer->memory_unprotected_sysmem) + pushbuffer_offset;
+    auth_tag_offset = push_info_index * UVM_CONF_COMPUTING_AUTH_TAG_SIZE;
+    auth_tag_cpu_va = (char *)uvm_rm_mem_get_cpu_va(channel->conf_computing.push_crypto_bundle_auth_tags) +
+                      auth_tag_offset;
+
+    status = uvm_conf_computing_cpu_decrypt(channel,
+                                            push_protected_cpu_va,
+                                            push_unprotected_cpu_va,
+                                            &crypto_bundle->iv,
+                                            crypto_bundle->push_size,
+                                            auth_tag_cpu_va);
+
+    // A decryption failure here is not fatal because it does not
+    // prevent UVM from running fine in the future and cannot be used
+    // maliciously to leak information or otherwise derail UVM from its
+    // regular duties.
+    UVM_ASSERT_MSG_RELEASE(status == NV_OK, "Pushbuffer decryption failure: %s\n", nvstatusToString(status));
+
+    // Avoid reusing the bundle across multiple pushes
+    crypto_bundle->push_size = 0;
+}
+
+void uvm_pushbuffer_mark_completed(uvm_channel_t *channel, uvm_gpfifo_entry_t *gpfifo)
 {
     uvm_pushbuffer_chunk_t *chunk;
-    uvm_push_info_t *push_info = gpfifo->push_info;
     bool need_to_update_chunk = false;
+    uvm_push_info_t *push_info = gpfifo->push_info;
+    uvm_pushbuffer_t *pushbuffer = uvm_channel_get_pushbuffer(channel);

     UVM_ASSERT(gpfifo->type == UVM_GPFIFO_ENTRY_TYPE_NORMAL);

     chunk = gpfifo_to_chunk(pushbuffer, gpfifo);

-    if (push_info->on_complete != NULL)
+    if (push_info->on_complete != NULL) {
+        decrypt_push(channel, gpfifo);
         push_info->on_complete(push_info->on_complete_data);

         push_info->on_complete = NULL;
         push_info->on_complete_data = NULL;
+    }

     uvm_spin_lock(&pushbuffer->lock);

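Context for the new decrypt_push(): under Confidential Computing the GPU executes pushes from protected memory and only an encrypted copy sits in unprotected sysmem, so a CPU-side on_complete callback can inspect the push only after decrypting that copy. An assumed call flow, condensed from this hunk (the outer caller is not shown in this diff):

    // channel completion processing (assumed caller)
    //   -> uvm_pushbuffer_mark_completed(channel, gpfifo)
    //        -> decrypt_push(channel, gpfifo)   // no-op unless push_crypto_bundles is set
    //        -> push_info->on_complete(push_info->on_complete_data)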
@@ -161,22 +161,22 @@
 // * WFI: 8B
 // Total: 64B
 //
-// Push space needed for secure work launch is 224B. The push is constructed
+// Push space needed for secure work launch is 364B. The push is constructed
 // in 'internal_channel_submit_work_indirect' and 'uvm_channel_end_push'
 // * CE decrypt (of indirect PB): 56B
 // * 2*semaphore release (indirect GPFIFO entry): 2*24B
 // * memset_8 (indirect GPFIFO entry): 44B
 // * semaphore release (indirect GPPUT): 24B
 // * semaphore release (indirect doorbell): 24B
 // Appendix added in 'uvm_channel_end_push':
-// * semaphore release (WLC tracking): 168B
+// * semaphore increment (memcopy): 24B
+// * semaphore release (payload): 24B
+// * notifier memset: 40B
+// * payload encryption: 64B
+// * notifier memset: 40B
 // * semaphore increment (LCIC GPPUT): 24B
 // * semaphore release (LCIC doorbell): 24B
-// Total: 368B
-#define UVM_MAX_WLC_PUSH_SIZE (368)
+// Total: 364B
+#define UVM_MAX_WLC_PUSH_SIZE (364)

 // Push space needed for static LCIC schedule, as initialized in
 // 'setup_lcic_schedule':
@@ -184,7 +184,7 @@
 // * semaphore increment (WLC GPPUT): 24B
 // * semaphore increment (WLC GPPUT): 24B
-// * semaphore increment (LCIC tracking): 160B
+// * semaphore increment (memcopy): 24B
 // * semaphore increment (payload): 24B
 // * notifier memcopy: 36B
 // * payload encryption: 64B
 // * notifier memcopy: 36B
@@ -258,7 +258,7 @@ NV_STATUS uvm_pushbuffer_begin_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *pu

 // Complete a pending push
 // Updates the chunk state the pending push used
-void uvm_pushbuffer_mark_completed(uvm_pushbuffer_t *pushbuffer, uvm_gpfifo_entry_t *gpfifo);
+void uvm_pushbuffer_mark_completed(uvm_channel_t *channel, uvm_gpfifo_entry_t *gpfifo);

 // Get the GPU VA for an ongoing push
 NvU64 uvm_pushbuffer_get_gpu_va_for_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push);

@@ -213,6 +213,7 @@ done:
 typedef enum
 {
     MEM_ALLOC_TYPE_SYSMEM_DMA,
+    MEM_ALLOC_TYPE_SYSMEM_PROTECTED,
     MEM_ALLOC_TYPE_VIDMEM_PROTECTED
 } mem_alloc_type_t;

@@ -269,14 +270,20 @@ static NV_STATUS alloc_and_init_mem(uvm_gpu_t *gpu, uvm_mem_t **mem, size_t size
     *mem = NULL;

     if (type == MEM_ALLOC_TYPE_VIDMEM_PROTECTED) {
-        TEST_NV_CHECK_RET(uvm_mem_alloc_vidmem_protected(size, gpu, mem));
+        TEST_NV_CHECK_RET(uvm_mem_alloc_vidmem(size, gpu, mem));
         TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(*mem, gpu), err);
         TEST_NV_CHECK_GOTO(ce_memset_gpu(gpu, *mem, size, 0xdead), err);
     }
     else {
+        if (type == MEM_ALLOC_TYPE_SYSMEM_DMA) {
             TEST_NV_CHECK_RET(uvm_mem_alloc_sysmem_dma(size, gpu, NULL, mem));
-            TEST_NV_CHECK_GOTO(uvm_mem_map_cpu_kernel(*mem), err);
             TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(*mem, gpu), err);
+        }
+        else {
+            TEST_NV_CHECK_RET(uvm_mem_alloc_sysmem(size, NULL, mem));
+        }

+        TEST_NV_CHECK_GOTO(uvm_mem_map_cpu_kernel(*mem), err);
         write_range_cpu(*mem, size, 0xdeaddead);
     }

@@ -341,9 +348,9 @@ static NV_STATUS cpu_decrypt(uvm_channel_t *channel,
     return NV_OK;
 }

-// gpu_encrypt uses a secure CE for encryption (instead of SEC2). SEC2 does not
-// support encryption. The following function is copied from uvm_ce_test.c and
-// adapted to SEC2 tests.
+// gpu_encrypt uses the Copy Engine for encryption, instead of SEC2. SEC2 does
+// not support encryption. The following function is copied from uvm_ce_test.c
+// and adapted to SEC2 tests.
 static void gpu_encrypt(uvm_push_t *push,
                         uvm_mem_t *dst_mem,
                         uvm_mem_t *src_mem,
@@ -405,48 +412,6 @@ static void gpu_decrypt(uvm_push_t *push,
     }
 }

-// This test only uses sysmem so that we can use the CPU for encryption and SEC2
-// for decryption, i.e., the test doesn't depend on any other GPU engine for
-// the encryption operation (refer to test_cpu_to_gpu_roundtrip()). This is not
-// how SEC2 is used in the driver. The intended SEC2 usage is to decrypt from
-// unprotected sysmem to protected vidmem, which is tested in
-// test_cpu_to_gpu_roundtrip().
-static NV_STATUS test_cpu_to_gpu_sysmem(uvm_gpu_t *gpu, size_t copy_size, size_t size)
-{
-    NV_STATUS status = NV_OK;
-    uvm_mem_t *src_plain = NULL;
-    uvm_mem_t *cipher = NULL;
-    uvm_mem_t *dst_plain = NULL;
-    uvm_mem_t *auth_tag_mem = NULL;
-    size_t auth_tag_buffer_size = (size / copy_size) * UVM_CONF_COMPUTING_AUTH_TAG_SIZE;
-    uvm_push_t push;
-
-    TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &src_plain, size, MEM_ALLOC_TYPE_SYSMEM_DMA), out);
-    TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &dst_plain, size, MEM_ALLOC_TYPE_SYSMEM_DMA), out);
-    TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &cipher, size, MEM_ALLOC_TYPE_SYSMEM_DMA), out);
-    TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &auth_tag_mem, auth_tag_buffer_size, MEM_ALLOC_TYPE_SYSMEM_DMA), out);
-
-    write_range_cpu(src_plain, size, uvm_get_stale_thread_id());
-    write_range_cpu(dst_plain, size, 0xA5A5A5A5);
-
-    TEST_NV_CHECK_GOTO(uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_SEC2, &push, "enc(cpu)_dec(gpu)"), out);
-
-    cpu_encrypt(push.channel, cipher, src_plain, auth_tag_mem, size, copy_size);
-    gpu_decrypt(&push, dst_plain, cipher, auth_tag_mem, size, copy_size);
-
-    uvm_push_end_and_wait(&push);
-
-    TEST_CHECK_GOTO(mem_match(src_plain, dst_plain), out);
-
-out:
-    uvm_mem_free(auth_tag_mem);
-    uvm_mem_free(cipher);
-    uvm_mem_free(dst_plain);
-    uvm_mem_free(src_plain);
-
-    return status;
-}
-
 // This test depends on the CE for the encryption, so we assume tests from
 // uvm_ce_test.c have successfully passed.
 static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu, size_t copy_size, size_t size)
@@ -461,19 +426,16 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu, size_t copy_size, siz
     size_t auth_tag_buffer_size = (size / copy_size) * UVM_CONF_COMPUTING_AUTH_TAG_SIZE;
     uvm_push_t push;
     UvmCslIv *decrypt_iv;
-    uvm_tracker_t tracker;

     decrypt_iv = uvm_kvmalloc_zero((size / copy_size) * sizeof(UvmCslIv));
     if (!decrypt_iv)
         return NV_ERR_NO_MEMORY;

-    uvm_tracker_init(&tracker);
-
-    TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &src_plain, size, MEM_ALLOC_TYPE_SYSMEM_DMA), out);
+    TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &src_plain, size, MEM_ALLOC_TYPE_SYSMEM_PROTECTED), out);
     TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &src_cipher, size, MEM_ALLOC_TYPE_SYSMEM_DMA), out);
     TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &dst_cipher, size, MEM_ALLOC_TYPE_SYSMEM_DMA), out);
     TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &dst_plain, size, MEM_ALLOC_TYPE_VIDMEM_PROTECTED), out);
-    TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &dst_plain_cpu, size, MEM_ALLOC_TYPE_SYSMEM_DMA), out);
+    TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &dst_plain_cpu, size, MEM_ALLOC_TYPE_SYSMEM_PROTECTED), out);
     TEST_NV_CHECK_GOTO(alloc_and_init_mem(gpu, &auth_tag_mem, auth_tag_buffer_size, MEM_ALLOC_TYPE_SYSMEM_DMA), out);

     write_range_cpu(src_plain, size, uvm_get_stale_thread_id());
@@ -483,14 +445,12 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu, size_t copy_size, siz
     cpu_encrypt(push.channel, src_cipher, src_plain, auth_tag_mem, size, copy_size);
     gpu_decrypt(&push, dst_plain, src_cipher, auth_tag_mem, size, copy_size);

-    uvm_push_end(&push);
-    TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), out);
+    // Wait for SEC2 before launching the CE part.
+    // SEC2 is only allowed to release semaphores in unprotected sysmem,
+    // and CE can only acquire semaphores in protected vidmem.
+    TEST_NV_CHECK_GOTO(uvm_push_end_and_wait(&push), out);

-    TEST_NV_CHECK_GOTO(uvm_push_begin_acquire(gpu->channel_manager,
-                                              UVM_CHANNEL_TYPE_GPU_TO_CPU,
-                                              &tracker,
-                                              &push,
-                                              "enc(gpu)_dec(cpu)"),
+    TEST_NV_CHECK_GOTO(uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_TO_CPU, &push, "enc(gpu)_dec(cpu)"),
                        out);

     gpu_encrypt(&push, dst_cipher, dst_plain, decrypt_iv, auth_tag_mem, size, copy_size);
@@ -521,8 +481,6 @@ out:

     uvm_kvfree(decrypt_iv);

-    uvm_tracker_deinit(&tracker);
-
     return status;
 }

@@ -545,7 +503,6 @@ static NV_STATUS test_encryption_decryption(uvm_gpu_t *gpu)

         UVM_ASSERT(size % copy_sizes[i] == 0);

-        TEST_NV_CHECK_RET(test_cpu_to_gpu_sysmem(gpu, copy_sizes[i], size));
         TEST_NV_CHECK_RET(test_cpu_to_gpu_roundtrip(gpu, copy_sizes[i], size));
     }

@@ -229,6 +229,24 @@ static void unmap_user_pages(struct page **pages, void *addr, NvU64 size)
     uvm_kvfree(pages);
 }

+// This must be called with the mmap_lock held in read mode or better.
+static NV_STATUS check_vmas(struct mm_struct *mm, NvU64 start_va, NvU64 size)
+{
+    struct vm_area_struct *vma;
+    NvU64 addr = start_va;
+    NvU64 region_end = start_va + size;
+
+    do {
+        vma = find_vma(mm, addr);
+        if (!vma || !(addr >= vma->vm_start) || uvm_file_is_nvidia_uvm(vma->vm_file))
+            return NV_ERR_INVALID_ARGUMENT;
+
+        addr = vma->vm_end;
+    } while (addr < region_end);
+
+    return NV_OK;
+}
+
 // Map virtual memory of data from [user_va, user_va + size) of current process into kernel.
 // Sets *addr to kernel mapping and *pages to the array of struct pages that contain the memory.
 static NV_STATUS map_user_pages(NvU64 user_va, NvU64 size, void **addr, struct page ***pages)
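Why check_vmas() exists: this refactor appears to track the kernel-side removal of the vmas output array from pin_user_pages(), so the per-page uvm_file_is_nvidia_uvm(vmas[i]->vm_file) rejection done after pinning can no longer be expressed; the VMA walk now happens up front instead. Condensed ordering from the following hunks (illustrative, error handling elided):

    nv_mmap_read_lock(current->mm);
    status = check_vmas(current->mm, user_va, size);   // reject UVM-owned VMAs before pinning
    if (status == NV_OK)
        ret = NV_PIN_USER_PAGES(user_va, num_pages, FOLL_WRITE, *pages, NULL);
    nv_mmap_read_unlock(current->mm);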
@@ -237,7 +255,6 @@ static NV_STATUS map_user_pages(NvU64 user_va, NvU64 size, void **addr, struct p
     long ret = 0;
     long num_pages;
     long i;
-    struct vm_area_struct **vmas = NULL;

     *addr = NULL;
     *pages = NULL;
@@ -254,22 +271,30 @@ static NV_STATUS map_user_pages(NvU64 user_va, NvU64 size, void **addr, struct p
         goto fail;
     }

-    vmas = uvm_kvmalloc(sizeof(struct vm_area_struct *) * num_pages);
-    if (vmas == NULL) {
-        status = NV_ERR_NO_MEMORY;
+    // Although uvm_down_read_mmap_lock() is preferable due to its participation
+    // in the UVM lock dependency tracker, it cannot be used here. That's
+    // because pin_user_pages() may fault in HMM pages which are GPU-resident.
+    // When that happens, the UVM page fault handler would record another
+    // mmap_read_lock() on the same thread as this one, leading to a false
+    // positive lock dependency report.
+    //
+    // Therefore, use the lower level nv_mmap_read_lock() here.
+    nv_mmap_read_lock(current->mm);
+    status = check_vmas(current->mm, user_va, size);
+    if (status != NV_OK) {
+        nv_mmap_read_unlock(current->mm);
         goto fail;
     }

-    nv_mmap_read_lock(current->mm);
-    ret = NV_PIN_USER_PAGES(user_va, num_pages, FOLL_WRITE, *pages, vmas);
+    ret = NV_PIN_USER_PAGES(user_va, num_pages, FOLL_WRITE, *pages, NULL);
     nv_mmap_read_unlock(current->mm);

     if (ret != num_pages) {
         status = NV_ERR_INVALID_ARGUMENT;
         goto fail;
     }

     for (i = 0; i < num_pages; i++) {
-        if (page_count((*pages)[i]) > MAX_PAGE_COUNT || uvm_file_is_nvidia_uvm(vmas[i]->vm_file)) {
+        if (page_count((*pages)[i]) > MAX_PAGE_COUNT) {
             status = NV_ERR_INVALID_ARGUMENT;
             goto fail;
         }
@@ -279,15 +304,12 @@ static NV_STATUS map_user_pages(NvU64 user_va, NvU64 size, void **addr, struct p
     if (*addr == NULL)
         goto fail;

-    uvm_kvfree(vmas);
     return NV_OK;

 fail:
     if (*pages == NULL)
         return status;

-    uvm_kvfree(vmas);
-
     if (ret > 0)
         uvm_put_user_pages_dirty(*pages, ret);
     else if (ret < 0)

@@ -69,6 +69,14 @@ static NV_STATUS test_tracker_completion(uvm_va_space_t *va_space)
     gpu = uvm_va_space_find_first_gpu(va_space);
     TEST_CHECK_RET(gpu != NULL);

+    // TODO: Bug 4008734: [UVM][HCC] Extend secure tracking semaphore mechanism
+    //       to all semaphore
+    // This test allocates semaphore in vidmem and then releases it from the CPU
+    // SEC2 channels cannot target semaphores in vidmem. Moreover, CPU cannot
+    // directly release values to vidmem for CE channels.
+    if (uvm_conf_computing_mode_enabled(gpu))
+        return NV_OK;
+
     TEST_NV_CHECK_RET(uvm_gpu_semaphore_alloc(gpu->semaphore_pool, &sema));

     uvm_tracker_init(&tracker);

@@ -2083,12 +2083,6 @@ static uvm_processor_id_t block_page_get_closest_resident_in_mask(uvm_va_block_t
         return id;
     }

-    // HMM va_blocks don't know if a page is CPU resident until either
-    // migrate_vma_setup() or hmm_range_fault() is called. If a page isn't
-    // resident anywhere, assume it is CPU resident.
-    if (uvm_va_block_is_hmm(va_block))
-        return UVM_ID_CPU;
-
     return UVM_ID_INVALID;
 }

@@ -2888,7 +2882,7 @@ static uvm_va_block_region_t block_phys_contig_region(uvm_va_block_t *block,
 {
     if (UVM_ID_IS_CPU(resident_id)) {
         uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index);
-        return uvm_va_block_region(page_index, page_index + uvm_cpu_chunk_num_pages(chunk));
+        return uvm_cpu_chunk_block_region(block, chunk, page_index);
     }
     else {
         uvm_chunk_size_t chunk_size;
@@ -3061,7 +3055,7 @@ static NV_STATUS conf_computing_copy_pages_finish(uvm_va_block_t *block,
     void *auth_tag_buffer_base = uvm_mem_get_cpu_addr_kernel(dma_buffer->auth_tag);
     void *staging_buffer_base = uvm_mem_get_cpu_addr_kernel(dma_buffer->alloc);

-    UVM_ASSERT(uvm_channel_is_secure(push->channel));
+    UVM_ASSERT(uvm_conf_computing_mode_enabled(push->gpu));

     if (UVM_ID_IS_GPU(copy_state->dst.id))
         return NV_OK;
@@ -3112,7 +3106,7 @@ static void block_copy_push(uvm_va_block_t *block,

     uvm_push_set_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);

-    if (uvm_channel_is_secure(push->channel)) {
+    if (uvm_conf_computing_mode_enabled(gpu)) {
         if (UVM_ID_IS_CPU(copy_state->src.id))
             conf_computing_block_copy_push_cpu_to_gpu(block, copy_state, region, push);
         else
@@ -3140,19 +3134,18 @@ static NV_STATUS block_copy_end_push(uvm_va_block_t *block,
     // at that point.
     uvm_push_end(push);

-    if ((push_status == NV_OK) && uvm_channel_is_secure(push->channel))
+    if ((push_status == NV_OK) && uvm_conf_computing_mode_enabled(push->gpu))
         push_status = conf_computing_copy_pages_finish(block, copy_state, push);

     tracker_status = uvm_tracker_add_push_safe(copy_tracker, push);
     if (push_status == NV_OK)
         push_status = tracker_status;

-    if (uvm_channel_is_secure(push->channel)) {
-        uvm_gpu_t *gpu = uvm_push_get_gpu(push);
+    if (uvm_conf_computing_mode_enabled(push->gpu)) {
         uvm_tracker_t local_tracker = UVM_TRACKER_INIT();

         uvm_tracker_overwrite_with_push(&local_tracker, push);
-        uvm_conf_computing_dma_buffer_free(&gpu->conf_computing.dma_buffer_pool,
+        uvm_conf_computing_dma_buffer_free(&push->gpu->conf_computing.dma_buffer_pool,
                                            copy_state->dma_buffer,
                                            &local_tracker);
         copy_state->dma_buffer = NULL;
@@ -7189,6 +7182,7 @@ static NV_STATUS block_map_gpu_to(uvm_va_block_t *va_block,
 }

 static void map_get_allowed_destinations(uvm_va_block_t *block,
+                                         uvm_va_block_context_t *va_block_context,
                                          const uvm_va_policy_t *policy,
                                          uvm_processor_id_t id,
                                          uvm_processor_mask_t *allowed_mask)
@@ -7200,7 +7194,10 @@ static void map_get_allowed_destinations(uvm_va_block_t *block,
         uvm_processor_mask_zero(allowed_mask);
         uvm_processor_mask_set(allowed_mask, policy->preferred_location);
     }
-    else if ((uvm_va_policy_is_read_duplicate(policy, va_space) || uvm_id_equal(policy->preferred_location, id)) &&
+    else if ((uvm_va_policy_is_read_duplicate(policy, va_space) ||
+              (uvm_id_equal(policy->preferred_location, id) &&
+               !is_uvm_fault_force_sysmem_set() &&
+               !uvm_hmm_must_use_sysmem(block, va_block_context))) &&
              uvm_va_space_processor_has_memory(va_space, id)) {
         // When operating under read-duplication we should only map the local
         // processor to cause fault-and-duplicate of remote pages.
@@ -7285,7 +7282,7 @@ NV_STATUS uvm_va_block_map(uvm_va_block_t *va_block,

     // Map per resident location so we can more easily detect physically-
     // contiguous mappings.
-    map_get_allowed_destinations(va_block, va_block_context->policy, id, &allowed_destinations);
+    map_get_allowed_destinations(va_block, va_block_context, va_block_context->policy, id, &allowed_destinations);

     for_each_closest_id(resident_id, &allowed_destinations, id, va_space) {
         if (UVM_ID_IS_CPU(id)) {
@@ -9614,16 +9611,10 @@ static uvm_prot_t compute_new_permission(uvm_va_block_t *va_block,
         if (uvm_processor_mask_empty(&revoke_processors))
             new_prot = UVM_PROT_READ_WRITE;
     }
-    if (logical_prot == UVM_PROT_READ_WRITE_ATOMIC) {
-        // HMM allocations with logical read/write/atomic permission can be
-        // upgraded without notifying the driver so assume read/write/atomic
-        // even if the fault is only for reading.
-        if (new_prot == UVM_PROT_READ_WRITE ||
-            (UVM_ID_IS_CPU(fault_processor_id) && uvm_va_block_is_hmm(va_block))) {
+    if (logical_prot == UVM_PROT_READ_WRITE_ATOMIC && new_prot == UVM_PROT_READ_WRITE) {
         if (uvm_processor_mask_test(&va_space->has_native_atomics[uvm_id_value(new_residency)], fault_processor_id))
             new_prot = UVM_PROT_READ_WRITE_ATOMIC;
-        }
     }

     return new_prot;
 }
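Net behavior change in compute_new_permission() above: previously a CPU fault on an HMM block was promoted toward read-write-atomic even on a read, on the assumption that Linux may upgrade the CPU PTE without notifying the driver. After the change the promotion happens only when the computed protection is already read-write and the faulting processor has native atomics to the new residency; the HMM concern moves into uvm_va_block_page_compute_highest_permission() below, which now refuses remote atomic mappings on HMM blocks via the "no other write mappings" path.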
@@ -9859,8 +9850,6 @@ out:
     return status == NV_OK ? tracker_status : status;
 }

-// TODO: Bug 1750144: check logical permissions from HMM to know what's the
-// maximum allowed.
 uvm_prot_t uvm_va_block_page_compute_highest_permission(uvm_va_block_t *va_block,
                                                         uvm_processor_id_t processor_id,
                                                         uvm_page_index_t page_index)
@@ -9937,14 +9926,18 @@ uvm_prot_t uvm_va_block_page_compute_highest_permission(uvm_va_block_t *va_block
     // Exclude the processor for which the mapping protections are being computed
     uvm_processor_mask_clear(&write_mappings, processor_id);

-    // At this point, any processor with atomic mappings either has native atomics support to the
-    // processor with the resident copy or has disabled system-wide atomics. If the requesting
-    // processor has disabled system-wide atomics or has native atomics to that processor, we can
-    // map with ATOMIC privileges. Likewise, if there are no other processors with WRITE or ATOMIC
-    // mappings, we can map with ATOMIC privileges.
+    // At this point, any processor with atomic mappings either has native
+    // atomics support to the processor with the resident copy or has
+    // disabled system-wide atomics. If the requesting processor has
+    // disabled system-wide atomics or has native atomics to that processor,
+    // we can map with ATOMIC privileges. Likewise, if there are no other
+    // processors with WRITE or ATOMIC mappings, we can map with ATOMIC
+    // privileges. For HMM, don't allow GPU atomic access to remote mapped
+    // system memory even if there are no write mappings since CPU access
+    // can be upgraded without notification.
     if (!uvm_processor_mask_test(&va_space->system_wide_atomics_enabled_processors, processor_id) ||
         uvm_processor_mask_test(&va_space->has_native_atomics[uvm_id_value(residency)], processor_id) ||
-        uvm_processor_mask_empty(&write_mappings)) {
+        (uvm_processor_mask_empty(&write_mappings) && !uvm_va_block_is_hmm(va_block))) {
         return UVM_PROT_READ_WRITE_ATOMIC;
     }

@@ -418,15 +418,6 @@ void uvm_va_space_destroy(uvm_va_space_t *va_space)
     uvm_global_processor_mask_t retained_gpus;
     LIST_HEAD(deferred_free_list);

-    // Normally we'd expect this to happen as part of uvm_mm_release()
-    // but if userspace never initialized uvm_mm_fd that won't happen.
-    // We don't have to take the va_space_mm spinlock and update state
-    // here because we know no other thread can be in or subsequently
-    // call uvm_api_mm_initialize successfully because the UVM
-    // file-descriptor has been released.
-    if (va_space->va_space_mm.state == UVM_VA_SPACE_MM_STATE_UNINITIALIZED)
-        uvm_va_space_mm_unregister(va_space);
-
     // Remove the VA space from the global list before we start tearing things
     // down so other threads can't see the VA space in a partially-valid state.
     uvm_mutex_lock(&g_uvm_global.va_spaces.lock);
@@ -532,7 +523,14 @@ void uvm_va_space_destroy(uvm_va_space_t *va_space)

     uvm_deferred_free_object_list(&deferred_free_list);

-    // MM FD teardown should already have destroyed va_space_mm
+    // Normally we'd expect this to happen as part of uvm_mm_release()
+    // but if userspace never initialized uvm_mm_fd that won't happen.
+    // We don't have to take the va_space_mm spinlock and update state
+    // here because we know no other thread can be in or subsequently
+    // call uvm_api_mm_initialize successfully because the UVM
+    // file-descriptor has been released.
+    if (va_space->va_space_mm.state == UVM_VA_SPACE_MM_STATE_UNINITIALIZED)
+        uvm_va_space_mm_unregister(va_space);
     UVM_ASSERT(!uvm_va_space_mm_alive(&va_space->va_space_mm));

     uvm_mutex_lock(&g_uvm_global.global_lock);
@@ -25,7 +25,8 @@
 #include "uvm_global.h"
 #include "uvm_gpu.h"
 #include "uvm_hal.h"
 #include "uvm_push.h"
+#include "uvm_conf_computing.h"
 #include "nv_uvm_types.h"
 #include "hwref/volta/gv100/dev_fault.h"
 #include "hwref/volta/gv100/dev_fb.h"
 #include "clc369.h"
@ -246,6 +247,20 @@ static NvU32 *get_fault_buffer_entry(uvm_parent_gpu_t *parent_gpu, NvU32 index)
|
||||
return fault_entry;
|
||||
}
|
||||
|
||||
// See uvm_pascal_fault_buffer.c::get_fault_buffer_entry_metadata
|
||||
static UvmFaultMetadataPacket *get_fault_buffer_entry_metadata(uvm_parent_gpu_t *parent_gpu, NvU32 index)
|
||||
{
|
||||
UvmFaultMetadataPacket *fault_entry_metadata;
|
||||
|
||||
UVM_ASSERT(index < parent_gpu->fault_buffer_info.replayable.max_faults);
|
||||
UVM_ASSERT(!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu));
|
||||
|
||||
fault_entry_metadata = parent_gpu->fault_buffer_info.rm_info.replayable.bufferMetadata;
|
||||
UVM_ASSERT(fault_entry_metadata != NULL);
|
||||
|
||||
return fault_entry_metadata + index;
|
||||
}
|
||||
|
||||
static void parse_fault_entry_common(uvm_parent_gpu_t *parent_gpu,
|
||||
NvU32 *fault_entry,
|
||||
uvm_fault_buffer_entry_t *buffer_entry)
|
||||
@ -323,24 +338,47 @@ static void parse_fault_entry_common(uvm_parent_gpu_t *parent_gpu,
|
||||
UVM_ASSERT_MSG(replayable_fault_enabled, "Fault with REPLAYABLE_FAULT_EN bit unset\n");
|
||||
}
|
||||
|
||||
void uvm_hal_volta_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu,
|
||||
NV_STATUS uvm_hal_volta_fault_buffer_parse_replayable_entry(uvm_parent_gpu_t *parent_gpu,
|
||||
NvU32 index,
|
||||
uvm_fault_buffer_entry_t *buffer_entry)
|
||||
{
|
||||
fault_buffer_entry_c369_t entry;
|
||||
NvU32 *fault_entry;
|
||||
BUILD_BUG_ON(NVC369_BUF_SIZE > UVM_GPU_MMU_MAX_FAULT_PACKET_SIZE);
|
||||
|
||||
BUILD_BUG_ON(sizeof(entry) > UVM_GPU_MMU_MAX_FAULT_PACKET_SIZE);
|
||||
|
||||
// Valid bit must be set before this function is called
|
||||
UVM_ASSERT(parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, index));
|
||||
|
||||
fault_entry = get_fault_buffer_entry(parent_gpu, index);
|
||||
|
||||
// When Confidential Computing is enabled, faults are encrypted by RM, so
|
||||
// they need to be decrypted before they can be parsed
|
||||
if (!uvm_parent_gpu_replayable_fault_buffer_is_uvm_owned(parent_gpu)) {
|
||||
NV_STATUS status;
|
||||
UvmFaultMetadataPacket *fault_entry_metadata = get_fault_buffer_entry_metadata(parent_gpu, index);
|
||||
|
||||
status = uvm_conf_computing_fault_decrypt(parent_gpu,
|
||||
&entry,
|
||||
fault_entry,
|
||||
fault_entry_metadata->authTag,
|
||||
fault_entry_metadata->valid);
|
||||
if (status != NV_OK) {
|
||||
uvm_global_set_fatal_error(status);
|
||||
return status;
|
||||
}
|
||||
|
||||
fault_entry = (NvU32 *) &entry;
|
||||
}
|
||||
|
||||
parse_fault_entry_common(parent_gpu, fault_entry, buffer_entry);
|
||||
|
||||
UVM_ASSERT(buffer_entry->is_replayable);
|
||||
|
||||
// Automatically clear valid bit for the entry in the fault buffer
|
||||
parent_gpu->fault_buffer_hal->entry_clear_valid(parent_gpu, index);
|
||||
|
||||
return NV_OK;
|
||||
}
|
||||
|
||||
void uvm_hal_volta_fault_buffer_parse_non_replayable_entry(uvm_parent_gpu_t *parent_gpu,
|
||||
|
@ -939,6 +939,12 @@ nvswitch_os_get_os_version
    NvU32 *pBuildNum
);

NvlStatus
nvswitch_os_get_pid
(
    NvU32 *pPid
);

void
nvswitch_lib_smbpbi_log_sxid
(
211
kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h
Normal file
@ -0,0 +1,211 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_AEAD_H
#define CRYPTLIB_AEAD_H

/*=====================================================================================
 * Authenticated Encryption with Associated Data (AEAD) Cryptography Primitives
 *=====================================================================================
 */

#if LIBSPDM_AEAD_GCM_SUPPORT
/**
 * Performs AEAD AES-GCM authenticated encryption on a data buffer and additional authenticated
 * data.
 *
 * iv_size must be 12, otherwise false is returned.
 * key_size must be 16 or 32, otherwise false is returned.
 * tag_size must be 12, 13, 14, 15, or 16, otherwise false is returned.
 *
 * @param[in]  key            Pointer to the encryption key.
 * @param[in]  key_size       Size of the encryption key in bytes.
 * @param[in]  iv             Pointer to the IV value.
 * @param[in]  iv_size        Size of the IV value in bytes.
 * @param[in]  a_data         Pointer to the additional authenticated data.
 * @param[in]  a_data_size    Size of the additional authenticated data in bytes.
 * @param[in]  data_in        Pointer to the input data buffer to be encrypted.
 * @param[in]  data_in_size   Size of the input data buffer in bytes.
 * @param[out] tag_out        Pointer to a buffer that receives the authentication tag output.
 * @param[in]  tag_size       Size of the authentication tag in bytes.
 * @param[out] data_out       Pointer to a buffer that receives the encryption output.
 * @param[out] data_out_size  Size of the output data buffer in bytes.
 *
 * @retval true   AEAD AES-GCM authenticated encryption succeeded.
 * @retval false  AEAD AES-GCM authenticated encryption failed.
 **/
extern bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size,
                                         const uint8_t *iv, size_t iv_size,
                                         const uint8_t *a_data, size_t a_data_size,
                                         const uint8_t *data_in, size_t data_in_size,
                                         uint8_t *tag_out, size_t tag_size,
                                         uint8_t *data_out, size_t *data_out_size);

/**
 * Performs AEAD AES-GCM authenticated decryption on a data buffer and additional authenticated
 * data.
 *
 * iv_size must be 12, otherwise false is returned.
 * key_size must be 16 or 32, otherwise false is returned.
 * tag_size must be 12, 13, 14, 15, or 16, otherwise false is returned.
 *
 * If data verification fails, false is returned.
 *
 * @param[in]  key            Pointer to the encryption key.
 * @param[in]  key_size       Size of the encryption key in bytes.
 * @param[in]  iv             Pointer to the IV value.
 * @param[in]  iv_size        Size of the IV value in bytes.
 * @param[in]  a_data         Pointer to the additional authenticated data.
 * @param[in]  a_data_size    Size of the additional authenticated data in bytes.
 * @param[in]  data_in        Pointer to the input data buffer to be decrypted.
 * @param[in]  data_in_size   Size of the input data buffer in bytes.
 * @param[in]  tag            Pointer to a buffer that contains the authentication tag.
 * @param[in]  tag_size       Size of the authentication tag in bytes.
 * @param[out] data_out       Pointer to a buffer that receives the decryption output.
 * @param[out] data_out_size  Size of the output data buffer in bytes.
 *
 * @retval true   AEAD AES-GCM authenticated decryption succeeded.
 * @retval false  AEAD AES-GCM authenticated decryption failed.
 **/
extern bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size,
                                         const uint8_t *iv, size_t iv_size,
                                         const uint8_t *a_data, size_t a_data_size,
                                         const uint8_t *data_in, size_t data_in_size,
                                         const uint8_t *tag, size_t tag_size,
                                         uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_GCM_SUPPORT */

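/*
 * Editor's usage sketch (illustrative only, not part of the DMTF header):
 * a minimal AES-256-GCM round trip through the two declarations above,
 * assuming a libspdm build with LIBSPDM_AEAD_GCM_SUPPORT enabled and a
 * caller that supplies a fresh random key and IV. Guarded out so it is
 * never compiled into the module.
 */
#if 0
static bool aead_gcm_roundtrip_sketch(void)
{
    const uint8_t key[32] = {0};   /* 32-byte key selects AES-256; use a real random key */
    const uint8_t iv[12]  = {0};   /* iv_size must be 12; never reuse an IV with one key */
    const uint8_t aad[4]  = {'S', 'P', 'D', 'M'};
    const uint8_t msg[16] = "secured message";
    uint8_t tag[16];               /* tag_size may be 12..16; 16 is the full tag */
    uint8_t cipher[sizeof(msg)];
    uint8_t plain[sizeof(msg)];
    size_t cipher_size = sizeof(cipher);
    size_t plain_size = sizeof(plain);
    size_t i;

    if (!libspdm_aead_aes_gcm_encrypt(key, sizeof(key), iv, sizeof(iv),
                                      aad, sizeof(aad), msg, sizeof(msg),
                                      tag, sizeof(tag), cipher, &cipher_size))
        return false;

    /* Decryption authenticates the tag over AAD and ciphertext before
     * releasing any plaintext; a corrupted tag or AAD makes this fail. */
    if (!libspdm_aead_aes_gcm_decrypt(key, sizeof(key), iv, sizeof(iv),
                                      aad, sizeof(aad), cipher, cipher_size,
                                      tag, sizeof(tag), plain, &plain_size))
        return false;

    if (plain_size != sizeof(msg))
        return false;
    for (i = 0; i < sizeof(msg); i++) {
        if (plain[i] != msg[i])
            return false;
    }
    return true;
}
#endif
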
#if LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
/**
 * Performs AEAD ChaCha20Poly1305 authenticated encryption on a data buffer and additional
 * authenticated data.
 *
 * iv_size must be 12, otherwise false is returned.
 * key_size must be 32, otherwise false is returned.
 * tag_size must be 16, otherwise false is returned.
 *
 * @param[in]  key            Pointer to the encryption key.
 * @param[in]  key_size       Size of the encryption key in bytes.
 * @param[in]  iv             Pointer to the IV value.
 * @param[in]  iv_size        Size of the IV value in bytes.
 * @param[in]  a_data         Pointer to the additional authenticated data.
 * @param[in]  a_data_size    Size of the additional authenticated data in bytes.
 * @param[in]  data_in        Pointer to the input data buffer to be encrypted.
 * @param[in]  data_in_size   Size of the input data buffer in bytes.
 * @param[out] tag_out        Pointer to a buffer that receives the authentication tag output.
 * @param[in]  tag_size       Size of the authentication tag in bytes.
 * @param[out] data_out       Pointer to a buffer that receives the encryption output.
 * @param[out] data_out_size  Size of the output data buffer in bytes.
 *
 * @retval true   AEAD ChaCha20Poly1305 authenticated encryption succeeded.
 * @retval false  AEAD ChaCha20Poly1305 authenticated encryption failed.
 **/
extern bool libspdm_aead_chacha20_poly1305_encrypt(
    const uint8_t *key, size_t key_size, const uint8_t *iv,
    size_t iv_size, const uint8_t *a_data, size_t a_data_size,
    const uint8_t *data_in, size_t data_in_size, uint8_t *tag_out,
    size_t tag_size, uint8_t *data_out, size_t *data_out_size);

/**
 * Performs AEAD ChaCha20Poly1305 authenticated decryption on a data buffer and additional
 * authenticated data (AAD).
 *
 * iv_size must be 12, otherwise false is returned.
 * key_size must be 32, otherwise false is returned.
 * tag_size must be 16, otherwise false is returned.
 *
 * If data verification fails, false is returned.
 *
 * @param[in]  key            Pointer to the encryption key.
 * @param[in]  key_size       Size of the encryption key in bytes.
 * @param[in]  iv             Pointer to the IV value.
 * @param[in]  iv_size        Size of the IV value in bytes.
 * @param[in]  a_data         Pointer to the additional authenticated data.
 * @param[in]  a_data_size    Size of the additional authenticated data in bytes.
 * @param[in]  data_in        Pointer to the input data buffer to be decrypted.
 * @param[in]  data_in_size   Size of the input data buffer in bytes.
 * @param[in]  tag            Pointer to a buffer that contains the authentication tag.
 * @param[in]  tag_size       Size of the authentication tag in bytes.
 * @param[out] data_out       Pointer to a buffer that receives the decryption output.
 * @param[out] data_out_size  Size of the output data buffer in bytes.
 *
 * @retval true   AEAD ChaCha20Poly1305 authenticated decryption succeeded.
 * @retval false  AEAD ChaCha20Poly1305 authenticated decryption failed.
 **/
extern bool libspdm_aead_chacha20_poly1305_decrypt(
    const uint8_t *key, size_t key_size, const uint8_t *iv,
    size_t iv_size, const uint8_t *a_data, size_t a_data_size,
    const uint8_t *data_in, size_t data_in_size, const uint8_t *tag,
    size_t tag_size, uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT */

#if LIBSPDM_AEAD_SM4_SUPPORT
/**
 * Performs AEAD SM4-GCM authenticated encryption on a data buffer and additional authenticated
 * data.
 *
 * iv_size must be 12, otherwise false is returned.
 * key_size must be 16, otherwise false is returned.
 * tag_size must be 16, otherwise false is returned.
 *
 * @param[in]  key            Pointer to the encryption key.
 * @param[in]  key_size       Size of the encryption key in bytes.
 * @param[in]  iv             Pointer to the IV value.
 * @param[in]  iv_size        Size of the IV value in bytes.
 * @param[in]  a_data         Pointer to the additional authenticated data.
 * @param[in]  a_data_size    Size of the additional authenticated data in bytes.
 * @param[in]  data_in        Pointer to the input data buffer to be encrypted.
 * @param[in]  data_in_size   Size of the input data buffer in bytes.
 * @param[out] tag_out        Pointer to a buffer that receives the authentication tag output.
 * @param[in]  tag_size       Size of the authentication tag in bytes.
 * @param[out] data_out       Pointer to a buffer that receives the encryption output.
 * @param[out] data_out_size  Size of the output data buffer in bytes.
 *
 * @retval true   AEAD SM4-GCM authenticated encryption succeeded.
 * @retval false  AEAD SM4-GCM authenticated encryption failed.
 **/
extern bool libspdm_aead_sm4_gcm_encrypt(const uint8_t *key, size_t key_size,
                                         const uint8_t *iv, size_t iv_size,
                                         const uint8_t *a_data, size_t a_data_size,
                                         const uint8_t *data_in, size_t data_in_size,
                                         uint8_t *tag_out, size_t tag_size,
                                         uint8_t *data_out, size_t *data_out_size);

/**
 * Performs AEAD SM4-GCM authenticated decryption on a data buffer and additional authenticated
 * data.
 *
 * iv_size must be 12, otherwise false is returned.
 * key_size must be 16, otherwise false is returned.
 * tag_size must be 16, otherwise false is returned.
 *
 * If data verification fails, false is returned.
 *
 * @param[in]  key            Pointer to the encryption key.
 * @param[in]  key_size       Size of the encryption key in bytes.
 * @param[in]  iv             Pointer to the IV value.
 * @param[in]  iv_size        Size of the IV value in bytes.
 * @param[in]  a_data         Pointer to the additional authenticated data.
 * @param[in]  a_data_size    Size of the additional authenticated data in bytes.
 * @param[in]  data_in        Pointer to the input data buffer to be decrypted.
 * @param[in]  data_in_size   Size of the input data buffer in bytes.
 * @param[in]  tag            Pointer to a buffer that contains the authentication tag.
 * @param[in]  tag_size       Size of the authentication tag in bytes.
 * @param[out] data_out       Pointer to a buffer that receives the decryption output.
 * @param[out] data_out_size  Size of the output data buffer in bytes.
 *
 * @retval true   AEAD SM4-GCM authenticated decryption succeeded.
 * @retval false  AEAD SM4-GCM authenticated decryption failed.
 **/
extern bool libspdm_aead_sm4_gcm_decrypt(const uint8_t *key, size_t key_size,
                                         const uint8_t *iv, size_t iv_size,
                                         const uint8_t *a_data, size_t a_data_size,
                                         const uint8_t *data_in, size_t data_in_size,
                                         const uint8_t *tag, size_t tag_size,
                                         uint8_t *data_out, size_t *data_out_size);
#endif /* LIBSPDM_AEAD_SM4_SUPPORT */

#endif /* CRYPTLIB_AEAD_H */
416
kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h
Normal file
@ -0,0 +1,416 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_CERT_H
#define CRYPTLIB_CERT_H

/**
 * Retrieve the tag and length of the tag.
 *
 * @param ptr     The position in the ASN.1 data.
 * @param end     End of data.
 * @param length  The variable that will receive the length.
 * @param tag     The expected tag.
 *
 * @retval true   Got the tag successfully.
 * @retval false  Failed to get the tag, or the tag does not match.
 **/
extern bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length, uint32_t tag);

/**
 * Retrieve the subject bytes from one X.509 certificate.
 *
 * If cert is NULL, then return false.
 * If subject_size is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]      cert          Pointer to the DER-encoded X509 certificate.
 * @param[in]      cert_size     Size of the X509 certificate in bytes.
 * @param[out]     cert_subject  Pointer to the retrieved certificate subject bytes.
 * @param[in, out] subject_size  The size in bytes of the cert_subject buffer on input,
 *                               and the size of the returned cert_subject on output.
 *
 * @retval true   The certificate subject was retrieved successfully.
 * @retval false  Invalid certificate, or subject_size is too small for the result.
 *                subject_size will be updated with the required size.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
                                          uint8_t *cert_subject,
                                          size_t *subject_size);

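/*
 * Editor's usage sketch (illustrative only, not part of the DMTF header):
 * the *_size parameters above follow a query-then-fetch pattern. Per the
 * contract documented above, a call with a too-small buffer fails and
 * writes back the required size, so a caller that does not know the
 * subject length typically calls twice. The stack buffer size here is an
 * assumption for illustration; guarded out so it is never compiled.
 */
#if 0
static bool x509_subject_query_sketch(const uint8_t *cert, size_t cert_size)
{
    uint8_t subject[256];
    size_t subject_size = 0;

    /* First call with subject_size = 0: expected to fail and report the
     * required size back through subject_size. */
    if (!libspdm_x509_get_subject_name(cert, cert_size, subject, &subject_size)) {
        if (subject_size == 0 || subject_size > sizeof(subject))
            return false;
    }

    /* Retry now that subject_size is known to fit the buffer. */
    return libspdm_x509_get_subject_name(cert, cert_size, subject, &subject_size);
}
#endif
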
/**
 * Retrieve the version from one X.509 certificate.
 *
 * If cert is NULL, then return false.
 * If cert_size is 0, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  cert       Pointer to the DER-encoded X509 certificate.
 * @param[in]  cert_size  Size of the X509 certificate in bytes.
 * @param[out] version    Pointer to the retrieved version integer.
 *
 * @retval true
 * @retval false
 **/
extern bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size, size_t *version);

/**
 * Retrieve the serialNumber from one X.509 certificate.
 *
 * If cert is NULL, then return false.
 * If cert_size is 0, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]      cert                Pointer to the DER-encoded X509 certificate.
 * @param[in]      cert_size           Size of the X509 certificate in bytes.
 * @param[out]     serial_number       Pointer to the retrieved certificate serial_number bytes.
 * @param[in, out] serial_number_size  The size in bytes of the serial_number buffer on input,
 *                                     and the size of the returned serial_number on output.
 *
 * @retval true
 * @retval false
 **/
extern bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size,
                                           uint8_t *serial_number,
                                           size_t *serial_number_size);

/**
 * Retrieve the issuer bytes from one X.509 certificate.
 *
 * If cert is NULL, then return false.
 * If issuer_size is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]      cert         Pointer to the DER-encoded X509 certificate.
 * @param[in]      cert_size    Size of the X509 certificate in bytes.
 * @param[out]     cert_issuer  Pointer to the retrieved certificate issuer bytes.
 * @param[in, out] issuer_size  The size in bytes of the cert_issuer buffer on input,
 *                              and the size of the returned cert_issuer on output.
 *
 * @retval true   The certificate issuer was retrieved successfully.
 * @retval false  Invalid certificate, or issuer_size is too small for the result.
 *                issuer_size will be updated with the required size.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
                                         uint8_t *cert_issuer,
                                         size_t *issuer_size);

/**
 * Retrieve extension data from one X.509 certificate.
 *
 * @param[in]      cert                 Pointer to the DER-encoded X509 certificate.
 * @param[in]      cert_size            Size of the X509 certificate in bytes.
 * @param[in]      oid                  Object identifier buffer.
 * @param[in]      oid_size             Object identifier buffer size.
 * @param[out]     extension_data       Extension bytes.
 * @param[in, out] extension_data_size  Extension bytes size.
 *
 * @retval true
 * @retval false
 **/
extern bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
                                            const uint8_t *oid, size_t oid_size,
                                            uint8_t *extension_data,
                                            size_t *extension_data_size);

/**
 * Retrieve the Validity from one X.509 certificate.
 *
 * If cert is NULL, then return false.
 * If from_size is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]      cert       Pointer to the DER-encoded X509 certificate.
 * @param[in]      cert_size  Size of the X509 certificate in bytes.
 * @param[out]     from       notBefore pointer to a date_time object.
 * @param[in, out] from_size  notBefore date_time object size.
 * @param[out]     to         notAfter pointer to a date_time object.
 * @param[in, out] to_size    notAfter date_time object size.
 *
 * Note: use libspdm_x509_compare_date_time to compare date_time objects, and
 * libspdm_x509_set_date_time to get a date_time object from a date_time_str.
 *
 * @retval true   The certificate Validity was retrieved successfully.
 * @retval false  Invalid certificate, or Validity retrieval failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
                                      uint8_t *from, size_t *from_size, uint8_t *to,
                                      size_t *to_size);

/**
 * Format a date_time string into a date_time object.
 *
 * If date_time_str is NULL, then return false.
 * If date_time_size is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]      date_time_str   date_time string like YYYYMMDDhhmmssZ.
 *                                 Ref: https://www.w3.org/TR/NOTE-datetime
 *                                 Z stands for UTC time.
 * @param[out]     date_time       Pointer to a date_time object.
 * @param[in, out] date_time_size  date_time object buffer size.
 *
 * @retval true
 * @retval false
 **/
extern bool libspdm_x509_set_date_time(const char *date_time_str, void *date_time,
                                       size_t *date_time_size);

/**
 * Compare date_time1 object and date_time2 object.
 *
 * If date_time1 is NULL, then return -2.
 * If date_time2 is NULL, then return -2.
 * If date_time1 == date_time2, then return 0.
 * If date_time1 > date_time2, then return 1.
 * If date_time1 < date_time2, then return -1.
 *
 * @param[in] date_time1  Pointer to a date_time object.
 * @param[in] date_time2  Pointer to a date_time object.
 *
 * @retval 0   If date_time1 == date_time2.
 * @retval 1   If date_time1 > date_time2.
 * @retval -1  If date_time1 < date_time2.
 **/
extern int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2);

/**
 * Retrieve the key usage from one X.509 certificate.
 *
 * @param[in]  cert       Pointer to the DER-encoded X509 certificate.
 * @param[in]  cert_size  Size of the X509 certificate in bytes.
 * @param[out] usage      Key usage (LIBSPDM_CRYPTO_X509_KU_*).
 *
 * @retval true   The certificate key usage was retrieved successfully.
 * @retval false  Invalid certificate, or usage is NULL.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, size_t *usage);

/**
 * Retrieve the extended key usage from one X.509 certificate.
 *
 * @param[in]      cert        Pointer to the DER-encoded X509 certificate.
 * @param[in]      cert_size   Size of the X509 certificate in bytes.
 * @param[out]     usage       Key usage bytes.
 * @param[in, out] usage_size  Key usage buffer size in bytes.
 *
 * @retval true
 * @retval false
 **/
extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
                                                size_t cert_size, uint8_t *usage,
                                                size_t *usage_size);

/**
 * Retrieve the basic constraints from one X.509 certificate.
 *
 * @param[in]      cert                    Pointer to the DER-encoded X509 certificate.
 * @param[in]      cert_size               Size of the X509 certificate in bytes.
 * @param[out]     basic_constraints       Basic constraints bytes.
 * @param[in, out] basic_constraints_size  Basic constraints buffer size in bytes.
 *
 * @retval true
 * @retval false
 **/
extern bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
                                                        size_t cert_size,
                                                        uint8_t *basic_constraints,
                                                        size_t *basic_constraints_size);

/**
 * Verify that one X509 certificate was issued by the trusted CA.
 *
 * If cert is NULL, then return false.
 * If ca_cert is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in] cert          Pointer to the DER-encoded X509 certificate to be verified.
 * @param[in] cert_size     Size of the X509 certificate in bytes.
 * @param[in] ca_cert       Pointer to the DER-encoded trusted CA certificate.
 * @param[in] ca_cert_size  Size of the CA certificate in bytes.
 *
 * @retval true   The certificate was issued by the trusted CA.
 * @retval false  Invalid certificate, or the certificate was not issued by the given
 *                trusted CA.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
                                     const uint8_t *ca_cert, size_t ca_cert_size);

/**
 * Verify that one X509 certificate chain was issued by the trusted CA.
 *
 * @param[in] cert_chain         One or more ASN.1 DER-encoded X.509 certificates,
 *                               where the first certificate is signed by the Root
 *                               Certificate or is the Root Certificate itself, and
 *                               each subsequent certificate is signed by the
 *                               preceding certificate.
 * @param[in] cert_chain_length  Total length of the certificate chain, in bytes.
 *
 * @param[in] root_cert          Trusted Root Certificate buffer.
 *
 * @param[in] root_cert_length   Trusted Root Certificate buffer length.
 *
 * @retval true   All certificates were issued by the first certificate in cert_chain.
 * @retval false  Invalid certificate, or a certificate was not issued by the given
 *                trusted CA.
 **/
extern bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
                                           const uint8_t *cert_chain,
                                           size_t cert_chain_length);

/**
 * Get one X509 certificate from cert_chain.
 *
 * @param[in]  cert_chain         One or more ASN.1 DER-encoded X.509 certificates,
 *                                where the first certificate is signed by the Root
 *                                Certificate or is the Root Certificate itself, and
 *                                each subsequent certificate is signed by the
 *                                preceding certificate.
 * @param[in]  cert_chain_length  Total length of the certificate chain, in bytes.
 *
 * @param[in]  cert_index         Index of the certificate. An index of -1 indicates
 *                                the last certificate in cert_chain.
 *
 * @param[out] cert               The certificate at the index of cert_chain.
 * @param[out] cert_length        The length of the certificate at the index of cert_chain.
 *
 * @retval true   Success.
 * @retval false  Failed to get the certificate from the certificate chain.
 **/
extern bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
                                                  size_t cert_chain_length,
                                                  const int32_t cert_index, const uint8_t **cert,
                                                  size_t *cert_length);

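/*
 * Editor's usage sketch (illustrative only, not part of the DMTF header):
 * a typical SPDM-style consumer first checks a received chain against a
 * pinned root certificate, then pulls out the leaf (index -1) for later
 * signature checks. Guarded out so it is never compiled.
 */
#if 0
static bool cert_chain_check_sketch(const uint8_t *root_cert, size_t root_cert_len,
                                    const uint8_t *cert_chain, size_t cert_chain_len)
{
    const uint8_t *leaf_cert;
    size_t leaf_cert_len;

    /* Every certificate in the chain must be signed by its predecessor,
     * anchored at the trusted root. */
    if (!libspdm_x509_verify_cert_chain(root_cert, root_cert_len,
                                        cert_chain, cert_chain_len))
        return false;

    /* Index -1 selects the last (leaf) certificate in the chain. */
    return libspdm_x509_get_cert_from_cert_chain(cert_chain, cert_chain_len,
                                                 -1, &leaf_cert, &leaf_cert_len);
}
#endif
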
#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
/**
 * Retrieve the RSA public key from one DER-encoded X509 certificate.
 *
 * If cert is NULL, then return false.
 * If rsa_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  cert         Pointer to the DER-encoded X509 certificate.
 * @param[in]  cert_size    Size of the X509 certificate in bytes.
 * @param[out] rsa_context  Pointer to the newly generated RSA context which contains the
 *                          retrieved RSA public key component. Use the libspdm_rsa_free()
 *                          function to free the resource.
 *
 * @retval true   The RSA public key was retrieved successfully.
 * @retval false  Failed to retrieve the RSA public key from the X509 certificate.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                                 void **rsa_context);
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */

#if LIBSPDM_ECDSA_SUPPORT
/**
 * Retrieve the EC public key from one DER-encoded X509 certificate.
 *
 * @param[in]  cert        Pointer to the DER-encoded X509 certificate.
 * @param[in]  cert_size   Size of the X509 certificate in bytes.
 * @param[out] ec_context  Pointer to the newly generated ECDSA context which contains the
 *                         retrieved EC public key component. Use the libspdm_ec_free()
 *                         function to free the resource.
 *
 * If cert is NULL, then return false.
 * If ec_context is NULL, then return false.
 *
 * @retval true   The EC public key was retrieved successfully.
 * @retval false  Failed to retrieve the EC public key from the X509 certificate.
 **/
extern bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                                void **ec_context);
#endif /* LIBSPDM_ECDSA_SUPPORT */

#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
/**
 * Retrieve the Ed public key from one DER-encoded X509 certificate.
 *
 * @param[in]  cert         Pointer to the DER-encoded X509 certificate.
 * @param[in]  cert_size    Size of the X509 certificate in bytes.
 * @param[out] ecd_context  Pointer to the newly generated EdDSA context which contains the
 *                          retrieved Ed public key component. Use the libspdm_ecd_free()
 *                          function to free the resource.
 *
 * If cert is NULL, then return false.
 * If ecd_context is NULL, then return false.
 *
 * @retval true   The Ed public key was retrieved successfully.
 * @retval false  Failed to retrieve the Ed public key from the X509 certificate.
 **/
extern bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                                 void **ecd_context);
#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */

#if LIBSPDM_SM2_DSA_SUPPORT
/**
 * Retrieve the sm2 public key from one DER-encoded X509 certificate.
 *
 * @param[in]  cert         Pointer to the DER-encoded X509 certificate.
 * @param[in]  cert_size    Size of the X509 certificate in bytes.
 * @param[out] sm2_context  Pointer to the newly generated sm2 context which contains the
 *                          retrieved sm2 public key component. Use the sm2_free()
 *                          function to free the resource.
 *
 * If cert is NULL, then return false.
 * If sm2_context is NULL, then return false.
 *
 * @retval true   The sm2 public key was retrieved successfully.
 * @retval false  Failed to retrieve the sm2 public key from the X509 certificate.
 **/
extern bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                                 void **sm2_context);
#endif /* LIBSPDM_SM2_DSA_SUPPORT */

#if LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP
/**
 * Generate a CSR.
 *
 * @param[in]      hash_nid               Hash algorithm used for signing.
 * @param[in]      asym_nid               Asymmetric algorithm used for signing.
 *
 * @param[in]      requester_info         Requester info used to generate the CSR.
 * @param[in]      requester_info_length  Length of requester_info in bytes.
 *
 * @param[in]      context                Pointer to the asymmetric context.
 * @param[in]      subject_name           Subject name: fields are separated by ',',
 *                                        for example: "C=AA,CN=BB".
 *
 * Subject names should contain a comma-separated list of OID types and values.
 * The valid OID type names are:
 * {"CN", "commonName", "C", "countryName", "O", "organizationName", "L",
 * "OU", "organizationalUnitName", "ST", "stateOrProvinceName", "emailAddress",
 * "serialNumber", "postalAddress", "postalCode", "dnQualifier", "title",
 * "SN", "givenName", "GN", "initials", "pseudonym", "generationQualifier",
 * "domainComponent", "DC"}.
 * Note: The value of C and countryName should be a CSR Supported Country Code.
 *
 * @param[in, out] csr_len      On input, csr_len is the size of the buffer used to store
 *                              the CSR. On output, csr_len is the length of the
 *                              DER-encoded CSR.
 * @param[in, out] csr_pointer  On input, csr_pointer is the buffer address used to store
 *                              the CSR. On output, csr_pointer is the address of the
 *                              stored CSR. The csr_pointer address will be changed.
 *
 * @retval true   Success.
 * @retval false  Failed to generate the CSR.
 **/
extern bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid,
                                 uint8_t *requester_info, size_t requester_info_length,
                                 void *context, char *subject_name,
                                 size_t *csr_len, uint8_t **csr_pointer);
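/*
 * Editor's usage sketch (illustrative only, not part of the DMTF header):
 * generating a DER-encoded CSR for an ECDSA-P384/SHA-384 key. The
 * LIBSPDM_CRYPTO_NID_* constants and the buffer sizing are assumptions
 * for illustration; guarded out so it is never compiled.
 */
#if 0
static bool gen_csr_sketch(void *ec_context)
{
    uint8_t csr_buf[1024];
    uint8_t *csr = csr_buf;
    size_t csr_len = sizeof(csr_buf);

    /* Subject fields are comma-separated OID=value pairs. */
    char subject[] = "C=US,O=Example,CN=device-leaf";

    /* Assumed NID names for SHA-384 and ECDSA P-384. */
    return libspdm_gen_x509_csr(LIBSPDM_CRYPTO_NID_SHA384,
                                LIBSPDM_CRYPTO_NID_ECDSA_NIST_P384,
                                NULL, 0,            /* no extra requester info */
                                ec_context, subject,
                                &csr_len, &csr);    /* csr_len: in buffer size, out DER length */
}
#endif
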
#endif /* LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP */

#endif /* CRYPTLIB_CERT_H */
98
kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h
Normal file
@ -0,0 +1,98 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_DH_H
#define CRYPTLIB_DH_H

/*=====================================================================================
 * Diffie-Hellman Key Exchange Primitives
 *=====================================================================================
 */

#if LIBSPDM_FFDHE_SUPPORT
/**
 * Allocates and initializes one Diffie-Hellman context for subsequent use with the NID.
 *
 * @param nid  Cipher NID.
 *
 * @return Pointer to the Diffie-Hellman context that has been initialized.
 *         If the allocation fails, libspdm_dh_new_by_nid() returns NULL.
 *         If the interface is not supported, libspdm_dh_new_by_nid() returns NULL.
 **/
extern void *libspdm_dh_new_by_nid(size_t nid);

/**
 * Release the specified DH context.
 *
 * @param[in] dh_context  Pointer to the DH context to be released.
 **/
void libspdm_dh_free(void *dh_context);

/**
 * Generates a DH public key.
 *
 * This function generates a random secret exponent and computes the public key, which is
 * returned via the parameters public_key and public_key_size. The DH context is updated
 * accordingly. If the public_key buffer is too small to hold the public key, false is
 * returned and public_key_size is set to the required buffer size to obtain the public key.
 *
 * If dh_context is NULL, then return false.
 * If public_key_size is NULL, then return false.
 * If public_key_size is large enough but public_key is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * For FFDHE2048, the public_key_size is 256.
 * For FFDHE3072, the public_key_size is 384.
 * For FFDHE4096, the public_key_size is 512.
 *
 * @param[in, out] dh_context       Pointer to the DH context.
 * @param[out]     public_key       Pointer to the buffer to receive the generated public key.
 * @param[in, out] public_key_size  On input, the size of the public_key buffer in bytes.
 *                                  On output, the size of data returned in the public_key
 *                                  buffer in bytes.
 *
 * @retval true   DH public key generation succeeded.
 * @retval false  DH public key generation failed.
 * @retval false  public_key_size is not large enough.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_dh_generate_key(void *dh_context, uint8_t *public_key, size_t *public_key_size);

/**
 * Computes the exchanged common key.
 *
 * Given the peer's public key, this function computes the exchanged common key based on its
 * own context, including the value of the prime modulus and the random secret exponent.
 *
 * If dh_context is NULL, then return false.
 * If peer_public_key is NULL, then return false.
 * If key_size is NULL, then return false.
 * If key is NULL, then return false.
 * If key_size is not large enough, then return false.
 * If this interface is not supported, then return false.
 *
 * For FFDHE2048, the peer_public_key_size and key_size is 256.
 * For FFDHE3072, the peer_public_key_size and key_size is 384.
 * For FFDHE4096, the peer_public_key_size and key_size is 512.
 *
 * @param[in, out] dh_context            Pointer to the DH context.
 * @param[in]      peer_public_key       Pointer to the peer's public key.
 * @param[in]      peer_public_key_size  Size of the peer's public key in bytes.
 * @param[out]     key                   Pointer to the buffer to receive the generated key.
 * @param[in, out] key_size              On input, the size of the key buffer in bytes.
 *                                       On output, the size of data returned in the key
 *                                       buffer in bytes.
 *
 * @retval true   DH exchanged key generation succeeded.
 * @retval false  DH exchanged key generation failed.
 * @retval false  key_size is not large enough.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_dh_compute_key(void *dh_context, const uint8_t *peer_public_key,
                                   size_t peer_public_key_size, uint8_t *key,
                                   size_t *key_size);
#endif /* LIBSPDM_FFDHE_SUPPORT */
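/*
 * Editor's usage sketch (illustrative only, not part of the DMTF header):
 * a full FFDHE-2048 exchange between two contexts, showing that both
 * sides derive the same 256-byte shared secret. The NID constant is an
 * assumed name; guarded out so it is never compiled.
 */
#if 0
static bool ffdhe2048_exchange_sketch(void)
{
    bool ok = false;
    uint8_t pub_a[256], pub_b[256];        /* FFDHE2048 public key size is 256 */
    uint8_t key_a[256], key_b[256];
    size_t pub_a_size = sizeof(pub_a), pub_b_size = sizeof(pub_b);
    size_t key_a_size = sizeof(key_a), key_b_size = sizeof(key_b);
    size_t i;

    /* LIBSPDM_CRYPTO_NID_FFDHE2048 is an assumed NID name. */
    void *dh_a = libspdm_dh_new_by_nid(LIBSPDM_CRYPTO_NID_FFDHE2048);
    void *dh_b = libspdm_dh_new_by_nid(LIBSPDM_CRYPTO_NID_FFDHE2048);

    if (dh_a == NULL || dh_b == NULL)
        goto out;

    /* Each side generates a random exponent and publishes g^x mod p. */
    if (!libspdm_dh_generate_key(dh_a, pub_a, &pub_a_size) ||
        !libspdm_dh_generate_key(dh_b, pub_b, &pub_b_size))
        goto out;

    /* Each side combines its own secret with the peer's public value. */
    if (!libspdm_dh_compute_key(dh_a, pub_b, pub_b_size, key_a, &key_a_size) ||
        !libspdm_dh_compute_key(dh_b, pub_a, pub_a_size, key_b, &key_b_size))
        goto out;

    ok = (key_a_size == key_b_size);
    for (i = 0; ok && i < key_a_size; i++)
        ok = (key_a[i] == key_b[i]);

out:
    if (dh_a != NULL)
        libspdm_dh_free(dh_a);
    if (dh_b != NULL)
        libspdm_dh_free(dh_b);
    return ok;
}
#endif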
#endif /* CRYPTLIB_DH_H */
162
kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h
Normal file
@ -0,0 +1,162 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_EC_H
#define CRYPTLIB_EC_H

/*=====================================================================================
 * Elliptic Curve Primitives
 *=====================================================================================*/

#if (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT)
/**
 * Allocates and initializes one Elliptic Curve context for subsequent use with the NID.
 *
 * @param nid  Cipher NID.
 *
 * @return Pointer to the Elliptic Curve context that has been initialized.
 *         If the allocation fails, libspdm_ec_new_by_nid() returns NULL.
 **/
extern void *libspdm_ec_new_by_nid(size_t nid);

/**
 * Release the specified EC context.
 *
 * @param[in] ec_context  Pointer to the EC context to be released.
 **/
extern void libspdm_ec_free(void *ec_context);
#endif /* (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT) */

#if LIBSPDM_ECDHE_SUPPORT
/**
 * Generates an EC key and returns the EC public key (X, Y).
 *
 * This function generates a random secret and computes the public key (X, Y), which is
 * returned via the parameters public_key and public_key_size.
 * X is the first half of public_key with size being public_key_size / 2.
 * Y is the second half of public_key with size being public_key_size / 2.
 * The EC context is updated accordingly.
 * If the public_key buffer is too small to hold the public X, Y, false is returned and
 * public_key_size is set to the required buffer size to obtain the public X, Y.
 *
 * For P-256, the public_key_size is 64. The first 32 bytes are X, the second 32 bytes are Y.
 * For P-384, the public_key_size is 96. The first 48 bytes are X, the second 48 bytes are Y.
 * For P-521, the public_key_size is 132. The first 66 bytes are X, the second 66 bytes are Y.
 *
 * If ec_context is NULL, then return false.
 * If public_key_size is NULL, then return false.
 * If public_key_size is large enough but public_key is NULL, then return false.
 *
 * @param[in, out] ec_context       Pointer to the EC context.
 * @param[out]     public_key       Pointer to the buffer to receive the generated public X, Y.
 * @param[in, out] public_key_size  On input, the size of the public_key buffer in bytes.
 *                                  On output, the size of data returned in the public_key
 *                                  buffer in bytes.
 *
 * @retval true   EC public X, Y generation succeeded.
 * @retval false  EC public X, Y generation failed.
 * @retval false  public_key_size is not large enough.
 **/
extern bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_key, size_t *public_key_size);

/**
 * Computes the exchanged common key.
 *
 * Given the peer's public key (X, Y), this function computes the exchanged common key,
 * based on its own context, including the value of the curve parameter and the random secret.
 * X is the first half of peer_public with size being peer_public_size / 2.
 * Y is the second half of peer_public with size being peer_public_size / 2.
 *
 * If ec_context is NULL, then return false.
 * If peer_public is NULL, then return false.
 * If peer_public_size is 0, then return false.
 * If key is NULL, then return false.
 * If key_size is not large enough, then return false.
 *
 * For P-256, the peer_public_size is 64. The first 32 bytes are X, the second 32 bytes are Y.
 * The key_size is 32.
 * For P-384, the peer_public_size is 96. The first 48 bytes are X, the second 48 bytes are Y.
 * The key_size is 48.
 * For P-521, the peer_public_size is 132. The first 66 bytes are X, the second 66 bytes are Y.
 * The key_size is 66.
 *
 * @param[in, out] ec_context        Pointer to the EC context.
 * @param[in]      peer_public       Pointer to the peer's public X, Y.
 * @param[in]      peer_public_size  Size of the peer's public X, Y in bytes.
 * @param[out]     key               Pointer to the buffer to receive the generated key.
 * @param[in, out] key_size          On input, the size of the key buffer in bytes.
 *                                   On output, the size of data returned in the key buffer
 *                                   in bytes.
 *
 * @retval true   EC exchanged key generation succeeded.
 * @retval false  EC exchanged key generation failed.
 * @retval false  key_size is not large enough.
 **/
extern bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
                                   size_t peer_public_size, uint8_t *key,
                                   size_t *key_size);
#endif /* LIBSPDM_ECDHE_SUPPORT */

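/*
 * Editor's usage sketch (illustrative only, not part of the DMTF header):
 * one side of a P-256 ECDHE exchange. peer_public (64 bytes, X || Y) is
 * received from the other endpoint; the derived key is 32 bytes. The NID
 * constant is an assumed name; guarded out so it is never compiled.
 */
#if 0
static bool ecdhe_p256_side_sketch(const uint8_t peer_public[64],
                                   uint8_t shared_key[32])
{
    uint8_t my_public[64];
    size_t my_public_size = sizeof(my_public);
    size_t key_size = 32;
    bool ok;

    void *ec = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1);
    if (ec == NULL)
        return false;

    /* Generate an ephemeral key pair; my_public would be sent to the peer. */
    ok = libspdm_ec_generate_key(ec, my_public, &my_public_size) &&
         libspdm_ec_compute_key(ec, peer_public, 64, shared_key, &key_size) &&
         key_size == 32;

    libspdm_ec_free(ec);
    return ok;
}
#endif
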
#if LIBSPDM_ECDSA_SUPPORT
/**
 * Carries out the EC-DSA signature.
 *
 * This function carries out the EC-DSA signature.
 * If the signature buffer is too small to hold the contents of the signature, false
 * is returned and sig_size is set to the required buffer size to obtain the signature.
 *
 * If ec_context is NULL, then return false.
 * If message_hash is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid can be SHA256, SHA384,
 * SHA512, SHA3_256, SHA3_384, or SHA3_512.
 * If sig_size is large enough but signature is NULL, then return false.
 *
 * For P-256, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
 * For P-384, the sig_size is 96. The first 48 bytes are R, the second 48 bytes are S.
 * For P-521, the sig_size is 132. The first 66 bytes are R, the second 66 bytes are S.
 *
 * @param[in]      ec_context    Pointer to the EC context for signature generation.
 * @param[in]      hash_nid      Hash NID.
 * @param[in]      message_hash  Pointer to the octet message hash to be signed.
 * @param[in]      hash_size     Size of the message hash in bytes.
 * @param[out]     signature     Pointer to the buffer to receive the EC-DSA signature.
 * @param[in, out] sig_size      On input, the size of the signature buffer in bytes.
 *                               On output, the size of data returned in the signature
 *                               buffer in bytes.
 *
 * @retval true   The signature was successfully generated in EC-DSA.
 * @retval false  Signature generation failed.
 * @retval false  sig_size is too small.
 **/
extern bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
                               const uint8_t *message_hash, size_t hash_size,
                               uint8_t *signature, size_t *sig_size);

/**
 * Verifies the EC-DSA signature.
 *
 * If ec_context is NULL, then return false.
 * If message_hash is NULL, then return false.
 * If signature is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid can be SHA256, SHA384,
 * SHA512, SHA3_256, SHA3_384, or SHA3_512.
 *
 * For P-256, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
 * For P-384, the sig_size is 96. The first 48 bytes are R, the second 48 bytes are S.
 * For P-521, the sig_size is 132. The first 66 bytes are R, the second 66 bytes are S.
 *
 * @param[in] ec_context    Pointer to the EC context for signature verification.
 * @param[in] hash_nid      Hash NID.
 * @param[in] message_hash  Pointer to the octet message hash to be checked.
 * @param[in] hash_size     Size of the message hash in bytes.
 * @param[in] signature     Pointer to the EC-DSA signature to be verified.
 * @param[in] sig_size      Size of the signature in bytes.
 *
 * @retval true   Valid signature encoded in EC-DSA.
 * @retval false  Invalid signature or invalid EC context.
 **/
extern bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid,
                                 const uint8_t *message_hash, size_t hash_size,
                                 const uint8_t *signature, size_t sig_size);
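/*
 * Editor's usage sketch (illustrative only, not part of the DMTF header):
 * signing a precomputed SHA-256 digest with a P-256 context and verifying
 * the raw 64-byte (R || S) signature. The NID constant is an assumed name;
 * guarded out so it is never compiled.
 */
#if 0
static bool ecdsa_p256_sign_verify_sketch(void *ec_context,
                                          const uint8_t digest[32])
{
    uint8_t signature[64];          /* P-256: 32-byte R followed by 32-byte S */
    size_t sig_size = sizeof(signature);

    if (!libspdm_ecdsa_sign(ec_context, LIBSPDM_CRYPTO_NID_SHA256,
                            digest, 32, signature, &sig_size))
        return false;

    /* Verification takes the same hash NID and the raw R||S encoding. */
    return libspdm_ecdsa_verify(ec_context, LIBSPDM_CRYPTO_NID_SHA256,
                                digest, 32, signature, sig_size);
}
#endif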
#endif /* LIBSPDM_ECDSA_SUPPORT */
#endif /* CRYPTLIB_EC_H */
100
kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h
Normal file
@ -0,0 +1,100 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_ECD_H
#define CRYPTLIB_ECD_H

/*=====================================================================================
 * Edwards-Curve Primitives
 *=====================================================================================*/

#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
/**
 * Allocates and initializes one Edwards-Curve context for subsequent use with the NID.
 *
 * @param nid  Cipher NID.
 *
 * @return Pointer to the Edwards-Curve context that has been initialized.
 *         If the allocation fails, libspdm_ecd_new_by_nid() returns NULL.
 **/
extern void *libspdm_ecd_new_by_nid(size_t nid);

/**
 * Release the specified Ed context.
 *
 * @param[in] ecd_context  Pointer to the Ed context to be released.
 **/
extern void libspdm_ecd_free(void *ecd_context);

/**
 * Carries out the Ed-DSA signature.
 *
 * This function carries out the Ed-DSA signature.
 * If the signature buffer is too small to hold the contents of the signature, false
 * is returned and sig_size is set to the required buffer size to obtain the signature.
 *
 * If ecd_context is NULL, then return false.
 * If message is NULL, then return false.
 * hash_nid must be NULL.
 * If sig_size is large enough but signature is NULL, then return false.
 *
 * For ed25519, context must be NULL and context_size must be 0.
 * For ed448, context must be at most 255 octets.
 *
 * For ed25519, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
 * For ed448, the sig_size is 114. The first 57 bytes are R, the second 57 bytes are S.
 *
 * @param[in]      ecd_context   Pointer to the Ed context for signature generation.
 * @param[in]      hash_nid      Hash NID.
 * @param[in]      context       The EdDSA signing context.
 * @param[in]      context_size  Size of the EdDSA signing context.
 * @param[in]      message       Pointer to the octet message to be signed (before hashing).
 * @param[in]      size          Size of the message in bytes.
 * @param[out]     signature     Pointer to the buffer to receive the Ed-DSA signature.
 * @param[in, out] sig_size      On input, the size of the signature buffer in bytes.
 *                               On output, the size of data returned in the signature
 *                               buffer in bytes.
 *
 * @retval true   The signature was successfully generated in Ed-DSA.
 * @retval false  Signature generation failed.
 * @retval false  sig_size is too small.
 **/
extern bool libspdm_eddsa_sign(const void *ecd_context, size_t hash_nid,
                               const uint8_t *context, size_t context_size,
                               const uint8_t *message, size_t size, uint8_t *signature,
                               size_t *sig_size);

/**
 * Verifies the Ed-DSA signature.
 *
 * If ecd_context is NULL, then return false.
 * If message is NULL, then return false.
 * If signature is NULL, then return false.
 * hash_nid must be NULL.
 *
 * For ed25519, context must be NULL and context_size must be 0.
 * For ed448, context must be at most 255 octets.
 *
 * For ed25519, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
 * For ed448, the sig_size is 114. The first 57 bytes are R, the second 57 bytes are S.
 *
 * @param[in] ecd_context   Pointer to the Ed context for signature verification.
 * @param[in] hash_nid      Hash NID.
 * @param[in] context       The EdDSA signing context.
 * @param[in] context_size  Size of the EdDSA signing context.
 * @param[in] message       Pointer to the octet message to be checked (before hashing).
 * @param[in] size          Size of the message in bytes.
 * @param[in] signature     Pointer to the Ed-DSA signature to be verified.
 * @param[in] sig_size      Size of the signature in bytes.
 *
 * @retval true   Valid signature encoded in Ed-DSA.
 * @retval false  Invalid signature or invalid Ed context.
 **/
extern bool libspdm_eddsa_verify(const void *ecd_context, size_t hash_nid,
                                 const uint8_t *context, size_t context_size,
                                 const uint8_t *message, size_t size,
                                 const uint8_t *signature, size_t sig_size);
#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */
#endif /* CRYPTLIB_ECD_H */
772
kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h
Normal file
@ -0,0 +1,772 @@
|
||||
/**
|
||||
* Copyright Notice:
|
||||
* Copyright 2021-2022 DMTF. All rights reserved.
|
||||
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
|
||||
**/
|
||||
|
||||
#ifndef CRYPTLIB_HASH_H
|
||||
#define CRYPTLIB_HASH_H
|
||||
|
||||
/* SHA-256 digest size in bytes. */
|
||||
#define LIBSPDM_SHA256_DIGEST_SIZE 32
|
||||
|
||||
/* SHA-384 digest size in bytes. */
|
||||
#define LIBSPDM_SHA384_DIGEST_SIZE 48
|
||||
|
||||
/* SHA-512 digest size in bytes. */
|
||||
#define LIBSPDM_SHA512_DIGEST_SIZE 64
|
||||
|
||||
/* SHA3-256 digest size in bytes. */
|
||||
#define LIBSPDM_SHA3_256_DIGEST_SIZE 32
|
||||
|
||||
/* SHA3-384 digest size in bytes. */
|
||||
#define LIBSPDM_SHA3_384_DIGEST_SIZE 48
|
||||
|
||||
/* SHA3-512 digest size in bytes. */
|
||||
#define LIBSPDM_SHA3_512_DIGEST_SIZE 64
|
||||
|
||||
/* SM3_256 digest size in bytes. */
|
||||
#define LIBSPDM_SM3_256_DIGEST_SIZE 32
|
||||
|
||||
/*=====================================================================================
|
||||
* One-way cryptographic hash SHA2 primitives.
|
||||
*=====================================================================================
|
||||
*/
|
||||
#if LIBSPDM_SHA256_SUPPORT
|
||||
/**
|
||||
* Allocates and initializes one HASH_CTX context for subsequent SHA-256 use.
|
||||
*
|
||||
* @return Pointer to the HASH_CTX context that has been initialized.
|
||||
* If the allocations fails, sha256_new() returns NULL. *
|
||||
**/
|
||||
extern void *libspdm_sha256_new(void);
|
||||
|
||||
/**
|
||||
* Release the specified HASH_CTX context.
|
||||
*
|
||||
* @param[in] sha256_context Pointer to the HASH_CTX context to be released.
|
||||
**/
|
||||
extern void libspdm_sha256_free(void *sha256_context);
|
||||
|
||||
/**
|
||||
* Initializes user-supplied memory pointed to by sha256_context as SHA-256 hash context for
|
||||
* subsequent use.
|
||||
*
|
||||
* If sha256_context is NULL, then return false.
|
||||
*
|
||||
* @param[out] sha256_context Pointer to SHA-256 context being initialized.
|
||||
*
|
||||
* @retval true SHA-256 context initialization succeeded.
|
||||
* @retval false SHA-256 context initialization failed.
 **/
extern bool libspdm_sha256_init(void *sha256_context);

/**
 * Makes a copy of an existing SHA-256 context.
 *
 * If sha256_context is NULL, then return false.
 * If new_sha256_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  sha256_context      Pointer to SHA-256 context being copied.
 * @param[out] new_sha256_context  Pointer to new SHA-256 context.
 *
 * @retval true   SHA-256 context copy succeeded.
 * @retval false  SHA-256 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha256_duplicate(const void *sha256_context, void *new_sha256_context);

/**
 * Digests the input data and updates the SHA-256 context.
 *
 * This function performs SHA-256 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The SHA-256 context should already have been correctly initialized by libspdm_sha256_init(),
 * and must not have been finalized by libspdm_sha256_final(). Behavior with an invalid context
 * is undefined.
 *
 * If sha256_context is NULL, then return false.
 *
 * @param[in, out] sha256_context  Pointer to the SHA-256 context.
 * @param[in]      data            Pointer to the buffer containing the data to be hashed.
 * @param[in]      data_size       Size of data buffer in bytes.
 *
 * @retval true   SHA-256 data digest succeeded.
 * @retval false  SHA-256 data digest failed.
 **/
extern bool libspdm_sha256_update(void *sha256_context, const void *data, size_t data_size);

/**
 * Completes computation of the SHA-256 digest value.
 *
 * This function completes SHA-256 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the SHA-256 context cannot
 * be used again. The SHA-256 context should already have been correctly initialized by
 * libspdm_sha256_init(), and must not have been finalized by libspdm_sha256_final().
 * Behavior with an invalid SHA-256 context is undefined.
 *
 * If sha256_context is NULL, then return false.
 * If hash_value is NULL, then return false.
 *
 * @param[in, out] sha256_context  Pointer to the SHA-256 context.
 * @param[out]     hash_value      Pointer to a buffer that receives the SHA-256 digest
 *                                 value (32 bytes).
 *
 * @retval true   SHA-256 digest computation succeeded.
 * @retval false  SHA-256 digest computation failed.
 **/
extern bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value);

/**
 * Computes the SHA-256 message digest of an input data buffer.
 *
 * This function performs the SHA-256 message digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]  data        Pointer to the buffer containing the data to be hashed.
 * @param[in]  data_size   Size of data buffer in bytes.
 * @param[out] hash_value  Pointer to a buffer that receives the SHA-256 digest value (32 bytes).
 *
 * @retval true   SHA-256 digest computation succeeded.
 * @retval false  SHA-256 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA256_SUPPORT */
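
/*
 * Illustrative usage sketch (editor's addition, not part of the upstream libspdm
 * header): shows the typical streaming flow for the SHA-256 interface declared
 * above. The helper name and buffer arguments are hypothetical; per the comments
 * above, libspdm_sha256_new() both allocates and initializes the context, while
 * libspdm_sha256_init() is the entry point for caller-supplied context memory.
 */
#if 0
static bool example_sha256_streaming(const uint8_t *part1, size_t part1_size,
                                     const uint8_t *part2, size_t part2_size,
                                     uint8_t digest[32])
{
    /* Allocate a ready-to-use streaming context. */
    void *ctx = libspdm_sha256_new();

    /* Feed the message in pieces; update may be called any number of times. */
    bool ok = (ctx != NULL) &&
              libspdm_sha256_update(ctx, part1, part1_size) &&
              libspdm_sha256_update(ctx, part2, part2_size) &&
              /* Finalize exactly once; the context must not be reused afterwards. */
              libspdm_sha256_final(ctx, digest);

    if (ctx != NULL) {
        libspdm_sha256_free(ctx);
    }
    return ok;
}
#endif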

#if LIBSPDM_SHA384_SUPPORT
/**
 * Allocates and initializes one HASH_CTX context for subsequent SHA-384 use.
 *
 * @return Pointer to the HASH_CTX context that has been initialized.
 *         If the allocation fails, libspdm_sha384_new() returns NULL.
 **/
extern void *libspdm_sha384_new(void);

/**
 * Releases the specified HASH_CTX context.
 *
 * @param[in] sha384_context  Pointer to the HASH_CTX context to be released.
 **/
extern void libspdm_sha384_free(void *sha384_context);

/**
 * Initializes user-supplied memory pointed to by sha384_context as a SHA-384 hash context for
 * subsequent use.
 *
 * If sha384_context is NULL, then return false.
 *
 * @param[out] sha384_context  Pointer to SHA-384 context being initialized.
 *
 * @retval true   SHA-384 context initialization succeeded.
 * @retval false  SHA-384 context initialization failed.
 **/
extern bool libspdm_sha384_init(void *sha384_context);

/**
 * Makes a copy of an existing SHA-384 context.
 *
 * If sha384_context is NULL, then return false.
 * If new_sha384_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  sha384_context      Pointer to SHA-384 context being copied.
 * @param[out] new_sha384_context  Pointer to new SHA-384 context.
 *
 * @retval true   SHA-384 context copy succeeded.
 * @retval false  SHA-384 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha384_duplicate(const void *sha384_context, void *new_sha384_context);

/**
 * Digests the input data and updates the SHA-384 context.
 *
 * This function performs SHA-384 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The SHA-384 context should already have been correctly initialized by libspdm_sha384_init(),
 * and must not have been finalized by libspdm_sha384_final(). Behavior with an invalid context
 * is undefined.
 *
 * If sha384_context is NULL, then return false.
 *
 * @param[in, out] sha384_context  Pointer to the SHA-384 context.
 * @param[in]      data            Pointer to the buffer containing the data to be hashed.
 * @param[in]      data_size       Size of data buffer in bytes.
 *
 * @retval true   SHA-384 data digest succeeded.
 * @retval false  SHA-384 data digest failed.
 **/
extern bool libspdm_sha384_update(void *sha384_context, const void *data, size_t data_size);

/**
 * Completes computation of the SHA-384 digest value.
 *
 * This function completes SHA-384 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the SHA-384 context cannot
 * be used again. The SHA-384 context should already have been correctly initialized by
 * libspdm_sha384_init(), and must not have been finalized by libspdm_sha384_final().
 * Behavior with an invalid SHA-384 context is undefined.
 *
 * If sha384_context is NULL, then return false.
 * If hash_value is NULL, then return false.
 *
 * @param[in, out] sha384_context  Pointer to the SHA-384 context.
 * @param[out]     hash_value      Pointer to a buffer that receives the SHA-384 digest
 *                                 value (48 bytes).
 *
 * @retval true   SHA-384 digest computation succeeded.
 * @retval false  SHA-384 digest computation failed.
 **/
extern bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value);

/**
 * Computes the SHA-384 message digest of an input data buffer.
 *
 * This function performs the SHA-384 message digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]  data        Pointer to the buffer containing the data to be hashed.
 * @param[in]  data_size   Size of data buffer in bytes.
 * @param[out] hash_value  Pointer to a buffer that receives the SHA-384 digest value (48 bytes).
 *
 * @retval true   SHA-384 digest computation succeeded.
 * @retval false  SHA-384 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha384_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA384_SUPPORT */

#if LIBSPDM_SHA512_SUPPORT
/**
 * Allocates and initializes one HASH_CTX context for subsequent SHA-512 use.
 *
 * @return Pointer to the HASH_CTX context that has been initialized.
 *         If the allocation fails, libspdm_sha512_new() returns NULL.
 **/
extern void *libspdm_sha512_new(void);

/**
 * Releases the specified HASH_CTX context.
 *
 * @param[in] sha512_context  Pointer to the HASH_CTX context to be released.
 **/
extern void libspdm_sha512_free(void *sha512_context);

/**
 * Initializes user-supplied memory pointed to by sha512_context as a SHA-512 hash context for
 * subsequent use.
 *
 * If sha512_context is NULL, then return false.
 *
 * @param[out] sha512_context  Pointer to SHA-512 context being initialized.
 *
 * @retval true   SHA-512 context initialization succeeded.
 * @retval false  SHA-512 context initialization failed.
 **/
extern bool libspdm_sha512_init(void *sha512_context);

/**
 * Makes a copy of an existing SHA-512 context.
 *
 * If sha512_context is NULL, then return false.
 * If new_sha512_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  sha512_context      Pointer to SHA-512 context being copied.
 * @param[out] new_sha512_context  Pointer to new SHA-512 context.
 *
 * @retval true   SHA-512 context copy succeeded.
 * @retval false  SHA-512 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha512_duplicate(const void *sha512_context, void *new_sha512_context);

/**
 * Digests the input data and updates the SHA-512 context.
 *
 * This function performs SHA-512 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The SHA-512 context should already have been correctly initialized by libspdm_sha512_init(),
 * and must not have been finalized by libspdm_sha512_final(). Behavior with an invalid context
 * is undefined.
 *
 * If sha512_context is NULL, then return false.
 *
 * @param[in, out] sha512_context  Pointer to the SHA-512 context.
 * @param[in]      data            Pointer to the buffer containing the data to be hashed.
 * @param[in]      data_size       Size of data buffer in bytes.
 *
 * @retval true   SHA-512 data digest succeeded.
 * @retval false  SHA-512 data digest failed.
 **/
extern bool libspdm_sha512_update(void *sha512_context, const void *data, size_t data_size);

/**
 * Completes computation of the SHA-512 digest value.
 *
 * This function completes SHA-512 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the SHA-512 context cannot
 * be used again. The SHA-512 context should already have been correctly initialized by
 * libspdm_sha512_init(), and must not have been finalized by libspdm_sha512_final().
 * Behavior with an invalid SHA-512 context is undefined.
 *
 * If sha512_context is NULL, then return false.
 * If hash_value is NULL, then return false.
 *
 * @param[in, out] sha512_context  Pointer to the SHA-512 context.
 * @param[out]     hash_value      Pointer to a buffer that receives the SHA-512 digest
 *                                 value (64 bytes).
 *
 * @retval true   SHA-512 digest computation succeeded.
 * @retval false  SHA-512 digest computation failed.
 **/
extern bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value);

/**
 * Computes the SHA-512 message digest of an input data buffer.
 *
 * This function performs the SHA-512 message digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]  data        Pointer to the buffer containing the data to be hashed.
 * @param[in]  data_size   Size of data buffer in bytes.
 * @param[out] hash_value  Pointer to a buffer that receives the SHA-512 digest value (64 bytes).
 *
 * @retval true   SHA-512 digest computation succeeded.
 * @retval false  SHA-512 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha512_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA512_SUPPORT */
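
/*
 * Illustrative sketch (editor's addition, not part of the upstream libspdm header):
 * one use of the *_duplicate() functions above is taking an intermediate digest of a
 * long-running hash without disturbing it, by forking the context and finalizing
 * only the copy. The helper name is hypothetical, and the sketch assumes that a
 * context obtained from libspdm_sha512_new() is valid destination memory for
 * libspdm_sha512_duplicate().
 */
#if 0
static bool example_sha512_intermediate_digest(const void *running_ctx,
                                               uint8_t intermediate[64])
{
    /* Fork the running context; finalizing the copy leaves the original usable. */
    void *snapshot = libspdm_sha512_new();
    bool ok = (snapshot != NULL) &&
              libspdm_sha512_duplicate(running_ctx, snapshot) &&
              libspdm_sha512_final(snapshot, intermediate);

    if (snapshot != NULL) {
        libspdm_sha512_free(snapshot);
    }
    return ok;
}
#endif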

/*=====================================================================================
 * One-way cryptographic hash SHA3 primitives.
 *=====================================================================================
 */
#if LIBSPDM_SHA3_256_SUPPORT
/**
 * Allocates and initializes one HASH_CTX context for subsequent SHA3-256 use.
 *
 * @return Pointer to the HASH_CTX context that has been initialized.
 *         If the allocation fails, libspdm_sha3_256_new() returns NULL.
 **/
extern void *libspdm_sha3_256_new(void);

/**
 * Releases the specified HASH_CTX context.
 *
 * @param[in] sha3_256_context  Pointer to the HASH_CTX context to be released.
 **/
extern void libspdm_sha3_256_free(void *sha3_256_context);

/**
 * Initializes user-supplied memory pointed to by sha3_256_context as a SHA3-256 hash context
 * for subsequent use.
 *
 * If sha3_256_context is NULL, then return false.
 *
 * @param[out] sha3_256_context  Pointer to SHA3-256 context being initialized.
 *
 * @retval true   SHA3-256 context initialization succeeded.
 * @retval false  SHA3-256 context initialization failed.
 **/
extern bool libspdm_sha3_256_init(void *sha3_256_context);

/**
 * Makes a copy of an existing SHA3-256 context.
 *
 * If sha3_256_context is NULL, then return false.
 * If new_sha3_256_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  sha3_256_context      Pointer to SHA3-256 context being copied.
 * @param[out] new_sha3_256_context  Pointer to new SHA3-256 context.
 *
 * @retval true   SHA3-256 context copy succeeded.
 * @retval false  SHA3-256 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha3_256_duplicate(const void *sha3_256_context, void *new_sha3_256_context);

/**
 * Digests the input data and updates the SHA3-256 context.
 *
 * This function performs SHA3-256 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The SHA3-256 context should already have been correctly initialized by
 * libspdm_sha3_256_init(), and must not have been finalized by libspdm_sha3_256_final().
 * Behavior with an invalid context is undefined.
 *
 * If sha3_256_context is NULL, then return false.
 *
 * @param[in, out] sha3_256_context  Pointer to the SHA3-256 context.
 * @param[in]      data              Pointer to the buffer containing the data to be hashed.
 * @param[in]      data_size         Size of data buffer in bytes.
 *
 * @retval true   SHA3-256 data digest succeeded.
 * @retval false  SHA3-256 data digest failed.
 **/
extern bool libspdm_sha3_256_update(void *sha3_256_context, const void *data, size_t data_size);

/**
 * Completes computation of the SHA3-256 digest value.
 *
 * This function completes SHA3-256 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the SHA3-256 context cannot
 * be used again. The SHA3-256 context should already have been correctly initialized by
 * libspdm_sha3_256_init(), and must not have been finalized by libspdm_sha3_256_final().
 * Behavior with an invalid SHA3-256 context is undefined.
 *
 * If sha3_256_context is NULL, then return false.
 * If hash_value is NULL, then return false.
 *
 * @param[in, out] sha3_256_context  Pointer to the SHA3-256 context.
 * @param[out]     hash_value        Pointer to a buffer that receives the SHA3-256 digest
 *                                   value (32 bytes).
 *
 * @retval true   SHA3-256 digest computation succeeded.
 * @retval false  SHA3-256 digest computation failed.
 **/
extern bool libspdm_sha3_256_final(void *sha3_256_context, uint8_t *hash_value);

/**
 * Computes the SHA3-256 message digest of an input data buffer.
 *
 * This function performs the SHA3-256 message digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]  data        Pointer to the buffer containing the data to be hashed.
 * @param[in]  data_size   Size of data buffer in bytes.
 * @param[out] hash_value  Pointer to a buffer that receives the SHA3-256 digest value (32 bytes).
 *
 * @retval true   SHA3-256 digest computation succeeded.
 * @retval false  SHA3-256 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_256_SUPPORT */

#if LIBSPDM_SHA3_384_SUPPORT
/**
 * Allocates and initializes one HASH_CTX context for subsequent SHA3-384 use.
 *
 * @return Pointer to the HASH_CTX context that has been initialized.
 *         If the allocation fails, libspdm_sha3_384_new() returns NULL.
 **/
extern void *libspdm_sha3_384_new(void);

/**
 * Releases the specified HASH_CTX context.
 *
 * @param[in] sha3_384_context  Pointer to the HASH_CTX context to be released.
 **/
extern void libspdm_sha3_384_free(void *sha3_384_context);

/**
 * Initializes user-supplied memory pointed to by sha3_384_context as a SHA3-384 hash context
 * for subsequent use.
 *
 * If sha3_384_context is NULL, then return false.
 *
 * @param[out] sha3_384_context  Pointer to SHA3-384 context being initialized.
 *
 * @retval true   SHA3-384 context initialization succeeded.
 * @retval false  SHA3-384 context initialization failed.
 **/
extern bool libspdm_sha3_384_init(void *sha3_384_context);

/**
 * Makes a copy of an existing SHA3-384 context.
 *
 * If sha3_384_context is NULL, then return false.
 * If new_sha3_384_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  sha3_384_context      Pointer to SHA3-384 context being copied.
 * @param[out] new_sha3_384_context  Pointer to new SHA3-384 context.
 *
 * @retval true   SHA3-384 context copy succeeded.
 * @retval false  SHA3-384 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha3_384_duplicate(const void *sha3_384_context, void *new_sha3_384_context);

/**
 * Digests the input data and updates the SHA3-384 context.
 *
 * This function performs SHA3-384 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The SHA3-384 context should already have been correctly initialized by
 * libspdm_sha3_384_init(), and must not have been finalized by libspdm_sha3_384_final().
 * Behavior with an invalid context is undefined.
 *
 * If sha3_384_context is NULL, then return false.
 *
 * @param[in, out] sha3_384_context  Pointer to the SHA3-384 context.
 * @param[in]      data              Pointer to the buffer containing the data to be hashed.
 * @param[in]      data_size         Size of data buffer in bytes.
 *
 * @retval true   SHA3-384 data digest succeeded.
 * @retval false  SHA3-384 data digest failed.
 **/
extern bool libspdm_sha3_384_update(void *sha3_384_context, const void *data, size_t data_size);

/**
 * Completes computation of the SHA3-384 digest value.
 *
 * This function completes SHA3-384 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the SHA3-384 context cannot
 * be used again. The SHA3-384 context should already have been correctly initialized by
 * libspdm_sha3_384_init(), and must not have been finalized by libspdm_sha3_384_final().
 * Behavior with an invalid SHA3-384 context is undefined.
 *
 * If sha3_384_context is NULL, then return false.
 * If hash_value is NULL, then return false.
 *
 * @param[in, out] sha3_384_context  Pointer to the SHA3-384 context.
 * @param[out]     hash_value        Pointer to a buffer that receives the SHA3-384 digest
 *                                   value (48 bytes).
 *
 * @retval true   SHA3-384 digest computation succeeded.
 * @retval false  SHA3-384 digest computation failed.
 **/
extern bool libspdm_sha3_384_final(void *sha3_384_context, uint8_t *hash_value);

/**
 * Computes the SHA3-384 message digest of an input data buffer.
 *
 * This function performs the SHA3-384 message digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]  data        Pointer to the buffer containing the data to be hashed.
 * @param[in]  data_size   Size of data buffer in bytes.
 * @param[out] hash_value  Pointer to a buffer that receives the SHA3-384 digest value (48 bytes).
 *
 * @retval true   SHA3-384 digest computation succeeded.
 * @retval false  SHA3-384 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha3_384_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_384_SUPPORT */

#if LIBSPDM_SHA3_512_SUPPORT
/**
 * Allocates and initializes one HASH_CTX context for subsequent SHA3-512 use.
 *
 * @return Pointer to the HASH_CTX context that has been initialized.
 *         If the allocation fails, libspdm_sha3_512_new() returns NULL.
 **/
extern void *libspdm_sha3_512_new(void);

/**
 * Releases the specified HASH_CTX context.
 *
 * @param[in] sha3_512_context  Pointer to the HASH_CTX context to be released.
 **/
extern void libspdm_sha3_512_free(void *sha3_512_context);

/**
 * Initializes user-supplied memory pointed to by sha3_512_context as a SHA3-512 hash context
 * for subsequent use.
 *
 * If sha3_512_context is NULL, then return false.
 *
 * @param[out] sha3_512_context  Pointer to SHA3-512 context being initialized.
 *
 * @retval true   SHA3-512 context initialization succeeded.
 * @retval false  SHA3-512 context initialization failed.
 **/
extern bool libspdm_sha3_512_init(void *sha3_512_context);

/**
 * Makes a copy of an existing SHA3-512 context.
 *
 * If sha3_512_context is NULL, then return false.
 * If new_sha3_512_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  sha3_512_context      Pointer to SHA3-512 context being copied.
 * @param[out] new_sha3_512_context  Pointer to new SHA3-512 context.
 *
 * @retval true   SHA3-512 context copy succeeded.
 * @retval false  SHA3-512 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha3_512_duplicate(const void *sha3_512_context, void *new_sha3_512_context);

/**
 * Digests the input data and updates the SHA3-512 context.
 *
 * This function performs SHA3-512 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The SHA3-512 context should already have been correctly initialized by
 * libspdm_sha3_512_init(), and must not have been finalized by libspdm_sha3_512_final().
 * Behavior with an invalid context is undefined.
 *
 * If sha3_512_context is NULL, then return false.
 *
 * @param[in, out] sha3_512_context  Pointer to the SHA3-512 context.
 * @param[in]      data              Pointer to the buffer containing the data to be hashed.
 * @param[in]      data_size         Size of data buffer in bytes.
 *
 * @retval true   SHA3-512 data digest succeeded.
 * @retval false  SHA3-512 data digest failed.
 **/
extern bool libspdm_sha3_512_update(void *sha3_512_context, const void *data, size_t data_size);

/**
 * Completes computation of the SHA3-512 digest value.
 *
 * This function completes SHA3-512 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the SHA3-512 context cannot
 * be used again. The SHA3-512 context should already have been correctly initialized by
 * libspdm_sha3_512_init(), and must not have been finalized by libspdm_sha3_512_final().
 * Behavior with an invalid SHA3-512 context is undefined.
 *
 * If sha3_512_context is NULL, then return false.
 * If hash_value is NULL, then return false.
 *
 * @param[in, out] sha3_512_context  Pointer to the SHA3-512 context.
 * @param[out]     hash_value        Pointer to a buffer that receives the SHA3-512 digest
 *                                   value (64 bytes).
 *
 * @retval true   SHA3-512 digest computation succeeded.
 * @retval false  SHA3-512 digest computation failed.
 **/
extern bool libspdm_sha3_512_final(void *sha3_512_context, uint8_t *hash_value);

/**
 * Computes the SHA3-512 message digest of an input data buffer.
 *
 * This function performs the SHA3-512 message digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]  data        Pointer to the buffer containing the data to be hashed.
 * @param[in]  data_size   Size of data buffer in bytes.
 * @param[out] hash_value  Pointer to a buffer that receives the SHA3-512 digest value (64 bytes).
 *
 * @retval true   SHA3-512 digest computation succeeded.
 * @retval false  SHA3-512 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sha3_512_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SHA3_512_SUPPORT */

/*=====================================================================================
 * One-way cryptographic hash SM3 primitives.
 *=====================================================================================
 */

#if LIBSPDM_SM3_256_SUPPORT
/**
 * Allocates and initializes one HASH_CTX context for subsequent SM3-256 use.
 *
 * @return Pointer to the HASH_CTX context that has been initialized.
 *         If the allocation fails, libspdm_sm3_256_new() returns NULL.
 **/
extern void *libspdm_sm3_256_new(void);

/**
 * Releases the specified HASH_CTX context.
 *
 * @param[in] sm3_context  Pointer to the HASH_CTX context to be released.
 **/
extern void libspdm_sm3_256_free(void *sm3_context);

/**
 * Initializes user-supplied memory pointed to by sm3_context as an SM3 hash context for
 * subsequent use.
 *
 * If sm3_context is NULL, then return false.
 *
 * @param[out] sm3_context  Pointer to SM3 context being initialized.
 *
 * @retval true   SM3 context initialization succeeded.
 * @retval false  SM3 context initialization failed.
 **/
extern bool libspdm_sm3_256_init(void *sm3_context);

/**
 * Makes a copy of an existing SM3 context.
 *
 * If sm3_context is NULL, then return false.
 * If new_sm3_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  sm3_context      Pointer to SM3 context being copied.
 * @param[out] new_sm3_context  Pointer to new SM3 context.
 *
 * @retval true   SM3 context copy succeeded.
 * @retval false  SM3 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sm3_256_duplicate(const void *sm3_context, void *new_sm3_context);

/**
 * Digests the input data and updates the SM3 context.
 *
 * This function performs SM3 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The SM3 context should already have been correctly initialized by libspdm_sm3_256_init(),
 * and must not have been finalized by libspdm_sm3_256_final(). Behavior with an invalid
 * context is undefined.
 *
 * If sm3_context is NULL, then return false.
 *
 * @param[in, out] sm3_context  Pointer to the SM3 context.
 * @param[in]      data         Pointer to the buffer containing the data to be hashed.
 * @param[in]      data_size    Size of data buffer in bytes.
 *
 * @retval true   SM3 data digest succeeded.
 * @retval false  SM3 data digest failed.
 **/
extern bool libspdm_sm3_256_update(void *sm3_context, const void *data, size_t data_size);

/**
 * Completes computation of the SM3 digest value.
 *
 * This function completes SM3 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the SM3 context cannot
 * be used again. The SM3 context should already have been correctly initialized by
 * libspdm_sm3_256_init(), and must not have been finalized by libspdm_sm3_256_final().
 * Behavior with an invalid SM3 context is undefined.
 *
 * If sm3_context is NULL, then return false.
 * If hash_value is NULL, then return false.
 *
 * @param[in, out] sm3_context  Pointer to the SM3 context.
 * @param[out]     hash_value   Pointer to a buffer that receives the SM3 digest value (32 bytes).
 *
 * @retval true   SM3 digest computation succeeded.
 * @retval false  SM3 digest computation failed.
 **/
extern bool libspdm_sm3_256_final(void *sm3_context, uint8_t *hash_value);

/**
 * Computes the SM3 message digest of an input data buffer.
 *
 * This function performs the SM3 message digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]  data        Pointer to the buffer containing the data to be hashed.
 * @param[in]  data_size   Size of data buffer in bytes.
 * @param[out] hash_value  Pointer to a buffer that receives the SM3 digest value (32 bytes).
 *
 * @retval true   SM3 digest computation succeeded.
 * @retval false  SM3 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_sm3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
#endif /* LIBSPDM_SM3_256_SUPPORT */
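
/*
 * Illustrative sketch (editor's addition, not part of the upstream libspdm header):
 * each algorithm above also exposes a one-shot *_hash_all() entry point that replaces
 * the new/update/final/free sequence when the whole message is already in memory.
 * The helper name is hypothetical; SM3 is used here only as a representative example.
 */
#if 0
static bool example_sm3_one_shot(const uint8_t *msg, size_t msg_size)
{
    uint8_t digest[32]; /* SM3 produces a 32-byte digest. */

    /* Single call: hashes the buffer and writes the digest in one step. */
    return libspdm_sm3_256_hash_all(msg, msg_size, digest);
}
#endif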

#endif /* CRYPTLIB_HASH_H */
266
kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h
Normal file
@@ -0,0 +1,266 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_HKDF_H
#define CRYPTLIB_HKDF_H

/*=====================================================================================
 * Key Derivation Function Primitives
 *=====================================================================================*/

#if LIBSPDM_SHA256_SUPPORT
/**
 * Performs the HMAC-SHA256-based HKDF-Extract operation, deriving a pseudorandom key (PRK)
 * from the input key and salt.
 *
 * @param[in]  key           Pointer to the user-supplied key.
 * @param[in]  key_size      Key size in bytes.
 * @param[in]  salt          Pointer to the salt value.
 * @param[in]  salt_size     Salt size in bytes.
 * @param[out] prk_out       Pointer to the buffer that receives the PRK value.
 * @param[in]  prk_out_size  Size, in bytes, of the PRK to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size,
                                        const uint8_t *salt, size_t salt_size,
                                        uint8_t *prk_out, size_t prk_out_size);

/**
 * Performs the HMAC-SHA256-based HKDF-Expand operation, deriving output keying material
 * from the PRK and application-specific info.
 *
 * @param[in]  prk        Pointer to the user-supplied pseudorandom key (PRK).
 * @param[in]  prk_size   PRK size in bytes.
 * @param[in]  info       Pointer to the application-specific info.
 * @param[in]  info_size  Info size in bytes.
 * @param[out] out        Pointer to the buffer that receives the HKDF output.
 * @param[in]  out_size   Size, in bytes, of the HKDF output to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size,
                                       const uint8_t *info, size_t info_size,
                                       uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA256_SUPPORT */
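
/*
 * Illustrative sketch (editor's addition, not part of the upstream libspdm header):
 * a full HKDF derivation (RFC 5869) chains the two declarations above. Extract
 * condenses the input keying material and salt into a fixed-size PRK, and Expand
 * stretches that PRK plus application-specific info into the requested amount of
 * output keying material. The helper name is hypothetical; for HMAC-SHA256 the PRK
 * is 32 bytes (the hash length).
 */
#if 0
static bool example_hkdf_sha256(const uint8_t *ikm, size_t ikm_size,
                                const uint8_t *salt, size_t salt_size,
                                const uint8_t *info, size_t info_size,
                                uint8_t *okm, size_t okm_size)
{
    uint8_t prk[32]; /* PRK length equals the SHA-256 output length. */

    /* Extract first, then expand the resulting PRK into the output key. */
    return libspdm_hkdf_sha256_extract(ikm, ikm_size, salt, salt_size,
                                       prk, sizeof(prk)) &&
           libspdm_hkdf_sha256_expand(prk, sizeof(prk), info, info_size,
                                      okm, okm_size);
}
#endif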

#if LIBSPDM_SHA384_SUPPORT
/**
 * Performs the HMAC-SHA384-based HKDF-Extract operation, deriving a pseudorandom key (PRK)
 * from the input key and salt.
 *
 * @param[in]  key           Pointer to the user-supplied key.
 * @param[in]  key_size      Key size in bytes.
 * @param[in]  salt          Pointer to the salt value.
 * @param[in]  salt_size     Salt size in bytes.
 * @param[out] prk_out       Pointer to the buffer that receives the PRK value.
 * @param[in]  prk_out_size  Size, in bytes, of the PRK to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size,
                                        const uint8_t *salt, size_t salt_size,
                                        uint8_t *prk_out, size_t prk_out_size);

/**
 * Performs the HMAC-SHA384-based HKDF-Expand operation, deriving output keying material
 * from the PRK and application-specific info.
 *
 * @param[in]  prk        Pointer to the user-supplied pseudorandom key (PRK).
 * @param[in]  prk_size   PRK size in bytes.
 * @param[in]  info       Pointer to the application-specific info.
 * @param[in]  info_size  Info size in bytes.
 * @param[out] out        Pointer to the buffer that receives the HKDF output.
 * @param[in]  out_size   Size, in bytes, of the HKDF output to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size,
                                       const uint8_t *info, size_t info_size,
                                       uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA384_SUPPORT */

#if LIBSPDM_SHA512_SUPPORT
/**
 * Performs the HMAC-SHA512-based HKDF-Extract operation, deriving a pseudorandom key (PRK)
 * from the input key and salt.
 *
 * @param[in]  key           Pointer to the user-supplied key.
 * @param[in]  key_size      Key size in bytes.
 * @param[in]  salt          Pointer to the salt value.
 * @param[in]  salt_size     Salt size in bytes.
 * @param[out] prk_out       Pointer to the buffer that receives the PRK value.
 * @param[in]  prk_out_size  Size, in bytes, of the PRK to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size,
                                        const uint8_t *salt, size_t salt_size,
                                        uint8_t *prk_out, size_t prk_out_size);

/**
 * Performs the HMAC-SHA512-based HKDF-Expand operation, deriving output keying material
 * from the PRK and application-specific info.
 *
 * @param[in]  prk        Pointer to the user-supplied pseudorandom key (PRK).
 * @param[in]  prk_size   PRK size in bytes.
 * @param[in]  info       Pointer to the application-specific info.
 * @param[in]  info_size  Info size in bytes.
 * @param[out] out        Pointer to the buffer that receives the HKDF output.
 * @param[in]  out_size   Size, in bytes, of the HKDF output to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size,
                                       const uint8_t *info, size_t info_size,
                                       uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA512_SUPPORT */

#if LIBSPDM_SHA3_256_SUPPORT
/**
 * Performs the HMAC-SHA3-256-based HKDF-Extract operation, deriving a pseudorandom key (PRK)
 * from the input key and salt.
 *
 * @param[in]  key           Pointer to the user-supplied key.
 * @param[in]  key_size      Key size in bytes.
 * @param[in]  salt          Pointer to the salt value.
 * @param[in]  salt_size     Salt size in bytes.
 * @param[out] prk_out       Pointer to the buffer that receives the PRK value.
 * @param[in]  prk_out_size  Size, in bytes, of the PRK to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha3_256_extract(const uint8_t *key, size_t key_size,
                                          const uint8_t *salt, size_t salt_size,
                                          uint8_t *prk_out, size_t prk_out_size);

/**
 * Performs the HMAC-SHA3-256-based HKDF-Expand operation, deriving output keying material
 * from the PRK and application-specific info.
 *
 * @param[in]  prk        Pointer to the user-supplied pseudorandom key (PRK).
 * @param[in]  prk_size   PRK size in bytes.
 * @param[in]  info       Pointer to the application-specific info.
 * @param[in]  info_size  Info size in bytes.
 * @param[out] out        Pointer to the buffer that receives the HKDF output.
 * @param[in]  out_size   Size, in bytes, of the HKDF output to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha3_256_expand(const uint8_t *prk, size_t prk_size,
                                         const uint8_t *info, size_t info_size,
                                         uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_256_SUPPORT */

#if LIBSPDM_SHA3_384_SUPPORT
/**
 * Performs the HMAC-SHA3-384-based HKDF-Extract operation, deriving a pseudorandom key (PRK)
 * from the input key and salt.
 *
 * @param[in]  key           Pointer to the user-supplied key.
 * @param[in]  key_size      Key size in bytes.
 * @param[in]  salt          Pointer to the salt value.
 * @param[in]  salt_size     Salt size in bytes.
 * @param[out] prk_out       Pointer to the buffer that receives the PRK value.
 * @param[in]  prk_out_size  Size, in bytes, of the PRK to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha3_384_extract(const uint8_t *key, size_t key_size,
                                          const uint8_t *salt, size_t salt_size,
                                          uint8_t *prk_out, size_t prk_out_size);

/**
 * Performs the HMAC-SHA3-384-based HKDF-Expand operation, deriving output keying material
 * from the PRK and application-specific info.
 *
 * @param[in]  prk        Pointer to the user-supplied pseudorandom key (PRK).
 * @param[in]  prk_size   PRK size in bytes.
 * @param[in]  info       Pointer to the application-specific info.
 * @param[in]  info_size  Info size in bytes.
 * @param[out] out        Pointer to the buffer that receives the HKDF output.
 * @param[in]  out_size   Size, in bytes, of the HKDF output to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha3_384_expand(const uint8_t *prk, size_t prk_size,
                                         const uint8_t *info, size_t info_size,
                                         uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_384_SUPPORT */

#if LIBSPDM_SHA3_512_SUPPORT
/**
 * Performs the HMAC-SHA3-512-based HKDF-Extract operation, deriving a pseudorandom key (PRK)
 * from the input key and salt.
 *
 * @param[in]  key           Pointer to the user-supplied key.
 * @param[in]  key_size      Key size in bytes.
 * @param[in]  salt          Pointer to the salt value.
 * @param[in]  salt_size     Salt size in bytes.
 * @param[out] prk_out       Pointer to the buffer that receives the PRK value.
 * @param[in]  prk_out_size  Size, in bytes, of the PRK to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha3_512_extract(const uint8_t *key, size_t key_size,
                                          const uint8_t *salt, size_t salt_size,
                                          uint8_t *prk_out, size_t prk_out_size);

/**
 * Performs the HMAC-SHA3-512-based HKDF-Expand operation, deriving output keying material
 * from the PRK and application-specific info.
 *
 * @param[in]  prk        Pointer to the user-supplied pseudorandom key (PRK).
 * @param[in]  prk_size   PRK size in bytes.
 * @param[in]  info       Pointer to the application-specific info.
 * @param[in]  info_size  Info size in bytes.
 * @param[out] out        Pointer to the buffer that receives the HKDF output.
 * @param[in]  out_size   Size, in bytes, of the HKDF output to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sha3_512_expand(const uint8_t *prk, size_t prk_size,
                                         const uint8_t *info, size_t info_size,
                                         uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SHA3_512_SUPPORT */

#if LIBSPDM_SM3_256_SUPPORT
/**
 * Performs the HMAC-SM3-256-based HKDF-Extract operation, deriving a pseudorandom key (PRK)
 * from the input key and salt.
 *
 * @param[in]  key           Pointer to the user-supplied key.
 * @param[in]  key_size      Key size in bytes.
 * @param[in]  salt          Pointer to the salt value.
 * @param[in]  salt_size     Salt size in bytes.
 * @param[out] prk_out       Pointer to the buffer that receives the PRK value.
 * @param[in]  prk_out_size  Size, in bytes, of the PRK to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sm3_256_extract(const uint8_t *key, size_t key_size,
                                         const uint8_t *salt, size_t salt_size,
                                         uint8_t *prk_out, size_t prk_out_size);

/**
 * Performs the HMAC-SM3-256-based HKDF-Expand operation, deriving output keying material
 * from the PRK and application-specific info.
 *
 * @param[in]  prk        Pointer to the user-supplied pseudorandom key (PRK).
 * @param[in]  prk_size   PRK size in bytes.
 * @param[in]  info       Pointer to the application-specific info.
 * @param[in]  info_size  Info size in bytes.
 * @param[out] out        Pointer to the buffer that receives the HKDF output.
 * @param[in]  out_size   Size, in bytes, of the HKDF output to generate.
 *
 * @retval true   HKDF derivation succeeded.
 * @retval false  HKDF derivation failed.
 **/
extern bool libspdm_hkdf_sm3_256_expand(const uint8_t *prk, size_t prk_size,
                                        const uint8_t *info, size_t info_size,
                                        uint8_t *out, size_t out_size);
#endif /* LIBSPDM_SM3_256_SUPPORT */

#endif /* CRYPTLIB_HKDF_H */
833
kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h
Normal file
@@ -0,0 +1,833 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_MAC_H
#define CRYPTLIB_MAC_H

/*=====================================================================================
 * Message Authentication Code (MAC) Primitives
 *=====================================================================================
 */

#if LIBSPDM_SHA256_SUPPORT
/**
 * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA256 use.
 *
 * @return Pointer to the HMAC_CTX context that has been initialized.
 *         If the allocation fails, libspdm_hmac_sha256_new() returns NULL.
 **/
extern void *libspdm_hmac_sha256_new(void);

/**
 * Releases the specified HMAC_CTX context.
 *
 * @param[in] hmac_sha256_ctx  Pointer to the HMAC_CTX context to be released.
 **/
extern void libspdm_hmac_sha256_free(void *hmac_sha256_ctx);

/**
 * Sets the user-supplied key for subsequent use. This must be done before any
 * call to libspdm_hmac_sha256_update().
 *
 * If hmac_sha256_ctx is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[out] hmac_sha256_ctx  Pointer to HMAC-SHA256 context.
 * @param[in]  key              Pointer to the user-supplied key.
 * @param[in]  key_size         Key size in bytes.
 *
 * @retval true   The key was set successfully.
 * @retval false  The key could not be set.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key, size_t key_size);

/**
 * Makes a copy of an existing HMAC-SHA256 context.
 *
 * If hmac_sha256_ctx is NULL, then return false.
 * If new_hmac_sha256_ctx is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  hmac_sha256_ctx      Pointer to HMAC-SHA256 context being copied.
 * @param[out] new_hmac_sha256_ctx  Pointer to new HMAC-SHA256 context.
 *
 * @retval true   HMAC-SHA256 context copy succeeded.
 * @retval false  HMAC-SHA256 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx, void *new_hmac_sha256_ctx);

/**
 * Digests the input data and updates the HMAC-SHA256 context.
 *
 * This function performs HMAC-SHA256 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The HMAC-SHA256 context should have been initialized by libspdm_hmac_sha256_new(), and must
 * not have been finalized by libspdm_hmac_sha256_final(). Behavior with an invalid context is
 * undefined.
 *
 * If hmac_sha256_ctx is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in, out] hmac_sha256_ctx  Pointer to the HMAC-SHA256 context.
 * @param[in]      data             Pointer to the buffer containing the data to be digested.
 * @param[in]      data_size        Size of data buffer in bytes.
 *
 * @retval true   HMAC-SHA256 data digest succeeded.
 * @retval false  HMAC-SHA256 data digest failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data, size_t data_size);

/**
 * Completes computation of the HMAC-SHA256 digest value.
 *
 * This function completes HMAC-SHA256 hash computation and populates the digest value into
 * the specified memory. After this function has been called, the HMAC-SHA256 context cannot
 * be used again. The HMAC-SHA256 context should have been initialized by
 * libspdm_hmac_sha256_new(), and must not have been finalized by libspdm_hmac_sha256_final().
 * Behavior with an invalid HMAC-SHA256 context is undefined.
 *
 * If hmac_sha256_ctx is NULL, then return false.
 * If hmac_value is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in, out] hmac_sha256_ctx  Pointer to the HMAC-SHA256 context.
 * @param[out]     hmac_value       Pointer to a buffer that receives the HMAC-SHA256 digest
 *                                  value (32 bytes).
 *
 * @retval true   HMAC-SHA256 digest computation succeeded.
 * @retval false  HMAC-SHA256 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value);

/**
 * Computes the HMAC-SHA256 digest of an input data buffer.
 *
 * This function performs the HMAC-SHA256 digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]  data        Pointer to the buffer containing the data to be digested.
 * @param[in]  data_size   Size of data buffer in bytes.
 * @param[in]  key         Pointer to the user-supplied key.
 * @param[in]  key_size    Key size in bytes.
 * @param[out] hmac_value  Pointer to a buffer that receives the HMAC-SHA256 digest
 *                         value (32 bytes).
 *
 * @retval true   HMAC-SHA256 digest computation succeeded.
 * @retval false  HMAC-SHA256 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha256_all(const void *data, size_t data_size,
                                    const uint8_t *key, size_t key_size,
                                    uint8_t *hmac_value);
#endif /* LIBSPDM_SHA256_SUPPORT */
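
/*
 * Illustrative sketch (editor's addition, not part of the upstream libspdm header):
 * the streaming HMAC flow differs from the plain hash flow in that the key must be
 * set before the first update. The helper name is hypothetical; the one-shot
 * libspdm_hmac_sha256_all() covers the same computation in a single call.
 */
#if 0
static bool example_hmac_sha256(const uint8_t *key, size_t key_size,
                                const uint8_t *msg, size_t msg_size,
                                uint8_t mac[32])
{
    void *ctx = libspdm_hmac_sha256_new();

    /* Key first, then data; finalize once and release the context. */
    bool ok = (ctx != NULL) &&
              libspdm_hmac_sha256_set_key(ctx, key, key_size) &&
              libspdm_hmac_sha256_update(ctx, msg, msg_size) &&
              libspdm_hmac_sha256_final(ctx, mac);

    if (ctx != NULL) {
        libspdm_hmac_sha256_free(ctx);
    }
    return ok;
}
#endif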
|
||||
|
||||
#if LIBSPDM_SHA384_SUPPORT
|
||||
/**
|
||||
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA384 use.
|
||||
*
|
||||
* @return Pointer to the HMAC_CTX context that has been initialized.
|
||||
* If the allocations fails, libspdm_hmac_sha384_new() returns NULL.
|
||||
**/
|
||||
extern void *libspdm_hmac_sha384_new(void);
|
||||
|
||||
/**
|
||||
* Release the specified HMAC_CTX context.
|
||||
*
|
||||
* @param[in] hmac_sha384_ctx Pointer to the HMAC_CTX context to be released.
|
||||
**/
|
||||
extern void libspdm_hmac_sha384_free(void *hmac_sha384_ctx);
|
||||
|
||||
/**
|
||||
* Set user-supplied key for subsequent use. It must be done before any
|
||||
* calling to libspdm_hmac_sha384_update().
|
||||
*
|
||||
* If hmac_sha384_ctx is NULL, then return false.
|
||||
* If this interface is not supported, then return false.
|
||||
*
|
||||
* @param[out] hmac_sha384_ctx Pointer to HMAC-SHA384 context.
|
||||
* @param[in] key Pointer to the user-supplied key.
|
||||
* @param[in] key_size key size in bytes.
|
||||
*
|
||||
* @retval true The key is set successfully.
|
||||
* @retval false The key is set unsuccessfully.
|
||||
* @retval false This interface is not supported.
|
||||
**/
|
||||
extern bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key, size_t key_size);
|
||||
|
||||
/**
|
||||
* Makes a copy of an existing HMAC-SHA384 context.
|
||||
*
|
||||
* If hmac_sha384_ctx is NULL, then return false.
|
||||
* If new_hmac_sha384_ctx is NULL, then return false.
|
||||
* If this interface is not supported, then return false.
|
||||
*
|
||||
* @param[in] hmac_sha384_ctx Pointer to HMAC-SHA384 context being copied.
|
||||
* @param[out] new_hmac_sha384_ctx Pointer to new HMAC-SHA384 context.
|
||||
*
|
||||
* @retval true HMAC-SHA384 context copy succeeded.
|
||||
* @retval false HMAC-SHA384 context copy failed.
|
||||
* @retval false This interface is not supported.
|
||||
**/
|
||||
extern bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx, void *new_hmac_sha384_ctx);
|
||||
|
||||
/**
|
||||
* Digests the input data and updates HMAC-SHA384 context.
|
||||
*
|
||||
* This function performs HMAC-SHA384 digest on a data buffer of the specified size.
|
||||
* It can be called multiple times to compute the digest of long or discontinuous data streams.
|
||||
* HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should not be
|
||||
* finalized by libspdm_hmac_sha384_final(). Behavior with invalid context is undefined.
|
||||
*
|
||||
* If hmac_sha384_ctx is NULL, then return false.
|
||||
* If this interface is not supported, then return false.
|
||||
*
|
||||
* @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context.
|
||||
* @param[in] data Pointer to the buffer containing the data to be digested.
|
||||
* @param[in] data_size Size of data buffer in bytes.
|
||||
*
|
||||
* @retval true HMAC-SHA384 data digest succeeded.
|
||||
* @retval false HMAC-SHA384 data digest failed.
|
||||
* @retval false This interface is not supported.
|
||||
**/
|
||||
extern bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data, size_t data_size);
|
||||
|
||||
/**
|
||||
* Completes computation of the HMAC-SHA384 digest value.
|
||||
*
|
||||
* This function completes HMAC-SHA384 hash computation and retrieves the digest value into
|
||||
* the specified memory. After this function has been called, the HMAC-SHA384 context cannot
|
||||
* be used again. HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should
|
||||
* not be finalized by libspdm_hmac_sha384_final(). Behavior with invalid HMAC-SHA384 context is
|
||||
* undefined.
|
||||
*
|
||||
* If hmac_sha384_ctx is NULL, then return false.
|
||||
* If hmac_value is NULL, then return false.
|
||||
* If this interface is not supported, then return false.
|
||||
*
|
||||
* @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context.
|
||||
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest
|
||||
* value (48 bytes).
|
||||
*
|
||||
* @retval true HMAC-SHA384 digest computation succeeded.
|
||||
* @retval false HMAC-SHA384 digest computation failed.
|
||||
* @retval false This interface is not supported.
|
||||
**/
|
||||
extern bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value);
|
||||
|
||||
/**
|
||||
* Computes the HMAC-SHA384 digest of a input data buffer.
|
||||
*
|
||||
* This function performs the HMAC-SHA384 digest of a given data buffer, and places
|
||||
* the digest value into the specified memory.
|
||||
*
|
||||
* If this interface is not supported, then return false.
|
||||
*
|
||||
* @param[in] data Pointer to the buffer containing the data to be digested.
|
||||
* @param[in] data_size Size of data buffer in bytes.
|
||||
* @param[in] key Pointer to the user-supplied key.
|
||||
* @param[in] key_size Key size in bytes.
|
||||
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest
|
||||
* value (48 bytes).
|
||||
*
|
||||
* @retval true HMAC-SHA384 digest computation succeeded.
|
||||
* @retval false HMAC-SHA384 digest computation failed.
|
||||
* @retval false This interface is not supported.
|
||||
**/
|
||||
extern bool libspdm_hmac_sha384_all(const void *data, size_t data_size,
|
||||
const uint8_t *key, size_t key_size,
|
||||
uint8_t *hmac_value);
|
||||
#endif /* LIBSPDM_SHA384_SUPPORT */
|
||||
|
||||
#if LIBSPDM_SHA512_SUPPORT
|
||||
/**
|
||||
* Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA512 use.
|
||||
*
|
||||
* @return Pointer to the HMAC_CTX context that has been initialized.
|
||||
* If the allocations fails, libspdm_hmac_sha512_new() returns NULL.
|
||||
**/
|
||||
extern void *libspdm_hmac_sha512_new(void);
|
||||
|
||||
/**
|
||||
* Release the specified HMAC_CTX context.
|
||||
*
|
||||
* @param[in] hmac_sha512_ctx Pointer to the HMAC_CTX context to be released.
|
||||
**/
|
||||
extern void libspdm_hmac_sha512_free(void *hmac_sha512_ctx);
|
||||
|
||||
/**
|
||||
* Set user-supplied key for subsequent use. It must be done before any
|
||||
* calling to libspdm_hmac_sha512_update().
|
||||
*
|
||||
* If hmac_sha512_ctx is NULL, then return false.
|
||||
* If this interface is not supported, then return false.
|
||||
*
|
||||
* @param[out] hmac_sha512_ctx Pointer to HMAC-SHA512 context.
|
||||
* @param[in] key Pointer to the user-supplied key.
|
||||
* @param[in] key_size Key size in bytes.
|
||||
*
|
||||
* @retval true The key is set successfully.
|
||||
* @retval false The key is set unsuccessfully.
|
||||
* @retval false This interface is not supported.
|
||||
**/
|
||||
extern bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key, size_t key_size);
|
||||
|
||||
/**
|
||||
* Makes a copy of an existing HMAC-SHA512 context.
|
||||
*
|
||||
* If hmac_sha512_ctx is NULL, then return false.
|
||||
* If new_hmac_sha512_ctx is NULL, then return false.
|
||||
* If this interface is not supported, then return false.
|
||||
*
|
||||
* @param[in] hmac_sha512_ctx Pointer to HMAC-SHA512 context being copied.
|
||||
* @param[out] new_hmac_sha512_ctx Pointer to new HMAC-SHA512 context.
|
||||
*
|
||||
* @retval true HMAC-SHA512 context copy succeeded.
|
||||
* @retval false HMAC-SHA512 context copy failed.
|
||||
* @retval false This interface is not supported.
|
||||
**/
|
||||
extern bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx, void *new_hmac_sha512_ctx);
|
||||
|
||||
/**
|
||||
* Digests the input data and updates HMAC-SHA512 context.
|
||||
*
|
||||
* This function performs HMAC-SHA512 digest on a data buffer of the specified size.
|
||||
* It can be called multiple times to compute the digest of long or discontinuous data streams.
|
||||
* HMAC-SHA512 context should be initialized by libspdm_hmac_sha512_new(), and should not be
|
||||
* finalized by libspdm_hmac_sha512_final(). Behavior with invalid context is undefined.
|
||||
*
|
||||
* If hmac_sha512_ctx is NULL, then return false.
|
||||
* If this interface is not supported, then return false.
|
||||
*
|
||||
* @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context.
|
||||
* @param[in] data Pointer to the buffer containing the data to be digested.
|
||||
* @param[in] data_size Size of data buffer in bytes.
|
||||
*
|
||||
* @retval true HMAC-SHA512 data digest succeeded.
|
||||
* @retval false HMAC-SHA512 data digest failed.
|
||||
* @retval false This interface is not supported.
|
||||
**/
|
||||
extern bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data, size_t data_size);
|
||||
|
||||
/**
|
||||
* Completes computation of the HMAC-SHA512 digest value.
|
||||
*
|
||||
* This function completes HMAC-SHA512 hash computation and retrieves the digest value into
|
||||
* the specified memory. After this function has been called, the HMAC-SHA512 context cannot
|
||||
* be used again. HMAC-SHA512 context should be initialized by libspdm_hmac_sha512_new(), and should
|
||||
* not be finalized by libspdm_hmac_sha512_final(). Behavior with invalid HMAC-SHA512 context is
|
||||
* undefined.
|
||||
*
|
||||
* If hmac_sha512_ctx is NULL, then return false.
|
||||
* If hmac_value is NULL, then return false.
|
||||
* If this interface is not supported, then return false.
|
||||
*
|
||||
* @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context.
|
||||
* @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA512 digest
|
||||
* value (64 bytes).
|
||||
*
|
||||
* @retval true HMAC-SHA512 digest computation succeeded.
|
||||
* @retval false HMAC-SHA512 digest computation failed.
|
||||
* @retval false This interface is not supported.
|
||||
**/
|
||||
extern bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value);

/**
 * Computes the HMAC-SHA512 digest of an input data buffer.
 *
 * This function performs the HMAC-SHA512 digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]   data        Pointer to the buffer containing the data to be digested.
 * @param[in]   data_size   Size of data buffer in bytes.
 * @param[in]   key         Pointer to the user-supplied key.
 * @param[in]   key_size    Key size in bytes.
 * @param[out]  hmac_value  Pointer to a buffer that receives the HMAC-SHA512 digest
 *                          value (64 bytes).
 *
 * @retval true   HMAC-SHA512 digest computation succeeded.
 * @retval false  HMAC-SHA512 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha512_all(const void *data, size_t data_size,
                                    const uint8_t *key, size_t key_size,
                                    uint8_t *hmac_value);
#endif /* LIBSPDM_SHA512_SUPPORT */
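
/*
 * Illustrative usage sketch (not part of the upstream API surface): computing
 * an HMAC-SHA512 over two discontiguous buffers with the streaming interface
 * above. libspdm_hmac_sha512_free() is assumed to be the matching release
 * function declared earlier in this header, by analogy with the other HMAC
 * families below. Error handling is collapsed for brevity.
 *
 *     uint8_t mac[64];
 *     void *ctx = libspdm_hmac_sha512_new();
 *
 *     if ((ctx != NULL) &&
 *         libspdm_hmac_sha512_set_key(ctx, key, key_size) &&
 *         libspdm_hmac_sha512_update(ctx, part1, part1_size) &&
 *         libspdm_hmac_sha512_update(ctx, part2, part2_size) &&
 *         libspdm_hmac_sha512_final(ctx, mac)) {
 *         ... mac now holds the 64-byte HMAC-SHA512 value ...
 *     }
 *     libspdm_hmac_sha512_free(ctx);
 *
 * When the data is contiguous, libspdm_hmac_sha512_all(data, data_size, key,
 * key_size, mac) computes the same value in a single call.
 */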

#if LIBSPDM_SHA3_256_SUPPORT
/**
 * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-256 use.
 *
 * @return  Pointer to the HMAC_CTX context that has been initialized.
 *          If the allocation fails, libspdm_hmac_sha3_256_new() returns NULL.
 **/
extern void *libspdm_hmac_sha3_256_new(void);

/**
 * Releases the specified HMAC_CTX context.
 *
 * @param[in]  hmac_sha3_256_ctx  Pointer to the HMAC_CTX context to be released.
 **/
extern void libspdm_hmac_sha3_256_free(void *hmac_sha3_256_ctx);

/**
 * Sets the user-supplied key for subsequent use. It must be done before any
 * call to libspdm_hmac_sha3_256_update().
 *
 * If hmac_sha3_256_ctx is NULL, then return false.
 *
 * @param[out]  hmac_sha3_256_ctx  Pointer to HMAC-SHA3-256 context.
 * @param[in]   key                Pointer to the user-supplied key.
 * @param[in]   key_size           Key size in bytes.
 *
 * @retval true   The key was set successfully.
 * @retval false  Setting the key failed.
 **/
extern bool libspdm_hmac_sha3_256_set_key(void *hmac_sha3_256_ctx,
                                          const uint8_t *key,
                                          size_t key_size);

/**
 * Makes a copy of an existing HMAC-SHA3-256 context.
 *
 * If hmac_sha3_256_ctx is NULL, then return false.
 * If new_hmac_sha3_256_ctx is NULL, then return false.
 *
 * @param[in]   hmac_sha3_256_ctx      Pointer to HMAC-SHA3-256 context being copied.
 * @param[out]  new_hmac_sha3_256_ctx  Pointer to new HMAC-SHA3-256 context.
 *
 * @retval true   HMAC-SHA3-256 context copy succeeded.
 * @retval false  HMAC-SHA3-256 context copy failed.
 **/
extern bool libspdm_hmac_sha3_256_duplicate(const void *hmac_sha3_256_ctx,
                                            void *new_hmac_sha3_256_ctx);

/**
 * Digests the input data and updates the HMAC-SHA3-256 context.
 *
 * This function performs HMAC-SHA3-256 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The HMAC-SHA3-256 context should be initialized by libspdm_hmac_sha3_256_new(), and should not
 * be finalized by libspdm_hmac_sha3_256_final(). Behavior with an invalid context is undefined.
 *
 * If hmac_sha3_256_ctx is NULL, then return false.
 *
 * @param[in, out]  hmac_sha3_256_ctx  Pointer to the HMAC-SHA3-256 context.
 * @param[in]       data               Pointer to the buffer containing the data to be digested.
 * @param[in]       data_size          Size of data buffer in bytes.
 *
 * @retval true   HMAC-SHA3-256 data digest succeeded.
 * @retval false  HMAC-SHA3-256 data digest failed.
 **/
extern bool libspdm_hmac_sha3_256_update(void *hmac_sha3_256_ctx,
                                         const void *data, size_t data_size);

/**
 * Completes computation of the HMAC-SHA3-256 digest value.
 *
 * This function completes HMAC-SHA3-256 hash computation and retrieves the digest value into
 * the specified memory. After this function has been called, the HMAC-SHA3-256 context cannot
 * be used again. The context should be initialized by libspdm_hmac_sha3_256_new(), and should
 * not already be finalized by libspdm_hmac_sha3_256_final(). Behavior with an invalid
 * HMAC-SHA3-256 context is undefined.
 *
 * If hmac_sha3_256_ctx is NULL, then return false.
 * If hmac_value is NULL, then return false.
 *
 * @param[in, out]  hmac_sha3_256_ctx  Pointer to the HMAC-SHA3-256 context.
 * @param[out]      hmac_value         Pointer to a buffer that receives the HMAC-SHA3-256 digest
 *                                     value (32 bytes).
 *
 * @retval true   HMAC-SHA3-256 digest computation succeeded.
 * @retval false  HMAC-SHA3-256 digest computation failed.
 **/
extern bool libspdm_hmac_sha3_256_final(void *hmac_sha3_256_ctx, uint8_t *hmac_value);

/**
 * Computes the HMAC-SHA3-256 digest of an input data buffer.
 *
 * This function performs the HMAC-SHA3-256 digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]   data        Pointer to the buffer containing the data to be digested.
 * @param[in]   data_size   Size of data buffer in bytes.
 * @param[in]   key         Pointer to the user-supplied key.
 * @param[in]   key_size    Key size in bytes.
 * @param[out]  hmac_value  Pointer to a buffer that receives the HMAC-SHA3-256 digest
 *                          value (32 bytes).
 *
 * @retval true   HMAC-SHA3-256 digest computation succeeded.
 * @retval false  HMAC-SHA3-256 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_256_all(const void *data, size_t data_size,
                                      const uint8_t *key, size_t key_size,
                                      uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_256_SUPPORT */

#if LIBSPDM_SHA3_384_SUPPORT
/**
 * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-384 use.
 *
 * @return  Pointer to the HMAC_CTX context that has been initialized.
 *          If the allocation fails, libspdm_hmac_sha3_384_new() returns NULL.
 **/
extern void *libspdm_hmac_sha3_384_new(void);

/**
 * Releases the specified HMAC_CTX context.
 *
 * @param[in]  hmac_sha3_384_ctx  Pointer to the HMAC_CTX context to be released.
 **/
extern void libspdm_hmac_sha3_384_free(void *hmac_sha3_384_ctx);

/**
 * Sets the user-supplied key for subsequent use. It must be done before any
 * call to libspdm_hmac_sha3_384_update().
 *
 * If hmac_sha3_384_ctx is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[out]  hmac_sha3_384_ctx  Pointer to HMAC-SHA3-384 context.
 * @param[in]   key                Pointer to the user-supplied key.
 * @param[in]   key_size           Key size in bytes.
 *
 * @retval true   The key was set successfully.
 * @retval false  Setting the key failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_384_set_key(void *hmac_sha3_384_ctx,
                                          const uint8_t *key,
                                          size_t key_size);

/**
 * Makes a copy of an existing HMAC-SHA3-384 context.
 *
 * If hmac_sha3_384_ctx is NULL, then return false.
 * If new_hmac_sha3_384_ctx is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]   hmac_sha3_384_ctx      Pointer to HMAC-SHA3-384 context being copied.
 * @param[out]  new_hmac_sha3_384_ctx  Pointer to new HMAC-SHA3-384 context.
 *
 * @retval true   HMAC-SHA3-384 context copy succeeded.
 * @retval false  HMAC-SHA3-384 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_384_duplicate(const void *hmac_sha3_384_ctx,
                                            void *new_hmac_sha3_384_ctx);

/**
 * Digests the input data and updates the HMAC-SHA3-384 context.
 *
 * This function performs HMAC-SHA3-384 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The HMAC-SHA3-384 context should be initialized by libspdm_hmac_sha3_384_new(), and should not
 * be finalized by libspdm_hmac_sha3_384_final(). Behavior with an invalid context is undefined.
 *
 * If hmac_sha3_384_ctx is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in, out]  hmac_sha3_384_ctx  Pointer to the HMAC-SHA3-384 context.
 * @param[in]       data               Pointer to the buffer containing the data to be digested.
 * @param[in]       data_size          Size of data buffer in bytes.
 *
 * @retval true   HMAC-SHA3-384 data digest succeeded.
 * @retval false  HMAC-SHA3-384 data digest failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_384_update(void *hmac_sha3_384_ctx, const void *data,
                                         size_t data_size);

/**
 * Completes computation of the HMAC-SHA3-384 digest value.
 *
 * This function completes HMAC-SHA3-384 hash computation and retrieves the digest value into
 * the specified memory. After this function has been called, the HMAC-SHA3-384 context cannot
 * be used again. The context should be initialized by libspdm_hmac_sha3_384_new(), and should
 * not already be finalized by libspdm_hmac_sha3_384_final(). Behavior with an invalid
 * HMAC-SHA3-384 context is undefined.
 *
 * If hmac_sha3_384_ctx is NULL, then return false.
 * If hmac_value is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in, out]  hmac_sha3_384_ctx  Pointer to the HMAC-SHA3-384 context.
 * @param[out]      hmac_value         Pointer to a buffer that receives the HMAC-SHA3-384 digest
 *                                     value (48 bytes).
 *
 * @retval true   HMAC-SHA3-384 digest computation succeeded.
 * @retval false  HMAC-SHA3-384 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_384_final(void *hmac_sha3_384_ctx, uint8_t *hmac_value);

/**
 * Computes the HMAC-SHA3-384 digest of an input data buffer.
 *
 * This function performs the HMAC-SHA3-384 digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]   data        Pointer to the buffer containing the data to be digested.
 * @param[in]   data_size   Size of data buffer in bytes.
 * @param[in]   key         Pointer to the user-supplied key.
 * @param[in]   key_size    Key size in bytes.
 * @param[out]  hmac_value  Pointer to a buffer that receives the HMAC-SHA3-384 digest
 *                          value (48 bytes).
 *
 * @retval true   HMAC-SHA3-384 digest computation succeeded.
 * @retval false  HMAC-SHA3-384 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_384_all(const void *data, size_t data_size,
                                      const uint8_t *key, size_t key_size,
                                      uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_384_SUPPORT */

#if LIBSPDM_SHA3_512_SUPPORT
/**
 * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-512 use.
 *
 * @return  Pointer to the HMAC_CTX context that has been initialized.
 *          If the allocation fails, libspdm_hmac_sha3_512_new() returns NULL.
 **/
extern void *libspdm_hmac_sha3_512_new(void);

/**
 * Releases the specified HMAC_CTX context.
 *
 * @param[in]  hmac_sha3_512_ctx  Pointer to the HMAC_CTX context to be released.
 **/
extern void libspdm_hmac_sha3_512_free(void *hmac_sha3_512_ctx);

/**
 * Sets the user-supplied key for subsequent use. It must be done before any
 * call to libspdm_hmac_sha3_512_update().
 *
 * If hmac_sha3_512_ctx is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[out]  hmac_sha3_512_ctx  Pointer to HMAC-SHA3-512 context.
 * @param[in]   key                Pointer to the user-supplied key.
 * @param[in]   key_size           Key size in bytes.
 *
 * @retval true   The key was set successfully.
 * @retval false  Setting the key failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_512_set_key(void *hmac_sha3_512_ctx,
                                          const uint8_t *key,
                                          size_t key_size);

/**
 * Makes a copy of an existing HMAC-SHA3-512 context.
 *
 * If hmac_sha3_512_ctx is NULL, then return false.
 * If new_hmac_sha3_512_ctx is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]   hmac_sha3_512_ctx      Pointer to HMAC-SHA3-512 context being copied.
 * @param[out]  new_hmac_sha3_512_ctx  Pointer to new HMAC-SHA3-512 context.
 *
 * @retval true   HMAC-SHA3-512 context copy succeeded.
 * @retval false  HMAC-SHA3-512 context copy failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_512_duplicate(const void *hmac_sha3_512_ctx,
                                            void *new_hmac_sha3_512_ctx);

/**
 * Digests the input data and updates the HMAC-SHA3-512 context.
 *
 * This function performs HMAC-SHA3-512 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The HMAC-SHA3-512 context should be initialized by libspdm_hmac_sha3_512_new(), and should not
 * be finalized by libspdm_hmac_sha3_512_final(). Behavior with an invalid context is undefined.
 *
 * If hmac_sha3_512_ctx is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in, out]  hmac_sha3_512_ctx  Pointer to the HMAC-SHA3-512 context.
 * @param[in]       data               Pointer to the buffer containing the data to be digested.
 * @param[in]       data_size          Size of data buffer in bytes.
 *
 * @retval true   HMAC-SHA3-512 data digest succeeded.
 * @retval false  HMAC-SHA3-512 data digest failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_512_update(void *hmac_sha3_512_ctx,
                                         const void *data, size_t data_size);

/**
 * Completes computation of the HMAC-SHA3-512 digest value.
 *
 * This function completes HMAC-SHA3-512 hash computation and retrieves the digest value into
 * the specified memory. After this function has been called, the HMAC-SHA3-512 context cannot
 * be used again. The context should be initialized by libspdm_hmac_sha3_512_new(), and should
 * not already be finalized by libspdm_hmac_sha3_512_final(). Behavior with an invalid
 * HMAC-SHA3-512 context is undefined.
 *
 * If hmac_sha3_512_ctx is NULL, then return false.
 * If hmac_value is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in, out]  hmac_sha3_512_ctx  Pointer to the HMAC-SHA3-512 context.
 * @param[out]      hmac_value         Pointer to a buffer that receives the HMAC-SHA3-512 digest
 *                                     value (64 bytes).
 *
 * @retval true   HMAC-SHA3-512 digest computation succeeded.
 * @retval false  HMAC-SHA3-512 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_512_final(void *hmac_sha3_512_ctx, uint8_t *hmac_value);

/**
 * Computes the HMAC-SHA3-512 digest of an input data buffer.
 *
 * This function performs the HMAC-SHA3-512 digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]   data        Pointer to the buffer containing the data to be digested.
 * @param[in]   data_size   Size of data buffer in bytes.
 * @param[in]   key         Pointer to the user-supplied key.
 * @param[in]   key_size    Key size in bytes.
 * @param[out]  hmac_value  Pointer to a buffer that receives the HMAC-SHA3-512 digest
 *                          value (64 bytes).
 *
 * @retval true   HMAC-SHA3-512 digest computation succeeded.
 * @retval false  HMAC-SHA3-512 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sha3_512_all(const void *data, size_t data_size,
                                      const uint8_t *key, size_t key_size,
                                      uint8_t *hmac_value);
#endif /* LIBSPDM_SHA3_512_SUPPORT */

#if LIBSPDM_SM3_256_SUPPORT
/**
 * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SM3-256 use.
 *
 * @return  Pointer to the HMAC_CTX context that has been initialized.
 *          If the allocation fails, libspdm_hmac_sm3_256_new() returns NULL.
 **/
extern void *libspdm_hmac_sm3_256_new(void);

/**
 * Releases the specified HMAC_CTX context.
 *
 * @param[in]  hmac_sm3_256_ctx  Pointer to the HMAC_CTX context to be released.
 **/
extern void libspdm_hmac_sm3_256_free(void *hmac_sm3_256_ctx);

/**
 * Sets the user-supplied key for subsequent use. It must be done before any
 * call to libspdm_hmac_sm3_256_update().
 *
 * If hmac_sm3_256_ctx is NULL, then return false.
 *
 * @param[out]  hmac_sm3_256_ctx  Pointer to HMAC-SM3-256 context.
 * @param[in]   key               Pointer to the user-supplied key.
 * @param[in]   key_size          Key size in bytes.
 *
 * @retval true   The key was set successfully.
 * @retval false  Setting the key failed.
 **/
extern bool libspdm_hmac_sm3_256_set_key(void *hmac_sm3_256_ctx,
                                         const uint8_t *key, size_t key_size);

/**
 * Makes a copy of an existing HMAC-SM3-256 context.
 *
 * If hmac_sm3_256_ctx is NULL, then return false.
 * If new_hmac_sm3_256_ctx is NULL, then return false.
 *
 * @param[in]   hmac_sm3_256_ctx      Pointer to HMAC-SM3-256 context being copied.
 * @param[out]  new_hmac_sm3_256_ctx  Pointer to new HMAC-SM3-256 context.
 *
 * @retval true   HMAC-SM3-256 context copy succeeded.
 * @retval false  HMAC-SM3-256 context copy failed.
 **/
extern bool libspdm_hmac_sm3_256_duplicate(const void *hmac_sm3_256_ctx,
                                           void *new_hmac_sm3_256_ctx);

/**
 * Digests the input data and updates the HMAC-SM3-256 context.
 *
 * This function performs HMAC-SM3-256 digest on a data buffer of the specified size.
 * It can be called multiple times to compute the digest of long or discontinuous data streams.
 * The HMAC-SM3-256 context should be initialized by libspdm_hmac_sm3_256_new(), and should not
 * be finalized by libspdm_hmac_sm3_256_final(). Behavior with an invalid context is undefined.
 *
 * If hmac_sm3_256_ctx is NULL, then return false.
 *
 * @param[in, out]  hmac_sm3_256_ctx  Pointer to the HMAC-SM3-256 context.
 * @param[in]       data              Pointer to the buffer containing the data to be digested.
 * @param[in]       data_size         Size of data buffer in bytes.
 *
 * @retval true   HMAC-SM3-256 data digest succeeded.
 * @retval false  HMAC-SM3-256 data digest failed.
 **/
extern bool libspdm_hmac_sm3_256_update(void *hmac_sm3_256_ctx, const void *data, size_t data_size);

/**
 * Completes computation of the HMAC-SM3-256 digest value.
 *
 * This function completes HMAC-SM3-256 hash computation and retrieves the digest value into
 * the specified memory. After this function has been called, the HMAC-SM3-256 context cannot
 * be used again. The context should be initialized by libspdm_hmac_sm3_256_new(), and should
 * not already be finalized by libspdm_hmac_sm3_256_final(). Behavior with an invalid
 * HMAC-SM3-256 context is undefined.
 *
 * If hmac_sm3_256_ctx is NULL, then return false.
 * If hmac_value is NULL, then return false.
 *
 * @param[in, out]  hmac_sm3_256_ctx  Pointer to the HMAC-SM3-256 context.
 * @param[out]      hmac_value        Pointer to a buffer that receives the HMAC-SM3-256 digest
 *                                    value (32 bytes).
 *
 * @retval true   HMAC-SM3-256 digest computation succeeded.
 * @retval false  HMAC-SM3-256 digest computation failed.
 **/
extern bool libspdm_hmac_sm3_256_final(void *hmac_sm3_256_ctx, uint8_t *hmac_value);

/**
 * Computes the HMAC-SM3-256 digest of an input data buffer.
 *
 * This function performs the HMAC-SM3-256 digest of a given data buffer, and places
 * the digest value into the specified memory.
 *
 * If this interface is not supported, then return false.
 *
 * @param[in]   data        Pointer to the buffer containing the data to be digested.
 * @param[in]   data_size   Size of data buffer in bytes.
 * @param[in]   key         Pointer to the user-supplied key.
 * @param[in]   key_size    Key size in bytes.
 * @param[out]  hmac_value  Pointer to a buffer that receives the HMAC-SM3-256 digest
 *                          value (32 bytes).
 *
 * @retval true   HMAC-SM3-256 digest computation succeeded.
 * @retval false  HMAC-SM3-256 digest computation failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_hmac_sm3_256_all(const void *data, size_t data_size,
                                     const uint8_t *key, size_t key_size,
                                     uint8_t *hmac_value);
#endif /* LIBSPDM_SM3_256_SUPPORT */

#endif /* CRYPTLIB_MAC_H */
30
kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h
Normal file
@ -0,0 +1,30 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_RNG_H
#define CRYPTLIB_RNG_H

/*=====================================================================================
 * Random Number Generation Primitive
 *=====================================================================================*/

/**
 * Generates a random byte stream of the specified size. If initialization, testing, or seeding
 * of the (pseudo)random number generator is required, it should be done before this function is
 * called.
 *
 * If output is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[out]  output  Pointer to buffer to receive random value.
 * @param[in]   size    Size of random bytes to generate.
 *
 * @retval true   Random byte stream generated successfully.
 * @retval false  Generation of random byte stream failed.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_random_bytes(uint8_t *output, size_t size);
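
/*
 * Illustrative usage sketch (not part of the upstream API): drawing a fresh
 * 32-byte nonce. The buffer and its size are caller-chosen.
 *
 *     uint8_t nonce[32];
 *
 *     if (!libspdm_random_bytes(nonce, sizeof(nonce))) {
 *         ... handle RNG failure; do not use the buffer contents ...
 *     }
 */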

#endif /* CRYPTLIB_RNG_H */
264
kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h
Normal file
@ -0,0 +1,264 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_RSA_H
#define CRYPTLIB_RSA_H

/*=====================================================================================
 * RSA Cryptography Primitives
 *=====================================================================================
 */

#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
/* RSA key tag definitions, used in the libspdm_rsa_set_key() function for key component
 * identification.
 */
typedef enum {
    LIBSPDM_RSA_KEY_N,    /*< RSA public modulus (N) */
    LIBSPDM_RSA_KEY_E,    /*< RSA public exponent (e) */
    LIBSPDM_RSA_KEY_D,    /*< RSA private exponent (d) */
    LIBSPDM_RSA_KEY_P,    /*< RSA secret prime factor of modulus (p) */
    LIBSPDM_RSA_KEY_Q,    /*< RSA secret prime factor of modulus (q) */
    LIBSPDM_RSA_KEY_DP,   /*< p's CRT exponent (== d mod (p - 1)) */
    LIBSPDM_RSA_KEY_DQ,   /*< q's CRT exponent (== d mod (q - 1)) */
    LIBSPDM_RSA_KEY_Q_INV /*< The CRT coefficient (== 1/q mod p) */
} libspdm_rsa_key_tag_t;

/**
 * Allocates and initializes one RSA context for subsequent use.
 *
 * @return  Pointer to the RSA context that has been initialized.
 *          If the allocation fails, libspdm_rsa_new() returns NULL.
 **/
extern void *libspdm_rsa_new(void);

/**
 * Releases the specified RSA context.
 *
 * If rsa_context is NULL, then return false.
 *
 * @param[in]  rsa_context  Pointer to the RSA context to be released.
 **/
extern void libspdm_rsa_free(void *rsa_context);

/**
 * Sets the tag-designated key component into the established RSA context.
 *
 * This function sets the tag-designated RSA key component into the established
 * RSA context from the user-specified non-negative integer (octet string format
 * represented in RSA PKCS#1).
 * If big_number is NULL, then the specified key component in the RSA context is cleared.
 * If rsa_context is NULL, then return false.
 *
 * @param[in, out]  rsa_context  Pointer to RSA context being set.
 * @param[in]       key_tag      Tag of RSA key component being set.
 * @param[in]       big_number   Pointer to octet integer buffer.
 *                               If NULL, then the specified key component in the RSA
 *                               context is cleared.
 * @param[in]       bn_size      Size of big number buffer in bytes.
 *                               If big_number is NULL, then it is ignored.
 *
 * @retval true   RSA key component was set successfully.
 * @retval false  Invalid RSA key component tag.
 **/
extern bool libspdm_rsa_set_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
                                const uint8_t *big_number, size_t bn_size);

/**
 * Gets the tag-designated RSA key component from the established RSA context.
 *
 * This function retrieves the tag-designated RSA key component from the
 * established RSA context as a non-negative integer (octet string format
 * represented in RSA PKCS#1).
 * If the specified key component has not been set or has been cleared, then the
 * returned bn_size is set to 0.
 * If the big_number buffer is too small to hold the contents of the key, false
 * is returned and bn_size is set to the required buffer size to obtain the key.
 *
 * If rsa_context is NULL, then return false.
 * If bn_size is NULL, then return false.
 * If bn_size is large enough but big_number is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in, out]  rsa_context  Pointer to RSA context being queried.
 * @param[in]       key_tag      Tag of RSA key component being retrieved.
 * @param[out]      big_number   Pointer to octet integer buffer.
 * @param[in, out]  bn_size      On input, the size of big number buffer in bytes.
 *                               On output, the size of data returned in big number buffer
 *                               in bytes.
 *
 * @retval true   RSA key component was retrieved successfully.
 * @retval false  Invalid RSA key component tag.
 * @retval false  bn_size is too small.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_rsa_get_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
                                uint8_t *big_number, size_t *bn_size);
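
/*
 * Illustrative usage sketch (not part of the upstream API): loading a raw
 * public key (modulus n, exponent e, both caller-supplied big-endian octet
 * strings) into a fresh context via the tag-designated setter above.
 *
 *     void *rsa = libspdm_rsa_new();
 *
 *     if ((rsa != NULL) &&
 *         libspdm_rsa_set_key(rsa, LIBSPDM_RSA_KEY_N, n, n_size) &&
 *         libspdm_rsa_set_key(rsa, LIBSPDM_RSA_KEY_E, e, e_size)) {
 *         ... the context is now usable for signature verification ...
 *     }
 *     libspdm_rsa_free(rsa);
 */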

/**
 * Generates RSA key components.
 *
 * This function generates RSA key components. It takes the RSA public exponent E and
 * the length in bits of the RSA modulus N as input, and generates all key components.
 * If public_exponent is NULL, the default RSA public exponent (0x10001) will be used.
 *
 * If rsa_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in, out]  rsa_context           Pointer to RSA context being set.
 * @param[in]       modulus_length        Length of RSA modulus N in bits.
 * @param[in]       public_exponent       Pointer to RSA public exponent.
 * @param[in]       public_exponent_size  Size of RSA public exponent buffer in bytes.
 *
 * @retval true   RSA key components were generated successfully.
 * @retval false  Invalid RSA key component tag.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_rsa_generate_key(void *rsa_context, size_t modulus_length,
                                     const uint8_t *public_exponent,
                                     size_t public_exponent_size);

/**
 * Validates the key components of an RSA context.
 * NOTE: This function performs integrity checks on all the RSA key material, so
 * the RSA key structure must contain all the private key data.
 *
 * This function validates the key components of the RSA context in the following aspects:
 * - Whether p is a prime
 * - Whether q is a prime
 * - Whether n = p * q
 * - Whether d*e = 1 mod lcm(p-1, q-1)
 *
 * If rsa_context is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]  rsa_context  Pointer to RSA context to check.
 *
 * @retval true   RSA key components are valid.
 * @retval false  RSA key components are not valid.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_rsa_check_key(void *rsa_context);
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */

#if LIBSPDM_RSA_SSA_SUPPORT
/**
 * Carries out the RSA-SSA signature generation with EMSA-PKCS1-v1_5 encoding scheme.
 *
 * This function carries out the RSA-SSA signature generation with EMSA-PKCS1-v1_5 encoding scheme
 * defined in RSA PKCS#1. If the signature buffer is too small to hold the contents of signature,
 * false is returned and sig_size is set to the required buffer size to obtain the signature.
 *
 * If rsa_context is NULL, then return false.
 * If message_hash is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid could be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, SHA3_512.
 * If sig_size is large enough but signature is NULL, then return false.
 * If this interface is not supported, then return false.
 *
 * @param[in]      rsa_context   Pointer to RSA context for signature generation.
 * @param[in]      hash_nid      Hash NID.
 * @param[in]      message_hash  Pointer to octet message hash to be signed.
 * @param[in]      hash_size     Size of the message hash in bytes.
 * @param[out]     signature     Pointer to buffer to receive RSA PKCS1-v1_5 signature.
 * @param[in, out] sig_size      On input, the size of signature buffer in bytes.
 *                               On output, the size of data returned in signature buffer in bytes.
 *
 * @retval true   Signature successfully generated in PKCS1-v1_5.
 * @retval false  Signature generation failed.
 * @retval false  sig_size is too small.
 * @retval false  This interface is not supported.
 **/
extern bool libspdm_rsa_pkcs1_sign_with_nid(void *rsa_context, size_t hash_nid,
                                            const uint8_t *message_hash,
                                            size_t hash_size, uint8_t *signature,
                                            size_t *sig_size);

/**
 * Verifies the RSA-SSA signature with EMSA-PKCS1-v1_5 encoding scheme defined in RSA PKCS#1.
 *
 * If rsa_context is NULL, then return false.
 * If message_hash is NULL, then return false.
 * If signature is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid could be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, SHA3_512.
 *
 * @param[in]  rsa_context   Pointer to RSA context for signature verification.
 * @param[in]  hash_nid      Hash NID.
 * @param[in]  message_hash  Pointer to octet message hash to be checked.
 * @param[in]  hash_size     Size of the message hash in bytes.
 * @param[in]  signature     Pointer to RSA PKCS1-v1_5 signature to be verified.
 * @param[in]  sig_size      Size of signature in bytes.
 *
 * @retval true   Valid signature encoded in PKCS1-v1_5.
 * @retval false  Invalid signature or invalid RSA context.
 **/
extern bool libspdm_rsa_pkcs1_verify_with_nid(void *rsa_context, size_t hash_nid,
                                              const uint8_t *message_hash,
                                              size_t hash_size, const uint8_t *signature,
                                              size_t sig_size);
#endif /* LIBSPDM_RSA_SSA_SUPPORT */
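
/*
 * Illustrative usage sketch (not part of the upstream API): signing a
 * precomputed SHA-256 digest with the PKCS1-v1_5 primitive above. The NID
 * constant is defined in library/cryptlib.h elsewhere in this change. Here
 * sig is sized generously; alternatively, a call with a too-small sig_size
 * fails and reports the required size through sig_size.
 *
 *     uint8_t sig[512];
 *     size_t  sig_size = sizeof(sig);
 *
 *     if (libspdm_rsa_pkcs1_sign_with_nid(rsa, LIBSPDM_CRYPTO_NID_SHA256,
 *                                         digest, 32, sig, &sig_size)) {
 *         ... sig_size now holds the actual signature length ...
 *     }
 */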

#if LIBSPDM_RSA_PSS_SUPPORT
/**
 * Carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme.
 *
 * This function carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme defined
 * in RSA PKCS#1 v2.2.
 *
 * The salt length is the same as the digest length.
 *
 * If the signature buffer is too small to hold the contents of signature, false
 * is returned and sig_size is set to the required buffer size to obtain the signature.
 *
 * If rsa_context is NULL, then return false.
 * If message_hash is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid could be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, SHA3_512.
 * If sig_size is large enough but signature is NULL, then return false.
 *
 * @param[in]      rsa_context   Pointer to RSA context for signature generation.
 * @param[in]      hash_nid      Hash NID.
 * @param[in]      message_hash  Pointer to octet message hash to be signed.
 * @param[in]      hash_size     Size of the message hash in bytes.
 * @param[out]     signature     Pointer to buffer to receive RSA-SSA PSS signature.
 * @param[in, out] sig_size      On input, the size of signature buffer in bytes.
 *                               On output, the size of data returned in signature buffer in bytes.
 *
 * @retval true   Signature successfully generated in RSA-SSA PSS.
 * @retval false  Signature generation failed.
 * @retval false  sig_size is too small.
 **/
extern bool libspdm_rsa_pss_sign(void *rsa_context, size_t hash_nid,
                                 const uint8_t *message_hash, size_t hash_size,
                                 uint8_t *signature, size_t *sig_size);

/**
 * Verifies the RSA-SSA signature with EMSA-PSS encoding scheme defined in
 * RSA PKCS#1 v2.2.
 *
 * The salt length is the same as the digest length.
 *
 * If rsa_context is NULL, then return false.
 * If message_hash is NULL, then return false.
 * If signature is NULL, then return false.
 * hash_size must match the digest size of hash_nid. hash_nid could be SHA256, SHA384, SHA512,
 * SHA3_256, SHA3_384, SHA3_512.
 *
 * @param[in]  rsa_context   Pointer to RSA context for signature verification.
 * @param[in]  hash_nid      Hash NID.
 * @param[in]  message_hash  Pointer to octet message hash to be checked.
 * @param[in]  hash_size     Size of the message hash in bytes.
 * @param[in]  signature     Pointer to RSA-SSA PSS signature to be verified.
 * @param[in]  sig_size      Size of signature in bytes.
 *
 * @retval true   Valid signature encoded in RSA-SSA PSS.
 * @retval false  Invalid signature or invalid RSA context.
 **/
extern bool libspdm_rsa_pss_verify(void *rsa_context, size_t hash_nid,
                                   const uint8_t *message_hash, size_t hash_size,
                                   const uint8_t *signature, size_t sig_size);
#endif /* LIBSPDM_RSA_PSS_SUPPORT */
#endif /* CRYPTLIB_RSA_H */
194
kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h
Normal file
@ -0,0 +1,194 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef CRYPTLIB_SM2_H
#define CRYPTLIB_SM2_H

/*=====================================================================================
 * Shang-Mi2 Primitives
 *=====================================================================================*/

#if LIBSPDM_SM2_DSA_SUPPORT
/**
 * Allocates and initializes one Shang-Mi2 context for subsequent use.
 *
 * @param nid  Cipher NID.
 *
 * @return  Pointer to the Shang-Mi2 context that has been initialized.
 *          If the allocation fails, libspdm_sm2_dsa_new_by_nid() returns NULL.
 **/
extern void *libspdm_sm2_dsa_new_by_nid(size_t nid);

/**
 * Releases the specified sm2 context.
 *
 * @param[in]  sm2_context  Pointer to the sm2 context to be released.
 **/
extern void libspdm_sm2_dsa_free(void *sm2_context);

/**
 * Carries out the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part 2.
 *
 * This function carries out the SM2 signature.
 * If the signature buffer is too small to hold the contents of signature, false
 * is returned and sig_size is set to the required buffer size to obtain the signature.
 *
 * If sm2_context is NULL, then return false.
 * If message is NULL, then return false.
 * hash_nid must be SM3_256.
 * If sig_size is large enough but signature is NULL, then return false.
 *
 * The id_a_size must be smaller than 2^16-1.
 * The sig_size is 64 bytes: the first 32 bytes are R, the second 32 bytes are S.
 *
 * @param[in]      sm2_context  Pointer to sm2 context for signature generation.
 * @param[in]      hash_nid     Hash NID.
 * @param[in]      id_a         The ID-A of the signing context.
 * @param[in]      id_a_size    Size of ID-A signing context.
 * @param[in]      message      Pointer to octet message to be signed (before hash).
 * @param[in]      size         Size of the message in bytes.
 * @param[out]     signature    Pointer to buffer to receive SM2 signature.
 * @param[in, out] sig_size     On input, the size of signature buffer in bytes.
 *                              On output, the size of data returned in signature buffer in bytes.
 *
 * @retval true   Signature successfully generated in SM2.
 * @retval false  Signature generation failed.
 * @retval false  sig_size is too small.
 **/
extern bool libspdm_sm2_dsa_sign(const void *sm2_context, size_t hash_nid,
                                 const uint8_t *id_a, size_t id_a_size,
                                 const uint8_t *message, size_t size,
                                 uint8_t *signature, size_t *sig_size);

/**
 * Verifies the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part 2.
 *
 * If sm2_context is NULL, then return false.
 * If message is NULL, then return false.
 * If signature is NULL, then return false.
 * hash_nid must be SM3_256.
 *
 * The id_a_size must be smaller than 2^16-1.
 * The sig_size is 64 bytes: the first 32 bytes are R, the second 32 bytes are S.
 *
 * @param[in]  sm2_context  Pointer to SM2 context for signature verification.
 * @param[in]  hash_nid     Hash NID.
 * @param[in]  id_a         The ID-A of the signing context.
 * @param[in]  id_a_size    Size of ID-A signing context.
 * @param[in]  message      Pointer to octet message to be checked (before hash).
 * @param[in]  size         Size of the message in bytes.
 * @param[in]  signature    Pointer to SM2 signature to be verified.
 * @param[in]  sig_size     Size of signature in bytes.
 *
 * @retval true   Valid signature encoded in SM2.
 * @retval false  Invalid signature or invalid sm2 context.
 **/
extern bool libspdm_sm2_dsa_verify(const void *sm2_context, size_t hash_nid,
                                   const uint8_t *id_a, size_t id_a_size,
                                   const uint8_t *message, size_t size,
                                   const uint8_t *signature, size_t sig_size);
#endif /* LIBSPDM_SM2_DSA_SUPPORT */

#if LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT
/**
 * Allocates and initializes one Shang-Mi2 context for subsequent use.
 *
 * @param nid  Cipher NID.
 *
 * @return  Pointer to the Shang-Mi2 context that has been initialized.
 *          If the allocation fails, libspdm_sm2_key_exchange_new_by_nid() returns NULL.
 **/
extern void *libspdm_sm2_key_exchange_new_by_nid(size_t nid);

/**
 * Releases the specified sm2 context.
 *
 * @param[in]  sm2_context  Pointer to the sm2 context to be released.
 **/
extern void libspdm_sm2_key_exchange_free(void *sm2_context);

/**
 * Initializes the specified sm2 context.
 *
 * @param[in]  sm2_context   Pointer to the sm2 context to be initialized.
 * @param[in]  hash_nid      Hash NID; only SM3 is valid.
 * @param[in]  id_a          The ID-A of the key exchange context.
 * @param[in]  id_a_size     Size of ID-A key exchange context.
 * @param[in]  id_b          The ID-B of the key exchange context.
 * @param[in]  id_b_size     Size of ID-B key exchange context.
 * @param[in]  is_initiator  Whether the caller is the initiator.
 *
 * @retval true   sm2 context is initialized.
 * @retval false  sm2 context is not initialized.
 **/
extern bool libspdm_sm2_key_exchange_init(const void *sm2_context, size_t hash_nid,
                                          const uint8_t *id_a, size_t id_a_size,
                                          const uint8_t *id_b, size_t id_b_size,
                                          bool is_initiator);

/**
 * Generates an sm2 key and returns the sm2 public key (X, Y), based upon GB/T 32918.3-2016:
 * SM2 - Part 3.
 *
 * This function generates a random secret and computes the public key (X, Y), which is
 * returned via the parameters public_data, public_size.
 * X is the first half of public_data, with size public_size / 2;
 * Y is the second half of public_data, with size public_size / 2.
 * The sm2 context is updated accordingly.
 * If the public buffer is too small to hold the public X, Y, false is returned and
 * public_size is set to the required buffer size to obtain the public X, Y.
 *
 * The public_size is 64 bytes: the first 32 bytes are X, the second 32 bytes are Y.
 *
 * If sm2_context is NULL, then return false.
 * If public_size is NULL, then return false.
 * If public_size is large enough but public_data is NULL, then return false.
 *
 * @param[in, out]  sm2_context  Pointer to the sm2 context.
 * @param[out]      public_data  Pointer to the buffer to receive generated public X, Y.
 * @param[in, out]  public_size  On input, the size of public buffer in bytes.
 *                               On output, the size of data returned in public buffer in bytes.
 *
 * @retval true   sm2 public X, Y generation succeeded.
 * @retval false  sm2 public X, Y generation failed.
 * @retval false  public_size is not large enough.
 **/
extern bool libspdm_sm2_key_exchange_generate_key(void *sm2_context, uint8_t *public_data,
                                                  size_t *public_size);

/**
 * Computes the exchanged common key, based upon GB/T 32918.3-2016: SM2 - Part 3.
 *
 * Given the peer's public key (X, Y), this function computes the exchanged common key,
 * based on its own context, including the value of the curve parameter and random secret.
 * X is the first half of peer_public, with size peer_public_size / 2;
 * Y is the second half of peer_public, with size peer_public_size / 2.
 *
 * If sm2_context is NULL, then return false.
 * If peer_public is NULL, then return false.
 * If peer_public_size is 0, then return false.
 * If key is NULL, then return false.
 *
 * The id_a_size and id_b_size must be smaller than 2^16-1.
 * The peer_public_size is 64 bytes: the first 32 bytes are X, the second 32 bytes are Y.
 * The key_size must be smaller than 2^32-1, limited by the KDF function.
 *
 * @param[in, out]  sm2_context       Pointer to the sm2 context.
 * @param[in]       peer_public       Pointer to the peer's public X, Y.
 * @param[in]       peer_public_size  Size of peer's public X, Y in bytes.
 * @param[out]      key               Pointer to the buffer to receive generated key.
 * @param[in, out]  key_size          On input, the size of key buffer in bytes.
 *
 * @retval true   sm2 exchanged key generation succeeded.
 * @retval false  sm2 exchanged key generation failed.
 **/
extern bool libspdm_sm2_key_exchange_compute_key(void *sm2_context,
                                                 const uint8_t *peer_public,
                                                 size_t peer_public_size, uint8_t *key,
                                                 size_t *key_size);
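
/*
 * Illustrative flow sketch (not part of the upstream API): one side of the
 * GB/T 32918.3-2016 exchange using the three calls above. The NID constants
 * are from library/cryptlib.h elsewhere in this change; id_a/id_b are the
 * negotiated identities, and peer_pub is the 64-byte value received from
 * the other side between the generate and compute steps.
 *
 *     uint8_t pub[64], shared[16];
 *     size_t  pub_size = sizeof(pub), shared_size = sizeof(shared);
 *     void *sm2 = libspdm_sm2_key_exchange_new_by_nid(
 *         LIBSPDM_CRYPTO_NID_SM2_KEY_EXCHANGE_P256);
 *
 *     if ((sm2 != NULL) &&
 *         libspdm_sm2_key_exchange_init(sm2, LIBSPDM_CRYPTO_NID_SM3_256,
 *                                       id_a, id_a_size, id_b, id_b_size,
 *                                       true) &&
 *         libspdm_sm2_key_exchange_generate_key(sm2, pub, &pub_size) &&
 *         ... send pub, receive peer_pub ... &&
 *         libspdm_sm2_key_exchange_compute_key(sm2, peer_pub, 64,
 *                                              shared, &shared_size)) {
 *         ... shared now holds the derived key material ...
 *     }
 *     libspdm_sm2_key_exchange_free(sm2);
 */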
#endif /* LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT */
#endif /* CRYPTLIB_SM2_H */
71
kernel-open/nvidia/internal/libspdm_lib_config.h
Normal file
@ -0,0 +1,71 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef LIBSPDM_LIB_CONFIG_H
#define LIBSPDM_LIB_CONFIG_H

#ifndef LIBSPDM_CONFIG
#include "library/spdm_lib_config.h"
#else
#include LIBSPDM_CONFIG
#endif

#if defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) && \
    !defined(LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP)
#ifdef _MSC_VER
#pragma message("LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use " \
    "LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a " \
    "future release.")
#else
#warning LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use \
    LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a \
    future release.
#endif /* _MSC_VER */
#endif /* defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) */

#if defined(LIBSPDM_ENABLE_CHUNK_CAP) && !defined(LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP)
#ifdef _MSC_VER
#pragma message("LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP " \
    "instead. This warning will be removed in a future release.")
#else
#warning LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP \
    instead. This warning will be removed in a future release.
#endif /* _MSC_VER */
#endif /* defined(LIBSPDM_ENABLE_CHUNK_CAP) */

#if defined(MDEPKG_NDEBUG) && !defined(LIBSPDM_DEBUG_ENABLE)
#ifdef _MSC_VER
#pragma message("MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE " \
    "instead. This warning will be removed in a future release.")
#else
#warning MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE \
    instead. This warning will be removed in a future release.
#endif /* _MSC_VER */
#endif /* defined(MDEPKG_NDEBUG) */

#if defined(LIBSPDM_DEBUG_ENABLE)
#undef LIBSPDM_DEBUG_ASSERT_ENABLE
#undef LIBSPDM_DEBUG_PRINT_ENABLE
#undef LIBSPDM_DEBUG_BLOCK_ENABLE

#define LIBSPDM_DEBUG_ASSERT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_PRINT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_BLOCK_ENABLE (LIBSPDM_DEBUG_ENABLE)
#elif defined(MDEPKG_NDEBUG)
#undef LIBSPDM_DEBUG_ASSERT_ENABLE
#undef LIBSPDM_DEBUG_PRINT_ENABLE
#undef LIBSPDM_DEBUG_BLOCK_ENABLE

#define LIBSPDM_DEBUG_ASSERT_ENABLE 0
#define LIBSPDM_DEBUG_PRINT_ENABLE 0
#define LIBSPDM_DEBUG_BLOCK_ENABLE 0
#endif /* defined(LIBSPDM_DEBUG_ENABLE) */
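
/*
 * Illustrative note (not part of the upstream header): a single
 * LIBSPDM_DEBUG_ENABLE definition on the compiler command line, e.g.
 *
 *     cc -DLIBSPDM_DEBUG_ENABLE=0 ...
 *
 * collapses all three debug switches (assert, print, block) to that one
 * value, overriding whatever the included configuration header chose.
 */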

#if LIBSPDM_CHECK_MACRO
#include "internal/libspdm_macro_check.h"
#endif /* LIBSPDM_CHECK_MACRO */

#endif /* LIBSPDM_LIB_CONFIG_H */
154
kernel-open/nvidia/internal_crypt_lib.h
Normal file
@ -0,0 +1,154 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __INTERNAL_CRYPT_LIB_H__
#define __INTERNAL_CRYPT_LIB_H__

/*
 * This code uses the Linux Kernel Crypto API extensively. The web page written by
 * Stephan Mueller and Marek Vasut is a good starting reference on how the Linux
 * kernel provides its crypto API.
 */
#include "conftest.h"

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/random.h>
#include <linux/string.h>

// Check if ECDH/ECDSA are there; on some platforms they might not be...
#ifndef AUTOCONF_INCLUDED
#if defined(NV_GENERATED_AUTOCONF_H_PRESENT)
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#endif
#if \
    (defined(CONFIG_CRYPTO_AEAD) || defined(CONFIG_CRYPTO_AEAD_MODULE)) && \
    (defined(CONFIG_CRYPTO_AKCIPHER) || defined(CONFIG_CRYPTO_AKCIPHER_MODULE)) && \
    (defined(CONFIG_CRYPTO_SKCIPHER) || defined(CONFIG_CRYPTO_SKCIPHER_MODULE)) && \
    (defined(CONFIG_CRYPTO_HASH) || defined(CONFIG_CRYPTO_HASH_MODULE)) && \
    (defined(CONFIG_CRYPTO_HMAC) || defined(CONFIG_CRYPTO_HMAC_MODULE)) && \
    (defined(CONFIG_CRYPTO_ECDH) || defined(CONFIG_CRYPTO_ECDH_MODULE)) && \
    (defined(CONFIG_CRYPTO_ECDSA) || defined(CONFIG_CRYPTO_ECDSA_MODULE)) && \
    (defined(CONFIG_X509_CERTIFICATE_PARSER) || defined(CONFIG_X509_CERTIFICATE_PARSER_MODULE))
#define NV_CONFIG_CRYPTO_PRESENT 1
#endif

/*
 * It is possible that we don't have access to all the functions we need. This
 * could be because we are running a non-GPL kernel, because the kernel is too
 * old, or even just because the user disabled them. If we should use LKCA,
 * include the headers; else define stubs that return errors.
 */
#if defined(NV_CRYPTO_PRESENT) && defined(NV_CONFIG_CRYPTO_PRESENT)
#define USE_LKCA 1
#endif

#ifdef USE_LKCA
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sm3.h>

// HASH_MAX_DIGESTSIZE is available since 4.20.
// This value is accurate as of 6.1.
#ifndef HASH_MAX_DIGESTSIZE
#define HASH_MAX_DIGESTSIZE 64
#endif

#else
// Just stub everything out
struct shash_desc;
struct crypto_shash;
#define crypto_shash_setkey(...) -ENOMEM
#define crypto_shash_init(...)   -ENOMEM
#define crypto_shash_update(...) -ENOMEM
#define crypto_shash_final(...)  -ENOMEM
#endif

#define CHAR_BIT 8U
#undef SIZE_MAX
#define SIZE_MAX 8

#include "library/cryptlib.h"

#define LIBSPDM_ASSERT(...)
struct lkca_aead_ctx;
int lkca_aead_alloc(struct lkca_aead_ctx **ctx, char const *alg);
void lkca_aead_free(struct lkca_aead_ctx *ctx);
int lkca_aead_ex(struct lkca_aead_ctx *ctx,
                 const uint8_t *key, size_t key_size,
                 uint8_t *iv, size_t iv_size,
                 const uint8_t *data_in, size_t data_in_size,
                 uint8_t *tag, size_t tag_size,
                 uint8_t *data_out, size_t *data_out_size,
                 bool enc);

int libspdm_aead(const uint8_t *key, size_t key_size,
                 const uint8_t *iv, size_t iv_size,
                 const uint8_t *a_data, size_t a_data_size,
                 const uint8_t *data_in, size_t data_in_size,
                 const uint8_t *tag, size_t tag_size,
                 uint8_t *data_out, size_t *data_out_size,
                 bool enc, char const *alg);
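
/*
 * Illustrative usage sketch (not part of the upstream API): one-shot
 * AES-256-GCM decryption through the wrapper above, assuming the usual
 * 0-on-success kernel convention for the int return. "gcm(aes)" is the
 * Linux Kernel Crypto API algorithm name; tag is the received 16-byte
 * GCM tag to be verified against the ciphertext and associated data.
 *
 *     size_t out_size = sizeof(out);
 *
 *     if (libspdm_aead(key, 32, iv, 12,
 *                      aad, aad_size,
 *                      ciphertext, ciphertext_size,
 *                      tag, 16,
 *                      out, &out_size,
 *                      false, "gcm(aes)") == 0) {
 *         ... out holds the authenticated plaintext ...
 *     }
 */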

void *lkca_hash_new(const char *alg_name);
void lkca_hash_free(struct shash_desc *ctx);
bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src);
bool lkca_hash_all(const char *alg_name, const void *data,
                   size_t data_size, uint8_t *hash_value);
bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src);
bool lkca_hmac_set_key(struct shash_desc *ctx, const uint8_t *key, size_t key_size);
bool lkca_hmac_all(const char *alg_name, const uint8_t *key, size_t key_size,
                   const uint8_t *data, size_t data_size, uint8_t *hash_value);
bool lkca_hkdf_extract_and_expand(const char *alg_name,
                                  const uint8_t *key, size_t key_size,
                                  const uint8_t *salt, size_t salt_size,
                                  const uint8_t *info, size_t info_size,
                                  uint8_t *out, size_t out_size);
bool lkca_hkdf_expand(const char *alg_name,
                      const uint8_t *prk, size_t prk_size,
                      const uint8_t *info, size_t info_size,
                      uint8_t *out, size_t out_size);

bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size);
bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
                         size_t public_key_size);
bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key,
                         size_t *public_key_size);
bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data,
                          size_t *public_size);
bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public,
                         size_t peer_public_size, uint8_t *key,
                         size_t *key_size);
bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid,
                       const uint8_t *message_hash, size_t hash_size,
                       const uint8_t *signature, size_t sig_size);
#endif
109
kernel-open/nvidia/library/cryptlib.h
Normal file
@ -0,0 +1,109 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

/** @file
 * Defines base cryptographic library APIs.
 * The Base Cryptographic Library provides implementations of basic cryptographic
 * primitives (hash families, HMAC, AES, RSA, Diffie-Hellman, Elliptic Curve, etc.)
 * for enabling security functionality.
 **/

#ifndef CRYPTLIB_H
#define CRYPTLIB_H

#include "internal/libspdm_lib_config.h"

#define LIBSPDM_CRYPTO_NID_NULL 0x0000
|
||||
|
||||
/* Hash */
|
||||
#define LIBSPDM_CRYPTO_NID_SHA256 0x0001
|
||||
#define LIBSPDM_CRYPTO_NID_SHA384 0x0002
|
||||
#define LIBSPDM_CRYPTO_NID_SHA512 0x0003
|
||||
#define LIBSPDM_CRYPTO_NID_SHA3_256 0x0004
|
||||
#define LIBSPDM_CRYPTO_NID_SHA3_384 0x0005
|
||||
#define LIBSPDM_CRYPTO_NID_SHA3_512 0x0006
|
||||
#define LIBSPDM_CRYPTO_NID_SM3_256 0x0007
|
||||
|
||||
/* Signing */
|
||||
#define LIBSPDM_CRYPTO_NID_RSASSA2048 0x0101
|
||||
#define LIBSPDM_CRYPTO_NID_RSASSA3072 0x0102
|
||||
#define LIBSPDM_CRYPTO_NID_RSASSA4096 0x0103
|
||||
#define LIBSPDM_CRYPTO_NID_RSAPSS2048 0x0104
|
||||
#define LIBSPDM_CRYPTO_NID_RSAPSS3072 0x0105
|
||||
#define LIBSPDM_CRYPTO_NID_RSAPSS4096 0x0106
|
||||
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P256 0x0107
|
||||
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P384 0x0108
|
||||
#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P521 0x0109
|
||||
#define LIBSPDM_CRYPTO_NID_SM2_DSA_P256 0x010A
|
||||
#define LIBSPDM_CRYPTO_NID_EDDSA_ED25519 0x010B
|
||||
#define LIBSPDM_CRYPTO_NID_EDDSA_ED448 0x010C
|
||||
|
||||
/* Key Exchange */
|
||||
#define LIBSPDM_CRYPTO_NID_FFDHE2048 0x0201
|
||||
#define LIBSPDM_CRYPTO_NID_FFDHE3072 0x0202
|
||||
#define LIBSPDM_CRYPTO_NID_FFDHE4096 0x0203
|
||||
#define LIBSPDM_CRYPTO_NID_SECP256R1 0x0204
|
||||
#define LIBSPDM_CRYPTO_NID_SECP384R1 0x0205
|
||||
#define LIBSPDM_CRYPTO_NID_SECP521R1 0x0206
|
||||
#define LIBSPDM_CRYPTO_NID_SM2_KEY_EXCHANGE_P256 0x0207
|
||||
#define LIBSPDM_CRYPTO_NID_CURVE_X25519 0x0208
|
||||
#define LIBSPDM_CRYPTO_NID_CURVE_X448 0x0209
|
||||
|
||||
/* AEAD */
|
||||
#define LIBSPDM_CRYPTO_NID_AES_128_GCM 0x0301
|
||||
#define LIBSPDM_CRYPTO_NID_AES_256_GCM 0x0302
|
||||
#define LIBSPDM_CRYPTO_NID_CHACHA20_POLY1305 0x0303
|
||||
#define LIBSPDM_CRYPTO_NID_SM4_128_GCM 0x0304
|
||||
|
||||
/* X.509 v3 key usage extension flags. */
|
||||
#define LIBSPDM_CRYPTO_X509_KU_DIGITAL_SIGNATURE 0x80 /* bit 0 */
|
||||
#define LIBSPDM_CRYPTO_X509_KU_NON_REPUDIATION 0x40 /* bit 1 */
|
||||
#define LIBSPDM_CRYPTO_X509_KU_KEY_ENCIPHERMENT 0x20 /* bit 2 */
|
||||
#define LIBSPDM_CRYPTO_X509_KU_DATA_ENCIPHERMENT 0x10 /* bit 3 */
|
||||
#define LIBSPDM_CRYPTO_X509_KU_KEY_AGREEMENT 0x08 /* bit 4 */
|
||||
#define LIBSPDM_CRYPTO_X509_KU_KEY_CERT_SIGN 0x04 /* bit 5 */
|
||||
#define LIBSPDM_CRYPTO_X509_KU_CRL_SIGN 0x02 /* bit 6 */
|
||||
#define LIBSPDM_CRYPTO_X509_KU_ENCIPHER_ONLY 0x01 /* bit 7 */
|
||||
#define LIBSPDM_CRYPTO_X509_KU_DECIPHER_ONLY 0x8000 /* bit 8 */
|
||||
|
||||
/* These constants comply with the DER encoded ASN.1 type tags. */
|
||||
#define LIBSPDM_CRYPTO_ASN1_BOOLEAN 0x01
|
||||
#define LIBSPDM_CRYPTO_ASN1_INTEGER 0x02
|
||||
#define LIBSPDM_CRYPTO_ASN1_BIT_STRING 0x03
|
||||
#define LIBSPDM_CRYPTO_ASN1_OCTET_STRING 0x04
|
||||
#define LIBSPDM_CRYPTO_ASN1_NULL 0x05
|
||||
#define LIBSPDM_CRYPTO_ASN1_OID 0x06
|
||||
#define LIBSPDM_CRYPTO_ASN1_UTF8_STRING 0x0C
|
||||
#define LIBSPDM_CRYPTO_ASN1_SEQUENCE 0x10
|
||||
#define LIBSPDM_CRYPTO_ASN1_SET 0x11
|
||||
#define LIBSPDM_CRYPTO_ASN1_PRINTABLE_STRING 0x13
|
||||
#define LIBSPDM_CRYPTO_ASN1_T61_STRING 0x14
|
||||
#define LIBSPDM_CRYPTO_ASN1_IA5_STRING 0x16
|
||||
#define LIBSPDM_CRYPTO_ASN1_UTC_TIME 0x17
|
||||
#define LIBSPDM_CRYPTO_ASN1_GENERALIZED_TIME 0x18
|
||||
#define LIBSPDM_CRYPTO_ASN1_UNIVERSAL_STRING 0x1C
|
||||
#define LIBSPDM_CRYPTO_ASN1_BMP_STRING 0x1E
|
||||
#define LIBSPDM_CRYPTO_ASN1_PRIMITIVE 0x00
|
||||
#define LIBSPDM_CRYPTO_ASN1_CONSTRUCTED 0x20
|
||||
#define LIBSPDM_CRYPTO_ASN1_CONTEXT_SPECIFIC 0x80
|
||||
|
||||
#define LIBSPDM_CRYPTO_ASN1_TAG_CLASS_MASK 0xC0
|
||||
#define LIBSPDM_CRYPTO_ASN1_TAG_PC_MASK 0x20
|
||||
#define LIBSPDM_CRYPTO_ASN1_TAG_VALUE_MASK 0x1F
|
||||
|
||||
#include "hal/library/cryptlib/cryptlib_hash.h"
|
||||
#include "hal/library/cryptlib/cryptlib_mac.h"
|
||||
#include "hal/library/cryptlib/cryptlib_aead.h"
|
||||
#include "hal/library/cryptlib/cryptlib_cert.h"
|
||||
#include "hal/library/cryptlib/cryptlib_hkdf.h"
|
||||
#include "hal/library/cryptlib/cryptlib_rsa.h"
|
||||
#include "hal/library/cryptlib/cryptlib_ec.h"
|
||||
#include "hal/library/cryptlib/cryptlib_dh.h"
|
||||
#include "hal/library/cryptlib/cryptlib_ecd.h"
|
||||
#include "hal/library/cryptlib/cryptlib_sm2.h"
|
||||
#include "hal/library/cryptlib/cryptlib_rng.h"
|
||||
|
||||
#endif /* CRYPTLIB_H */
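As a quick illustration of how the tag masks above decompose a DER tag octet, here is a hedged sketch (not part of the header; the function name is made up):

    /* Decompose a DER tag octet, e.g. 0xA3 = [3], context-specific, constructed. */
    static void asn1_tag_parts(uint8_t tag)
    {
        uint8_t cls   = tag & LIBSPDM_CRYPTO_ASN1_TAG_CLASS_MASK; /* 0x80: context-specific */
        uint8_t pc    = tag & LIBSPDM_CRYPTO_ASN1_TAG_PC_MASK;    /* 0x20: constructed      */
        uint8_t value = tag & LIBSPDM_CRYPTO_ASN1_TAG_VALUE_MASK; /* 0x03: tag number       */
        (void)cls; (void)pc; (void)value;
    }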
415
kernel-open/nvidia/library/spdm_lib_config.h
Normal file
@ -0,0 +1,415 @@
/**
 * Copyright Notice:
 * Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 **/

#ifndef SPDM_LIB_CONFIG_H
#define SPDM_LIB_CONFIG_H

/* Enables assertions and debug printing. When `LIBSPDM_DEBUG_ENABLE` is defined it overrides or
 * sets the values of `LIBSPDM_DEBUG_PRINT_ENABLE`, `LIBSPDM_DEBUG_ASSERT_ENABLE`, and
 * `LIBSPDM_BLOCK_ENABLE` to the value of `LIBSPDM_DEBUG_ENABLE`.
 *
 * Note that if this file is used with CMake and `DTARGET=Release` is defined, then all debugging
 * is disabled.
 */
#ifndef LIBSPDM_DEBUG_ENABLE
#define LIBSPDM_DEBUG_ENABLE 1
#endif

/* The SPDM specification allows a Responder to return up to 256 version entries in the `VERSION`
 * response to the Requester, including duplicate entries. For a Requester this value specifies the
 * maximum number of entries that libspdm will tolerate in a `VERSION` response before returning an
 * error. A similar macro, `SPDM_MAX_VERSION_COUNT`, exists for the Responder; however, that macro
 * is not meant to be configured by the Integrator.
 */
#ifndef LIBSPDM_MAX_VERSION_COUNT
#define LIBSPDM_MAX_VERSION_COUNT 5
#endif

/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.RequesterContext` and,
 * if supported by the Responder, `PSK_EXCHANGE_RSP.ResponderContext` fields. The fields are
 * typically random or monotonically increasing numbers.
 */
#ifndef LIBSPDM_PSK_CONTEXT_LENGTH
#define LIBSPDM_PSK_CONTEXT_LENGTH LIBSPDM_MAX_HASH_SIZE
#endif

/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.PSKHint` field. */
#ifndef LIBSPDM_PSK_MAX_HINT_LENGTH
#define LIBSPDM_PSK_MAX_HINT_LENGTH 16
#endif

/* libspdm allows an Integrator to specify multiple root certificates as trust anchors when
 * verifying certificate chains from an endpoint. This value specifies the maximum number of root
 * certificates that libspdm can support.
 */
#ifndef LIBSPDM_MAX_ROOT_CERT_SUPPORT
#define LIBSPDM_MAX_ROOT_CERT_SUPPORT 10
#endif

/* If the Responder supports it, a Requester is allowed to establish multiple secure sessions with
 * the Responder. This value specifies the maximum number of sessions libspdm can support.
 */
#ifndef LIBSPDM_MAX_SESSION_COUNT
#define LIBSPDM_MAX_SESSION_COUNT 4
#endif

/* This value specifies the maximum size, in bytes, of a certificate chain that can be stored in a
 * libspdm context.
 */
#ifndef LIBSPDM_MAX_CERT_CHAIN_SIZE
#define LIBSPDM_MAX_CERT_CHAIN_SIZE 0x1000
#endif

#ifndef LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE
#define LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE 0x1000
#endif

/* Partial certificates can be retrieved from a Requester or Responder, and through multiple
 * messages the complete certificate chain can be constructed. This value specifies the maximum
 * size, in bytes, of a partial certificate that can be sent or received.
 */
#ifndef LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN
#define LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN 1024
#endif

#ifndef LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
#define LIBSPDM_MAX_MESSAGE_BUFFER_SIZE 0x1200
#endif

#ifndef LIBSPDM_MAX_MESSAGE_SMALL_BUFFER_SIZE
#define LIBSPDM_MAX_MESSAGE_SMALL_BUFFER_SIZE 0x100 /* to hold message_a before negotiate */
#endif

#ifndef LIBSPDM_MAX_MESSAGE_MEDIUM_BUFFER_SIZE
#define LIBSPDM_MAX_MESSAGE_MEDIUM_BUFFER_SIZE 0x300 /* to hold message_k before finished_key is ready */
#endif

/* If the Responder replies with a Busy `ERROR` response to a request then the Requester is free to
 * retry sending the request. This value specifies the maximum number of times libspdm will retry
 * sending the request before returning an error. If its value is 0 then libspdm will not send any
 * retry requests.
 */
#ifndef LIBSPDM_MAX_REQUEST_RETRY_TIMES
#define LIBSPDM_MAX_REQUEST_RETRY_TIMES 3
#endif

#ifndef LIBSPDM_MAX_SESSION_STATE_CALLBACK_NUM
#define LIBSPDM_MAX_SESSION_STATE_CALLBACK_NUM 4
#endif

#ifndef LIBSPDM_MAX_CONNECTION_STATE_CALLBACK_NUM
#define LIBSPDM_MAX_CONNECTION_STATE_CALLBACK_NUM 4
#endif

#ifndef LIBSPDM_MAX_KEY_UPDATE_CALLBACK_NUM
#define LIBSPDM_MAX_KEY_UPDATE_CALLBACK_NUM 4
#endif

#ifndef LIBSPDM_MAX_CSR_SIZE
#define LIBSPDM_MAX_CSR_SIZE 0x1000
#endif

/* To ensure integrity in communication between the Requester and the Responder, libspdm calculates
 * cryptographic digests and signatures over multiple requests and responses. This value specifies
 * whether libspdm will use a running calculation over the transcript, where requests and responses
 * are discarded as they are cryptographically consumed, or whether libspdm will buffer the entire
 * transcript before calculating the digest or signature.
 */
#ifndef LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT
#define LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT 0
#endif


/* Cryptography Configuration
 * In each category, at least one should be selected.
 * NOTE: Not all combinations can be supported, e.g. do not mix NIST algorithms with SMx. */

#ifndef LIBSPDM_RSA_SSA_SUPPORT
#define LIBSPDM_RSA_SSA_SUPPORT 1
#endif

#ifndef LIBSPDM_RSA_PSS_SUPPORT
#define LIBSPDM_RSA_PSS_SUPPORT 1
#endif

#ifndef LIBSPDM_ECDSA_SUPPORT
#define LIBSPDM_ECDSA_SUPPORT 1
#endif

#ifndef LIBSPDM_SM2_DSA_SUPPORT
#define LIBSPDM_SM2_DSA_SUPPORT 1
#endif

#ifndef LIBSPDM_EDDSA_ED25519_SUPPORT
#define LIBSPDM_EDDSA_ED25519_SUPPORT 1
#endif

#ifndef LIBSPDM_EDDSA_ED448_SUPPORT
#define LIBSPDM_EDDSA_ED448_SUPPORT 1
#endif

#ifndef LIBSPDM_FFDHE_SUPPORT
#define LIBSPDM_FFDHE_SUPPORT 1
#endif

#ifndef LIBSPDM_ECDHE_SUPPORT
#define LIBSPDM_ECDHE_SUPPORT 1
#endif

#ifndef LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT
#define LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT 1
#endif

#ifndef LIBSPDM_AEAD_GCM_SUPPORT
#define LIBSPDM_AEAD_GCM_SUPPORT 1
#endif

#ifndef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
#define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 1
#endif

#ifndef LIBSPDM_AEAD_SM4_SUPPORT
#define LIBSPDM_AEAD_SM4_SUPPORT 1
#endif

#ifndef LIBSPDM_SHA256_SUPPORT
#define LIBSPDM_SHA256_SUPPORT 1
#endif

#ifndef LIBSPDM_SHA384_SUPPORT
#define LIBSPDM_SHA384_SUPPORT 1
#endif

#ifndef LIBSPDM_SHA512_SUPPORT
#define LIBSPDM_SHA512_SUPPORT 1
#endif

#ifndef LIBSPDM_SHA3_256_SUPPORT
#define LIBSPDM_SHA3_256_SUPPORT 1
#endif

#ifndef LIBSPDM_SHA3_384_SUPPORT
#define LIBSPDM_SHA3_384_SUPPORT 1
#endif

#ifndef LIBSPDM_SHA3_512_SUPPORT
#define LIBSPDM_SHA3_512_SUPPORT 1
#endif

#ifndef LIBSPDM_SM3_256_SUPPORT
#define LIBSPDM_SM3_256_SUPPORT 1
#endif

/* Code space optimization for optional request/response messages. */

/* Consumers of libspdm may wish to not fully implement all of the optional
 * SPDM request/response messages. Therefore we have provided these
 * LIBSPDM_ENABLE_CAPABILITY_***_CAP compile-time switches as an optimization
 * to disable the code (#if 0) related to said optional capability, thereby
 * reducing the code space used in the image. */

/* A single switch may enable/disable a single capability or a group of related
 * capabilities. */

/* LIBSPDM_ENABLE_CAPABILITY_CERT_CAP - Enable/Disable single CERT capability.
 * LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP - Enable/Disable single CHAL capability.
 * LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP - Enable/Disable multiple MEAS capabilities:
 * (MEAS_CAP_NO_SIG, MEAS_CAP_SIG, MEAS_FRESH_CAP) */

/* LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP - Enable/Disable single Key Exchange capability.
 * LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP - Enable/Disable PSK_EX and PSK_FINISH. */

/* LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP - Enable/Disable mutual authentication.
 * LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP - Enable/Disable encapsulated message. */

/* LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP - Enable/Disable get CSR capability.
 * LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP - Enable/Disable set certificate capability. */

#ifndef LIBSPDM_ENABLE_CAPABILITY_CERT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CERT_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP
#define LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP
#define LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP
#define LIBSPDM_ENABLE_CAPABILITY_PSK_EX_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP
#define LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP
#define LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
#define LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP
#define LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP
#define LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP 1
#endif

#ifndef LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
#define LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP 1
#endif

/*
 * MinDataTransferSize = 42
 *
 * H = HashLen = HmacLen = [32, 64]
 * S = SigLen = [64, 512]
 * D = ExchangeDataLen = [64, 512]
 * R = RequesterContextLen >= 32
 * R = ResponderContextLen >= 0
 * O = OpaqueDataLen <= 1024
 *
 * Max Chunk No = 1, if (message size <= 42)
 * Max Chunk No = [(message size + 4) / 30] roundup, if (message size > 42)
 *
 * +==========================+==========================================+=========+
 * | Command                  | Size                                     |MaxChunk |
 * +==========================+==========================================+=========+
 * | GET_VERSION              | 4                                        | 1       |
 * | VERSION {1.0, 1.1, 1.2}  | 6 + 2 * 3 = 12                           | 1       |
 * +--------------------------+------------------------------------------+---------+
 * | GET_CAPABILITIES 1.2     | 20                                       | 1       |
 * | CAPABILITIES 1.2         | 20                                       | 1       |
 * +--------------------------+------------------------------------------+---------+
 * | ERROR                    | 4                                        | 1       |
 * | ERROR(ResponseTooLarge)  | 4 + 4 = 8                                | 1       |
 * | ERROR(LargeResponse)     | 4 + 1 = 5                                | 1       |
 * | ERROR(ResponseNotReady)  | 4 + 4 = 8                                | 1       |
 * +--------------------------+------------------------------------------+---------+
 * | CHUNK_SEND header        | 12 + L0 (0 or 4)                         | 1       |
 * | CHUNK_RESPONSE header    | 12 + L0 (0 or 4)                         | 1       |
 * +==========================+==========================================+=========+
 * | NEGOTIATE_ALGORITHMS 1.2 | 32 + 4 * 4 = 48                          | 2       |
 * | ALGORITHMS 1.2           | 36 + 4 * 4 = 52                          | 2       |
 * +--------------------------+------------------------------------------+---------+
 * | GET_DIGESTS 1.2          | 4                                        | 1       |
 * | DIGESTS 1.2              | 4 + H * SlotNum = [36, 516]              | [1, 18] |
 * +--------------------------+------------------------------------------+---------+
 * | GET_CERTIFICATE 1.2      | 8                                        | 1       |
 * | CERTIFICATE 1.2          | 8 + PortionLen                           | [1, ]   |
 * +--------------------------+------------------------------------------+---------+
 * | CHALLENGE 1.2            | 40                                       | 1       |
 * | CHALLENGE_AUTH 1.2       | 38 + H * 2 + S [+ O] = [166, 678]        | [6, 23] |
 * +--------------------------+------------------------------------------+---------+
 * | GET_MEASUREMENTS 1.2     | 5 + Nonce (0 or 32)                      | 1       |
 * | MEASUREMENTS 1.2         | 42 + MeasRecLen (+ S) [+ O] = [106, 554] | [4, 19] |
 * +--------------------------+------------------------------------------+---------+
 * | KEY_EXCHANGE 1.2         | 42 + D [+ O] = [106, 554]                | [4, 19] |
 * | KEY_EXCHANGE_RSP 1.2     | 42 + D + H + S (+ H) [+ O] = [234, 1194] | [8, 40] |
 * +--------------------------+------------------------------------------+---------+
 * | FINISH 1.2               | 4 (+ S) + H = [100, 580]                 | [4, 20] |
 * | FINISH_RSP 1.2           | 4 (+ H) = [36, 69]                       | [1, 3]  |
 * +--------------------------+------------------------------------------+---------+
 * | PSK_EXCHANGE 1.2         | 12 [+ PSKHint] + R [+ O] = 44            | 2       |
 * | PSK_EXCHANGE_RSP 1.2     | 12 + R + H (+ H) [+ O] = [108, 172]      | [4, 6]  |
 * +--------------------------+------------------------------------------+---------+
 * | PSK_FINISH 1.2           | 4 + H = [36, 68]                         | [1, 3]  |
 * | PSK_FINISH_RSP 1.2       | 4                                        | 1       |
 * +--------------------------+------------------------------------------+---------+
 * | GET_CSR 1.2              | 8 + RequesterInfoLen [+ O]               | [1, ]   |
 * | CSR 1.2                  | 8 + CSRLength                            | [1, ]   |
 * +--------------------------+------------------------------------------+---------+
 * | SET_CERTIFICATE 1.2      | 4 + CertChainLen                         | [1, ]   |
 * | SET_CERTIFICATE_RSP 1.2  | 4                                        | 1       |
 * +==========================+==========================================+=========+
 */
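As a sanity check on the chunk-count formula above, here is a hedged helper (illustrative only, not part of libspdm; the max_chunk_no() name is made up). With MinDataTransferSize = 42, a 12-byte chunk header leaves 30 bytes of payload per chunk, and the extra 4 bytes plausibly cover the large-message length field:

    static uint32_t max_chunk_no(uint32_t message_size)
    {
        if (message_size <= 42)
            return 1;
        return (message_size + 4 + 29) / 30; /* round up */
    }

    /* Example: a 554-byte MEASUREMENTS response -> (554 + 4 + 29) / 30 = 19,
     * matching the [4, 19] upper bound in the table above. */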

/* Maximum size of a large SPDM message.
 * If chunking is unsupported, it must be the same as LIBSPDM_DATA_TRANSFER_SIZE.
 * If chunking is supported, it must be larger than LIBSPDM_DATA_TRANSFER_SIZE.
 * It matches MaxSPDMmsgSize in the SPDM specification. */
#ifndef LIBSPDM_MAX_SPDM_MSG_SIZE
#define LIBSPDM_MAX_SPDM_MSG_SIZE LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
#endif

/* Maximum size of a single SPDM message.
 * It matches DataTransferSize in the SPDM specification. */
#ifndef LIBSPDM_DATA_TRANSFER_SIZE
#define LIBSPDM_DATA_TRANSFER_SIZE LIBSPDM_MAX_MESSAGE_BUFFER_SIZE
#endif

/* Required sender/receiver buffer for device I/O.
 * NOTE: This is transport specific. The configuration below is just an example.
 * +-------+--------+---------------------------+------+--+------+---+--------+-----+
 * | TYPE  |TransHdr| EncryptionHeader          |AppHdr|  |Random|MAC|AlignPad|FINAL|
 * |       |        |SessionId|SeqNum|Len|AppLen|      |  |      |   |        |     |
 * +-------+--------+---------------------------+------+  +------+---+--------+-----+
 * | MCTP  |    1   |    4    |   2  | 2 |   2  |   1  |  |  32  | 12|    0   |  56 |
 * |PCI_DOE|    8   |    4    |   0  | 2 |   2  |   0  |  |   0  | 12|    3   |  31 |
 * +-------+--------+---------------------------+------+--+------+---+--------+-----+
 */
#ifndef LIBSPDM_TRANSPORT_ADDITIONAL_SIZE
#define LIBSPDM_TRANSPORT_ADDITIONAL_SIZE 64
#endif

#ifndef LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE
#define LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE (LIBSPDM_DATA_TRANSFER_SIZE + \
                                            LIBSPDM_TRANSPORT_ADDITIONAL_SIZE)
#endif


/* Required scratch buffer size for libspdm internal usage.
 * It may be used to hold the encrypted/decrypted message and/or the last sent/received message.
 * It may be used to hold the large request/response and the intermediate send/receive buffer
 * in the case of chunking.
 *
 * If chunking is not supported, it may be just LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE.
 * If chunking is supported, it should be laid out at least as below.
 *
 * +---------------+--------------+--------------------------+------------------------------+
 * |SECURE_MESSAGE |LARGE_MESSAGE | SENDER_RECEIVER          | LARGE_SENDER_RECEIVER        |
 * +---------------+--------------+--------------------------+------------------------------+
 * |<-Secure msg ->|<-Large msg ->|<-Snd/Rcv buf for chunk ->|<-Snd/Rcv buf for large msg ->|
 *
 * The value is NOT configurable.
 * The value MAY change in a different libspdm version.
 * It is exposed here just in case the libspdm consumer wants to configure the setting at build time.
 */
#if LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP

/* first section */
#define LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_OFFSET 0
#define LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)

/* second section */
#define LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_OFFSET (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY)
#define LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)

/* third section */
#define LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_OFFSET \
    (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
     LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY)
#define LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)

/* fourth section */
#define LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_OFFSET \
    (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
     LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY + \
     LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY)
#define LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_CAPACITY (LIBSPDM_MAX_SPDM_MSG_SIZE)

#define LIBSPDM_SCRATCH_BUFFER_SIZE (LIBSPDM_SCRATCH_BUFFER_SECURE_MESSAGE_CAPACITY + \
                                     LIBSPDM_SCRATCH_BUFFER_LARGE_MESSAGE_CAPACITY + \
                                     LIBSPDM_SCRATCH_BUFFER_SENDER_RECEIVER_CAPACITY + \
                                     LIBSPDM_SCRATCH_BUFFER_LARGE_SENDER_RECEIVER_CAPACITY)

#else
#define LIBSPDM_SCRATCH_BUFFER_SIZE (LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE)
#endif
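Plugging in the defaults gives a concrete number; a hedged back-of-envelope, assuming the default LIBSPDM_MAX_MESSAGE_BUFFER_SIZE of 0x1200:

    /* With chunking enabled, the four equal sections above give:
     *   LIBSPDM_SCRATCH_BUFFER_SIZE = 4 * LIBSPDM_MAX_SPDM_MSG_SIZE
     *                               = 4 * 0x1200 = 0x4800 bytes (18 KiB).
     * Without chunking it collapses to LIBSPDM_SENDER_RECEIVE_BUFFER_SIZE,
     * i.e. 0x1200 + 64 = 0x1240 bytes. */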

/* Enable message logging.
 * See https://github.com/DMTF/libspdm/blob/main/doc/user_guide.md#message-logging
 * for more information. */
#ifndef LIBSPDM_ENABLE_MSG_LOG
#define LIBSPDM_ENABLE_MSG_LOG 1
#endif

/* Enable macro checking during compilation. */
#ifndef LIBSPDM_CHECK_MACRO
#define LIBSPDM_CHECK_MACRO 0
#endif

#endif /* SPDM_LIB_CONFIG_H */
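Since every knob in this header is wrapped in #ifndef, an integrator can override it at build time instead of editing the file. A hedged example of how a Kbuild fragment might do so (the specific values are illustrative, not the driver's actual settings):

    ccflags-y += -DLIBSPDM_MAX_SESSION_COUNT=1
    ccflags-y += -DLIBSPDM_ENABLE_MSG_LOG=0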
470
kernel-open/nvidia/libspdm_aead.c
Normal file
@ -0,0 +1,470 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "internal_crypt_lib.h"
#include "nvspdm_cryptlib_extensions.h"

#ifdef USE_LKCA
#define BUFFER_SIZE (2 * 1024 * 1024)
#define AUTH_TAG_SIZE 16
struct lkca_aead_ctx
{
    struct crypto_aead *aead;
    struct aead_request *req;
    char *a_data_buffer;
    char *in_buffer;
    char *out_buffer;
    char tag[AUTH_TAG_SIZE];
};
#endif

int libspdm_aead_prealloc(void **context, char const *alg)
{
#ifndef USE_LKCA
    return -ENODEV;
#else
    struct lkca_aead_ctx *ctx;

    ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
    if (ctx == NULL) {
        return -ENOMEM;
    }

    memset(ctx, 0, sizeof(*ctx));

    ctx->aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
    if (IS_ERR(ctx->aead)) {
        pr_notice("could not allocate AEAD algorithm\n");
        kfree(ctx);
        return -ENODEV;
    }

    ctx->req = aead_request_alloc(ctx->aead, GFP_KERNEL);
    if (ctx->req == NULL) {
        pr_info("could not allocate AEAD request\n");
        crypto_free_aead(ctx->aead);
        kfree(ctx);
        return -ENOMEM;
    }

    ctx->a_data_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
    if (ctx->a_data_buffer == NULL) {
        aead_request_free(ctx->req);
        crypto_free_aead(ctx->aead);
        kfree(ctx);
        return -ENOMEM;
    }

    ctx->in_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
    if (ctx->in_buffer == NULL) {
        kfree(ctx->a_data_buffer);
        aead_request_free(ctx->req);
        crypto_free_aead(ctx->aead);
        kfree(ctx);
        return -ENOMEM;
    }

    ctx->out_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
    if (ctx->out_buffer == NULL) {
        kfree(ctx->a_data_buffer);
        kfree(ctx->in_buffer);
        aead_request_free(ctx->req);
        crypto_free_aead(ctx->aead);
        kfree(ctx);
        return -ENOMEM;
    }

    *context = ctx;
    return 0;
#endif
}

void libspdm_aead_free(void *context)
{
#ifdef USE_LKCA
    struct lkca_aead_ctx *ctx = context;
    crypto_free_aead(ctx->aead);
    aead_request_free(ctx->req);
    kfree(ctx->a_data_buffer);
    kfree(ctx->in_buffer);
    kfree(ctx->out_buffer);
    kfree(ctx);
#endif
}

#define SG_AEAD_AAD 0
#define SG_AEAD_TEXT 1
#define SG_AEAD_SIG 2
// Number of fields in the AEAD scatterlist
#define SG_AEAD_LEN 3

#ifdef USE_LKCA
// This function doesn't do any allocations; it uses temp buffers instead
static int lkca_aead_internal(struct crypto_aead *aead,
                              struct aead_request *req,
                              const uint8_t *key, size_t key_size,
                              const uint8_t *iv, size_t iv_size,
                              struct scatterlist sg_in[],
                              struct scatterlist sg_out[],
                              size_t a_data_size,
                              size_t data_in_size,
                              size_t *data_out_size,
                              size_t tag_size,
                              bool enc)
{
    DECLARE_CRYPTO_WAIT(wait);
    int rc = 0;

    if (crypto_aead_setkey(aead, key, key_size)) {
        pr_info("key could not be set\n");
        return -EINVAL;
    }

    if (crypto_aead_ivsize(aead) != iv_size) {
        pr_info("iv could not be set\n");
        return -EINVAL;
    }

    aead_request_set_ad(req, a_data_size);

    aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                              CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);

    if (enc) {
        aead_request_set_crypt(req, sg_in, sg_out, data_in_size, (u8 *) iv);
        rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);
    } else {
        aead_request_set_crypt(req, sg_in, sg_out, data_in_size + tag_size, (u8 *) iv);
        rc = crypto_wait_req(crypto_aead_decrypt(req), &wait);
    }

    if (rc != 0) {
        pr_info("AEAD operation FAILED: %d\n", rc);
    }

    if (data_out_size != NULL) {
        *data_out_size = data_in_size;
    }

    return rc;
}
#endif

int libspdm_aead_prealloced(void *context,
                            const uint8_t *key, size_t key_size,
                            const uint8_t *iv, size_t iv_size,
                            const uint8_t *a_data, size_t a_data_size,
                            const uint8_t *data_in, size_t data_in_size,
                            uint8_t *tag, size_t tag_size,
                            uint8_t *data_out, size_t *data_out_size,
                            bool enc)
{
#ifndef USE_LKCA
    return -ENODEV;
#else
    int rc = 0;
    struct scatterlist sg_in[SG_AEAD_LEN];
    struct scatterlist sg_out[SG_AEAD_LEN];
    struct lkca_aead_ctx *ctx = context;

    sg_init_table(sg_in, SG_AEAD_LEN);
    sg_init_table(sg_out, SG_AEAD_LEN);

    if (!virt_addr_valid(a_data)) {
        if (a_data_size > BUFFER_SIZE) {
            return -ENOMEM;
        }
        sg_set_buf(&sg_in[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);
        sg_set_buf(&sg_out[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);

        memcpy(ctx->a_data_buffer, a_data, a_data_size);
    } else {
        sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
        sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
    }

    if (!virt_addr_valid(data_in)) {
        if (data_in_size > BUFFER_SIZE) {
            return -ENOMEM;
        }
        sg_set_buf(&sg_in[SG_AEAD_TEXT], ctx->in_buffer, data_in_size);
        memcpy(ctx->in_buffer, data_in, data_in_size);
    } else {
        sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
    }

    if (!virt_addr_valid(data_out)) {
        if (data_in_size > BUFFER_SIZE) {
            return -ENOMEM;
        }
        sg_set_buf(&sg_out[SG_AEAD_TEXT], ctx->out_buffer, data_in_size);
    } else {
        sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
    }

    // The tag is small enough that memcpy is cheaper than checking whether the page is virtual
    if (tag_size > AUTH_TAG_SIZE) {
        return -ENOMEM;
    }
    sg_set_buf(&sg_in[SG_AEAD_SIG], ctx->tag, tag_size);
    sg_set_buf(&sg_out[SG_AEAD_SIG], ctx->tag, tag_size);

    if (!enc)
        memcpy(ctx->tag, tag, tag_size);

    rc = lkca_aead_internal(ctx->aead, ctx->req, key, key_size, iv, iv_size,
                            sg_in, sg_out, a_data_size, data_in_size,
                            data_out_size, tag_size, enc);

    if (enc) {
        memcpy(tag, ctx->tag, tag_size);
    }

    if (!virt_addr_valid(data_out)) {
        memcpy(data_out, ctx->out_buffer, data_in_size);
    }

    return rc;
#endif
}

int libspdm_aead(const uint8_t *key, size_t key_size,
                 const uint8_t *iv, size_t iv_size,
                 const uint8_t *a_data, size_t a_data_size,
                 const uint8_t *data_in, size_t data_in_size,
                 const uint8_t *tag, size_t tag_size,
                 uint8_t *data_out, size_t *data_out_size,
                 bool enc, char const *alg)
{
#ifndef USE_LKCA
    return -ENODEV;
#else
    struct crypto_aead *aead = NULL;
    struct aead_request *req = NULL;
    struct scatterlist sg_in[SG_AEAD_LEN];
    struct scatterlist sg_out[SG_AEAD_LEN];
    uint8_t *a_data_shadow = NULL;
    uint8_t *data_in_shadow = NULL;
    uint8_t *data_out_shadow = NULL;
    uint8_t *tag_shadow = NULL;
    int rc = 0;

    aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
    if (IS_ERR(aead)) {
        pr_notice("could not allocate AEAD algorithm\n");
        return -ENODEV;
    }

    req = aead_request_alloc(aead, GFP_KERNEL);
    if (req == NULL) {
        pr_info("could not allocate AEAD request\n");
        rc = -ENOMEM;
        goto out;
    }

    sg_init_table(sg_in, SG_AEAD_LEN);
    sg_init_table(sg_out, SG_AEAD_LEN);

    if (!virt_addr_valid(a_data)) {
        a_data_shadow = kmalloc(a_data_size, GFP_KERNEL);
        if (a_data_shadow == NULL) {
            rc = -ENOMEM;
            goto out;
        }

        sg_set_buf(&sg_in[SG_AEAD_AAD], a_data_shadow, a_data_size);
        sg_set_buf(&sg_out[SG_AEAD_AAD], a_data_shadow, a_data_size);

        memcpy(a_data_shadow, a_data, a_data_size);
    } else {
        sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
        sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
    }

    if (!virt_addr_valid(data_in)) {
        data_in_shadow = kmalloc(data_in_size, GFP_KERNEL);
        if (data_in_shadow == NULL) {
            rc = -ENOMEM;
            goto out;
        }

        sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in_shadow, data_in_size);

        memcpy(data_in_shadow, data_in, data_in_size);
    } else {
        sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
    }

    if (!virt_addr_valid(data_out)) {
        data_out_shadow = kmalloc(data_in_size, GFP_KERNEL);
        if (data_out_shadow == NULL) {
            rc = -ENOMEM;
            goto out;
        }

        sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out_shadow, data_in_size);
    } else {
        sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
    }

    if (!virt_addr_valid(tag)) {
        tag_shadow = kmalloc(tag_size, GFP_KERNEL);
        if (tag_shadow == NULL) {
            rc = -ENOMEM;
            goto out;
        }

        sg_set_buf(&sg_in[SG_AEAD_SIG], tag_shadow, tag_size);
        sg_set_buf(&sg_out[SG_AEAD_SIG], tag_shadow, tag_size);

        if (!enc)
            memcpy(tag_shadow, tag, tag_size);
    } else {
        sg_set_buf(&sg_in[SG_AEAD_SIG], tag, tag_size);
        sg_set_buf(&sg_out[SG_AEAD_SIG], tag, tag_size);
    }

    rc = lkca_aead_internal(aead, req, key, key_size, iv, iv_size,
                            sg_in, sg_out, a_data_size, data_in_size,
                            data_out_size, tag_size, enc);

    if (enc && (tag_shadow != NULL))
        memcpy((uint8_t *) tag, tag_shadow, tag_size);

    if (data_out_shadow != NULL)
        memcpy(data_out, data_out_shadow, data_in_size);

out:
    if (a_data_shadow != NULL)
        kfree(a_data_shadow);
    if (data_in_shadow != NULL)
        kfree(data_in_shadow);
    if (data_out_shadow != NULL)
        kfree(data_out_shadow);
    if (tag_shadow != NULL)
        kfree(tag_shadow);
    if (req != NULL)
        aead_request_free(req);
    if (aead != NULL)
        crypto_free_aead(aead);
    return rc;
#endif
}

// Wrappers to match the libspdm API shape
bool libspdm_aead_gcm_prealloc(void **context)
{
    return libspdm_aead_prealloc(context, "gcm(aes)") == 0;
}

bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context,
                                           const uint8_t *key, size_t key_size,
                                           const uint8_t *iv, size_t iv_size,
                                           const uint8_t *a_data, size_t a_data_size,
                                           const uint8_t *data_in, size_t data_in_size,
                                           uint8_t *tag_out, size_t tag_size,
                                           uint8_t *data_out, size_t *data_out_size)
{
    int32_t ret;

    if (data_in_size > INT_MAX) {
        return false;
    }
    if (a_data_size > INT_MAX) {
        return false;
    }
    if (iv_size != 12) {
        return false;
    }
    switch (key_size) {
    case 16:
    case 24:
    case 32:
        break;
    default:
        return false;
    }
    if ((tag_size < 12) || (tag_size > 16)) {
        return false;
    }
    if (data_out_size != NULL) {
        if ((*data_out_size > INT_MAX) ||
            (*data_out_size < data_in_size)) {
            return false;
        }
    }

    ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size,
                                  a_data, a_data_size, data_in, data_in_size,
                                  tag_out, tag_size, data_out, data_out_size, true);

    if (data_out_size != NULL) {
        *data_out_size = data_in_size;
    }

    return ret == 0;
}

bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
                                           const uint8_t *key, size_t key_size,
                                           const uint8_t *iv, size_t iv_size,
                                           const uint8_t *a_data, size_t a_data_size,
                                           const uint8_t *data_in, size_t data_in_size,
                                           const uint8_t *tag, size_t tag_size,
                                           uint8_t *data_out, size_t *data_out_size)
{
    int ret;
    if (data_in_size > INT_MAX) {
        return false;
    }
    if (a_data_size > INT_MAX) {
        return false;
    }
    if (iv_size != 12) {
        return false;
    }
    switch (key_size) {
    case 16:
    case 24:
    case 32:
        break;
    default:
        return false;
    }
    if ((tag_size < 12) || (tag_size > 16)) {
        return false;
    }
    if (data_out_size != NULL) {
        if ((*data_out_size > INT_MAX) ||
            (*data_out_size < data_in_size)) {
            return false;
        }
    }

    ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size,
                                  a_data, a_data_size, data_in, data_in_size,
                                  (uint8_t *) tag, tag_size, data_out, data_out_size, false);

    if (data_out_size != NULL) {
        *data_out_size = data_in_size;
    }

    return ret == 0;
}
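To make the call flow above concrete, a minimal hedged sketch of one encrypt round-trip through the prealloc path. The key, nonce, and buffer contents are placeholders for illustration; it assumes USE_LKCA is set and the kernel provides "gcm(aes)":

    static void example_aead_roundtrip(void)
    {
        void *ctx;
        uint8_t key[16] = { 0 };   /* AES-128 key (all-zero, illustration only) */
        uint8_t iv[12]  = { 0 };   /* 96-bit GCM nonce */
        uint8_t aad[4]  = { 1, 2, 3, 4 };
        uint8_t pt[32]  = { 0 };
        uint8_t ct[32], tag[16];
        size_t ct_size  = sizeof(ct);

        if (!libspdm_aead_gcm_prealloc(&ctx))
            return;

        if (libspdm_aead_aes_gcm_encrypt_prealloc(ctx, key, sizeof(key),
                                                  iv, sizeof(iv), aad, sizeof(aad),
                                                  pt, sizeof(pt), tag, sizeof(tag),
                                                  ct, &ct_size))
            pr_info("encrypted %zu bytes\n", ct_size);

        libspdm_aead_free(ctx);
    }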
117
kernel-open/nvidia/libspdm_aead_aes_gcm.c
Normal file
@ -0,0 +1,117 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 */

#include "internal_crypt_lib.h"

bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size,
                                  const uint8_t *iv, size_t iv_size,
                                  const uint8_t *a_data, size_t a_data_size,
                                  const uint8_t *data_in, size_t data_in_size,
                                  uint8_t *tag_out, size_t tag_size,
                                  uint8_t *data_out, size_t *data_out_size)
{
    int32_t ret;

    if (data_in_size > INT_MAX) {
        return false;
    }
    if (a_data_size > INT_MAX) {
        return false;
    }
    if (iv_size != 12) {
        return false;
    }
    switch (key_size) {
    case 16:
    case 24:
    case 32:
        break;
    default:
        return false;
    }
    if ((tag_size < 12) || (tag_size > 16)) {
        return false;
    }
    if (data_out_size != NULL) {
        if ((*data_out_size > INT_MAX) ||
            (*data_out_size < data_in_size)) {
            return false;
        }
    }

    ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size,
                       data_in, data_in_size, tag_out, tag_size,
                       data_out, data_out_size, true, "gcm(aes)");

    if (data_out_size != NULL) {
        *data_out_size = data_in_size;
    }

    return ret == 0;
}

bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size,
                                  const uint8_t *iv, size_t iv_size,
                                  const uint8_t *a_data, size_t a_data_size,
                                  const uint8_t *data_in, size_t data_in_size,
                                  const uint8_t *tag, size_t tag_size,
                                  uint8_t *data_out, size_t *data_out_size)
{
    int ret;
    if (data_in_size > INT_MAX) {
        return false;
    }
    if (a_data_size > INT_MAX) {
        return false;
    }
    if (iv_size != 12) {
        return false;
    }
    switch (key_size) {
    case 16:
    case 24:
    case 32:
        break;
    default:
        return false;
    }
    if ((tag_size < 12) || (tag_size > 16)) {
        return false;
    }
    if (data_out_size != NULL) {
        if ((*data_out_size > INT_MAX) ||
            (*data_out_size < data_in_size)) {
            return false;
        }
    }

    ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size,
                       data_in, data_in_size, tag, tag_size,
                       data_out, data_out_size, false, "gcm(aes)");

    if (data_out_size != NULL) {
        *data_out_size = data_in_size;
    }

    return ret == 0;
}
172
kernel-open/nvidia/libspdm_ec.c
Normal file
@ -0,0 +1,172 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 */

#include "internal_crypt_lib.h"

/* ECDSA signing through LKCA is not implemented; this stub always fails. */
static bool lkca_ecdsa_sign(void *ec_context,
                            const uint8_t *message_hash, size_t hash_size,
                            uint8_t *signature, size_t *sig_size)
{
    return false;
}

bool libspdm_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
                            size_t public_key_size)
{
    if (ec_context == NULL || public_key == NULL) {
        return false;
    }

    return lkca_ec_set_pub_key(ec_context, public_key, public_key_size);
}

bool libspdm_ec_get_pub_key(void *ec_context, uint8_t *public_key,
                            size_t *public_key_size)
{
    if (ec_context == NULL || public_key_size == NULL) {
        return false;
    }

    if (public_key == NULL && *public_key_size != 0) {
        return false;
    }

    return lkca_ec_get_pub_key(ec_context, public_key, public_key_size);
}

bool libspdm_ec_check_key(const void *ec_context)
{
    /* TBD */
    return true;
}

bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_data,
                             size_t *public_size)
{
    if (ec_context == NULL || public_size == NULL) {
        return false;
    }

    if (public_data == NULL && *public_size != 0) {
        return false;
    }

    return lkca_ec_generate_key(ec_context, public_data, public_size);
}

bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
                            size_t peer_public_size, uint8_t *key,
                            size_t *key_size)
{
    if (ec_context == NULL || peer_public == NULL || key_size == NULL ||
        key == NULL) {
        return false;
    }

    if (peer_public_size > INT_MAX) {
        return false;
    }

    return lkca_ec_compute_key(ec_context, peer_public, peer_public_size, key,
                               key_size);
}

bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
                        const uint8_t *message_hash, size_t hash_size,
                        uint8_t *signature, size_t *sig_size)
{
    if (ec_context == NULL || message_hash == NULL) {
        return false;
    }

    if (signature == NULL) {
        return false;
    }

    switch (hash_nid) {
    case LIBSPDM_CRYPTO_NID_SHA256:
        if (hash_size != LIBSPDM_SHA256_DIGEST_SIZE) {
            return false;
        }
        break;

    case LIBSPDM_CRYPTO_NID_SHA384:
        if (hash_size != LIBSPDM_SHA384_DIGEST_SIZE) {
            return false;
        }
        break;

    case LIBSPDM_CRYPTO_NID_SHA512:
        if (hash_size != LIBSPDM_SHA512_DIGEST_SIZE) {
            return false;
        }
        break;

    default:
        return false;
    }

    return lkca_ecdsa_sign(ec_context, message_hash, hash_size, signature, sig_size);
}

bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid,
                          const uint8_t *message_hash, size_t hash_size,
                          const uint8_t *signature, size_t sig_size)
{
    if (ec_context == NULL || message_hash == NULL || signature == NULL) {
        return false;
    }

    if (sig_size > INT_MAX || sig_size == 0) {
        return false;
    }

    switch (hash_nid) {
    case LIBSPDM_CRYPTO_NID_SHA256:
        if (hash_size != LIBSPDM_SHA256_DIGEST_SIZE) {
            return false;
        }
        break;

    case LIBSPDM_CRYPTO_NID_SHA384:
        if (hash_size != LIBSPDM_SHA384_DIGEST_SIZE) {
            return false;
        }
        break;

    case LIBSPDM_CRYPTO_NID_SHA512:
        if (hash_size != LIBSPDM_SHA512_DIGEST_SIZE) {
            return false;
        }
        break;

    default:
        return false;
    }

    return lkca_ecdsa_verify(ec_context, hash_nid, message_hash, hash_size,
                             signature, sig_size);
}
326
kernel-open/nvidia/libspdm_ecc.c
Normal file
@ -0,0 +1,326 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "internal_crypt_lib.h"

#ifdef USE_LKCA
#include <linux/module.h>
MODULE_SOFTDEP("pre: ecdh_generic,ecdsa_generic");

#include <crypto/akcipher.h>
#include <crypto/ecdh.h>
#include <crypto/internal/ecc.h>

struct ecc_ctx {
    unsigned int curve_id;
    u64 priv_key[ECC_MAX_DIGITS]; // In big endian

    struct {
        // ecdsa wants the byte preceding pub_key to be set to '4'
        u64 pub_key_prefix;
        u64 pub_key[2 * ECC_MAX_DIGITS];
    };

    bool pub_key_set;
    bool priv_key_set;
    char const *name;
    int size;
};
#endif

void *libspdm_ec_new_by_nid(size_t nid)
{
#ifndef USE_LKCA
    return NULL;
#else
    struct ecc_ctx *ctx;

    if ((nid != LIBSPDM_CRYPTO_NID_SECP256R1) && (nid != LIBSPDM_CRYPTO_NID_SECP384R1)) {
        return NULL;
    }

    ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx) {
        return NULL;
    }

    if (nid == LIBSPDM_CRYPTO_NID_SECP256R1) {
        ctx->curve_id = ECC_CURVE_NIST_P256;
        ctx->size = 64;
        ctx->name = "ecdsa-nist-p256";
    } else {
        ctx->curve_id = ECC_CURVE_NIST_P384;
        ctx->size = 96;
        ctx->name = "ecdsa-nist-p384";
    }
    ctx->pub_key_set = false;
    ctx->priv_key_set = false;

    return ctx;
#endif
}

void libspdm_ec_free(void *ec_context)
{
#ifdef USE_LKCA
    kfree(ec_context);
#endif
}

bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size)
{
#ifndef USE_LKCA
    return false;
#else
    struct ecc_ctx *ctx = context;
    unsigned int ndigits = ctx->size / 16;

    if (key_size != (ctx->size / 2)) {
        return false;
    }

    memcpy(ctx->priv_key, key, key_size);

    // XXX: if this fails, do we want to retry generating a new key?
    if (ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) {
        return false;
    }

    ctx->pub_key_set = true;
    ctx->priv_key_set = true;
    return true;
#endif
}

bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
                         size_t public_key_size)
{
#ifndef USE_LKCA
    return false;
#else
    struct ecc_ctx *ctx = ec_context;
    struct ecc_point pub_key;
    unsigned int ndigits;

    if (public_key_size != ctx->size) {
        return false;
    }

    // We can reuse pub_key as scratch space for validation
    ndigits = ctx->size / 16;
    pub_key = ECC_POINT_INIT(ctx->pub_key, ctx->pub_key + ndigits, ndigits);

    ecc_swap_digits(public_key, ctx->pub_key, ndigits);
    ecc_swap_digits(((u64 *)public_key) + ndigits, ctx->pub_key + ndigits, ndigits);
    if (ecc_is_pubkey_valid_full(ecc_get_curve(ctx->curve_id), &pub_key)) {
        return false;
    }

    // Store the raw (big-endian) key; the little-endian copy was only needed for validation
    memcpy(ctx->pub_key, public_key, public_key_size);
    ctx->pub_key_set = true;
    return true;
#endif
}

bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key,
                         size_t *public_key_size)
{
#ifndef USE_LKCA
    return false;
#else
    struct ecc_ctx *ctx = ec_context;

    if (*public_key_size < ctx->size) {
        *public_key_size = ctx->size;
        return false;
    }
    *public_key_size = ctx->size;

    memcpy(public_key, ctx->pub_key, ctx->size);
    return true;
#endif
}

bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data,
                          size_t *public_size)
{
#ifndef USE_LKCA
    return false;
#else
    struct ecc_ctx *ctx = ec_context;

    unsigned int ndigits = ctx->size / 16;

    if (ecc_gen_privkey(ctx->curve_id, ndigits, ctx->priv_key)) {
        return false;
    }
    // XXX: if this fails, do we want to retry generating a new key?
    if (ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) {
        return false;
    }

    memcpy(public_data, ctx->pub_key, ctx->size);
    *public_size = ctx->size;
    ctx->priv_key_set = true;
    ctx->pub_key_set = true;

    return true;
#endif
}

bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public,
                         size_t peer_public_size, uint8_t *key,
                         size_t *key_size)
{
#ifndef USE_LKCA
    return false;
#else
    struct ecc_ctx *ctx = ec_context;

    if (peer_public_size != ctx->size) {
        return false;
    }

    if (!ctx->priv_key_set) {
        return false;
    }

    if ((ctx->size / 2) > *key_size) {
        return false;
    }

    if (crypto_ecdh_shared_secret(ctx->curve_id, ctx->size / 16,
                                  (const u64 *) ctx->priv_key,
                                  (const u64 *) peer_public,
                                  (u64 *) key)) {
        return false;
    }

    *key_size = ctx->size / 2;
    return true;
#endif
}

bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid,
                       const uint8_t *message_hash, size_t hash_size,
                       const uint8_t *signature, size_t sig_size)
{
#ifndef USE_LKCA
    return false;
#else
    struct ecc_ctx *ctx = ec_context;

    // Roundabout: re-encode the raw (r, s) signature as BER for the akcipher API
    u64 ber_max_len = 3 + 2 * (4 + (ECC_MAX_BYTES));
    u64 ber_len = 0;
    u8 *ber = NULL;
    u8 *pub_key;
    struct akcipher_request *req = NULL;
    struct crypto_akcipher *tfm = NULL;
    struct scatterlist sg;
    DECLARE_CRYPTO_WAIT(wait);
    int err;

    if (sig_size != ctx->size) {
        return false;
    }

    if (!ctx->pub_key_set) {
        return false;
    }

    tfm = crypto_alloc_akcipher(ctx->name, CRYPTO_ALG_TYPE_AKCIPHER, 0);
    if (IS_ERR(tfm)) {
        pr_info("ALLOC FAILED\n");
        return false;
    }

    pub_key = (u8 *) ctx->pub_key;
    pub_key--;     // Step back into the last byte of pub_key_prefix
    *pub_key = 4;  // And set it to 4 to placate the kernel
    if ((err = crypto_akcipher_set_pub_key(tfm, pub_key, ctx->size + 1)) != 0) {
        pr_info("SET PUB KEY FAILED: %d\n", -err);
        goto failTfm;
    }

    req = akcipher_request_alloc(tfm, GFP_KERNEL);
    if (req == NULL) {
        pr_info("REQUEST ALLOC FAILED\n");
        err = -ENOMEM;
        goto failTfm;
    }

    // We concatenate the signature and the hash and ship both to the kernel
    ber = kmalloc(ber_max_len + hash_size, GFP_KERNEL);
    if (ber == NULL) {
        err = -ENOMEM;
        goto failReq;
    }

    // XXX: NOTE THIS WILL WORK ONLY FOR 256 AND 384 bits. For larger keys the
    // length field will be longer than 1 byte and I haven't taken care of that!

    // Signature
    ber[ber_len++] = 0x30;
    ber[ber_len++] = 2 * (2 + ctx->size / 2);
    ber[ber_len++] = 0x02;
    if (signature[0] > 127) {
        ber[ber_len++] = ctx->size / 2 + 1;
        ber[1]++;
        ber[ber_len++] = 0;
    } else {
        ber[ber_len++] = ctx->size / 2;
    }
    memcpy(ber + ber_len, signature, sig_size / 2);
    ber_len += sig_size / 2;
    ber[ber_len++] = 0x02;
    if (signature[sig_size / 2] > 127) {
        ber[ber_len++] = ctx->size / 2 + 1;
        ber[1]++;
        ber[ber_len++] = 0;
    } else {
        ber[ber_len++] = ctx->size / 2;
    }
    memcpy(ber + ber_len, signature + sig_size / 2, sig_size / 2);
    ber_len += sig_size / 2;

    // Just append the hash; for scatterlists it can't be on the stack anyway
    memcpy(ber + ber_len, message_hash, hash_size);

    sg_init_one(&sg, ber, ber_len + hash_size);
    akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                  CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
    akcipher_request_set_crypt(req, &sg, NULL, ber_len, hash_size);
    err = crypto_wait_req(crypto_akcipher_verify(req), &wait);

    if (err != 0) {
        pr_info("Verify FAILED %d\n", -err);
    }

    kfree(ber);
failReq:
    akcipher_request_free(req);
failTfm:
    crypto_free_akcipher(tfm);

    return err == 0;
#endif
}
|
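/*
 * The BER blob assembled in lkca_ecdsa_verify() above is the standard DER
 * encoding of an ECDSA signature, SEQUENCE { INTEGER r, INTEGER s }, where a
 * raw big-endian half whose top bit is set gets a leading 0x00 byte so it is
 * not read as a negative INTEGER. A minimal standalone sketch of that
 * encoding follows (hypothetical helpers, not part of this module; short-form
 * lengths only, i.e. curves up to P-384 as in the code above):
 */
#include <stdint.h>
#include <string.h>

/* Encode one raw big-endian value as a DER INTEGER (short-form length). */
static size_t der_put_integer(uint8_t *out, const uint8_t *val, size_t len)
{
    size_t n = 0;
    out[n++] = 0x02;                    /* INTEGER tag */
    if (val[0] > 127) {                 /* top bit set: prepend 0x00 */
        out[n++] = (uint8_t)(len + 1);
        out[n++] = 0x00;
    } else {
        out[n++] = (uint8_t)len;
    }
    memcpy(out + n, val, len);
    return n + len;
}

/* Wrap raw r||s (each half_len bytes) into SEQUENCE { INTEGER r, INTEGER s }. */
static size_t der_encode_ecdsa_sig(uint8_t *out, const uint8_t *rs, size_t half_len)
{
    size_t n = 2;                       /* reserve SEQUENCE tag + length */
    n += der_put_integer(out + n, rs, half_len);
    n += der_put_integer(out + n, rs + half_len, half_len);
    out[0] = 0x30;                      /* SEQUENCE tag */
    out[1] = (uint8_t)(n - 2);          /* payload length, short form */
    return n;
}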
kernel-open/nvidia/libspdm_hkdf.c (new file, 158 lines)
@ -0,0 +1,158 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "internal_crypt_lib.h"

// RFC 5869 has some very non-intuitive points; reading it is advised.
static bool lkca_hkdf_expand_only(struct crypto_shash *alg,
                                  const uint8_t *prk, size_t prk_size,
                                  const uint8_t *info, size_t info_size,
                                  uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
    return false;
#else
    int ret;
    size_t i;
    uint8_t ctr;
    uint8_t tmp[HASH_MAX_DIGESTSIZE];
    SHASH_DESC_ON_STACK(desc, alg);
    desc->tfm = alg;

    ret = crypto_shash_setkey(desc->tfm, prk, prk_size);
    if (ret != 0) {
        pr_info("key size mismatch %zu\n", prk_size);
        return false;
    }

    // T(i) = HMAC(PRK, T(i-1) | info | i); the output is T(1) | T(2) | ...
    for (i = 0, ctr = 1; i < out_size; i += prk_size, ctr++) {
        ret = crypto_shash_init(desc);
        if (ret) {
            return false;
        }

        if (i != 0) {
            ret = crypto_shash_update(desc, out + i - prk_size, prk_size);
            if (ret) {
                return false;
            }
        }

        if (info_size > 0) {
            ret = crypto_shash_update(desc, info, info_size);
            if (ret) {
                return false;
            }
        }

        ret = crypto_shash_update(desc, &ctr, 1);
        if (ret)
            return false;

        if ((out_size - i) < prk_size) {
            ret = crypto_shash_final(desc, tmp);
            if (ret) {
                return false;
            }
            memcpy(out + i, tmp, out_size - i);
            memzero_explicit(tmp, sizeof(tmp));
        } else {
            ret = crypto_shash_final(desc, out + i);
            if (ret) {
                return false;
            }
        }
    }

    return true;
#endif
}

bool lkca_hkdf_extract_and_expand(const char *alg_name,
                                  const uint8_t *key, size_t key_size,
                                  const uint8_t *salt, size_t salt_size,
                                  const uint8_t *info, size_t info_size,
                                  uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
    return false;
#else
    int ret = 0;
    struct crypto_shash *alg;
    uint8_t prk[HASH_MAX_DIGESTSIZE];

    if (key == NULL || salt == NULL || info == NULL || out == NULL ||
        key_size > sizeof(prk) || salt_size > INT_MAX || info_size > INT_MAX ||
        out_size > (sizeof(prk) * 255)) {
        return false;
    }

    alg = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(alg)) {
        return false;
    }

    // Extract step: PRK = HMAC(salt, key)
    ret = crypto_shash_setkey(alg, salt, salt_size);
    if (ret != 0) {
        goto out;
    }
    ret = crypto_shash_tfm_digest(alg, key, key_size, prk);
    if (ret != 0) {
        goto out;
    }

    ret = !lkca_hkdf_expand_only(alg, prk, crypto_shash_digestsize(alg), info, info_size, out, out_size);

out:
    crypto_free_shash(alg);
    return ret == 0;
#endif
}

bool lkca_hkdf_expand(const char *alg_name,
                      const uint8_t *prk, size_t prk_size,
                      const uint8_t *info, size_t info_size,
                      uint8_t *out, size_t out_size)
{
#ifndef USE_LKCA
    return false;
#else
    bool ret = false;
    struct crypto_shash *alg;

    if (prk == NULL || info == NULL || out == NULL || prk_size > (512 / 8) ||
        info_size > INT_MAX || (out_size > (prk_size * 255))) {
        return false;
    }

    alg = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(alg)) {
        return false;
    }

    ret = lkca_hkdf_expand_only(alg, prk, prk_size, info, info_size, out, out_size);

    crypto_free_shash(alg);
    return ret;
#endif
}
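/*
 * A hedged usage sketch of the HKDF helper above (hypothetical in-kernel
 * caller; assumes USE_LKCA and internal_crypt_lib.h). Per RFC 5869:
 *   PRK  = HMAC-Hash(salt, ikm)                  -- extract
 *   OKM  = T(1) | T(2) | ...  where
 *   T(i) = HMAC-Hash(PRK, T(i-1) | info | i)     -- expand
 */
static bool derive_session_key(const uint8_t *ikm, size_t ikm_size,
                               const uint8_t *salt, size_t salt_size,
                               const uint8_t *info, size_t info_size,
                               uint8_t okm[64])
{
    /* Derives 64 bytes of output keying material with HMAC-SHA256. */
    return lkca_hkdf_extract_and_expand("hmac(sha256)", ikm, ikm_size,
                                        salt, salt_size, info, info_size,
                                        okm, 64);
}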
kernel-open/nvidia/libspdm_hkdf_sha.c (new file, 111 lines)
@ -0,0 +1,111 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 */

#include "internal_crypt_lib.h"

bool libspdm_hkdf_sha256_extract_and_expand(const uint8_t *key, size_t key_size,
                                            const uint8_t *salt, size_t salt_size,
                                            const uint8_t *info, size_t info_size,
                                            uint8_t *out, size_t out_size)
{
    return lkca_hkdf_extract_and_expand("hmac(sha256)", key, key_size,
                                        salt, salt_size, info, info_size,
                                        out, out_size);
}

bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size,
                                 const uint8_t *salt, size_t salt_size,
                                 uint8_t *prk_out, size_t prk_out_size)
{
    if (prk_out_size != (256 / 8))
        return false;

    return libspdm_hmac_sha256_all(key, key_size, salt, salt_size, prk_out);
}

bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size,
                                const uint8_t *info, size_t info_size,
                                uint8_t *out, size_t out_size)
{
    return lkca_hkdf_expand("hmac(sha256)", prk, prk_size, info, info_size,
                            out, out_size);
}

bool libspdm_hkdf_sha384_extract_and_expand(const uint8_t *key, size_t key_size,
                                            const uint8_t *salt, size_t salt_size,
                                            const uint8_t *info, size_t info_size,
                                            uint8_t *out, size_t out_size)
{
    return lkca_hkdf_extract_and_expand("hmac(sha384)", key, key_size,
                                        salt, salt_size, info, info_size,
                                        out, out_size);
}

bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size,
                                 const uint8_t *salt, size_t salt_size,
                                 uint8_t *prk_out, size_t prk_out_size)
{
    if (prk_out_size != (384 / 8))
        return false;

    return libspdm_hmac_sha384_all(key, key_size, salt, salt_size, prk_out);
}

bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size,
                                const uint8_t *info, size_t info_size,
                                uint8_t *out, size_t out_size)
{
    return lkca_hkdf_expand("hmac(sha384)", prk, prk_size, info, info_size,
                            out, out_size);
}

bool libspdm_hkdf_sha512_extract_and_expand(const uint8_t *key, size_t key_size,
                                            const uint8_t *salt, size_t salt_size,
                                            const uint8_t *info, size_t info_size,
                                            uint8_t *out, size_t out_size)
{
    return lkca_hkdf_extract_and_expand("hmac(sha512)", key, key_size,
                                        salt, salt_size, info, info_size,
                                        out, out_size);
}

bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size,
                                 const uint8_t *salt, size_t salt_size,
                                 uint8_t *prk_out, size_t prk_out_size)
{
    if (prk_out_size != (512 / 8))
        return false;

    return libspdm_hmac_sha512_all(key, key_size, salt, salt_size, prk_out);
}

bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size,
                                const uint8_t *info, size_t info_size,
                                uint8_t *out, size_t out_size)
{
    return lkca_hkdf_expand("hmac(sha512)", prk, prk_size, info, info_size,
                            out, out_size);
}
kernel-open/nvidia/libspdm_hmac_sha.c (new file, 282 lines)
@ -0,0 +1,282 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 */

#include "internal_crypt_lib.h"

void *libspdm_hmac_sha256_new(void)
{
    return lkca_hash_new("hmac(sha256)");
}

void libspdm_hmac_sha256_free(void *hmac_sha256_ctx)
{
    lkca_hash_free(hmac_sha256_ctx);
}

bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key,
                                 size_t key_size)
{
    if (hmac_sha256_ctx == NULL)
        return false;

    return lkca_hmac_set_key(hmac_sha256_ctx, key, key_size);
}

bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx,
                                   void *new_hmac_sha256_ctx)
{
    if (hmac_sha256_ctx == NULL || new_hmac_sha256_ctx == NULL) {
        return false;
    }

    return lkca_hmac_duplicate(new_hmac_sha256_ctx, hmac_sha256_ctx);
}

bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data,
                                size_t data_size)
{
    int32_t ret;

    if (hmac_sha256_ctx == NULL) {
        return false;
    }

    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    ret = crypto_shash_update(hmac_sha256_ctx, data, data_size);
    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value)
{
    int32_t ret;

    if (hmac_sha256_ctx == NULL || hmac_value == NULL) {
        return false;
    }

    ret = crypto_shash_final(hmac_sha256_ctx, hmac_value);

    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_hmac_sha256_all(const void *data, size_t data_size,
                             const uint8_t *key, size_t key_size,
                             uint8_t *hmac_value)
{
    if (hmac_value == NULL) {
        return false;
    }
    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    return lkca_hmac_all("hmac(sha256)", key, key_size, data, data_size, hmac_value);
}

void *libspdm_hmac_sha384_new(void)
{
    return lkca_hash_new("hmac(sha384)");
}

void libspdm_hmac_sha384_free(void *hmac_sha384_ctx)
{
    lkca_hash_free(hmac_sha384_ctx);
}

bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key,
                                 size_t key_size)
{
    if (hmac_sha384_ctx == NULL)
        return false;

    return lkca_hmac_set_key(hmac_sha384_ctx, key, key_size);
}

bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx,
                                   void *new_hmac_sha384_ctx)
{
    if (hmac_sha384_ctx == NULL || new_hmac_sha384_ctx == NULL) {
        return false;
    }

    return lkca_hmac_duplicate(new_hmac_sha384_ctx, hmac_sha384_ctx);
}

bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data,
                                size_t data_size)
{
    int32_t ret;

    if (hmac_sha384_ctx == NULL) {
        return false;
    }

    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    ret = crypto_shash_update(hmac_sha384_ctx, data, data_size);
    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value)
{
    int32_t ret;

    if (hmac_sha384_ctx == NULL || hmac_value == NULL) {
        return false;
    }

    ret = crypto_shash_final(hmac_sha384_ctx, hmac_value);

    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_hmac_sha384_all(const void *data, size_t data_size,
                             const uint8_t *key, size_t key_size,
                             uint8_t *hmac_value)
{
    if (hmac_value == NULL) {
        return false;
    }
    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    return lkca_hmac_all("hmac(sha384)", key, key_size, data, data_size, hmac_value);
}

void *libspdm_hmac_sha512_new(void)
{
    return lkca_hash_new("hmac(sha512)");
}

void libspdm_hmac_sha512_free(void *hmac_sha512_ctx)
{
    lkca_hash_free(hmac_sha512_ctx);
}

bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key,
                                 size_t key_size)
{
    if (hmac_sha512_ctx == NULL)
        return false;

    return lkca_hmac_set_key(hmac_sha512_ctx, key, key_size);
}

bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx,
                                   void *new_hmac_sha512_ctx)
{
    if (hmac_sha512_ctx == NULL || new_hmac_sha512_ctx == NULL) {
        return false;
    }

    return lkca_hmac_duplicate(new_hmac_sha512_ctx, hmac_sha512_ctx);
}

bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data,
                                size_t data_size)
{
    int32_t ret;

    if (hmac_sha512_ctx == NULL) {
        return false;
    }

    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    ret = crypto_shash_update(hmac_sha512_ctx, data, data_size);
    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value)
{
    int32_t ret;

    if (hmac_sha512_ctx == NULL || hmac_value == NULL) {
        return false;
    }

    ret = crypto_shash_final(hmac_sha512_ctx, hmac_value);

    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_hmac_sha512_all(const void *data, size_t data_size,
                             const uint8_t *key, size_t key_size,
                             uint8_t *hmac_value)
{
    if (hmac_value == NULL) {
        return false;
    }
    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    return lkca_hmac_all("hmac(sha512)", key, key_size, data, data_size, hmac_value);
}
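/*
 * A hedged usage sketch of the streaming HMAC interface above (hypothetical
 * caller). set_key also initializes the underlying shash, so the sequence is
 * new -> set_key -> update... -> final -> free; the one-shot
 * libspdm_hmac_sha256_all() is equivalent when the data is contiguous.
 */
static bool mac_two_parts(const uint8_t *key, size_t key_size,
                          const void *a, size_t a_size,
                          const void *b, size_t b_size,
                          uint8_t mac[32])
{
    bool ok;
    void *ctx = libspdm_hmac_sha256_new();

    if (ctx == NULL)
        return false;

    ok = libspdm_hmac_sha256_set_key(ctx, key, key_size) &&
         libspdm_hmac_sha256_update(ctx, a, a_size) &&
         libspdm_hmac_sha256_update(ctx, b, b_size) &&
         libspdm_hmac_sha256_final(ctx, mac);

    libspdm_hmac_sha256_free(ctx);
    return ok;
}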
kernel-open/nvidia/libspdm_rand.c (new file, 37 lines)
@ -0,0 +1,37 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "internal_crypt_lib.h"

// get_random_bytes is a non-GPL symbol and is not part of LKCA, so there is
// no need to stub it out
bool libspdm_random_bytes(uint8_t *output, size_t size)
{
    get_random_bytes(output, size);
    return true;
}

// Ignoring the caller-provided seed is specifically allowed by SPDM
bool libspdm_random_seed(const uint8_t *seed, size_t seed_size)
{
    return true;
}
kernel-open/nvidia/libspdm_sha.c (new file, 264 lines)
@ -0,0 +1,264 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 */

#include "internal_crypt_lib.h"

void *libspdm_sha256_new(void)
{
    return lkca_hash_new("sha256");
}

void libspdm_sha256_free(void *sha256_ctx)
{
    lkca_hash_free(sha256_ctx);
}

bool libspdm_sha256_init(void *sha256_context)
{
    return crypto_shash_init(sha256_context) == 0;
}

bool libspdm_sha256_duplicate(const void *sha256_context,
                              void *new_sha256_context)
{
    if (sha256_context == NULL || new_sha256_context == NULL) {
        return false;
    }

    return lkca_hash_duplicate(new_sha256_context, sha256_context);
}

bool libspdm_sha256_update(void *sha256_context, const void *data,
                           size_t data_size)
{
    int32_t ret;

    if (sha256_context == NULL) {
        return false;
    }

    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    ret = crypto_shash_update(sha256_context, data, data_size);
    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value)
{
    int32_t ret;

    if (sha256_context == NULL || hash_value == NULL) {
        return false;
    }

    ret = crypto_shash_final(sha256_context, hash_value);
    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_sha256_hash_all(const void *data, size_t data_size,
                             uint8_t *hash_value)
{
    if (hash_value == NULL) {
        return false;
    }
    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    return lkca_hash_all("sha256", data, data_size, hash_value);
}

void *libspdm_sha384_new(void)
{
    return lkca_hash_new("sha384");
}

void libspdm_sha384_free(void *sha384_ctx)
{
    lkca_hash_free(sha384_ctx);
}

bool libspdm_sha384_init(void *sha384_context)
{
    return crypto_shash_init(sha384_context) == 0;
}

bool libspdm_sha384_duplicate(const void *sha384_context,
                              void *new_sha384_context)
{
    if (sha384_context == NULL || new_sha384_context == NULL) {
        return false;
    }

    return lkca_hash_duplicate(new_sha384_context, sha384_context);
}

bool libspdm_sha384_update(void *sha384_context, const void *data,
                           size_t data_size)
{
    int32_t ret;

    if (sha384_context == NULL) {
        return false;
    }

    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    ret = crypto_shash_update(sha384_context, data, data_size);
    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value)
{
    int32_t ret;

    if (sha384_context == NULL || hash_value == NULL) {
        return false;
    }

    ret = crypto_shash_final(sha384_context, hash_value);
    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_sha384_hash_all(const void *data, size_t data_size,
                             uint8_t *hash_value)
{
    if (hash_value == NULL) {
        return false;
    }
    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    return lkca_hash_all("sha384", data, data_size, hash_value);
}

void *libspdm_sha512_new(void)
{
    return lkca_hash_new("sha512");
}

void libspdm_sha512_free(void *sha512_ctx)
{
    lkca_hash_free(sha512_ctx);
}

bool libspdm_sha512_init(void *sha512_context)
{
    return crypto_shash_init(sha512_context) == 0;
}

bool libspdm_sha512_duplicate(const void *sha512_context,
                              void *new_sha512_context)
{
    if (sha512_context == NULL || new_sha512_context == NULL) {
        return false;
    }

    return lkca_hash_duplicate(new_sha512_context, sha512_context);
}

bool libspdm_sha512_update(void *sha512_context, const void *data,
                           size_t data_size)
{
    int32_t ret;

    if (sha512_context == NULL) {
        return false;
    }

    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    ret = crypto_shash_update(sha512_context, data, data_size);
    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value)
{
    int32_t ret;

    if (sha512_context == NULL || hash_value == NULL) {
        return false;
    }

    ret = crypto_shash_final(sha512_context, hash_value);
    if (ret != 0) {
        return false;
    }
    return true;
}

bool libspdm_sha512_hash_all(const void *data, size_t data_size,
                             uint8_t *hash_value)
{
    if (hash_value == NULL) {
        return false;
    }
    if (data == NULL && data_size != 0) {
        return false;
    }
    if (data_size > INT_MAX) {
        return false;
    }

    return lkca_hash_all("sha512", data, data_size, hash_value);
}
kernel-open/nvidia/libspdm_shash.c (new file, 160 lines)
@ -0,0 +1,160 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "internal_crypt_lib.h"

void *lkca_hash_new(const char* alg_name)
{
#ifndef USE_LKCA
    return NULL;
#else
    // XXX: could we reuse the crypto_shash part and just allocate the desc?
    struct crypto_shash *alg;
    struct shash_desc *desc;

    alg = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(alg)) {
        printk(KERN_INFO "Failed to alloc %s\n", alg_name);
        return NULL;
    }

    desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(alg), GFP_KERNEL);
    if (desc == NULL) {
        printk(KERN_INFO "Kernel out of memory\n");
        crypto_free_shash(alg);
        return NULL;
    }

    desc->tfm = alg;

    return desc;
#endif
}

void lkca_hash_free(struct shash_desc *ctx)
{
#ifndef USE_LKCA
#else
    crypto_free_shash(ctx->tfm);
    kfree(ctx);
#endif
}

bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src)
{
#ifndef USE_LKCA
    return false;
#else
    SHASH_DESC_ON_STACK(tmp, src);

    if (crypto_shash_export((struct shash_desc *) src, tmp)) {
        return false;
    }
    if (crypto_shash_import(dst, tmp)) {
        return false;
    }

    return true;
#endif
}

bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src)
{
#ifndef USE_LKCA
    return false;
#else
    // In LKCA, hmac export does not export the ipad/opad state, so we work
    // around it by copying the key-derived pads directly out of the tfm
    // context before duplicating the hash state.

    struct crypto_shash *src_tfm = src->tfm;
    struct crypto_shash *dst_tfm = dst->tfm;
    char *src_ipad = crypto_tfm_ctx_aligned(&src_tfm->base);
    char *dst_ipad = crypto_tfm_ctx_aligned(&dst_tfm->base);
    int ss = crypto_shash_statesize(dst_tfm);
    memcpy(dst_ipad, src_ipad, crypto_shash_blocksize(src->tfm));
    memcpy(dst_ipad + ss, src_ipad + ss, crypto_shash_blocksize(src->tfm));
    crypto_shash_clear_flags(dst->tfm, CRYPTO_TFM_NEED_KEY);

    return lkca_hash_duplicate(dst, src);
#endif
}

bool lkca_hash_all(const char* alg_name, const void *data,
                   size_t data_size, uint8_t *hash_value)
{
#ifndef USE_LKCA
    return false;
#else
    int ret;
    struct crypto_shash *alg;

    alg = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(alg)) {
        return false;
    }

    ret = crypto_shash_tfm_digest(alg, data, data_size, hash_value);

    crypto_free_shash(alg);

    return (ret == 0);
#endif
}

bool lkca_hmac_set_key(struct shash_desc *desc, const uint8_t *key, size_t key_size)
{
#ifndef USE_LKCA
    return false;
#else
    int ret;
    ret = crypto_shash_setkey(desc->tfm, key, key_size);
    if (ret == 0) {
        ret = crypto_shash_init(desc);
    }
    return ret == 0;
#endif
}

bool lkca_hmac_all(const char* alg_name, const uint8_t *key, size_t key_size,
                   const uint8_t *data, size_t data_size, uint8_t *hash_value)
{
#ifndef USE_LKCA
    return false;
#else
    int ret;
    struct crypto_shash *alg;

    alg = crypto_alloc_shash(alg_name, 0, 0);
    if (IS_ERR(alg)) {
        return false;
    }

    ret = crypto_shash_setkey(alg, key, key_size);

    if (ret == 0) {
        ret = crypto_shash_tfm_digest(alg, data, data_size, hash_value);
    }

    crypto_free_shash(alg);

    return (ret == 0);
#endif
}
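/*
 * A hedged sketch of how the duplicate support above can be used
 * (hypothetical caller; assumes USE_LKCA): hash a common prefix once, then
 * fork the state and finalize two different suffixes without rehashing the
 * prefix. This relies on crypto_shash_export/import round-tripping partial
 * state, which is what lkca_hash_duplicate() does internally.
 */
static bool hash_fork_example(const void *prefix, size_t prefix_len,
                              const void *a, size_t a_len,
                              const void *b, size_t b_len,
                              uint8_t out_a[32], uint8_t out_b[32])
{
    bool ok = false;
    struct shash_desc *d1 = lkca_hash_new("sha256");
    struct shash_desc *d2 = lkca_hash_new("sha256");

    if (d1 == NULL || d2 == NULL)
        goto done;

    ok = crypto_shash_init(d1) == 0 &&
         crypto_shash_update(d1, prefix, prefix_len) == 0 &&
         lkca_hash_duplicate(d2, d1) &&
         crypto_shash_update(d1, a, a_len) == 0 &&
         crypto_shash_final(d1, out_a) == 0 &&
         crypto_shash_update(d2, b, b_len) == 0 &&
         crypto_shash_final(d2, out_b) == 0;

done:
    if (d1 != NULL)
        lkca_hash_free(d1);
    if (d2 != NULL)
        lkca_hash_free(d2);
    return ok;
}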
kernel-open/nvidia/libspdm_x509.c (new file, 456 lines)
@ -0,0 +1,456 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * libspdm_x509_verify_cert_chain, libspdm_x509_get_cert_from_cert_chain, check
 * and prototypes taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
 * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
 */

#include "internal_crypt_lib.h"

#ifdef USE_LKCA
#include <crypto/public_key.h>
#include <keys/asymmetric-type.h>
#endif

bool libspdm_x509_construct_certificate(const uint8_t *cert, size_t cert_size,
                                        uint8_t **single_x509_cert)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_construct_certificate_stack(uint8_t **x509_stack, ...)
{
    LIBSPDM_ASSERT(false);
    return false;
}

void libspdm_x509_free(void *x509_cert)
{
    LIBSPDM_ASSERT(false);
}

void libspdm_x509_stack_free(void *x509_stack)
{
    LIBSPDM_ASSERT(false);
}

static bool lkca_asn1_get_tag(uint8_t const *ptr, uint8_t const *end,
                              size_t *length, uint32_t tag)
{
    uint64_t max_len = end - ptr;

    // The chain must be less than 1 GB
    if ((max_len < 2) || (max_len > (1024 * 1024 * 1024))) {
        return false;
    }

    // We only deal with universal and application tags
    if (ptr[0] != tag) {
        return false;
    }

    if (ptr[1] < 0x80) {
        *length = ptr[1] + 2;
    } else if (ptr[1] == 0x81) {
        if (max_len < 3) {
            return false;
        }
        *length = ptr[2] + 3;
    } else if (ptr[1] == 0x82) {
        if (max_len < 4) {
            return false;
        }
        *length = (ptr[2] << 8) + ptr[3] + 4;
    } else {
        // In theory the length could be bigger than 64 KB, but longer length
        // forms are not handled here
        return false;
    }

    if (*length > max_len) {
        return false;
    }

    return true;
}

bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length,
                          uint32_t tag)
{
    return lkca_asn1_get_tag(*ptr, end, length, tag);
}

bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
                                   uint8_t *cert_subject,
                                   size_t *subject_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_common_name(const uint8_t *cert, size_t cert_size,
                                  char *common_name,
                                  size_t *common_name_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool
libspdm_x509_get_organization_name(const uint8_t *cert, size_t cert_size,
                                   char *name_buffer,
                                   size_t *name_buffer_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                          void **rsa_context)
{
    LIBSPDM_ASSERT(false);
    return false;
}
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */

bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                         void **ec_context)
{
#ifdef USE_LKCA
    bool ret = false;
    uint32_t key_size = 0;
    struct key_preparsed_payload lkca_cert;
    struct public_key *pub;

    lkca_cert.data = cert;
    lkca_cert.datalen = cert_size;

    if (cert == NULL) {
        return false;
    }

    if (key_type_asymmetric.preparse(&lkca_cert)) {
        return false;
    }

    pub = lkca_cert.payload.data[asym_crypto];
    // -1 because LKCA prepends a '4' (uncompressed point marker) to public keys
    key_size = pub->keylen - 1;

    if (key_size == (2 * 256 / 8)) {
        *ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1);
    } else if (key_size == (2 * 384 / 8)) {
        *ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP384R1);
    } else {
        goto err;
    }

    if (*ec_context == NULL) {
        goto err;
    }

    // Again skip the '4' in the key to stay in line with the SPDM protocol.
    // It is added back in lkca_ecdsa_verify.
    if (!lkca_ec_set_pub_key(*ec_context, (char *) pub->key + 1, key_size)) {
        libspdm_ec_free(*ec_context);
        goto err;
    }

    ret = true;
err:
    key_type_asymmetric.free_preparse(&lkca_cert);
    return ret;
#else
    return false;
#endif
}

bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                          void **ecd_context)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
                                          void **sm2_context)
{
    LIBSPDM_ASSERT(false);
    return false;
}

static int lkca_x509_verify_cert(const uint8_t *cert, size_t cert_size,
                                 const uint8_t *ca_cert, size_t ca_cert_size)
{
#ifdef USE_LKCA
    int ret;
    struct key_preparsed_payload lkca_cert;
    struct key_preparsed_payload lkca_ca_cert;

    lkca_cert.data = cert;
    lkca_cert.datalen = cert_size;
    lkca_ca_cert.data = ca_cert;
    lkca_ca_cert.datalen = ca_cert_size;

    ret = key_type_asymmetric.preparse(&lkca_cert);
    if (ret) {
        return ret;
    }

    ret = key_type_asymmetric.preparse(&lkca_ca_cert);
    if (ret) {
        key_type_asymmetric.free_preparse(&lkca_cert);
        return ret;
    }

    ret = public_key_verify_signature(lkca_ca_cert.payload.data[asym_crypto],
                                      lkca_cert.payload.data[asym_auth]);

    key_type_asymmetric.free_preparse(&lkca_cert);
    key_type_asymmetric.free_preparse(&lkca_ca_cert);

    return ret;
#else
    // A nonzero value, so that the == 0 check in the caller fails
    return -1;
#endif
}

bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
                              const uint8_t *ca_cert, size_t ca_cert_size)
{
    return lkca_x509_verify_cert(cert, cert_size, ca_cert, ca_cert_size) == 0;
}

bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
                                    const uint8_t *cert_chain, size_t cert_chain_length)
{
    size_t preceding_cert_len;
    const uint8_t *preceding_cert;
    size_t current_cert_len;
    const uint8_t *current_cert;
    bool verify_flag;
    int ret;

    verify_flag = false;
    preceding_cert = root_cert;
    preceding_cert_len = root_cert_length;

    current_cert = cert_chain;

    /* Get the current certificate from the certificates buffer and verify it
     * against the preceding certificate */
    do {
        if (!lkca_asn1_get_tag(
                current_cert, cert_chain + cert_chain_length, &current_cert_len,
                LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) {
            break;
        }

        ret = lkca_x509_verify_cert(current_cert, current_cert_len,
                                    preceding_cert, preceding_cert_len);
        if (ret != 0) {
            verify_flag = false;
            break;
        } else {
            verify_flag = true;
        }

        preceding_cert = current_cert;
        preceding_cert_len = current_cert_len;

        current_cert = current_cert + current_cert_len;
    } while (true);

    return verify_flag;
}

bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
                                           size_t cert_chain_length,
                                           const int32_t cert_index, const uint8_t **cert,
                                           size_t *cert_length)
{
    size_t asn1_len;
    int32_t current_index;
    size_t current_cert_len;
    const uint8_t *current_cert;

    current_cert_len = 0;

    /* Check input parameters.*/
    if ((cert_chain == NULL) || (cert == NULL) || (cert_index < -1) ||
        (cert_length == NULL)) {
        return false;
    }

    current_cert = cert_chain;
    current_index = -1;

    /* Traverse the certificate chain*/
    while (true) {
        /* Get asn1 tag len*/
        if (!lkca_asn1_get_tag(
                current_cert, cert_chain + cert_chain_length, &asn1_len,
                LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) {
            break;
        }

        current_cert_len = asn1_len;
        current_index++;

        if (current_index == cert_index) {
            *cert = current_cert;
            *cert_length = current_cert_len;
            return true;
        }

        current_cert = current_cert + current_cert_len;
    }

    /* If cert_index is -1, return the last certificate*/
    if (cert_index == -1 && current_index >= 0) {
        *cert = current_cert - current_cert_len;
        *cert_length = current_cert_len;
        return true;
    }

    return false;
}

bool libspdm_x509_get_tbs_cert(const uint8_t *cert, size_t cert_size,
                               uint8_t **tbs_cert, size_t *tbs_cert_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size,
                              size_t *version)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size,
                                    uint8_t *serial_number,
                                    size_t *serial_number_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
                                  uint8_t *cert_issuer,
                                  size_t *issuer_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool
libspdm_x509_get_issuer_common_name(const uint8_t *cert, size_t cert_size,
                                    char *common_name,
                                    size_t *common_name_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool
libspdm_x509_get_issuer_orgnization_name(const uint8_t *cert, size_t cert_size,
                                         char *name_buffer,
                                         size_t *name_buffer_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_signature_algorithm(const uint8_t *cert,
                                          size_t cert_size, uint8_t *oid,
                                          size_t *oid_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
                                     const uint8_t *oid, size_t oid_size,
                                     uint8_t *extension_data,
                                     size_t *extension_data_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
                               uint8_t *from, size_t *from_size, uint8_t *to,
                               size_t *to_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size,
                                size_t *usage)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
                                         size_t cert_size, uint8_t *usage,
                                         size_t *usage_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
                                                 size_t cert_size,
                                                 uint8_t *basic_constraints,
                                                 size_t *basic_constraints_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

bool libspdm_x509_set_date_time(char const *date_time_str, void *date_time, size_t *date_time_size)
{
    LIBSPDM_ASSERT(false);
    return false;
}

int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2)
{
    LIBSPDM_ASSERT(false);
    return -3;
}

bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid,
                          uint8_t *requester_info, size_t requester_info_length,
                          void *context, char *subject_name,
                          size_t *csr_len, uint8_t **csr_pointer)
{
    LIBSPDM_ASSERT(false);
    return false;
}
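/*
 * A standalone illustration of the DER length forms handled by
 * lkca_asn1_get_tag() above (hypothetical userspace helper, not part of this
 * module). DER lengths come in these forms:
 *   0x00..0x7f   -- short form, 1 length byte
 *   0x81 ll      -- long form, 1 value byte
 *   0x82 hh ll   -- long form, 2 value bytes
 * Longer forms are rejected, exactly as in the kernel code above.
 */
#include <stddef.h>
#include <stdint.h>

/* Returns total element size (tag + length bytes + content), or 0 on error. */
static size_t der_element_size(const uint8_t *p, size_t avail)
{
    size_t hdr, content;

    if (avail < 2)
        return 0;

    if (p[1] < 0x80) {
        hdr = 2;
        content = p[1];
    } else if (p[1] == 0x81 && avail >= 3) {
        hdr = 3;
        content = p[2];
    } else if (p[1] == 0x82 && avail >= 4) {
        hdr = 4;
        content = ((size_t)p[2] << 8) | p[3];
    } else {
        return 0;
    }

    return (hdr + content <= avail) ? hdr + content : 0;
}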
@ -2691,3 +2691,17 @@ nvswitch_os_get_supported_register_events_params
    *os_descriptor = NV_FALSE;
    return NVL_SUCCESS;
}

NvlStatus
nvswitch_os_get_pid
(
    NvU32 *pPid
)
{
    if (pPid != NULL)
    {
        *pPid = task_pid_nr(current);
    }

    return NVL_SUCCESS;
}
@ -86,6 +86,14 @@

#include <linux/ioport.h>

#if defined(NV_LINUX_CC_PLATFORM_H_PRESENT)
#include <linux/cc_platform.h>
#endif

#if defined(NV_ASM_CPUFEATURE_H_PRESENT)
#include <asm/cpufeature.h>
#endif

#include "conftest/patches.h"

#define RM_THRESHOLD_TOTAL_IRQ_COUNT 100000
@ -139,8 +147,6 @@ struct semaphore nv_linux_devices_lock;

static NvTristate nv_chipset_is_io_coherent = NV_TRISTATE_INDETERMINATE;

NvU64 nv_shared_gpa_boundary = 0;

// True if all the successfully probed devices support ATS
// Assigned at device probe (module init) time
NvBool nv_ats_supported = NVCPU_IS_PPC64LE
@ -234,77 +240,23 @@ struct dev_pm_ops nv_pm_ops = {
*** STATIC functions
***/

#if defined(NVCPU_X86_64)
#define NV_AMD_SEV_BIT BIT(1)

#define NV_GENMASK_ULL(h, l) \
    (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

static
void get_shared_gpa_boundary(
void nv_detect_conf_compute_platform(
    void
)
{
    NvU32 priv_high = cpuid_ebx(0x40000003);
    if (priv_high & BIT(22))
#if defined(NV_CC_PLATFORM_PRESENT)
    os_cc_enabled = cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);

#if defined(X86_FEATURE_TDX_GUEST)
    if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
    {
        NvU32 isolation_config_b = cpuid_ebx(0x4000000C);
        nv_shared_gpa_boundary = ((NvU64)1) << ((isolation_config_b & NV_GENMASK_ULL(11, 6)) >> 6);
    }
}

static
NvBool nv_is_sev_supported(
    void
)
{
    unsigned int eax, ebx, ecx, edx;

    /* Check for the SME/SEV support leaf */
    eax = 0x80000000;
    ecx = 0;
    native_cpuid(&eax, &ebx, &ecx, &edx);
    if (eax < 0x8000001f)
        return NV_FALSE;

    /* By design, a VM using vTOM doesn't see the SEV setting */
    get_shared_gpa_boundary();
    if (nv_shared_gpa_boundary != 0)
        return NV_TRUE;

    eax = 0x8000001f;
    ecx = 0;
    native_cpuid(&eax, &ebx, &ecx, &edx);
    /* Check whether SEV is supported */
    if (!(eax & NV_AMD_SEV_BIT))
        return NV_FALSE;

    return NV_TRUE;
        os_cc_tdx_enabled = NV_TRUE;
}
#endif

static
void nv_sev_init(
    void
)
{
#if defined(MSR_AMD64_SEV) && defined(NVCPU_X86_64)
    NvU32 lo_val, hi_val;

    if (!nv_is_sev_supported())
        return;

    rdmsr(MSR_AMD64_SEV, lo_val, hi_val);

    os_sev_status = lo_val;
#if defined(MSR_AMD64_SEV_ENABLED)
    os_sev_enabled = (os_sev_status & MSR_AMD64_SEV_ENABLED);
#endif

    /* By design, a VM using vTOM doesn't see the SEV setting */
    if (nv_shared_gpa_boundary != 0)
        os_sev_enabled = NV_TRUE;

#else
    os_cc_enabled = NV_FALSE;
    os_cc_tdx_enabled = NV_FALSE;
#endif
}

@ -710,7 +662,7 @@ nv_module_init(nv_stack_t **sp)
    }

    nv_init_rsync_info();
    nv_sev_init();
    nv_detect_conf_compute_platform();

    if (!rm_init_rm(*sp))
    {
@ -1396,6 +1348,8 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)

    nv->flags |= NV_FLAG_OPEN;

    rm_request_dnotifier_state(sp, nv);

    /*
     * Now that RM init is done, allow dynamic power to control the GPU in FINE
     * mode, if enabled. (If the mode is COARSE, this unref will do nothing
@ -4568,19 +4522,19 @@ NvU64 NV_API_CALL nv_get_dma_start_address(
     * as the starting address for all DMA mappings.
     */
    saved_dma_mask = pci_dev->dma_mask;
    if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64)) != 0)
    if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64)) != 0)
    {
        goto done;
    }

    dma_addr = pci_map_single(pci_dev, NULL, 1, DMA_BIDIRECTIONAL);
    if (pci_dma_mapping_error(pci_dev, dma_addr))
    dma_addr = dma_map_single(&pci_dev->dev, NULL, 1, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(&pci_dev->dev, dma_addr))
    {
        pci_set_dma_mask(pci_dev, saved_dma_mask);
        dma_set_mask(&pci_dev->dev, saved_dma_mask);
        goto done;
    }

    pci_unmap_single(pci_dev, dma_addr, 1, DMA_BIDIRECTIONAL);
    dma_unmap_single(&pci_dev->dev, dma_addr, 1, DMA_BIDIRECTIONAL);

    /*
     * From IBM: "For IODA2, native DMA bypass or KVM TCE-based implementation
@ -4612,7 +4566,7 @@ NvU64 NV_API_CALL nv_get_dma_start_address(
     */
    nv_printf(NV_DBG_WARNINGS,
              "NVRM: DMA window limited by platform\n");
    pci_set_dma_mask(pci_dev, saved_dma_mask);
    dma_set_mask(&pci_dev->dev, saved_dma_mask);
    goto done;
}
else if ((dma_addr & saved_dma_mask) != 0)
@ -4631,7 +4585,7 @@ NvU64 NV_API_CALL nv_get_dma_start_address(
     */
    nv_printf(NV_DBG_WARNINGS,
              "NVRM: DMA window limited by memory size\n");
    pci_set_dma_mask(pci_dev, saved_dma_mask);
    dma_set_mask(&pci_dev->dev, saved_dma_mask);
    goto done;
}
}
@ -290,10 +290,6 @@ NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(struct gpuDevice *device);

NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
                                  gpuChannelHandle channel);
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
NV_STATUS nvGpuOpsCcslLogDeviceEncryption(struct ccslContext_t *ctx,
                                          NvU8 *decryptIv);
NV_STATUS nvGpuOpsCcslAcquireEncryptionIv(struct ccslContext_t *ctx,
                                          NvU8 *encryptIv);
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
                               NvU8 direction);
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
@ -312,6 +308,8 @@ NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
                              NvU8 const *inputBuffer,
                              NvU8 const *decryptIv,
                              NvU8 *outputBuffer,
                              NvU8 const *addAuthData,
                              NvU32 addAuthDataSize,
                              NvU8 const *authTagBuffer);
NV_STATUS nvGpuOpsCcslSign(struct ccslContext_t *ctx,
                           NvU32 bufferSize,
@ -320,5 +318,9 @@ NV_STATUS nvGpuOpsCcslSign(struct ccslContext_t *ctx,
NV_STATUS nvGpuOpsQueryMessagePool(struct ccslContext_t *ctx,
                                   NvU8 direction,
                                   NvU64 *messageNum);
NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
                              NvU8 direction,
                              NvU64 increment,
                              NvU8 *iv);

#endif /* _NV_GPU_OPS_H_*/
@ -126,6 +126,19 @@ static void nvUvmFreeSafeStack(nvidia_stack_t *sp)
        nv_kmem_cache_free_stack(sp);
}

static NV_STATUS nvUvmDestroyFaultInfoAndStacks(nvidia_stack_t *sp,
                                                uvmGpuDeviceHandle device,
                                                UvmGpuFaultInfo *pFaultInfo)
{
    nv_kmem_cache_free_stack(pFaultInfo->replayable.cslCtx.nvidia_stack);
    nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_bh_sp);
    nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_sp);

    return rm_gpu_ops_destroy_fault_info(sp,
                                         (gpuDeviceHandle)device,
                                         pFaultInfo);
}

NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatformInfo *gpuInfo)
{
    nvidia_stack_t *sp = NULL;

@ -196,7 +209,7 @@ NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session,
    memset(platformInfo, 0, sizeof(*platformInfo));
    platformInfo->atsSupported = nv_ats_supported;

    platformInfo->sevEnabled = os_sev_enabled;
    platformInfo->sevEnabled = os_cc_enabled;

    status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session);

@ -855,6 +868,7 @@ NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
{
    nvidia_stack_t *sp = NULL;
    NV_STATUS status;
    int err;

    if (nv_kmem_cache_alloc_stack(&sp) != 0)
    {
@ -864,36 +878,48 @@ NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
    status = rm_gpu_ops_init_fault_info(sp,
                                        (gpuDeviceHandle)device,
                                        pFaultInfo);
    if (status != NV_OK)
    {
        goto done;
    }

    // Preallocate a stack for functions called from ISR top half
    pFaultInfo->nonReplayable.isr_sp = NULL;
    pFaultInfo->nonReplayable.isr_bh_sp = NULL;
    if (status == NV_OK)
    {
    pFaultInfo->replayable.cslCtx.nvidia_stack = NULL;

    // NOTE: nv_kmem_cache_alloc_stack does not allocate a stack on PPC.
    // Therefore, the pointer can be NULL on success. Always use the
    // returned error code to determine if the operation was successful.
        int err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp);
        if (!err)
    err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp);
    if (err)
    {
        goto error;
    }

    err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_bh_sp);
    if (err)
    {
        nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_sp);
        pFaultInfo->nonReplayable.isr_sp = NULL;
    }
        goto error;
    }

    // The cslCtx.ctx pointer is not NULL only when ConfidentialComputing is enabled.
    if (pFaultInfo->replayable.cslCtx.ctx != NULL)
    {
        err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->replayable.cslCtx.nvidia_stack);
        if (err)
        {
            rm_gpu_ops_destroy_fault_info(sp,
                                          (gpuDeviceHandle)device,
            goto error;
        }
    }
    goto done;

error:
    nvUvmDestroyFaultInfoAndStacks(sp,
                                   device,
                                   pFaultInfo);

    status = NV_ERR_NO_MEMORY;
    }
    }

done:
    nv_kmem_cache_free_stack(sp);
    return status;
}
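The point of the refactor above is that every allocation failure now funnels through one error label, with the new nvUvmDestroyFaultInfoAndStacks() helper doing the teardown that was previously unwound at each failure site. A condensed control-flow sketch, paraphrased from the hunk (not the literal driver code; the repeated allocations are elided):

/*
 * Paraphrased sketch of the init/error/done pattern introduced above.
 */
static NV_STATUS sketchInitFaultInfo(nvidia_stack_t *sp,
                                     uvmGpuDeviceHandle device,
                                     UvmGpuFaultInfo *pFaultInfo)
{
    NV_STATUS status = rm_gpu_ops_init_fault_info(sp, (gpuDeviceHandle)device, pFaultInfo);

    if (status != NV_OK)
        goto done;

    // Start every stack pointer at NULL so the error path can free blindly.
    pFaultInfo->nonReplayable.isr_sp = NULL;
    pFaultInfo->nonReplayable.isr_bh_sp = NULL;
    pFaultInfo->replayable.cslCtx.nvidia_stack = NULL;

    // nv_kmem_cache_alloc_stack() can succeed yet leave the pointer NULL
    // (PPC), so only its return code decides whether to bail out.
    if (nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp) != 0)
        goto error;
    // ... isr_bh_sp and, with Confidential Computing, the CSL stack follow
    // the same pattern ...
    goto done;

error:
    // One cleanup path: free whatever was allocated, then tear down the
    // RM-side fault info via the new helper.
    nvUvmDestroyFaultInfoAndStacks(sp, device, pFaultInfo);
    status = NV_ERR_NO_MEMORY;
done:
    return status;
}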
@ -949,23 +975,9 @@ NV_STATUS nvUvmInterfaceDestroyFaultInfo(uvmGpuDeviceHandle device,
    nvidia_stack_t *sp = nvUvmGetSafeStack();
    NV_STATUS status;

    // Free the preallocated stack for functions called from ISR
    if (pFaultInfo->nonReplayable.isr_sp != NULL)
    {
        nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_sp);
        pFaultInfo->nonReplayable.isr_sp = NULL;
    }

    if (pFaultInfo->nonReplayable.isr_bh_sp != NULL)
    {
        nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_bh_sp);
        pFaultInfo->nonReplayable.isr_bh_sp = NULL;
    }

    status = rm_gpu_ops_destroy_fault_info(sp,
                                           (gpuDeviceHandle)device,
    status = nvUvmDestroyFaultInfoAndStacks(sp,
                                            device,
                                            pFaultInfo);

    nvUvmFreeSafeStack(sp);
    return status;
}
@ -1504,44 +1516,18 @@ void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext)
}
EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext);

NV_STATUS nvUvmInterfaceCslLogDeviceEncryption(UvmCslContext *uvmCslContext,
                                               UvmCslIv *decryptIv)
{
    NV_STATUS status;
    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;

    status = rm_gpu_ops_ccsl_log_device_encryption(sp, uvmCslContext->ctx, (NvU8 *)decryptIv);

    return status;
}
EXPORT_SYMBOL(nvUvmInterfaceCslLogDeviceEncryption);

NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
                                    UvmCslDirection direction)
                                    UvmCslOperation operation)
{
    NV_STATUS status;
    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;

    status = rm_gpu_ops_ccsl_rotate_iv(sp, uvmCslContext->ctx, direction);
    status = rm_gpu_ops_ccsl_rotate_iv(sp, uvmCslContext->ctx, operation);

    return status;
}
EXPORT_SYMBOL(nvUvmInterfaceCslRotateIv);

NV_STATUS nvUvmInterfaceCslAcquireEncryptionIv(UvmCslContext *uvmCslContext,
                                               UvmCslIv *encryptIv)
{
    NV_STATUS status;
    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;

    BUILD_BUG_ON(NV_OFFSETOF(UvmCslIv, fresh) != sizeof(encryptIv->iv));

    status = rm_gpu_ops_ccsl_acquire_encryption_iv(sp, uvmCslContext->ctx, (NvU8*)encryptIv);

    return status;
}
EXPORT_SYMBOL(nvUvmInterfaceCslAcquireEncryptionIv);

NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
                                   NvU32 bufferSize,
                                   NvU8 const *inputBuffer,

@ -1566,6 +1552,8 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
                                   NvU8 const *inputBuffer,
                                   UvmCslIv const *decryptIv,
                                   NvU8 *outputBuffer,
                                   NvU8 const *addAuthData,
                                   NvU32 addAuthDataSize,
                                   NvU8 const *authTagBuffer)
{
    NV_STATUS status;

@ -1577,6 +1565,8 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
                                   inputBuffer,
                                   (NvU8 *)decryptIv,
                                   outputBuffer,
                                   addAuthData,
                                   addAuthDataSize,
                                   authTagBuffer);

    return status;

@ -1598,18 +1588,32 @@ NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
EXPORT_SYMBOL(nvUvmInterfaceCslSign);

NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
                                            UvmCslDirection direction,
                                            UvmCslOperation operation,
                                            NvU64 *messageNum)
{
    NV_STATUS status;
    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;

    status = rm_gpu_ops_ccsl_query_message_pool(sp, uvmCslContext->ctx, direction, messageNum);
    status = rm_gpu_ops_ccsl_query_message_pool(sp, uvmCslContext->ctx, operation, messageNum);

    return status;
}
EXPORT_SYMBOL(nvUvmInterfaceCslQueryMessagePool);

NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
                                       UvmCslOperation operation,
                                       NvU64 increment,
                                       UvmCslIv *iv)
{
    NV_STATUS status;
    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;

    status = rm_gpu_ops_ccsl_increment_iv(sp, uvmCslContext->ctx, operation, increment, (NvU8 *)iv);

    return status;
}
EXPORT_SYMBOL(nvUvmInterfaceCslIncrementIv);
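These hunks rename the UvmCslDirection parameter to UvmCslOperation across the CSL entry points, drop the device-encryption logging call in favor of the new nvUvmInterfaceCslIncrementIv(), and route additional authenticated data through decryption. A hedged usage sketch of the new increment call; UVM_CSL_OPERATION_ENCRYPT is assumed to be a UvmCslOperation enumerant defined in the UVM headers, so verify the name there:

/*
 * Illustrative sketch only (assumed enumerant name, assumed semantics that
 * the returned IV reflects the incremented counter).
 */
static NV_STATUS sketchReserveIvs(UvmCslContext *cslCtx, NvU64 numMessages, UvmCslIv *iv)
{
    // Advance the encrypt-side IV by numMessages in one call so a batch of
    // upcoming encryptions can be accounted for up front.
    return nvUvmInterfaceCslIncrementIv(cslCtx,
                                        UVM_CSL_OPERATION_ENCRYPT,
                                        numMessages,
                                        iv);
}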

#else // NV_UVM_ENABLE

NV_STATUS nv_uvm_suspend(void)

@ -32,6 +32,17 @@ NVIDIA_SOURCES += nvidia/nv-msi.c
NVIDIA_SOURCES += nvidia/nv-caps.c
NVIDIA_SOURCES += nvidia/nv-frontend.c
NVIDIA_SOURCES += nvidia/nv_uvm_interface.c
NVIDIA_SOURCES += nvidia/libspdm_aead.c
NVIDIA_SOURCES += nvidia/libspdm_ecc.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf.c
NVIDIA_SOURCES += nvidia/libspdm_rand.c
NVIDIA_SOURCES += nvidia/libspdm_shash.c
NVIDIA_SOURCES += nvidia/libspdm_aead_aes_gcm.c
NVIDIA_SOURCES += nvidia/libspdm_sha.c
NVIDIA_SOURCES += nvidia/libspdm_hmac_sha.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf_sha.c
NVIDIA_SOURCES += nvidia/libspdm_ec.c
NVIDIA_SOURCES += nvidia/libspdm_x509.c
NVIDIA_SOURCES += nvidia/nvlink_linux.c
NVIDIA_SOURCES += nvidia/nvlink_caps.c
NVIDIA_SOURCES += nvidia/linux_nvswitch.c
@ -120,6 +120,9 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache_shared
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_get_domain_bus_and_slot
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_num_physpages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data

@ -156,8 +159,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += full_name_hash
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pgprot_decrypted
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_mkdec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_platform_has
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
NV_CONFTEST_FUNCTION_COMPILE_TESTS += unsafe_follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get

@ -222,6 +224,8 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_set_init
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_clear_init_cb
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_alloc_mem_from_gscco
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_free_gscco_mem
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_bytes
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto

NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops
@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@ -30,6 +30,9 @@ extern "C" {

#include "nvlink_common.h"

#define TOP_LEVEL_LOCKING_DISABLED 1
#define PER_LINK_LOCKING_DISABLED  1

#define NVLINK_FREE(x) nvlink_free((void *)x)

// Memory management functions
kernel-open/nvidia/nvspdm_cryptlib_extensions.h (new file, 41 lines)
@ -0,0 +1,41 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

bool libspdm_aead_gcm_prealloc(void **context);
void libspdm_aead_free(void *context);
bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context,
                                           const uint8_t *key, size_t key_size,
                                           const uint8_t *iv, size_t iv_size,
                                           const uint8_t *a_data, size_t a_data_size,
                                           const uint8_t *data_in, size_t data_in_size,
                                           uint8_t *tag_out, size_t tag_size,
                                           uint8_t *data_out, size_t *data_out_size);
bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
                                           const uint8_t *key, size_t key_size,
                                           const uint8_t *iv, size_t iv_size,
                                           const uint8_t *a_data, size_t a_data_size,
                                           const uint8_t *data_in, size_t data_in_size,
                                           const uint8_t *tag, size_t tag_size,
                                           uint8_t *data_out, size_t *data_out_size);
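The point of this extension header is that the AEAD context is allocated once, up front, and then reused for each AES-GCM operation instead of being allocated per call. A hypothetical usage sketch built only from the declarations above; the 32/12/16-byte key, IV, and tag sizes are AES-256-GCM conventions assumed here, not sizes this header mandates:

/* Illustrative caller (assumed sizes; error handling kept minimal). */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include "nvspdm_cryptlib_extensions.h"

bool example_seal(const uint8_t key[32], const uint8_t iv[12],
                  const uint8_t *msg, size_t msg_len,
                  uint8_t *ct, size_t *ct_len, uint8_t tag[16])
{
    void *aead_ctx = NULL;
    bool ok;

    // Allocate the context once, outside any hot or atomic path.
    if (!libspdm_aead_gcm_prealloc(&aead_ctx))
        return false;

    ok = libspdm_aead_aes_gcm_encrypt_prealloc(aead_ctx,
                                               key, 32,   /* key, key_size */
                                               iv, 12,    /* iv, iv_size */
                                               NULL, 0,   /* no additional data */
                                               msg, msg_len,
                                               tag, 16,   /* tag_out, tag_size */
                                               ct, ct_len);

    libspdm_aead_free(aead_ctx);
    return ok;
}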
@ -41,8 +41,8 @@ extern nv_kthread_q_t nv_kthread_q;
NvU32 os_page_size  = PAGE_SIZE;
NvU64 os_page_mask  = NV_PAGE_MASK;
NvU8  os_page_shift = PAGE_SHIFT;
NvU32 os_sev_status = 0;
NvBool os_sev_enabled = 0;
NvBool os_cc_enabled = 0;
NvBool os_cc_tdx_enabled = 0;

#if defined(CONFIG_DMA_SHARED_BUFFER)
NvBool os_dma_buf_enabled = NV_TRUE;

@ -1787,6 +1787,10 @@ NV_STATUS NV_API_CALL os_numa_memblock_size
    NvU64 *memblock_size
)
{
#if NV_IS_EXPORT_SYMBOL_PRESENT_memory_block_size_bytes
    *memblock_size = memory_block_size_bytes();
    return NV_OK;
#endif
    if (nv_ctl_device.numa_memblock_size == 0)
        return NV_ERR_INVALID_STATE;
    *memblock_size = nv_ctl_device.numa_memblock_size;

@ -2118,6 +2122,53 @@ void NV_API_CALL os_nv_cap_close_fd
    nv_cap_close_fd(fd);
}

typedef struct os_numa_gpu_mem_hotplug_notifier_s
{
    NvU64 start_pa;
    NvU64 size;
    nv_pci_info_t pci_info;
    struct notifier_block memory_notifier;
} os_numa_gpu_mem_hotplug_notifier_t;

static int os_numa_verify_gpu_memory_zone(struct notifier_block *nb,
                                          unsigned long action, void *data)
{
    os_numa_gpu_mem_hotplug_notifier_t *notifier = container_of(nb,
        os_numa_gpu_mem_hotplug_notifier_t,
        memory_notifier);
    struct memory_notify *mhp = data;
    NvU64 start_pa = PFN_PHYS(mhp->start_pfn);
    NvU64 size = PFN_PHYS(mhp->nr_pages);

    if (action == MEM_GOING_ONLINE)
    {
        // Check if onlining memory falls in the GPU memory range
        if ((start_pa >= notifier->start_pa) &&
            (start_pa + size) <= (notifier->start_pa + notifier->size))
        {
            /*
             * Verify GPU memory NUMA node has memory only in ZONE_MOVABLE before
             * onlining the memory so that incorrect auto online setting doesn't
             * cause the memory onlined in a zone where kernel allocations
             * could happen, resulting in GPU memory hot unpluggable and requiring
             * system reboot.
             */
            if (page_zonenum((pfn_to_page(mhp->start_pfn))) != ZONE_MOVABLE)
            {
                nv_printf(NV_DBG_ERRORS, "NVRM: Failing GPU memory onlining as the onlining zone "
                          "is not movable. pa: 0x%llx size: 0x%llx\n"
                          "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x installed in the system\n"
                          "NVRM: requires auto onlining mode online_movable enabled in\n"
                          "NVRM: /sys/devices/system/memory/auto_online_blocks\n",
                          start_pa, size, notifier->pci_info.domain, notifier->pci_info.bus,
                          notifier->pci_info.slot, notifier->pci_info.function);
                return NOTIFY_BAD;
            }
        }
    }
    return NOTIFY_OK;
}

NV_STATUS NV_API_CALL os_numa_add_gpu_memory
(
    void *handle,

@ -2129,8 +2180,16 @@ NV_STATUS NV_API_CALL os_numa_add_gpu_memory
#if defined(NV_ADD_MEMORY_DRIVER_MANAGED_PRESENT)
    int node = 0;
    nv_linux_state_t *nvl = pci_get_drvdata(handle);
    nv_state_t *nv = NV_STATE_PTR(nvl);
    NvU64 base = offset + nvl->coherent_link_info.gpu_mem_pa;
    int ret;
    os_numa_gpu_mem_hotplug_notifier_t notifier =
    {
        .start_pa = base,
        .size = size,
        .pci_info = nv->pci_info,
        .memory_notifier.notifier_call = os_numa_verify_gpu_memory_zone,
    };

    if (nodeId == NULL)
    {

@ -2149,21 +2208,31 @@ NV_STATUS NV_API_CALL os_numa_add_gpu_memory

    NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_ONLINE_IN_PROGRESS);

    ret = register_memory_notifier(&notifier.memory_notifier);
    if (ret)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Memory hotplug notifier registration failed\n");
        goto failed;
    }

#ifdef NV_ADD_MEMORY_DRIVER_MANAGED_HAS_MHP_FLAGS_ARG
    ret = add_memory_driver_managed(node, base, size, "System RAM (NVIDIA)", MHP_NONE);
#else
    ret = add_memory_driver_managed(node, base, size, "System RAM (NVIDIA)");
#endif
    unregister_memory_notifier(&notifier.memory_notifier);

    if (ret == 0)
    {
        struct zone *zone = &NODE_DATA(node)->node_zones[ZONE_MOVABLE];
        NvU64 start_pfn = base >> PAGE_SHIFT;
        NvU64 end_pfn = (base + size) >> PAGE_SHIFT;

        /* Verify the full GPU memory range passed on is onlined */
        if (zone->zone_start_pfn != start_pfn ||
            zone_end_pfn(zone) != end_pfn)
        {
            nv_printf(NV_DBG_ERRORS, "GPU memory zone movable auto onlining failed!\n");
            nv_printf(NV_DBG_ERRORS, "NVRM: GPU memory zone movable auto onlining failed!\n");
#ifdef NV_OFFLINE_AND_REMOVE_MEMORY_PRESENT
#ifdef NV_REMOVE_MEMORY_HAS_NID_ARG
            if (offline_and_remove_memory(node, base, size) != 0)

@ -2171,7 +2240,7 @@ NV_STATUS NV_API_CALL os_numa_add_gpu_memory
            if (offline_and_remove_memory(base, size) != 0)
#endif
            {
                nv_printf(NV_DBG_ERRORS, "offline_and_remove_memory failed\n");
                nv_printf(NV_DBG_ERRORS, "NVRM: offline_and_remove_memory failed\n");
            }
#endif
            goto failed;
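The heart of this hunk is a bracketing pattern: the memory-hotplug notifier is live only for the duration of add_memory_driver_managed(), letting os_numa_verify_gpu_memory_zone() return NOTIFY_BAD to veto any attempt to online the GPU range outside ZONE_MOVABLE. A condensed sketch of that bracket, paraphrased from the diff with error handling trimmed:

#include <linux/memory.h>          /* register/unregister_memory_notifier() */
#include <linux/memory_hotplug.h>  /* add_memory_driver_managed() */

/* Paraphrased from the hunk above; not the literal driver code. */
static int sketch_online_gpu_memory(int node, u64 base, u64 size,
                                    struct notifier_block *nb)
{
    int ret = register_memory_notifier(nb); /* nb fires on MEM_GOING_ONLINE */

    if (ret)
        return ret;

#ifdef NV_ADD_MEMORY_DRIVER_MANAGED_HAS_MHP_FLAGS_ARG
    ret = add_memory_driver_managed(node, base, size, "System RAM (NVIDIA)", MHP_NONE);
#else
    ret = add_memory_driver_managed(node, base, size, "System RAM (NVIDIA)");
#endif

    unregister_memory_notifier(nb); /* the veto window closes with the add */
    return ret;
}

After the add, the caller still verifies that the whole [base, base + size) range actually landed in ZONE_MOVABLE, and offlines and removes it again if not.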
@ -1068,6 +1068,12 @@ bool DeviceImpl::getSDPExtnForColorimetrySupported()
        }
    }

    if (!targetDevice)
    {
        DP_ASSERT(0 && "targetDevice is invalid for SDP_EXT COLORIMETRY");
        return false;
    }

    // Send remote DPCD for devices behind the branch
    if ((AuxBus::success == targetDevice->getDpcdData(NV_DPCD_TRAINING_AUX_RD_INTERVAL,
                                                      &byte, sizeof byte, &size, &nakReason)) &&
@ -30,6 +30,7 @@
#define GPS_FUNC_SUPPORT          0x00000000  // Bit list of supported functions
#define GPS_FUNC_GETOBJBYTYPE     0x00000010  // Fetch any specific Object by Type
#define GPS_FUNC_GETALLOBJS       0x00000011  // Fetch all Objects
#define GPS_FUNC_REQUESTDXSTATE   0x00000012  // Request D-Notifier state
#define GPS_FUNC_GETCALLBACKS     0x00000013  // Get system requested callbacks
#define GPS_FUNC_PCONTROL         0x0000001C  // GPU power control function
#define GPS_FUNC_PSHARESTATUS     0x00000020  // Get system requested Power Steering settings
@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r535_87
#define NV_BUILD_BRANCH VK535_87
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r535_87
#define NV_PUBLIC_BRANCH VK535_87
#endif

#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/r535_87-122"
#define NV_BUILD_CHANGELIST_NUM (32882771)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/VK535_87-128"
#define NV_BUILD_CHANGELIST_NUM (33195052)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r535/r535_87-122"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32882771)
#define NV_BUILD_NAME "rel/gpu_drv/r535/VK535_87-128"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33195052)

#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r535_87-4"
#define NV_BUILD_CHANGELIST_NUM (32875904)
#define NV_BUILD_BRANCH_VERSION "VK535_87-8"
#define NV_BUILD_CHANGELIST_NUM (33190458)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "535.93"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32875904)
#define NV_BUILD_NAME "537.16"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33190458)
#define NV_BUILD_BRANCH_BASE_VERSION R535
#endif
// End buildmeister python edited section
@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2009 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2009 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a

@ -443,6 +443,7 @@ static const PNPVendorId PNPVendorIds[] =
    { "SAN", _VENDOR_NAME_ENTRY("Sanyo Electric Co.,Ltd.") },
    { "SCC", _VENDOR_NAME_ENTRY("SORD") },
    { "SCD", _VENDOR_NAME_ENTRY("Sanyo") },
    { "SDC", _VENDOR_NAME_ENTRY("Samsung Display Corp.") },
    { "SDI", _VENDOR_NAME_ENTRY("Samtron/Sigma Designs") },
    { "SDT", _VENDOR_NAME_ENTRY("Siemens AG") },
    { "SEA", _VENDOR_NAME_ENTRY("Segate") },
@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
    (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)

#define NV_VERSION_STRING "535.43.02"
#define NV_VERSION_STRING "535.43.08"

#else

@ -112,21 +112,12 @@ struct _NVLOG_BUFFER
#endif // NVOS_IS_UNIX


//
// Due to this file's peculiar location, NvPort may or may not be includable
// This hack will go away when NvLog is moved into common/shared
//
#if NVOS_IS_MACINTOSH

#if !PORT_IS_KERNEL_BUILD
typedef struct PORT_SPINLOCK PORT_SPINLOCK;
#else
#include "nvport/nvport.h"
#endif
typedef struct PORT_MUTEX PORT_MUTEX;
typedef struct PORT_RWLOCK PORT_RWLOCK;

#elif !defined(PORT_IS_KERNEL_BUILD)
typedef struct PORT_SPINLOCK PORT_SPINLOCK;
#else
#if PORT_IS_KERNEL_BUILD
#include "nvport/nvport.h"
#endif

@ -143,11 +134,35 @@ typedef struct _NVLOG_LOGGER
    NvU32 nextFree;
    /** Total number of free buffer slots */
    NvU32 totalFree;
    /** Lock for all buffer oprations */
    /** Lock for some buffer oprations */
    PORT_SPINLOCK* mainLock;
    /** Lock for creating/deleting pBuffers and accessing them from RmCtrls */
    PORT_MUTEX* buffersLock;
    /** Lock for registering/deregistering flush callbacks */
    PORT_RWLOCK *flushCbsLock;
} NVLOG_LOGGER;
extern NVLOG_LOGGER NvLogLogger;

/**
 * NvLog uses two locks:
 *  - NVLOG_LOGGER::mainLock is used to protect some accesses to pBuffers, or
 *    an individual pBuffers entry depending on locking flags.
 *  - NVLOG_LOGGER::buffersLock is used to protect creating/deleting pBuffers and accessing them
 *    from certain RmCtrl handlers.
 *
 * Historically in most contexts obtaining RMAPI lock would suffice, and mainLock would optionally
 * be used for certain buffers. Ioctl NV_ESC_RM_NVLOG_CTRL cannot touch RMAPI lock and needs
 * to access NvLog. The latter operation might race if called at an inopportune time: e.g. if the
 * ioctl is called during RM init when KGSP creates/deletes GSP NvLog buffers. Using buffersLock is
 * thus necessary to resolve the potential race.
 *
 * This leads to an unfortunate sequence where mainLock and buffersLock are nested. The latter lock
 * cannot be removed as it is used in IRQ paths.
 *
 * This should be refactored to use a single RWLock that does conditional acquire in possible IRQ
 * paths.
 */
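The closing paragraph of that comment proposes replacing the nested spinlock/mutex pair with a single reader-writer lock that is acquired conditionally on paths that may run in IRQ context. One hypothetical shape of that refactor, as a sketch only; the try-acquire name below is assumed, so verify NvPort's actual rwlock API before leaning on it:

/*
 * Illustrative sketch (assumed portSyncRwLockTryAcquireRead name): IRQ-capable
 * paths use a non-blocking acquire and simply skip logging on contention,
 * while ordinary paths take the blocking acquire.
 */
static NvBool nvlogEnterRead(PORT_RWLOCK *pLock, NvBool bMayBeIrqPath)
{
    if (bMayBeIrqPath)
        return portSyncRwLockTryAcquireRead(pLock); // never blocks; may fail

    portSyncRwLockAcquireRead(pLock);               // blocking acquire is safe here
    return NV_TRUE;
}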
//
// Buffer flags
//