/*******************************************************************************

    Copyright (c) 2021-2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

        The above copyright notice and this permission notice shall be
        included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

#include "uvm_common.h"
|
|
|
|
#include "uvm_global.h"
|
|
|
|
#include "uvm_conf_computing.h"
|
|
|
|
#include "uvm_kvmalloc.h"
|
|
|
|
#include "uvm_gpu.h"
|
2023-07-18 15:54:53 +02:00
|
|
|
#include "uvm_hal.h"
|
2023-05-30 10:11:36 -07:00
|
|
|
#include "uvm_mem.h"
|
|
|
|
#include "uvm_processors.h"
|
|
|
|
#include "uvm_tracker.h"
|
|
|
|
#include "nv_uvm_interface.h"
|
|
|
|
#include "uvm_va_block.h"
|
|
|
|
|
2024-01-24 17:51:53 +01:00
|
|
|
// The maximum number of secure operations per push is:
// UVM_MAX_PUSH_SIZE / min(CE encryption size, CE decryption size)
// + 1 (tracking semaphore) = 128 * 1024 / 56 + 1 = 2342
#define UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_MIN 2342lu

// Channels use 32-bit counters so the value after rotation is 0xffffffff.
// Setting the limit to this value (or higher) will result in rotation
// on every check. However, pre-emptive rotation when submitting control
// GPFIFO entries relies on the fact that multiple successive checks after
// rotation do not trigger more rotations if there was no IV used in between.
#define UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_MAX 0xfffffffelu

// Attempt rotation when two billion IVs are left. The IV rotation call can
// fail if the necessary locks are not available, so multiple attempts may be
// needed for IV rotation to succeed.
#define UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_DEFAULT (1lu << 31)

// Start rotating after 500 encryptions/decryptions when running tests.
#define UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_TESTS ((1lu << 32) - 500lu)
static ulong uvm_conf_computing_channel_iv_rotation_limit = UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_DEFAULT;

module_param(uvm_conf_computing_channel_iv_rotation_limit, ulong, S_IRUGO);

static UvmGpuConfComputeMode uvm_conf_computing_get_mode(const uvm_parent_gpu_t *parent)
{
    return parent->rm_info.gpuConfComputeCaps.mode;
}

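// True if the GPU is running in Hopper Confidential Computing (HCC) mode.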
bool uvm_conf_computing_mode_is_hcc(const uvm_gpu_t *gpu)
{
    return uvm_conf_computing_get_mode(gpu->parent) == UVM_GPU_CONF_COMPUTE_MODE_HCC;
}

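// Verify that the parent GPU's Confidential Computing mode is consistent with
// the system-wide setting and with every other registered parent GPU. Called
// with the global lock held.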
void uvm_conf_computing_check_parent_gpu(const uvm_parent_gpu_t *parent)
{
    uvm_parent_gpu_t *other_parent;
    UvmGpuConfComputeMode parent_mode = uvm_conf_computing_get_mode(parent);

    uvm_assert_mutex_locked(&g_uvm_global.global_lock);

    // The Confidential Computing state of the GPU should match that of the
    // system.
    UVM_ASSERT((parent_mode != UVM_GPU_CONF_COMPUTE_MODE_NONE) == g_uvm_global.conf_computing_enabled);

    // All GPUs derive Confidential Computing status from their parent. By
    // current policy all parent GPUs have identical Confidential Computing
    // status.
    for_each_parent_gpu(other_parent)
        UVM_ASSERT(parent_mode == uvm_conf_computing_get_mode(other_parent));
}

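// Unlink the buffer from the pool's free list and release its backing
// allocations. The pool lock must be held.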
static void dma_buffer_destroy_locked(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool,
                                      uvm_conf_computing_dma_buffer_t *dma_buffer)
{
    uvm_assert_mutex_locked(&dma_buffer_pool->lock);

    list_del(&dma_buffer->node);
    uvm_tracker_wait_deinit(&dma_buffer->tracker);

    uvm_mem_free(dma_buffer->alloc);
    uvm_mem_free(dma_buffer->auth_tag);
    uvm_kvfree(dma_buffer);
}

static uvm_gpu_t *dma_buffer_pool_to_gpu(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool)
{
    return container_of(dma_buffer_pool, uvm_gpu_t, conf_computing.dma_buffer_pool);
}

// Allocate and map a new DMA stage buffer to CPU and GPU (VA)
static NV_STATUS dma_buffer_create(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool,
                                   uvm_conf_computing_dma_buffer_t **dma_buffer_out)
{
    uvm_gpu_t *dma_owner;
    uvm_conf_computing_dma_buffer_t *dma_buffer;
    uvm_mem_t *alloc = NULL;
    NV_STATUS status = NV_OK;
    size_t auth_tags_size = (UVM_CONF_COMPUTING_DMA_BUFFER_SIZE / PAGE_SIZE) * UVM_CONF_COMPUTING_AUTH_TAG_SIZE;

    dma_buffer = uvm_kvmalloc_zero(sizeof(*dma_buffer));
    if (!dma_buffer)
        return NV_ERR_NO_MEMORY;

    dma_owner = dma_buffer_pool_to_gpu(dma_buffer_pool);
    uvm_tracker_init(&dma_buffer->tracker);
    INIT_LIST_HEAD(&dma_buffer->node);

    status = uvm_mem_alloc_sysmem_dma_and_map_cpu_kernel(UVM_CONF_COMPUTING_DMA_BUFFER_SIZE, dma_owner, NULL, &alloc);
    if (status != NV_OK)
        goto err;

    dma_buffer->alloc = alloc;

    status = uvm_mem_map_gpu_kernel(alloc, dma_owner);
    if (status != NV_OK)
        goto err;

    status = uvm_mem_alloc_sysmem_dma_and_map_cpu_kernel(auth_tags_size, dma_owner, NULL, &alloc);
    if (status != NV_OK)
        goto err;

    dma_buffer->auth_tag = alloc;

    status = uvm_mem_map_gpu_kernel(alloc, dma_owner);
    if (status != NV_OK)
        goto err;

    *dma_buffer_out = dma_buffer;

    return status;

err:
    dma_buffer_destroy_locked(dma_buffer_pool, dma_buffer);
    return status;
}

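// Wait for the trackers of all buffers currently on the pool's free list.
// Buffers that are checked out are not waited on.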
void uvm_conf_computing_dma_buffer_pool_sync(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool)
{
    uvm_conf_computing_dma_buffer_t *dma_buffer;

    if (dma_buffer_pool->num_dma_buffers == 0)
        return;

    uvm_mutex_lock(&dma_buffer_pool->lock);
    list_for_each_entry(dma_buffer, &dma_buffer_pool->free_dma_buffers, node)
        uvm_tracker_wait(&dma_buffer->tracker);
    uvm_mutex_unlock(&dma_buffer_pool->lock);
}

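// Destroy all buffers on the pool's free list and reset the buffer count to
// zero.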
static void conf_computing_dma_buffer_pool_deinit(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool)
{
    uvm_conf_computing_dma_buffer_t *dma_buffer;
    uvm_conf_computing_dma_buffer_t *next_buff;

    if (dma_buffer_pool->num_dma_buffers == 0)
        return;

    // Because the pool is torn down at the same time the GPU is unregistered,
    // the lock is required only to quiet assertions, not for functional
    // reasons (see dma_buffer_destroy_locked()).
    uvm_mutex_lock(&dma_buffer_pool->lock);

    list_for_each_entry_safe(dma_buffer, next_buff, &dma_buffer_pool->free_dma_buffers, node) {
        dma_buffer_destroy_locked(dma_buffer_pool, dma_buffer);
        dma_buffer_pool->num_dma_buffers--;
    }

    UVM_ASSERT(dma_buffer_pool->num_dma_buffers == 0);
    UVM_ASSERT(list_empty(&dma_buffer_pool->free_dma_buffers));
    uvm_mutex_unlock(&dma_buffer_pool->lock);
}

static void dma_buffer_pool_add(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool,
                                uvm_conf_computing_dma_buffer_t *dma_buffer)
{
    uvm_assert_mutex_locked(&dma_buffer_pool->lock);
    list_add_tail(&dma_buffer->node, &dma_buffer_pool->free_dma_buffers);
}

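// Create the initial pool of 32 DMA stage buffers. If any allocation fails,
// the pool is torn down and the error is returned.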
static NV_STATUS conf_computing_dma_buffer_pool_init(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool)
{
    size_t i;
    size_t num_dma_buffers = 32;
    NV_STATUS status = NV_OK;

    UVM_ASSERT(dma_buffer_pool->num_dma_buffers == 0);
    UVM_ASSERT(g_uvm_global.conf_computing_enabled);

    INIT_LIST_HEAD(&dma_buffer_pool->free_dma_buffers);
    uvm_mutex_init(&dma_buffer_pool->lock, UVM_LOCK_ORDER_CONF_COMPUTING_DMA_BUFFER_POOL);
    dma_buffer_pool->num_dma_buffers = num_dma_buffers;

    uvm_mutex_lock(&dma_buffer_pool->lock);
    for (i = 0; i < num_dma_buffers; i++) {
        uvm_conf_computing_dma_buffer_t *dma_buffer;

        status = dma_buffer_create(dma_buffer_pool, &dma_buffer);
        if (status != NV_OK)
            break;

        dma_buffer_pool_add(dma_buffer_pool, dma_buffer);
    }
    uvm_mutex_unlock(&dma_buffer_pool->lock);

    if (i < num_dma_buffers)
        conf_computing_dma_buffer_pool_deinit(dma_buffer_pool);

    return status;
}

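// Try to double the number of buffers in the pool. Adding at least one new
// buffer counts as success; an error is returned only if no buffer could be
// added at all.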
static NV_STATUS dma_buffer_pool_expand_locked(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool)
{
    size_t i;
    uvm_gpu_t *gpu;
    size_t nb_to_alloc;
    NV_STATUS status = NV_OK;
    UVM_ASSERT(dma_buffer_pool->num_dma_buffers > 0);

    gpu = dma_buffer_pool_to_gpu(dma_buffer_pool);
    nb_to_alloc = dma_buffer_pool->num_dma_buffers;
    for (i = 0; i < nb_to_alloc; ++i) {
        uvm_conf_computing_dma_buffer_t *dma_buffer;

        status = dma_buffer_create(dma_buffer_pool, &dma_buffer);
        if (status != NV_OK)
            break;

        dma_buffer_pool_add(dma_buffer_pool, dma_buffer);
    }

    dma_buffer_pool->num_dma_buffers += i;

    if (i == 0)
        return status;

    return NV_OK;
}

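// Take a buffer from the pool's free list, expanding the pool if the list is
// empty. The buffer's tracker is synced against other GPUs; remaining tracker
// entries are either waited on or transferred to out_tracker when one is
// supplied.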
NV_STATUS uvm_conf_computing_dma_buffer_alloc(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool,
                                              uvm_conf_computing_dma_buffer_t **dma_buffer_out,
                                              uvm_tracker_t *out_tracker)
{
    uvm_conf_computing_dma_buffer_t *dma_buffer = NULL;
    NV_STATUS status;

    UVM_ASSERT(dma_buffer_pool->num_dma_buffers > 0);

    // TODO: Bug 3385623: Heuristically expand DMA memory pool
    uvm_mutex_lock(&dma_buffer_pool->lock);
    if (list_empty(&dma_buffer_pool->free_dma_buffers)) {
        status = dma_buffer_pool_expand_locked(dma_buffer_pool);

        if (status != NV_OK) {
            uvm_mutex_unlock(&dma_buffer_pool->lock);
            return status;
        }
    }

    // We're guaranteed that at least one DMA stage buffer is available at this
    // point.
    dma_buffer = list_first_entry(&dma_buffer_pool->free_dma_buffers, uvm_conf_computing_dma_buffer_t, node);
    list_del_init(&dma_buffer->node);
    uvm_mutex_unlock(&dma_buffer_pool->lock);

    status = uvm_tracker_wait_for_other_gpus(&dma_buffer->tracker, dma_buffer->alloc->dma_owner);
    if (status != NV_OK)
        goto error;

    if (out_tracker)
        status = uvm_tracker_add_tracker_safe(out_tracker, &dma_buffer->tracker);
    else
        status = uvm_tracker_wait(&dma_buffer->tracker);

    if (status != NV_OK)
        goto error;

    uvm_page_mask_zero(&dma_buffer->encrypted_page_mask);
    *dma_buffer_out = dma_buffer;

    return status;

error:
    uvm_tracker_deinit(&dma_buffer->tracker);
    uvm_conf_computing_dma_buffer_free(dma_buffer_pool, dma_buffer, NULL);
    return status;
}

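// Return a buffer to the pool. Any pending work in the optional tracker is
// added to the buffer's tracker so the buffer is not reused before that work
// completes.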
void uvm_conf_computing_dma_buffer_free(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool,
                                        uvm_conf_computing_dma_buffer_t *dma_buffer,
                                        uvm_tracker_t *tracker)
{
    NV_STATUS status;

    if (!dma_buffer)
        return;

    UVM_ASSERT(dma_buffer_pool->num_dma_buffers > 0);

    uvm_tracker_remove_completed(&dma_buffer->tracker);
    if (tracker) {
        uvm_tracker_remove_completed(tracker);
        status = uvm_tracker_add_tracker_safe(&dma_buffer->tracker, tracker);
        if (status != NV_OK)
            UVM_ASSERT(status == uvm_global_get_status());
    }

    uvm_mutex_lock(&dma_buffer_pool->lock);
    dma_buffer_pool_add(dma_buffer_pool, dma_buffer);
    uvm_mutex_unlock(&dma_buffer_pool->lock);
}

static void dummy_iv_mem_deinit(uvm_gpu_t *gpu)
{
    uvm_mem_free(gpu->conf_computing.iv_mem);
}

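// Allocate GPU-mapped sysmem large enough to hold a single UvmCslIv, used as a
// dummy IV target. Only needed in HCC mode.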
static NV_STATUS dummy_iv_mem_init(uvm_gpu_t *gpu)
{
    NV_STATUS status;

    if (!uvm_conf_computing_mode_is_hcc(gpu))
        return NV_OK;

    status = uvm_mem_alloc_sysmem_dma(sizeof(UvmCslIv), gpu, NULL, &gpu->conf_computing.iv_mem);
    if (status != NV_OK)
        return status;

    status = uvm_mem_map_gpu_kernel(gpu->conf_computing.iv_mem, gpu);
    if (status != NV_OK)
        goto error;

    return NV_OK;

error:
    dummy_iv_mem_deinit(gpu);
    return status;
}

NV_STATUS uvm_conf_computing_gpu_init(uvm_gpu_t *gpu)
{
    NV_STATUS status;

    if (!g_uvm_global.conf_computing_enabled)
        return NV_OK;

    status = conf_computing_dma_buffer_pool_init(&gpu->conf_computing.dma_buffer_pool);
    if (status != NV_OK)
        return status;

    status = dummy_iv_mem_init(gpu);
    if (status != NV_OK)
        goto error;

    if (uvm_enable_builtin_tests && uvm_conf_computing_channel_iv_rotation_limit == UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_DEFAULT)
        uvm_conf_computing_channel_iv_rotation_limit = UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_TESTS;

    if (uvm_conf_computing_channel_iv_rotation_limit < UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_MIN ||
        uvm_conf_computing_channel_iv_rotation_limit > UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_MAX) {
        UVM_ERR_PRINT("Value of uvm_conf_computing_channel_iv_rotation_limit: %lu is outside of the safe "
                      "range: <%lu, %lu>. Using the default value instead (%lu)\n",
                      uvm_conf_computing_channel_iv_rotation_limit,
                      UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_MIN,
                      UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_MAX,
                      UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_DEFAULT);
        uvm_conf_computing_channel_iv_rotation_limit = UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_DEFAULT;
    }

    return NV_OK;

error:
    uvm_conf_computing_gpu_deinit(gpu);
    return status;
}

void uvm_conf_computing_gpu_deinit(uvm_gpu_t *gpu)
{
    dummy_iv_mem_deinit(gpu);
    conf_computing_dma_buffer_pool_deinit(&gpu->conf_computing.dma_buffer_pool);
}

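// Record a GPU encryption with the channel's CSL context by advancing the
// decryption IV, and return in 'iv' the value the CPU must later use to
// decrypt that data.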
void uvm_conf_computing_log_gpu_encryption(uvm_channel_t *channel, UvmCslIv *iv)
{
    NV_STATUS status;

    uvm_mutex_lock(&channel->csl.ctx_lock);
    status = nvUvmInterfaceCslIncrementIv(&channel->csl.ctx, UVM_CSL_OPERATION_DECRYPT, 1, iv);
    uvm_mutex_unlock(&channel->csl.ctx_lock);

    // IV rotation is done preemptively as needed, so the above
    // call cannot return failure.
    UVM_ASSERT(status == NV_OK);
}

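// Reserve the next CPU encryption IV on the channel's CSL context and return
// it in 'iv'.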
void uvm_conf_computing_acquire_encryption_iv(uvm_channel_t *channel, UvmCslIv *iv)
{
    NV_STATUS status;

    uvm_mutex_lock(&channel->csl.ctx_lock);
    status = nvUvmInterfaceCslIncrementIv(&channel->csl.ctx, UVM_CSL_OPERATION_ENCRYPT, 1, iv);
    uvm_mutex_unlock(&channel->csl.ctx_lock);

    // IV rotation is done preemptively as needed, so the above
    // call cannot return failure.
    UVM_ASSERT(status == NV_OK);
}

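// Encrypt 'size' bytes from src_plain into dst_cipher using the channel's CSL
// context, writing the authentication tag to auth_tag_buffer. encrypt_iv is
// typically obtained via uvm_conf_computing_acquire_encryption_iv().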
void uvm_conf_computing_cpu_encrypt(uvm_channel_t *channel,
                                    void *dst_cipher,
                                    const void *src_plain,
                                    UvmCslIv *encrypt_iv,
                                    size_t size,
                                    void *auth_tag_buffer)
{
    NV_STATUS status;

    UVM_ASSERT(size);

    uvm_mutex_lock(&channel->csl.ctx_lock);
    status = nvUvmInterfaceCslEncrypt(&channel->csl.ctx,
                                      size,
                                      (NvU8 const *) src_plain,
                                      encrypt_iv,
                                      (NvU8 *) dst_cipher,
                                      (NvU8 *) auth_tag_buffer);
    uvm_mutex_unlock(&channel->csl.ctx_lock);

    // IV rotation is done preemptively as needed, so the above
    // call cannot return failure.
    UVM_ASSERT(status == NV_OK);
}

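// Decrypt and authenticate 'size' bytes of GPU-produced ciphertext into
// dst_plain, using the IV captured via uvm_conf_computing_log_gpu_encryption()
// for the corresponding encryption.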
NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel,
                                         void *dst_plain,
                                         const void *src_cipher,
                                         const UvmCslIv *src_iv,
                                         size_t size,
                                         const void *auth_tag_buffer)
{
    NV_STATUS status;

    // The CSL context associated with a channel can be used by multiple
    // threads. The IV sequence is thus guaranteed only while the channel is
    // "locked for push". The channel/push lock is released in
    // "uvm_channel_end_push", and at that time the GPU encryption operations
    // have not executed yet. Therefore the caller has to use
    // "uvm_conf_computing_log_gpu_encryption" to explicitly store the IVs
    // needed to perform CPU decryption, and pass those IVs to this function
    // after the push that did the encryption completes.
    UVM_ASSERT(src_iv);

    uvm_mutex_lock(&channel->csl.ctx_lock);
    status = nvUvmInterfaceCslDecrypt(&channel->csl.ctx,
                                      size,
                                      (const NvU8 *) src_cipher,
                                      src_iv,
                                      NV_U32_MAX,
                                      (NvU8 *) dst_plain,
                                      NULL,
                                      0,
                                      (const NvU8 *) auth_tag_buffer);
    uvm_mutex_unlock(&channel->csl.ctx_lock);

    return status;
}

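// Decrypt a single replayable fault buffer entry using the fault buffer's CSL
// context. 'valid' is passed to CSL as additional authenticated data.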
NV_STATUS uvm_conf_computing_fault_decrypt(uvm_parent_gpu_t *parent_gpu,
                                           void *dst_plain,
                                           const void *src_cipher,
                                           const void *auth_tag_buffer,
                                           NvU8 valid)
{
    NV_STATUS status;
    NvU32 fault_entry_size = parent_gpu->fault_buffer_hal->entry_size(parent_gpu);
    UvmCslContext *csl_context = &parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx;

    // There is no dedicated lock for the CSL context associated with replayable
    // faults. The mutual exclusion required by the RM CSL API is enforced by
    // relying on the GPU replayable service lock (ISR lock), since fault
    // decryption is invoked as part of fault servicing.
    UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock));

    UVM_ASSERT(g_uvm_global.conf_computing_enabled);

    status = nvUvmInterfaceCslLogEncryption(csl_context, UVM_CSL_OPERATION_DECRYPT, fault_entry_size);

    // Informing RM of an encryption/decryption should not fail
    UVM_ASSERT(status == NV_OK);

    status = nvUvmInterfaceCslDecrypt(csl_context,
                                      fault_entry_size,
                                      (const NvU8 *) src_cipher,
                                      NULL,
                                      NV_U32_MAX,
                                      (NvU8 *) dst_plain,
                                      &valid,
                                      sizeof(valid),
                                      (const NvU8 *) auth_tag_buffer);

    if (status != NV_OK) {
        UVM_ERR_PRINT("nvUvmInterfaceCslDecrypt() failed: %s, GPU %s\n",
                      nvstatusToString(status),
                      uvm_parent_gpu_name(parent_gpu));
    }

    return status;
}

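// Advance the decryption IV of the replayable fault CSL context by one entry
// without decrypting anything, keeping the CPU-side IV in sync with the
// GPU-side encryption counter.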
void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu)
{
    NV_STATUS status;
    NvU32 fault_entry_size = parent_gpu->fault_buffer_hal->entry_size(parent_gpu);
    UvmCslContext *csl_context = &parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx;

    // See comment in uvm_conf_computing_fault_decrypt
    UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock));

    UVM_ASSERT(g_uvm_global.conf_computing_enabled);

    status = nvUvmInterfaceCslLogEncryption(csl_context, UVM_CSL_OPERATION_DECRYPT, fault_entry_size);

    // Informing RM of an encryption/decryption should not fail
    UVM_ASSERT(status == NV_OK);

    status = nvUvmInterfaceCslIncrementIv(csl_context, UVM_CSL_OPERATION_DECRYPT, 1, NULL);

    UVM_ASSERT(status == NV_OK);
}

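// Query how many encryption and decryption operations remain on the channel's
// CSL context before IV rotation is required.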
void uvm_conf_computing_query_message_pools(uvm_channel_t *channel,
                                            NvU64 *remaining_encryptions,
                                            NvU64 *remaining_decryptions)
{
    NV_STATUS status;

    UVM_ASSERT(channel);
    UVM_ASSERT(remaining_encryptions);
    UVM_ASSERT(remaining_decryptions);

    uvm_mutex_lock(&channel->csl.ctx_lock);
    status = nvUvmInterfaceCslQueryMessagePool(&channel->csl.ctx, UVM_CSL_OPERATION_ENCRYPT, remaining_encryptions);
    UVM_ASSERT(status == NV_OK);
    UVM_ASSERT(*remaining_encryptions <= NV_U32_MAX);

    status = nvUvmInterfaceCslQueryMessagePool(&channel->csl.ctx, UVM_CSL_OPERATION_DECRYPT, remaining_decryptions);
    UVM_ASSERT(status == NV_OK);
    UVM_ASSERT(*remaining_decryptions <= NV_U32_MAX);

    // LCIC channels never use CPU encrypt/GPU decrypt
    if (uvm_channel_is_lcic(channel))
        UVM_ASSERT(*remaining_encryptions == NV_U32_MAX);

    uvm_mutex_unlock(&channel->csl.ctx_lock);
}

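// Rotate the channel's encryption and/or decryption IVs if the corresponding
// remaining count has dropped to 'limit' or below. All in-flight pushes are
// waited on before rotating.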
static NV_STATUS uvm_conf_computing_rotate_channel_ivs_below_limit_internal(uvm_channel_t *channel, NvU64 limit)
{
    NV_STATUS status = NV_OK;
    NvU64 remaining_encryptions, remaining_decryptions;
    bool rotate_encryption_iv, rotate_decryption_iv;

    UVM_ASSERT(uvm_channel_is_locked_for_push(channel) ||
               (uvm_channel_is_lcic(channel) && uvm_channel_manager_is_wlc_ready(channel->pool->manager)));

    uvm_conf_computing_query_message_pools(channel, &remaining_encryptions, &remaining_decryptions);

    // Ignore the decryption limit for SEC2, since only CE channels support
    // GPU encrypt/CPU decrypt. However, RM reports _some_ decrementing
    // value for the SEC2 decryption counter.
    rotate_decryption_iv = (remaining_decryptions <= limit) && uvm_channel_is_ce(channel);
    rotate_encryption_iv = remaining_encryptions <= limit;

    if (!rotate_encryption_iv && !rotate_decryption_iv)
        return NV_OK;

    // Wait for all in-flight pushes. The caller needs to guarantee that there
    // are no concurrent pushes created, e.g. by only calling rotate after
    // a channel is locked_for_push.
    status = uvm_channel_wait(channel);
    if (status != NV_OK)
        return status;

    uvm_mutex_lock(&channel->csl.ctx_lock);

    if (rotate_encryption_iv)
        status = nvUvmInterfaceCslRotateIv(&channel->csl.ctx, UVM_CSL_OPERATION_ENCRYPT);

    if (status == NV_OK && rotate_decryption_iv)
        status = nvUvmInterfaceCslRotateIv(&channel->csl.ctx, UVM_CSL_OPERATION_DECRYPT);

    uvm_mutex_unlock(&channel->csl.ctx_lock);

    // Change the error to out of resources if the available IVs are running
    // too low.
    if (status == NV_ERR_STATE_IN_USE &&
        (remaining_encryptions < UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_MIN ||
         remaining_decryptions < UVM_CONF_COMPUTING_IV_REMAINING_LIMIT_MIN))
        return NV_ERR_INSUFFICIENT_RESOURCES;

    return status;
}

NV_STATUS uvm_conf_computing_rotate_channel_ivs_below_limit(uvm_channel_t *channel, NvU64 limit, bool retry_if_busy)
{
    NV_STATUS status;

    do {
        status = uvm_conf_computing_rotate_channel_ivs_below_limit_internal(channel, limit);
    } while (retry_if_busy && status == NV_ERR_STATE_IN_USE);

    // Hide "busy" error. The rotation will be retried at the next opportunity.
    if (!retry_if_busy && status == NV_ERR_STATE_IN_USE)
        status = NV_OK;

    return status;
}

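// Rotate IVs once the remaining count drops to the module parameter
// uvm_conf_computing_channel_iv_rotation_limit. The plain variant hides the
// "busy" error; the _retry_busy variant keeps retrying until rotation succeeds
// or fails for another reason.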
NV_STATUS uvm_conf_computing_maybe_rotate_channel_ivs(uvm_channel_t *channel)
{
    return uvm_conf_computing_rotate_channel_ivs_below_limit(channel, uvm_conf_computing_channel_iv_rotation_limit, false);
}

NV_STATUS uvm_conf_computing_maybe_rotate_channel_ivs_retry_busy(uvm_channel_t *channel)
{
    return uvm_conf_computing_rotate_channel_ivs_below_limit(channel, uvm_conf_computing_channel_iv_rotation_limit, true);
}