dma: Update memory domain context structure
Instead of a union with domain type specific parameters, store an opaque pointer to user context. Depending on the memory domain type, this context can be cast to a specific struct, e.g. to spdk_memory_domain_rdma_ctx for RDMA memory domains. This change provides more flexibility to applications to create and manage custom memory domains.

Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Change-Id: Ib0a8297de80773d86edc9849beb4cbc693ef5414
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/9778
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
commit 549bcdc0a4
parent 6c64d64e48
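A minimal sketch (not part of this commit) of how a caller would use the new opaque context, assuming the declarations shown in the header diff below land in spdk/dma.h; my_rdma_domain, my_rdma_domain_init and the "my_rdma_device" id string are made-up names for illustration. It mirrors the nvme_rdma change further down: the spdk_memory_domain_rdma_ctx is kept next to the domain so the pointer stored in user_ctx stays valid while the domain is in use.

#include "spdk/dma.h"

/* Illustrative wrapper; not SPDK code */
struct my_rdma_domain {
	struct spdk_memory_domain *domain;
	struct spdk_memory_domain_rdma_ctx rdma_ctx;
};

static int
my_rdma_domain_init(struct my_rdma_domain *d, void *ibv_pd)
{
	struct spdk_memory_domain_ctx ctx;

	/* rdma_ctx lives as long as the wrapper, so user_ctx stays valid */
	d->rdma_ctx.size = sizeof(d->rdma_ctx);
	d->rdma_ctx.ibv_pd = ibv_pd;

	/* ctx itself is copied by spdk_memory_domain_create(), so it can live on the stack */
	ctx.size = sizeof(ctx);
	ctx.user_ctx = &d->rdma_ctx;

	return spdk_memory_domain_create(&d->domain, SPDK_DMA_DEVICE_TYPE_RDMA,
					 &ctx, "my_rdma_device");
}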
@@ -162,15 +162,21 @@ typedef int (*spdk_memory_domain_translate_memory_cb)(struct spdk_memory_domain
 		struct spdk_memory_domain_translation_ctx *dst_domain_ctx, void *addr, size_t len,
 		struct spdk_memory_domain_translation_result *result);
 
+/** Context of memory domain of RDMA type */
+struct spdk_memory_domain_rdma_ctx {
+	/** size of this structure in bytes */
+	size_t size;
+	/** Opaque handle for ibv_pd */
+	void *ibv_pd;
+};
+
 struct spdk_memory_domain_ctx {
 	/** size of this structure in bytes */
 	size_t size;
-	union {
-		struct {
-			/* Opaque handle for ibv_pd */
-			void *ibv_pd;
-		} rdma;
-	};
+	/** Optional user context
+	 * Depending on memory domain type, this pointer can be cast to a specific structure,
+	 * e.g. to spdk_memory_domain_rdma_ctx structure for RDMA memory domain */
+	void *user_ctx;
 };
 
 /**
@@ -183,7 +189,8 @@ struct spdk_memory_domain_ctx {
  *
  * \param domain Double pointer to memory domain to be allocated by this function
  * \param type Type of the DMA device which can access this memory domain
- * \param ctx Optional memory domain context
+ * \param ctx Optional memory domain context to be copied by this function. Later \b ctx can be
+ * retrieved using \ref spdk_memory_domain_get_context function
  * \param id String identifier representing the DMA device that can access this memory domain.
  * \return 0 on success, negated errno on failure
  */
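For the consumer side, a hedged sketch (again not part of this commit, under the same assumptions as the example above) of how the opaque user_ctx is meant to be interpreted: check the DMA device type first, then cast, using only spdk_memory_domain_get_context() and spdk_memory_domain_get_dma_device_type() as exercised by the unit test below.

static void *
get_ibv_pd_from_domain(struct spdk_memory_domain *domain)
{
	struct spdk_memory_domain_ctx *ctx;
	struct spdk_memory_domain_rdma_ctx *rdma_ctx;

	/* user_ctx is only meaningful once the domain type is known */
	if (spdk_memory_domain_get_dma_device_type(domain) != SPDK_DMA_DEVICE_TYPE_RDMA) {
		return NULL;
	}

	ctx = spdk_memory_domain_get_context(domain);
	if (ctx == NULL || ctx->user_ctx == NULL) {
		return NULL;
	}

	rdma_ctx = ctx->user_ctx;
	return rdma_ctx->ibv_pd;
}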
@@ -112,6 +112,7 @@ struct nvme_rdma_memory_domain {
 	uint32_t ref;
 	struct ibv_pd *pd;
 	struct spdk_memory_domain *domain;
+	struct spdk_memory_domain_rdma_ctx rdma_ctx;
 };
 
 enum nvme_rdma_wr_type {
@@ -318,7 +319,7 @@ static struct nvme_rdma_memory_domain *
 nvme_rdma_get_memory_domain(struct ibv_pd *pd)
 {
 	struct nvme_rdma_memory_domain *domain = NULL;
-	struct spdk_memory_domain_ctx dev_ctx;
+	struct spdk_memory_domain_ctx ctx;
 	int rc;
 
 	pthread_mutex_lock(&g_memory_domains_lock);
@@ -338,10 +339,12 @@ nvme_rdma_get_memory_domain(struct ibv_pd *pd)
 		return NULL;
 	}
 
-	dev_ctx.size = sizeof(dev_ctx);
-	dev_ctx.rdma.ibv_pd = pd;
+	domain->rdma_ctx.size = sizeof(domain->rdma_ctx);
+	domain->rdma_ctx.ibv_pd = pd;
+	ctx.size = sizeof(ctx);
+	ctx.user_ctx = &domain->rdma_ctx;
 
-	rc = spdk_memory_domain_create(&domain->domain, SPDK_DMA_DEVICE_TYPE_RDMA, &dev_ctx,
+	rc = spdk_memory_domain_create(&domain->domain, SPDK_DMA_DEVICE_TYPE_RDMA, &ctx,
 			SPDK_RDMA_DMA_DEVICE);
 	if (rc) {
 		SPDK_ERRLOG("Failed to create memory domain\n");
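Note the design choice in the hunks above: rdma_ctx moves from a stack variable (the old dev_ctx) into the long-lived nvme_rdma_memory_domain. The updated doc comment says only the spdk_memory_domain_ctx itself is copied by spdk_memory_domain_create(), so whatever user_ctx points at presumably has to remain valid for the lifetime of the domain, which is why the RDMA context is embedded next to the domain pointer rather than kept on the stack.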
@@ -81,9 +81,8 @@ test_dma(void)
 	void *test_ibv_pd = (void *)0xdeadbeaf;
 	struct iovec src_iov = {}, dst_iov = {};
 	struct spdk_memory_domain *domain = NULL, *domain_2 = NULL, *domain_3 = NULL;
-	struct spdk_memory_domain_ctx memory_domain_ctx = {
-		.rdma = { .ibv_pd = test_ibv_pd }
-	};
+	struct spdk_memory_domain_rdma_ctx rdma_ctx = { .ibv_pd = test_ibv_pd };
+	struct spdk_memory_domain_ctx memory_domain_ctx = { .user_ctx = &rdma_ctx };
 	struct spdk_memory_domain_ctx *stored_memory_domain_ctx;
 	struct spdk_memory_domain_translation_result translation_result;
 	const char *id;
@@ -107,7 +106,9 @@ test_dma(void)
 	/* Get context. Expect pass */
 	stored_memory_domain_ctx = spdk_memory_domain_get_context(domain);
 	SPDK_CU_ASSERT_FATAL(stored_memory_domain_ctx != NULL);
-	CU_ASSERT(stored_memory_domain_ctx->rdma.ibv_pd == test_ibv_pd);
+	CU_ASSERT(stored_memory_domain_ctx->user_ctx == &rdma_ctx);
+	CU_ASSERT(((struct spdk_memory_domain_rdma_ctx *)stored_memory_domain_ctx->user_ctx)->ibv_pd ==
+		  rdma_ctx.ibv_pd);
 
 	/* Get DMA device type. Expect pass */
 	CU_ASSERT(spdk_memory_domain_get_dma_device_type(domain) == SPDK_DMA_DEVICE_TYPE_RDMA);