nvme/rdma: Make RDMA memory map per-pd again

The RDMA memory map needs to be per-protection
domain, not per NVMe controller. Otherwise, when
an NVMe controller is removed, the memory map may
be left holding a dangling pointer to the detached
controller.

Change-Id: I0c5bd2172daee0c70efb40eab784839e0cde8bc4
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/432590
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Ben Walker 2018-11-09 10:31:15 -07:00 committed by Jim Harris
parent a817ccf571
commit bf1a82cf5a
2 changed files with 16 additions and 33 deletions
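Concretely: the mem map's callback context used to point at the nvme_rdma_ctrlr, which is freed on detach, while the map itself is shared and ref-counted. Keying the map by struct ibv_pd instead gives it a context that outlives any one controller. A minimal, hypothetical C sketch of that keying (not SPDK's actual code; struct ibv_pd is only an opaque key here, so a forward declaration suffices):

/* Hypothetical sketch of the per-pd keying this commit restores. */
#include <stdlib.h>
#include <sys/queue.h>

struct ibv_pd;

struct mr_map {
	struct ibv_pd *pd;		/* key: the pd outlives any one controller */
	int ref;			/* how many qpairs share this map */
	LIST_ENTRY(mr_map) link;
};

static LIST_HEAD(, mr_map) g_mr_maps = LIST_HEAD_INITIALIZER(g_mr_maps);

/* Find the map for this pd, creating and ref-counting it on first use.
 * Because the key is the pd and not the controller, detaching a
 * controller can never leave the map holding a dangling pointer. */
static struct mr_map *
mr_map_get(struct ibv_pd *pd)
{
	struct mr_map *m;

	LIST_FOREACH(m, &g_mr_maps, link) {
		if (m->pd == pd) {
			m->ref++;
			return m;
		}
	}

	m = calloc(1, sizeof(*m));
	if (m != NULL) {
		m->pd = pd;
		m->ref = 1;
		LIST_INSERT_HEAD(&g_mr_maps, m, link);
	}
	return m;
}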

include/spdk/nvme.h

@@ -2048,36 +2048,27 @@ struct ibv_mr;
  * RDMA Transport Hooks
  */
 struct spdk_nvme_rdma_hooks {
-	/**
-	 * \brief Get a transport id specific context to be passed to
-	 * the other hooks.
-	 *
-	 * \param trid the transport id
-	 *
-	 * \return ctx to be passed to the other hooks
-	 */
-	void *(*get_ctx)(const struct spdk_nvme_transport_id *trid);
-
 	/**
 	 * \brief Get an InfiniBand Verbs protection domain.
 	 *
-	 * \param ctx Context returned from get_hook_ctx.
+	 * \param trid the transport id
 	 * \param verbs Infiniband verbs context
 	 *
 	 * \return pd of the nvme ctrlr
 	 */
-	struct ibv_pd *(*get_ibv_pd)(void *ctx, struct ibv_context *verbs);
+	struct ibv_pd *(*get_ibv_pd)(const struct spdk_nvme_transport_id *trid,
+				     struct ibv_context *verbs);
 
 	/**
 	 * \brief Get an InfiniBand Verbs memory region for a buffer.
 	 *
-	 * \param ctx Context returned from get_hook_ctx.
+	 * \param pd The protection domain returned from get_ibv_pd
 	 * \param buf Memory buffer for which an rkey should be returned.
 	 * \param size size of buf
 	 *
 	 * \return Infiniband remote key (rkey) for this buf
 	 */
-	uint64_t (*get_rkey)(void *ctx, void *buf, size_t size);
+	uint64_t (*get_rkey)(struct ibv_pd *pd, void *buf, size_t size);
 };
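For illustration, a sketch of what an application-side implementation of the reworked hooks might look like. my_find_pd() and my_rkey_for() are hypothetical stand-ins for whatever registration bookkeeping the application already keeps:

#include "spdk/nvme.h"

/* Hypothetical lookups into the application's own RDMA state. */
struct ibv_pd *my_find_pd(const struct spdk_nvme_transport_id *trid,
			  struct ibv_context *verbs);
uint64_t my_rkey_for(struct ibv_pd *pd, void *buf, size_t size);

static struct ibv_pd *
my_get_ibv_pd(const struct spdk_nvme_transport_id *trid, struct ibv_context *verbs)
{
	/* Hand back a pre-created protection domain for this device. */
	return my_find_pd(trid, verbs);
}

static uint64_t
my_get_rkey(struct ibv_pd *pd, void *buf, size_t size)
{
	/* The buffer is assumed to be registered against pd already;
	 * return the rkey from that existing registration. */
	return my_rkey_for(pd, buf, size);
}

static struct spdk_nvme_rdma_hooks g_my_hooks = {
	.get_ibv_pd	= my_get_ibv_pd,
	.get_rkey	= my_get_rkey,
};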
@@ -2088,6 +2079,8 @@ struct spdk_nvme_rdma_hooks {
  * library to create protection domains and register memory. This
  * is a mechanism to subvert that and use an existing registration.
  *
+ * This function may only be called one time per process.
+ *
  * \param hooks for initializing global hooks
  */
 void spdk_nvme_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks);
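Given the one-call-per-process rule added above, registration is a single early call, before the first controller is probed; g_my_hooks is the hypothetical struct sketched earlier:

spdk_nvme_rdma_init_hooks(&g_my_hooks);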

lib/nvme/nvme_rdma.c

@@ -85,8 +85,6 @@ struct spdk_nvme_rdma_mr_map {
 struct nvme_rdma_ctrlr {
 	struct spdk_nvme_ctrlr		ctrlr;
 
-	struct spdk_nvme_rdma_hooks	hooks;
-	void				*hook_ctx;
 
 	struct ibv_pd			*pd;
 };
@@ -256,8 +254,8 @@ nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
 	}
 
 	rctrlr = nvme_rdma_ctrlr(rqpair->qpair.ctrlr);
-	if (rctrlr->hooks.get_ibv_pd) {
-		rctrlr->pd = rctrlr->hooks.get_ibv_pd(rctrlr->hook_ctx, rqpair->cm_id->verbs);
+	if (g_nvme_hooks.get_ibv_pd) {
+		rctrlr->pd = g_nvme_hooks.get_ibv_pd(&rctrlr->ctrlr.trid, rqpair->cm_id->verbs);
 	} else {
 		rctrlr->pd = NULL;
 	}
@@ -626,15 +624,13 @@ nvme_rdma_mr_map_notify(void *cb_ctx, struct spdk_mem_map *map,
 			enum spdk_mem_map_notify_action action,
 			void *vaddr, size_t size)
 {
-	struct nvme_rdma_ctrlr *rctrlr = cb_ctx;
-	struct ibv_pd *pd;
+	struct ibv_pd *pd = cb_ctx;
 	struct ibv_mr *mr;
 	int rc;
 
 	switch (action) {
 	case SPDK_MEM_MAP_NOTIFY_REGISTER:
-		if (!rctrlr->hooks.get_rkey) {
-			pd = rctrlr->pd;
+		if (!g_nvme_hooks.get_rkey) {
 			mr = ibv_reg_mr(pd, vaddr, size,
 					IBV_ACCESS_LOCAL_WRITE |
 					IBV_ACCESS_REMOTE_READ |
@@ -647,11 +643,11 @@ nvme_rdma_mr_map_notify(void *cb_ctx, struct spdk_mem_map *map,
 			}
 		} else {
 			rc = spdk_mem_map_set_translation(map, (uint64_t)vaddr, size,
-							  rctrlr->hooks.get_rkey(rctrlr->hook_ctx, vaddr, size));
+							  g_nvme_hooks.get_rkey(pd, vaddr, size));
 		}
 		break;
 	case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
-		if (!rctrlr->hooks.get_rkey) {
+		if (!g_nvme_hooks.get_rkey) {
 			mr = (struct ibv_mr *)spdk_mem_map_translate(map, (uint64_t)vaddr, NULL);
 			if (mr) {
 				ibv_dereg_mr(mr);
@@ -697,8 +693,7 @@ nvme_rdma_register_mem(struct nvme_rdma_qpair *rqpair)
 	mr_map->ref = 1;
 	mr_map->pd = pd;
 
-	mr_map->map = spdk_mem_map_alloc((uint64_t)NULL, &nvme_rdma_map_ops,
-					 nvme_rdma_ctrlr(rqpair->qpair.ctrlr));
+	mr_map->map = spdk_mem_map_alloc((uint64_t)NULL, &nvme_rdma_map_ops, pd);
 	if (mr_map->map == NULL) {
 		SPDK_ERRLOG("spdk_mem_map_alloc() failed\n");
 		free(mr_map);
@@ -943,7 +938,7 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
 
 	requested_size = req->payload_size;
-	if (!nvme_rdma_ctrlr(rqpair->qpair.ctrlr)->hooks.get_rkey) {
+	if (!g_nvme_hooks.get_rkey) {
 		mr = (struct ibv_mr *)spdk_mem_map_translate(rqpair->mr_map->map, (uint64_t)payload,
 				&requested_size);
@@ -1013,7 +1008,7 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
 		sge_length = spdk_min(remaining_size, sge_length);
 		mr_length = sge_length;
 
-		if (!nvme_rdma_ctrlr(rqpair->qpair.ctrlr)->hooks.get_rkey) {
+		if (!g_nvme_hooks.get_rkey) {
 			mr = (struct ibv_mr *)spdk_mem_map_translate(rqpair->mr_map->map,
 					(uint64_t)virt_addr,
 					&mr_length);
@@ -1409,11 +1404,6 @@ struct spdk_nvme_ctrlr *nvme_rdma_ctrlr_construct(const struct spdk_nvme_transpo
 	nvme_ctrlr_init_cap(&rctrlr->ctrlr, &cap, &vs);
 
-	if (g_nvme_hooks.get_ctx) {
-		rctrlr->hooks = g_nvme_hooks;
-		rctrlr->hook_ctx = rctrlr->hooks.get_ctx(&rctrlr->ctrlr.trid);
-	}
-
 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "successfully initialized the nvmf ctrlr\n");
 
 	return &rctrlr->ctrlr;
 }