lib/nvme: remove extra transport functions.

Now that we have a more flexible function table strategy for
transports, we can get rid of some of the wrapping we were doing
to match the macro definitions exactly.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: I12c868babfa7bd27dc8ed5e86d35e179f8ec984f
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478874
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
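
Context for the diff below: the wrapper functions removed here existed only so the RDMA transport exported symbols whose names matched the old macro-based dispatch exactly. With a per-transport function table, the renamed nvme_rdma_* functions can be wired into that table directly. The following is a minimal sketch of that pattern; the ops struct, its field names, and the table are illustrative assumptions rather than SPDK's actual definitions, while the three prototypes mirror the new signatures introduced in this patch.

/*
 * Illustrative sketch only (not part of the patch). The ops struct and
 * table names are hypothetical; only the three prototypes match the new
 * signatures from this commit.
 */
struct spdk_nvme_ctrlr;
struct spdk_nvme_qpair;

int nvme_rdma_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
void nvme_rdma_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
int nvme_rdma_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);

/* Hypothetical per-transport ops table illustrating the dispatch pattern. */
struct nvme_transport_ops_sketch {
	int (*ctrlr_connect_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
	void (*ctrlr_disconnect_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
	int (*ctrlr_delete_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
};

/*
 * The transport can fill the table with its own functions directly, so no
 * forwarding wrappers are needed to make names line up with a fixed,
 * macro-generated name.
 */
static const struct nvme_transport_ops_sketch g_rdma_ops_sketch = {
	.ctrlr_connect_qpair	= nvme_rdma_ctrlr_connect_qpair,
	.ctrlr_disconnect_qpair	= nvme_rdma_ctrlr_disconnect_qpair,
	.ctrlr_delete_io_qpair	= nvme_rdma_ctrlr_delete_io_qpair,
};

Presumably the register-access wrappers removed below disappear for the same reason: a table entry can point at nvme_fabric_ctrlr_set_reg_4() and the other fabric helpers directly.
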
@@ -210,7 +210,8 @@ static const char *rdma_cm_event_str[] = {
 static LIST_HEAD(, spdk_nvme_rdma_mr_map) g_rdma_mr_maps = LIST_HEAD_INITIALIZER(&g_rdma_mr_maps);
 static pthread_mutex_t g_rdma_mr_maps_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-static int nvme_rdma_qpair_destroy(struct spdk_nvme_qpair *qpair);
+int nvme_rdma_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
+				    struct spdk_nvme_qpair *qpair);
 
 static inline struct nvme_rdma_qpair *
 nvme_rdma_qpair(struct spdk_nvme_qpair *qpair)
@@ -1072,18 +1073,18 @@ nvme_rdma_unregister_mem(struct nvme_rdma_qpair *rqpair)
 	pthread_mutex_unlock(&g_rdma_mr_maps_mutex);
 }
 
-static int
-nvme_rdma_qpair_connect(struct nvme_rdma_qpair *rqpair)
+int
+nvme_rdma_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
 	struct sockaddr_storage dst_addr;
 	struct sockaddr_storage src_addr;
 	bool src_addr_specified;
 	int rc;
-	struct spdk_nvme_ctrlr *ctrlr;
 	struct nvme_rdma_ctrlr *rctrlr;
+	struct nvme_rdma_qpair *rqpair;
 	int family;
 
-	ctrlr = rqpair->qpair.ctrlr;
+	rqpair = nvme_rdma_qpair(qpair);
 	rctrlr = nvme_rdma_ctrlr(ctrlr);
 	assert(rctrlr != NULL);
 
@@ -1614,15 +1615,15 @@ nvme_rdma_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
 	}
 
 	if (rc < 0) {
-		nvme_rdma_qpair_destroy(qpair);
+		nvme_rdma_ctrlr_delete_io_qpair(ctrlr, qpair);
 		return NULL;
 	}
 
 	return qpair;
 }
 
-static void
-nvme_rdma_qpair_disconnect(struct spdk_nvme_qpair *qpair)
+void
+nvme_rdma_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
 	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
 
@@ -1650,15 +1651,15 @@ nvme_rdma_qpair_disconnect(struct spdk_nvme_qpair *qpair)
 	}
 }
 
-static int
-nvme_rdma_qpair_destroy(struct spdk_nvme_qpair *qpair)
+int
+nvme_rdma_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
 	struct nvme_rdma_qpair *rqpair;
 
 	if (!qpair) {
 		return -1;
 	}
 
-	nvme_rdma_qpair_disconnect(qpair);
+	nvme_rdma_ctrlr_disconnect_qpair(ctrlr, qpair);
 	nvme_rdma_qpair_abort_reqs(qpair, 1);
 	nvme_qpair_deinit(qpair);
@@ -1687,14 +1688,6 @@ nvme_rdma_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
 	return 0;
 }
 
-/* This function must only be called while holding g_spdk_nvme_driver->lock */
-int
-nvme_rdma_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
-		     bool direct_connect)
-{
-	return nvme_fabric_ctrlr_scan(probe_ctx, direct_connect);
-}
-
 struct spdk_nvme_ctrlr *nvme_rdma_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
 		const struct spdk_nvme_ctrlr_opts *opts,
 		void *devhandle)
@@ -1813,7 +1806,7 @@ nvme_rdma_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 	struct nvme_rdma_cm_event_entry	*entry;
 
 	if (ctrlr->adminq) {
-		nvme_rdma_qpair_destroy(ctrlr->adminq);
+		nvme_rdma_ctrlr_delete_io_qpair(ctrlr, ctrlr->adminq);
 	}
 
 	STAILQ_FOREACH(entry, &rctrlr->pending_cm_events, link) {
@@ -1836,30 +1829,6 @@ nvme_rdma_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 	return 0;
 }
 
-int
-nvme_rdma_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
-{
-	return nvme_fabric_ctrlr_set_reg_4(ctrlr, offset, value);
-}
-
-int
-nvme_rdma_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
-{
-	return nvme_fabric_ctrlr_set_reg_8(ctrlr, offset, value);
-}
-
-int
-nvme_rdma_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
-{
-	return nvme_fabric_ctrlr_get_reg_4(ctrlr, offset, value);
-}
-
-int
-nvme_rdma_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
-{
-	return nvme_fabric_ctrlr_get_reg_8(ctrlr, offset, value);
-}
-
 int
 nvme_rdma_qpair_submit_request(struct spdk_nvme_qpair *qpair,
 			       struct nvme_request *req)
@@ -1890,24 +1859,6 @@ nvme_rdma_qpair_submit_request(struct spdk_nvme_qpair *qpair,
 	return nvme_rdma_qpair_queue_send_wr(rqpair, wr);
 }
 
-int
-nvme_rdma_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
-{
-	return nvme_rdma_qpair_destroy(qpair);
-}
-
-int
-nvme_rdma_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
-{
-	return nvme_rdma_qpair_connect(nvme_rdma_qpair(qpair));
-}
-
-void
-nvme_rdma_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
-{
-	nvme_rdma_qpair_disconnect(qpair);
-}
-
 int
 nvme_rdma_qpair_reset(struct spdk_nvme_qpair *qpair)
 {
@@ -2075,7 +2026,7 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
 fail:
 	/*
 	 * Since admin queues take the ctrlr_lock before entering this function,
-	 * we can call nvme_rdma_qpair_disconnect. For other qpairs we need
+	 * we can call nvme_rdma_ctrlr_disconnect_qpair. For other qpairs we need
 	 * to call the generic function which will take the lock for us.
 	 */
 	if (rc == IBV_WC_RETRY_EXC_ERR) {
@@ -2085,7 +2036,7 @@ fail:
 	}
 
 	if (nvme_qpair_is_admin_queue(qpair)) {
-		nvme_rdma_qpair_disconnect(qpair);
+		nvme_rdma_ctrlr_disconnect_qpair(qpair->ctrlr, qpair);
 	} else {
 		nvme_ctrlr_disconnect_qpair(qpair);
 	}