nvmf/rdma: Eliminate management channel

The management channel is a holdover from before poll groups were
introduced. All we need is a per-thread context for a set of connections,
and now that poll groups exist we can use them instead.

Change-Id: I1a91abf52dac6e77ea8505741519332548595c57
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/442430
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Ben Walker 2019-01-28 13:21:05 -07:00 committed by Jim Harris
parent 48834f0daa
commit 608d80a033
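
In short: the per-connection management channel existed only to hold the
queue of requests waiting for a data buffer. Since the poll group is already
the per-thread context, that queue moves onto struct spdk_nvmf_rdma_poll_group,
as the diff below shows. The stand-alone sketch that follows illustrates the
resulting ownership model with simplified stand-in types (rdma_poll_group,
rdma_request and the helper functions are hypothetical names used only for
illustration, not the actual SPDK definitions):

/* Simplified sketch: the per-thread poll group owns the queue of requests
 * that are waiting to obtain a data buffer, so no separate per-connection
 * management io_channel is needed. Stand-in types, not the real SPDK ones. */
#include <sys/queue.h>

struct rdma_request {
	TAILQ_ENTRY(rdma_request) link;
};

struct rdma_poll_group {
	/* Requests that are waiting to obtain a data buffer */
	TAILQ_HEAD(, rdma_request) pending_data_buf_queue;
};

static void
poll_group_init(struct rdma_poll_group *rgroup)
{
	TAILQ_INIT(&rgroup->pending_data_buf_queue);
}

static void
request_wait_for_buffer(struct rdma_poll_group *rgroup, struct rdma_request *req)
{
	/* Previously each qpair queued onto its own per-connection channel;
	 * now every qpair on the thread reaches one shared queue via its
	 * poller's group. */
	TAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, req, link);
}

int
main(void)
{
	struct rdma_poll_group group;
	struct rdma_request req;

	poll_group_init(&group);
	request_wait_for_buffer(&group, &req);

	return TAILQ_FIRST(&group.pending_data_buf_queue) == &req ? 0 : 1;
}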


@@ -332,10 +332,6 @@ struct spdk_nvmf_rdma_qpair {
 	TAILQ_ENTRY(spdk_nvmf_rdma_qpair) link;
-	/* Mgmt channel */
-	struct spdk_io_channel *mgmt_channel;
-	struct spdk_nvmf_rdma_mgmt_channel *ch;
 	/* IBV queue pair attributes: they are used to manage
 	 * qp state and recover from errors.
 	 */
@@ -371,6 +367,9 @@ struct spdk_nvmf_rdma_poller {
 struct spdk_nvmf_rdma_poll_group {
 	struct spdk_nvmf_transport_poll_group group;
+	/* Requests that are waiting to obtain a data buffer */
+	TAILQ_HEAD(, spdk_nvmf_rdma_request) pending_data_buf_queue;
 	TAILQ_HEAD(, spdk_nvmf_rdma_poller) pollers;
 };
@@ -410,11 +409,6 @@ struct spdk_nvmf_rdma_transport {
 	TAILQ_HEAD(, spdk_nvmf_rdma_port) ports;
 };
-struct spdk_nvmf_rdma_mgmt_channel {
-	/* Requests that are waiting to obtain a data buffer */
-	TAILQ_HEAD(, spdk_nvmf_rdma_request) pending_data_buf_queue;
-};
 static inline void
 spdk_nvmf_rdma_qpair_inc_refcnt(struct spdk_nvmf_rdma_qpair *rqpair)
 {
@@ -600,25 +594,6 @@ spdk_nvmf_rdma_request_set_state(struct spdk_nvmf_rdma_request *rdma_req,
 	rqpair->state_cntr[rdma_req->state]++;
 }
-static int
-spdk_nvmf_rdma_mgmt_channel_create(void *io_device, void *ctx_buf)
-{
-	struct spdk_nvmf_rdma_mgmt_channel *ch = ctx_buf;
-	TAILQ_INIT(&ch->pending_data_buf_queue);
-	return 0;
-}
-static void
-spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
-{
-	struct spdk_nvmf_rdma_mgmt_channel *ch = ctx_buf;
-	if (!TAILQ_EMPTY(&ch->pending_data_buf_queue)) {
-		SPDK_ERRLOG("Pending I/O list wasn't empty on channel destruction\n");
-	}
-}
 static int
 spdk_nvmf_rdma_cur_queue_depth(struct spdk_nvmf_rdma_qpair *rqpair)
 {
@@ -690,10 +665,6 @@ spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
 		}
 	}
-	if (rqpair->mgmt_channel) {
-		spdk_put_io_channel(rqpair->mgmt_channel);
-	}
 	/* Free all memory */
 	spdk_dma_free(rqpair->cmds);
 	spdk_dma_free(rqpair->cpls);
@@ -1480,6 +1451,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 {
 	struct spdk_nvmf_rdma_qpair *rqpair;
 	struct spdk_nvmf_rdma_device *device;
+	struct spdk_nvmf_rdma_poll_group *rgroup;
 	struct spdk_nvme_cpl *rsp = &rdma_req->req.rsp->nvme_cpl;
 	int rc;
 	struct spdk_nvmf_rdma_recv *rdma_recv;
@@ -1489,6 +1461,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
 	device = rqpair->port->device;
+	rgroup = rqpair->poller->group;
 	assert(rdma_req->state != RDMA_REQUEST_STATE_FREE);
@@ -1496,7 +1469,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 	 * to release resources. */
 	if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
 		if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
-			TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+			TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
 		}
 		spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
 	}
@@ -1538,7 +1511,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			}
 			spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_NEED_BUFFER);
-			TAILQ_INSERT_TAIL(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+			TAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, link);
 			break;
 		case RDMA_REQUEST_STATE_NEED_BUFFER:
 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0,
@@ -1546,7 +1519,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
-			if (rdma_req != TAILQ_FIRST(&rqpair->ch->pending_data_buf_queue)) {
+			if (rdma_req != TAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
 				/* This request needs to wait in line to obtain a buffer */
 				break;
 			}
@@ -1554,7 +1527,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			/* Try to get a data buffer */
 			rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
 			if (rc < 0) {
-				TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+				TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
 				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
 				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_COMPLETE);
 				break;
@@ -1565,7 +1538,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 				break;
 			}
-			TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+			TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
 			/* If data is transferring from host to controller and the data didn't
 			 * arrive using in capsule data, we need to do a transfer from the host.
@@ -1745,11 +1718,6 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 		return NULL;
 	}
-	spdk_io_device_register(rtransport, spdk_nvmf_rdma_mgmt_channel_create,
-				spdk_nvmf_rdma_mgmt_channel_destroy,
-				sizeof(struct spdk_nvmf_rdma_mgmt_channel),
-				"rdma_transport");
 	TAILQ_INIT(&rtransport->devices);
 	TAILQ_INIT(&rtransport->ports);
@@ -1957,7 +1925,6 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
 	}
 	spdk_mempool_free(rtransport->data_wr_pool);
-	spdk_io_device_unregister(rtransport, NULL);
 	pthread_mutex_destroy(&rtransport->lock);
 	free(rtransport);
@@ -2234,7 +2201,7 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
 	}
 	/* The second highest priority is I/O waiting on memory buffers. */
-	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->ch->pending_data_buf_queue, link,
+	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, link,
			   req_tmp) {
 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
 			break;
@@ -2564,6 +2531,7 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 	TAILQ_INIT(&rgroup->pollers);
+	TAILQ_INIT(&rgroup->pending_data_buf_queue);
 	pthread_mutex_lock(&rtransport->lock);
 	TAILQ_FOREACH(device, &rtransport->devices, link) {
@@ -2632,6 +2600,10 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 		free(poller);
 	}
+	if (!TAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
+		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
+	}
 	free(rgroup);
 }
@@ -2639,14 +2611,12 @@ static int
 spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_qpair *qpair)
 {
-	struct spdk_nvmf_rdma_transport *rtransport;
 	struct spdk_nvmf_rdma_poll_group *rgroup;
 	struct spdk_nvmf_rdma_qpair *rqpair;
 	struct spdk_nvmf_rdma_device *device;
 	struct spdk_nvmf_rdma_poller *poller;
 	int rc;
-	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
@@ -2672,16 +2642,6 @@ spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
 		return -1;
 	}
-	rqpair->mgmt_channel = spdk_get_io_channel(rtransport);
-	if (!rqpair->mgmt_channel) {
-		spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
-		spdk_nvmf_rdma_qpair_destroy(rqpair);
-		return -1;
-	}
-	rqpair->ch = spdk_io_channel_get_ctx(rqpair->mgmt_channel);
-	assert(rqpair->ch != NULL);
 	rc = spdk_nvmf_rdma_event_accept(rqpair->cm_id, rqpair);
 	if (rc) {
 		/* Try to reject, but we probably can't */