nvmf: Transport polling now done by poll group
Instead of polling each individual qpair, polling is now done by poll group. This allows transports to use more efficient polling schemes in the future. As of this patch, the RDMA transport still just loops over each qpair in the group and polls it individually, so there is no performance change yet.

Change-Id: I0f63f0dbbc5fd43c1e0d9729b10b37c2cb0d9881
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/376239
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 3ee93c3293
commit d5ce9cff63
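The commit message notes that per-group polling "allows transports to use more efficient polling schemes in the future." As a rough illustration of what such a scheme could look like, here is a minimal, self-contained sketch (hypothetical names and types, not SPDK API and not part of this patch): a poller that owns a single shared completion queue reaps it once per poll and dispatches each completion to the owning qpair, instead of issuing one poll call per qpair.

/* Hypothetical sketch only -- these names and types are illustrative and are
 * not part of SPDK or of this patch. It shows the kind of "more efficient
 * polling scheme" a group-level hook enables: reap one shared completion
 * queue per poller instead of polling every qpair's queue individually. */

#define SKETCH_CQ_BATCH 32

struct sketch_completion {
    void *qpair_ctx;  /* which qpair this completion belongs to */
    int   status;
};

/* Stand-ins for transport-specific primitives (assumed, not real APIs). */
static int
sketch_shared_cq_reap(void *shared_cq, struct sketch_completion *comps, int max)
{
    (void)shared_cq; (void)comps; (void)max;
    return 0;  /* no completions in this stub */
}

static void
sketch_qpair_handle(void *qpair_ctx, struct sketch_completion *comp)
{
    (void)qpair_ctx; (void)comp;
}

struct sketch_poller {
    void *shared_cq;  /* one CQ shared by all qpairs on this poller */
};

/* Shape of a group-level poll: one reap covers every attached qpair. */
static int
sketch_poll_group_poll(struct sketch_poller *poller)
{
    struct sketch_completion comps[SKETCH_CQ_BATCH];
    int reaped, i;

    reaped = sketch_shared_cq_reap(poller->shared_cq, comps, SKETCH_CQ_BATCH);
    if (reaped < 0) {
        return reaped;
    }

    for (i = 0; i < reaped; i++) {
        sketch_qpair_handle(comps[i].qpair_ctx, &comps[i]);
    }

    /* Return the number of completions processed, mirroring the contract of
     * the per-qpair poll this patch replaces. */
    return reaped;
}

Under the scheme this patch introduces, a transport would register such a function as .poll_group_poll in its spdk_nvmf_transport_ops table, as the RDMA transport does below with its (still per-qpair) implementation.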
@@ -556,7 +556,6 @@ spdk_nvmf_property_set(struct spdk_nvmf_ctrlr *ctrlr,
 int
 spdk_nvmf_ctrlr_poll(struct spdk_nvmf_ctrlr *ctrlr)
 {
-    struct spdk_nvmf_qpair *qpair, *tmp;
     struct spdk_nvmf_subsystem *subsys = ctrlr->subsys;
 
     if (subsys->is_removed) {
@@ -571,14 +570,7 @@ spdk_nvmf_ctrlr_poll(struct spdk_nvmf_ctrlr *ctrlr)
         }
     }
 
-    TAILQ_FOREACH_SAFE(qpair, &ctrlr->qpairs, link, tmp) {
-        if (spdk_nvmf_transport_qpair_poll(qpair) < 0) {
-            SPDK_ERRLOG("Transport poll failed for qpair %p; closing connection\n", qpair);
-            spdk_nvmf_ctrlr_disconnect(qpair);
-        }
-    }
-
-    return 0;
+    return spdk_nvmf_transport_poll_group_poll(ctrlr->group);
 }
 
 static int
@@ -1356,7 +1356,8 @@ spdk_nvmf_rdma_stop_listen(struct spdk_nvmf_transport *transport,
 }
 
 static int
-spdk_nvmf_rdma_poll(struct spdk_nvmf_qpair *qpair);
+spdk_nvmf_rdma_qpair_poll(struct spdk_nvmf_rdma_transport *rtransport,
+                          struct spdk_nvmf_rdma_qpair *rqpair);
 
 static void
 spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
@@ -1376,7 +1377,7 @@ spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
     /* Process pending connections for incoming capsules. The only capsule
      * this should ever find is a CONNECT request. */
     TAILQ_FOREACH_SAFE(rdma_qpair, &g_pending_conns, pending_link, tmp) {
-        rc = spdk_nvmf_rdma_poll(&rdma_qpair->qpair);
+        rc = spdk_nvmf_rdma_qpair_poll(rtransport, rdma_qpair);
         if (rc < 0) {
             TAILQ_REMOVE(&g_pending_conns, rdma_qpair, pending_link);
             spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
@@ -1677,11 +1678,10 @@ get_rdma_recv_from_wc(struct spdk_nvmf_rdma_qpair *rdma_qpair,
 }
 
 static int
-spdk_nvmf_rdma_poll(struct spdk_nvmf_qpair *qpair)
+spdk_nvmf_rdma_qpair_poll(struct spdk_nvmf_rdma_transport *rtransport,
+                          struct spdk_nvmf_rdma_qpair *rqpair)
 {
     struct ibv_wc wc[32];
-    struct spdk_nvmf_rdma_transport *rtransport;
-    struct spdk_nvmf_rdma_qpair *rdma_qpair;
     struct spdk_nvmf_rdma_request *rdma_req;
     struct spdk_nvmf_rdma_recv *rdma_recv;
     int reaped, i;
@@ -1689,11 +1689,8 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_qpair *qpair)
     bool error = false;
     char buf[64];
 
-    rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
-    rdma_qpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-
     /* Poll for completing operations. */
-    reaped = ibv_poll_cq(rdma_qpair->cq, 32, wc);
+    reaped = ibv_poll_cq(rqpair->cq, 32, wc);
     if (reaped < 0) {
         spdk_strerror_r(errno, buf, sizeof(buf));
         SPDK_ERRLOG("Error polling CQ! (%d): %s\n",
@@ -1704,14 +1701,14 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_qpair *qpair)
     for (i = 0; i < reaped; i++) {
         if (wc[i].status) {
             SPDK_ERRLOG("CQ error on CQ %p, Request 0x%lu (%d): %s\n",
-                        rdma_qpair->cq, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
+                        rqpair->cq, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
             error = true;
             continue;
         }
 
         switch (wc[i].opcode) {
         case IBV_WC_SEND:
-            rdma_req = get_rdma_req_from_wc(rdma_qpair, &wc[i]);
+            rdma_req = get_rdma_req_from_wc(rqpair, &wc[i]);
 
             assert(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
             rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
@@ -1721,36 +1718,36 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_qpair *qpair)
             count++;
 
             /* Try to process other queued requests */
-            spdk_nvmf_rdma_qpair_process_pending(rtransport, rdma_qpair);
+            spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
             break;
 
         case IBV_WC_RDMA_WRITE:
-            rdma_qpair->cur_rdma_rw_depth--;
+            rqpair->cur_rdma_rw_depth--;
 
             /* Try to process other queued requests */
-            spdk_nvmf_rdma_qpair_process_pending(rtransport, rdma_qpair);
+            spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
             break;
 
         case IBV_WC_RDMA_READ:
-            rdma_req = get_rdma_req_from_wc(rdma_qpair, &wc[i]);
+            rdma_req = get_rdma_req_from_wc(rqpair, &wc[i]);
 
             assert(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
-            rdma_qpair->cur_rdma_rw_depth--;
+            rqpair->cur_rdma_rw_depth--;
             rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
 
             spdk_nvmf_rdma_request_process(rtransport, rdma_req);
 
             /* Try to process other queued requests */
-            spdk_nvmf_rdma_qpair_process_pending(rtransport, rdma_qpair);
+            spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
             break;
 
         case IBV_WC_RECV:
-            rdma_recv = get_rdma_recv_from_wc(rdma_qpair, &wc[i]);
+            rdma_recv = get_rdma_recv_from_wc(rqpair, &wc[i]);
 
-            TAILQ_INSERT_TAIL(&rdma_qpair->incoming_queue, rdma_recv, link);
+            TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
 
             /* Try to process other queued requests */
-            spdk_nvmf_rdma_qpair_process_pending(rtransport, rdma_qpair);
+            spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
             break;
 
         default:
@@ -1766,6 +1763,32 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_qpair *qpair)
     return count;
 }
 
+static int
+spdk_nvmf_rdma_poll_group_poll(struct spdk_nvmf_poll_group *group)
+{
+    struct spdk_nvmf_rdma_transport *rtransport;
+    struct spdk_nvmf_rdma_poll_group *rgroup;
+    struct spdk_nvmf_rdma_poller *rpoller;
+    struct spdk_nvmf_rdma_qpair *rqpair;
+    int count, rc;
+
+    rtransport = SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_rdma_transport, transport);
+    rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
+
+    count = 0;
+    TAILQ_FOREACH(rpoller, &rgroup->pollers, link) {
+        TAILQ_FOREACH(rqpair, &rpoller->qpairs, link) {
+            rc = spdk_nvmf_rdma_qpair_poll(rtransport, rqpair);
+            if (rc < 0) {
+                return rc;
+            }
+            count += rc;
+        }
+    }
+
+    return count;
+}
+
 static bool
 spdk_nvmf_rdma_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
 {
@@ -1794,11 +1817,11 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
     .poll_group_destroy = spdk_nvmf_rdma_poll_group_destroy,
     .poll_group_add = spdk_nvmf_rdma_poll_group_add,
     .poll_group_remove = spdk_nvmf_rdma_poll_group_remove,
+    .poll_group_poll = spdk_nvmf_rdma_poll_group_poll,
 
     .req_complete = spdk_nvmf_rdma_request_complete,
 
     .qpair_fini = spdk_nvmf_rdma_close_qpair,
-    .qpair_poll = spdk_nvmf_rdma_poll,
     .qpair_is_idle = spdk_nvmf_rdma_qpair_is_idle,
 
 };
@@ -160,6 +160,12 @@ spdk_nvmf_transport_poll_group_remove(struct spdk_nvmf_poll_group *group,
     return group->transport->ops->poll_group_remove(group, qpair);
 }
 
+int
+spdk_nvmf_transport_poll_group_poll(struct spdk_nvmf_poll_group *group)
+{
+    return group->transport->ops->poll_group_poll(group);
+}
+
 int
 spdk_nvmf_transport_req_complete(struct spdk_nvmf_request *req)
 {
@@ -172,12 +178,6 @@ spdk_nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair)
     qpair->transport->ops->qpair_fini(qpair);
 }
 
-int
-spdk_nvmf_transport_qpair_poll(struct spdk_nvmf_qpair *qpair)
-{
-    return qpair->transport->ops->qpair_poll(qpair);
-}
-
 bool
 spdk_nvmf_transport_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
 {
@@ -109,6 +109,11 @@ struct spdk_nvmf_transport_ops {
     int (*poll_group_remove)(struct spdk_nvmf_poll_group *group,
                              struct spdk_nvmf_qpair *qpair);
 
+    /**
+     * Poll the group to process I/O
+     */
+    int (*poll_group_poll)(struct spdk_nvmf_poll_group *group);
+
     /*
      * Signal request completion, which sends a response
      * to the originator.
@@ -120,11 +125,6 @@ struct spdk_nvmf_transport_ops {
      */
     void (*qpair_fini)(struct spdk_nvmf_qpair *qpair);
 
-    /*
-     * Poll a connection for events.
-     */
-    int (*qpair_poll)(struct spdk_nvmf_qpair *qpair);
-
     /*
      * True if the qpair has no pending IO.
      */
@@ -158,12 +158,12 @@ int spdk_nvmf_transport_poll_group_add(struct spdk_nvmf_poll_group *group,
 int spdk_nvmf_transport_poll_group_remove(struct spdk_nvmf_poll_group *group,
                                           struct spdk_nvmf_qpair *qpair);
 
+int spdk_nvmf_transport_poll_group_poll(struct spdk_nvmf_poll_group *group);
+
 int spdk_nvmf_transport_req_complete(struct spdk_nvmf_request *req);
 
 void spdk_nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair);
 
-int spdk_nvmf_transport_qpair_poll(struct spdk_nvmf_qpair *qpair);
-
 bool spdk_nvmf_transport_qpair_is_idle(struct spdk_nvmf_qpair *qpair);
 
 extern const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma;
@@ -88,7 +88,7 @@ spdk_nvmf_transport_poll_group_remove(struct spdk_nvmf_poll_group *group,
 }
 
 int
-spdk_nvmf_transport_qpair_poll(struct spdk_nvmf_qpair *qpair)
+spdk_nvmf_transport_poll_group_poll(struct spdk_nvmf_poll_group *group)
 {
     return 0;
 }