rdma: make the pending_data_buf_queue an STAILQ
Should speed up operations and allow us to remove the 16-byte link object from the request structure. Change-Id: Ie62df1f44d22580a7a7ae41c498295841d1e3064 Signed-off-by: Seth Howell <seth.howell@intel.com> Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/448080 Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com> Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com> Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
parent
43e136dfcc
commit
1fb629c4d2
@ -269,7 +269,6 @@ struct spdk_nvmf_rdma_request {
|
|||||||
|
|
||||||
uint32_t num_outstanding_data_wr;
|
uint32_t num_outstanding_data_wr;
|
||||||
|
|
||||||
TAILQ_ENTRY(spdk_nvmf_rdma_request) link;
|
|
||||||
STAILQ_ENTRY(spdk_nvmf_rdma_request) state_link;
|
STAILQ_ENTRY(spdk_nvmf_rdma_request) state_link;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -417,7 +416,7 @@ struct spdk_nvmf_rdma_poll_group {
|
|||||||
struct spdk_nvmf_transport_poll_group group;
|
struct spdk_nvmf_transport_poll_group group;
|
||||||
|
|
||||||
/* Requests that are waiting to obtain a data buffer */
|
/* Requests that are waiting to obtain a data buffer */
|
||||||
TAILQ_HEAD(, spdk_nvmf_rdma_request) pending_data_buf_queue;
|
STAILQ_HEAD(, spdk_nvmf_rdma_request) pending_data_buf_queue;
|
||||||
|
|
||||||
TAILQ_HEAD(, spdk_nvmf_rdma_poller) pollers;
|
TAILQ_HEAD(, spdk_nvmf_rdma_poller) pollers;
|
||||||
};
|
};
|
||||||
@ -1816,7 +1815,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
|
|||||||
* to release resources. */
|
* to release resources. */
|
||||||
if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
|
if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
|
||||||
if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
|
if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
|
||||||
TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
|
STAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
|
||||||
} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING) {
|
} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING) {
|
||||||
STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
|
STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
|
||||||
} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING) {
|
} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING) {
|
||||||
@ -1860,7 +1859,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
|
|||||||
}
|
}
|
||||||
|
|
||||||
rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
|
rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
|
||||||
TAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, link);
|
STAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, state_link);
|
||||||
break;
|
break;
|
||||||
case RDMA_REQUEST_STATE_NEED_BUFFER:
|
case RDMA_REQUEST_STATE_NEED_BUFFER:
|
||||||
spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0,
|
spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0,
|
||||||
@ -1868,7 +1867,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
|
|||||||
|
|
||||||
assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
|
assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
|
||||||
|
|
||||||
if (rdma_req != TAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
|
if (rdma_req != STAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
|
||||||
/* This request needs to wait in line to obtain a buffer */
|
/* This request needs to wait in line to obtain a buffer */
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -1876,7 +1875,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
|
|||||||
/* Try to get a data buffer */
|
/* Try to get a data buffer */
|
||||||
rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
|
rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
|
||||||
if (rc < 0) {
|
if (rc < 0) {
|
||||||
TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
|
STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
|
||||||
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
|
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
|
||||||
rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
|
rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
|
||||||
break;
|
break;
|
||||||
@ -1887,7 +1886,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
|
STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
|
||||||
|
|
||||||
/* If data is transferring from host to controller and the data didn't
|
/* If data is transferring from host to controller and the data didn't
|
||||||
* arrive using in capsule data, we need to do a transfer from the host.
|
* arrive using in capsule data, we need to do a transfer from the host.
|
||||||
@ -2526,8 +2525,8 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* The second highest priority is I/O waiting on memory buffers. */
|
/* The second highest priority is I/O waiting on memory buffers. */
|
||||||
TAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, link,
|
STAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, state_link,
|
||||||
req_tmp) {
|
req_tmp) {
|
||||||
if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
|
if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -2902,7 +2901,7 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
|
|||||||
}
|
}
|
||||||
|
|
||||||
TAILQ_INIT(&rgroup->pollers);
|
TAILQ_INIT(&rgroup->pollers);
|
||||||
TAILQ_INIT(&rgroup->pending_data_buf_queue);
|
STAILQ_INIT(&rgroup->pending_data_buf_queue);
|
||||||
|
|
||||||
pthread_mutex_lock(&rtransport->lock);
|
pthread_mutex_lock(&rtransport->lock);
|
||||||
TAILQ_FOREACH(device, &rtransport->devices, link) {
|
TAILQ_FOREACH(device, &rtransport->devices, link) {
|
||||||
@ -3008,7 +3007,7 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
|
|||||||
free(poller);
|
free(poller);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!TAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
|
if (!STAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
|
||||||
SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
|
SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user