rdma: use an stailq for incoming_queue
Change-Id: Ib1e59db4c5dffc9bc21f26461dabeff0d171ad22
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/445344 (master)
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/447621
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
commit d145d67c6b
parent e11c4afaad
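The change swaps incoming_queue's doubly linked TAILQ for a singly linked STAILQ, using the BSD-style macros from <sys/queue.h>. As context for the diff below, here is a minimal standalone sketch of the operations it relies on (head declaration, init, tail insert, head removal); the item/item_list names are invented for illustration, not SPDK types, and it assumes a <sys/queue.h> that provides the STAILQ macros.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Toy element type; "item"/"item_list" are made-up names, not SPDK types. */
struct item {
	int value;
	STAILQ_ENTRY(item) link;	/* single forward pointer embedded in the element */
};

STAILQ_HEAD(item_list, item);

int main(void)
{
	struct item_list q;
	struct item *it;

	STAILQ_INIT(&q);

	for (int i = 0; i < 3; i++) {
		it = malloc(sizeof(*it));
		if (it == NULL) {
			return 1;
		}
		it->value = i;
		STAILQ_INSERT_TAIL(&q, it, link);	/* the call shape the diff switches to */
	}

	/* Drain front-to-back, detaching each element before using it. */
	while (!STAILQ_EMPTY(&q)) {
		it = STAILQ_FIRST(&q);
		STAILQ_REMOVE_HEAD(&q, link);
		printf("%d\n", it->value);
		free(it);
	}
	return 0;
}

The trade-off is the usual one for singly linked lists: tail insertion and head removal stay O(1), while removing an arbitrary element requires a walk from the head.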
@@ -210,17 +210,17 @@ struct spdk_nvmf_rdma_wr {
  * command when there aren't any free request objects.
  */
 struct spdk_nvmf_rdma_recv {
 	struct ibv_recv_wr		wr;
 	struct ibv_sge			sgl[NVMF_DEFAULT_RX_SGE];
 
 	struct spdk_nvmf_rdma_qpair	*qpair;
 
 	/* In-capsule data buffer */
 	uint8_t				*buf;
 
 	struct spdk_nvmf_rdma_wr	rdma_wr;
 
-	TAILQ_ENTRY(spdk_nvmf_rdma_recv) link;
+	STAILQ_ENTRY(spdk_nvmf_rdma_recv) link;
 };
 
 struct spdk_nvmf_rdma_request_data {
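A practical difference between the two list flavors is per-element footprint: TAILQ_ENTRY embeds two pointers in every element (the next pointer plus the address of the previous element's next field), while STAILQ_ENTRY embeds one. A small sketch that just compares element sizes, using toy node_* structs rather than the SPDK ones:

#include <stdio.h>
#include <sys/queue.h>

/* Two toy structs that differ only in the embedded link type. */
struct node_tailq {
	int payload;
	TAILQ_ENTRY(node_tailq) link;	/* next pointer + pointer to previous next field */
};

struct node_stailq {
	int payload;
	STAILQ_ENTRY(node_stailq) link;	/* next pointer only */
};

int main(void)
{
	/* On a typical LP64 build this prints 24 vs 16: dropping the back link
	 * saves one pointer per queued element. */
	printf("TAILQ element:  %zu bytes\n", sizeof(struct node_tailq));
	printf("STAILQ element: %zu bytes\n", sizeof(struct node_stailq));
	return 0;
}

For spdk_nvmf_rdma_recv the absolute saving per receive is small, but the singly linked insert and remove-head paths also touch fewer fields.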
@@ -298,7 +298,7 @@ struct spdk_nvmf_rdma_qpair {
 	uint32_t			max_recv_sge;
 
 	/* Receives that are waiting for a request object */
-	TAILQ_HEAD(, spdk_nvmf_rdma_recv)	incoming_queue;
+	STAILQ_HEAD(, spdk_nvmf_rdma_recv)	incoming_queue;
 
 	/* Queues to track requests in critical states */
 	STAILQ_HEAD(, spdk_nvmf_rdma_request)	free_queue;
@@ -1057,7 +1057,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 	rqpair->cm_id = event->id;
 	rqpair->listen_id = event->listen_id;
 	rqpair->qpair.transport = transport;
-	TAILQ_INIT(&rqpair->incoming_queue);
+	STAILQ_INIT(&rqpair->incoming_queue);
 	event->id->context = &rqpair->qpair;
 
 	cb_fn(&rqpair->qpair);
@@ -1445,7 +1445,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 		rdma_req->req.cmd = (union nvmf_h2c_msg *)rdma_recv->sgl[0].addr;
 		memset(rdma_req->req.rsp, 0, sizeof(*rdma_req->req.rsp));
 
-		TAILQ_REMOVE(&rqpair->incoming_queue, rdma_recv, link);
+		STAILQ_REMOVE(&rqpair->incoming_queue, rdma_recv, spdk_nvmf_rdma_recv, link);
 
 		if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
 			rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
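Note the extra argument in the new call: STAILQ_REMOVE(head, elm, type, field) takes the element type because, with no back pointer, the macro must walk from the head to find elm's predecessor, so mid-list removal is O(n) (removal of the first element via STAILQ_REMOVE_HEAD stays O(1)). A toy sketch of that call shape, not SPDK code:

#include <stdio.h>
#include <sys/queue.h>

struct req {
	int id;
	STAILQ_ENTRY(req) link;
};

STAILQ_HEAD(req_list, req);

int main(void)
{
	struct req_list q;
	struct req a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct req *it;

	STAILQ_INIT(&q);
	STAILQ_INSERT_TAIL(&q, &a, link);
	STAILQ_INSERT_TAIL(&q, &b, link);
	STAILQ_INSERT_TAIL(&q, &c, link);

	/* The "req" type argument lets the macro walk from the head to find
	 * b's predecessor; with no prev pointer this walk is unavoidable. */
	STAILQ_REMOVE(&q, &b, req, link);

	STAILQ_FOREACH(it, &q, link) {
		printf("%d\n", it->id);		/* prints 1 then 3 */
	}
	return 0;
}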
@@ -2148,8 +2148,8 @@ static void
 spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport,
 				     struct spdk_nvmf_rdma_qpair *rqpair, bool drain)
 {
-	struct spdk_nvmf_rdma_recv	*rdma_recv, *recv_tmp;
 	struct spdk_nvmf_rdma_request	*rdma_req, *req_tmp;
+	struct spdk_nvmf_rdma_recv	*rdma_recv, *recv_tmp;
 
 	/* We process I/O in the data transfer pending queue at the highest priority. RDMA reads first */
 	STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_read_queue, state_link, req_tmp) {
@@ -2174,15 +2174,16 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
 	}
 
 	/* The lowest priority is processing newly received commands */
-	TAILQ_FOREACH_SAFE(rdma_recv, &rqpair->incoming_queue, link, recv_tmp) {
-
+	STAILQ_FOREACH_SAFE(rdma_recv, &rqpair->incoming_queue, link, recv_tmp) {
 		if (STAILQ_EMPTY(&rqpair->free_queue)) {
 			break;
 		}
 
 		rdma_req = STAILQ_FIRST(&rqpair->free_queue);
 		rdma_req->recv = rdma_recv;
+
 		STAILQ_REMOVE_HEAD(&rqpair->free_queue, state_link);
+
 		rqpair->qd++;
 		rdma_req->state = RDMA_REQUEST_STATE_NEW;
 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
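This loop pairs each newly arrived receive with a request object from free_queue, in arrival order, and stops as soon as free_queue is empty; the _SAFE iterator is what allows the current receive to be unlinked (inside spdk_nvmf_rdma_request_process, via the STAILQ_REMOVE shown earlier) without breaking the traversal. Below is a simplified sketch of the pairing pattern with toy types, written as a plain while loop since STAILQ_FOREACH_SAFE is a BSD extension that not every <sys/queue.h> ships:

#include <stdio.h>
#include <sys/queue.h>

/* Toy stand-ins for the receive and request objects; the names are
 * illustrative and not the SPDK structures. */
struct recv_ctx {
	int cmd;
	STAILQ_ENTRY(recv_ctx) link;
};

struct req_ctx {
	struct recv_ctx *recv;
	STAILQ_ENTRY(req_ctx) state_link;
};

STAILQ_HEAD(recv_list, recv_ctx);
STAILQ_HEAD(req_list, req_ctx);

/* Pair each queued receive with a free request object in arrival order,
 * stopping when either queue runs dry. This mirrors the shape of the loop in
 * the diff, minus the SPDK request state machine (which is also where the
 * real code unlinks the receive). */
static int pair_incoming(struct recv_list *incoming, struct req_list *free_reqs)
{
	int paired = 0;

	while (!STAILQ_EMPTY(incoming) && !STAILQ_EMPTY(free_reqs)) {
		struct recv_ctx *rcv = STAILQ_FIRST(incoming);
		struct req_ctx *req = STAILQ_FIRST(free_reqs);

		STAILQ_REMOVE_HEAD(incoming, link);
		STAILQ_REMOVE_HEAD(free_reqs, state_link);

		req->recv = rcv;
		paired++;
	}
	return paired;
}

int main(void)
{
	struct recv_list incoming;
	struct req_list free_reqs;
	struct recv_ctx r0 = { .cmd = 0 }, r1 = { .cmd = 1 };
	struct req_ctx q0 = { 0 };

	STAILQ_INIT(&incoming);
	STAILQ_INIT(&free_reqs);
	STAILQ_INSERT_TAIL(&incoming, &r0, link);
	STAILQ_INSERT_TAIL(&incoming, &r1, link);
	STAILQ_INSERT_TAIL(&free_reqs, &q0, state_link);

	/* One free request, two receives: only r0 gets paired, r1 stays queued. */
	printf("paired %d\n", pair_incoming(&incoming, &free_reqs));
	return 0;
}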
@@ -2760,7 +2761,7 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 
 				/* Dump this into the incoming queue. This gets cleaned up when
 				 * the queue pair disconnects or recovers. */
-				TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
+				STAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
 				rqpair->current_recv_depth++;
 
 				/* Don't worry about responding to recv overflow, we are disconnecting anyways */
@@ -2881,7 +2882,7 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			} else {
 				rqpair->current_recv_depth++;
 			}
-			TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
+			STAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
 			/* Try to process other queued requests */
 			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);
 			break;
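Appending at the tail, as the poller does here for every completed receive, remains O(1) even without back links because the STAILQ head caches the address of the last element's next pointer. The following compilable illustration of that bookkeeping uses paraphrased macros (prefixed X_), not text copied from any particular <sys/queue.h>:

#include <stdio.h>

/* Approximate shape of the STAILQ head, init, and tail-insert macros. The
 * head caches the address of the last element's next pointer, which is what
 * keeps tail insertion constant time. Illustration only. */
#define X_STAILQ_HEAD(name, type)					\
	struct name {							\
		struct type *stqh_first;   /* first element */		\
		struct type **stqh_last;   /* &(last element's next) */	\
	}

#define X_STAILQ_INIT(head) do {					\
	(head)->stqh_first = NULL;					\
	(head)->stqh_last = &(head)->stqh_first;			\
} while (0)

#define X_STAILQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.next = NULL;					\
	*(head)->stqh_last = (elm);	/* link after current tail */	\
	(head)->stqh_last = &(elm)->field.next;				\
} while (0)

struct node {
	int v;
	struct { struct node *next; } link;
};

X_STAILQ_HEAD(node_list, node);

int main(void)
{
	struct node_list q;
	struct node a = { .v = 1 }, b = { .v = 2 };

	X_STAILQ_INIT(&q);
	X_STAILQ_INSERT_TAIL(&q, &a, link);	/* no list walk needed */
	X_STAILQ_INSERT_TAIL(&q, &b, link);

	for (struct node *n = q.stqh_first; n != NULL; n = n->link.next) {
		printf("%d\n", n->v);		/* prints 1 then 2 */
	}
	return 0;
}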
|