rdma: track outstanding data work requests directly.
Tracking the outstanding RDMA READ work requests with a counter on the queue pair, rather than deriving the count from per-request state, gives us more accurate control over the number of work requests we can submit at once.

Change-Id: Ie717912685eaa56905c32d143c7887b636c1a9e9
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/441606
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
commit dfdd76cf21 (parent 7289d370f7)
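For orientation before the diff, here is a minimal sketch, not the SPDK source, of the two counters this commit works with: a per-queue-pair count of RDMA READ work requests currently on the wire, and a per-request count of data work requests still outstanding. Only current_read_depth, max_read_depth, and num_outstanding_data_wr come from the patch; the struct names are hypothetical stand-ins.

#include <stdint.h>

/* Hypothetical, trimmed-down stand-ins for the SPDK structures touched below. */
struct example_qpair {
	/* Negotiated ceiling on concurrently outstanding RDMA READ operations. */
	uint16_t max_read_depth;
	/* RDMA READ work requests posted to the send queue but not yet completed. */
	uint16_t current_read_depth;
};

struct example_request {
	/* Data work requests this request still has in flight; one large transfer
	 * may be split across several work requests. */
	uint16_t num_outstanding_data_wr;
};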
@@ -264,6 +264,9 @@ struct spdk_nvmf_rdma_qpair {
 	/* The maximum number of active RDMA READ and ATOMIC operations at one time */
 	uint16_t max_read_depth;
 
+	/* The current number of active RDMA READ operations */
+	uint16_t current_read_depth;
+
 	/* The maximum number of SGEs per WR on the send queue */
 	uint32_t max_send_sge;
 
@@ -592,12 +595,6 @@ spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
 	}
 }
 
-static int
-spdk_nvmf_rdma_cur_read_depth(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-	return rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER];
-}
-
 static int
 spdk_nvmf_rdma_cur_queue_depth(struct spdk_nvmf_rdma_qpair *rqpair)
 {
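The deleted helper derived the read depth from how many requests sit in the TRANSFERRING_HOST_TO_CONTROLLER state, which can diverge from the number of work requests actually on the wire when one request fans out into several RDMA READs. A hedged sketch of the difference, reusing the hypothetical types from the sketch above:

#include <stdint.h>

/* Old approach (sketch): one unit per request in the host-to-controller
 * transfer state, regardless of how many work requests it posted. */
static uint32_t
read_depth_from_state_cntr(const uint32_t *state_cntr, int transferring_h2c_state)
{
	return state_cntr[transferring_h2c_state];
}

/* New approach (sketch): count every posted work request, so a request that
 * needed, say, 3 READ WRs consumes 3 slots of the read depth budget. */
static void
account_posted_reads(struct example_qpair *qp, const struct example_request *req)
{
	qp->current_read_depth += req->num_outstanding_data_wr;
}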
@@ -914,6 +911,7 @@ request_transfer_in(struct spdk_nvmf_request *req)
 		SPDK_ERRLOG("Unable to transfer data from host to target\n");
 		return -1;
 	}
+	rqpair->current_read_depth += rdma_req->num_outstanding_data_wr;
 	return 0;
 }
 
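The increment happens in request_transfer_in() only after the READ work requests were posted successfully, so a failed post never consumes read-depth budget. A simplified sketch of that flow, using the hypothetical types above; post_recv_and_reads() is a placeholder for the real ibv_post_send() calls:

#include <stdio.h>

/* Placeholder for posting the receive and RDMA READ work requests. */
static int
post_recv_and_reads(struct example_qpair *qp, struct example_request *req)
{
	(void)qp; (void)req;
	return 0; /* pretend the post always succeeds in this sketch */
}

/* Sketch of the transfer-in path: account for the READs only on success. */
static int
transfer_in_sketch(struct example_qpair *qp, struct example_request *req)
{
	if (post_recv_and_reads(qp, req) != 0) {
		fprintf(stderr, "Unable to transfer data from host to target\n");
		return -1;
	}
	qp->current_read_depth += req->num_outstanding_data_wr;
	return 0;
}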
@@ -972,9 +970,10 @@ request_transfer_out(struct spdk_nvmf_request *req, int *data_posted)
 	rc = ibv_post_send(rqpair->cm_id->qp, send_wr, &bad_send_wr);
 	if (rc) {
 		SPDK_ERRLOG("Unable to send response capsule\n");
+		return rc;
 	}
 
-	return rc;
+	return 0;
 }
 
 static int
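This hunk also tidies the return path of request_transfer_out(): the error code is now returned as soon as the post fails instead of falling through to a shared return, and the success path returns 0 explicitly rather than echoing rc. A sketch of the pattern; post_response() is a hypothetical placeholder:

#include <stdio.h>

/* Placeholder for posting the response capsule. */
static int
post_response(struct example_qpair *qp)
{
	(void)qp;
	return 0;
}

static int
transfer_out_sketch(struct example_qpair *qp)
{
	int rc = post_response(qp);

	if (rc) {
		fprintf(stderr, "Unable to send response capsule\n");
		return rc;   /* propagate the failure immediately */
	}

	return 0;            /* success path no longer re-reports rc */
}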
@@ -1540,7 +1539,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 
 		if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
 
-			if (spdk_nvmf_rdma_cur_read_depth(rqpair) >= rqpair->max_read_depth) {
+			if (rqpair->current_read_depth >= rqpair->max_read_depth) {
 				/* Read operation queue is full, need to wait */
 				break;
 			}
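With the counter in place, the request state machine gates new host-to-controller transfers on it directly. A sketch of that admission check, again using the hypothetical types above:

#include <stdbool.h>

/* Returns true if another READ-bearing request may start its data transfer.
 * Mirrors the shape of the check in the diff: once the queue pair already has
 * max_read_depth READ work requests outstanding, the request must wait. */
static bool
can_start_read(const struct example_qpair *qp)
{
	return qp->current_read_depth < qp->max_read_depth;
}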
@@ -2728,6 +2727,7 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			SPDK_ERRLOG("data=%p length=%u\n", rdma_req->req.data, rdma_req->req.length);
 			if (rdma_req->data.wr.opcode == IBV_WR_RDMA_READ) {
 				assert(rdma_req->num_outstanding_data_wr > 0);
+				rqpair->current_read_depth--;
 				rdma_req->num_outstanding_data_wr--;
 				if (rdma_req->num_outstanding_data_wr == 0) {
 					spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
@@ -2802,6 +2802,7 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			assert(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
 			/* wait for all outstanding reads associated with the same rdma_req to complete before proceeding. */
 			assert(rdma_req->num_outstanding_data_wr > 0);
+			rqpair->current_read_depth--;
 			rdma_req->num_outstanding_data_wr--;
 			if (rdma_req->num_outstanding_data_wr == 0) {
 				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
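On the completion side (both the error path above and the normal path here), the poller releases one unit of read-depth budget per reaped RDMA READ work completion and only advances the request once its own per-request counter drains to zero. A sketch, using the hypothetical types above:

#include <assert.h>
#include <stdbool.h>

/* Handle one RDMA READ work completion: decrement the queue pair's read depth
 * and report whether the owning request has now seen all of its data work
 * requests complete (i.e. may move to its next state). */
static bool
on_read_completion(struct example_qpair *qp, struct example_request *req)
{
	assert(req->num_outstanding_data_wr > 0);
	qp->current_read_depth--;
	req->num_outstanding_data_wr--;
	return req->num_outstanding_data_wr == 0;
}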