nvme: add num_outstanding_reqs in spdk_nvme_qpair

Added num_outstanding_reqs in struct spdk_nvme_qpair to record outstanding
req number in each qpair. This can be used by multipath to select I/O
path.

Increment num_outstanding_reqs when req is removed from free_req queue and
decrement it when req is put back in free_req queue.

Change-Id: I31148fc7d0a9a85bec4c56d1f6e3047b021c2f48
Signed-off-by: Richael Zhuang <richael.zhuang@arm.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15875
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Richael Zhuang 2022-12-12 16:55:29 +08:00 committed by Tomasz Zawadzki
parent 2ebbeba7d9
commit 41bf6280e9
5 changed files with 43 additions and 0 deletions

View File

@ -439,6 +439,8 @@ struct spdk_nvme_qpair {
enum spdk_nvme_transport_type trtype;
uint32_t num_outstanding_reqs;
/* request object used only for this qpair's FABRICS/CONNECT command (if needed) */
struct nvme_request *reserved_req;
@ -1268,6 +1270,7 @@ nvme_allocate_request(struct spdk_nvme_qpair *qpair,
}
STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
qpair->num_outstanding_reqs++;
/*
* Only memset/zero fields that need it. All other fields
@ -1357,6 +1360,9 @@ nvme_free_request(struct nvme_request *req)
*/
if (spdk_likely(req->qpair->reserved_req != req)) {
STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
assert(req->qpair->num_outstanding_reqs > 0);
req->qpair->num_outstanding_reqs--;
}
}
@ -1381,6 +1387,9 @@ nvme_qpair_free_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
assert(req->num_children == 0);
STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
assert(req->qpair->num_outstanding_reqs > 0);
req->qpair->num_outstanding_reqs--;
}
static inline void

View File

@ -851,6 +851,7 @@ nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
qpair->is_new_qpair = true;
qpair->async = async;
qpair->poll_status = NULL;
qpair->num_outstanding_reqs = 0;
STAILQ_INIT(&qpair->free_req);
STAILQ_INIT(&qpair->queued_req);

View File

@ -688,11 +688,13 @@ test_nvme_allocate_request(void)
memset(&payload, 0x5a, payload_struct_size);
STAILQ_INIT(&qpair.free_req);
STAILQ_INIT(&qpair.queued_req);
qpair.num_outstanding_reqs = 0;
/* Test trying to allocate a request when no requests are available */
req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
cb_fn, cb_arg);
CU_ASSERT(req == NULL);
CU_ASSERT(qpair.num_outstanding_reqs == 0);
/* put a dummy on the queue, and then allocate one */
STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
@ -701,6 +703,7 @@ test_nvme_allocate_request(void)
/* all the req elements should now match the passed in parameters */
SPDK_CU_ASSERT_FATAL(req != NULL);
CU_ASSERT(qpair.num_outstanding_reqs == 1);
CU_ASSERT(req->cb_fn == cb_fn);
CU_ASSERT(req->cb_arg == cb_arg);
CU_ASSERT(memcmp(&req->payload, &payload, payload_struct_size) == 0);
@ -718,6 +721,7 @@ test_nvme_free_request(void)
/* put a req on the Q, take it off and compare */
memset(&match_req.cmd, 0x5a, sizeof(struct spdk_nvme_cmd));
match_req.qpair = &qpair;
qpair.num_outstanding_reqs = 1;
/* the code under tests asserts this condition */
match_req.num_children = 0;
STAILQ_INIT(&qpair.free_req);
@ -726,6 +730,7 @@ test_nvme_free_request(void)
nvme_free_request(&match_req);
req = STAILQ_FIRST(&match_req.qpair->free_req);
CU_ASSERT(req == &match_req);
CU_ASSERT(qpair.num_outstanding_reqs == 0);
}
static void

View File

@ -176,6 +176,7 @@ test_nvme_qpair_process_completions(void)
STAILQ_INIT(&qpair.queued_req);
STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
qpair.num_outstanding_reqs = 2;
/* If the controller is failed, return -ENXIO */
ctrlr.is_failed = true;
@ -185,6 +186,7 @@ test_nvme_qpair_process_completions(void)
CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
CU_ASSERT(g_num_cb_passed == 0);
CU_ASSERT(g_num_cb_failed == 0);
CU_ASSERT(qpair.num_outstanding_reqs == 2);
/* Same if the qpair is failed at the transport layer. */
ctrlr.is_failed = false;
@ -195,6 +197,7 @@ test_nvme_qpair_process_completions(void)
CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
CU_ASSERT(g_num_cb_passed == 0);
CU_ASSERT(g_num_cb_failed == 0);
CU_ASSERT(qpair.num_outstanding_reqs == 2);
/* If the controller is removed, make sure we abort the requests. */
ctrlr.is_failed = true;
@ -205,6 +208,7 @@ test_nvme_qpair_process_completions(void)
CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
CU_ASSERT(g_num_cb_passed == 0);
CU_ASSERT(g_num_cb_failed == 2);
CU_ASSERT(qpair.num_outstanding_reqs == 0);
/* If we are resetting, make sure that we don't call into the transport. */
STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
@ -626,10 +630,12 @@ test_nvme_qpair_manual_complete_request(void)
qpair.ctrlr->opts.disable_error_logging = false;
STAILQ_INIT(&qpair.free_req);
SPDK_CU_ASSERT_FATAL(STAILQ_EMPTY(&qpair.free_req));
qpair.num_outstanding_reqs = 1;
nvme_qpair_manual_complete_request(&qpair, &req, SPDK_NVME_SCT_GENERIC,
SPDK_NVME_SC_SUCCESS, 1, true);
CU_ASSERT(!STAILQ_EMPTY(&qpair.free_req));
CU_ASSERT(qpair.num_outstanding_reqs == 0);
}
static void
@ -688,12 +694,14 @@ test_nvme_qpair_init_deinit(void)
STAILQ_REMOVE(&qpair.free_req, reqs[2], nvme_request, stailq);
STAILQ_INSERT_TAIL(&qpair.err_req_head, reqs[2], stailq);
CU_ASSERT(STAILQ_EMPTY(&qpair.free_req));
qpair.num_outstanding_reqs = 3;
nvme_qpair_deinit(&qpair);
CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
CU_ASSERT(STAILQ_EMPTY(&qpair.aborting_queued_req));
CU_ASSERT(STAILQ_EMPTY(&qpair.err_req_head));
CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
CU_ASSERT(qpair.num_outstanding_reqs == 0);
}
static void

View File

@ -451,6 +451,7 @@ test_nvme_tcp_req_complete_safe(void)
tcp_req.tqpair = &tqpair;
tcp_req.state = NVME_TCP_REQ_ACTIVE;
TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
tqpair.qpair.num_outstanding_reqs = 1;
/* Test case 1: send operation and transfer completed. Expect: PASS */
tcp_req.state = NVME_TCP_REQ_ACTIVE;
@ -460,14 +461,17 @@ test_nvme_tcp_req_complete_safe(void)
rc = nvme_tcp_req_complete_safe(&tcp_req);
CU_ASSERT(rc == true);
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
/* Test case 2: send operation not completed. Expect: FAIL */
tcp_req.ordering.raw = 0;
tcp_req.state = NVME_TCP_REQ_ACTIVE;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
tqpair.qpair.num_outstanding_reqs = 1;
rc = nvme_tcp_req_complete_safe(&tcp_req);
SPDK_CU_ASSERT_FATAL(rc != true);
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
TAILQ_REMOVE(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
/* Test case 3: in completion context. Expect: PASS */
@ -477,10 +481,12 @@ test_nvme_tcp_req_complete_safe(void)
tcp_req.ordering.bits.data_recv = 1;
tcp_req.state = NVME_TCP_REQ_ACTIVE;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
tqpair.qpair.num_outstanding_reqs = 1;
rc = nvme_tcp_req_complete_safe(&tcp_req);
CU_ASSERT(rc == true);
CU_ASSERT(tcp_req.tqpair->async_complete == 0);
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
/* Test case 4: in async complete. Expect: PASS */
tqpair.qpair.in_completion_context = 0;
@ -488,10 +494,12 @@ test_nvme_tcp_req_complete_safe(void)
tcp_req.ordering.bits.data_recv = 1;
tcp_req.state = NVME_TCP_REQ_ACTIVE;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
tqpair.qpair.num_outstanding_reqs = 1;
rc = nvme_tcp_req_complete_safe(&tcp_req);
CU_ASSERT(rc == true);
CU_ASSERT(tcp_req.tqpair->async_complete);
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
}
static void
@ -1138,6 +1146,7 @@ test_nvme_tcp_c2h_payload_handle(void)
tcp_req.ordering.bits.data_recv = 0;
tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
tqpair.qpair.num_outstanding_reqs = 1;
nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
@ -1146,6 +1155,7 @@ test_nvme_tcp_c2h_payload_handle(void)
CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
CU_ASSERT(reaped == 2);
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
/* case 2: nvme_tcp_c2h_data_payload_handle: tcp_req->datao == tcp_req->req->payload_size */
tcp_req.datao = 1024;
@ -1156,6 +1166,7 @@ test_nvme_tcp_c2h_payload_handle(void)
tcp_req.ordering.bits.data_recv = 0;
tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
tqpair.qpair.num_outstanding_reqs = 1;
nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
@ -1164,6 +1175,7 @@ test_nvme_tcp_c2h_payload_handle(void)
CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
CU_ASSERT(reaped == 3);
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
/* case 3: nvme_tcp_c2h_data_payload_handle: flag does not have SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS */
pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
@ -1175,16 +1187,19 @@ test_nvme_tcp_c2h_payload_handle(void)
tcp_req.ordering.bits.data_recv = 0;
tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
tqpair.qpair.num_outstanding_reqs = 1;
nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
CU_ASSERT(reaped == 3);
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
/* case 4: nvme_tcp_c2h_term_req_payload_handle: recv_state is NVME_TCP_PDU_RECV_STATE_ERROR */
pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
nvme_tcp_c2h_term_req_payload_handle(&tqpair, &pdu);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
}
static void
@ -1281,6 +1296,7 @@ test_nvme_tcp_pdu_payload_handle(void)
tcp_req.cid = 1;
TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
tqpair.qpair.num_outstanding_reqs = 1;
/* C2H_DATA */
recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
@ -1297,6 +1313,7 @@ test_nvme_tcp_pdu_payload_handle(void)
CU_ASSERT(tcp_req.rsp.sqid == 1);
CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
CU_ASSERT(reaped == 1);
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
/* TermResp */
recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
@ -1338,6 +1355,7 @@ test_nvme_tcp_capsule_resp_hdr_handle(void)
memset(&rccqe_tgt, 0xff, sizeof(rccqe_tgt));
rccqe_tgt.cid = 0;
memcpy(&tqpair.recv_pdu->hdr.capsule_resp.rccqe, &rccqe_tgt, sizeof(rccqe_tgt));
tqpair.qpair.num_outstanding_reqs = 1;
nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
@ -1345,6 +1363,7 @@ test_nvme_tcp_capsule_resp_hdr_handle(void)
CU_ASSERT(tcp_req->ordering.bits.data_recv == 1);
CU_ASSERT(reaped == 1);
CU_ASSERT(TAILQ_EMPTY(&tcp_req->tqpair->outstanding_reqs));
CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
/* Get tcp request error, expect fail */
reaped = 0;
@ -1527,6 +1546,7 @@ test_nvme_tcp_ctrlr_delete_io_qpair(void)
tcp_req.state = NVME_TCP_REQ_ACTIVE;
TAILQ_INIT(&tqpair->outstanding_reqs);
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
qpair->num_outstanding_reqs = 1;
rc = nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);