From bc36528cdac39e738c34c4c33f5857757ce11afc Mon Sep 17 00:00:00 2001 From: Alexey Marchuk Date: Tue, 1 Sep 2020 11:37:36 +0300 Subject: [PATCH] nvme/tcp: Process poll_group when waiting for icresp A preparation step for enabling zero copy in NVMEoF TCP initiator. Since nvme_tcp_qpair_process_completions doesn't process poll group, we can't get async notification from kernel. 1. Add a qpair to poll group before we send icreq in order to be able to process buffer reclaim notification. 2. Check if qpair is connected to a poll group and call nvme_tcp_poll_group_process_completions instead of nvme_tcp_qpair_process_completions when waiting for icresp 3. Add processing of poll group to nvme_wait_for_completion_timeout and nvme_wait_for_completion_robust_lock since they are used to process FABRIC_CONNECT command Change-Id: I38d2d9496bca8d0cd72e44883df2df802e31a87d Signed-off-by: Alexey Marchuk Signed-off-by: Or Gerlitz Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4208 Tested-by: SPDK CI Jenkins Reviewed-by: Jim Harris Reviewed-by: Ben Walker --- lib/nvme/nvme.c | 12 ++++++++++- lib/nvme/nvme_tcp.c | 22 ++++++++++++++++++++- test/unit/lib/nvme/nvme.c/nvme_ut.c | 3 ++- test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c | 5 +++++ 4 files changed, 39 insertions(+), 3 deletions(-) diff --git a/lib/nvme/nvme.c b/lib/nvme/nvme.c index 169efb814..bd6a0c39b 100644 --- a/lib/nvme/nvme.c +++ b/lib/nvme/nvme.c @@ -104,6 +104,11 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl) status->done = true; } +static void +dummy_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx) +{ +} + /** * Poll qpair for completions until a command completes. 
* @@ -139,7 +144,12 @@ nvme_wait_for_completion_robust_lock_timeout( nvme_robust_mutex_lock(robust_mutex); } - rc = spdk_nvme_qpair_process_completions(qpair, 0); + if (qpair->poll_group) { + rc = (int)spdk_nvme_poll_group_process_completions(qpair->poll_group->group, 0, + dummy_disconnected_qpair_cb); + } else { + rc = spdk_nvme_qpair_process_completions(qpair, 0); + } if (robust_mutex) { nvme_robust_mutex_unlock(robust_mutex); diff --git a/lib/nvme/nvme_tcp.c b/lib/nvme/nvme_tcp.c index 22f3fce7d..5f4950ef2 100644 --- a/lib/nvme/nvme_tcp.c +++ b/lib/nvme/nvme_tcp.c @@ -154,6 +154,8 @@ struct nvme_tcp_req { }; static void nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req); +static int64_t nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group + *tgroup, uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb); static inline struct nvme_tcp_qpair * nvme_tcp_qpair(struct spdk_nvme_qpair *qpair) @@ -1601,6 +1603,11 @@ nvme_tcp_qpair_sock_cb(void *ctx, struct spdk_sock_group *group, struct spdk_soc } } +static void +dummy_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx) +{ +} + static int nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair) { @@ -1626,7 +1633,12 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair) icreq_timeout_tsc = spdk_get_ticks() + (NVME_TCP_TIME_OUT_IN_SECONDS * spdk_get_ticks_hz()); do { - rc = nvme_tcp_qpair_process_completions(&tqpair->qpair, 0); + if (tqpair->qpair.poll_group) { + rc = (int)nvme_tcp_poll_group_process_completions(tqpair->qpair.poll_group, 0, + dummy_disconnected_qpair_cb); + } else { + rc = nvme_tcp_qpair_process_completions(&tqpair->qpair, 0); + } } while ((tqpair->state == NVME_TCP_QPAIR_STATE_INVALID) && (rc == 0) && (spdk_get_ticks() <= icreq_timeout_tsc)); @@ -1704,6 +1716,14 @@ nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpa return rc; } + if (qpair->poll_group) { + rc = 
nvme_poll_group_connect_qpair(qpair); + if (rc) { + SPDK_ERRLOG("Unable to activate the tcp qpair.\n"); + return rc; + } + } + tqpair->maxr2t = NVME_TCP_MAX_R2T_DEFAULT; /* Explicitly set the state and recv_state of tqpair */ tqpair->state = NVME_TCP_QPAIR_STATE_INVALID; diff --git a/test/unit/lib/nvme/nvme.c/nvme_ut.c b/test/unit/lib/nvme/nvme.c/nvme_ut.c index 384133410..3c9f258f0 100644 --- a/test/unit/lib/nvme/nvme.c/nvme_ut.c +++ b/test/unit/lib/nvme/nvme.c/nvme_ut.c @@ -64,7 +64,8 @@ DEFINE_STUB_V(nvme_io_msg_ctrlr_detach, (struct spdk_nvme_ctrlr *ctrlr)); DEFINE_STUB(spdk_nvme_transport_available, bool, (enum spdk_nvme_transport_type trtype), true); DEFINE_STUB(nvme_uevent_connect, int, (void), 1); - +DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group, + uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0); static bool ut_destruct_called = false; void diff --git a/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c b/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c index ed817fe2d..1c4d2e487 100644 --- a/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c +++ b/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c @@ -51,6 +51,11 @@ DEFINE_STUB(spdk_sock_set_priority, DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair), 0); +DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group, + uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0); + +DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0); + static void test_nvme_tcp_pdu_set_data_buf(void) {