nvme/tcp: Process poll_group when waiting for icresp

A preparation step for enabling zero copy in the NVMEoF TCP initiator.
Since nvme_tcp_qpair_process_completions doesn't process the poll
group, we can't get async notifications from the kernel.

1. Add a qpair to its poll group before sending icreq, so that we are
able to process buffer reclaim notifications (the caller-side flow is
sketched after this list).

2. If the qpair belongs to a poll group, call
nvme_tcp_poll_group_process_completions instead of
nvme_tcp_qpair_process_completions when waiting for icresp.

3. Add poll group processing to nvme_wait_for_completion_timeout and
nvme_wait_for_completion_robust_lock, since they are used to process
the FABRIC_CONNECT command.
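
For context, the caller-side flow that makes (1)-(3) necessary looks
roughly like this. This is a minimal sketch against the public API of
that era (it assumes spdk_nvme_poll_group_create() takes only a context
pointer and that the io_qpair_opts create_only flag is available);
error handling is elided:

#include "spdk/nvme.h"

static void
connect_via_poll_group(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_poll_group *pg;
	struct spdk_nvme_qpair *qpair;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.create_only = true;	/* allocate the qpair, but don't connect yet */

	pg = spdk_nvme_poll_group_create(NULL);
	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));

	/* The qpair joins the poll group *before* icreq/icresp and
	 * FABRIC_CONNECT are exchanged, so the connect path itself must
	 * poll through the group -- which is what this commit adds.
	 */
	spdk_nvme_poll_group_add(pg, qpair);
	spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
}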

Change-Id: I38d2d9496bca8d0cd72e44883df2df802e31a87d
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4208
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Alexey Marchuk 2020-09-01 11:37:36 +03:00 committed by Tomasz Zawadzki
parent a85579d8ef
commit bc36528cda
4 changed files with 39 additions and 3 deletions

View File

@@ -104,6 +104,11 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
 	status->done = true;
 }
 
+static void
+dummy_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
+{
+}
+
 /**
  * Poll qpair for completions until a command completes.
  *
@@ -139,7 +144,12 @@ nvme_wait_for_completion_robust_lock_timeout(
 		nvme_robust_mutex_lock(robust_mutex);
 	}
 
-	rc = spdk_nvme_qpair_process_completions(qpair, 0);
+	if (qpair->poll_group) {
+		rc = (int)spdk_nvme_poll_group_process_completions(qpair->poll_group->group, 0,
+				dummy_disconnected_qpair_cb);
+	} else {
+		rc = spdk_nvme_qpair_process_completions(qpair, 0);
+	}
 
 	if (robust_mutex) {
 		nvme_robust_mutex_unlock(robust_mutex);
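
A note on the empty callback: spdk_nvme_poll_group_process_completions()
requires a disconnected_qpair_cb, but on the connect path there is nothing
sensible to do for a qpair that drops, so a no-op is enough. A normal
application callback would look more like the following hypothetical
handler (my_pg_ctx and the cleanup steps are illustrative, not part of
this commit):

struct my_pg_ctx {
	struct spdk_nvme_poll_group *group;
};

static void
on_disconnected_qpair(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
	struct my_pg_ctx *ctx = poll_group_ctx;	/* ctx given at group creation */

	/* Detach the dead qpair from the group, then free it; a real
	 * application might also schedule a reconnect here.
	 */
	spdk_nvme_poll_group_remove(ctx->group, qpair);
	spdk_nvme_ctrlr_free_io_qpair(qpair);
}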

View File

@@ -154,6 +154,8 @@ struct nvme_tcp_req {
 };
 
 static void nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req);
+static int64_t nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group
+		*tgroup, uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
 
 static inline struct nvme_tcp_qpair *
 nvme_tcp_qpair(struct spdk_nvme_qpair *qpair)
@@ -1601,6 +1603,11 @@ nvme_tcp_qpair_sock_cb(void *ctx, struct spdk_sock_group *group, struct spdk_soc
 	}
 }
 
+static void
+dummy_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
+{
+}
+
 static int
 nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
 {
@@ -1626,7 +1633,12 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
 	icreq_timeout_tsc = spdk_get_ticks() + (NVME_TCP_TIME_OUT_IN_SECONDS * spdk_get_ticks_hz());
 	do {
-		rc = nvme_tcp_qpair_process_completions(&tqpair->qpair, 0);
+		if (tqpair->qpair.poll_group) {
+			rc = (int)nvme_tcp_poll_group_process_completions(tqpair->qpair.poll_group, 0,
+					dummy_disconnected_qpair_cb);
+		} else {
+			rc = nvme_tcp_qpair_process_completions(&tqpair->qpair, 0);
+		}
 	} while ((tqpair->state == NVME_TCP_QPAIR_STATE_INVALID) &&
 		 (rc == 0) && (spdk_get_ticks() <= icreq_timeout_tsc));
@@ -1704,6 +1716,14 @@ nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpa
 		return rc;
 	}
 
+	if (qpair->poll_group) {
+		rc = nvme_poll_group_connect_qpair(qpair);
+		if (rc) {
+			SPDK_ERRLOG("Unable to activate the tcp qpair.\n");
+			return rc;
+		}
+	}
+
 	tqpair->maxr2t = NVME_TCP_MAX_R2T_DEFAULT;
 	/* Explicitly set the state and recv_state of tqpair */
 	tqpair->state = NVME_TCP_QPAIR_STATE_INVALID;
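
Taken together, the bring-up ordering for a poll-group qpair can be
restated as the following condensed sketch. This is a hypothetical helper
assembled from the calls visible in this diff against SPDK's internal
interfaces, not the literal nvme_tcp_ctrlr_connect_qpair():

static int
tcp_connect_qpair_sketch(struct nvme_tcp_qpair *tqpair)
{
	struct spdk_nvme_qpair *qpair = &tqpair->qpair;
	int rc;

	/* 1. The TCP socket is already established at this point. */

	/* 2. Attach to the poll group's sock group first, so async kernel
	 *    notifications (e.g. zero-copy buffer reclaim) can be seen
	 *    while the connection is still being brought up.
	 */
	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			return rc;
		}
	}

	/* 3. icreq/icresp exchange; with a poll group this is driven by
	 *    nvme_tcp_poll_group_process_completions() (see the loop above).
	 *    FABRIC_CONNECT follows, completed via nvme_wait_for_completion*(),
	 *    which now also polls the group.
	 */
	return nvme_tcp_qpair_icreq_send(tqpair);
}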

View File

@@ -64,7 +64,8 @@ DEFINE_STUB_V(nvme_io_msg_ctrlr_detach, (struct spdk_nvme_ctrlr *ctrlr));
 DEFINE_STUB(spdk_nvme_transport_available, bool,
 	    (enum spdk_nvme_transport_type trtype), true);
 DEFINE_STUB(nvme_uevent_connect, int, (void), 1);
+DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
+		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
 
 static bool ut_destruct_called = false;
 
 void
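
For readers new to the SPDK unit-test mocks: DEFINE_STUB(name, ret, params,
val) from spdk_internal/mock.h generates a canned implementation so the code
under test links without the real dependency. Conceptually, the stub added
above amounts to something like this (a simplified sketch; the real macro
also lets tests override the return value):

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return 0;	/* the fixed value supplied as the last macro argument */
}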

View File

@@ -51,6 +51,11 @@ DEFINE_STUB(spdk_sock_set_priority,
 DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
 		struct spdk_nvme_qpair *qpair), 0);
+DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
+		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
+DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
+
 static void
 test_nvme_tcp_pdu_set_data_buf(void)
 {