nvme: enable tcp async qpair connect
This will be done in stages. This patch adds the nvme_tcp_ctrlr_connect_qpair_poll function and makes the icreq step asynchronous. Later patches will expand it and make the nvme_fabric_qpair_connect part asynchronous as well. Signed-off-by: Jim Harris <james.r.harris@intel.com> Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com> Change-Id: Ief06f783049723131cc2469b15ad8300d51b6f32 Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8599 Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com> Community-CI: Mellanox Build Bot Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Reviewed-by: Ben Walker <benjamin.walker@intel.com> Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
This commit is contained in:
parent
1763a126cc
commit
45d63e9882
@ -1795,7 +1795,6 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
|
|||||||
{
|
{
|
||||||
struct spdk_nvme_tcp_ic_req *ic_req;
|
struct spdk_nvme_tcp_ic_req *ic_req;
|
||||||
struct nvme_tcp_pdu *pdu;
|
struct nvme_tcp_pdu *pdu;
|
||||||
int rc;
|
|
||||||
|
|
||||||
pdu = tqpair->send_pdu;
|
pdu = tqpair->send_pdu;
|
||||||
memset(tqpair->send_pdu, 0, sizeof(*tqpair->send_pdu));
|
memset(tqpair->send_pdu, 0, sizeof(*tqpair->send_pdu));
|
||||||
@ -1813,24 +1812,24 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
|
|||||||
nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_send_icreq_complete, tqpair);
|
nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_send_icreq_complete, tqpair);
|
||||||
|
|
||||||
tqpair->icreq_timeout_tsc = spdk_get_ticks() + (NVME_TCP_TIME_OUT_IN_SECONDS * spdk_get_ticks_hz());
|
tqpair->icreq_timeout_tsc = spdk_get_ticks() + (NVME_TCP_TIME_OUT_IN_SECONDS * spdk_get_ticks_hz());
|
||||||
do {
|
return 0;
|
||||||
if (tqpair->qpair.poll_group) {
|
}
|
||||||
rc = (int)nvme_tcp_poll_group_process_completions(tqpair->qpair.poll_group, 0,
|
|
||||||
dummy_disconnected_qpair_cb);
|
|
||||||
} else {
|
|
||||||
rc = nvme_tcp_qpair_process_completions(&tqpair->qpair, 0);
|
|
||||||
}
|
|
||||||
} while ((tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) &&
|
|
||||||
(rc >= 0) && (spdk_get_ticks() <= tqpair->icreq_timeout_tsc));
|
|
||||||
|
|
||||||
if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
|
static int
|
||||||
SPDK_ERRLOG("Failed to construct the tqpair=%p via correct icresp\n", tqpair);
|
nvme_tcp_qpair_icreq_poll(struct nvme_tcp_qpair *tqpair)
|
||||||
return -1;
|
{
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
if (spdk_get_ticks() > tqpair->icreq_timeout_tsc) {
|
||||||
|
rc = -ETIMEDOUT;
|
||||||
|
} else if (tqpair->qpair.poll_group) {
|
||||||
|
rc = nvme_tcp_poll_group_process_completions(tqpair->qpair.poll_group, 0,
|
||||||
|
dummy_disconnected_qpair_cb);
|
||||||
|
} else {
|
||||||
|
rc = nvme_tcp_qpair_process_completions(&tqpair->qpair, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
SPDK_DEBUGLOG(nvme, "Succesfully construct the tqpair=%p via correct icresp\n", tqpair);
|
return rc == 0 ? -EAGAIN : rc;
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
@ -1901,6 +1900,39 @@ nvme_tcp_qpair_connect_sock(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpai
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int
|
||||||
|
nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
|
||||||
|
{
|
||||||
|
struct nvme_tcp_qpair *tqpair;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
tqpair = nvme_tcp_qpair(qpair);
|
||||||
|
|
||||||
|
switch (tqpair->state) {
|
||||||
|
case NVME_TCP_QPAIR_STATE_INVALID:
|
||||||
|
case NVME_TCP_QPAIR_STATE_INITIALIZING:
|
||||||
|
rc = nvme_tcp_qpair_icreq_poll(tqpair);
|
||||||
|
if (rc != 0 && rc != -EAGAIN) {
|
||||||
|
SPDK_ERRLOG("Failed to construct the tqpair=%p via correct icresp\n", tqpair);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case NVME_TCP_QPAIR_STATE_RUNNING:
|
||||||
|
rc = nvme_fabric_qpair_connect(&tqpair->qpair, tqpair->num_entries);
|
||||||
|
if (rc < 0) {
|
||||||
|
SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
assert(false);
|
||||||
|
rc = -EINVAL;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
|
nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
|
||||||
{
|
{
|
||||||
@ -1936,15 +1968,15 @@ nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpa
|
|||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
rc = nvme_fabric_qpair_connect(&tqpair->qpair, tqpair->num_entries);
|
do {
|
||||||
if (rc < 0) {
|
rc = nvme_tcp_ctrlr_connect_qpair_poll(ctrlr, qpair);
|
||||||
SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
|
} while (rc == -EAGAIN);
|
||||||
return rc;
|
|
||||||
|
if (rc != 0) {
|
||||||
|
nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
|
||||||
}
|
}
|
||||||
|
|
||||||
nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
|
return rc;
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct spdk_nvme_qpair *
|
static struct spdk_nvme_qpair *
|
||||||
|
@ -1411,8 +1411,10 @@ test_nvme_tcp_ctrlr_connect_qpair(void)
|
|||||||
tqpair.qpair.ctrlr->opts.data_digest = true;
|
tqpair.qpair.ctrlr->opts.data_digest = true;
|
||||||
TAILQ_INIT(&tqpair.send_queue);
|
TAILQ_INIT(&tqpair.send_queue);
|
||||||
|
|
||||||
|
|
||||||
rc = nvme_tcp_ctrlr_connect_qpair(&ctrlr, qpair);
|
rc = nvme_tcp_ctrlr_connect_qpair(&ctrlr, qpair);
|
||||||
|
while (rc == -EAGAIN) {
|
||||||
|
rc = nvme_tcp_ctrlr_connect_qpair_poll(&ctrlr, qpair);
|
||||||
|
}
|
||||||
|
|
||||||
CU_ASSERT(rc == 0);
|
CU_ASSERT(rc == 0);
|
||||||
CU_ASSERT(tqpair.maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
|
CU_ASSERT(tqpair.maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
|
||||||
|
Loading…
Reference in New Issue
Block a user