nvme/tcp: Add synchronization for icreq send ack and icresp

A preparation step for enabling zero copy in the NVMe-oF TCP initiator.
We must wait for both events, the icreq send acknowledgment and the
icresp reception, before continuing qpair initialization.
Add a new bit to nvme_tcp_qpair::flags to track reception of the icreq
send ack, since the icreq is sent without a tcp_req and none of the
existing synchronization mechanisms can be applied.
Move the tcp qpair to the initializing state if the icresp arrives before
the icreq ack; this state is checked while handling the icreq ack so that
qpair initialization can continue. The connect wait loop now polls until
the qpair reaches the running state (rather than merely leaving the
invalid state), so the new initializing state is covered as well.
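
To make the ordering concrete, below is a minimal, self-contained C sketch
of the handshake. It is not SPDK code: the names (qpair, on_icresp,
on_icreq_send_ack, icresp_pending) are simplified stand-ins for
nvme_tcp_qpair, nvme_tcp_icresp_handle, nvme_tcp_send_icreq_complete and
the buffered recv_pdu. Whichever event arrives second completes
initialization; if the icresp wins the race, the qpair is parked in the
initializing state and the icresp is replayed from the send-ack callback.

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of the qpair states involved in the handshake. */
enum qpair_state {
	QPAIR_STATE_INVALID,
	QPAIR_STATE_INITIALIZING,	/* icresp arrived before the icreq send ack */
	QPAIR_STATE_RUNNING,
};

struct qpair {
	enum qpair_state state;
	bool icreq_send_ack;	/* mirrors nvme_tcp_qpair::flags.icreq_send_ack */
	bool icresp_pending;	/* stands in for the buffered recv_pdu */
};

/* Runs only once both events have been observed. */
static void finish_init(struct qpair *q)
{
	q->state = QPAIR_STATE_RUNNING;
	printf("qpair running\n");
}

/* icresp handler: defer if the send ack has not been seen yet. */
static void on_icresp(struct qpair *q)
{
	if (!q->icreq_send_ack) {
		q->state = QPAIR_STATE_INITIALIZING;
		q->icresp_pending = true;
		return;
	}
	finish_init(q);
}

/* send-ack callback: replay the buffered icresp if it came first. */
static void on_icreq_send_ack(struct qpair *q)
{
	q->icreq_send_ack = true;
	if (q->state == QPAIR_STATE_INITIALIZING && q->icresp_pending) {
		on_icresp(q);
	}
}

int main(void)
{
	struct qpair q = { .state = QPAIR_STATE_INVALID };

	/* Adverse ordering: icresp is received before the icreq send ack. */
	on_icresp(&q);		/* defers, moves to INITIALIZING */
	on_icreq_send_ack(&q);	/* replays the icresp, qpair goes RUNNING */
	return 0;
}

Either arrival order converges to the same running state, which is why the
connect loop in the patch can simply poll until the qpair is running.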

Change-Id: I7f1ec710d49fb1322eb0a7f133190220b9f585ab
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4207
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>

@@ -92,7 +92,8 @@ struct nvme_tcp_qpair {
 	struct {
 		uint16_t host_hdgst_enable: 1;
 		uint16_t host_ddgst_enable: 1;
-		uint16_t reserved: 14;
+		uint16_t icreq_send_ack: 1;
+		uint16_t reserved: 13;
 	} flags;
 
 	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
@@ -156,6 +157,7 @@ struct nvme_tcp_req {
 static void nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req);
 static int64_t nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group
 		*tgroup, uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
+static void nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu);
 
 static inline struct nvme_tcp_qpair *
 nvme_tcp_qpair(struct spdk_nvme_qpair *qpair)
@@ -981,8 +983,16 @@ nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
 static void
 nvme_tcp_send_icreq_complete(void *cb_arg)
 {
-	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Complete the icreq send for tqpair=%p\n",
-		      (struct nvme_tcp_qpair *)cb_arg);
+	struct nvme_tcp_qpair *tqpair = cb_arg;
+
+	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Complete the icreq send for tqpair=%p\n", tqpair);
+
+	tqpair->flags.icreq_send_ack = true;
+
+	if (tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING) {
+		SPDK_DEBUGLOG(SPDK_LOG_NVME, "qpair %u, finalize icresp\n", tqpair->qpair.id);
+		nvme_tcp_icresp_handle(tqpair, &tqpair->recv_pdu);
+	}
 }
 
 static void
@@ -994,6 +1004,12 @@ nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair,
 	enum spdk_nvme_tcp_term_req_fes fes;
 	int recv_buf_size;
 
+	if (!tqpair->flags.icreq_send_ack) {
+		tqpair->state = NVME_TCP_QPAIR_STATE_INITIALIZING;
+		SPDK_DEBUGLOG(SPDK_LOG_NVME, "qpair %u, waiting icreq ack\n", tqpair->qpair.id);
+		return;
+	}
+
 	/* Only PFV 0 is defined currently */
 	if (ic_resp->pfv != 0) {
 		SPDK_ERRLOG("Expected ICResp PFV %u, got %u\n", 0u, ic_resp->pfv);
@@ -1639,7 +1655,7 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
 		} else {
 			rc = nvme_tcp_qpair_process_completions(&tqpair->qpair, 0);
 		}
-	} while ((tqpair->state == NVME_TCP_QPAIR_STATE_INVALID) &&
+	} while ((tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) &&
 		 (rc == 0) && (spdk_get_ticks() <= icreq_timeout_tsc));
 
 	if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {