nvmf/tcp: Wait for PDUs to release when closing a qpair
In the presence of hardware offload (for data digest) we may not be able
to immediately release all PDUs when freeing a connection. Add a state to
wait for them to finish.

Fixes #2862

Change-Id: I5ecbdad394c0296af6f5c2310d7867dd9de154cb
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16637
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent ceb3747451
commit 43e68a8b1f
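In short: instead of jumping straight to NVME_TCP_PDU_RECV_STATE_ERROR when a connection is being torn down, error paths now move the qpair into a new NVME_TCP_PDU_RECV_STATE_QUIESCING state, and the poller only advances to ERROR once every outstanding PDU has been returned (tracked on the target side by a new tcp_pdu_working_count). A rough, self-contained sketch of that idea follows; the names are simplified stand-ins for the SPDK structures touched by the diff below, not the actual code.

/*
 * Simplified sketch of the quiescing idea (not SPDK code). "qpair",
 * "recv_state" and "pdu_working_count" stand in for the real fields.
 */
#include <stdbool.h>
#include <stdint.h>

enum recv_state {
    RECV_STATE_AWAIT_PDU,   /* normal receive path */
    RECV_STATE_QUIESCING,   /* waiting for outstanding PDUs to complete */
    RECV_STATE_ERROR        /* all PDUs released; teardown may proceed */
};

struct qpair {
    enum recv_state recv_state;
    uint32_t pdu_working_count;   /* PDUs still owned by the transport/offload */
};

/* Error paths call this instead of setting RECV_STATE_ERROR directly. */
static void
qpair_start_quiesce(struct qpair *q)
{
    q->recv_state = RECV_STATE_QUIESCING;
}

/*
 * Called from the poller loop. Returns true once the qpair has fully
 * drained and it is safe to close the connection.
 */
static bool
qpair_poll_quiesce(struct qpair *q)
{
    if (q->recv_state == RECV_STATE_QUIESCING && q->pdu_working_count == 0) {
        q->recv_state = RECV_STATE_ERROR;
    }
    return q->recv_state == RECV_STATE_ERROR;
}

The hunks below apply this pattern to both the host-side nvme/tcp state machine (which waits for its outstanding_reqs list to drain) and the target-side nvmf/tcp code (which waits for tcp_pdu_working_count to reach zero), and update the unit tests to expect the new QUIESCING state.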
@@ -120,6 +120,9 @@ enum nvme_tcp_pdu_recv_state {
     /* Active tqpair waiting for payload */
     NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD,
 
+    /* Active tqpair waiting for all outstanding PDUs to complete */
+    NVME_TCP_PDU_RECV_STATE_QUIESCING,
+
     /* Active tqpair does not wait for payload */
     NVME_TCP_PDU_RECV_STATE_ERROR,
 };
@@ -814,6 +814,11 @@ nvme_tcp_qpair_set_recv_state(struct nvme_tcp_qpair *tqpair,
                 tqpair, state);
         return;
     }
 
+    if (state == NVME_TCP_PDU_RECV_STATE_ERROR) {
+        assert(TAILQ_EMPTY(&tqpair->outstanding_reqs));
+    }
+
     tqpair->recv_state = state;
 }
 
@@ -856,7 +861,7 @@ nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_
 
     /* Contain the header len of the wrong received pdu */
     h2c_term_req->common.plen = h2c_term_req->common.hlen + copy_len;
-    nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+    nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
     nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_send_h2c_term_req_complete, tqpair);
 }
 
@@ -1037,7 +1042,7 @@ nvme_tcp_c2h_term_req_payload_handle(struct nvme_tcp_qpair *tqpair,
                                      struct nvme_tcp_pdu *pdu)
 {
     nvme_tcp_c2h_term_req_dump(&pdu->hdr.term_req);
-    nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+    nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
 }
 
 static void
@@ -1630,7 +1635,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_
                     sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
                     (uint8_t *)&pdu->hdr.common + pdu->ch_valid_bytes);
         if (rc < 0) {
-            nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+            nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
             break;
         }
         pdu->ch_valid_bytes += rc;
@@ -1648,7 +1653,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_
                     pdu->psh_len - pdu->psh_valid_bytes,
                     (uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
         if (rc < 0) {
-            nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+            nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
             break;
         }
 
@@ -1676,7 +1681,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_
 
         rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
         if (rc < 0) {
-            nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+            nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
             break;
         }
 
@@ -1689,6 +1694,11 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_
         /* All of this PDU has now been read from the socket. */
         nvme_tcp_pdu_payload_handle(tqpair, reaped);
         break;
+    case NVME_TCP_PDU_RECV_STATE_QUIESCING:
+        if (TAILQ_EMPTY(&tqpair->outstanding_reqs)) {
+            nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+        }
+        break;
     case NVME_TCP_PDU_RECV_STATE_ERROR:
         memset(pdu, 0, sizeof(struct nvme_tcp_pdu));
         return NVME_TCP_PDU_FATAL;
@@ -263,6 +263,8 @@ struct spdk_nvmf_tcp_qpair {
     TAILQ_HEAD(, spdk_nvmf_tcp_req) tcp_req_working_queue;
     TAILQ_HEAD(, spdk_nvmf_tcp_req) tcp_req_free_queue;
     SLIST_HEAD(, nvme_tcp_pdu) tcp_pdu_free_queue;
+    /* Number of working pdus */
+    uint32_t tcp_pdu_working_count;
 
     /* Number of requests in each state */
     uint32_t state_cntr[TCP_REQUEST_NUM_STATES];
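As a quick orientation for the hunks that follow, the new counter starts at 1 for the pre-allocated pdu_in_progress, goes up when a PDU is pulled from tcp_pdu_free_queue, goes down when a PDU is returned (after payload handling, or when quiescing reclaims pdu_in_progress), and must be zero before the qpair may enter the ERROR state. A tiny, self-contained illustration of that invariant (not code from the tree):

/* Illustration only: mirrors the tcp_pdu_working_count bookkeeping below. */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
    uint32_t tcp_pdu_working_count = 1; /* init: pre-allocated pdu_in_progress */

    tcp_pdu_working_count++;            /* a PDU is taken from tcp_pdu_free_queue */
    tcp_pdu_working_count--;            /* that PDU is returned after payload handling */
    tcp_pdu_working_count--;            /* quiescing reclaims pdu_in_progress */

    assert(tcp_pdu_working_count == 0); /* precondition for the ERROR state */
    return 0;
}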
@@ -897,7 +899,7 @@ nvmf_tcp_qpair_disconnect(struct spdk_nvmf_tcp_qpair *tqpair)
 
     if (tqpair->state <= NVME_TCP_QPAIR_STATE_RUNNING) {
         nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_EXITING);
-        nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+        assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
         spdk_poller_unregister(&tqpair->timeout_poller);
 
         /* This will end up calling nvmf_tcp_close_qpair */
@@ -912,7 +914,7 @@ _mgmt_pdu_write_done(void *_tqpair, int err)
     struct nvme_tcp_pdu *pdu = tqpair->mgmt_pdu;
 
     if (spdk_unlikely(err != 0)) {
-        nvmf_tcp_qpair_disconnect(tqpair);
+        nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
         return;
     }
 
@@ -937,7 +939,7 @@ _req_pdu_write_done(void *req, int err)
     }
 
     if (spdk_unlikely(err != 0)) {
-        nvmf_tcp_qpair_disconnect(tqpair);
+        nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
         return;
     }
 
@@ -1154,6 +1156,7 @@ nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)
     tqpair->mgmt_pdu->qpair = tqpair;
     tqpair->pdu_in_progress = SLIST_FIRST(&tqpair->tcp_pdu_free_queue);
     SLIST_REMOVE_HEAD(&tqpair->tcp_pdu_free_queue, slist);
+    tqpair->tcp_pdu_working_count = 1;
 
     tqpair->recv_buf_size = (in_capsule_data_size + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
                              SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR;
@@ -1457,6 +1460,17 @@ nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair,
         return;
     }
 
+    if (spdk_unlikely(state == NVME_TCP_PDU_RECV_STATE_QUIESCING)) {
+        if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH && tqpair->pdu_in_progress) {
+            SLIST_INSERT_HEAD(&tqpair->tcp_pdu_free_queue, tqpair->pdu_in_progress, slist);
+            tqpair->tcp_pdu_working_count--;
+        }
+    }
+
+    if (spdk_unlikely(state == NVME_TCP_PDU_RECV_STATE_ERROR)) {
+        assert(tqpair->tcp_pdu_working_count == 0);
+    }
+
     if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) {
         /* When leaving the await req state, move the qpair to the main list */
         TAILQ_REMOVE(&tqpair->group->await_req, tqpair, link);
@@ -1527,7 +1541,7 @@ nvmf_tcp_send_c2h_term_req(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_p
 
     /* Contain the header of the wrong received pdu */
     c2h_term_req->common.plen = c2h_term_req->common.hlen + copy_len;
-    nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+    nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
     nvmf_tcp_qpair_write_mgmt_pdu(tqpair, nvmf_tcp_send_c2h_term_req_complete, tqpair);
 }
 
@@ -1556,7 +1570,7 @@ nvmf_tcp_capsule_cmd_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
 
         /* The host sent more commands than the maximum queue depth. */
         SPDK_ERRLOG("Cannot allocate tcp_req on tqpair=%p\n", tqpair);
-        nvmf_tcp_qpair_disconnect(tqpair);
+        nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
         return;
     }
 
@@ -1848,7 +1862,7 @@ nvmf_tcp_h2c_term_req_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair,
     struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req;
 
     nvmf_tcp_h2c_term_req_dump(h2c_term_req);
-    nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+    nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
 }
 
 static void
@@ -1875,6 +1889,7 @@ _nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp
         break;
     }
     SLIST_INSERT_HEAD(&tqpair->tcp_pdu_free_queue, pdu, slist);
+    tqpair->tcp_pdu_working_count--;
 }
 
 static void
@@ -2182,6 +2197,7 @@ nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
             }
             SLIST_REMOVE_HEAD(&tqpair->tcp_pdu_free_queue, slist);
             tqpair->pdu_in_progress = pdu;
+            tqpair->tcp_pdu_working_count++;
         }
         memset(pdu, 0, offsetof(struct nvme_tcp_pdu, qpair));
         nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
@@ -2196,7 +2212,8 @@ nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
                     (void *)&pdu->hdr.common + pdu->ch_valid_bytes);
         if (rc < 0) {
             SPDK_DEBUGLOG(nvmf_tcp, "will disconnect tqpair=%p\n", tqpair);
-            return NVME_TCP_PDU_FATAL;
+            nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
+            break;
         } else if (rc > 0) {
             pdu->ch_valid_bytes += rc;
             spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, tqpair->qpair.qid, rc, 0, tqpair);
@@ -2215,7 +2232,8 @@ nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
                     pdu->psh_len - pdu->psh_valid_bytes,
                     (void *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
         if (rc < 0) {
-            return NVME_TCP_PDU_FATAL;
+            nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
+            break;
         } else if (rc > 0) {
             spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, tqpair->qpair.qid, rc, 0, tqpair);
             pdu->psh_valid_bytes += rc;
@@ -2248,7 +2266,8 @@ nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 
         rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
         if (rc < 0) {
-            return NVME_TCP_PDU_FATAL;
+            nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
+            break;
         }
         pdu->rw_offset += rc;
 
@@ -2261,17 +2280,24 @@ nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
             spdk_dif_generate_stream(pdu->data_iov, pdu->data_iovcnt, 0, data_len,
                                      pdu->dif_ctx) != 0) {
             SPDK_ERRLOG("DIF generate failed\n");
-            return NVME_TCP_PDU_FATAL;
+            nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
+            break;
         }
 
         /* All of this PDU has now been read from the socket. */
         nvmf_tcp_pdu_payload_handle(tqpair, pdu);
         break;
+    case NVME_TCP_PDU_RECV_STATE_QUIESCING:
+        if (tqpair->tcp_pdu_working_count != 0) {
+            return NVME_TCP_PDU_IN_PROGRESS;
+        }
+        nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
+        break;
     case NVME_TCP_PDU_RECV_STATE_ERROR:
         if (!spdk_sock_is_connected(tqpair->sock)) {
             return NVME_TCP_PDU_FATAL;
         }
-        break;
+        return NVME_TCP_PDU_IN_PROGRESS;
     default:
         SPDK_ERRLOG("The state(%d) is invalid\n", tqpair->recv_state);
         abort();
@@ -3215,7 +3241,12 @@ nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
     }
 
     TAILQ_FOREACH_SAFE(tqpair, &tgroup->await_req, link, tqpair_tmp) {
-        nvmf_tcp_sock_process(tqpair);
+        rc = nvmf_tcp_sock_process(tqpair);
+
+        /* If there was a new socket error, disconnect */
+        if (rc < 0) {
+            nvmf_tcp_qpair_disconnect(tqpair);
+        }
     }
 
     return rc;
@@ -874,7 +874,7 @@ test_nvme_tcp_qpair_send_h2c_term_req(void)
     /* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
     pdu.hdr.common.hlen = 64;
     nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
               pdu.hdr.common.hlen);
@@ -883,7 +883,7 @@ test_nvme_tcp_qpair_send_h2c_term_req(void)
     /* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
     pdu.hdr.common.hlen = 255;
     nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == (unsigned)
               tqpair.send_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
@@ -905,7 +905,7 @@ test_nvme_tcp_pdu_ch_handle(void)
     tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
     tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
     nvme_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
@@ -916,7 +916,7 @@ test_nvme_tcp_pdu_ch_handle(void)
     tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
     tqpair.recv_pdu->hdr.common.hlen = 0;
     nvme_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
@@ -928,7 +928,7 @@ test_nvme_tcp_pdu_ch_handle(void)
     tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
     tqpair.recv_pdu->hdr.common.hlen = 0;
     nvme_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
@@ -939,7 +939,7 @@ test_nvme_tcp_pdu_ch_handle(void)
     tqpair.recv_pdu->hdr.common.plen = 0;
     tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
     nvme_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
@@ -951,7 +951,7 @@ test_nvme_tcp_pdu_ch_handle(void)
     tqpair.recv_pdu->hdr.common.plen = 0;
     tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
     nvme_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
@@ -964,7 +964,7 @@ test_nvme_tcp_pdu_ch_handle(void)
     tqpair.recv_pdu->hdr.common.plen = 0;
     tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_rsp);
     nvme_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
@@ -977,7 +977,7 @@ test_nvme_tcp_pdu_ch_handle(void)
     tqpair.recv_pdu->hdr.common.pdo = 64;
     tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
     nvme_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
@@ -989,7 +989,7 @@ test_nvme_tcp_pdu_ch_handle(void)
     tqpair.recv_pdu->hdr.common.plen = 0;
     tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
     nvme_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
@@ -1002,7 +1002,7 @@ test_nvme_tcp_pdu_ch_handle(void)
     tqpair.recv_pdu->hdr.common.plen = 0;
     tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
     nvme_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
@@ -1198,7 +1198,7 @@ test_nvme_tcp_c2h_payload_handle(void)
     pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
     nvme_tcp_c2h_term_req_payload_handle(&tqpair, &pdu);
 
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
 }
 
@@ -1221,7 +1221,7 @@ test_nvme_tcp_icresp_handle(void)
 
     nvme_tcp_icresp_handle(&tqpair, &pdu);
 
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
 
     /* case 2: Expected ICResp maxh2cdata and got are different. */
     pdu.hdr.ic_resp.pfv = 0;
@@ -1229,7 +1229,7 @@ test_nvme_tcp_icresp_handle(void)
 
     nvme_tcp_icresp_handle(&tqpair, &pdu);
 
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
 
     /* case 3: Expected ICResp cpda and got are different. */
     pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
@@ -1237,7 +1237,7 @@ test_nvme_tcp_icresp_handle(void)
 
     nvme_tcp_icresp_handle(&tqpair, &pdu);
 
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
 
     /* case 4: waiting icreq ack. */
     pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
@@ -1322,7 +1322,7 @@ test_nvme_tcp_pdu_payload_handle(void)
 
     recv_pdu.req = &tcp_req;
     nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
 }
 
 static void
@@ -1372,7 +1372,7 @@ test_nvme_tcp_capsule_resp_hdr_handle(void)
 
     nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
     CU_ASSERT(reaped == 0);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     nvme_tcp_free_reqs(&tqpair);
 }
 
@@ -832,11 +832,12 @@ test_nvmf_tcp_send_c2h_term_req(void)
     mgmt_pdu.qpair = &tqpair;
     tqpair.mgmt_pdu = &mgmt_pdu;
     tqpair.pdu_in_progress = &pdu_in_progress;
+    tqpair.tcp_pdu_working_count = 1;
 
     /* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
     pdu.hdr.common.hlen = 64;
     nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
               pdu.hdr.common.hlen);
@@ -846,7 +847,7 @@ test_nvmf_tcp_send_c2h_term_req(void)
     /* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
     pdu.hdr.common.hlen = 255;
     nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
               tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
@@ -912,20 +913,21 @@ test_nvmf_tcp_icreq_handle(void)
     mgmt_pdu.qpair = &tqpair;
     tqpair.mgmt_pdu = &mgmt_pdu;
     tqpair.pdu_in_progress = &pdu_in_progress;
+    tqpair.tcp_pdu_working_count = 1;
 
     /* case 1: Expected ICReq PFV 0 and got are different. */
     pdu.hdr.ic_req.pfv = 1;
 
     nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);
 
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
 
     /* case 2: Expected ICReq HPDA in range 0-31 and got are different. */
     pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;
 
     nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);
 
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
 
     /* case 3: Expect: PASS. */
     ttransport.transport.opts.max_io_size = 32;
@@ -1097,7 +1099,7 @@ test_nvmf_tcp_invalid_sgl(void)
     nvmf_tcp_req_process(&ttransport, &tcp_req);
     CU_ASSERT(!STAILQ_EMPTY(&group->pending_buf_queue));
     CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
 }
 
@@ -1117,7 +1119,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
     tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
@@ -1129,7 +1131,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
     tqpair.pdu_in_progress->hdr.common.hlen = 0;
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
@@ -1142,7 +1144,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
     tqpair.pdu_in_progress->hdr.common.hlen = 0;
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
@@ -1154,7 +1156,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.plen = 0;
     tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
@@ -1167,7 +1169,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.plen = 0;
     tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
@@ -1182,7 +1184,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.plen = 0;
     tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
@@ -1197,7 +1199,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.pdo = 64;
     tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
@@ -1211,7 +1213,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.plen = 0;
     tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
@@ -1228,7 +1230,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.pdo = 63;
     tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
@@ -1244,7 +1246,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
     tqpair.pdu_in_progress->hdr.common.pdo = 63;
     tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
     nvmf_tcp_pdu_ch_handle(&tqpair);
-    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
+    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
     CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +