tcp: Use nvmf_request dif structure

Change-Id: I215da84d9f27fbc2614ce70ae36ed024ce107a4d
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Signed-off-by: Sasha Kotchubievsky <sashakot@mellanox.com>
Signed-off-by: Evgenii Kochetov <evgeniik@mellanox.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/470467
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Alexey Marchuk 2019-10-02 07:43:17 +00:00 committed by Jim Harris
parent 5de4274594
commit fcd652f5e3

View File

@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -171,8 +171,6 @@ struct spdk_nvmf_tcp_req {
bool has_incapsule_data;
bool dif_insert_or_strip;
/* transfer_tag */
uint16_t ttag;
@@ -190,10 +188,6 @@ struct spdk_nvmf_tcp_req {
uint32_t c2h_data_offset;
uint32_t c2h_data_pdu_num;
struct spdk_dif_ctx dif_ctx;
uint32_t elba_length;
uint32_t orig_length;
STAILQ_ENTRY(spdk_nvmf_tcp_req) link;
TAILQ_ENTRY(spdk_nvmf_tcp_req) state_link;
};
@@ -371,7 +365,7 @@ spdk_nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair)
tcp_req->r2tl_remain = 0;
tcp_req->c2h_data_offset = 0;
tcp_req->has_incapsule_data = false;
tcp_req->dif_insert_or_strip = false;
memset(&tcp_req->req.dif, 0, sizeof(tcp_req->req.dif));
spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEW);
return tcp_req;
@@ -1476,8 +1470,8 @@ spdk_nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
pdu->ctx = tcp_req;
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
pdu->dif_ctx = &tcp_req->dif_ctx;
if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
pdu->dif_ctx = &tcp_req->req.dif.dif_ctx;
}
nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
@@ -2204,10 +2198,10 @@ spdk_nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req,
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Data requested length= 0x%x\n", length);
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
tcp_req->orig_length = length;
length = spdk_dif_get_length_with_md(length, &tcp_req->dif_ctx);
tcp_req->elba_length = length;
if (spdk_unlikely(req->dif.dif_insert_or_strip)) {
req->dif.orig_length = length;
length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
req->dif.elba_length = length;
}
if (spdk_nvmf_request_get_buffers(req, group, transport, length)) {
@@ -2251,12 +2245,12 @@ spdk_nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req,
req->data_from_pool = false;
req->length = length;
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
length = spdk_dif_get_length_with_md(length, &tcp_req->dif_ctx);
tcp_req->elba_length = length;
if (spdk_unlikely(req->dif.dif_insert_or_strip)) {
length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
req->dif.elba_length = length;
}
req->iov[0].iov_base = tcp_req->req.data;
req->iov[0].iov_base = req->data;
req->iov[0].iov_len = length;
req->iovcnt = 1;
@@ -2343,14 +2337,14 @@ spdk_nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
c2h_data->common.plen = plen;
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
rsp_pdu->dif_ctx = &tcp_req->dif_ctx;
if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
rsp_pdu->dif_ctx = &tcp_req->req.dif.dif_ctx;
}
nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
c2h_data->datao, c2h_data->datal);
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
struct spdk_nvme_cpl *rsp = &tcp_req->req.rsp->nvme_cpl;
struct spdk_dif_error err_blk = {};
@@ -2521,9 +2515,9 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
/* copy the cmd from the receive pdu */
tcp_req->cmd = tqpair->pdu_in_progress.hdr->capsule_cmd.ccsqe;
if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->dif_ctx))) {
tcp_req->dif_insert_or_strip = true;
tqpair->pdu_in_progress.dif_ctx = &tcp_req->dif_ctx;
if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->req.dif.dif_ctx))) {
tcp_req->req.dif.dif_insert_or_strip = true;
tqpair->pdu_in_progress.dif_ctx = &tcp_req->req.dif.dif_ctx;
}
/* The next state transition depends on the data transfer needs of this request. */
@@ -2597,9 +2591,9 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
case TCP_REQUEST_STATE_READY_TO_EXECUTE:
spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, (uintptr_t)tcp_req, 0);
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
assert(tcp_req->elba_length >= tcp_req->req.length);
tcp_req->req.length = tcp_req->elba_length;
if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
assert(tcp_req->req.dif.elba_length >= tcp_req->req.length);
tcp_req->req.length = tcp_req->req.dif.elba_length;
}
spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTING);
@@ -2613,8 +2607,8 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
case TCP_REQUEST_STATE_EXECUTED:
spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, 0, 0, (uintptr_t)tcp_req, 0);
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
tcp_req->req.length = tcp_req->orig_length;
if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
tcp_req->req.length = tcp_req->req.dif.orig_length;
}
spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);