rdma: Use nvmf_request dif structure

Change-Id: I1f8eb4300f892905f18ccb6327a5abc30dc44de3
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Signed-off-by: Sasha Kotchubievsky <sashakot@mellanox.com>
Signed-off-by: Evgenii Kochetov <evgeniik@mellanox.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/470468
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author: Alexey Marchuk, 2019-10-02 08:10:22 +00:00 (committed by Jim Harris)
parent fcd652f5e3
commit e1101529e5
2 changed files with 20 additions and 30 deletions
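
The diff below moves the per-request DIF (Data Integrity Field) state out of the RDMA-specific struct spdk_nvmf_rdma_request and into the transport-agnostic struct spdk_nvmf_request, presumably so the same bookkeeping can be shared across transports. For context, a minimal sketch of the shared state this change assumes, reconstructed only from the field references visible in the hunks (req->dif.dif_ctx, req->dif.dif_insert_or_strip, req->dif.elba_length, req->dif.orig_length); the actual definition lives in the nvmf library internals:

    /* Sketch only, not the actual SPDK definition. */
    #include "spdk/dif.h"   /* struct spdk_dif_ctx; pulls in stdbool/stdint via spdk/stdinc.h */

    struct spdk_nvmf_request {
        /* ... other members elided ... */
        struct {
            struct spdk_dif_ctx dif_ctx;   /* DIF format: block size, metadata size, guard seed, ... */
            bool     dif_insert_or_strip;  /* target inserts DIF on writes, strips it on reads */
            uint32_t elba_length;          /* extended length: data plus interleaved metadata */
            uint32_t orig_length;          /* data length originally requested by the host */
        } dif;
        /* ... */
    };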


@@ -269,11 +269,6 @@ struct spdk_nvmf_rdma_request {
 	uint32_t num_outstanding_data_wr;
 	uint64_t receive_tsc;
-	struct spdk_dif_ctx dif_ctx;
-	bool dif_insert_or_strip;
-	uint32_t elba_length;
-	uint32_t orig_length;
 	STAILQ_ENTRY(spdk_nvmf_rdma_request) state_link;
 };
@@ -1627,8 +1622,8 @@ nvmf_rdma_fill_wr_sgl(struct spdk_nvmf_rdma_poll_group *rgroup,
 	uint32_t remaining_data_block = 0;
 	uint32_t offset = 0;
-	if (spdk_unlikely(rdma_req->dif_insert_or_strip)) {
-		dif_ctx = &rdma_req->dif_ctx;
+	if (spdk_unlikely(rdma_req->req.dif.dif_insert_or_strip)) {
+		dif_ctx = &rdma_req->req.dif.dif_ctx;
 		remaining_data_block = dif_ctx->block_size - dif_ctx->md_size;
 	}
@@ -1830,10 +1825,10 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
 	/* fill request length and populate iovs */
 	req->length = length;
-	if (spdk_unlikely(rdma_req->dif_insert_or_strip)) {
-		rdma_req->orig_length = length;
-		length = spdk_dif_get_length_with_md(length, &rdma_req->dif_ctx);
-		rdma_req->elba_length = length;
+	if (spdk_unlikely(req->dif.dif_insert_or_strip)) {
+		req->dif.orig_length = length;
+		length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
+		req->dif.elba_length = length;
 	}
 	if (spdk_nvmf_rdma_request_fill_iovs(rtransport, device, rdma_req, length) < 0) {
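
For concreteness, a worked example of the length bookkeeping above, using hypothetical format values (512-byte blocks, each carrying 8 bytes of interleaved metadata):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical DIF format: 512-byte blocks + 8 bytes of metadata each. */
        uint32_t block_size = 512, md_size = 8;
        uint32_t orig_length = 4096;                    /* host transfer: 8 data blocks */
        uint32_t num_blocks = orig_length / block_size; /* 8 */
        uint32_t elba_length = num_blocks * (block_size + md_size);

        /* Prints 4160: the value spdk_dif_get_length_with_md() computes here
         * and req->dif.elba_length stores; req->dif.orig_length keeps 4096 so
         * the original length can be restored on completion. */
        printf("%u\n", elba_length);
        return 0;
    }
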
@@ -1945,10 +1940,7 @@ nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
 	rdma_req->req.data = NULL;
 	rdma_req->rsp.wr.next = NULL;
 	rdma_req->data.wr.next = NULL;
-	rdma_req->dif_insert_or_strip = false;
-	rdma_req->elba_length = 0;
-	rdma_req->orig_length = 0;
-	memset(&rdma_req->dif_ctx, 0, sizeof(rdma_req->dif_ctx));
+	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
 	rqpair->qd--;
 	STAILQ_INSERT_HEAD(&rqpair->resources->free_queue, rdma_req, state_link);
@@ -2014,8 +2006,8 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			break;
 		}
-		if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&rdma_req->req, &rdma_req->dif_ctx))) {
-			rdma_req->dif_insert_or_strip = true;
+		if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&rdma_req->req, &rdma_req->req.dif.dif_ctx))) {
+			rdma_req->req.dif.dif_insert_or_strip = true;
 		}
 		/* The next state transition depends on the data transfer needs of this request. */
@@ -2106,14 +2098,14 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 		spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE, 0, 0,
 				  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
-		if (spdk_unlikely(rdma_req->dif_insert_or_strip)) {
+		if (spdk_unlikely(rdma_req->req.dif.dif_insert_or_strip)) {
 			if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
 				/* generate DIF for write operation */
-				num_blocks = SPDK_CEIL_DIV(rdma_req->elba_length, rdma_req->dif_ctx.block_size);
+				num_blocks = SPDK_CEIL_DIV(rdma_req->req.dif.elba_length, rdma_req->req.dif.dif_ctx.block_size);
 				assert(num_blocks > 0);
 				rc = spdk_dif_generate(rdma_req->req.iov, rdma_req->req.iovcnt,
-						       num_blocks, &rdma_req->dif_ctx);
+						       num_blocks, &rdma_req->req.dif.dif_ctx);
 				if (rc != 0) {
 					SPDK_ERRLOG("DIF generation failed\n");
 					rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
@@ -2122,9 +2114,9 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 				}
 			}
-			assert(rdma_req->elba_length >= rdma_req->req.length);
+			assert(rdma_req->req.dif.elba_length >= rdma_req->req.length);
 			/* set extended length before IO operation */
-			rdma_req->req.length = rdma_req->elba_length;
+			rdma_req->req.length = rdma_req->req.dif.elba_length;
 		}
 		rdma_req->state = RDMA_REQUEST_STATE_EXECUTING;
@@ -2145,17 +2137,17 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 		} else {
 			rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
 		}
-		if (spdk_unlikely(rdma_req->dif_insert_or_strip)) {
+		if (spdk_unlikely(rdma_req->req.dif.dif_insert_or_strip)) {
 			/* restore the original length */
-			rdma_req->req.length = rdma_req->orig_length;
+			rdma_req->req.length = rdma_req->req.dif.orig_length;
 			if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
 				struct spdk_dif_error error_blk;
-				num_blocks = SPDK_CEIL_DIV(rdma_req->elba_length, rdma_req->dif_ctx.block_size);
-				rc = spdk_dif_verify(rdma_req->req.iov, rdma_req->req.iovcnt, num_blocks, &rdma_req->dif_ctx,
-						     &error_blk);
+				num_blocks = SPDK_CEIL_DIV(rdma_req->req.dif.elba_length, rdma_req->req.dif.dif_ctx.block_size);
+				rc = spdk_dif_verify(rdma_req->req.iov, rdma_req->req.iovcnt, num_blocks,
+						     &rdma_req->req.dif.dif_ctx, &error_blk);
 				if (rc) {
 					struct spdk_nvme_cpl *rsp = &rdma_req->req.rsp->nvme_cpl;
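
Together with the generate hunk above, this keeps the DIF handling symmetric around request execution: protection information is generated into the request iovecs before a write executes, and verified after a read completes. A condensed sketch of that symmetry (hypothetical helper, not an SPDK function; error handling trimmed):

    #include "spdk/dif.h"
    #include "spdk/util.h"   /* SPDK_CEIL_DIV */

    static int
    dif_guard_io(struct iovec *iov, int iovcnt, const struct spdk_dif_ctx *dif_ctx,
                 uint32_t elba_length, bool is_write)
    {
        struct spdk_dif_error err_blk;
        /* Same block count the diff computes via SPDK_CEIL_DIV(). */
        uint32_t num_blocks = SPDK_CEIL_DIV(elba_length, dif_ctx->block_size);

        if (is_write) {
            /* Host-to-controller path: insert protection information. */
            return spdk_dif_generate(iov, iovcnt, num_blocks, dif_ctx);
        }
        /* Controller-to-host path: check protection information. */
        return spdk_dif_verify(iov, iovcnt, num_blocks, dif_ctx, &err_blk);
    }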


@@ -106,9 +106,7 @@ static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
 	rdma_req->data.wr.num_sge = 0;
 	rdma_req->data.wr.wr.rdma.remote_addr = 0;
 	rdma_req->data.wr.wr.rdma.rkey = 0;
-	rdma_req->elba_length = 0;
-	rdma_req->orig_length = 0;
-	rdma_req->dif_insert_or_strip = false;
+	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
 	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
 		rdma_req->req.iov[i].iov_base = 0;