nvme/rdma: inline buffers for all host to ctrlr ops

Not inlining all host to controller operations breaks the target within
the context of fused commands. This issue was discovered when enabling
the compare-and-write fused command. Only the write command buffer was
being inlined which caused the write to jump the compare in the
transport specific state machine on the target side before our fused
command checks in the generic code.

Change-Id: I9e52ae6160e01ffd36d20429ffc8459491c729ef
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/482001
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Seth Howell 2020-01-17 10:00:02 -07:00 committed by Tomasz Zawadzki
parent 2a8281fdfd
commit 9436ab59ba

View File

@@ -1529,7 +1529,8 @@ nvme_rdma_req_init(struct nvme_rdma_qpair *rqpair, struct nvme_request *req,
* targets use icdoff = 0. For targets with non-zero icdoff, we
* will currently just not use inline data for now.
*/
-		if (req->cmd.opc == SPDK_NVME_OPC_WRITE &&
+		if (spdk_nvme_opc_get_data_transfer(req->cmd.opc) ==
+		    SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
req->payload_size <= nvme_rdma_icdsz_bytes(ctrlr) &&
(ctrlr->cdata.nvmf_specific.icdoff == 0)) {
rc = nvme_rdma_build_contig_inline_request(rqpair, rdma_req);
@@ -1537,7 +1538,8 @@ nvme_rdma_req_init(struct nvme_rdma_qpair *rqpair, struct nvme_request *req,
rc = nvme_rdma_build_contig_request(rqpair, rdma_req);
}
} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
-		if (req->cmd.opc == SPDK_NVME_OPC_WRITE &&
+		if (spdk_nvme_opc_get_data_transfer(req->cmd.opc) ==
+		    SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
req->payload_size <= nvme_rdma_icdsz_bytes(ctrlr) &&
ctrlr->cdata.nvmf_specific.icdoff == 0) {
rc = nvme_rdma_build_sgl_inline_request(rqpair, rdma_req);