nvme_rdma: don't send split sgl requests inline.
In order to truly support multi-sgl inline requests in the RDMA
transport, we would need to increase the size of the
spdk_nvme_rdma_req object dramatically. This is because we would need
enough ibv_sge objects in it to cover the maximum number of SGEs
supported by the target (16 for the SPDK target). Instead of doing
that, or creating a new pool of shared ibv_sge objects for that case,
just send split multi-sgl requests through the regular sgl path.
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/452266 (master)
(cherry picked from commit eb6006c242)
Change-Id: I78313bd88f3ed1cea3b772d9476a00087f49a4dd
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/457581
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
parent 850a5b642a
commit d353ce1982
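Before the diff, a note on the mechanics: the old code looped over payload
SGEs, appending one ibv_sge per segment to the inline send request; the new
code inspects only the first segment and, if it does not cover the whole
payload, falls back to nvme_rdma_build_sgl_request(). A minimal,
self-contained sketch of that dispatch decision (toy names for
illustration, not SPDK API):

#include <stdint.h>
#include <stdio.h>

enum send_path { SEND_PATH_INLINE, SEND_PATH_SGL };

/* Toy model of the new check: an inline send now carries at most one
 * data SGE, so a payload whose first segment is shorter than the total
 * payload size must take the regular SGL path instead. */
static enum send_path
choose_send_path(uint32_t first_segment_len, uint32_t payload_size)
{
	if (first_segment_len < payload_size) {
		return SEND_PATH_SGL;     /* split payload: don't send inline */
	}
	return SEND_PATH_INLINE;          /* one segment covers everything */
}

int main(void)
{
	/* 4 KiB payload in a single 4 KiB segment -> inline. */
	printf("inline? %d\n", choose_send_path(4096, 4096) == SEND_PATH_INLINE);
	/* 4 KiB payload whose first segment is only 2 KiB -> regular SGL path. */
	printf("sgl? %d\n", choose_send_path(2048, 4096) == SEND_PATH_SGL);
	return 0;
}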
@@ -1137,7 +1137,6 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
 	struct ibv_mr *mr;
 	uint32_t length;
 	uint64_t requested_size;
-	uint32_t remaining_payload;
 	void *virt_addr;
 	int rc, i;
 
@@ -1147,17 +1146,18 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
 	assert(req->payload.next_sge_fn != NULL);
 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
 
-	remaining_payload = req->payload_size;
-	rdma_req->send_wr.num_sge = 1;
-
-	do {
-		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
-		if (rc) {
-			return -1;
-		}
+	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
+	if (rc) {
+		return -1;
+	}
 
-		if (length > remaining_payload) {
-			length = remaining_payload;
-		}
+	if (length < req->payload_size) {
+		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Inline SGL request split so sending separately.\n");
+		return nvme_rdma_build_sgl_request(rqpair, rdma_req);
+	}
 
-		requested_size = length;
+	if (length > req->payload_size) {
+		length = req->payload_size;
+	}
+
+	requested_size = length;
@@ -1176,18 +1176,11 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
 		return -1;
 	}
 
-		rdma_req->send_sgl[rdma_req->send_wr.num_sge].addr = (uint64_t)virt_addr;
-		rdma_req->send_sgl[rdma_req->send_wr.num_sge].length = length;
-		rdma_req->send_sgl[rdma_req->send_wr.num_sge].lkey = mr->lkey;
-		rdma_req->send_wr.num_sge++;
-
-		remaining_payload -= length;
-	} while (remaining_payload && rdma_req->send_wr.num_sge < (int64_t)rqpair->max_send_sge);
-
-	if (remaining_payload) {
-		SPDK_ERRLOG("Unable to prepare request. Too many SGL elements\n");
-		return -1;
-	}
+	rdma_req->send_sgl[1].addr = (uint64_t)virt_addr;
+	rdma_req->send_sgl[1].length = length;
+	rdma_req->send_sgl[1].lkey = mr->lkey;
+
+	rdma_req->send_wr.num_sge = 2;
 
 	/* The first element of this SGL is pointing at an
 	 * spdk_nvmf_cmd object. For this particular command,
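The net effect on the wire request: the inline path now always builds
exactly two SGEs, sge[0] for the spdk_nvmf_cmd and sge[1] for the single
contiguous payload segment, with num_sge fixed at 2 rather than grown in a
loop. A hedged sketch of that WR shape using the verbs structures (the
helper and its parameters are invented for illustration, not the SPDK
source):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

/* Sketch only: fills a send WR the way the inline path now shapes it.
 * sgl[0] describes the command buffer, sgl[1] the whole inline payload. */
static void
fill_inline_send_wr(struct ibv_send_wr *wr, struct ibv_sge sgl[2],
		    void *cmd, uint32_t cmd_len, uint32_t cmd_lkey,
		    void *payload, uint32_t payload_len, uint32_t payload_lkey)
{
	sgl[0].addr   = (uint64_t)(uintptr_t)cmd;      /* spdk_nvmf_cmd object */
	sgl[0].length = cmd_len;
	sgl[0].lkey   = cmd_lkey;

	sgl[1].addr   = (uint64_t)(uintptr_t)payload;  /* single data segment */
	sgl[1].length = payload_len;
	sgl[1].lkey   = payload_lkey;                  /* mr->lkey in the diff */

	memset(wr, 0, sizeof(*wr));
	wr->opcode  = IBV_WR_SEND;
	wr->sg_list = sgl;
	wr->num_sge = 2;   /* fixed at 2; the old code incremented per segment */
}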