nvmf/rdma: Add spdk_nvmf_request_get_buffers_multi() for multi SGL case

This patch completes the effort to unify buffer allocation among the
NVMe-oF transports.

For the multi SGL case, it aggregates multiple calls to
spdk_nvmf_request_get_buffers() into a single call to the new
spdk_nvmf_request_get_buffers_multi().

As a side effect, zeroing req->iovcnt can be moved into
spdk_nvmf_request_get_buffers() and spdk_nvmf_request_get_buffers_multi(),
and this patch does so.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I728bd330a1f533019957d58e06831a79fc17e382
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/469206
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Shuhei Matsumoto 2019-09-24 13:21:13 +09:00 committed by Jim Harris
parent c0ee8ef7d5
commit 0462157650
4 changed files with 110 additions and 22 deletions
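
For readers skimming the diff below, the calling pattern enabled by the new
helper looks roughly like the following. This is a condensed sketch of the
rdma.c hunk in this commit, not additional patched code; req, rgroup,
rtransport, rdma_req, desc and num_sgl_descriptors are the variables already
present in nvmf_rdma_request_fill_iovs_multi_sgl().

	uint32_t lengths[SPDK_NVMF_MAX_SGL_ENTRIES];
	uint32_t i;
	int rc;

	/* Record the length of each keyed SGL descriptor. */
	for (i = 0; i < num_sgl_descriptors; i++) {
		lengths[i] = desc->keyed.length;
		desc++;
	}

	/* One call allocates buffers for every descriptor. On failure it frees
	 * any buffers it already took from the pool and returns -ENOMEM, so the
	 * caller only has to undo its own transport-specific state. */
	rc = spdk_nvmf_request_get_buffers_multi(req, &rgroup->group, &rtransport->transport,
						 lengths, num_sgl_descriptors);
	if (rc != 0) {
		nvmf_rdma_request_free_data(rdma_req, rtransport);
		return -ENOMEM;
	}

Because cleanup of partially allocated buffers now happens inside the helper,
the transport's error path only needs to release its own WR bookkeeping
(nvmf_rdma_request_free_data() here).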

@@ -391,6 +391,10 @@ int spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 				  struct spdk_nvmf_transport_poll_group *group,
 				  struct spdk_nvmf_transport *transport,
 				  uint32_t length);
+int spdk_nvmf_request_get_buffers_multi(struct spdk_nvmf_request *req,
+					struct spdk_nvmf_transport_poll_group *group,
+					struct spdk_nvmf_transport *transport,
+					uint32_t *lengths, uint32_t num_lengths);
 bool spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx *dif_ctx);

@@ -1673,7 +1673,6 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 	rqpair = SPDK_CONTAINEROF(req->qpair, struct spdk_nvmf_rdma_qpair, qpair);
 	rgroup = rqpair->poller->group;
-	req->iovcnt = 0;
 	if (spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
 					  length)) {
@@ -1710,6 +1709,7 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 	struct spdk_nvmf_request *req = &rdma_req->req;
 	struct spdk_nvme_sgl_descriptor *inline_segment, *desc;
 	uint32_t num_sgl_descriptors;
+	uint32_t lengths[SPDK_NVMF_MAX_SGL_ENTRIES];
 	uint32_t i;
 	int rc;
@@ -1727,11 +1727,23 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 		return -ENOMEM;
 	}
+	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
+	for (i = 0; i < num_sgl_descriptors; i++) {
+		lengths[i] = desc->keyed.length;
+		desc++;
+	}
+
+	rc = spdk_nvmf_request_get_buffers_multi(req, &rgroup->group, &rtransport->transport,
+						 lengths, num_sgl_descriptors);
+	if (rc != 0) {
+		nvmf_rdma_request_free_data(rdma_req, rtransport);
+		return -ENOMEM;
+	}
+
 	/* The first WR must always be the embedded data WR. This is how we unwind them later. */
 	current_wr = &rdma_req->data.wr;
 	assert(current_wr != NULL);
-	req->iovcnt = 0;
 	req->length = 0;
 	rdma_req->iovpos = 0;
 	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
@@ -1743,12 +1755,6 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 			goto err_exit;
 		}
-		rc = spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
-						   desc->keyed.length);
-		if (rc != 0) {
-			goto err_exit;
-		}
 		current_wr->num_sge = 0;
 		rc = nvmf_rdma_fill_wr_sgl(rgroup, device, rdma_req, current_wr, desc->keyed.length);

@@ -389,11 +389,11 @@ spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 	req->data_from_pool = false;
 }
-int
-spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
-			      struct spdk_nvmf_transport_poll_group *group,
-			      struct spdk_nvmf_transport *transport,
-			      uint32_t length)
+static int
+nvmf_request_get_buffers(struct spdk_nvmf_request *req,
+			 struct spdk_nvmf_transport_poll_group *group,
+			 struct spdk_nvmf_transport *transport,
+			 uint32_t length)
 {
 	uint32_t num_buffers;
 	uint32_t i = 0;
@@ -418,7 +418,7 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 		if (spdk_mempool_get_bulk(transport->data_buf_pool,
 					  &req->buffers[req->num_buffers],
 					  num_buffers - i)) {
-			goto err_exit;
+			return -ENOMEM;
 		}
 		req->num_buffers += num_buffers - i;
 		i += num_buffers - i;
@@ -436,8 +436,47 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 	req->data_from_pool = true;
 	return 0;
+}
+
+int
+spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
+			      struct spdk_nvmf_transport_poll_group *group,
+			      struct spdk_nvmf_transport *transport,
+			      uint32_t length)
+{
+	int rc;
+
+	req->iovcnt = 0;
+
+	rc = nvmf_request_get_buffers(req, group, transport, length);
+	if (rc == -ENOMEM) {
+		spdk_nvmf_request_free_buffers(req, group, transport);
+	}
+
+	return rc;
+}
+
+int
+spdk_nvmf_request_get_buffers_multi(struct spdk_nvmf_request *req,
+				    struct spdk_nvmf_transport_poll_group *group,
+				    struct spdk_nvmf_transport *transport,
+				    uint32_t *lengths, uint32_t num_lengths)
+{
+	int rc = 0;
+	uint32_t i;
+
+	req->iovcnt = 0;
+
+	for (i = 0; i < num_lengths; i++) {
+		rc = nvmf_request_get_buffers(req, group, transport, lengths[i]);
+		if (rc != 0) {
+			goto err_exit;
+		}
+	}
+
+	return 0;
+
 err_exit:
 	spdk_nvmf_request_free_buffers(req, group, transport);
-	return -ENOMEM;
+	return rc;
 }

@@ -103,11 +103,11 @@ spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 	req->data_from_pool = false;
 }
-int
-spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
-			      struct spdk_nvmf_transport_poll_group *group,
-			      struct spdk_nvmf_transport *transport,
-			      uint32_t length)
+static int
+nvmf_request_get_buffers(struct spdk_nvmf_request *req,
+			 struct spdk_nvmf_transport_poll_group *group,
+			 struct spdk_nvmf_transport *transport,
+			 uint32_t length)
 {
 	uint32_t num_buffers;
 	uint32_t i = 0;
@@ -131,7 +131,7 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 		if (spdk_mempool_get_bulk(transport->data_buf_pool,
 					  &req->buffers[req->num_buffers],
 					  num_buffers - i)) {
-			goto err_exit;
+			return -ENOMEM;
 		}
 		req->num_buffers += num_buffers - i;
 		i += num_buffers - i;
@@ -149,10 +149,49 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 	req->data_from_pool = true;
 	return 0;
+}
+
+int
+spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
+			      struct spdk_nvmf_transport_poll_group *group,
+			      struct spdk_nvmf_transport *transport,
+			      uint32_t length)
+{
+	int rc;
+
+	req->iovcnt = 0;
+
+	rc = nvmf_request_get_buffers(req, group, transport, length);
+	if (rc == -ENOMEM) {
+		spdk_nvmf_request_free_buffers(req, group, transport);
+	}
+
+	return rc;
+}
+
+int
+spdk_nvmf_request_get_buffers_multi(struct spdk_nvmf_request *req,
+				    struct spdk_nvmf_transport_poll_group *group,
+				    struct spdk_nvmf_transport *transport,
+				    uint32_t *lengths, uint32_t num_lengths)
+{
+	int rc = 0;
+	uint32_t i;
+
+	req->iovcnt = 0;
+
+	for (i = 0; i < num_lengths; i++) {
+		rc = nvmf_request_get_buffers(req, group, transport, lengths[i]);
+		if (rc != 0) {
+			goto err_exit;
+		}
+	}
+
+	return 0;
+
 err_exit:
 	spdk_nvmf_request_free_buffers(req, group, transport);
-	return -ENOMEM;
+	return rc;
 }
 uint64_t