nvmf/rdma: Replace RDMA specific get/free_buffers by common APIs

Use spdk_nvmf_request_get_buffers() and spdk_nvmf_request_free_buffers(),
and then remove spdk_nvmf_rdma_request_free_buffers() and
nvmf_rdma_request_get_buffers().

Set rdma_req->data_from_pool to false after
spdk_nvmf_request_free_buffers().

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ie1fc4c261c3197c8299761655bf3138eebcea3bc
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465875
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Shuhei Matsumoto 2019-08-22 11:21:18 +09:00 committed by Ben Walker
parent cc4d1f82cc
commit 85b9e716e9
2 changed files with 62 additions and 59 deletions
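
As context for the diff below: the change replaces the RDMA-private buffer helpers with the generic spdk_nvmf_request_get_buffers()/spdk_nvmf_request_free_buffers() pair. The following is a minimal sketch of how a transport path might drive those two helpers. The helper signatures and the req->length, opts.io_unit_size, data_from_pool and iovcnt fields come from the diff itself; the function names example_fill_request()/example_release_request(), the nvmf_internal.h include and the "map into WRs" step are illustrative assumptions, not part of this commit.

#include "spdk/stdinc.h"
#include "spdk/util.h"		/* SPDK_CEIL_DIV() */
#include "nvmf_internal.h"	/* assumed to declare the common buffer helpers */

/* Illustrative sketch: allocate one pooled data buffer per io_unit_size chunk
 * of the request, as the RDMA fill-iovs path does, and release them on free. */
static int
example_fill_request(struct spdk_nvmf_request *req,
		     struct spdk_nvmf_transport_poll_group *group,
		     struct spdk_nvmf_transport *transport)
{
	uint32_t num_buffers;

	num_buffers = SPDK_CEIL_DIV(req->length, transport->opts.io_unit_size);

	/* Fills req->buffers[] from the poll-group cache first, then bulk-allocates
	 * the remainder from transport->data_buf_pool; rolls back on failure. */
	if (spdk_nvmf_request_get_buffers(req, group, transport, num_buffers) != 0) {
		return -ENOMEM;
	}

	/* ... transport-specific work: map req->buffers[] into iovs/WRs ... */

	return 0;
}

static void
example_release_request(struct spdk_nvmf_request *req,
			struct spdk_nvmf_transport_poll_group *group,
			struct spdk_nvmf_transport *transport)
{
	if (req->data_from_pool) {
		/* Hands each buffer back to the poll-group cache or the mempool. */
		spdk_nvmf_request_free_buffers(req, group, transport, req->iovcnt);
		/* Per the commit message, the caller resets data_from_pool afterwards. */
		req->data_from_pool = false;
	}
}

The common helpers keep the cache/mempool bookkeeping in one place, so each transport can drop its private copy of the same logic, as the rdma.c hunks below do; the second file adds local definitions of the two helpers next to its existing DEFINE_STUB entries.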

@@ -1354,58 +1354,6 @@ spdk_nvmf_rdma_check_contiguous_entries(uint64_t addr_1, uint64_t addr_2)
 	return addr_1 == addr_2;
 }
 
-static void
-spdk_nvmf_rdma_request_free_buffers(struct spdk_nvmf_rdma_request *rdma_req,
-				    struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_transport *transport,
-				    uint32_t num_buffers)
-{
-	uint32_t i;
-
-	for (i = 0; i < num_buffers; i++) {
-		if (group->buf_cache_count < group->buf_cache_size) {
-			STAILQ_INSERT_HEAD(&group->buf_cache,
-					   (struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->req.buffers[i], link);
-			group->buf_cache_count++;
-		} else {
-			spdk_mempool_put(transport->data_buf_pool, rdma_req->req.buffers[i]);
-		}
-		rdma_req->req.iov[i].iov_base = NULL;
-		rdma_req->req.buffers[i] = NULL;
-		rdma_req->req.iov[i].iov_len = 0;
-	}
-	rdma_req->req.data_from_pool = false;
-}
-
-static int
-nvmf_rdma_request_get_buffers(struct spdk_nvmf_rdma_request *rdma_req,
-			      struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_transport *transport,
-			      uint32_t num_buffers)
-{
-	uint32_t i = 0;
-
-	while (i < num_buffers) {
-		if (!(STAILQ_EMPTY(&group->buf_cache))) {
-			group->buf_cache_count--;
-			rdma_req->req.buffers[i] = STAILQ_FIRST(&group->buf_cache);
-			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-			assert(rdma_req->req.buffers[i] != NULL);
-			i++;
-		} else {
-			if (spdk_mempool_get_bulk(transport->data_buf_pool, &rdma_req->req.buffers[i], num_buffers - i)) {
-				goto err_exit;
-			}
-			i += num_buffers - i;
-		}
-	}
-
-	return 0;
-
-err_exit:
-	spdk_nvmf_rdma_request_free_buffers(rdma_req, group, transport, i);
-	return -ENOMEM;
-}
 
 typedef enum spdk_nvme_data_transfer spdk_nvme_data_transfer_t;
 
 static spdk_nvme_data_transfer_t
@@ -1575,7 +1523,8 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 	num_buffers = SPDK_CEIL_DIV(rdma_req->req.length, rtransport->transport.opts.io_unit_size);
 
-	if (nvmf_rdma_request_get_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers)) {
+	if (spdk_nvmf_request_get_buffers(&rdma_req->req, &rgroup->group, &rtransport->transport,
+					  num_buffers)) {
 		return -ENOMEM;
 	}
@@ -1594,7 +1543,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 	return rc;
 
 err_exit:
-	spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers);
+	spdk_nvmf_request_free_buffers(&rdma_req->req, &rgroup->group, &rtransport->transport, num_buffers);
 	while (i) {
 		i--;
 		rdma_req->data.wr.sg_list[i].addr = 0;
@@ -1639,13 +1588,13 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transpor
 	if (num_buffers > NVMF_REQ_MAX_BUFFERS) {
 		return -EINVAL;
 	}
 
-	if (nvmf_rdma_request_get_buffers(rdma_req, &rgroup->group, &rtransport->transport,
+	if (spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
 					  num_buffers) != 0) {
 		return -ENOMEM;
 	}
 
 	if (nvmf_request_alloc_wrs(rtransport, rdma_req, num_sgl_descriptors - 1) != 0) {
-		spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers);
+		spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport, num_buffers);
 		return -ENOMEM;
 	}
@@ -1696,7 +1645,7 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transpor
 	return 0;
 
 err_exit:
-	spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers);
+	spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport, num_buffers);
 	nvmf_rdma_request_free_data(rdma_req, rtransport);
 	return rc;
 }
@@ -1837,7 +1786,7 @@ nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
 	if (rdma_req->req.data_from_pool) {
 		rgroup = rqpair->poller->group;
-		spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport,
+		spdk_nvmf_request_free_buffers(&rdma_req->req, &rgroup->group, &rtransport->transport,
 					       rdma_req->req.iovcnt);
 	}
 	nvmf_rdma_request_free_data(rdma_req, rtransport);

@@ -76,6 +76,60 @@ DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transpo
 	    const struct spdk_nvme_transport_id *trid2), 0);
 
 DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
 
+void
+spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
+			       struct spdk_nvmf_transport_poll_group *group,
+			       struct spdk_nvmf_transport *transport,
+			       uint32_t num_buffers)
+{
+	uint32_t i;
+
+	for (i = 0; i < num_buffers; i++) {
+		if (group->buf_cache_count < group->buf_cache_size) {
+			STAILQ_INSERT_HEAD(&group->buf_cache,
+					   (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
+					   link);
+			group->buf_cache_count++;
+		} else {
+			spdk_mempool_put(transport->data_buf_pool, req->buffers[i]);
+		}
+		req->iov[i].iov_base = NULL;
+		req->buffers[i] = NULL;
+		req->iov[i].iov_len = 0;
+	}
+	req->data_from_pool = false;
+}
+
+int
+spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
+			      struct spdk_nvmf_transport_poll_group *group,
+			      struct spdk_nvmf_transport *transport,
+			      uint32_t num_buffers)
+{
+	uint32_t i = 0;
+
+	while (i < num_buffers) {
+		if (!(STAILQ_EMPTY(&group->buf_cache))) {
+			group->buf_cache_count--;
+			req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
+			i++;
+		} else {
+			if (spdk_mempool_get_bulk(transport->data_buf_pool, &req->buffers[i],
+						  num_buffers - i)) {
+				goto err_exit;
+			}
+			i += num_buffers - i;
+		}
+	}
+
+	return 0;
+
+err_exit:
+	spdk_nvmf_request_free_buffers(req, group, transport, i);
+	return -ENOMEM;
+}
+
 uint64_t
 spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
 {