nvmf/fc: Factor out getting and filling buffers from nvmf_fc_request_alloc_buffers

This follows the practice of the RDMA transport and is a preparation to
unify buffer allocation among transports.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I3cd4377ae31e47bbde697837be2d9bc1b1b582f1
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465869
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Anil Veerabhadrappa <anil.veerabhadrappa@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Shuhei Matsumoto 2019-08-21 12:03:34 +09:00 committed by Jim Harris
parent 71ae39594f
commit 5437470cdc

View File

@@ -1297,16 +1297,12 @@ complete:
}
static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
nvmf_fc_request_get_buffers(struct spdk_nvmf_fc_request *fc_req,
struct spdk_nvmf_transport_poll_group *group,
struct spdk_nvmf_transport *transport,
uint32_t num_buffers)
{
uint32_t length = fc_req->req.length;
uint32_t num_buffers;
uint32_t i = 0;
struct spdk_nvmf_fc_poll_group *fc_poll_group = fc_req->hwqp->fc_poll_group;
struct spdk_nvmf_transport_poll_group *group = &fc_poll_group->tp_poll_group;
struct spdk_nvmf_transport *transport = &fc_poll_group->fc_transport->transport;
num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);
while (i < num_buffers) {
if (!(STAILQ_EMPTY(&group->buf_cache))) {
@@ -1323,6 +1319,18 @@ nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
i += num_buffers - i;
}
}
return 0;
err_exit:
nvmf_fc_request_free_buffers(fc_req, group, transport, i);
return -ENOMEM;
}
static void
nvmf_fc_request_fill_buffers(struct spdk_nvmf_fc_request *fc_req,
struct spdk_nvmf_transport *transport, uint32_t length)
{
uint32_t i;
fc_req->req.iovcnt = 0;
@@ -1336,12 +1344,26 @@ nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
length -= fc_req->req.iov[i].iov_len;
}
fc_req->data_from_pool = true;
}
/*
 * Allocate and fill the data buffers for an FC request.
 *
 * Computes how many io_unit_size buffers are needed to cover the
 * request's transfer length, acquires them via
 * nvmf_fc_request_get_buffers(), and maps them into the request's
 * iovec via nvmf_fc_request_fill_buffers().
 *
 * Returns 0 on success, -ENOMEM if buffers could not be obtained
 * (get_buffers releases any partially acquired buffers itself).
 */
static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
{
	uint32_t length = fc_req->req.length;
	uint32_t num_buffers;
	struct spdk_nvmf_fc_poll_group *fc_poll_group = fc_req->hwqp->fc_poll_group;
	struct spdk_nvmf_transport_poll_group *group = &fc_poll_group->tp_poll_group;
	struct spdk_nvmf_transport *transport = &fc_poll_group->fc_transport->transport;

	/* Round up so a partial final io_unit still gets a full buffer. */
	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);

	if (nvmf_fc_request_get_buffers(fc_req, group, transport, num_buffers)) {
		return -ENOMEM;
	}

	nvmf_fc_request_fill_buffers(fc_req, transport, length);

	return 0;
	/* NOTE(review): removed the stale "err_exit:" tail that followed the
	 * unconditional return above — it was unreachable and referenced the
	 * undeclared variable 'i' (leftover from the pre-refactor version;
	 * cleanup now lives inside nvmf_fc_request_get_buffers()). */
}
static int