nvmf/tcp: Factor out getting and filling buffers from nvmf_tcp_req_fill_iovs

This follows the practice of the RDMA transport and is a preparation for
unifying buffer allocation among transports.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ib85625f2a0eca01ef4028685dd838d6c41faad7b
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465872
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Authored by Shuhei Matsumoto on 2019-08-22 09:13:07 +09:00; committed by Jim Harris
parent 72c10f7094
commit eab7360bcb
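
For orientation, here is a toy model of the allocation step being factored out: buffers come from the poll group's buf_cache first, then in bulk from the transport-wide data_buf_pool, and anything already taken is handed back on failure. The SPDK types, the STAILQ cache, and the spdk_mempool API are replaced by plain arrays; this is only an illustrative sketch of the control flow shown in the diff below, not SPDK code.

/* Toy model of the factored-out "get buffers" step: per-group cache first,
 * then a bulk grab from the shared pool; on failure nothing is kept.
 * Arrays stand in for the STAILQ buf_cache and spdk_mempool. */
#include <errno.h>

#define TOY_MAX_BUFFERS 8

struct toy_group { void *buf_cache[TOY_MAX_BUFFERS]; int buf_cache_count; };
struct toy_pool  { void *bufs[TOY_MAX_BUFFERS]; int count; };

static int
toy_get_buffers(struct toy_group *group, struct toy_pool *pool,
                void **buffers, int num_buffers)
{
        int i = 0;

        while (i < num_buffers) {
                if (group->buf_cache_count > 0) {
                        /* Cheap path: reuse a buffer cached on this poll group. */
                        buffers[i++] = group->buf_cache[--group->buf_cache_count];
                } else {
                        /* Fallback: take all remaining buffers from the pool at once. */
                        int need = num_buffers - i;

                        if (pool->count < need) {
                                /* "nomem" path: put back whatever was already taken
                                 * (simplified rollback) and report failure. */
                                while (i-- > 0) {
                                        pool->bufs[pool->count++] = buffers[i];
                                }
                                return -ENOMEM;
                        }
                        while (need-- > 0) {
                                buffers[i++] = pool->bufs[--pool->count];
                        }
                }
        }
        return 0;
}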


@@ -2094,18 +2094,12 @@ spdk_nvmf_tcp_request_free_buffers(struct spdk_nvmf_tcp_req *tcp_req,
 }
 
 static int
-spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
-                            struct spdk_nvmf_tcp_req *tcp_req, uint32_t length)
+spdk_nvmf_tcp_req_get_buffers(struct spdk_nvmf_tcp_req *tcp_req,
+                              struct spdk_nvmf_transport_poll_group *group,
+                              struct spdk_nvmf_transport *transport,
+                              uint32_t num_buffers)
 {
         uint32_t i = 0;
-        uint32_t num_buffers;
-        struct spdk_nvmf_tcp_qpair *tqpair;
-        struct spdk_nvmf_transport_poll_group *group;
-
-        tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
-        group = &tqpair->group->group;
-
-        num_buffers = SPDK_CEIL_DIV(length, ttransport->transport.opts.io_unit_size);
 
         while (i < num_buffers) {
                 if (!(STAILQ_EMPTY(&group->buf_cache))) {
@@ -2115,7 +2109,7 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
                         assert(tcp_req->buffers[i] != NULL);
                         i++;
                 } else {
-                        if (spdk_mempool_get_bulk(ttransport->transport.data_buf_pool,
+                        if (spdk_mempool_get_bulk(transport->data_buf_pool,
                                                   &tcp_req->buffers[i], num_buffers - i)) {
                                 goto nomem;
                         }
@@ -2123,25 +2117,56 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
                 }
         }
 
+        return 0;
+
+nomem:
+        spdk_nvmf_tcp_request_free_buffers(tcp_req, group, transport, i);
+        tcp_req->req.iovcnt = 0;
+        return -ENOMEM;
+}
+
+static void
+spdk_nvmf_tcp_req_fill_buffers(struct spdk_nvmf_tcp_req *tcp_req,
+                               struct spdk_nvmf_transport *transport,
+                               uint32_t length)
+{
+        uint32_t i = 0;
+        tcp_req->req.iovcnt = 0;
+
         while (length) {
                 i = tcp_req->req.iovcnt;
                 tcp_req->req.iov[i].iov_base = (void *)((uintptr_t)(tcp_req->buffers[i] +
                                                NVMF_DATA_BUFFER_MASK) &
                                                ~NVMF_DATA_BUFFER_MASK);
-                tcp_req->req.iov[i].iov_len = spdk_min(length, ttransport->transport.opts.io_unit_size);
+                tcp_req->req.iov[i].iov_len = spdk_min(length, transport->opts.io_unit_size);
                 tcp_req->req.iovcnt++;
                 length -= tcp_req->req.iov[i].iov_len;
         }
 
         assert(tcp_req->req.iovcnt <= SPDK_NVMF_MAX_SGL_ENTRIES);
         tcp_req->data_from_pool = true;
-
-        return 0;
-
-nomem:
-        spdk_nvmf_tcp_request_free_buffers(tcp_req, group, &ttransport->transport, i);
-        tcp_req->req.iovcnt = 0;
-        return -ENOMEM;
 }
 
+static int
+spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
+                            struct spdk_nvmf_tcp_req *tcp_req, uint32_t length)
+{
+        uint32_t num_buffers;
+        struct spdk_nvmf_tcp_qpair *tqpair;
+        struct spdk_nvmf_transport_poll_group *group;
+
+        tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
+        group = &tqpair->group->group;
+
+        num_buffers = SPDK_CEIL_DIV(length, ttransport->transport.opts.io_unit_size);
+
+        if (spdk_nvmf_tcp_req_get_buffers(tcp_req, group, &ttransport->transport, num_buffers)) {
+                return -ENOMEM;
+        }
+
+        spdk_nvmf_tcp_req_fill_buffers(tcp_req, &ttransport->transport, length);
+
+        return 0;
+}
+
 static int
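
As a worked example of the two calculations the helpers rely on: num_buffers is the request length rounded up to whole io_unit_size buffers, and each iov_base is the raw buffer pointer rounded up to the data-buffer alignment. The 8 KiB io_unit_size and 4 KiB alignment below are illustrative values only, not taken from this commit; SPDK takes io_unit_size from the transport opts and defines SPDK_CEIL_DIV and NVMF_DATA_BUFFER_MASK elsewhere.

/* Worked example of the buffer-count and alignment math used above. */
#include <stdint.h>
#include <stdio.h>

#define TOY_CEIL_DIV(n, d)  (((n) + (d) - 1) / (d))   /* same idea as SPDK_CEIL_DIV */
#define TOY_BUFFER_ALIGN    4096u                     /* example alignment */
#define TOY_BUFFER_MASK     (TOY_BUFFER_ALIGN - 1)

int main(void)
{
        uint32_t io_unit_size = 8192;   /* example transport opts.io_unit_size */
        uint32_t length = 20480;        /* 20 KiB request */

        /* 20480 / 8192 rounded up -> 3 buffers; the iov entries then get
         * 8192, 8192 and 4096 bytes (spdk_min(length, io_unit_size) per pass). */
        uint32_t num_buffers = TOY_CEIL_DIV(length, io_unit_size);

        /* (buf + MASK) & ~MASK rounds a raw pointer up to the next aligned
         * address, as fill_buffers does for iov_base. */
        uintptr_t raw = 0x100f20;
        uintptr_t aligned = (raw + TOY_BUFFER_MASK) & ~(uintptr_t)TOY_BUFFER_MASK;

        printf("num_buffers=%u, aligned=0x%jx\n", num_buffers, (uintmax_t)aligned);
        return 0;
}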