From eab7360bcb2a128f2e02ffda167324fc1a337d9f Mon Sep 17 00:00:00 2001
From: Shuhei Matsumoto
Date: Thu, 22 Aug 2019 09:13:07 +0900
Subject: [PATCH] nvmf/tcp: Factor out getting and filling buffers from
 nvmf_tcp_req_fill_iovs

This follows the practice of RDMA transport and is a preparation to
unify buffer allocation among transports.

Signed-off-by: Shuhei Matsumoto
Change-Id: Ib85625f2a0eca01ef4028685dd838d6c41faad7b
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465872
Tested-by: SPDK CI Jenkins
Reviewed-by: Jim Harris
Reviewed-by: Broadcom SPDK FC-NVMe CI
Reviewed-by: Ben Walker
---
 lib/nvmf/tcp.c | 59 +++++++++++++++++++++++++++++++++++---------------
 1 file changed, 42 insertions(+), 17 deletions(-)

diff --git a/lib/nvmf/tcp.c b/lib/nvmf/tcp.c
index 5617fd43e..3f4c92914 100644
--- a/lib/nvmf/tcp.c
+++ b/lib/nvmf/tcp.c
@@ -2094,18 +2094,12 @@ spdk_nvmf_tcp_request_free_buffers(struct spdk_nvmf_tcp_req *tcp_req,
 }
 
 static int
-spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
-			    struct spdk_nvmf_tcp_req *tcp_req, uint32_t length)
+spdk_nvmf_tcp_req_get_buffers(struct spdk_nvmf_tcp_req *tcp_req,
+			      struct spdk_nvmf_transport_poll_group *group,
+			      struct spdk_nvmf_transport *transport,
+			      uint32_t num_buffers)
 {
 	uint32_t i = 0;
-	uint32_t num_buffers;
-	struct spdk_nvmf_tcp_qpair *tqpair;
-	struct spdk_nvmf_transport_poll_group *group;
-
-	tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
-	group = &tqpair->group->group;
-
-	num_buffers = SPDK_CEIL_DIV(length, ttransport->transport.opts.io_unit_size);
 
 	while (i < num_buffers) {
 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
@@ -2115,7 +2109,7 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
 			assert(tcp_req->buffers[i] != NULL);
 			i++;
 		} else {
-			if (spdk_mempool_get_bulk(ttransport->transport.data_buf_pool,
+			if (spdk_mempool_get_bulk(transport->data_buf_pool,
 						  &tcp_req->buffers[i], num_buffers - i)) {
 				goto nomem;
 			}
@@ -2123,25 +2117,56 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
 		}
 	}
 
+	return 0;
+
+nomem:
+	spdk_nvmf_tcp_request_free_buffers(tcp_req, group, transport, i);
+	tcp_req->req.iovcnt = 0;
+	return -ENOMEM;
+}
+
+static void
+spdk_nvmf_tcp_req_fill_buffers(struct spdk_nvmf_tcp_req *tcp_req,
+			       struct spdk_nvmf_transport *transport,
+			       uint32_t length)
+{
+	uint32_t i = 0;
+
 	tcp_req->req.iovcnt = 0;
 	while (length) {
 		i = tcp_req->req.iovcnt;
 		tcp_req->req.iov[i].iov_base = (void *)((uintptr_t)(tcp_req->buffers[i] +
 						NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK);
-		tcp_req->req.iov[i].iov_len = spdk_min(length, ttransport->transport.opts.io_unit_size);
+		tcp_req->req.iov[i].iov_len = spdk_min(length, transport->opts.io_unit_size);
 		tcp_req->req.iovcnt++;
 		length -= tcp_req->req.iov[i].iov_len;
 	}
 
 	assert(tcp_req->req.iovcnt <= SPDK_NVMF_MAX_SGL_ENTRIES);
 	tcp_req->data_from_pool = true;
+}
 
-	return 0;
+static int
+spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
+			    struct spdk_nvmf_tcp_req *tcp_req, uint32_t length)
+{
+	uint32_t num_buffers;
+	struct spdk_nvmf_tcp_qpair *tqpair;
+	struct spdk_nvmf_transport_poll_group *group;
 
-nomem:
-	spdk_nvmf_tcp_request_free_buffers(tcp_req, group, &ttransport->transport, i);
-	tcp_req->req.iovcnt = 0;
-	return -ENOMEM;
+	tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
+	group = &tqpair->group->group;
+
+	num_buffers = SPDK_CEIL_DIV(length, ttransport->transport.opts.io_unit_size);
+
+	if (spdk_nvmf_tcp_req_get_buffers(tcp_req, group,
+					  &ttransport->transport, num_buffers)) {
+		return -ENOMEM;
+	}
+
+	spdk_nvmf_tcp_req_fill_buffers(tcp_req, &ttransport->transport, length);
+
+	return 0;
 }
 
 static int