nvmf/tcp: Use spdk_mempool_get_bulk in nvmf_tcp_req_fill_iovs

This follows the practice of the RDMA transport and is a preparation to
unify buffer management among transports.
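
A minimal sketch of the bulk-get pattern being adopted, assuming "pool" is an
existing struct spdk_mempool * (such as the transport's data_buf_pool) and that
four buffers are needed; names and values are illustrative only:

	void *bufs[4];

	/* spdk_mempool_get_bulk() is all-or-nothing: it returns 0 only when every
	 * requested element could be reserved and takes nothing from the pool on
	 * failure, which is why the hunk below can jump straight to "nomem" when
	 * the bulk get fails. */
	if (spdk_mempool_get_bulk(pool, bufs, 4) != 0) {
		return -ENOMEM; /* illustrative error path: queue and retry later */
	}

	/* ... use bufs[0..3] ..., then return all of them in one call */
	spdk_mempool_put_bulk(pool, bufs, 4);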

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I4e9b81b2bec813935064a6d49109b6a0365cb950
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465871
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Shuhei Matsumoto 2019-08-21 13:32:51 +09:00 committed by Jim Harris
parent 8aac212005
commit 72c10f7094

@@ -2097,34 +2097,41 @@ static int
 spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
 			    struct spdk_nvmf_tcp_req *tcp_req, uint32_t length)
 {
-	void *buf = NULL;
 	uint32_t i = 0;
+	uint32_t num_buffers;
 	struct spdk_nvmf_tcp_qpair *tqpair;
 	struct spdk_nvmf_transport_poll_group *group;
 
 	tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
 	group = &tqpair->group->group;
 
-	tcp_req->req.iovcnt = 0;
-	while (length) {
+	num_buffers = SPDK_CEIL_DIV(length, ttransport->transport.opts.io_unit_size);
+
+	while (i < num_buffers) {
 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
 			group->buf_cache_count--;
-			buf = STAILQ_FIRST(&group->buf_cache);
+			tcp_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
+			assert(tcp_req->buffers[i] != NULL);
+			i++;
 		} else {
-			buf = spdk_mempool_get(ttransport->transport.data_buf_pool);
-			if (!buf) {
+			if (spdk_mempool_get_bulk(ttransport->transport.data_buf_pool,
+						  &tcp_req->buffers[i], num_buffers - i)) {
 				goto nomem;
 			}
+			i += num_buffers - i;
 		}
+	}
 
-		tcp_req->req.iov[i].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
+	tcp_req->req.iovcnt = 0;
+	while (length) {
+		i = tcp_req->req.iovcnt;
+		tcp_req->req.iov[i].iov_base = (void *)((uintptr_t)(tcp_req->buffers[i] +
+							NVMF_DATA_BUFFER_MASK) &
 							~NVMF_DATA_BUFFER_MASK);
 		tcp_req->req.iov[i].iov_len = spdk_min(length, ttransport->transport.opts.io_unit_size);
 		tcp_req->req.iovcnt++;
-		tcp_req->buffers[i] = buf;
 		length -= tcp_req->req.iov[i].iov_len;
-		i++;
 	}
 
 	assert(tcp_req->req.iovcnt <= SPDK_NVMF_MAX_SGL_ENTRIES);
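
For illustration only, with an assumed io_unit_size of 8192 bytes, a 20 KiB
request maps onto buffers and iov entries as follows (example values, not part
of this change):

	uint32_t length = 20480;      /* example request size: 20 KiB */
	uint32_t io_unit_size = 8192; /* example transport.opts.io_unit_size */

	/* SPDK_CEIL_DIV (from spdk/util.h) rounds up: (20480 + 8191) / 8192 == 3,
	 * so three data buffers are reserved up front, and the iov loop then
	 * carves the request into 8192 + 8192 + 4096 bytes, one buffer per entry. */
	uint32_t num_buffers = SPDK_CEIL_DIV(length, io_unit_size);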