nvmf/tcp: Replace TCP-specific get/free_buffers with common APIs

Use spdk_nvmf_request_get_buffers() and spdk_nvmf_request_free_buffers(),
and then remove spdk_nvmf_tcp_request_free_buffers() and
spdk_nvmf_tcp_request_get_buffers().

Set tcp_req->data_from_pool to false after spdk_nvmf_request_free_buffers().

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I286b48149530c93784a4865b7215b5a33a4dd3c3
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465876
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Shuhei Matsumoto 2019-08-22 12:04:23 +09:00 committed by Ben Walker
parent 85b9e716e9
commit 9968035884
2 changed files with 14 additions and 56 deletions
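
For context, the two common helpers named above belong to the generic nvmf transport layer rather than to tcp.c. Judging from the TCP-specific code removed below and from the stub signatures added to the unit test, spdk_nvmf_request_free_buffers() presumably looks roughly like the following sketch, reconstructed from this diff rather than copied from the upstream source:

/*
 * Sketch of the common free path: the removed spdk_nvmf_tcp_request_free_buffers()
 * with tcp_req->req replaced by a plain struct spdk_nvmf_request.
 */
void
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
                               struct spdk_nvmf_transport_poll_group *group,
                               struct spdk_nvmf_transport *transport,
                               uint32_t num_buffers)
{
    uint32_t i;

    for (i = 0; i < num_buffers; i++) {
        assert(req->buffers[i] != NULL);
        if (group->buf_cache_count < group->buf_cache_size) {
            /* Refill the per-poll-group buffer cache first... */
            STAILQ_INSERT_HEAD(&group->buf_cache,
                               (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
                               link);
            group->buf_cache_count++;
        } else {
            /* ...and return any overflow to the shared mempool. */
            spdk_mempool_put(transport->data_buf_pool, req->buffers[i]);
        }
        req->iov[i].iov_base = NULL;
        req->iov[i].iov_len = 0;
        req->buffers[i] = NULL;
    }
    /*
     * The removed TCP helper also cleared req->data_from_pool here; per the
     * commit message, that flag is now reset after this call returns.
     */
}

spdk_nvmf_request_get_buffers() mirrors the removed spdk_nvmf_tcp_req_get_buffers() in the same way, except that clearing req->iovcnt on failure becomes the caller's job (see the second hunk below).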


@@ -2158,59 +2158,6 @@ spdk_nvmf_tcp_req_get_xfer(struct spdk_nvmf_tcp_req *tcp_req)
     return xfer;
 }
 
-static void
-spdk_nvmf_tcp_request_free_buffers(struct spdk_nvmf_tcp_req *tcp_req,
-        struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_transport *transport,
-        uint32_t num_buffers)
-{
-    for (uint32_t i = 0; i < num_buffers; i++) {
-        assert(tcp_req->req.buffers[i] != NULL);
-        if (group->buf_cache_count < group->buf_cache_size) {
-            STAILQ_INSERT_HEAD(&group->buf_cache,
-                               (struct spdk_nvmf_transport_pg_cache_buf *)tcp_req->req.buffers[i], link);
-            group->buf_cache_count++;
-        } else {
-            spdk_mempool_put(transport->data_buf_pool, tcp_req->req.buffers[i]);
-        }
-        tcp_req->req.iov[i].iov_base = NULL;
-        tcp_req->req.buffers[i] = NULL;
-        tcp_req->req.iov[i].iov_len = 0;
-    }
-    tcp_req->req.data_from_pool = false;
-}
-
-static int
-spdk_nvmf_tcp_req_get_buffers(struct spdk_nvmf_tcp_req *tcp_req,
-                              struct spdk_nvmf_transport_poll_group *group,
-                              struct spdk_nvmf_transport *transport,
-                              uint32_t num_buffers)
-{
-    uint32_t i = 0;
-
-    while (i < num_buffers) {
-        if (!(STAILQ_EMPTY(&group->buf_cache))) {
-            group->buf_cache_count--;
-            tcp_req->req.buffers[i] = STAILQ_FIRST(&group->buf_cache);
-            STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-            assert(tcp_req->req.buffers[i] != NULL);
-            i++;
-        } else {
-            if (spdk_mempool_get_bulk(transport->data_buf_pool,
-                                      &tcp_req->req.buffers[i], num_buffers - i)) {
-                goto nomem;
-            }
-            i += num_buffers - i;
-        }
-    }
-
-    return 0;
-
-nomem:
-    spdk_nvmf_tcp_request_free_buffers(tcp_req, group, transport, i);
-    tcp_req->req.iovcnt = 0;
-    return -ENOMEM;
-}
-
 static void
 spdk_nvmf_tcp_req_fill_buffers(struct spdk_nvmf_tcp_req *tcp_req,
                                struct spdk_nvmf_transport *transport,
@@ -2246,7 +2193,8 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
     num_buffers = SPDK_CEIL_DIV(length, ttransport->transport.opts.io_unit_size);
 
-    if (spdk_nvmf_tcp_req_get_buffers(tcp_req, group, &ttransport->transport, num_buffers)) {
+    if (spdk_nvmf_request_get_buffers(&tcp_req->req, group, &ttransport->transport, num_buffers)) {
+        tcp_req->req.iovcnt = 0;
         return -ENOMEM;
     }
@@ -2705,8 +2653,8 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
     case TCP_REQUEST_STATE_COMPLETED:
         spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, 0);
         if (tcp_req->req.data_from_pool) {
-            spdk_nvmf_tcp_request_free_buffers(tcp_req, group, &ttransport->transport,
-                                               tcp_req->req.iovcnt);
+            spdk_nvmf_request_free_buffers(&tcp_req->req, group, &ttransport->transport,
+                                           tcp_req->req.iovcnt);
         }
         tcp_req->req.length = 0;
         tcp_req->req.iovcnt = 0;

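Taken together, the two call-site hunks above reduce to the pattern below. This is a condensed sketch: tcp_req_alloc_data() and tcp_req_release_data() are hypothetical names standing in for the spdk_nvmf_tcp_req_fill_iovs() and TCP_REQUEST_STATE_COMPLETED code paths, with the surrounding state-machine logic elided.

/* Allocation side (the spdk_nvmf_tcp_req_fill_iovs() path). */
static int
tcp_req_alloc_data(struct spdk_nvmf_tcp_transport *ttransport,
                   struct spdk_nvmf_transport_poll_group *group,
                   struct spdk_nvmf_tcp_req *tcp_req, uint32_t length)
{
    uint32_t num_buffers;

    /* One data buffer per io_unit_size chunk of the payload. */
    num_buffers = SPDK_CEIL_DIV(length, ttransport->transport.opts.io_unit_size);

    if (spdk_nvmf_request_get_buffers(&tcp_req->req, group,
                                      &ttransport->transport, num_buffers)) {
        /* The common helper does not reset iovcnt on failure, so the
         * caller now does it (second hunk above). */
        tcp_req->req.iovcnt = 0;
        return -ENOMEM;
    }
    return 0;
}

/* Release side (the TCP_REQUEST_STATE_COMPLETED path). */
static void
tcp_req_release_data(struct spdk_nvmf_tcp_transport *ttransport,
                     struct spdk_nvmf_transport_poll_group *group,
                     struct spdk_nvmf_tcp_req *tcp_req)
{
    if (tcp_req->req.data_from_pool) {
        spdk_nvmf_request_free_buffers(&tcp_req->req, group,
                                       &ttransport->transport,
                                       tcp_req->req.iovcnt);
    }
    tcp_req->req.length = 0;
    tcp_req->req.iovcnt = 0;
}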

@@ -167,6 +167,16 @@ DEFINE_STUB(spdk_nvmf_transport_req_complete,
             (struct spdk_nvmf_request *req),
             0);
 
+DEFINE_STUB(spdk_nvmf_request_get_buffers,
+            int,
+            (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
+             struct spdk_nvmf_transport *transport, uint32_t num_buffers),
+            0);
+
+DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
+            (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
+             struct spdk_nvmf_transport *transport, uint32_t num_buffers));
+
 DEFINE_STUB(spdk_sock_get_optimal_sock_group,
             int,
             (struct spdk_sock *sock, struct spdk_sock_group **group),
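
The unit test only needs linkable definitions of the new common APIs, which is what DEFINE_STUB and DEFINE_STUB_V from SPDK's unit-test mock framework provide. Conceptually, the two stubs added above amount to something like this simplified sketch (an assumption about what the macros generate, not their exact expansion):

/*
 * Rough equivalent of the added stubs: get_buffers always reports success
 * and free_buffers is a no-op, so tcp.c can be unit-tested without the
 * real generic-transport buffer management.
 */
int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
                              struct spdk_nvmf_transport_poll_group *group,
                              struct spdk_nvmf_transport *transport,
                              uint32_t num_buffers)
{
    return 0;
}

void
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
                               struct spdk_nvmf_transport_poll_group *group,
                               struct spdk_nvmf_transport *transport,
                               uint32_t num_buffers)
{
}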