nvmf: Hold number of allocated buffers in struct spdk_nvmf_request

This patch makes it possible for the multi-SGL case to call
spdk_nvmf_request_get_buffers() once per WR.
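
As an illustration (not part of this patch), here is a minimal sketch of the
per-WR calling pattern this enables. The helper name and the num_wrs and
buffers_per_wr parameters are hypothetical, and the trailing count argument of
spdk_nvmf_request_get_buffers() is assumed from the function body shown in the
diff below:

/*
 * Hypothetical caller sketch: because the request now remembers how many
 * buffers it holds (req->num_buffers), every WR of a multi-SGL request can
 * acquire its own buffers, and a single spdk_nvmf_request_free_buffers()
 * call releases everything acquired so far on the error path.
 * Declarations come from the nvmf internal header touched by this patch.
 */
static int
nvmf_fill_iovs_per_wr_sketch(struct spdk_nvmf_request *req,
			     struct spdk_nvmf_transport_poll_group *group,
			     struct spdk_nvmf_transport *transport,
			     uint32_t num_wrs, uint32_t buffers_per_wr)
{
	uint32_t i;

	for (i = 0; i < num_wrs; i++) {
		/* Appends to req->buffers[] starting at req->num_buffers. */
		if (spdk_nvmf_request_get_buffers(req, group, transport,
						  buffers_per_wr) != 0) {
			/* No count argument needed; the free walks
			 * req->num_buffers and then resets it to zero. */
			spdk_nvmf_request_free_buffers(req, group, transport);
			return -ENOMEM;
		}

		/* ...build this WR's SG list from the buffers just added... */
	}

	return 0;
}

Because req->num_buffers persists across calls, the error path no longer has
to track how many buffers each individual call acquired.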

This patch also contains an unrelated fix that clears req->iovcnt in
reset_nvmf_rdma_request() in the unit tests. That fix could go into a
separate patch, but it is included here because it is very small.
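
For reference, a trimmed sketch of that helper after the fix; the real helper
resets many more fields (see the rdma unit-test hunk below):

static void
reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	/* ...existing resets of the request and WR fields (see the hunk below)... */

	rdma_req->req.iovcnt = 0;	/* previously left stale between test cases */
	rdma_req->req.num_buffers = 0;	/* new field introduced by this patch */
}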

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: If6e5af0505fb199c95ef5d0522b579242a7cef29
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468942
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Shuhei Matsumoto 2019-09-24 10:01:53 +09:00 committed by Jim Harris
parent 410455e40b
commit 79945ef0ed
7 changed files with 29 additions and 23 deletions

View File

@@ -1483,7 +1483,7 @@ spdk_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
 /* Release IO buffers */
 if (fc_req->req.data_from_pool) {
-spdk_nvmf_request_free_buffers(&fc_req->req, group, transport, fc_req->req.iovcnt);
+spdk_nvmf_request_free_buffers(&fc_req->req, group, transport);
 }
 fc_req->req.data = NULL;
 fc_req->req.iovcnt = 0;

View File

@@ -213,6 +213,7 @@ struct spdk_nvmf_request {
 union nvmf_h2c_msg *cmd;
 union nvmf_c2h_msg *rsp;
 void *buffers[NVMF_REQ_MAX_BUFFERS];
+uint32_t num_buffers;
 struct iovec iov[NVMF_REQ_MAX_BUFFERS];
 uint32_t iovcnt;
 bool data_from_pool;
@@ -385,8 +386,7 @@ int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);
 void spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 struct spdk_nvmf_transport_poll_group *group,
-struct spdk_nvmf_transport *transport,
-uint32_t num_buffers);
+struct spdk_nvmf_transport *transport);
 int spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 struct spdk_nvmf_transport_poll_group *group,
 struct spdk_nvmf_transport *transport,

View File

@@ -1712,7 +1712,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 return rc;
 err_exit:
-spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport, num_buffers);
+spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport);
 memset(wr->sg_list, 0, sizeof(wr->sg_list[0]) * wr->num_sge);
 wr->num_sge = 0;
 req->iovcnt = 0;
@@ -1813,7 +1813,7 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transpor
 return 0;
 err_exit:
-spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport, num_buffers);
+spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport);
 nvmf_rdma_request_free_data(rdma_req, rtransport);
 return rc;
 }
@@ -1962,8 +1962,7 @@ nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
 if (rdma_req->req.data_from_pool) {
 rgroup = rqpair->poller->group;
-spdk_nvmf_request_free_buffers(&rdma_req->req, &rgroup->group, &rtransport->transport,
-rdma_req->req.iovcnt);
+spdk_nvmf_request_free_buffers(&rdma_req->req, &rgroup->group, &rtransport->transport);
 }
 nvmf_rdma_request_free_data(rdma_req, rtransport);
 rdma_req->req.length = 0;

View File

@@ -2677,8 +2677,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 case TCP_REQUEST_STATE_COMPLETED:
 spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, 0);
 if (tcp_req->req.data_from_pool) {
-spdk_nvmf_request_free_buffers(&tcp_req->req, group, &ttransport->transport,
-tcp_req->req.iovcnt);
+spdk_nvmf_request_free_buffers(&tcp_req->req, group, &ttransport->transport);
 }
 tcp_req->req.length = 0;
 tcp_req->req.iovcnt = 0;

View File

@@ -368,12 +368,11 @@ spdk_nvmf_transport_poll_group_free_stat(struct spdk_nvmf_transport *transport,
 void
 spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 struct spdk_nvmf_transport_poll_group *group,
-struct spdk_nvmf_transport *transport,
-uint32_t num_buffers)
+struct spdk_nvmf_transport *transport)
 {
 uint32_t i;
-for (i = 0; i < num_buffers; i++) {
+for (i = 0; i < req->num_buffers; i++) {
 if (group->buf_cache_count < group->buf_cache_size) {
 STAILQ_INSERT_HEAD(&group->buf_cache,
 (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
@@ -386,6 +385,7 @@ spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 req->buffers[i] = NULL;
 req->iov[i].iov_len = 0;
 }
+req->num_buffers = 0;
 req->data_from_pool = false;
 }
@@ -400,15 +400,18 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 while (i < num_buffers) {
 if (!(STAILQ_EMPTY(&group->buf_cache))) {
 group->buf_cache_count--;
-req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+req->buffers[req->num_buffers] = STAILQ_FIRST(&group->buf_cache);
 STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-assert(req->buffers[i] != NULL);
+assert(req->buffers[req->num_buffers] != NULL);
+req->num_buffers++;
 i++;
 } else {
-if (spdk_mempool_get_bulk(transport->data_buf_pool, &req->buffers[i],
+if (spdk_mempool_get_bulk(transport->data_buf_pool,
+&req->buffers[req->num_buffers],
 num_buffers - i)) {
 goto err_exit;
 }
+req->num_buffers += num_buffers - i;
 i += num_buffers - i;
 }
 }
@@ -416,6 +419,6 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 return 0;
 err_exit:
-spdk_nvmf_request_free_buffers(req, group, transport, i);
+spdk_nvmf_request_free_buffers(req, group, transport);
 return -ENOMEM;
 }

View File

@@ -82,12 +82,11 @@ DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
 void
 spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 struct spdk_nvmf_transport_poll_group *group,
-struct spdk_nvmf_transport *transport,
-uint32_t num_buffers)
+struct spdk_nvmf_transport *transport)
 {
 uint32_t i;
-for (i = 0; i < num_buffers; i++) {
+for (i = 0; i < req->num_buffers; i++) {
 if (group->buf_cache_count < group->buf_cache_size) {
 STAILQ_INSERT_HEAD(&group->buf_cache,
 (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
@@ -100,6 +99,7 @@ spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 req->buffers[i] = NULL;
 req->iov[i].iov_len = 0;
 }
+req->num_buffers = 0;
 req->data_from_pool = false;
 }
@@ -114,14 +114,17 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 while (i < num_buffers) {
 if (!(STAILQ_EMPTY(&group->buf_cache))) {
 group->buf_cache_count--;
-req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+req->buffers[req->num_buffers] = STAILQ_FIRST(&group->buf_cache);
 STAILQ_REMOVE_HEAD(&group->buf_cache, link);
+req->num_buffers++;
 i++;
 } else {
-if (spdk_mempool_get_bulk(transport->data_buf_pool, &req->buffers[i],
+if (spdk_mempool_get_bulk(transport->data_buf_pool,
+&req->buffers[req->num_buffers],
 num_buffers - i)) {
 goto err_exit;
 }
+req->num_buffers += num_buffers - i;
 i += num_buffers - i;
 }
 }
@@ -129,7 +132,7 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 return 0;
 err_exit:
-spdk_nvmf_request_free_buffers(req, group, transport, i);
+spdk_nvmf_request_free_buffers(req, group, transport);
 return -ENOMEM;
 }
@@ -168,6 +171,8 @@ static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
 rdma_req->data.wr.sg_list[i].length = 0;
 rdma_req->data.wr.sg_list[i].lkey = 0;
 }
+rdma_req->req.iovcnt = 0;
+rdma_req->req.num_buffers = 0;
 }
 static void

View File

@@ -175,7 +175,7 @@ DEFINE_STUB(spdk_nvmf_request_get_buffers,
 DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
 (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
-struct spdk_nvmf_transport *transport, uint32_t num_buffers));
+struct spdk_nvmf_transport *transport));
 DEFINE_STUB(spdk_sock_get_optimal_sock_group,
 int,