nvmf: Optimize nvmf_request_get_buffers by merging buffer and iovec loops
We can merge the two loops over req->buffers and req->iov into a single
loop, and merge the two variables req->num_buffers and req->iovcnt into a
single variable. For the latter, use req->iovcnt because it is also used
for in-capsule data.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ia164f2054b98bbcb00308791774e3ffa4fc70baf
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/469489
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
parent e1f72b2cd2
commit 063c79d13c
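The core of the change, in miniature: instead of filling req->buffers in one loop (counted by req->num_buffers) and building req->iov in a second loop (counted by req->iovcnt), each buffer is now registered in a single step, so one counter covers both arrays. A minimal standalone sketch of that pattern, with simplified types and the alignment step omitted (not the actual SPDK code):

#include <stdint.h>
#include <sys/uio.h>

#define MAX_BUFFERS	16
#define IO_UNIT_SIZE	8192u

struct request {
	void *buffers[MAX_BUFFERS];
	struct iovec iov[MAX_BUFFERS];
	uint32_t iovcnt;	/* single counter for both arrays */
};

/* Record one buffer and its iovec together; return the remaining length. */
static uint32_t
set_buffer(struct request *req, void *buf, uint32_t length)
{
	uint32_t len = length < IO_UNIT_SIZE ? length : IO_UNIT_SIZE;

	req->buffers[req->iovcnt] = buf;
	req->iov[req->iovcnt].iov_base = buf;	/* SPDK additionally aligns this up */
	req->iov[req->iovcnt].iov_len = len;
	req->iovcnt++;

	return length - len;
}

Calling set_buffer() once per buffer until the remaining length reaches zero replaces the two separate fill loops.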
@@ -213,7 +213,6 @@ struct spdk_nvmf_request {
 	union nvmf_h2c_msg *cmd;
 	union nvmf_c2h_msg *rsp;
 	void *buffers[NVMF_REQ_MAX_BUFFERS];
-	uint32_t num_buffers;
 	struct iovec iov[NVMF_REQ_MAX_BUFFERS];
 	uint32_t iovcnt;
 	bool data_from_pool;
@@ -372,7 +372,7 @@ spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 {
 	uint32_t i;
 
-	for (i = 0; i < req->num_buffers; i++) {
+	for (i = 0; i < req->iovcnt; i++) {
 		if (group->buf_cache_count < group->buf_cache_size) {
 			STAILQ_INSERT_HEAD(&group->buf_cache,
 					   (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
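With req->num_buffers gone, the free path depends on the invariant that buffers[i] and iov[i] are always filled as a pair, so req->iovcnt bounds both arrays. Continuing the sketch above, a hedged illustration of that loop shape, with free() standing in for returning buffers to the transport mempool or poll-group cache:

#include <stdlib.h>

static void
free_request_buffers(struct request *req)
{
	uint32_t i;

	/* iovcnt bounds both arrays because they are filled together */
	for (i = 0; i < req->iovcnt; i++) {
		free(req->buffers[i]);
		req->buffers[i] = NULL;
		req->iov[i].iov_len = 0;
	}
	req->iovcnt = 0;
}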
@@ -385,54 +385,64 @@ spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 		req->buffers[i] = NULL;
 		req->iov[i].iov_len = 0;
 	}
-	req->num_buffers = 0;
 	req->data_from_pool = false;
 }
 
+static inline int
+nvmf_request_set_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
+			uint32_t io_unit_size)
+{
+	req->buffers[req->iovcnt] = buf;
+	req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
+					  ~NVMF_DATA_BUFFER_MASK);
+	req->iov[req->iovcnt].iov_len = spdk_min(length, io_unit_size);
+	length -= req->iov[req->iovcnt].iov_len;
+	req->iovcnt++;
+
+	return length;
+}
+
 static int
 nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 			 struct spdk_nvmf_transport_poll_group *group,
 			 struct spdk_nvmf_transport *transport,
 			 uint32_t length)
 {
+	uint32_t io_unit_size = transport->opts.io_unit_size;
 	uint32_t num_buffers;
-	uint32_t i = 0;
+	uint32_t i = 0, j;
+	void *buffer, *buffers[NVMF_REQ_MAX_BUFFERS];
 
 	/* If the number of buffers is too large, then we know the I/O is larger than allowed.
 	 * Fail it.
 	 */
-	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);
-	if (num_buffers + req->num_buffers > NVMF_REQ_MAX_BUFFERS) {
+	num_buffers = SPDK_CEIL_DIV(length, io_unit_size);
+	if (num_buffers + req->iovcnt > NVMF_REQ_MAX_BUFFERS) {
 		return -EINVAL;
 	}
 
 	while (i < num_buffers) {
 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
 			group->buf_cache_count--;
-			req->buffers[req->num_buffers] = STAILQ_FIRST(&group->buf_cache);
+			buffer = STAILQ_FIRST(&group->buf_cache);
 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-			assert(req->buffers[req->num_buffers] != NULL);
-			req->num_buffers++;
+			assert(buffer != NULL);
+			length = nvmf_request_set_buffer(req, buffer, length, io_unit_size);
 			i++;
 		} else {
-			if (spdk_mempool_get_bulk(transport->data_buf_pool,
-						  &req->buffers[req->num_buffers],
+			if (spdk_mempool_get_bulk(transport->data_buf_pool, buffers,
 						  num_buffers - i)) {
 				return -ENOMEM;
 			}
-			req->num_buffers += num_buffers - i;
+			for (j = 0; j < num_buffers - i; j++) {
+				length = nvmf_request_set_buffer(req, buffers[j], length, io_unit_size);
+			}
 			i += num_buffers - i;
 		}
 	}
 
-	while (length) {
-		req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
-						  NVMF_DATA_BUFFER_MASK) &
-						  ~NVMF_DATA_BUFFER_MASK);
-		req->iov[req->iovcnt].iov_len = spdk_min(length, transport->opts.io_unit_size);
-		length -= req->iov[req->iovcnt].iov_len;
-		req->iovcnt++;
-	}
+	assert(length == 0);
 
 	req->data_from_pool = true;
 	return 0;
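One subtlety the new helper preserves: the iovec base is the raw pool buffer rounded up to the data buffer alignment via (buf + MASK) & ~MASK. A small standalone demonstration of that align-up arithmetic, assuming a 4 KiB alignment for illustration (in SPDK the mask is derived from NVMF_DATA_BUFFER_ALIGNMENT):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DATA_BUFFER_ALIGNMENT	4096u	/* illustrative value */
#define DATA_BUFFER_MASK	(DATA_BUFFER_ALIGNMENT - 1)

int
main(void)
{
	uintptr_t raw = 0x1000123;	/* unaligned pool address */
	uintptr_t aligned = (raw + DATA_BUFFER_MASK) & ~(uintptr_t)DATA_BUFFER_MASK;

	/* Rounds up to the next aligned address, moving less than one alignment unit. */
	assert((aligned & DATA_BUFFER_MASK) == 0);
	assert(aligned - raw < DATA_BUFFER_ALIGNMENT);
	printf("0x%jx -> 0x%jx\n", (uintmax_t)raw, (uintmax_t)aligned);
	return 0;
}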
@@ -119,7 +119,6 @@ static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
 		rdma_req->data.wr.sg_list[i].lkey = 0;
 	}
 	rdma_req->req.iovcnt = 0;
-	rdma_req->req.num_buffers = 0;
 }
 
 static void