nvmf: Move data_from_pool flag to common struct spdk_nvmf_request
This is a preparation to unify buffer management among transports.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I6b1c208207ae3679619239db4e6e9a77b33291d0
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466002
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 04ae83ec93
commit 005b053a02
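The pattern is the same in all three transports (FC, RDMA, TCP): each transport-specific request embeds the common struct spdk_nvmf_request as its req member, so every access changes from, e.g., rdma_req->data_from_pool to rdma_req->req.data_from_pool. A minimal sketch of the resulting layout, using only the field subset visible in the hunks below (the real structs carry many more members):

/* Common request struct after this change (subset). */
struct spdk_nvmf_request {
        void *buffers[NVMF_REQ_MAX_BUFFERS];
        struct iovec iov[NVMF_REQ_MAX_BUFFERS];
        uint32_t iovcnt;
        bool data_from_pool;            /* moved here from each transport */
        /* ... */
};

/* Each transport request embeds the common struct and loses its private
 * copy of the flag; the RDMA request is shown, FC and TCP follow suit. */
struct spdk_nvmf_rdma_request {
        struct spdk_nvmf_request req;   /* flag is now rdma_req->req.data_from_pool */
        /* bool data_from_pool; -- removed by this commit */
        /* ... */
};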
@@ -435,7 +435,7 @@ nvmf_fc_request_free_buffers(struct spdk_nvmf_fc_request *fc_req,
                 fc_req->req.iov[i].iov_base = NULL;
                 fc_req->req.buffers[i] = NULL;
         }
-        fc_req->data_from_pool = false;
+        fc_req->req.data_from_pool = false;
 }
 
 void
@@ -1343,7 +1343,7 @@ nvmf_fc_request_fill_buffers(struct spdk_nvmf_fc_request *fc_req,
                 fc_req->req.iovcnt++;
                 length -= fc_req->req.iov[i].iov_len;
         }
-        fc_req->data_from_pool = true;
+        fc_req->req.data_from_pool = true;
 }
 
 static int
@@ -1534,7 +1534,7 @@ spdk_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
         }
 
         /* Release IO buffers */
-        if (fc_req->data_from_pool) {
+        if (fc_req->req.data_from_pool) {
                 nvmf_fc_request_free_buffers(fc_req, group, transport, fc_req->req.iovcnt);
         }
         fc_req->req.data = NULL;
@@ -350,7 +350,6 @@ struct spdk_nvmf_fc_request {
         uint32_t magic;
         uint32_t s_id;
         uint32_t d_id;
-        bool data_from_pool;
         TAILQ_ENTRY(spdk_nvmf_fc_request) link;
         TAILQ_ENTRY(spdk_nvmf_fc_request) pending_link;
         TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;
@@ -215,6 +215,7 @@ struct spdk_nvmf_request {
         void *buffers[NVMF_REQ_MAX_BUFFERS];
         struct iovec iov[NVMF_REQ_MAX_BUFFERS];
         uint32_t iovcnt;
+        bool data_from_pool;
         struct spdk_bdev_io_wait_entry bdev_io_wait;
 
         TAILQ_ENTRY(spdk_nvmf_request) link;
@@ -251,7 +251,6 @@ struct spdk_nvmf_rdma_request_data {
 
 struct spdk_nvmf_rdma_request {
         struct spdk_nvmf_request req;
-        bool data_from_pool;
 
         enum spdk_nvmf_rdma_request_state state;
 
@@ -665,7 +664,7 @@ nvmf_rdma_request_free_data(struct spdk_nvmf_rdma_request *rdma_req,
 static void
 nvmf_rdma_dump_request(struct spdk_nvmf_rdma_request *req)
 {
-        SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", req->data_from_pool);
+        SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", req->req.data_from_pool);
         if (req->req.cmd) {
                 SPDK_ERRLOG("\t\tRequest opcode: %d\n", req->req.cmd->nvmf_cmd.opcode);
         }
@@ -1375,7 +1374,7 @@ spdk_nvmf_rdma_request_free_buffers(struct spdk_nvmf_rdma_request *rdma_req,
                 rdma_req->req.iov[i].iov_len = 0;
 
         }
-        rdma_req->data_from_pool = false;
+        rdma_req->req.data_from_pool = false;
 }
 
 static int
@@ -1590,7 +1589,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 
         assert(rdma_req->req.iovcnt <= rqpair->max_send_sge);
 
-        rdma_req->data_from_pool = true;
+        rdma_req->req.data_from_pool = true;
 
         return rc;
 
@@ -1692,7 +1691,7 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtransport,
 #endif
 
         rdma_req->num_outstanding_data_wr = num_sgl_descriptors;
-        rdma_req->data_from_pool = true;
+        req->data_from_pool = true;
 
         return 0;
 
@@ -1792,7 +1791,7 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
 
                 rdma_req->num_outstanding_data_wr = 0;
                 rdma_req->req.data = rdma_req->recv->buf + offset;
-                rdma_req->data_from_pool = false;
+                rdma_req->req.data_from_pool = false;
                 rdma_req->req.length = sgl->unkeyed.length;
 
                 rdma_req->req.iov[0].iov_base = rdma_req->req.data;
@@ -1835,7 +1834,7 @@ nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
         struct spdk_nvmf_rdma_poll_group *rgroup;
 
         rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
-        if (rdma_req->data_from_pool) {
+        if (rdma_req->req.data_from_pool) {
                 rgroup = rqpair->poller->group;
 
                 spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport,
@@ -1954,7 +1953,8 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
                 /* If data is transferring from host to controller and the data didn't
                  * arrive using in capsule data, we need to do a transfer from the host.
                  */
-                if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER && rdma_req->data_from_pool) {
+                if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
+                    rdma_req->req.data_from_pool) {
                         STAILQ_INSERT_TAIL(&rqpair->pending_rdma_read_queue, rdma_req, state_link);
                         rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING;
                         break;
@@ -168,7 +168,6 @@ struct spdk_nvmf_tcp_req {
         /* In-capsule data buffer */
         uint8_t *buf;
 
-        bool data_from_pool;
         bool has_incapsule_data;
 
         /* transfer_tag */
@@ -461,7 +460,7 @@ nvmf_tcp_dump_qpair_req_contents(struct spdk_nvmf_tcp_qpair *tqpair)
         for (i = 1; i < TCP_REQUEST_NUM_STATES; i++) {
                 SPDK_ERRLOG("\tNum of requests in state[%d] = %d\n", i, tqpair->state_cntr[i]);
                 TAILQ_FOREACH(tcp_req, &tqpair->state_queue[i], state_link) {
-                        SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->data_from_pool);
+                        SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->req.data_from_pool);
                         SPDK_ERRLOG("\t\tRequest opcode: %d\n", tcp_req->req.cmd->nvmf_cmd.opcode);
                 }
         }
@@ -2177,7 +2176,7 @@ spdk_nvmf_tcp_request_free_buffers(struct spdk_nvmf_tcp_req *tcp_req,
                 tcp_req->req.buffers[i] = NULL;
                 tcp_req->req.iov[i].iov_len = 0;
         }
-        tcp_req->data_from_pool = false;
+        tcp_req->req.data_from_pool = false;
 }
 
 static int
@@ -2231,7 +2230,7 @@ spdk_nvmf_tcp_req_fill_buffers(struct spdk_nvmf_tcp_req *tcp_req,
         }
 
         assert(tcp_req->req.iovcnt <= SPDK_NVMF_MAX_SGL_ENTRIES);
-        tcp_req->data_from_pool = true;
+        tcp_req->req.data_from_pool = true;
 }
 
 static int
@@ -2330,7 +2329,7 @@ spdk_nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_transport *ttransport,
                 }
 
                 tcp_req->req.data = tcp_req->buf + offset;
-                tcp_req->data_from_pool = false;
+                tcp_req->req.data_from_pool = false;
                 tcp_req->req.length = length;
 
                 if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
@@ -2524,7 +2523,7 @@ spdk_nvmf_tcp_pdu_set_buf_from_req(struct spdk_nvmf_tcp_qpair *tqpair,
 {
         struct nvme_tcp_pdu *pdu;
 
-        if (tcp_req->data_from_pool) {
+        if (tcp_req->req.data_from_pool) {
                 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Will send r2t for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);
                 tcp_req->next_expected_r2t_offset = 0;
                 spdk_nvmf_tcp_send_r2t_pdu(tqpair, tcp_req);
@@ -2705,7 +2704,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
                 break;
         case TCP_REQUEST_STATE_COMPLETED:
                 spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, 0);
-                if (tcp_req->data_from_pool) {
+                if (tcp_req->req.data_from_pool) {
                         spdk_nvmf_tcp_request_free_buffers(tcp_req, group, &ttransport->transport,
                                                            tcp_req->req.iovcnt);
                 }
@@ -91,7 +91,7 @@ static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
         int i;
 
         rdma_req->req.length = 0;
-        rdma_req->data_from_pool = false;
+        rdma_req->req.data_from_pool = false;
         rdma_req->req.data = NULL;
         rdma_req->data.wr.num_sge = 0;
         rdma_req->data.wr.wr.rdma.remote_addr = 0;
@@ -163,7 +163,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         device.map = (void *)0x0;
         rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
         CU_ASSERT(rc == 0);
-        CU_ASSERT(rdma_req.data_from_pool == true);
+        CU_ASSERT(rdma_req.req.data_from_pool == true);
         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
         CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
         CU_ASSERT(rdma_req.data.wr.num_sge == 1);
@@ -180,7 +180,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
 
         CU_ASSERT(rc == 0);
-        CU_ASSERT(rdma_req.data_from_pool == true);
+        CU_ASSERT(rdma_req.req.data_from_pool == true);
         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
         CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
@@ -206,7 +206,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
 
         CU_ASSERT(rc == 0);
-        CU_ASSERT(rdma_req.data_from_pool == false);
+        CU_ASSERT(rdma_req.req.data_from_pool == false);
         CU_ASSERT(rdma_req.req.data == NULL);
         CU_ASSERT(rdma_req.data.wr.num_sge == 0);
         CU_ASSERT(rdma_req.req.buffers[0] == NULL);
@@ -229,7 +229,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         CU_ASSERT(rc == 0);
         CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
-        CU_ASSERT(rdma_req.data_from_pool == false);
+        CU_ASSERT(rdma_req.req.data_from_pool == false);
 
         /* Part 2: I/O offset + length too large */
         reset_nvmf_rdma_request(&rdma_req);
@@ -268,7 +268,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
 
         CU_ASSERT(rc == 0);
-        CU_ASSERT(rdma_req.data_from_pool == true);
+        CU_ASSERT(rdma_req.req.data_from_pool == true);
         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
         CU_ASSERT(rdma_req.data.wr.num_sge == 1);
         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
@@ -293,7 +293,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
 
         CU_ASSERT(rc == 0);
-        CU_ASSERT(rdma_req.data_from_pool == true);
+        CU_ASSERT(rdma_req.req.data_from_pool == true);
         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
         CU_ASSERT(rdma_req.req.iovcnt == 16);
         CU_ASSERT(rdma_req.data.wr.num_sge == 8);
@@ -323,7 +323,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
 
         CU_ASSERT(rc == 0);
-        CU_ASSERT(rdma_req.data_from_pool == true);
+        CU_ASSERT(rdma_req.req.data_from_pool == true);
         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
         CU_ASSERT(rdma_req.req.iovcnt == 17);
         CU_ASSERT(rdma_req.data.wr.num_sge == 16);
@@ -363,7 +363,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
 
         SPDK_CU_ASSERT_FATAL(rc == 0);
-        CU_ASSERT(rdma_req.data_from_pool == true);
+        CU_ASSERT(rdma_req.req.data_from_pool == true);
         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
         CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
                   ~NVMF_DATA_BUFFER_MASK));
@@ -384,7 +384,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
 
         SPDK_CU_ASSERT_FATAL(rc == 0);
-        CU_ASSERT(rdma_req.data_from_pool == true);
+        CU_ASSERT(rdma_req.req.data_from_pool == true);
         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
         CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
         CU_ASSERT(rdma_req.data.wr.num_sge == 4);
@@ -409,7 +409,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
         rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
 
         SPDK_CU_ASSERT_FATAL(rc == 0);
-        CU_ASSERT(rdma_req.data_from_pool == true);
+        CU_ASSERT(rdma_req.req.data_from_pool == true);
         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
         CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
                   ~NVMF_DATA_BUFFER_MASK));