nvmf: Move allocated buffer pointers to common struct spdk_nvmf_request
This is a preparation to unify buffer management among transports.
struct spdk_nvmf_request already has SPDK_NVMF_MAX_SGL_ENTRIES (16) * 2
iovecs, so doubling the number of buffers to match is not a problem.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Idb525abbf35dc9f4b8547b785b5dfa77d106d8c9
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465873
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
parent a3b7ae8ab6
commit 04ae83ec93
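For orientation before the per-transport hunks: after this change the common
struct spdk_nvmf_request owns both the buffer pointers and the iovecs, sized by
the new NVMF_REQ_MAX_BUFFERS macro. The following is a condensed sketch of only
the affected fields, reconstructed from the header hunks in this patch (all
other members elided):

    #define SPDK_NVMF_MAX_SGL_ENTRIES  16
    /* The maximum number of buffers per request */
    #define NVMF_REQ_MAX_BUFFERS       (SPDK_NVMF_MAX_SGL_ENTRIES * 2)

    struct spdk_nvmf_request {
            /* ... other members unchanged ... */
            void            *buffers[NVMF_REQ_MAX_BUFFERS]; /* moved here from the transport requests */
            struct iovec    iov[NVMF_REQ_MAX_BUFFERS];      /* was SPDK_NVMF_MAX_SGL_ENTRIES * 2 */
            uint32_t        iovcnt;
            /* ... */
    };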
@@ -426,14 +426,14 @@ nvmf_fc_request_free_buffers(struct spdk_nvmf_fc_request *fc_req,
 	for (i = 0; i < num_buffers; i++) {
 		if (group->buf_cache_count < group->buf_cache_size) {
 			STAILQ_INSERT_HEAD(&group->buf_cache,
-					   (struct spdk_nvmf_transport_pg_cache_buf *)fc_req->buffers[i],
+					   (struct spdk_nvmf_transport_pg_cache_buf *)fc_req->req.buffers[i],
 					   link);
 			group->buf_cache_count++;
 		} else {
-			spdk_mempool_put(transport->data_buf_pool, fc_req->buffers[i]);
+			spdk_mempool_put(transport->data_buf_pool, fc_req->req.buffers[i]);
 		}
 		fc_req->req.iov[i].iov_base = NULL;
-		fc_req->buffers[i] = NULL;
+		fc_req->req.buffers[i] = NULL;
 	}
 	fc_req->data_from_pool = false;
 }
@@ -1307,12 +1307,12 @@ nvmf_fc_request_get_buffers(struct spdk_nvmf_fc_request *fc_req,
 	while (i < num_buffers) {
 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
 			group->buf_cache_count--;
-			fc_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+			fc_req->req.buffers[i] = STAILQ_FIRST(&group->buf_cache);
 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-			assert(fc_req->buffers[i] != NULL);
+			assert(fc_req->req.buffers[i] != NULL);
 			i++;
 		} else {
-			if (spdk_mempool_get_bulk(transport->data_buf_pool, &fc_req->buffers[i],
+			if (spdk_mempool_get_bulk(transport->data_buf_pool, &fc_req->req.buffers[i],
 						  num_buffers - i)) {
 				goto err_exit;
 			}
@@ -1336,7 +1336,7 @@ nvmf_fc_request_fill_buffers(struct spdk_nvmf_fc_request *fc_req,
 
 	while (length) {
 		i = fc_req->req.iovcnt;
-		fc_req->req.iov[i].iov_base = (void *)((uintptr_t)((char *)fc_req->buffers[i] +
+		fc_req->req.iov[i].iov_base = (void *)((uintptr_t)((char *)fc_req->req.buffers[i] +
 						       NVMF_DATA_BUFFER_MASK) &
 						       ~NVMF_DATA_BUFFER_MASK);
 		fc_req->req.iov[i].iov_len = spdk_min(length, transport->opts.io_unit_size);
@@ -350,7 +350,6 @@ struct spdk_nvmf_fc_request {
 	uint32_t magic;
 	uint32_t s_id;
 	uint32_t d_id;
-	void *buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
 	bool data_from_pool;
 	TAILQ_ENTRY(spdk_nvmf_fc_request) link;
 	TAILQ_ENTRY(spdk_nvmf_fc_request) pending_link;
@@ -47,6 +47,9 @@
 
 #define SPDK_NVMF_MAX_SGL_ENTRIES	16
 
+/* The maximum number of buffers per request */
+#define NVMF_REQ_MAX_BUFFERS	(SPDK_NVMF_MAX_SGL_ENTRIES * 2)
+
 /* AIO backend requires block size aligned data buffers,
  * extra 4KiB aligned data buffer should work for most devices.
  */
@@ -209,7 +212,8 @@ struct spdk_nvmf_request {
 	void				*data;
 	union nvmf_h2c_msg		*cmd;
 	union nvmf_c2h_msg		*rsp;
-	struct iovec			iov[SPDK_NVMF_MAX_SGL_ENTRIES * 2];
+	void				*buffers[NVMF_REQ_MAX_BUFFERS];
+	struct iovec			iov[NVMF_REQ_MAX_BUFFERS];
 	uint32_t			iovcnt;
 	struct spdk_bdev_io_wait_entry	bdev_io_wait;
 
@@ -264,7 +264,6 @@ struct spdk_nvmf_rdma_request {
 	} rsp;
 
 	struct spdk_nvmf_rdma_request_data	data;
-	void					*buffers[NVMF_REQ_MAX_BUFFERS];
 
 	uint32_t				num_outstanding_data_wr;
 	uint64_t				receive_tsc;
@@ -1366,13 +1365,13 @@ spdk_nvmf_rdma_request_free_buffers(struct spdk_nvmf_rdma_request *rdma_req,
 	for (i = 0; i < num_buffers; i++) {
 		if (group->buf_cache_count < group->buf_cache_size) {
 			STAILQ_INSERT_HEAD(&group->buf_cache,
-					   (struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->buffers[i], link);
+					   (struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->req.buffers[i], link);
 			group->buf_cache_count++;
 		} else {
-			spdk_mempool_put(transport->data_buf_pool, rdma_req->buffers[i]);
+			spdk_mempool_put(transport->data_buf_pool, rdma_req->req.buffers[i]);
 		}
 		rdma_req->req.iov[i].iov_base = NULL;
-		rdma_req->buffers[i] = NULL;
+		rdma_req->req.buffers[i] = NULL;
 		rdma_req->req.iov[i].iov_len = 0;
 
 	}
@@ -1389,12 +1388,12 @@ nvmf_rdma_request_get_buffers(struct spdk_nvmf_rdma_request *rdma_req,
 	while (i < num_buffers) {
 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
 			group->buf_cache_count--;
-			rdma_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+			rdma_req->req.buffers[i] = STAILQ_FIRST(&group->buf_cache);
 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-			assert(rdma_req->buffers[i] != NULL);
+			assert(rdma_req->req.buffers[i] != NULL);
 			i++;
 		} else {
-			if (spdk_mempool_get_bulk(transport->data_buf_pool, &rdma_req->buffers[i], num_buffers - i)) {
+			if (spdk_mempool_get_bulk(transport->data_buf_pool, &rdma_req->req.buffers[i], num_buffers - i)) {
 				goto err_exit;
 			}
 			i += num_buffers - i;
@@ -1529,7 +1528,7 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
 
 	while (remaining_length) {
 		iovcnt = rdma_req->req.iovcnt;
-		rdma_req->req.iov[iovcnt].iov_base = (void *)((uintptr_t)(rdma_req->buffers[iovcnt] +
+		rdma_req->req.iov[iovcnt].iov_base = (void *)((uintptr_t)(rdma_req->req.buffers[iovcnt] +
 						      NVMF_DATA_BUFFER_MASK) &
 						      ~NVMF_DATA_BUFFER_MASK);
 		rdma_req->req.iov[iovcnt].iov_len = spdk_min(remaining_length,
@@ -176,8 +176,6 @@ struct spdk_nvmf_tcp_req {
 
 	enum spdk_nvmf_tcp_req_state		state;
 
-	void					*buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
-
 	/*
 	 * next_expected_r2t_offset is used when we receive the h2c_data PDU.
 	 */
@@ -2167,16 +2165,16 @@ spdk_nvmf_tcp_request_free_buffers(struct spdk_nvmf_tcp_req *tcp_req,
 				   uint32_t num_buffers)
 {
 	for (uint32_t i = 0; i < num_buffers; i++) {
-		assert(tcp_req->buffers[i] != NULL);
+		assert(tcp_req->req.buffers[i] != NULL);
 		if (group->buf_cache_count < group->buf_cache_size) {
 			STAILQ_INSERT_HEAD(&group->buf_cache,
-					   (struct spdk_nvmf_transport_pg_cache_buf *)tcp_req->buffers[i], link);
+					   (struct spdk_nvmf_transport_pg_cache_buf *)tcp_req->req.buffers[i], link);
 			group->buf_cache_count++;
 		} else {
-			spdk_mempool_put(transport->data_buf_pool, tcp_req->buffers[i]);
+			spdk_mempool_put(transport->data_buf_pool, tcp_req->req.buffers[i]);
 		}
 		tcp_req->req.iov[i].iov_base = NULL;
-		tcp_req->buffers[i] = NULL;
+		tcp_req->req.buffers[i] = NULL;
 		tcp_req->req.iov[i].iov_len = 0;
 	}
 	tcp_req->data_from_pool = false;
@@ -2193,13 +2191,13 @@ spdk_nvmf_tcp_req_get_buffers(struct spdk_nvmf_tcp_req *tcp_req,
 	while (i < num_buffers) {
 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
 			group->buf_cache_count--;
-			tcp_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+			tcp_req->req.buffers[i] = STAILQ_FIRST(&group->buf_cache);
 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-			assert(tcp_req->buffers[i] != NULL);
+			assert(tcp_req->req.buffers[i] != NULL);
 			i++;
 		} else {
 			if (spdk_mempool_get_bulk(transport->data_buf_pool,
-						  &tcp_req->buffers[i], num_buffers - i)) {
+						  &tcp_req->req.buffers[i], num_buffers - i)) {
 				goto nomem;
 			}
 			i += num_buffers - i;
@@ -2224,7 +2222,7 @@ spdk_nvmf_tcp_req_fill_buffers(struct spdk_nvmf_tcp_req *tcp_req,
 	tcp_req->req.iovcnt = 0;
 	while (length) {
 		i = tcp_req->req.iovcnt;
-		tcp_req->req.iov[i].iov_base = (void *)((uintptr_t)(tcp_req->buffers[i] +
+		tcp_req->req.iov[i].iov_base = (void *)((uintptr_t)(tcp_req->req.buffers[i] +
 							NVMF_DATA_BUFFER_MASK) &
 							~NVMF_DATA_BUFFER_MASK);
 		tcp_req->req.iov[i].iov_len = spdk_min(length, transport->opts.io_unit_size);
@@ -100,7 +100,7 @@ static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
 	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
 		rdma_req->req.iov[i].iov_base = 0;
 		rdma_req->req.iov[i].iov_len = 0;
-		rdma_req->buffers[i] = 0;
+		rdma_req->req.buffers[i] = 0;
 		rdma_req->data.wr.sg_list[i].addr = 0;
 		rdma_req->data.wr.sg_list[i].length = 0;
 		rdma_req->data.wr.sg_list[i].lkey = 0;
@@ -169,7 +169,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
-	CU_ASSERT((uint64_t)rdma_req.buffers[0] == 0x2000);
+	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
@@ -186,7 +186,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
 	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
-		CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
+		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
@@ -209,7 +209,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
 	CU_ASSERT(rdma_req.data_from_pool == false);
 	CU_ASSERT(rdma_req.req.data == NULL);
 	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
-	CU_ASSERT(rdma_req.buffers[0] == NULL);
+	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
@@ -373,7 +373,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
 	CU_ASSERT(group.group.buf_cache_count == 0);
 	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
 	for (i = 0; i < 4; i++) {
-		CU_ASSERT((uint64_t)rdma_req.buffers[i] == (uint64_t)&bufs[i]);
+		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
 				~NVMF_DATA_BUFFER_MASK));
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
@@ -393,7 +393,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
 	CU_ASSERT(group.group.buf_cache_count == 0);
 	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
 	for (i = 0; i < 4; i++) {
-		CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
+		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
 		CU_ASSERT(group.group.buf_cache_count == 0);
@@ -418,13 +418,13 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
 	CU_ASSERT(group.group.buf_cache_count == 0);
 	for (i = 0; i < 2; i++) {
-		CU_ASSERT((uint64_t)rdma_req.buffers[i] == (uint64_t)&bufs[i]);
+		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
 				~NVMF_DATA_BUFFER_MASK));
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
 	}
 	for (i = 2; i < 4; i++) {
-		CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
+		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
 	}
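With the buffer pointers in the common struct spdk_nvmf_request, the FC, RDMA,
and TCP free paths above are now structurally identical and touch only fields
of the embedded request, which is what makes the intended unification possible.
The routine below is a minimal sketch of what such a shared helper could look
like; the function name is hypothetical and not part of this patch, and the
poll-group and transport fields are assumed from the loops shown above.

    /* Hypothetical shared helper (illustration only, not part of this patch):
     * return the request's data buffers to the poll-group cache while there is
     * room, otherwise back to the transport mempool, and clear the iovecs. */
    static void
    nvmf_request_free_buffers_sketch(struct spdk_nvmf_request *req,
                                     struct spdk_nvmf_transport_poll_group *group,
                                     struct spdk_nvmf_transport *transport,
                                     uint32_t num_buffers)
    {
            uint32_t i;

            for (i = 0; i < num_buffers; i++) {
                    if (group->buf_cache_count < group->buf_cache_size) {
                            STAILQ_INSERT_HEAD(&group->buf_cache,
                                               (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
                                               link);
                            group->buf_cache_count++;
                    } else {
                            spdk_mempool_put(transport->data_buf_pool, req->buffers[i]);
                    }
                    req->iov[i].iov_base = NULL;
                    req->iov[i].iov_len = 0;
                    req->buffers[i] = NULL;
            }
    }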