nvme/tcp host: Support multiple SGL entries on the host side.

For now the idea is to convert the caller's multiple-element SGL into a single iovec array and send it out.

Change-Id: I8e571704e9d7c7b583f889837eead7cac1982fcd
Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/448262
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 55f5b205c5
commit 18b3e74186
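What this enables from the host API's perspective: an application can submit I/O over TCP whose payload is described by several scatter-gather elements, e.g. via spdk_nvme_ns_cmd_readv(). A minimal sketch follows, assuming an already-connected namespace and qpair; the my_sgl_ctx type and the two callbacks are illustrative stand-ins, while spdk_nvme_ns_cmd_readv() and the callback typedefs are the real SPDK API.

	#include <sys/uio.h>
	#include "spdk/nvme.h"

	struct my_sgl_ctx {
		struct iovec	*iov;		/* caller-owned SGL */
		int		iovcnt;
		int		cur;		/* next SGE to hand out */
		uint32_t	cur_offset;	/* byte offset within the current SGE */
	};

	/* Rewind the SGL to an absolute byte offset (assumed to be within the payload). */
	static void
	my_reset_sgl(void *cb_arg, uint32_t offset)
	{
		struct my_sgl_ctx *ctx = cb_arg;

		ctx->cur = 0;
		while (offset >= ctx->iov[ctx->cur].iov_len) {
			offset -= ctx->iov[ctx->cur].iov_len;
			ctx->cur++;
		}
		ctx->cur_offset = offset;
	}

	/* Hand out the next SGE; the transport's build_sgl_request() packs it into tcp_req->iov[]. */
	static int
	my_next_sge(void *cb_arg, void **address, uint32_t *length)
	{
		struct my_sgl_ctx *ctx = cb_arg;
		struct iovec *iov = &ctx->iov[ctx->cur++];

		*address = (uint8_t *)iov->iov_base + ctx->cur_offset;
		*length = iov->iov_len - ctx->cur_offset;
		ctx->cur_offset = 0;
		return 0;
	}

	static int
	submit_sgl_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			struct my_sgl_ctx *ctx, uint64_t lba, uint32_t lba_count,
			spdk_nvme_cmd_cb cb_fn)
	{
		/* cb_arg doubles as the argument passed to the SGL callbacks. */
		return spdk_nvme_ns_cmd_readv(ns, qpair, lba, lba_count,
					      cb_fn, ctx, 0,
					      my_reset_sgl, my_next_sge);
	}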
@@ -110,7 +110,8 @@ struct nvme_tcp_req {
 	uint32_t			r2tl_remain;
 	bool				in_capsule_data;
 	struct nvme_tcp_pdu		send_pdu;
-	void				*buf;
+	struct iovec			iov[NVME_TCP_MAX_SGL_DESCRIPTORS];
+	uint32_t			iovcnt;
 	TAILQ_ENTRY(nvme_tcp_req)	link;
 };
 
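The request now embeds a fixed-size iovec array instead of a single buffer pointer, so no per-I/O allocation is needed; the trade-off is a hard cap of NVME_TCP_MAX_SGL_DESCRIPTORS entries per request plus a fixed per-request footprint. A back-of-envelope check, assuming a descriptor count of 16 (the definition below is a hypothetical stand-in; see lib/nvme/nvme_tcp.h for the real value):

	#include <stdio.h>
	#include <sys/uio.h>

	#define NVME_TCP_MAX_SGL_DESCRIPTORS 16	/* assumed value, stand-in for the SPDK macro */

	int
	main(void)
	{
		/* On LP64, 16 iovecs * 16 bytes each = 256 bytes added per request. */
		printf("per-request iov[] footprint: %zu bytes\n",
		       NVME_TCP_MAX_SGL_DESCRIPTORS * sizeof(struct iovec));
		return 0;
	}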
@@ -147,7 +148,7 @@ nvme_tcp_req_get(struct nvme_tcp_qpair *tqpair)
 	tcp_req->req = NULL;
 	tcp_req->in_capsule_data = false;
 	tcp_req->r2tl_remain = 0;
-	tcp_req->buf = NULL;
+	tcp_req->iovcnt = 0;
 	memset(&tcp_req->send_pdu, 0, sizeof(tcp_req->send_pdu));
 	TAILQ_INSERT_TAIL(&tqpair->outstanding_reqs, tcp_req, link);
 
@@ -481,7 +482,10 @@ static int
 nvme_tcp_build_contig_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
 {
 	struct nvme_request *req = tcp_req->req;
 
-	tcp_req->buf = req->payload.contig_or_cb_arg + req->payload_offset;
+	tcp_req->iov[0].iov_base = req->payload.contig_or_cb_arg + req->payload_offset;
+	tcp_req->iov[0].iov_len = req->payload_size;
+	tcp_req->iovcnt = 1;
 
 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
 
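The contiguous path becomes the degenerate case of the same machinery: the flat buffer is iov[0] and iovcnt is 1. A standalone (non-SPDK) illustration of why this is safe, using writev(2), where a one-element iovec behaves exactly like a plain write():

	#include <string.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int
	main(void)
	{
		char buf[] = "contiguous payload\n";
		struct iovec iov = {
			.iov_base = buf,
			.iov_len = strlen(buf),	/* whole buffer as a single SGE */
		};

		/* writev() with iovcnt == 1 is equivalent to write(). */
		return writev(STDOUT_FILENO, &iov, 1) < 0 ? 1 : 0;
	}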
@@ -496,8 +500,9 @@ nvme_tcp_build_contig_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req
 static int
 nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
 {
-	int rc;
+	int rc, iovcnt;
 	uint32_t length;
+	uint64_t remaining_size;
 	struct nvme_request *req = tcp_req->req;
 
 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
@@ -508,17 +513,28 @@ nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *t
 	assert(req->payload.next_sge_fn != NULL);
 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
 
-	/* TODO: for now, we only support a single SGL entry */
-	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &tcp_req->buf, &length);
-	if (rc) {
-		return -1;
-	}
-
-	if (length < req->payload_size) {
-		SPDK_ERRLOG("multi-element SGL currently not supported for TCP now\n");
-		return -1;
-	}
+	remaining_size = req->payload_size;
+	iovcnt = 0;
+
+	do {
+		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &tcp_req->iov[iovcnt].iov_base,
+					      &length);
+		if (rc) {
+			return -1;
+		}
+
+		tcp_req->iov[iovcnt].iov_len = length;
+		remaining_size -= length;
+		iovcnt++;
+	} while (remaining_size > 0 && iovcnt < NVME_TCP_MAX_SGL_DESCRIPTORS);
+
+	/* Should be impossible if we did our sgl checks properly up the stack, but do a sanity check here. */
+	if (remaining_size > 0) {
+		return -1;
+	}
+
+	tcp_req->iovcnt = iovcnt;
 
 	return 0;
 }
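A self-contained sketch of the gather loop above, with the SPDK request types replaced by a plain callback (the names here are stand-ins, not SPDK identifiers). Like the patch, it assumes each SGE returned by the callback never exceeds the remaining payload, keeps pulling entries until the payload is covered or the fixed array is full, then sanity-checks the total:

	#include <stdint.h>
	#include <sys/uio.h>

	#define MAX_SGL_DESCRIPTORS 16	/* stand-in for NVME_TCP_MAX_SGL_DESCRIPTORS */

	typedef int (*next_sge_fn)(void *cb_arg, void **address, uint32_t *length);

	static int
	gather_sgl(next_sge_fn next_sge, void *cb_arg, uint64_t payload_size,
		   struct iovec *iov, int *iovcnt_out)
	{
		uint64_t remaining = payload_size;
		uint32_t length;
		int rc, iovcnt = 0;

		do {
			rc = next_sge(cb_arg, &iov[iovcnt].iov_base, &length);
			if (rc) {
				return -1;
			}
			iov[iovcnt].iov_len = length;
			/* Assumes length <= remaining, as the patch does. */
			remaining -= length;
			iovcnt++;
		} while (remaining > 0 && iovcnt < MAX_SGL_DESCRIPTORS);

		/* Payload didn't fit in MAX_SGL_DESCRIPTORS entries: reject it. */
		if (remaining > 0) {
			return -1;
		}

		*iovcnt_out = iovcnt;
		return 0;
	}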
@@ -591,7 +607,32 @@ nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
 			  struct nvme_tcp_req *tcp_req,
 			  uint32_t data_len)
 {
-	nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)tcp_req->buf + tcp_req->datao), data_len);
+	uint32_t i, remain_len, len;
+	struct _iov_ctx *ctx;
+
+	if (tcp_req->iovcnt == 1) {
+		nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)tcp_req->iov[0].iov_base + tcp_req->datao), data_len);
+	} else {
+		i = 0;
+		ctx = &pdu->iov_ctx;
+		assert(tcp_req->iovcnt <= NVME_TCP_MAX_SGL_DESCRIPTORS);
+		_iov_ctx_init(ctx, pdu->data_iov, tcp_req->iovcnt, tcp_req->datao);
+		remain_len = data_len;
+
+		while (remain_len > 0) {
+			assert(i < NVME_TCP_MAX_SGL_DESCRIPTORS);
+			len = spdk_min(remain_len, tcp_req->iov[i].iov_len);
+			remain_len -= len;
+			if (!_iov_ctx_set_iov(ctx, tcp_req->iov[i].iov_base, len)) {
+				break;
+			}
+			i++;
+		}
+
+		assert(remain_len == 0);
+		pdu->data_iovcnt = ctx->iovcnt;
+		pdu->data_len = data_len;
+	}
 }
 
 static int
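What the multi-iovec branch above computes, sketched as a standalone helper (the _iov_ctx helpers are internal to the patch; this slice_iovs() is a hypothetical equivalent): map a byte range (offset, data_len) of a logical buffer described by several iovecs onto a new iovec array, splitting at entry boundaries, with offset playing the role of tcp_req->datao.

	#include <assert.h>
	#include <stdint.h>
	#include <sys/uio.h>

	static int
	slice_iovs(const struct iovec *src, int srccnt, uint32_t offset,
		   uint32_t data_len, struct iovec *dst, int dstmax)
	{
		uint32_t len;
		int i = 0, out = 0;

		/* Skip whole SGEs that lie entirely before the requested offset. */
		while (i < srccnt && offset >= src[i].iov_len) {
			offset -= src[i].iov_len;
			i++;
		}

		while (data_len > 0 && i < srccnt && out < dstmax) {
			len = src[i].iov_len - offset;
			if (len > data_len) {
				len = data_len;	/* same role as spdk_min() above */
			}
			dst[out].iov_base = (uint8_t *)src[i].iov_base + offset;
			dst[out].iov_len = len;
			data_len -= len;
			offset = 0;
			out++;
			i++;
		}

		assert(data_len == 0);	/* mirrors assert(remain_len == 0) */
		return out;		/* number of destination iovecs produced */
	}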