diff --git a/lib/nvme/nvme_tcp.c b/lib/nvme/nvme_tcp.c
index 54e0cacc3..e66d9d978 100644
--- a/lib/nvme/nvme_tcp.c
+++ b/lib/nvme/nvme_tcp.c
@@ -110,7 +110,8 @@ struct nvme_tcp_req {
 	uint32_t				r2tl_remain;
 	bool					in_capsule_data;
 	struct nvme_tcp_pdu			send_pdu;
-	void					*buf;
+	struct iovec				iov[NVME_TCP_MAX_SGL_DESCRIPTORS];
+	uint32_t				iovcnt;
 	TAILQ_ENTRY(nvme_tcp_req)		link;
 };
 
@@ -147,7 +148,7 @@ nvme_tcp_req_get(struct nvme_tcp_qpair *tqpair)
 	tcp_req->req = NULL;
 	tcp_req->in_capsule_data = false;
 	tcp_req->r2tl_remain = 0;
-	tcp_req->buf = NULL;
+	tcp_req->iovcnt = 0;
 	memset(&tcp_req->send_pdu, 0, sizeof(tcp_req->send_pdu));
 	TAILQ_INSERT_TAIL(&tqpair->outstanding_reqs, tcp_req, link);
 
@@ -481,7 +482,10 @@ static int
 nvme_tcp_build_contig_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
 {
 	struct nvme_request *req = tcp_req->req;
-	tcp_req->buf = req->payload.contig_or_cb_arg + req->payload_offset;
+
+	tcp_req->iov[0].iov_base = req->payload.contig_or_cb_arg + req->payload_offset;
+	tcp_req->iov[0].iov_len = req->payload_size;
+	tcp_req->iovcnt = 1;
 
 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
 
@@ -496,8 +500,9 @@ static int
 nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
 {
-	int rc;
+	int rc, iovcnt;
 	uint32_t length;
+	uint64_t remaining_size;
 	struct nvme_request *req = tcp_req->req;
 
 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
 
@@ -508,17 +513,28 @@ nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *t
 	assert(req->payload.next_sge_fn != NULL);
 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
 
-	/* TODO: for now, we only support a single SGL entry */
-	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &tcp_req->buf, &length);
+	remaining_size = req->payload_size;
+	iovcnt = 0;
 
-	if (rc) {
+	do {
+		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &tcp_req->iov[iovcnt].iov_base,
+					      &length);
+		if (rc) {
+			return -1;
+		}
+
+		tcp_req->iov[iovcnt].iov_len = length;
+		remaining_size -= length;
+		iovcnt++;
+	} while (remaining_size > 0 && iovcnt < NVME_TCP_MAX_SGL_DESCRIPTORS);
+
+
+	/* Should be impossible if we did our sgl checks properly up the stack, but do a sanity check here. */
+	if (remaining_size > 0) {
 		return -1;
 	}
 
-	if (length < req->payload_size) {
-		SPDK_ERRLOG("multi-element SGL currently not supported for TCP now\n");
-		return -1;
-	}
+	tcp_req->iovcnt = iovcnt;
 
 	return 0;
 }
@@ -591,7 +607,32 @@ nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
 			  struct nvme_tcp_req *tcp_req,
 			  uint32_t data_len)
 {
-	nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)tcp_req->buf + tcp_req->datao), data_len);
+	uint32_t i, remain_len, len;
+	struct _iov_ctx *ctx;
+
+	if (tcp_req->iovcnt == 1) {
+		nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)tcp_req->iov[0].iov_base + tcp_req->datao), data_len);
+	} else {
+		i = 0;
+		ctx = &pdu->iov_ctx;
+		assert(tcp_req->iovcnt <= NVME_TCP_MAX_SGL_DESCRIPTORS);
+		_iov_ctx_init(ctx, pdu->data_iov, tcp_req->iovcnt, tcp_req->datao);
+		remain_len = data_len;
+
+		while (remain_len > 0) {
+			assert(i < NVME_TCP_MAX_SGL_DESCRIPTORS);
+			len = spdk_min(remain_len, tcp_req->iov[i].iov_len);
+			remain_len -= len;
+			if (!_iov_ctx_set_iov(ctx, tcp_req->iov[i].iov_base, len)) {
+				break;
+			}
+			i++;
+		}
+
+		assert(remain_len == 0);
+		pdu->data_iovcnt = ctx->iovcnt;
+		pdu->data_len = data_len;
+	}
 }
 
 static int
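
For reference, below is a minimal sketch of the semantics this patch assumes from the _iov_ctx helpers (_iov_ctx_init / _iov_ctx_set_iov): skip the first iov_offset bytes (the request's datao) across the source buffers, then append trimmed iovecs to the PDU's data_iov array until it is full. The real helpers live in SPDK's internal nvme_tcp header; the field names and exact logic here are assumptions for illustration, not the verbatim SPDK implementation.

/*
 * Sketch only: approximate semantics of the _iov_ctx helpers used by
 * nvme_tcp_pdu_set_data_buf() in the patch above. Assumed, not verbatim.
 */
#include <stdbool.h>
#include <stdint.h>
#include <sys/uio.h>

struct _iov_ctx {
	struct iovec	*iov;		/* next destination iovec to fill */
	int		num_iovs;	/* capacity of the destination array */
	int		iovcnt;		/* destination iovecs filled so far */
	uint32_t	iov_offset;	/* bytes still to skip (tcp_req->datao) */
};

static void
_iov_ctx_init(struct _iov_ctx *ctx, struct iovec *iovs, int num_iovs, uint32_t iov_offset)
{
	ctx->iov = iovs;
	ctx->num_iovs = num_iovs;
	ctx->iovcnt = 0;
	ctx->iov_offset = iov_offset;
}

/*
 * Append one source buffer, trimming any prefix that still has to be
 * skipped. Returns false once the destination array is full, which is why
 * the multi-iov branch above breaks out of its loop on a false return.
 */
static bool
_iov_ctx_set_iov(struct _iov_ctx *ctx, void *data, uint32_t data_len)
{
	if (ctx->iov_offset >= data_len) {
		/* Buffer lies entirely inside the prefix being skipped. */
		ctx->iov_offset -= data_len;
		return true;
	}

	ctx->iov->iov_base = (uint8_t *)data + ctx->iov_offset;
	ctx->iov->iov_len = data_len - ctx->iov_offset;
	ctx->iov_offset = 0;
	ctx->iov++;
	ctx->iovcnt++;

	return ctx->iovcnt < ctx->num_iovs;
}

Under these assumed semantics, the multi-iov branch of nvme_tcp_pdu_set_data_buf() maps data_len bytes of the request's SGL, starting datao bytes in, onto pdu->data_iov, and ctx->iovcnt ends up as the number of destination iovecs actually used.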