nvmf/vfio-user: allocate SQ requests individually
Since we already allocate ->sg on a per-request basis, drop the
->reqs_internal array allocation in favour of allocating individual
requests and placing them on the ->free_reqs queue, which removes the
need to track the array. For request abort, use the ->outstanding
request list, now that we have it, to find the victim request.

Signed-off-by: John Levon <john.levon@nutanix.com>
Change-Id: I25227ccd2ab7e00c8a2e7b2e2af2dc3b073584cd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11427
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
parent cb3d98d596
commit bc5f3a6f80
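The heart of the change is visible in the first hunk and in alloc_sq_reqs() further down: instead of a calloc()'d array of requests plus a second calloc() per request for its dma_sg_t table, each request becomes a single allocation whose tail carries the scatter-gather storage, and requests are tracked on a TAILQ free list. A minimal sketch of that pattern, using illustrative names (my_req, alloc_reqs, MAX_IOVECS) rather than SPDK's, and a stand-in for libvfio-user's dma_sg_size():

/*
 * Sketch of the allocation pattern this commit adopts: one calloc() per
 * request, with the runtime-sized dma_sg_t area placed directly after
 * the struct via a flexible array member, linked on a TAILQ free list.
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>

#define MAX_IOVECS 8

/* Stand-in for libvfio-user's runtime query of the opaque dma_sg_t size. */
static size_t dma_sg_size(void) { return 64; }

struct my_req {
	TAILQ_ENTRY(my_req) link;
	uint8_t sg[];		/* MAX_IOVECS worth of dma_sg_t */
};

TAILQ_HEAD(req_list, my_req);

static int
alloc_reqs(struct req_list *free_reqs, uint32_t nr_reqs)
{
	size_t req_size = sizeof(struct my_req) + MAX_IOVECS * dma_sg_size();

	for (uint32_t i = 0; i < nr_reqs; i++) {
		struct my_req *req = calloc(1, req_size);

		if (req == NULL) {
			return -1;	/* caller drains free_reqs */
		}
		TAILQ_INSERT_TAIL(free_reqs, req, link);
	}
	return 0;
}

One allocation per request also means teardown is a plain walk-and-free of the list, with no separate array pointer to keep in sync, which is exactly what the new free_sq_reqs() below does.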
@@ -193,12 +193,13 @@ struct nvmf_vfio_user_req {
 	/* old CC before prop_set_cc fabric command */
 	union spdk_nvme_cc_register cc;
 
-	/* placeholder for gpa_to_vva memory map table, the IO buffer doesn't use it */
-	dma_sg_t *sg;
+	TAILQ_ENTRY(nvmf_vfio_user_req) link;
 	struct iovec iov[NVMF_VFIO_USER_MAX_IOVECS];
 	uint8_t iovcnt;
 
-	TAILQ_ENTRY(nvmf_vfio_user_req) link;
+	/* NVMF_VFIO_USER_MAX_IOVECS worth of dma_sg_t. */
+	uint8_t sg[];
 };
 
 /*
@@ -262,7 +263,6 @@ struct nvmf_vfio_user_sq {
 	struct spdk_nvmf_qpair qpair;
 	struct spdk_nvmf_transport_poll_group *group;
 	struct nvmf_vfio_user_ctrlr *ctrlr;
-	struct nvmf_vfio_user_req *reqs_internal;
 
 	uint32_t qsize;
 	uint32_t qid;
@@ -287,7 +287,8 @@ struct nvmf_vfio_user_sq {
 	 */
 	struct spdk_nvme_cmd create_io_sq_cmd;
 
-	TAILQ_HEAD(, nvmf_vfio_user_req) reqs;
+	/* Currently unallocated reqs. */
+	TAILQ_HEAD(, nvmf_vfio_user_req) free_reqs;
 	/* Poll group entry */
 	TAILQ_ENTRY(nvmf_vfio_user_sq) link;
 	/* Connected SQ entry */
@@ -1032,7 +1033,7 @@ acq_setup(struct nvmf_vfio_user_ctrlr *ctrlr)
 static inline dma_sg_t *
 vu_req_to_sg_t(struct nvmf_vfio_user_req *vu_req, uint32_t iovcnt)
 {
-	return (dma_sg_t *)((uintptr_t)vu_req->sg + iovcnt * dma_sg_size());
+	return (dma_sg_t *)(vu_req->sg + iovcnt * dma_sg_size());
 }
 
 static void *
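The dropped (uintptr_t) cast is a side effect of the new field type. dma_sg_t is opaque to libvfio-user consumers (hence the runtime dma_sg_size() query), so the old dma_sg_t *sg field could not be indexed with ordinary pointer arithmetic and the byte offset had to be computed through uintptr_t. With sg declared as uint8_t [], the arithmetic is already byte-granular. A self-contained sketch, with stand-ins for the libvfio-user pieces:

#include <stddef.h>
#include <stdint.h>

typedef struct dma_sg dma_sg_t;			/* opaque, as in libvfio-user */
static size_t dma_sg_size(void) { return 64; }	/* stand-in for the real query */

struct req {
	uint8_t iovcnt;	/* a flexible array must follow a named member */
	uint8_t sg[];	/* iovcnt * dma_sg_size() bytes live here */
};

static inline dma_sg_t *
req_to_sg_t(struct req *r, uint32_t iovcnt)
{
	/* uint8_t pointer arithmetic steps one byte at a time, so no
	 * detour through uintptr_t is needed to reach element iovcnt. */
	return (dma_sg_t *)(r->sg + iovcnt * dma_sg_size());
}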
@@ -1179,6 +1180,16 @@ io_q_exists(struct nvmf_vfio_user_ctrlr *vu_ctrlr, const uint16_t qid, const boo
 		vu_ctrlr->sqs[qid]->sq_state != VFIO_USER_SQ_UNUSED);
 }
 
+static void
+free_sq_reqs(struct nvmf_vfio_user_sq *sq)
+{
+	while (!TAILQ_EMPTY(&sq->free_reqs)) {
+		struct nvmf_vfio_user_req *vu_req = TAILQ_FIRST(&sq->free_reqs);
+		TAILQ_REMOVE(&sq->free_reqs, vu_req, link);
+		free(vu_req);
+	}
+}
+
 /* Deletes a SQ, if this SQ is the last user of the associated CQ
  * and the controller is being shut down or reset, then the CQ is
  * also deleted.
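free_sq_reqs() drains the free list head-first with the TAILQ_EMPTY/TAILQ_FIRST/TAILQ_REMOVE idiom. An equivalent drain can be written with TAILQ_FOREACH_SAFE, the form the error path of alloc_sq_reqs() uses below; a small self-contained sketch (my_req and drain are illustrative names):

#include <stdlib.h>
#include <sys/queue.h>	/* note: glibc's header lacks TAILQ_FOREACH_SAFE;
			 * BSD systems and SPDK's bundled queue.h provide it */

struct my_req {
	TAILQ_ENTRY(my_req) link;
};

TAILQ_HEAD(req_list, my_req);

static void
drain(struct req_list *head)
{
	struct my_req *req, *tmp;

	/* The _SAFE variant caches the successor before the body runs,
	 * so freeing the current element mid-iteration is legal. */
	TAILQ_FOREACH_SAFE(req, head, link, tmp) {
		TAILQ_REMOVE(head, req, link);
		free(req);
	}
}

Either form works; the while-loop version in the hunk makes the list-is-empty-afterwards invariant slightly more obvious.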
@@ -1187,9 +1198,7 @@ static void
 delete_sq_done(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *sq)
 {
 	struct nvmf_vfio_user_cq *cq;
-	struct nvmf_vfio_user_req *vu_req;
 	uint16_t cqid;
-	uint32_t i;
 
 	SPDK_DEBUGLOG(nvmf_vfio, "%s: delete SQ%d=%p done\n", ctrlr_id(vu_ctrlr),
 		      sq->qid, sq);
@@ -1197,15 +1206,9 @@ delete_sq_done(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *
 	/* Free SQ resources */
 	unmap_q(vu_ctrlr, &sq->mapping);
 
-	for (i = 0; i < sq->qsize; i++) {
-		vu_req = &sq->reqs_internal[i];
-		free(vu_req->sg);
-	}
+	free_sq_reqs(sq);
 
-	if (sq->qsize) {
-		sq->qsize = 0;
-		free(sq->reqs_internal);
-	}
+	sq->qsize = 0;
 	sq->size = 0;
 	sq->sq_state = VFIO_USER_SQ_DELETED;
 
@@ -1237,8 +1240,6 @@ free_qp(struct nvmf_vfio_user_ctrlr *ctrlr, uint16_t qid)
 {
 	struct nvmf_vfio_user_sq *sq;
 	struct nvmf_vfio_user_cq *cq;
-	struct nvmf_vfio_user_req *vu_req;
-	uint32_t i;
 
 	if (ctrlr == NULL) {
 		return;
@@ -1249,13 +1250,7 @@ free_qp(struct nvmf_vfio_user_ctrlr *ctrlr, uint16_t qid)
 	SPDK_DEBUGLOG(nvmf_vfio, "%s: Free SQ %u\n", ctrlr_id(ctrlr), qid);
 	unmap_q(ctrlr, &sq->mapping);
 
-	for (i = 0; i < sq->qsize; i++) {
-		vu_req = &sq->reqs_internal[i];
-		free(vu_req->sg);
-	}
-	if (sq->qsize) {
-		free(sq->reqs_internal);
-	}
+	free_sq_reqs(sq);
 
 	free(sq->mapping.sg);
 	free(sq);
@@ -1298,6 +1293,8 @@ init_sq(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvmf_transport *transpor
 	sq->ctrlr = ctrlr;
 	ctrlr->sqs[id] = sq;
 
+	TAILQ_INIT(&sq->free_reqs);
+
 	return 0;
 }
 
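Ordering note: TAILQ_INIT(&sq->free_reqs) runs in init_sq(), before alloc_sq_reqs() can ever be called, and that matters. Both the err: path in alloc_sq_reqs() and free_sq_reqs() traverse the list head, so the head must be valid even when no request was ever queued on it.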
@@ -1327,25 +1324,21 @@ init_cq(struct nvmf_vfio_user_ctrlr *vu_ctrlr, const uint16_t id)
 
 static int
 alloc_sq_reqs(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *sq,
-	      const uint32_t qsize)
+	      const uint32_t nr_reqs)
 {
-	uint32_t i;
 	struct nvmf_vfio_user_req *vu_req, *tmp;
-	struct spdk_nvmf_request *req;
+	size_t req_size;
+	uint32_t i;
 
-	TAILQ_INIT(&sq->reqs);
+	req_size = sizeof(struct nvmf_vfio_user_req) +
+		   (dma_sg_size() * NVMF_VFIO_USER_MAX_IOVECS);
 
-	sq->reqs_internal = calloc(qsize, sizeof(struct nvmf_vfio_user_req));
-	if (sq->reqs_internal == NULL) {
-		SPDK_ERRLOG("%s: error allocating reqs: %m\n", ctrlr_id(vu_ctrlr));
-		return -ENOMEM;
-	}
+	for (i = 0; i < nr_reqs; i++) {
+		struct spdk_nvmf_request *req;
 
-	for (i = 0; i < qsize; i++) {
-		vu_req = &sq->reqs_internal[i];
-		vu_req->sg = calloc(NVMF_VFIO_USER_MAX_IOVECS, dma_sg_size());
-		if (vu_req->sg == NULL) {
-			goto sg_err;
-		}
+		vu_req = calloc(1, req_size);
+		if (vu_req == NULL) {
+			goto err;
+		}
 
 		req = &vu_req->req;
@@ -1353,17 +1346,16 @@ alloc_sq_reqs(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *s
 		req->rsp = (union nvmf_c2h_msg *)&vu_req->rsp;
 		req->cmd = (union nvmf_h2c_msg *)&vu_req->cmd;
 
-		TAILQ_INSERT_TAIL(&sq->reqs, vu_req, link);
+		TAILQ_INSERT_TAIL(&sq->free_reqs, vu_req, link);
 	}
 
-	sq->qsize = qsize;
+	sq->qsize = nr_reqs;
 	return 0;
 
-sg_err:
-	TAILQ_FOREACH_SAFE(vu_req, &sq->reqs, link, tmp) {
-		free(vu_req->sg);
+err:
+	TAILQ_FOREACH_SAFE(vu_req, &sq->free_reqs, link, tmp) {
+		free(vu_req);
 	}
-	free(sq->reqs_internal);
 	return -ENOMEM;
 }
 
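Two details of the new allocator are worth noting. First, req_size folds the NVMF_VFIO_USER_MAX_IOVECS dma_sg_t entries into the same calloc() as the request itself; because dma_sg_size() is only known at run time, the flexible array has to be typed uint8_t rather than dma_sg_t. Second, the err: path frees the partially built list without emptying the head, so free_reqs must not be traversed again for this SQ unless it is re-initialized first, which appears to be the assumption for callers that abandon the SQ on -ENOMEM.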
@@ -1698,7 +1690,9 @@ handle_cmd_rsp(struct nvmf_vfio_user_req *vu_req, void *cb_arg)
 	assert(vu_ctrlr != NULL);
 
 	if (spdk_likely(vu_req->iovcnt)) {
-		vfu_unmap_sg(vu_ctrlr->endpoint->vfu_ctx, vu_req->sg, vu_req->iov, vu_req->iovcnt);
+		vfu_unmap_sg(vu_ctrlr->endpoint->vfu_ctx,
+			     vu_req_to_sg_t(vu_req, 0),
+			     vu_req->iov, vu_req->iovcnt);
 	}
 	sqid = sq->qid;
 	cqid = sq->cqid;
@@ -2719,9 +2713,7 @@ vfio_user_migration_device_state_transition(vfu_ctx_t *vfu_ctx, vfu_migr_state_t
 	struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx);
 	struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr;
 	struct nvmf_vfio_user_sq *sq;
-	struct nvmf_vfio_user_req *vu_req;
 	int ret = 0;
-	uint32_t i;
 
 	SPDK_DEBUGLOG(nvmf_vfio, "%s controller state %u, migration state %u\n", endpoint_id(endpoint),
 		      vu_ctrlr->state, state);
@@ -2761,12 +2753,8 @@ vfio_user_migration_device_state_transition(vfu_ctx_t *vfu_ctx, vfu_migr_state_t
 		/* Free ADMIN SQ resources first, SQ resources will be
 		 * allocated based on queue size from source VM.
 		 */
-		for (i = 0; i < sq->qsize; i++) {
-			vu_req = &sq->reqs_internal[i];
-			free(vu_req->sg);
-		}
+		free_sq_reqs(sq);
 		sq->qsize = 0;
-		free(sq->reqs_internal);
 		break;
 	case VFU_MIGR_STATE_RUNNING:
 		if (vu_ctrlr->state != VFIO_USER_CTRLR_MIGRATING) {
@@ -3751,7 +3739,7 @@ _nvmf_vfio_user_req_free(struct nvmf_vfio_user_sq *sq, struct nvmf_vfio_user_req
 	vu_req->iovcnt = 0;
 	vu_req->state = VFIO_USER_REQUEST_STATE_FREE;
 
-	TAILQ_INSERT_TAIL(&sq->reqs, vu_req, link);
+	TAILQ_INSERT_TAIL(&sq->free_reqs, vu_req, link);
 }
 
 static int
@@ -3820,7 +3808,7 @@ nvmf_vfio_user_close_qpair(struct spdk_nvmf_qpair *qpair,
 }
 
 /**
- * Returns a preallocated spdk_nvmf_request or NULL if there isn't one available.
+ * Returns a preallocated request, or NULL if there isn't one available.
  */
 static struct nvmf_vfio_user_req *
 get_nvmf_vfio_user_req(struct nvmf_vfio_user_sq *sq)
@@ -3831,12 +3819,12 @@ get_nvmf_vfio_user_req(struct nvmf_vfio_user_sq *sq)
 		return NULL;
 	}
 
-	if (TAILQ_EMPTY(&sq->reqs)) {
+	req = TAILQ_FIRST(&sq->free_reqs);
+	if (req == NULL) {
 		return NULL;
 	}
 
-	req = TAILQ_FIRST(&sq->reqs);
-	TAILQ_REMOVE(&sq->reqs, req, link);
+	TAILQ_REMOVE(&sq->free_reqs, req, link);
 
 	return req;
 }
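Together with _nvmf_vfio_user_req_free() above, the free list now follows the standard pop-on-submit, push-on-complete discipline, and TAILQ_FIRST() returning NULL doubles as the empty check that TAILQ_EMPTY() used to provide. Condensed into the illustrative my_req/req_list types from the first sketch:

/* Pop a free request; NULL means the SQ is out of requests. */
static struct my_req *
req_get(struct req_list *free_reqs)
{
	struct my_req *req = TAILQ_FIRST(free_reqs);

	if (req != NULL) {
		TAILQ_REMOVE(free_reqs, req, link);
	}
	return req;
}

/* Push a completed request back; it is immediately reusable. */
static void
req_put(struct req_list *free_reqs, struct my_req *req)
{
	TAILQ_INSERT_TAIL(free_reqs, req, link);
}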
@@ -4175,28 +4163,28 @@ static void
 nvmf_vfio_user_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
 				   struct spdk_nvmf_request *req)
 {
-	struct nvmf_vfio_user_sq *sq;
-	struct nvmf_vfio_user_req *vu_req, *vu_req_to_abort = NULL;
-	uint32_t i;
+	struct spdk_nvmf_request *req_to_abort = NULL;
 	uint16_t cid;
 
-	sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair);
-
 	cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;
-	for (i = 0; i < sq->qsize; i++) {
-		vu_req = &sq->reqs_internal[i];
+
+	TAILQ_FOREACH(req, &qpair->outstanding, link) {
+		struct nvmf_vfio_user_req *vu_req;
+
+		vu_req = SPDK_CONTAINEROF(req, struct nvmf_vfio_user_req, req);
+
 		if (vu_req->state == VFIO_USER_REQUEST_STATE_EXECUTING && vu_req->cmd.cid == cid) {
-			vu_req_to_abort = vu_req;
+			req_to_abort = req;
 			break;
 		}
 	}
 
-	if (vu_req_to_abort == NULL) {
+	if (req_to_abort == NULL) {
 		spdk_nvmf_request_complete(req);
 		return;
 	}
 
-	req->req_to_abort = &vu_req_to_abort->req;
+	req->req_to_abort = req_to_abort;
 	nvmf_ctrlr_abort_request(req);
 }
 
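A caveat on the new abort search: TAILQ_FOREACH() reuses req, the abort command itself, as its iteration variable. If no executing command matches the CID, the loop terminates with req equal to NULL, which is then handed to spdk_nvmf_request_complete(); if one does match, req aliases the victim rather than the abort command by the time req->req_to_abort is assigned. A sketch of the search with a separate iterator (iter_req is an illustrative name; later SPDK revisions appear to address this along these lines):

	struct spdk_nvmf_request *iter_req;

	TAILQ_FOREACH(iter_req, &qpair->outstanding, link) {
		struct nvmf_vfio_user_req *vu_req =
			SPDK_CONTAINEROF(iter_req, struct nvmf_vfio_user_req, req);

		if (vu_req->state == VFIO_USER_REQUEST_STATE_EXECUTING &&
		    vu_req->cmd.cid == cid) {
			req_to_abort = iter_req;
			break;
		}
	}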