nvme: allocate extra request for fabrics connect
With async connect, we need to avoid the case
where the initiator is sending the icreq, and
meanwhile the application submits enough I/O
such that the request objects are exhausted, leaving
none for the FABRICS/CONNECT command that we need
to send after the icreq is done.
So allocate an extra request, and then use it
when sending the FABRICS/CONNECT command, rather
than trying to pull one from the qpair's STAILQ.
Fixes issue #2371.
Signed-off-by: Jim Harris <james.r.harris@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11515 (master)
(cherry picked from commit 635d0cbe75)
Change-Id: If42a3fbb3fd9d863ee48cf5cae75a9ba1754c349
Signed-off-by: Krzysztof Karas <krzysztof.karas@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/12469
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
This commit is contained in:
parent
9d14d8d201
commit
6c61e733a2
@ -533,6 +533,7 @@ nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entr
|
|||||||
struct spdk_nvmf_fabric_connect_cmd cmd;
|
struct spdk_nvmf_fabric_connect_cmd cmd;
|
||||||
struct spdk_nvmf_fabric_connect_data *nvmf_data;
|
struct spdk_nvmf_fabric_connect_data *nvmf_data;
|
||||||
struct spdk_nvme_ctrlr *ctrlr;
|
struct spdk_nvme_ctrlr *ctrlr;
|
||||||
|
struct nvme_request *req;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
if (num_entries == 0 || num_entries > SPDK_NVME_IO_QUEUE_MAX_ENTRIES) {
|
if (num_entries == 0 || num_entries > SPDK_NVME_IO_QUEUE_MAX_ENTRIES) {
|
||||||
@ -567,6 +568,10 @@ nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entr
|
|||||||
cmd.sqsize = num_entries - 1;
|
cmd.sqsize = num_entries - 1;
|
||||||
cmd.kato = ctrlr->opts.keep_alive_timeout_ms;
|
cmd.kato = ctrlr->opts.keep_alive_timeout_ms;
|
||||||
|
|
||||||
|
assert(qpair->reserved_req != NULL);
|
||||||
|
req = qpair->reserved_req;
|
||||||
|
memcpy(&req->cmd, &cmd, sizeof(cmd));
|
||||||
|
|
||||||
if (nvme_qpair_is_admin_queue(qpair)) {
|
if (nvme_qpair_is_admin_queue(qpair)) {
|
||||||
nvmf_data->cntlid = 0xFFFF;
|
nvmf_data->cntlid = 0xFFFF;
|
||||||
} else {
|
} else {
|
||||||
@ -579,10 +584,10 @@ nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entr
|
|||||||
snprintf(nvmf_data->hostnqn, sizeof(nvmf_data->hostnqn), "%s", ctrlr->opts.hostnqn);
|
snprintf(nvmf_data->hostnqn, sizeof(nvmf_data->hostnqn), "%s", ctrlr->opts.hostnqn);
|
||||||
snprintf(nvmf_data->subnqn, sizeof(nvmf_data->subnqn), "%s", ctrlr->trid.subnqn);
|
snprintf(nvmf_data->subnqn, sizeof(nvmf_data->subnqn), "%s", ctrlr->trid.subnqn);
|
||||||
|
|
||||||
rc = spdk_nvme_ctrlr_cmd_io_raw(ctrlr, qpair,
|
NVME_INIT_REQUEST(req, nvme_completion_poll_cb, status, NVME_PAYLOAD_CONTIG(nvmf_data, NULL),
|
||||||
(struct spdk_nvme_cmd *)&cmd,
|
sizeof(*nvmf_data), 0);
|
||||||
nvmf_data, sizeof(*nvmf_data),
|
|
||||||
nvme_completion_poll_cb, status);
|
rc = nvme_qpair_submit_request(qpair, req);
|
||||||
if (rc < 0) {
|
if (rc < 0) {
|
||||||
SPDK_ERRLOG("Failed to allocate/submit FABRIC_CONNECT command, rc %d\n", rc);
|
SPDK_ERRLOG("Failed to allocate/submit FABRIC_CONNECT command, rc %d\n", rc);
|
||||||
spdk_free(status->dma_data);
|
spdk_free(status->dma_data);
|
||||||
|
@ -453,6 +453,9 @@ struct spdk_nvme_qpair {
|
|||||||
|
|
||||||
enum spdk_nvme_transport_type trtype;
|
enum spdk_nvme_transport_type trtype;
|
||||||
|
|
||||||
|
/* request object used only for this qpair's FABRICS/CONNECT command (if needed) */
|
||||||
|
struct nvme_request *reserved_req;
|
||||||
|
|
||||||
STAILQ_HEAD(, nvme_request) free_req;
|
STAILQ_HEAD(, nvme_request) free_req;
|
||||||
STAILQ_HEAD(, nvme_request) queued_req;
|
STAILQ_HEAD(, nvme_request) queued_req;
|
||||||
|
|
||||||
@ -1356,8 +1359,13 @@ nvme_free_request(struct nvme_request *req)
|
|||||||
assert(req->num_children == 0);
|
assert(req->num_children == 0);
|
||||||
assert(req->qpair != NULL);
|
assert(req->qpair != NULL);
|
||||||
|
|
||||||
|
/* The reserved_req does not go in the free_req STAILQ - it is
|
||||||
|
* saved only for use with a FABRICS/CONNECT command.
|
||||||
|
*/
|
||||||
|
if (spdk_likely(req->qpair->reserved_req != req)) {
|
||||||
STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
|
STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
|
nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
|
||||||
|
@ -819,6 +819,9 @@ nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
|
|||||||
|
|
||||||
req_size_padded = (sizeof(struct nvme_request) + 63) & ~(size_t)63;
|
req_size_padded = (sizeof(struct nvme_request) + 63) & ~(size_t)63;
|
||||||
|
|
||||||
|
/* Add one for the reserved_req */
|
||||||
|
num_requests++;
|
||||||
|
|
||||||
qpair->req_buf = spdk_zmalloc(req_size_padded * num_requests, 64, NULL,
|
qpair->req_buf = spdk_zmalloc(req_size_padded * num_requests, 64, NULL,
|
||||||
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
|
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
|
||||||
if (qpair->req_buf == NULL) {
|
if (qpair->req_buf == NULL) {
|
||||||
@ -831,8 +834,12 @@ nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
|
|||||||
struct nvme_request *req = qpair->req_buf + i * req_size_padded;
|
struct nvme_request *req = qpair->req_buf + i * req_size_padded;
|
||||||
|
|
||||||
req->qpair = qpair;
|
req->qpair = qpair;
|
||||||
|
if (i == 0) {
|
||||||
|
qpair->reserved_req = req;
|
||||||
|
} else {
|
||||||
STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
|
STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -83,23 +83,12 @@ static struct spdk_nvmf_fabric_connect_data g_nvmf_data;
|
|||||||
static struct nvme_request *g_request;
|
static struct nvme_request *g_request;
|
||||||
|
|
||||||
int
|
int
|
||||||
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
|
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
|
||||||
struct spdk_nvme_qpair *qpair,
|
|
||||||
struct spdk_nvme_cmd *cmd,
|
|
||||||
void *buf, uint32_t len,
|
|
||||||
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
|
|
||||||
{
|
{
|
||||||
struct nvme_request *req;
|
CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
|
||||||
|
|
||||||
req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);
|
|
||||||
|
|
||||||
if (req == NULL) {
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
memcpy(&req->cmd, cmd, sizeof(req->cmd));
|
|
||||||
memcpy(&g_nvmf_data, buf, sizeof(g_nvmf_data));
|
|
||||||
g_request = req;
|
g_request = req;
|
||||||
|
memcpy(&g_nvmf_data, req->payload.contig_or_cb_arg, sizeof(g_nvmf_data));
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -368,6 +357,7 @@ static void
|
|||||||
test_nvme_fabric_qpair_connect(void)
|
test_nvme_fabric_qpair_connect(void)
|
||||||
{
|
{
|
||||||
struct spdk_nvme_qpair qpair = {};
|
struct spdk_nvme_qpair qpair = {};
|
||||||
|
struct nvme_request reserved_req = {};
|
||||||
struct nvme_request req = {};
|
struct nvme_request req = {};
|
||||||
struct spdk_nvme_ctrlr ctrlr = {};
|
struct spdk_nvme_ctrlr ctrlr = {};
|
||||||
struct spdk_nvmf_fabric_connect_cmd *cmd = NULL;
|
struct spdk_nvmf_fabric_connect_cmd *cmd = NULL;
|
||||||
@ -379,11 +369,13 @@ test_nvme_fabric_qpair_connect(void)
|
|||||||
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
|
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
|
||||||
};
|
};
|
||||||
|
|
||||||
cmd = (void *)&req.cmd;
|
cmd = (void *)&reserved_req.cmd;
|
||||||
qpair.ctrlr = &ctrlr;
|
qpair.ctrlr = &ctrlr;
|
||||||
req.qpair = &qpair;
|
req.qpair = &qpair;
|
||||||
|
reserved_req.qpair = &qpair;
|
||||||
STAILQ_INIT(&qpair.free_req);
|
STAILQ_INIT(&qpair.free_req);
|
||||||
STAILQ_INSERT_HEAD(&qpair.free_req, &req, stailq);
|
STAILQ_INSERT_HEAD(&qpair.free_req, &req, stailq);
|
||||||
|
qpair.reserved_req = &reserved_req;
|
||||||
memset(&g_nvmf_data, 0, sizeof(g_nvmf_data));
|
memset(&g_nvmf_data, 0, sizeof(g_nvmf_data));
|
||||||
|
|
||||||
qpair.id = 1;
|
qpair.id = 1;
|
||||||
@ -404,12 +396,13 @@ test_nvme_fabric_qpair_connect(void)
|
|||||||
CU_ASSERT(!strncmp(g_nvmf_data.hostid, ctrlr.opts.extended_host_id, sizeof(g_nvmf_data.hostid)));
|
CU_ASSERT(!strncmp(g_nvmf_data.hostid, ctrlr.opts.extended_host_id, sizeof(g_nvmf_data.hostid)));
|
||||||
CU_ASSERT(!strncmp(g_nvmf_data.hostnqn, ctrlr.opts.hostnqn, sizeof(ctrlr.opts.hostnqn)));
|
CU_ASSERT(!strncmp(g_nvmf_data.hostnqn, ctrlr.opts.hostnqn, sizeof(ctrlr.opts.hostnqn)));
|
||||||
CU_ASSERT(!strncmp(g_nvmf_data.subnqn, ctrlr.trid.subnqn, sizeof(ctrlr.trid.subnqn)));
|
CU_ASSERT(!strncmp(g_nvmf_data.subnqn, ctrlr.trid.subnqn, sizeof(ctrlr.trid.subnqn)));
|
||||||
CU_ASSERT(STAILQ_EMPTY(&qpair.free_req));
|
/* Make sure we used the qpair's reserved_req, and not one from the STAILQ */
|
||||||
|
CU_ASSERT(g_request == qpair.reserved_req);
|
||||||
|
CU_ASSERT(!STAILQ_EMPTY(&qpair.free_req));
|
||||||
|
|
||||||
/* qid is adminq */
|
/* qid is adminq */
|
||||||
memset(&g_nvmf_data, 0, sizeof(g_nvmf_data));
|
memset(&g_nvmf_data, 0, sizeof(g_nvmf_data));
|
||||||
memset(&req, 0, sizeof(req));
|
memset(&reserved_req, 0, sizeof(reserved_req));
|
||||||
STAILQ_INSERT_HEAD(&qpair.free_req, &req, stailq);
|
|
||||||
qpair.id = 0;
|
qpair.id = 0;
|
||||||
ctrlr.cntlid = 0;
|
ctrlr.cntlid = 0;
|
||||||
|
|
||||||
@ -425,10 +418,11 @@ test_nvme_fabric_qpair_connect(void)
|
|||||||
CU_ASSERT(!strncmp(g_nvmf_data.hostid, ctrlr.opts.extended_host_id, sizeof(g_nvmf_data.hostid)));
|
CU_ASSERT(!strncmp(g_nvmf_data.hostid, ctrlr.opts.extended_host_id, sizeof(g_nvmf_data.hostid)));
|
||||||
CU_ASSERT(!strncmp(g_nvmf_data.hostnqn, ctrlr.opts.hostnqn, sizeof(ctrlr.opts.hostnqn)));
|
CU_ASSERT(!strncmp(g_nvmf_data.hostnqn, ctrlr.opts.hostnqn, sizeof(ctrlr.opts.hostnqn)));
|
||||||
CU_ASSERT(!strncmp(g_nvmf_data.subnqn, ctrlr.trid.subnqn, sizeof(ctrlr.trid.subnqn)));
|
CU_ASSERT(!strncmp(g_nvmf_data.subnqn, ctrlr.trid.subnqn, sizeof(ctrlr.trid.subnqn)));
|
||||||
CU_ASSERT(STAILQ_EMPTY(&qpair.free_req));
|
/* Make sure we used the qpair's reserved_req, and not one from the STAILQ */
|
||||||
|
CU_ASSERT(g_request == qpair.reserved_req);
|
||||||
|
CU_ASSERT(!STAILQ_EMPTY(&qpair.free_req));
|
||||||
|
|
||||||
/* Wait_for completion timeout */
|
/* Wait_for completion timeout */
|
||||||
STAILQ_INSERT_HEAD(&qpair.free_req, &req, stailq);
|
|
||||||
g_nvme_wait_for_completion_timeout = true;
|
g_nvme_wait_for_completion_timeout = true;
|
||||||
|
|
||||||
rc = nvme_fabric_qpair_connect(&qpair, 1);
|
rc = nvme_fabric_qpair_connect(&qpair, 1);
|
||||||
|
Loading…
Reference in New Issue
Block a user