lib/nvmf: Support to queue pending async events

Add support to queue pending async events to avoid event loss if
multiple async events happen when there are no outstanding AER
requests. Pending async events will be consumed in FIFO order once any
AER requests are received.

Signed-off-by: Jiewei Ke <jiewei@smartx.com>
Change-Id: I545f0baa4ec2996e9e02ec12c176d639e3c0d55a
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5117
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
Jiewei Ke 2020-11-16 22:09:29 -05:00 committed by Tomasz Zawadzki
parent 8c3b9204b1
commit 8cd9ef2825
3 changed files with 134 additions and 37 deletions

View File

@ -325,6 +325,7 @@ nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
return NULL;
}
STAILQ_INIT(&ctrlr->async_events);
TAILQ_INIT(&ctrlr->log_head);
ctrlr->subsys = subsystem;
ctrlr->thread = req->qpair->group->thread;
@ -451,6 +452,7 @@ _nvmf_ctrlr_destruct(void *ctx)
{
struct spdk_nvmf_ctrlr *ctrlr = ctx;
struct spdk_nvmf_reservation_log *log, *log_tmp;
struct spdk_nvmf_async_event_completion *event, *event_tmp;
nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
nvmf_ctrlr_stop_association_timer(ctrlr);
@ -460,6 +462,10 @@ _nvmf_ctrlr_destruct(void *ctx)
TAILQ_REMOVE(&ctrlr->log_head, log, link);
free(log);
}
STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
free(event);
}
free(ctrlr);
}
@ -1638,6 +1644,7 @@ nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
struct spdk_nvmf_subsystem_poll_group *sgroup;
struct spdk_nvmf_async_event_completion *pending_event;
SPDK_DEBUGLOG(nvmf, "Async Event Request\n");
@ -1654,17 +1661,11 @@ nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
if (ctrlr->notice_event.bits.async_event_type ==
SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
rsp->cdw0 = ctrlr->notice_event.raw;
ctrlr->notice_event.raw = 0;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
if (ctrlr->reservation_event.bits.async_event_type ==
SPDK_NVME_ASYNC_EVENT_TYPE_IO) {
rsp->cdw0 = ctrlr->reservation_event.raw;
ctrlr->reservation_event.raw = 0;
if (!STAILQ_EMPTY(&ctrlr->async_events)) {
pending_event = STAILQ_FIRST(&ctrlr->async_events);
rsp->cdw0 = pending_event->event.raw;
STAILQ_REMOVE(&ctrlr->async_events, pending_event, spdk_nvmf_async_event_completion, link);
free(pending_event);
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
@ -2831,6 +2832,21 @@ nvmf_ctrlr_async_event_notification(struct spdk_nvmf_ctrlr *ctrlr,
return 0;
}
/*
 * Append an async event to the controller's pending-event queue so it can
 * be delivered later, once an AER request arrives. On allocation failure
 * the event is dropped with an error log (best-effort semantics).
 */
static inline void
nvmf_ctrlr_queue_pending_async_event(struct spdk_nvmf_ctrlr *ctrlr,
				     union spdk_nvme_async_event_completion *event)
{
	struct spdk_nvmf_async_event_completion *pending;

	pending = calloc(1, sizeof(*pending));
	if (pending == NULL) {
		SPDK_ERRLOG("Alloc nvmf event failed, ignore the event\n");
		return;
	}

	/* Copy the raw completion value; events are consumed in FIFO order. */
	pending->event.raw = event->raw;
	STAILQ_INSERT_TAIL(&ctrlr->async_events, pending, link);
}
int
nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr)
{
@ -2850,12 +2866,7 @@ nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr)
* response.
*/
if (ctrlr->nr_aer_reqs == 0) {
if (ctrlr->notice_event.bits.async_event_type ==
SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
return 0;
}
ctrlr->notice_event.raw = event.raw;
nvmf_ctrlr_queue_pending_async_event(ctrlr, &event);
return 0;
}
@ -2881,12 +2892,7 @@ nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr)
* response.
*/
if (ctrlr->nr_aer_reqs == 0) {
if (ctrlr->notice_event.bits.async_event_type ==
SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
return 0;
}
ctrlr->notice_event.raw = event.raw;
nvmf_ctrlr_queue_pending_async_event(ctrlr, &event);
return 0;
}
@ -2910,12 +2916,7 @@ nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
* response.
*/
if (ctrlr->nr_aer_reqs == 0) {
if (ctrlr->reservation_event.bits.async_event_type ==
SPDK_NVME_ASYNC_EVENT_TYPE_IO) {
return;
}
ctrlr->reservation_event.raw = event.raw;
nvmf_ctrlr_queue_pending_async_event(ctrlr, &event);
return;
}
@ -2944,12 +2945,7 @@ nvmf_ctrlr_async_event_discovery_log_change_notice(struct spdk_nvmf_ctrlr *ctrlr
* response.
*/
if (ctrlr->nr_aer_reqs == 0) {
if (ctrlr->notice_event.bits.async_event_type ==
SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
return 0;
}
ctrlr->notice_event.raw = event.raw;
nvmf_ctrlr_queue_pending_async_event(ctrlr, &event);
return 0;
}

View File

@ -201,6 +201,14 @@ struct spdk_nvmf_reservation_log {
struct spdk_nvmf_ctrlr *ctrlr;
};
/*
 * NVMf async event completion.
 *
 * One queued entry per async event that occurred while the controller had
 * no outstanding AER request. Entries live on spdk_nvmf_ctrlr::async_events
 * (singly-linked tail queue, FIFO) and are freed when consumed by an AER
 * request or when the controller is destructed.
 */
struct spdk_nvmf_async_event_completion {
/* Raw NVMe async event completion value (written to cpl cdw0). */
union spdk_nvme_async_event_completion event;
STAILQ_ENTRY(spdk_nvmf_async_event_completion) link;
};
/*
* This structure represents an NVMe-oF controller,
* which is like a "session" in networking terms.
@ -223,8 +231,7 @@ struct spdk_nvmf_ctrlr {
const struct spdk_nvmf_subsystem_listener *listener;
struct spdk_nvmf_request *aer_req[NVMF_MAX_ASYNC_EVENTS];
union spdk_nvme_async_event_completion notice_event;
union spdk_nvme_async_event_completion reservation_event;
STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
uint8_t nr_aer_reqs;
struct spdk_uuid hostid;

View File

@ -1317,6 +1317,23 @@ test_reservation_exclusive_access_regs_only_and_all_regs(void)
SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
}
/* Unit-test helper: initialize the controller's pending async event queue
 * (normally done by nvmf_ctrlr_create()).
 */
static void
init_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
STAILQ_INIT(&ctrlr->async_events);
}
/* Unit-test helper: free any async events still queued on the controller
 * (mirrors the cleanup done by _nvmf_ctrlr_destruct()).
 */
static void
cleanup_pending_async_events(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_async_event_completion *event;

	/* Always remove from the head: STAILQ_REMOVE_HEAD is O(1), while
	 * STAILQ_REMOVE would linearly search the queue for each element.
	 */
	while (!STAILQ_EMPTY(&ctrlr->async_events)) {
		event = STAILQ_FIRST(&ctrlr->async_events);
		STAILQ_REMOVE_HEAD(&ctrlr->async_events, link);
		free(event);
	}
}
static void
test_reservation_notification_log_page(void)
{
@ -1332,6 +1349,7 @@ test_reservation_notification_log_page(void)
memset(&ctrlr, 0, sizeof(ctrlr));
ctrlr.thread = spdk_get_thread();
TAILQ_INIT(&ctrlr.log_head);
init_pending_async_events(&ctrlr);
ns.nsid = 1;
/* Test Case: Mask all the reservation notifications */
@ -1378,6 +1396,8 @@ test_reservation_notification_log_page(void)
/* Test Case: Get Log Page to clear the log pages */
nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
cleanup_pending_async_events(&ctrlr);
}
static void
@ -1782,6 +1802,79 @@ test_get_ana_log_page(void)
CU_ASSERT(memcmp(expected_page, actual_page, UT_ANA_LOG_PAGE_SIZE) == 0);
}
/*
 * Verify that async events raised while no AER request is outstanding are
 * queued, and then consumed in FIFO order as AER requests arrive. Three
 * events are queued up front; the first three AERs complete immediately
 * with the queued events, and the fourth stays outstanding.
 */
static void
test_multi_async_events(void)
{
struct spdk_nvmf_subsystem subsystem = {};
struct spdk_nvmf_qpair qpair = {};
struct spdk_nvmf_ctrlr ctrlr = {};
struct spdk_nvmf_request req[4] = {};
struct spdk_nvmf_ns *ns_ptrs[1] = {};
struct spdk_nvmf_ns ns = {};
union nvmf_h2c_msg cmd[4] = {};
union nvmf_c2h_msg rsp[4] = {};
union spdk_nvme_async_event_completion event = {};
struct spdk_nvmf_poll_group group = {};
struct spdk_nvmf_subsystem_poll_group sgroups = {};
int i;
/* Minimal subsystem/qpair/controller wiring so admin commands can run. */
ns_ptrs[0] = &ns;
subsystem.ns = ns_ptrs;
subsystem.max_nsid = 1;
subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
ns.opts.nsid = 1;
group.sgroups = &sgroups;
qpair.ctrlr = &ctrlr;
qpair.group = &group;
TAILQ_INIT(&qpair.outstanding);
ctrlr.subsys = &subsystem;
ctrlr.vcprop.cc.bits.en = 1;
/* Enable all three notice types so each notify call produces an event. */
ctrlr.feat.async_event_configuration.bits.ns_attr_notice = 1;
ctrlr.feat.async_event_configuration.bits.ana_change_notice = 1;
ctrlr.feat.async_event_configuration.bits.discovery_log_change_notice = 1;
init_pending_async_events(&ctrlr);
/* Target queue pending events when there is no outstanding AER request */
nvmf_ctrlr_async_event_ns_notice(&ctrlr);
nvmf_ctrlr_async_event_ana_change_notice(&ctrlr);
nvmf_ctrlr_async_event_discovery_log_change_notice(&ctrlr);
/* Submit four AER requests. The first three consume the queued events
 * and complete synchronously; the fourth remains outstanding.
 */
for (i = 0; i < 4; i++) {
cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
cmd[i].nvme_cmd.nsid = 1;
cmd[i].nvme_cmd.cid = i;
req[i].qpair = &qpair;
req[i].cmd = &cmd[i];
req[i].rsp = &rsp[i];
TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
sgroups.io_outstanding = 1;
if (i < 3) {
/* Completed immediately with a queued event; nothing left outstanding. */
CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
CU_ASSERT(sgroups.io_outstanding == 0);
CU_ASSERT(ctrlr.nr_aer_reqs == 0);
} else {
/* Queue is empty: the AER is held by the controller. */
CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
CU_ASSERT(sgroups.io_outstanding == 0);
CU_ASSERT(ctrlr.nr_aer_reqs == 1);
}
}
/* Events must come back in the order they were raised (FIFO). */
event.raw = rsp[0].nvme_cpl.cdw0;
CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED);
event.raw = rsp[1].nvme_cpl.cdw0;
CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE);
event.raw = rsp[2].nvme_cpl.cdw0;
CU_ASSERT(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE);
cleanup_pending_async_events(&ctrlr);
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
@ -1808,6 +1901,7 @@ int main(int argc, char **argv)
CU_ADD_TEST(suite, test_fused_compare_and_write);
CU_ADD_TEST(suite, test_multi_async_event_reqs);
CU_ADD_TEST(suite, test_get_ana_log_page);
CU_ADD_TEST(suite, test_multi_async_events);
allocate_threads(1);
set_thread(0);