nvmf: make async event and error related functions public

This patch makes functions related to Asynchronous Event and error
handling public, so that they can be used in custom nvmf transports
compiled outside of the SPDK tree.

Signed-off-by: Szulik, Maciej <maciej.szulik@intel.com>
Change-Id: I253bb7cfc98ea3012c179a709a3337c36b36cb0e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17237
Community-CI: Mellanox Build Bot
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Szulik, Maciej 2023-03-16 16:43:39 +01:00 committed by Jim Harris
parent a7d3e1068e
commit 414ff9bc23
8 changed files with 44 additions and 23 deletions

View File

@@ -22,6 +22,11 @@ New function `spdk_env_get_main_core` was added.
 New `spdk_nvmf_request_copy_to/from_buf()` APIs have been added, which support
 iovecs, unlike the deprecated `spdk_nvmf_request_get_data()`.
 
+Two functions related to Asynchronous Event and error handling have been made public:
+- `spdk_nvmf_ctrlr_async_event_error_event`,
+- `spdk_nvmf_ctrlr_abort_aer`.
+
 ### nvme
 
 New API `spdk_nvme_ns_get_format_index` was added to calculate the exact format index, that

View File

@@ -629,6 +629,26 @@ spdk_nvmf_req_get_xfer(struct spdk_nvmf_request *req) {
 	return xfer;
 }
 
+/**
+ * Complete Asynchronous Event as Error.
+ *
+ * \param ctrlr Controller whose AER is going to be completed.
+ * \param info Asynchronous Event Error Information to be reported.
+ *
+ * \return int. 0 if it completed successfully, or negative errno if it failed.
+ */
+int spdk_nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
+		enum spdk_nvme_async_event_info_error info);
+
+/**
+ * Abort outstanding Asynchronous Event Requests (AERs).
+ *
+ * Completes AERs with ABORTED_BY_REQUEST status code.
+ *
+ * \param ctrlr Controller whose AERs are going to be aborted.
+ */
+void spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);
+
 /*
  * Macro used to register new transports.
  */

View File

@@ -3764,18 +3764,23 @@ nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx)
 }
 
 int
-nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
-				   union spdk_nvme_async_event_completion event)
+spdk_nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
+					enum spdk_nvme_async_event_info_error info)
 {
+	union spdk_nvme_async_event_completion event;
+
 	if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT)) {
 		return 0;
 	}
 
-	if (event.bits.async_event_type != SPDK_NVME_ASYNC_EVENT_TYPE_ERROR ||
-	    event.bits.async_event_info > SPDK_NVME_ASYNC_EVENT_FW_IMAGE_LOAD) {
+	if (info > SPDK_NVME_ASYNC_EVENT_FW_IMAGE_LOAD) {
 		return 0;
 	}
 
+	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_ERROR;
+	event.bits.log_page_identifier = SPDK_NVME_LOG_ERROR;
+	event.bits.async_event_info = info;
+
 	return nvmf_ctrlr_async_event_notification(ctrlr, &event);
 }
@@ -3800,7 +3805,7 @@ nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair)
 }
 
 void
-nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr)
+spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr)
 {
 	struct spdk_nvmf_request *req;
 	int i;

View File

@@ -411,19 +411,12 @@ int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
 int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
 void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
 void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);
-int nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
-		union spdk_nvme_async_event_completion event);
 void nvmf_ns_reservation_request(void *ctx);
 void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
 				       struct spdk_nvmf_ns *ns,
 				       enum spdk_nvme_reservation_notification_log_page_type type);
-
-/*
- * Abort aer is sent on a per controller basis and sends a completion for the aer to the host.
- * This function should be called when attempting to recover in error paths when it is OK for
- * the host to send a subsequent AER.
- */
-void nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);
 
 /*
  * Abort zero-copy requests that already got the buffer (received zcopy_start cb), but haven't

View File

@@ -125,6 +125,8 @@
 	spdk_nvmf_ctrlr_restore_migr_data;
 	spdk_nvmf_req_get_xfer;
 	spdk_nvmf_poll_group_remove;
+	spdk_nvmf_ctrlr_async_event_error_event;
+	spdk_nvmf_ctrlr_abort_aer;
 
 	local: *;
 };

View File

@@ -2796,7 +2796,7 @@ disable_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
 	 * For PCIe controller reset or shutdown, we will drop all AER
 	 * responses.
 	 */
-	nvmf_ctrlr_abort_aer(vu_ctrlr->ctrlr);
+	spdk_nvmf_ctrlr_abort_aer(vu_ctrlr->ctrlr);
 
 	/* Free the shadow doorbell buffer. */
 	vfio_user_ctrlr_switch_doorbells(vu_ctrlr, false);
@@ -5658,13 +5658,9 @@ nvmf_vfio_user_sq_poll(struct nvmf_vfio_user_sq *sq)
 	new_tail = new_tail & 0xffffu;
 
 	if (spdk_unlikely(new_tail >= sq->size)) {
-		union spdk_nvme_async_event_completion event = {};
-
 		SPDK_DEBUGLOG(nvmf_vfio, "%s: invalid sqid:%u doorbell value %u\n", ctrlr_id(ctrlr), sq->qid,
 			      new_tail);
-		event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_ERROR;
-		event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_INVALID_DB_WRITE;
-		nvmf_ctrlr_async_event_error_event(ctrlr->ctrlr, event);
+		spdk_nvmf_ctrlr_async_event_error_event(ctrlr->ctrlr, SPDK_NVME_ASYNC_EVENT_INVALID_DB_WRITE);
 		return -1;
 	}

View File

@@ -41,7 +41,7 @@ DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
 DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
 DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
 	    const struct spdk_nvme_transport_id *trid2), 0);
-DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
 DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
 	    struct spdk_dif_ctx *dif_ctx), false);
 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,

View File

@@ -26,9 +26,9 @@ DEFINE_STUB(spdk_nvmf_subsystem_pause, int, (struct spdk_nvmf_subsystem *subsyst
 	    uint32_t nsid, spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
 DEFINE_STUB(spdk_nvmf_subsystem_resume, int, (struct spdk_nvmf_subsystem *subsystem,
 	    spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
-DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
-DEFINE_STUB(nvmf_ctrlr_async_event_error_event, int, (struct spdk_nvmf_ctrlr *ctrlr,
-	    union spdk_nvme_async_event_completion event), 0);
+DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB(spdk_nvmf_ctrlr_async_event_error_event, int, (struct spdk_nvmf_ctrlr *ctrlr,
+	    enum spdk_nvme_async_event_info_error info), 0);
 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int, (struct spdk_nvmf_qpair *qpair,
 	    struct spdk_nvme_transport_id *trid), 0);