nvmf: make async event and error related functions public
This patch makes functions related to Asynchronous Event and error
handling public, so that they can be used in custom nvmf transports
compiled out of the SPDK tree.

Signed-off-by: Szulik, Maciej <maciej.szulik@intel.com>
Change-Id: I253bb7cfc98ea3012c179a709a3337c36b36cb0e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17237
Community-CI: Mellanox Build Bot
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent a7d3e1068e, commit 414ff9bc23
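As a usage sketch for out-of-tree transport authors, before the change itself: the `my_transport_*` wrappers below are hypothetical, and only the two `spdk_nvmf_ctrlr_*` calls, the enum value, and their documented semantics come from this patch.

#include "spdk/nvmf_transport.h"
#include "spdk/log.h"

/* Hypothetical error path in a custom out-of-tree transport: report an
 * invalid doorbell write to the host as an Error Asynchronous Event.
 * spdk_nvmf_ctrlr_async_event_error_event() returns 0 on success (including
 * when the error AEN is masked), or negative errno on failure. */
static int
my_transport_handle_bad_doorbell(struct spdk_nvmf_ctrlr *ctrlr)
{
        int rc;

        rc = spdk_nvmf_ctrlr_async_event_error_event(ctrlr,
                        SPDK_NVME_ASYNC_EVENT_INVALID_DB_WRITE);
        if (rc != 0) {
                SPDK_ERRLOG("failed to complete error AER: %d\n", rc);
        }

        return -1;
}

/* Hypothetical controller reset/shutdown path: complete all outstanding
 * AERs with ABORTED_BY_REQUEST status so the host may re-issue them later. */
static void
my_transport_disable_ctrlr(struct spdk_nvmf_ctrlr *ctrlr)
{
        spdk_nvmf_ctrlr_abort_aer(ctrlr);
}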
@@ -22,6 +22,11 @@ New function `spdk_env_get_main_core` was added.
 New `spdk_nvmf_request_copy_to/from_buf()` APIs have been added, which support
 iovecs, unlike the deprecated `spdk_nvmf_request_get_data()`.
 
+Two functions related to Asynchronous Event and error handling have been made public:
+
+- `spdk_nvmf_ctrlr_async_event_error_event`,
+- `spdk_nvmf_ctrlr_abort_aer`.
+
 ### nvme
 
 New API `spdk_nvme_ns_get_format_index` was added to calculate the exact format index, that
@@ -629,6 +629,26 @@ spdk_nvmf_req_get_xfer(struct spdk_nvmf_request *req) {
 	return xfer;
 }
 
+/**
+ * Complete Asynchronous Event as Error.
+ *
+ * \param ctrlr Controller whose AER is going to be completed.
+ * \param info Asynchronous Event Error Information to be reported.
+ *
+ * \return int. 0 if it completed successfully, or negative errno if it failed.
+ */
+int spdk_nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
+		enum spdk_nvme_async_event_info_error info);
+
+/**
+ * Abort outstanding Asynchronous Event Requests (AERs).
+ *
+ * Completes AERs with ABORTED_BY_REQUEST status code.
+ *
+ * \param ctrlr Controller whose AERs are going to be aborted.
+ */
+void spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);
+
 /*
  * Macro used to register new transports.
  */
@@ -3764,18 +3764,23 @@ nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx)
 }
 
 int
-nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
-				   union spdk_nvme_async_event_completion event)
+spdk_nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
+					enum spdk_nvme_async_event_info_error info)
 {
+	union spdk_nvme_async_event_completion event;
+
 	if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT)) {
 		return 0;
 	}
 
-	if (event.bits.async_event_type != SPDK_NVME_ASYNC_EVENT_TYPE_ERROR ||
-	    event.bits.async_event_info > SPDK_NVME_ASYNC_EVENT_FW_IMAGE_LOAD) {
+	if (info > SPDK_NVME_ASYNC_EVENT_FW_IMAGE_LOAD) {
 		return 0;
 	}
 
+	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_ERROR;
+	event.bits.log_page_identifier = SPDK_NVME_LOG_ERROR;
+	event.bits.async_event_info = info;
+
 	return nvmf_ctrlr_async_event_notification(ctrlr, &event);
 }
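The practical effect of the new signature for callers, as a before/after sketch (assuming a valid `struct spdk_nvmf_ctrlr *ctrlr`; this mirrors the vfio-user call-site change further down):

/* Before: callers built the completion union themselves and used the
 * internal symbol. */
union spdk_nvme_async_event_completion event = {};

event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_ERROR;
event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_INVALID_DB_WRITE;
nvmf_ctrlr_async_event_error_event(ctrlr, event);

/* After: the public API takes only the error info; the event type and the
 * log page identifier (SPDK_NVME_LOG_ERROR) are filled in by the library. */
spdk_nvmf_ctrlr_async_event_error_event(ctrlr, SPDK_NVME_ASYNC_EVENT_INVALID_DB_WRITE);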
@@ -3800,7 +3805,7 @@ nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair)
 }
 
 void
-nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr)
+spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr)
 {
 	struct spdk_nvmf_request *req;
 	int i;
@@ -411,19 +411,12 @@ int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
 int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
 void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
 void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);
-int nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
-				       union spdk_nvme_async_event_completion event);
-
 void nvmf_ns_reservation_request(void *ctx);
 void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				       struct spdk_nvmf_ns *ns,
				       enum spdk_nvme_reservation_notification_log_page_type type);
 
-/*
- * Abort aer is sent on a per controller basis and sends a completion for the aer to the host.
- * This function should be called when attempting to recover in error paths when it is OK for
- * the host to send a subsequent AER.
- */
-void nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);
-
 /*
  * Abort zero-copy requests that already got the buffer (received zcopy_start cb), but haven't
@@ -125,6 +125,8 @@
 	spdk_nvmf_ctrlr_restore_migr_data;
 	spdk_nvmf_req_get_xfer;
 	spdk_nvmf_poll_group_remove;
+	spdk_nvmf_ctrlr_async_event_error_event;
+	spdk_nvmf_ctrlr_abort_aer;
 
 	local: *;
 };
@@ -2796,7 +2796,7 @@ disable_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
 	 * For PCIe controller reset or shutdown, we will drop all AER
 	 * responses.
 	 */
-	nvmf_ctrlr_abort_aer(vu_ctrlr->ctrlr);
+	spdk_nvmf_ctrlr_abort_aer(vu_ctrlr->ctrlr);
 
 	/* Free the shadow doorbell buffer. */
 	vfio_user_ctrlr_switch_doorbells(vu_ctrlr, false);
@@ -5658,13 +5658,9 @@ nvmf_vfio_user_sq_poll(struct nvmf_vfio_user_sq *sq)
 
 	new_tail = new_tail & 0xffffu;
 	if (spdk_unlikely(new_tail >= sq->size)) {
-		union spdk_nvme_async_event_completion event = {};
-
 		SPDK_DEBUGLOG(nvmf_vfio, "%s: invalid sqid:%u doorbell value %u\n", ctrlr_id(ctrlr), sq->qid,
			      new_tail);
-		event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_ERROR;
-		event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_INVALID_DB_WRITE;
-		nvmf_ctrlr_async_event_error_event(ctrlr->ctrlr, event);
+		spdk_nvmf_ctrlr_async_event_error_event(ctrlr->ctrlr, SPDK_NVME_ASYNC_EVENT_INVALID_DB_WRITE);
 
 		return -1;
 	}
@@ -41,7 +41,7 @@ DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
 DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
 DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
-DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
 DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
	    struct spdk_dif_ctx *dif_ctx), false);
 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
@@ -26,9 +26,9 @@ DEFINE_STUB(spdk_nvmf_subsystem_pause, int, (struct spdk_nvmf_subsystem *subsyst
	    uint32_t nsid, spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
 DEFINE_STUB(spdk_nvmf_subsystem_resume, int, (struct spdk_nvmf_subsystem *subsystem,
	    spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
-DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
-DEFINE_STUB(nvmf_ctrlr_async_event_error_event, int, (struct spdk_nvmf_ctrlr *ctrlr,
-	    union spdk_nvme_async_event_completion event), 0);
+DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB(spdk_nvmf_ctrlr_async_event_error_event, int, (struct spdk_nvmf_ctrlr *ctrlr,
+	    enum spdk_nvme_async_event_info_error info), 0);
 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int, (struct spdk_nvmf_qpair *qpair,
	    struct spdk_nvme_transport_id *trid), 0);