nvmf: add spdk_nvmf_ctrlr_[save|restore]_migr_data() APIs
During live migration there are several spdk_nvmf_ctrlr internal data structures that need to be saved and restored. These structures were previously designed only for the vfio-user transport; to extend them to other vendor-specific transports, move them into the public API, so that users can save the state of an existing nvmf controller and restore it into a new one. Also remove the register handling from the vfio-user transport, since these registers are now stored in the common nvmf library.

Change-Id: I9f5847ef427f7064f8e16adcc963dc6b4a35f235
Signed-off-by: Jacek Kalwas <jacek.kalwas@intel.com>
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11059
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Thanos Makatos <thanos.makatos@nutanix.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
parent 04dd028292 · commit 982c25feef
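For orientation, the intended calling sequence for the new APIs is sketched below. This is a minimal illustration distilled from the vfio-user changes in this diff, not a complete transport: pausing/resuming the subsystem and moving the 4 KiB blob from source to destination are assumed to happen elsewhere, and save_ctrlr_state()/restore_ctrlr_state() are hypothetical helper names.

#include "spdk/nvmf_transport.h"

/* Source side: serialize controller state into the 4 KiB migration blob.
 * Per the API contract, run on the controller's thread while the nvmf
 * subsystem is paused. */
static int
save_ctrlr_state(struct spdk_nvmf_ctrlr *ctrlr,
		 struct spdk_nvmf_ctrlr_migr_data *blob)
{
	/* Advertise which layout this binary understands; the library
	 * will copy no more than these sizes. */
	blob->data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused);
	blob->regs_size = sizeof(struct spdk_nvmf_registers);
	blob->feat_size = sizeof(struct spdk_nvmf_ctrlr_feat);

	return spdk_nvmf_ctrlr_save_migr_data(ctrlr, blob);
}

/* Destination side: apply the received blob to a newly created controller,
 * then resubmit the saved AERs (see the vfio-user restore path below). */
static int
restore_ctrlr_state(struct spdk_nvmf_ctrlr *ctrlr,
		    const struct spdk_nvmf_ctrlr_migr_data *blob)
{
	return spdk_nvmf_ctrlr_restore_migr_data(ctrlr, blob);
}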
include/spdk/nvmf_transport.h

@@ -22,6 +22,11 @@
 /* The maximum number of buffers per request */
 #define NVMF_REQ_MAX_BUFFERS (SPDK_NVMF_MAX_SGL_ENTRIES * 2 + 1)
 
+/* Maximum pending AERs that can be migrated */
+#define SPDK_NVMF_MIGR_MAX_PENDING_AERS 256
+
+#define SPDK_NVMF_MAX_ASYNC_EVENTS 4
+
 /* AIO backend requires block size aligned data buffers,
  * extra 4KiB aligned data buffer should work for most devices.
  */
@@ -415,6 +420,7 @@ struct spdk_nvmf_registers {
 	uint64_t asq;
 	uint64_t acq;
 };
+SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_registers) == 40, "Incorrect size");
 
 const struct spdk_nvmf_registers *spdk_nvmf_ctrlr_get_regs(struct spdk_nvmf_ctrlr *ctrlr);
 
@@ -467,6 +473,95 @@ spdk_nvmf_ctrlr_get_subsystem(struct spdk_nvmf_ctrlr *ctrlr);
  */
 uint16_t spdk_nvmf_ctrlr_get_id(struct spdk_nvmf_ctrlr *ctrlr);
 
+struct spdk_nvmf_ctrlr_feat {
+	union spdk_nvme_feat_arbitration arbitration;
+	union spdk_nvme_feat_power_management power_management;
+	union spdk_nvme_feat_error_recovery error_recovery;
+	union spdk_nvme_feat_volatile_write_cache volatile_write_cache;
+	union spdk_nvme_feat_number_of_queues number_of_queues;
+	union spdk_nvme_feat_interrupt_coalescing interrupt_coalescing;
+	union spdk_nvme_feat_interrupt_vector_configuration interrupt_vector_configuration;
+	union spdk_nvme_feat_write_atomicity write_atomicity;
+	union spdk_nvme_feat_async_event_configuration async_event_configuration;
+	union spdk_nvme_feat_keep_alive_timer keep_alive_timer;
+};
+SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ctrlr_feat) == 40, "Incorrect size");
+
+/* Migration data structure used to save & restore a NVMe-oF controller. */
+struct spdk_nvmf_ctrlr_migr_data {
+	/* `data_size` is valid size of `spdk_nvmf_ctrlr_migr_data` without counting `unused`.
+	 * We use this field to migrate `spdk_nvmf_ctrlr_migr_data` from source VM and restore
+	 * it in destination VM.
+	 */
+	uint32_t data_size;
+	/* `regs_size` is valid size of `spdk_nvmf_registers`. */
+	uint32_t regs_size;
+	/* `feat_size` is valid size of `spdk_nvmf_ctrlr_feat`. */
+	uint32_t feat_size;
+	uint32_t reserved;
+
+	struct spdk_nvmf_registers regs;
+	uint8_t regs_reserved[216];
+
+	struct spdk_nvmf_ctrlr_feat feat;
+	uint8_t feat_reserved[216];
+
+	uint16_t cntlid;
+	uint8_t acre;
+	uint8_t num_aer_cids;
+	uint32_t num_async_events;
+
+	union spdk_nvme_async_event_completion async_events[SPDK_NVMF_MIGR_MAX_PENDING_AERS];
+	uint16_t aer_cids[SPDK_NVMF_MAX_ASYNC_EVENTS];
+	uint64_t notice_aen_mask;
+
+	uint8_t unused[2516];
+};
+SPDK_STATIC_ASSERT(offsetof(struct spdk_nvmf_ctrlr_migr_data,
+			    regs) - offsetof(struct spdk_nvmf_ctrlr_migr_data, data_size) == 16, "Incorrect header size");
+SPDK_STATIC_ASSERT(offsetof(struct spdk_nvmf_ctrlr_migr_data,
+			    feat) - offsetof(struct spdk_nvmf_ctrlr_migr_data, regs) == 256, "Incorrect regs size");
+SPDK_STATIC_ASSERT(offsetof(struct spdk_nvmf_ctrlr_migr_data,
+			    cntlid) - offsetof(struct spdk_nvmf_ctrlr_migr_data, feat) == 256, "Incorrect feat size");
+SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ctrlr_migr_data) == 4096, "Incorrect size");
+
+/**
+ * Save the NVMe-oF controller state and configuration.
+ *
+ * It is allowed to save the data only when the nvmf subsystem is in paused
+ * state, i.e. there are no outstanding cmds in the nvmf layer (other than AER),
+ * and pending async event completions are blocked.
+ *
+ * To preserve thread safety this function must be executed on the same thread
+ * the NVMe-oF controller was created on.
+ *
+ * \param ctrlr The NVMe-oF controller
+ * \param data The NVMe-oF controller state and configuration to be saved
+ *
+ * \return 0 on success or a negated errno on failure.
+ */
+int spdk_nvmf_ctrlr_save_migr_data(struct spdk_nvmf_ctrlr *ctrlr,
+				   struct spdk_nvmf_ctrlr_migr_data *data);
+
+/**
+ * Restore the NVMe-oF controller state and configuration.
+ *
+ * It is allowed to restore the data only when the nvmf subsystem is in paused
+ * state.
+ *
+ * To preserve thread safety this function must be executed on the same thread
+ * the NVMe-oF controller was created on.
+ *
+ * AERs shall be restored using spdk_nvmf_request_exec after this function is
+ * executed.
+ *
+ * \param ctrlr The NVMe-oF controller
+ * \param data The NVMe-oF controller state and configuration to be restored
+ *
+ * \return 0 on success or a negated errno on failure.
+ */
+int spdk_nvmf_ctrlr_restore_migr_data(struct spdk_nvmf_ctrlr *ctrlr,
+				      const struct spdk_nvmf_ctrlr_migr_data *data);
+
 static inline enum spdk_nvme_data_transfer
 spdk_nvmf_req_get_xfer(struct spdk_nvmf_request *req) {
 	enum spdk_nvme_data_transfer xfer;
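A quick accounting of the 4096-byte layout may be useful when auditing the reserved arrays; the static asserts above pin only the first three boundaries, so the rest is derived by hand here (every field is naturally aligned, so no interior padding is expected):

/* spdk_nvmf_ctrlr_migr_data layout, derived from the field sizes above:
 *   header:  data_size + regs_size + feat_size + reserved  =   16 bytes
 *   regs:    sizeof(spdk_nvmf_registers) 40 + 216 reserved =  256 bytes
 *   feat:    sizeof(spdk_nvmf_ctrlr_feat) 40 + 216 reserved = 256 bytes
 *   scalars: cntlid 2 + acre 1 + num_aer_cids 1
 *            + num_async_events 4                           =    8 bytes
 *   arrays:  async_events 256 * 4 + aer_cids 4 * 2
 *            + notice_aen_mask 8                            = 1040 bytes
 *   unused:  2516 + 4 bytes of trailing struct padding      = 2520 bytes
 *   total                                                   = 4096 bytes
 */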
lib/nvmf/ctrlr.c (116 changed lines)
@@ -282,7 +282,7 @@ static void
 nvmf_ctrlr_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem,
		      struct spdk_nvmf_ctrlr_data *cdata)
 {
-	cdata->aerl = NVMF_MAX_ASYNC_EVENTS - 1;
+	cdata->aerl = SPDK_NVMF_MAX_ASYNC_EVENTS - 1;
 	cdata->kas = KAS_DEFAULT_VALUE;
 	cdata->vid = SPDK_PCI_VID_INTEL;
 	cdata->ssvid = SPDK_PCI_VID_INTEL;
@@ -1889,66 +1889,116 @@ nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 }
 
-int
-nvmf_ctrlr_save_aers(struct spdk_nvmf_ctrlr *ctrlr, uint16_t *aer_cids,
-		     uint16_t max_aers)
+SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ctrlr) == 4920,
+		   "Please check migration fields that need to be added or not");
+
+static void
+nvmf_ctrlr_migr_data_copy(struct spdk_nvmf_ctrlr_migr_data *data,
+			  const struct spdk_nvmf_ctrlr_migr_data *data_src, size_t data_size)
 {
-	struct spdk_nvmf_request *req;
-	uint16_t i;
+	assert(data);
+	assert(data_src);
+	assert(data_size);
 
-	if (!aer_cids || max_aers < ctrlr->nr_aer_reqs) {
-		return -EINVAL;
-	}
+	memcpy(&data->regs, &data_src->regs, spdk_min(data->regs_size, data_src->regs_size));
+	memcpy(&data->feat, &data_src->feat, spdk_min(data->feat_size, data_src->feat_size));
 
-	for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
-		req = ctrlr->aer_req[i];
-		aer_cids[i] = req->cmd->nvme_cmd.cid;
-	}
+#define SET_FIELD(field) \
+    if (offsetof(struct spdk_nvmf_ctrlr_migr_data, field) + sizeof(data->field) <= data_size) { \
+        data->field = data_src->field; \
+    } \
 
-	return ctrlr->nr_aer_reqs;
+	SET_FIELD(cntlid);
+	SET_FIELD(acre);
+	SET_FIELD(num_aer_cids);
+	SET_FIELD(num_async_events);
+	SET_FIELD(notice_aen_mask);
+#undef SET_FIELD
+
+#define SET_ARRAY(arr) \
+    if (offsetof(struct spdk_nvmf_ctrlr_migr_data, arr) + sizeof(data->arr) <= data_size) { \
+        memcpy(&data->arr, &data_src->arr, sizeof(data->arr)); \
+    } \
+
+	SET_ARRAY(async_events);
+	SET_ARRAY(aer_cids);
+#undef SET_ARRAY
 }
 
 int
-nvmf_ctrlr_save_migr_data(struct spdk_nvmf_ctrlr *ctrlr, struct nvmf_ctrlr_migr_data *data)
+spdk_nvmf_ctrlr_save_migr_data(struct spdk_nvmf_ctrlr *ctrlr,
+			       struct spdk_nvmf_ctrlr_migr_data *data)
 {
-	uint32_t num_async_events = 0;
 	struct spdk_nvmf_async_event_completion *event, *event_tmp;
+	uint32_t i;
+	struct spdk_nvmf_ctrlr_migr_data data_local = {
+		.data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused),
+		.regs_size = sizeof(struct spdk_nvmf_registers),
+		.feat_size = sizeof(struct spdk_nvmf_ctrlr_feat)
+	};
 
-	memcpy(&data->feat, &ctrlr->feat, sizeof(struct spdk_nvmf_ctrlr_feat));
-	data->cntlid = ctrlr->cntlid;
-	data->acre_enabled = ctrlr->acre_enabled;
-	data->notice_aen_mask = ctrlr->notice_aen_mask;
+	assert(data->data_size <= sizeof(data_local));
+	assert(spdk_get_thread() == ctrlr->thread);
+
+	memcpy(&data_local.regs, &ctrlr->vcprop, sizeof(struct spdk_nvmf_registers));
+	memcpy(&data_local.feat, &ctrlr->feat, sizeof(struct spdk_nvmf_ctrlr_feat));
+
+	data_local.cntlid = ctrlr->cntlid;
+	data_local.acre = ctrlr->acre_enabled;
+	data_local.num_aer_cids = ctrlr->nr_aer_reqs;
 
 	STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
-		data->async_events[num_async_events++].raw = event->event.raw;
-		if (num_async_events == NVMF_MIGR_MAX_PENDING_AERS) {
-			SPDK_ERRLOG("%p has too many pending AERs\n", ctrlr);
+		data_local.async_events[data_local.num_async_events++].raw = event->event.raw;
+		if (data_local.num_async_events > SPDK_NVMF_MIGR_MAX_PENDING_AERS) {
+			SPDK_ERRLOG("ctrlr %p has too many pending AERs\n", ctrlr);
 			break;
 		}
 	}
-	data->num_async_events = num_async_events;
 
+	for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
+		struct spdk_nvmf_request *req = ctrlr->aer_req[i];
+		data_local.aer_cids[i] = req->cmd->nvme_cmd.cid;
+	}
+	data_local.notice_aen_mask = ctrlr->notice_aen_mask;
+
+	nvmf_ctrlr_migr_data_copy(data, &data_local, spdk_min(data->data_size, data_local.data_size));
 	return 0;
 }
 
 int
-nvmf_ctrlr_restore_migr_data(struct spdk_nvmf_ctrlr *ctrlr, struct nvmf_ctrlr_migr_data *data)
+spdk_nvmf_ctrlr_restore_migr_data(struct spdk_nvmf_ctrlr *ctrlr,
+				  const struct spdk_nvmf_ctrlr_migr_data *data)
 {
-	struct spdk_nvmf_async_event_completion *event;
 	uint32_t i;
+	struct spdk_nvmf_ctrlr_migr_data data_local = {
+		.data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused),
+		.regs_size = sizeof(struct spdk_nvmf_registers),
+		.feat_size = sizeof(struct spdk_nvmf_ctrlr_feat)
+	};
 
-	memcpy(&ctrlr->feat, &data->feat, sizeof(struct spdk_nvmf_ctrlr_feat));
-	ctrlr->acre_enabled = data->acre_enabled;
-	ctrlr->notice_aen_mask = data->notice_aen_mask;
+	assert(data->data_size <= sizeof(data_local));
+	assert(spdk_get_thread() == ctrlr->thread);
 
-	for (i = 0; i < data->num_async_events; i++) {
-		event = calloc(1, sizeof(struct spdk_nvmf_async_event_completion));
+	/* local version of data should have defaults set before copy */
+	nvmf_ctrlr_migr_data_copy(&data_local, data, spdk_min(data->data_size, data_local.data_size));
+	memcpy(&ctrlr->vcprop, &data_local.regs, sizeof(struct spdk_nvmf_registers));
+	memcpy(&ctrlr->feat, &data_local.feat, sizeof(struct spdk_nvmf_ctrlr_feat));
+
+	ctrlr->cntlid = data_local.cntlid;
+	ctrlr->acre_enabled = data_local.acre;
+
+	for (i = 0; i < data_local.num_async_events; i++) {
+		struct spdk_nvmf_async_event_completion *event;
+
+		event = calloc(1, sizeof(*event));
 		if (!event) {
 			return -ENOMEM;
 		}
-		event->event.raw = data->async_events[i].raw;
+
+		event->event.raw = data_local.async_events[i].raw;
 		STAILQ_INSERT_TAIL(&ctrlr->async_events, event, link);
 	}
+	ctrlr->notice_aen_mask = data_local.notice_aen_mask;
 
 	return 0;
 }
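The data_size gating in nvmf_ctrlr_migr_data_copy() above is what makes the blob tolerant of version skew between source and destination. A sketch of the mechanism, using a hypothetical new_field that is not part of this commit:

/* Suppose a later release appends a field before `unused`:
 *
 *     uint64_t new_field;     // hypothetical addition
 *     uint8_t  unused[2508];  // shrunk so sizeof stays 4096
 *
 * An old binary still sets data_size = offsetof(..., unused) for *its*
 * layout, i.e. a value that ends before new_field. The copy helper then
 * limits itself to spdk_min(data->data_size, data_local.data_size), so
 * the SET_FIELD/SET_ARRAY guards skip new_field and it keeps the
 * destination's default instead of reading past the old blob. */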
@@ -1977,7 +2027,7 @@ nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
 	SPDK_DEBUGLOG(nvmf, "Async Event Request\n");
 
 	/* Four asynchronous events are supported for now */
-	if (ctrlr->nr_aer_reqs >= NVMF_MAX_ASYNC_EVENTS) {
+	if (ctrlr->nr_aer_reqs >= SPDK_NVMF_MAX_ASYNC_EVENTS) {
 		SPDK_DEBUGLOG(nvmf, "AERL exceeded\n");
 		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
 		rsp->status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED;
lib/nvmf/fc.c

@@ -1214,7 +1214,7 @@ nvmf_fc_req_bdev_abort(void *arg1)
 	 * Connect -> Special case (async. handling). Not sure how to
 	 * handle at this point. Let it run to completion.
 	 */
-	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
+	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
 		if (ctrlr->aer_req[i] == &fc_req->req) {
 			SPDK_NOTICELOG("Abort AER request\n");
 			nvmf_qpair_free_aer(fc_req->req.qpair);
lib/nvmf/nvmf_internal.h

@@ -20,8 +20,6 @@
 #include "spdk/util.h"
 #include "spdk/thread.h"
 
-#define NVMF_MAX_ASYNC_EVENTS (4)
-
 /* The spec reserves cntlid values in the range FFF0h to FFFFh. */
 #define NVMF_MIN_CNTLID 1
 #define NVMF_MAX_CNTLID 0xFFEF

@@ -169,19 +167,6 @@ struct spdk_nvmf_ns {
 	bool zcopy;
 };
 
-struct spdk_nvmf_ctrlr_feat {
-	union spdk_nvme_feat_arbitration arbitration;
-	union spdk_nvme_feat_power_management power_management;
-	union spdk_nvme_feat_error_recovery error_recovery;
-	union spdk_nvme_feat_volatile_write_cache volatile_write_cache;
-	union spdk_nvme_feat_number_of_queues number_of_queues;
-	union spdk_nvme_feat_interrupt_coalescing interrupt_coalescing;
-	union spdk_nvme_feat_interrupt_vector_configuration interrupt_vector_configuration;
-	union spdk_nvme_feat_write_atomicity write_atomicity;
-	union spdk_nvme_feat_async_event_configuration async_event_configuration;
-	union spdk_nvme_feat_keep_alive_timer keep_alive_timer;
-};
-
 /*
  * NVMf reservation notification log page.
  */

@@ -220,7 +205,7 @@ struct spdk_nvmf_ctrlr {
 
 	const struct spdk_nvmf_subsystem_listener *listener;
 
-	struct spdk_nvmf_request *aer_req[NVMF_MAX_ASYNC_EVENTS];
+	struct spdk_nvmf_request *aer_req[SPDK_NVMF_MAX_ASYNC_EVENTS];
 	STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
 	uint64_t notice_aen_mask;
 	uint8_t nr_aer_reqs;

@@ -254,29 +239,6 @@ struct spdk_nvmf_ctrlr {
 	TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
 };
 
-/* Maximum pending AERs that can be migrated */
-#define NVMF_MIGR_MAX_PENDING_AERS 256
-
-/* spdk_nvmf_ctrlr private migration data structure used to save/restore a controller */
-struct nvmf_ctrlr_migr_data {
-	uint32_t opts_size;
-
-	uint16_t cntlid;
-	uint8_t reserved1[2];
-
-	struct spdk_nvmf_ctrlr_feat feat;
-	uint32_t reserved2[2];
-
-	uint32_t num_async_events;
-	uint32_t acre_enabled;
-	uint64_t notice_aen_mask;
-	union spdk_nvme_async_event_completion async_events[NVMF_MIGR_MAX_PENDING_AERS];
-
-	/* New fields shouldn't go after reserved3 */
-	uint8_t reserved3[3000];
-};
-SPDK_STATIC_ASSERT(sizeof(struct nvmf_ctrlr_migr_data) == 0x1000, "Incorrect size");
-
 #define NVMF_MAX_LISTENERS_PER_SUBSYSTEM 16
 
 struct spdk_nvmf_subsystem {

@@ -442,11 +404,6 @@ void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
  * the host to send a subsequent AER.
  */
 void nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr);
-int nvmf_ctrlr_save_aers(struct spdk_nvmf_ctrlr *ctrlr, uint16_t *aer_cids,
-			 uint16_t max_aers);
-
-int nvmf_ctrlr_save_migr_data(struct spdk_nvmf_ctrlr *ctrlr, struct nvmf_ctrlr_migr_data *data);
-int nvmf_ctrlr_restore_migr_data(struct spdk_nvmf_ctrlr *ctrlr, struct nvmf_ctrlr_migr_data *data);
 
 /*
  * Abort zero-copy requests that already got the buffer (received zcopy_start cb), but haven't
lib/nvmf/spdk_nvmf.map

@@ -114,6 +114,8 @@
 	spdk_nvmf_request_zcopy_end;
 	spdk_nvmf_ctrlr_get_subsystem;
 	spdk_nvmf_ctrlr_get_id;
+	spdk_nvmf_ctrlr_save_migr_data;
+	spdk_nvmf_ctrlr_restore_migr_data;
 	spdk_nvmf_req_get_xfer;
 	spdk_nvmf_poll_group_remove;
 
lib/nvmf/vfio_user.c

@@ -143,11 +143,6 @@ struct vfio_user_nvme_migr_header {
 	uint32_t num_io_queues;
 	uint32_t reserved1;
 
-	/* TODO: this part will be moved to common nvmf controller data */
-	uint16_t reserved2[3];
-	uint16_t nr_aers;
-	uint16_t aer_cids[NVMF_MIGR_MAX_PENDING_AERS];
-
 	/* NVMf controller data offset and length if exist, starting at
 	 * the beginning of this data structure.
 	 */

@@ -158,7 +153,7 @@ struct vfio_user_nvme_migr_header {
 	 * Whether or not shadow doorbells are used in the source. 0 is a valid DMA
 	 * address.
 	 */
-	bool sdbl;
+	uint32_t sdbl;
 
 	/* Shadow doorbell DMA addresses. */
 	uint64_t shadow_doorbell_buffer;

@@ -167,7 +162,7 @@ struct vfio_user_nvme_migr_header {
 	/* Reserved memory space for new added fields, the
 	 * field is always at the end of this data structure.
 	 */
-	uint8_t unused[3336];
+	uint8_t unused[3856];
 };
 SPDK_STATIC_ASSERT(sizeof(struct vfio_user_nvme_migr_header) == 0x1000, "Incorrect size");

@@ -179,9 +174,9 @@ struct vfio_user_nvme_migr_qp {
 /* NVMe state definition used to load/restore from/to NVMe migration BAR region */
 struct vfio_user_nvme_migr_state {
 	struct vfio_user_nvme_migr_header ctrlr_header;
-	struct nvmf_ctrlr_migr_data nvmf_data;
+	struct spdk_nvmf_ctrlr_migr_data nvmf_data;
 	struct vfio_user_nvme_migr_qp qps[NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR];
-	uint8_t bar0[NVME_REG_BAR0_SIZE];
+	uint8_t doorbells[NVMF_VFIO_USER_DOORBELLS_SIZE];
 	uint8_t cfg[NVME_REG_CFG_SIZE];
 };

@@ -3179,7 +3174,7 @@ vfio_user_ctrlr_dump_migr_data(const char *name,
 			       struct vfio_user_nvme_migr_state *migr_data,
 			       struct nvmf_vfio_user_shadow_doorbells *sdbl)
 {
-	struct spdk_nvme_registers *regs;
+	struct spdk_nvmf_registers *regs;
 	struct nvme_migr_sq_state *sq;
 	struct nvme_migr_cq_state *cq;
 	uint32_t *doorbell_base;

@@ -3187,8 +3182,8 @@ vfio_user_ctrlr_dump_migr_data(const char *name,
 
 	SPDK_NOTICELOG("Dump %s\n", name);
 
-	regs = (struct spdk_nvme_registers *)migr_data->bar0;
-	doorbell_base = (uint32_t *)&regs->doorbell[0].sq_tdbl;
+	regs = &migr_data->nvmf_data.regs;
+	doorbell_base = (uint32_t *)&migr_data->doorbells;
 
 	SPDK_NOTICELOG("Registers\n");
 	SPDK_NOTICELOG("CSTS 0x%x\n", regs->csts.raw);

@@ -3263,9 +3258,10 @@ vfio_user_migr_stream_to_data(struct nvmf_vfio_user_endpoint *endpoint,
 	data_ptr = endpoint->migr_data + migr_state->ctrlr_header.qp_offset;
 	memcpy(&migr_state->qps, data_ptr, migr_state->ctrlr_header.qp_len);
 
-	/* Load BAR0 */
+	/* Load doorbells */
 	data_ptr = endpoint->migr_data + migr_state->ctrlr_header.bar_offset[VFU_PCI_DEV_BAR0_REGION_IDX];
-	memcpy(&migr_state->bar0, data_ptr, migr_state->ctrlr_header.bar_len[VFU_PCI_DEV_BAR0_REGION_IDX]);
+	memcpy(&migr_state->doorbells, data_ptr,
+	       migr_state->ctrlr_header.bar_len[VFU_PCI_DEV_BAR0_REGION_IDX]);
 
 	/* Load CFG */
 	data_ptr = endpoint->migr_data + migr_state->ctrlr_header.bar_offset[VFU_PCI_DEV_CFG_REGION_IDX];

@@ -3282,14 +3278,18 @@ vfio_user_migr_ctrlr_save_data(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
 	struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint;
 	struct nvmf_vfio_user_sq *sq;
 	struct nvmf_vfio_user_cq *cq;
-	struct vfio_user_nvme_migr_state migr_state = {};
 	uint64_t data_offset;
 	void *data_ptr;
-	int num_aers;
-	struct spdk_nvme_registers *regs;
 	uint32_t *doorbell_base;
 	uint32_t i = 0;
 	uint16_t sqid, cqid;
+	struct vfio_user_nvme_migr_state migr_state = {
+		.nvmf_data = {
+			.data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused),
+			.regs_size = sizeof(struct spdk_nvmf_registers),
+			.feat_size = sizeof(struct spdk_nvmf_ctrlr_feat)
+		}
+	};
 
 	/* Save all data to vfio_user_nvme_migr_state first, then we will
 	 * copy it to device migration region at last.

@@ -3299,13 +3299,7 @@ vfio_user_migr_ctrlr_save_data(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
 	migr_state.ctrlr_header.magic = VFIO_USER_NVME_MIGR_MAGIC;
 
-	/* save controller data */
-	num_aers = nvmf_ctrlr_save_aers(ctrlr, migr_state.ctrlr_header.aer_cids,
-					256);
-	assert(num_aers >= 0);
-	migr_state.ctrlr_header.nr_aers = num_aers;
-
 	/* save nvmf controller data */
-	nvmf_ctrlr_save_migr_data(ctrlr, (struct nvmf_ctrlr_migr_data *)&migr_state.nvmf_data);
+	spdk_nvmf_ctrlr_save_migr_data(ctrlr, &migr_state.nvmf_data);
 
 	/* save connected queue pairs */
 	TAILQ_FOREACH(sq, &vu_ctrlr->connected_sqs, tailq) {

@@ -3333,17 +3327,8 @@ vfio_user_migr_ctrlr_save_data(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
 	assert(i > 0);
 	migr_state.ctrlr_header.num_io_queues = i - 1;
 
-	regs = (struct spdk_nvme_registers *)&migr_state.bar0;
-	/* Save mandarory registers to bar0 */
-	regs->csts.raw = ctrlr->vcprop.csts.raw;
-	regs->cap.raw = ctrlr->vcprop.cap.raw;
-	regs->vs.raw = ctrlr->vcprop.vs.raw;
-	regs->cc.raw = ctrlr->vcprop.cc.raw;
-	regs->aqa.raw = ctrlr->vcprop.aqa.raw;
-	regs->asq = ctrlr->vcprop.asq;
-	regs->acq = ctrlr->vcprop.acq;
-	/* Save doorbells */
-	doorbell_base = (uint32_t *)&regs->doorbell[0].sq_tdbl;
+	/* Save doorbells */
+	doorbell_base = (uint32_t *)&migr_state.doorbells;
 	memcpy(doorbell_base, (void *)vu_ctrlr->bar0_doorbells, NVMF_VFIO_USER_DOORBELLS_SIZE);
 
 	/* Save PCI configuration space */

@@ -3356,27 +3341,27 @@ vfio_user_migr_ctrlr_save_data(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
 	data_offset = sizeof(struct vfio_user_nvme_migr_header);
 	data_ptr += data_offset;
 	migr_state.ctrlr_header.nvmf_data_offset = data_offset;
-	migr_state.ctrlr_header.nvmf_data_len = sizeof(struct nvmf_ctrlr_migr_data);
-	memcpy(data_ptr, &migr_state.nvmf_data, sizeof(struct nvmf_ctrlr_migr_data));
+	migr_state.ctrlr_header.nvmf_data_len = sizeof(struct spdk_nvmf_ctrlr_migr_data);
+	memcpy(data_ptr, &migr_state.nvmf_data, sizeof(struct spdk_nvmf_ctrlr_migr_data));
 
 	/* Copy queue pairs */
-	data_offset += sizeof(struct nvmf_ctrlr_migr_data);
-	data_ptr += sizeof(struct nvmf_ctrlr_migr_data);
+	data_offset += sizeof(struct spdk_nvmf_ctrlr_migr_data);
+	data_ptr += sizeof(struct spdk_nvmf_ctrlr_migr_data);
 	migr_state.ctrlr_header.qp_offset = data_offset;
 	migr_state.ctrlr_header.qp_len = i * (sizeof(struct nvme_migr_sq_state) + sizeof(
 			struct nvme_migr_cq_state));
 	memcpy(data_ptr, &migr_state.qps, migr_state.ctrlr_header.qp_len);
 
-	/* Copy BAR0 */
+	/* Copy doorbells */
 	data_offset += migr_state.ctrlr_header.qp_len;
 	data_ptr += migr_state.ctrlr_header.qp_len;
 	migr_state.ctrlr_header.bar_offset[VFU_PCI_DEV_BAR0_REGION_IDX] = data_offset;
-	migr_state.ctrlr_header.bar_len[VFU_PCI_DEV_BAR0_REGION_IDX] = NVME_REG_BAR0_SIZE;
-	memcpy(data_ptr, &migr_state.bar0, NVME_REG_BAR0_SIZE);
+	migr_state.ctrlr_header.bar_len[VFU_PCI_DEV_BAR0_REGION_IDX] = NVMF_VFIO_USER_DOORBELLS_SIZE;
+	memcpy(data_ptr, &migr_state.doorbells, NVMF_VFIO_USER_DOORBELLS_SIZE);
 
 	/* Copy CFG */
-	data_offset += NVME_REG_BAR0_SIZE;
-	data_ptr += NVME_REG_BAR0_SIZE;
+	data_offset += NVMF_VFIO_USER_DOORBELLS_SIZE;
+	data_ptr += NVMF_VFIO_USER_DOORBELLS_SIZE;
 	migr_state.ctrlr_header.bar_offset[VFU_PCI_DEV_CFG_REGION_IDX] = data_offset;
 	migr_state.ctrlr_header.bar_len[VFU_PCI_DEV_CFG_REGION_IDX] = NVME_REG_CFG_SIZE;
 	memcpy(data_ptr, &migr_state.cfg, NVME_REG_CFG_SIZE);

@@ -3545,11 +3530,16 @@ vfio_user_migr_ctrlr_restore(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
 	struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint;
 	struct spdk_nvmf_ctrlr *ctrlr = vu_ctrlr->ctrlr;
 	uint32_t *doorbell_base;
-	struct vfio_user_nvme_migr_state migr_state = {};
-	struct spdk_nvme_registers *regs;
 	struct spdk_nvme_cmd cmd;
 	uint16_t i;
 	int rc = 0;
+	struct vfio_user_nvme_migr_state migr_state = {
+		.nvmf_data = {
+			.data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused),
+			.regs_size = sizeof(struct spdk_nvmf_registers),
+			.feat_size = sizeof(struct spdk_nvmf_ctrlr_feat)
+		}
+	};
 
 	assert(endpoint->migr_data != NULL);
 	assert(ctrlr != NULL);

@@ -3585,33 +3575,23 @@ vfio_user_migr_ctrlr_restore(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
 	/* restore PCI configuration space */
 	memcpy((void *)endpoint->pci_config_space, &migr_state.cfg, NVME_REG_CFG_SIZE);
 
-	regs = (struct spdk_nvme_registers *)&migr_state.bar0;
-	doorbell_base = (uint32_t *)&regs->doorbell[0].sq_tdbl;
+	doorbell_base = (uint32_t *)&migr_state.doorbells;
 	/* restore doorbells from saved registers */
 	memcpy((void *)vu_ctrlr->bar0_doorbells, doorbell_base, NVMF_VFIO_USER_DOORBELLS_SIZE);
 
-	/* restore controller registers after ADMIN queue connection */
-	ctrlr->vcprop.csts.raw = regs->csts.raw;
-	ctrlr->vcprop.cap.raw = regs->cap.raw;
-	ctrlr->vcprop.vs.raw = regs->vs.raw;
-	ctrlr->vcprop.cc.raw = regs->cc.raw;
-	ctrlr->vcprop.aqa.raw = regs->aqa.raw;
-	ctrlr->vcprop.asq = regs->asq;
-	ctrlr->vcprop.acq = regs->acq;
-
 	/* restore nvmf controller data */
-	rc = nvmf_ctrlr_restore_migr_data(ctrlr, &migr_state.nvmf_data);
+	rc = spdk_nvmf_ctrlr_restore_migr_data(ctrlr, &migr_state.nvmf_data);
 	if (rc) {
 		return rc;
 	}
 
 	/* resubmit pending AERs */
-	for (i = 0; i < migr_state.ctrlr_header.nr_aers; i++) {
+	for (i = 0; i < migr_state.nvmf_data.num_aer_cids; i++) {
 		SPDK_DEBUGLOG(nvmf_vfio, "%s AER resubmit, CID %u\n", ctrlr_id(vu_ctrlr),
-			      migr_state.ctrlr_header.aer_cids[i]);
+			      migr_state.nvmf_data.aer_cids[i]);
 		memset(&cmd, 0, sizeof(cmd));
 		cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
-		cmd.cid = migr_state.ctrlr_header.aer_cids[i];
+		cmd.cid = migr_state.nvmf_data.aer_cids[i];
 		rc = handle_cmd_req(vu_ctrlr, &cmd, vu_ctrlr->sqs[0]);
 		if (rc) {
 			break;
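One subtlety in the restore path above: spdk_nvmf_ctrlr_restore_migr_data() repopulates controller state but deliberately does not re-arm AERs, matching the header comment that AERs shall be restored via spdk_nvmf_request_exec afterwards. vfio-user does this by synthesizing one Async Event Request per saved CID and pushing it down its own admin-queue path; condensed from the code above:

	for (i = 0; i < migr_state.nvmf_data.num_aer_cids; i++) {
		memset(&cmd, 0, sizeof(cmd));
		cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
		cmd.cid = migr_state.nvmf_data.aer_cids[i];
		/* handle_cmd_req() is vfio-user's internal submission helper;
		 * a generic transport would instead build an spdk_nvmf_request
		 * and run it through spdk_nvmf_request_exec(). */
		rc = handle_cmd_req(vu_ctrlr, &cmd, vu_ctrlr->sqs[0]);
		if (rc) {
			break;
		}
	}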
test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c

@@ -1786,17 +1786,17 @@ test_multi_async_event_reqs(void)
 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
 	}
 
-	/* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
-	sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS;
-	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
+	/* Target can store SPDK_NVMF_MAX_ASYNC_EVENTS reqs */
+	sgroups.mgmt_io_outstanding = SPDK_NVMF_MAX_ASYNC_EVENTS;
+	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
 	}
 	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
 
-	/* Exceeding the NVMF_MAX_ASYNC_EVENTS reports error */
+	/* Exceeding the SPDK_NVMF_MAX_ASYNC_EVENTS reports error */
 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
-	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
+	CU_ASSERT(ctrlr.nr_aer_reqs == SPDK_NVMF_MAX_ASYNC_EVENTS);
 	CU_ASSERT(rsp[4].nvme_cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC);
 	CU_ASSERT(rsp[4].nvme_cpl.status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
test/unit/lib/nvmf/vfio_user.c/vfio_user_ut.c

@@ -36,13 +36,11 @@ DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
 		const struct spdk_nvme_transport_id *trid2), 0);
 DEFINE_STUB(nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *,
 	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), NULL);
-DEFINE_STUB(nvmf_ctrlr_save_aers, int, (struct spdk_nvmf_ctrlr *ctrlr, uint16_t *aer_cids,
-					uint16_t max_aers), 0);
-DEFINE_STUB(nvmf_ctrlr_save_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
-		struct nvmf_ctrlr_migr_data *data), 0);
-DEFINE_STUB(nvmf_ctrlr_restore_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
-		struct nvmf_ctrlr_migr_data *data), 0);
 DEFINE_STUB_V(nvmf_ctrlr_set_fatal_status, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB(spdk_nvmf_ctrlr_save_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
+		struct spdk_nvmf_ctrlr_migr_data *data), 0);
+DEFINE_STUB(spdk_nvmf_ctrlr_restore_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
+		const struct spdk_nvmf_ctrlr_migr_data *data), 0);
 
 static void *
 gpa_to_vva(void *prv, uint64_t addr, uint64_t len, int prot)