nvme: track qpair within nvme_request

Change-Id: Ia40a1e79db6327c4693731e9bb7a57810795429d
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>

parent 15f910ece7
commit cd13f280f4
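
The diff below threads the submitting queue pair through every internal
request-allocation helper and records it in struct nvme_request
(req->qpair = qpair), so any code that later holds a request can find the
queue pair it was allocated on without that qpair being passed alongside it.
The following is a self-contained toy sketch of the pattern only, not SPDK
code; every name in it (toy_qpair, toy_request, toy_allocate_request, done)
is a placeholder invented for illustration.

/*
 * Toy model of "track the qpair inside the request": the allocation helper
 * takes the owning queue pair and stores it in the request it returns.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_qpair {
	int id;
};

struct toy_request {
	void (*cb_fn)(void *cb_arg);
	void *cb_arg;
	struct toy_qpair *qpair;	/* remembered at allocation time */
};

static struct toy_request *
toy_allocate_request(struct toy_qpair *qpair, void (*cb_fn)(void *), void *cb_arg)
{
	struct toy_request *req = calloc(1, sizeof(*req));

	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->qpair = qpair;	/* mirrors req->qpair = qpair; in the diff */
	}
	return req;
}

static void
done(void *cb_arg)
{
	printf("completed on qpair %d\n", ((struct toy_qpair *)cb_arg)->id);
}

int
main(void)
{
	struct toy_qpair q = { .id = 1 };
	struct toy_request *req = toy_allocate_request(&q, done, &q);

	/* The request alone is enough to recover its owning queue pair. */
	assert(req != NULL && req->qpair == &q);
	req->cb_fn(req->cb_arg);
	free(req);
	return 0;
}
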
@@ -71,7 +71,8 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
 }
 
 struct nvme_request *
-nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
+nvme_allocate_request(struct spdk_nvme_qpair *qpair,
+		      const struct nvme_payload *payload, uint32_t payload_size,
 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
 	struct nvme_request *req = NULL;
@@ -94,14 +95,16 @@ nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
 	req->cb_arg = cb_arg;
 	req->payload = *payload;
 	req->payload_size = payload_size;
+	req->qpair = qpair;
 	req->pid = getpid();
 
 	return req;
 }
 
 struct nvme_request *
-nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
-			     void *cb_arg)
+nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
+			     void *buffer, uint32_t payload_size,
+			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
 	struct nvme_payload payload;
 
@@ -109,13 +112,13 @@ nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_
 	payload.u.contig = buffer;
 	payload.md = NULL;
 
-	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+	return nvme_allocate_request(qpair, &payload, payload_size, cb_fn, cb_arg);
 }
 
 struct nvme_request *
-nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
-	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
 }
 
 static void
@@ -148,7 +151,8 @@ nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
  * where the overhead of a copy is not a problem.
  */
 struct nvme_request *
-nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
+				void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
 				void *cb_arg, bool host_to_controller)
 {
 	struct nvme_request *req;
@@ -166,7 +170,8 @@ nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size, spdk_nvme_c
 		}
 	}
 
-	req = nvme_allocate_request_contig(contig_buffer, payload_size, nvme_user_copy_cmd_complete, NULL);
+	req = nvme_allocate_request_contig(qpair, contig_buffer, payload_size, nvme_user_copy_cmd_complete,
+					   NULL);
 	if (!req) {
 		spdk_free(contig_buffer);
 		return NULL;

@@ -835,7 +835,7 @@ nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
 	struct nvme_request *req;
 
 	aer->ctrlr = ctrlr;
-	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
+	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
 	aer->req = req;
 	if (req == NULL) {
 		return -1;
@@ -1402,7 +1402,7 @@ nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
 		return;
 	}
 
-	req = nvme_allocate_request_null(nvme_keep_alive_completion, NULL);
+	req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
 	if (req == NULL) {
 		return;
 	}

@@ -42,7 +42,7 @@ spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
 {
 	struct nvme_request *req;
 
-	req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);
 
 	if (req == NULL) {
 		return -ENOMEM;
@@ -63,7 +63,7 @@ spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -ENOMEM;
@@ -84,7 +84,8 @@ nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr, void *payload,
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ctrlr_data),
+	req = nvme_allocate_request_user_copy(ctrlr->adminq,
+					      payload, sizeof(struct spdk_nvme_ctrlr_data),
 					      cb_fn, cb_arg, false);
 	if (req == NULL) {
 		return -ENOMEM;
@@ -109,7 +110,8 @@ nvme_ctrlr_cmd_identify_namespace(struct spdk_nvme_ctrlr *ctrlr, uint16_t nsid,
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ns_data),
+	req = nvme_allocate_request_user_copy(ctrlr->adminq,
+					      payload, sizeof(struct spdk_nvme_ns_data),
 					      cb_fn, cb_arg, false);
 	if (req == NULL) {
 		return -ENOMEM;
@@ -136,7 +138,8 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ctrlr_list),
+	req = nvme_allocate_request_user_copy(ctrlr->adminq,
+					      payload, sizeof(struct spdk_nvme_ctrlr_list),
 					      cb_fn, cb_arg, true);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
@@ -163,7 +166,8 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ctrlr_list),
+	req = nvme_allocate_request_user_copy(ctrlr->adminq,
+					      payload, sizeof(struct spdk_nvme_ctrlr_list),
 					      cb_fn, cb_arg, true);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
@@ -190,7 +194,8 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ns_data),
+	req = nvme_allocate_request_user_copy(ctrlr->adminq,
+					      payload, sizeof(struct spdk_nvme_ns_data),
 					      cb_fn, cb_arg, true);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
@@ -216,7 +221,7 @@ nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -ENOMEM;
@@ -242,7 +247,7 @@ nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -ENOMEM;
@@ -269,7 +274,7 @@ spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -ENOMEM;
@@ -297,7 +302,7 @@ spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -ENOMEM;
@@ -371,7 +376,8 @@ spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page
 		return -EINVAL;
 	}
 
-	req = nvme_allocate_request_user_copy(payload, payload_size, cb_fn, cb_arg, false);
+	req = nvme_allocate_request_user_copy(ctrlr->adminq,
+					      payload, payload_size, cb_fn, cb_arg, false);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -ENOMEM;
@@ -440,7 +446,7 @@ spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair
 	}
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_null(spdk_nvme_ctrlr_cmd_abort_cpl, NULL);
+	req = nvme_allocate_request_null(ctrlr->adminq, spdk_nvme_ctrlr_cmd_abort_cpl, NULL);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -ENOMEM;
@@ -477,7 +483,7 @@ nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -ENOMEM;
@@ -504,7 +510,7 @@ nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
 	int rc;
 
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request_user_copy(payload, size, cb_fn, cb_arg, true);
+	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
 	if (req == NULL) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return -ENOMEM;

@@ -186,6 +186,8 @@ struct nvme_request {
 	void				*cb_arg;
 	STAILQ_ENTRY(nvme_request)	stailq;
 
+	struct spdk_nvme_qpair		*qpair;
+
 	/**
 	 * The active admin request can be moved to a per process pending
 	 * list based on the saved pid to tell which process it belongs
@@ -555,12 +557,16 @@ int nvme_ns_construct(struct spdk_nvme_ns *ns, uint16_t id,
 		      struct spdk_nvme_ctrlr *ctrlr);
 void nvme_ns_destruct(struct spdk_nvme_ns *ns);
 
-struct nvme_request *nvme_allocate_request(const struct nvme_payload *payload,
+struct nvme_request *nvme_allocate_request(struct spdk_nvme_qpair *qpair,
+		const struct nvme_payload *payload,
 		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
-struct nvme_request *nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg);
-struct nvme_request *nvme_allocate_request_contig(void *buffer, uint32_t payload_size,
+struct nvme_request *nvme_allocate_request_null(struct spdk_nvme_qpair *qpair,
 		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
-struct nvme_request *nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size,
+struct nvme_request *nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair,
+		void *buffer, uint32_t payload_size,
+		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
+struct nvme_request *nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
+		void *buffer, uint32_t payload_size,
 		spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
 void nvme_free_request(struct nvme_request *req);
 void nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child);

@@ -33,7 +33,7 @@
 
 #include "nvme_internal.h"
 
-static struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
+static struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
 		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
 		void *cb_arg, uint32_t opc, uint32_t io_flags,
@@ -110,7 +110,8 @@ nvme_request_free_children(struct nvme_request *req)
 }
 
 static struct nvme_request *
-_nvme_add_child_request(struct spdk_nvme_ns *ns, const struct nvme_payload *payload,
+_nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+			const struct nvme_payload *payload,
 			uint32_t payload_offset, uint32_t md_offset,
 			uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
 			uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
@@ -118,7 +119,7 @@ _nvme_add_child_request(struct spdk_nvme_ns *ns, const struct nvme_payload *payl
 {
 	struct nvme_request *child;
 
-	child = _nvme_ns_cmd_rw(ns, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
+	child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
 				cb_arg, opc, io_flags, apptag_mask, apptag, check_sgl);
 	if (child == NULL) {
 		nvme_request_free_children(parent);
@@ -132,6 +133,7 @@ _nvme_add_child_request(struct spdk_nvme_ns *ns, const struct nvme_payload *payl
 
 static struct nvme_request *
 _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
+			   struct spdk_nvme_qpair *qpair,
 			   const struct nvme_payload *payload,
 			   uint32_t payload_offset, uint32_t md_offset,
 			   uint64_t lba, uint32_t lba_count,
@@ -158,7 +160,7 @@ _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
 		lba_count = sectors_per_max_io - (lba & sector_mask);
 		lba_count = spdk_min(remaining_lba_count, lba_count);
 
-		child = _nvme_add_child_request(ns, payload, payload_offset, md_offset,
+		child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
 						lba, lba_count, cb_fn, cb_arg, opc,
 						io_flags, apptag_mask, apptag, req, true);
 		if (child == NULL) {
@@ -205,6 +207,7 @@ _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
 
 static struct nvme_request *
 _nvme_ns_cmd_split_sgl_request(struct spdk_nvme_ns *ns,
+			       struct spdk_nvme_qpair *qpair,
 			       const struct nvme_payload *payload,
 			       uint32_t payload_offset, uint32_t md_offset,
 			       uint64_t lba, uint32_t lba_count,
@@ -293,7 +296,7 @@ _nvme_ns_cmd_split_sgl_request(struct spdk_nvme_ns *ns,
 		 * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
 		 * since we have already verified it here.
 		 */
-		child = _nvme_add_child_request(ns, payload, payload_offset, md_offset,
+		child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
 						child_lba, child_lba_count,
 						cb_fn, cb_arg, opc, io_flags,
 						apptag_mask, apptag, req, false);
@@ -316,8 +319,8 @@ _nvme_ns_cmd_split_sgl_request(struct spdk_nvme_ns *ns,
 }
 
 static struct nvme_request *
-_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, const struct nvme_payload *payload,
-		uint32_t payload_offset, uint32_t md_offset,
+_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
 		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
 		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, bool check_sgl)
 {
@@ -342,7 +345,7 @@ _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, const struct nvme_payload *payload,
 		sector_size -= 8;
 	}
 
-	req = nvme_allocate_request(payload, lba_count * sector_size, cb_fn, cb_arg);
+	req = nvme_allocate_request(qpair, payload, lba_count * sector_size, cb_fn, cb_arg);
 	if (req == NULL) {
 		return NULL;
 	}
@@ -359,16 +362,18 @@ _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, const struct nvme_payload *payload,
 	if (sectors_per_stripe > 0 &&
 	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
 
-		return _nvme_ns_cmd_split_request(ns, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
+		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
+						  cb_fn,
 						  cb_arg, opc,
 						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1, apptag_mask, apptag);
 	} else if (lba_count > sectors_per_max_io) {
-		return _nvme_ns_cmd_split_request(ns, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
+		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
+						  cb_fn,
 						  cb_arg, opc,
 						  io_flags, req, sectors_per_max_io, 0, apptag_mask, apptag);
 	} else if (req->payload.type == NVME_PAYLOAD_TYPE_SGL && check_sgl &&
 		   !(ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED)) {
-		return _nvme_ns_cmd_split_sgl_request(ns, payload, payload_offset, md_offset, lba, lba_count,
+		return _nvme_ns_cmd_split_sgl_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
 						      cb_fn, cb_arg, opc, io_flags, req, apptag_mask, apptag);
 	}
 
@@ -389,7 +394,7 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, vo
 	payload.u.contig = buffer;
 	payload.md = NULL;
 
-	req = _nvme_ns_cmd_rw(ns, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
+	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
 			      io_flags, 0,
 			      0, true);
 	if (req != NULL) {
@@ -413,7 +418,7 @@ spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q
 	payload.u.contig = buffer;
 	payload.md = metadata;
 
-	req = _nvme_ns_cmd_rw(ns, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
+	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
 			      io_flags,
 			      apptag_mask, apptag, true);
 	if (req != NULL) {
@@ -442,7 +447,7 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 	payload.u.sgl.next_sge_fn = next_sge_fn;
 	payload.u.sgl.cb_arg = cb_arg;
 
-	req = _nvme_ns_cmd_rw(ns, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
+	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
 			      io_flags, 0, 0, true);
 	if (req != NULL) {
 		return nvme_qpair_submit_request(qpair, req);
@@ -464,7 +469,7 @@ spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 	payload.u.contig = buffer;
 	payload.md = NULL;
 
-	req = _nvme_ns_cmd_rw(ns, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
+	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
 			      io_flags, 0, 0, true);
 	if (req != NULL) {
 		return nvme_qpair_submit_request(qpair, req);
@@ -486,7 +491,7 @@ spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
 	payload.u.contig = buffer;
 	payload.md = metadata;
 
-	req = _nvme_ns_cmd_rw(ns, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
+	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
 			      io_flags, apptag_mask, apptag, true);
 	if (req != NULL) {
 		return nvme_qpair_submit_request(qpair, req);
@@ -514,7 +519,7 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 	payload.u.sgl.next_sge_fn = next_sge_fn;
 	payload.u.sgl.cb_arg = cb_arg;
 
-	req = _nvme_ns_cmd_rw(ns, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
+	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
 			      io_flags, 0, 0, true);
 	if (req != NULL) {
 		return nvme_qpair_submit_request(qpair, req);
@@ -537,7 +542,7 @@ spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q
 		return -EINVAL;
 	}
 
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
 	if (req == NULL) {
 		return -ENOMEM;
 	}
@@ -571,7 +576,7 @@ spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qp
 		return -EINVAL;
 	}
 
-	req = nvme_allocate_request_user_copy((void *)ranges,
+	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
 					      num_ranges * sizeof(struct spdk_nvme_dsm_range),
 					      cb_fn, cb_arg, true);
 	if (req == NULL) {
@@ -595,7 +600,7 @@ spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
 	if (req == NULL) {
 		return -ENOMEM;
 	}
@@ -619,7 +624,8 @@ spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_reservation_register_data),
+	req = nvme_allocate_request_user_copy(qpair,
+					      payload, sizeof(struct spdk_nvme_reservation_register_data),
 					      cb_fn, cb_arg, true);
 	if (req == NULL) {
 		return -ENOMEM;
@@ -651,7 +657,8 @@ spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
+	req = nvme_allocate_request_user_copy(qpair,
+					      payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
 					      cb_arg, true);
 	if (req == NULL) {
 		return -ENOMEM;
@@ -683,7 +690,8 @@ spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_reservation_acquire_data),
+	req = nvme_allocate_request_user_copy(qpair,
+					      payload, sizeof(struct spdk_nvme_reservation_acquire_data),
 					      cb_fn, cb_arg, true);
 	if (req == NULL) {
 		return -ENOMEM;
@@ -717,7 +725,7 @@ spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
 		return -EINVAL;
 	num_dwords = len / 4;
 
-	req = nvme_allocate_request_user_copy(payload, len, cb_fn, cb_arg, false);
+	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
 	if (req == NULL) {
 		return -ENOMEM;
 	}

@@ -1261,7 +1261,7 @@ nvme_pcie_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 	if (req == NULL) {
 		return -ENOMEM;
 	}
@@ -1292,7 +1292,7 @@ nvme_pcie_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 	if (req == NULL) {
 		return -ENOMEM;
 	}
@@ -1319,7 +1319,7 @@ nvme_pcie_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 	if (req == NULL) {
 		return -ENOMEM;
 	}
@@ -1338,7 +1338,7 @@ nvme_pcie_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
 	struct nvme_request *req;
 	struct spdk_nvme_cmd *cmd;
 
-	req = nvme_allocate_request_null(cb_fn, cb_arg);
+	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
 	if (req == NULL) {
 		return -ENOMEM;
 	}

@@ -328,7 +328,8 @@ nvme_ns_construct(struct spdk_nvme_ns *ns, uint16_t id,
 }
 
 struct nvme_request *
-nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
+nvme_allocate_request(struct spdk_nvme_qpair *qpair,
+		      const struct nvme_payload *payload, uint32_t payload_size,
 		      spdk_nvme_cmd_cb cb_fn,
 		      void *cb_arg)
 {
@@ -343,6 +344,7 @@ nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
 
 		req->cb_fn = cb_fn;
 		req->cb_arg = cb_arg;
+		req->qpair = qpair;
 		req->pid = getpid();
 	}
 
@@ -350,21 +352,21 @@ nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
 }
 
 struct nvme_request *
-nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
-			     void *cb_arg)
+nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
 	struct nvme_payload payload;
 
 	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
 	payload.u.contig = buffer;
 
-	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+	return nvme_allocate_request(qpair, &payload, payload_size, cb_fn, cb_arg);
 }
 
 struct nvme_request *
-nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
-	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
 }
 
 void

@@ -239,7 +239,8 @@ static void verify_fw_image_download(struct nvme_request *req)
 }
 
 struct nvme_request *
-nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
+nvme_allocate_request(struct spdk_nvme_qpair *qpair,
+		      const struct nvme_payload *payload, uint32_t payload_size,
 		      spdk_nvme_cmd_cb cb_fn,
 		      void *cb_arg)
 {
@@ -252,15 +253,15 @@ nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
 
 	req->cb_fn = cb_fn;
 	req->cb_arg = cb_arg;
-
+	req->qpair = qpair;
 	req->pid = getpid();
 
 	return req;
 }
 
 struct nvme_request *
-nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
-			     void *cb_arg)
+nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
 	struct nvme_payload payload;
 
@@ -268,21 +269,21 @@ nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_
 	payload.u.contig = buffer;
 	payload.md = NULL;
 
-	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+	return nvme_allocate_request(qpair, &payload, payload_size, cb_fn, cb_arg);
 }
 
 struct nvme_request *
-nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
-	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
 }
 
 struct nvme_request *
-nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
-				void *cb_arg, bool host_to_controller)
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+				spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
 {
 	/* For the unit test, we don't actually need to copy the buffer */
-	return nvme_allocate_request_contig(buffer, payload_size, cb_fn, cb_arg);
+	return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
 }
 
 void

@@ -240,7 +240,7 @@ nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
 }
 
 struct nvme_request *
-nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
 	abort();
 }

@@ -48,7 +48,8 @@ struct nvme_driver _g_nvme_driver = {
 };
 
 struct nvme_request *
-nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
+nvme_allocate_request(struct spdk_nvme_qpair *qpair,
+		      const struct nvme_payload *payload, uint32_t payload_size,
 		      spdk_nvme_cmd_cb cb_fn,
 		      void *cb_arg)
 {
@@ -73,27 +74,28 @@ nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
 	req->cb_arg = cb_arg;
 	req->payload = *payload;
 	req->payload_size = payload_size;
+	req->qpair = qpair;
 	req->pid = getpid();
 
 	return req;
 }
 
 struct nvme_request *
-nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
-			     void *cb_arg)
+nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
 	struct nvme_payload payload;
 
 	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
 	payload.u.contig = buffer;
 
-	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+	return nvme_allocate_request(qpair, &payload, payload_size, cb_fn, cb_arg);
}
 
 struct nvme_request *
-nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
-	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
 }
 
 void
@@ -185,7 +187,7 @@ test3(void)
 
 	prepare_submit_request_test(&qpair, &ctrlr);
 
-	req = nvme_allocate_request_null(expected_success_callback, NULL);
+	req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
@@ -205,7 +207,8 @@ test_ctrlr_failed(void)
 
 	prepare_submit_request_test(&qpair, &ctrlr);
 
-	req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
+	req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
+					   NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	/* Set the controller to failed.