nvmf: No longer route fabrics/admin commands to a single thread

These commands can now pause a subsystem if they need to operate
on it. None of the NVMe commands we currently implement actually
needs to pause, so this patch is simpler than one might expect.

Change-Id: I25bfdf8e7577cda2bb0ce248d2889447032b9b4c
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/394121
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Ben Walker 2018-01-09 15:03:53 -07:00 committed by Jim Harris
parent e3d083f8d5
commit 823b565b5f
3 changed files with 43 additions and 70 deletions
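
The commit message notes that a fabrics or admin command can now pause the subsystem if it needs to operate on it, because these commands now run on the qpair's own poll group thread instead of being funneled to the master thread. Below is a hypothetical sketch, not part of this patch, of what such a handler could look like. The handler names are invented, and the spdk_nvmf_subsystem_pause()/spdk_nvmf_subsystem_resume() signatures and callback ordering are assumed from the public nvmf API of this era.

#include "spdk/nvmf.h"       /* assumed: spdk_nvmf_subsystem_pause/resume */
#include "nvmf_internal.h"   /* assumed: struct spdk_nvmf_request, exec status enum */

/* Hypothetical admin command handler that needs exclusive access to the
 * subsystem. It pauses the subsystem, does its work from the paused
 * callback, resumes, and only then completes the request.
 */
static void
example_resume_done(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	struct spdk_nvmf_request *req = cb_arg;

	/* Subsystem is active again; complete the request on this thread. */
	spdk_nvmf_request_complete(req);
}

static void
example_paused(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (status != 0) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		spdk_nvmf_request_complete(req);
		return;
	}

	/* ... safely modify the paused subsystem here ... */

	spdk_nvmf_subsystem_resume(subsystem, example_resume_done, req);
}

static int
example_admin_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (spdk_nvmf_subsystem_pause(ctrlr->subsys, example_paused, req) != 0) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* The request is completed later, from the resume callback. */
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

Because the pause completes asynchronously, the handler returns SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS and completes the request from the resume callback on the same thread.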


@@ -1083,15 +1083,29 @@ spdk_nvmf_ctrlr_keep_alive(struct spdk_nvmf_request *req)
 int
 spdk_nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_subsystem *subsystem = req->qpair->ctrlr->subsys;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
 
+	if (ctrlr == NULL) {
+		SPDK_ERRLOG("Admin command sent before CONNECT\n");
+		response->status.sct = SPDK_NVME_SCT_GENERIC;
+		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+
+	if (ctrlr->vcprop.cc.bits.en != 1) {
+		SPDK_ERRLOG("Admin command sent to disabled controller\n");
+		response->status.sct = SPDK_NVME_SCT_GENERIC;
+		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+
 	if (req->data && spdk_nvme_opc_get_data_transfer(cmd->opc) == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
 		memset(req->data, 0, req->length);
 	}
 
-	if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
+	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
 		/* Discovery controllers only support Get Log Page and Identify */
 		switch (cmd->opc) {
 		case SPDK_NVME_OPC_IDENTIFY:


@@ -381,7 +381,7 @@ spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
 	struct spdk_bdev_desc *desc;
 	struct spdk_io_channel *ch;
 	struct spdk_nvmf_poll_group *group = req->qpair->group;
-	struct spdk_nvmf_subsystem *subsystem = req->qpair->ctrlr->subsys;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
 
@@ -389,7 +389,21 @@ spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
 	response->status.sc = SPDK_NVME_SC_SUCCESS;
 
 	nsid = cmd->nsid;
-	ns = _spdk_nvmf_subsystem_get_ns(subsystem, nsid);
+
+	if (spdk_unlikely(ctrlr == NULL)) {
+		SPDK_ERRLOG("I/O command sent before CONNECT\n");
+		response->status.sct = SPDK_NVME_SCT_GENERIC;
+		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+
+	if (spdk_unlikely(ctrlr->vcprop.cc.bits.en != 1)) {
+		SPDK_ERRLOG("I/O command sent to disabled controller\n");
+		response->status.sct = SPDK_NVME_SCT_GENERIC;
+		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+
+	ns = _spdk_nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
 	if (ns == NULL || ns->bdev == NULL || ns->is_removed) {
 		SPDK_ERRLOG("Unsuccessful query for nsid %u\n", cmd->nsid);
 		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
@@ -399,7 +413,7 @@ spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
 	bdev = ns->bdev;
 	desc = ns->desc;
-	ch = group->sgroups[subsystem->id].channels[nsid - 1];
+	ch = group->sgroups[ctrlr->subsys->id].channels[nsid - 1];
 
 	switch (cmd->opc) {
 	case SPDK_NVME_OPC_READ:
 		return nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req);


@@ -37,6 +37,7 @@
 #include "transport.h"
 
 #include "spdk/io_channel.h"
 #include "spdk/likely.h"
+#include "spdk/nvme.h"
 #include "spdk/nvmf_spec.h"
 #include "spdk/trace.h"
@@ -44,10 +45,9 @@
 #include "spdk_internal/assert.h"
 #include "spdk_internal/log.h"
 
-static void
-spdk_nvmf_request_complete_on_qpair(void *ctx)
+int
+spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_request *req = ctx;
 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
 
 	rsp->sqid = 0;
@@ -62,25 +62,6 @@ spdk_nvmf_request_complete_on_qpair(void *ctx)
 	if (spdk_nvmf_transport_req_complete(req)) {
 		SPDK_ERRLOG("Transport request completion error!\n");
 	}
-}
-
-int
-spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
-{
-	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
-
-	if (cmd->opc == SPDK_NVME_OPC_FABRIC ||
-	    req->qpair->type == QPAIR_TYPE_AQ) {
-		struct spdk_io_channel *ch;
-
-		ch = spdk_io_channel_from_ctx(req->qpair->group);
-		/* Pass a message back to the originating thread. */
-		spdk_thread_send_msg(spdk_io_channel_get_thread(ch),
-				     spdk_nvmf_request_complete_on_qpair,
-				     req);
-	} else {
-		spdk_nvmf_request_complete_on_qpair(req);
-	}
 
 	return 0;
 }
@@ -129,60 +110,24 @@ nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, enum spdk_nvmf_qpair_type qpair_type)
 	}
 }
 
-static void
-spdk_nvmf_request_exec_on_master(void *ctx)
-{
-	struct spdk_nvmf_request *req = ctx;
-	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
-	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
-	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
-	spdk_nvmf_request_exec_status status;
-
-	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
-		status = spdk_nvmf_ctrlr_process_fabrics_cmd(req);
-	} else if (ctrlr == NULL || !ctrlr->vcprop.cc.bits.en) {
-		/* Only Fabric commands are allowed when the controller is disabled */
-		SPDK_ERRLOG("Non-Fabric command sent to disabled controller\n");
-		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
-		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
-	} else {
-		status = spdk_nvmf_ctrlr_process_admin_cmd(req);
-	}
-
-	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
-		spdk_nvmf_request_complete(req);
-	}
-}
-
 void
 spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
 {
 	struct spdk_nvmf_qpair *qpair = req->qpair;
-	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
-	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
-	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
 	spdk_nvmf_request_exec_status status;
 
 	nvmf_trace_command(req->cmd, qpair->type);
 
-	if (spdk_unlikely(cmd->opc == SPDK_NVME_OPC_FABRIC || qpair->type == QPAIR_TYPE_AQ)) {
-		/* Fabric and admin commands are sent to the master core for synchronization. */
-		spdk_thread_send_msg(qpair->transport->tgt->master_thread,
-				     spdk_nvmf_request_exec_on_master,
-				     req);
-		return;
+	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
+		status = spdk_nvmf_ctrlr_process_fabrics_cmd(req);
+	} else if (spdk_unlikely(qpair->type == QPAIR_TYPE_AQ)) {
+		status = spdk_nvmf_ctrlr_process_admin_cmd(req);
+	} else {
+		status = spdk_nvmf_ctrlr_process_io_cmd(req);
 	}
 
-	if (spdk_unlikely(ctrlr == NULL)) {
-		SPDK_ERRLOG("I/O command sent before connect\n");
-		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
-		spdk_nvmf_request_complete_on_qpair(req);
-		return;
-	}
-
-	status = spdk_nvmf_ctrlr_process_io_cmd(req);
-
 	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
-		spdk_nvmf_request_complete_on_qpair(req);
+		spdk_nvmf_request_complete(req);
 	}
 }