nvmf: Refactor ctrlr_bdev_dsm_cmd to prepare for more dsm commands

This was previously very unmap-specific. Make at least the top-level
DSM call more general purpose by eliminating the unmap_ctx from it.

Change-Id: I9c044263e9b7e4ce7613badc36b51d00b6957d3a
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/440590
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Ben Walker 2019-01-15 11:03:08 -07:00 committed by Jim Harris
parent f52f6aee0e
commit 5f0df58532


@@ -335,7 +335,7 @@ struct nvmf_bdev_ctrlr_unmap {
 };
 
 static void
-nvmf_bdev_ctrlr_dsm_cpl(struct spdk_bdev_io *bdev_io, bool success,
+nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			void *cb_arg)
 {
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
@@ -360,11 +360,11 @@ nvmf_bdev_ctrlr_dsm_cpl(struct spdk_bdev_io *bdev_io, bool success,
 }
 
 static int
-nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
			struct nvmf_bdev_ctrlr_unmap *unmap_ctx);
 
 static void
-nvmf_bdev_ctrlr_dsm_cmd_resubmit(void *arg)
+nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
 {
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
@@ -372,18 +372,20 @@ nvmf_bdev_ctrlr_dsm_cmd_resubmit(void *arg)
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;
 
-	nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req, unmap_ctx);
+	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
 }
 
 static int
-nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
			struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
 {
-	uint32_t attribute;
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+	struct spdk_nvme_dsm_range *dsm_range;
+	uint64_t lba;
+	uint32_t lba_count;
	int rc;
 
	nr = ((cmd->cdw10 & 0x000000ff) + 1);
@@ -393,12 +395,6 @@ nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
 
-	attribute = cmd->cdw11 & 0x00000007;
-	if (attribute & SPDK_NVME_DSM_ATTR_DEALLOCATE) {
-		struct spdk_nvme_dsm_range *dsm_range;
-		uint64_t lba;
-		uint32_t lba_count;
-
	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
@@ -422,10 +418,10 @@ nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		unmap_ctx->count++;
 
		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
-					    nvmf_bdev_ctrlr_dsm_cpl, unmap_ctx);
+					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
-				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_dsm_cmd_resubmit, unmap_ctx);
+				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to bdev */
				unmap_ctx->count--;
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
@@ -446,6 +442,19 @@ nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
 }
 
+static int
+nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+			struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
+{
+	uint32_t attribute;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+
+	attribute = cmd->cdw11 & 0x00000007;
+	if (attribute & SPDK_NVME_DSM_ATTR_DEALLOCATE) {
+		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
+	}
+
	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
@@ -524,7 +533,7 @@ spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
	case SPDK_NVME_OPC_FLUSH:
		return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
-		return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req, NULL);
+		return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req);
	default:
		return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
	}
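
After this change, nvmf_bdev_ctrlr_dsm_cmd is a thin dispatcher over the DSM attribute bits in CDW11, and the unmap context lives only in nvmf_bdev_ctrlr_unmap. As a rough, standalone illustration of why that shape makes it easier to add more DSM commands, here is a minimal sketch of the same dispatch pattern; the names dsm_cmd, dsm_dispatch and handle_deallocate are illustrative stand-ins, not SPDK APIs, and the struct is a simplified model of the NVMe command fields used.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the NVMe command words the dispatcher reads;
 * real SPDK code uses struct spdk_nvme_cmd / struct spdk_nvme_cpl. */
struct dsm_cmd {
	uint32_t cdw10;   /* bits 7:0 = number of ranges - 1 */
	uint32_t cdw11;   /* bits 2:0 = attribute flags (AD, IDW, IDR) */
};

enum dsm_attr {
	DSM_ATTR_INTEGRAL_READ  = 0x1,
	DSM_ATTR_INTEGRAL_WRITE = 0x2,
	DSM_ATTR_DEALLOCATE     = 0x4,
};

/* Attribute-specific helper; this corresponds to the role of
 * nvmf_bdev_ctrlr_unmap() in the patch above. */
static int handle_deallocate(const struct dsm_cmd *cmd)
{
	printf("deallocate: %u range(s)\n", (unsigned)((cmd->cdw10 & 0xff) + 1));
	return 0;
}

/* Top-level DSM dispatcher, mirroring the new nvmf_bdev_ctrlr_dsm_cmd():
 * inspect the attribute bits, delegate to a handler, otherwise complete
 * the command successfully as a no-op. */
static int dsm_dispatch(const struct dsm_cmd *cmd)
{
	uint32_t attribute = cmd->cdw11 & 0x00000007;

	if (attribute & DSM_ATTR_DEALLOCATE) {
		return handle_deallocate(cmd);
	}

	/* A future DSM command would get its own branch and helper here,
	 * instead of being wedged into the unmap-specific context. */

	return 0; /* success, nothing to do */
}

int main(void)
{
	struct dsm_cmd cmd = { .cdw10 = 3, .cdw11 = DSM_ATTR_DEALLOCATE };

	return dsm_dispatch(&cmd);
}

Supporting another attribute in this scheme means adding one more branch in the dispatcher plus its own handler, leaving the deallocate/unmap path untouched.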