accel: add API to cancel a batch sequence

Added to the framework as well as all 3 engines. Needed by apps that
hit a failure after creating a batch; this lets them tell the
framework to forget about the batch, since they no longer intend to
submit it.

Signed-off-by: paul luse <paul.e.luse@intel.com>
Change-Id: Id94754ab1350e5a969a5fd2306bd59c38f0a0120
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3389
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
paul luse 2020-07-16 18:47:59 -04:00 committed by Jim Harris
parent 751e2812bc
commit 8d059e7a18
9 changed files with 112 additions and 0 deletions

View File

@ -166,6 +166,17 @@ struct spdk_accel_batch *spdk_accel_batch_create(struct spdk_io_channel *ch);
int spdk_accel_batch_submit(struct spdk_io_channel *ch, struct spdk_accel_batch *batch,
spdk_accel_completion_cb cb_fn, void *cb_arg);
/**
* Synchronous call to cancel a batch sequence. In some cases prepared commands will be
* processed if they cannot be cancelled.
*
* \param ch I/O channel associated with this call.
* \param batch Handle provided when the batch was started with spdk_accel_batch_create().
*
* \return 0 on success, negative errno on failure.
*/
int spdk_accel_batch_cancel(struct spdk_io_channel *ch, struct spdk_accel_batch *batch);
/**
* Synchronous call to prepare a copy request into a previously initialized batch
* created with spdk_accel_batch_create(). The callback will be called when the copy

View File

@ -172,6 +172,16 @@ struct idxd_batch *spdk_idxd_batch_create(struct spdk_idxd_io_channel *chan);
int spdk_idxd_batch_submit(struct spdk_idxd_io_channel *chan, struct idxd_batch *batch,
spdk_idxd_req_cb cb_fn, void *cb_arg);
/**
* Cancel a batch sequence.
*
* \param chan IDXD channel to submit request.
* \param batch Handle provided when the batch was started with spdk_idxd_batch_create().
*
* \return 0 on success, negative errno on failure.
*/
int spdk_idxd_batch_cancel(struct spdk_idxd_io_channel *chan, struct idxd_batch *batch);
/**
* Synchronous call to prepare a copy request into a previously initialized batch
* created with spdk_idxd_batch_create(). The callback will be called when the copy

View File

@ -67,6 +67,7 @@ struct spdk_accel_engine {
spdk_accel_completion_cb cb_fn, void *cb_arg);
int (*batch_submit)(struct spdk_io_channel *ch, struct spdk_accel_batch *batch,
spdk_accel_completion_cb cb_fn, void *cb_arg);
int (*batch_cancel)(struct spdk_io_channel *ch, struct spdk_accel_batch *batch);
int (*compare)(struct spdk_io_channel *ch, void *src1, void *src2,
uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg);
int (*fill)(struct spdk_io_channel *ch, void *dst, uint8_t fill,

View File

@ -231,6 +231,17 @@ spdk_accel_batch_get_max(struct spdk_io_channel *ch)
return accel_ch->engine->batch_get_max();
}
/* Accel framework public API used when an app is unable to complete a
 * batch sequence; calling this cancels the batch.
 */
int
spdk_accel_batch_cancel(struct spdk_io_channel *ch, struct spdk_accel_batch *batch)
{
	struct accel_io_channel *accel_ch;

	accel_ch = spdk_io_channel_get_ctx(ch);

	/* Delegate to whichever engine backs this channel. */
	return accel_ch->engine->batch_cancel(accel_ch->ch, batch);
}
/* Accel framework public API for batch prep_copy function. All engines are
* required to implement this API.
*/
@ -791,6 +802,27 @@ sw_accel_batch_prep_crc32c(struct spdk_io_channel *ch, struct spdk_accel_batch *
return 0;
}
static int
sw_accel_batch_cancel(struct spdk_io_channel *ch, struct spdk_accel_batch *batch)
{
struct sw_accel_op *op;
struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
if ((struct spdk_accel_batch *)&sw_ch->batch != batch) {
SPDK_ERRLOG("Invalid batch\n");
return -EINVAL;
}
/* Cancel the batch items by moving them back to the op_pool. */
while ((op = TAILQ_FIRST(&sw_ch->batch))) {
TAILQ_REMOVE(&sw_ch->batch, op, link);
TAILQ_INSERT_TAIL(&sw_ch->op_pool, op, link);
}
return 0;
}
static int
sw_accel_batch_submit(struct spdk_io_channel *ch, struct spdk_accel_batch *batch,
spdk_accel_completion_cb cb_fn, void *cb_arg)
@ -927,6 +959,7 @@ static struct spdk_accel_engine sw_accel_engine = {
.dualcast = sw_accel_submit_dualcast,
.batch_get_max = sw_accel_batch_get_max,
.batch_create = sw_accel_batch_start,
.batch_cancel = sw_accel_batch_cancel,
.batch_prep_copy = sw_accel_batch_prep_copy,
.batch_prep_dualcast = sw_accel_batch_prep_dualcast,
.batch_prep_compare = sw_accel_batch_prep_compare,

View File

@ -16,6 +16,7 @@
spdk_accel_batch_prep_fill;
spdk_accel_batch_prep_crc32c;
spdk_accel_batch_submit;
spdk_accel_batch_cancel;
spdk_accel_submit_copy;
spdk_accel_submit_dualcast;
spdk_accel_submit_compare;

View File

@ -926,6 +926,26 @@ _does_batch_exist(struct idxd_batch *batch, struct spdk_idxd_io_channel *chan)
return found;
}
/*
 * Cancel a batch that was created on this channel but not yet submitted
 * to the hardware.  The batch's user ring slot is released and the batch
 * object is returned to the channel's free pool.
 *
 * Returns 0 on success, -EINVAL if the batch is unknown to this channel
 * or has already been submitted (remaining > 0).
 */
int
spdk_idxd_batch_cancel(struct spdk_idxd_io_channel *chan, struct idxd_batch *batch)
{
	if (_does_batch_exist(batch, chan) == false) {
		/* Fixed: newline belongs at the end of the message, not before a stray '.' */
		SPDK_ERRLOG("Attempt to cancel a batch that doesn't exist\n");
		return -EINVAL;
	}

	if (batch->remaining > 0) {
		SPDK_ERRLOG("Cannot cancel batch, already submitted to HW\n");
		return -EINVAL;
	}

	TAILQ_REMOVE(&chan->batches, batch, link);
	spdk_bit_array_clear(chan->ring_ctrl.user_ring_slots, batch->batch_num);
	TAILQ_INSERT_TAIL(&chan->batch_pool, batch, link);

	return 0;
}
int
spdk_idxd_batch_submit(struct spdk_idxd_io_channel *chan, struct idxd_batch *batch,
spdk_idxd_req_cb cb_fn, void *cb_arg)

View File

@ -13,6 +13,7 @@
spdk_idxd_batch_prep_compare;
spdk_idxd_batch_submit;
spdk_idxd_batch_create;
spdk_idxd_batch_cancel;
spdk_idxd_batch_get_max;
spdk_idxd_set_config;
spdk_idxd_submit_compare;

View File

@ -443,6 +443,15 @@ idxd_batch_start(struct spdk_io_channel *ch)
return (struct spdk_accel_batch *)spdk_idxd_batch_create(chan->chan);
}
/* Engine callback: forward the cancel request to the low-level IDXD library. */
static int
idxd_batch_cancel(struct spdk_io_channel *ch, struct spdk_accel_batch *_batch)
{
	struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);

	return spdk_idxd_batch_cancel(chan->chan, (struct idxd_batch *)_batch);
}
static int
idxd_batch_submit(struct spdk_io_channel *ch, struct spdk_accel_batch *_batch,
spdk_accel_completion_cb cb_fn, void *cb_arg)
@ -561,6 +570,7 @@ static struct spdk_accel_engine idxd_accel_engine = {
.copy = idxd_submit_copy,
.batch_get_max = idxd_batch_get_max,
.batch_create = idxd_batch_start,
.batch_cancel = idxd_batch_cancel,
.batch_prep_copy = idxd_batch_prep_copy,
.batch_prep_fill = idxd_batch_prep_fill,
.batch_prep_dualcast = idxd_batch_prep_dualcast,

View File

@ -390,6 +390,30 @@ ioat_batch_prep_crc32c(struct spdk_io_channel *ch,
return 0;
}
static int
ioat_batch_cancel(struct spdk_io_channel *ch, struct spdk_accel_batch *batch)
{
struct ioat_accel_op *op;
struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
if ((struct spdk_accel_batch *)&ioat_ch->hw_batch != batch) {
SPDK_ERRLOG("Invalid batch\n");
return -EINVAL;
}
/* Flush the batched HW items, there's no way to cancel these without resetting. */
spdk_ioat_flush(ioat_ch->ioat_ch);
ioat_ch->hw_batch = false;
/* Return batched software items to the pool. */
while ((op = TAILQ_FIRST(&ioat_ch->sw_batch))) {
TAILQ_REMOVE(&ioat_ch->sw_batch, op, link);
TAILQ_INSERT_TAIL(&ioat_ch->op_pool, op, link);
}
return 0;
}
static int
ioat_batch_submit(struct spdk_io_channel *ch, struct spdk_accel_batch *batch,
spdk_accel_completion_cb cb_fn, void *cb_arg)
@ -449,6 +473,7 @@ static struct spdk_accel_engine ioat_accel_engine = {
.fill = ioat_submit_fill,
.batch_get_max = ioat_batch_get_max,
.batch_create = ioat_batch_create,
.batch_cancel = ioat_batch_cancel,
.batch_prep_copy = ioat_batch_prep_copy,
.batch_prep_dualcast = ioat_batch_prep_dualcast,
.batch_prep_compare = ioat_batch_prep_compare,