bdev: Make the QoS channel use a regular channel

This makes a number of things much simpler and eliminates
the need for the upcoming proposed reset handling.

Change-Id: I23a6badd0873f6dcf38ba1e55bf18d846c2843df
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/407357
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Ben Walker, 2018-04-06 10:53:52 -07:00 (committed by Jim Harris)
parent f0191857a5
commit aac1f5f934
2 changed files with 126 additions and 184 deletions
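
In short, the patch removes the hidden, separately allocated QoS channel: the first regular I/O channel created for a rate-limited bdev is adopted as the QoS channel (an extra reference is taken via spdk_get_io_channel() so it outlives its creator), and the QoS poller runs on that channel's thread. Below is a minimal caller-side sketch of the resulting lifecycle; the helper name use_bdev_with_qos is hypothetical, but the SPDK calls match the public API exercised in the diffs that follow.

#include <errno.h>
#include "spdk/bdev.h"
#include "spdk/io_channel.h"

/* Hypothetical helper illustrating the lifecycle this commit simplifies. */
static int
use_bdev_with_qos(struct spdk_bdev *bdev)
{
    struct spdk_bdev_desc *desc;
    struct spdk_io_channel *ch;
    int rc;

    rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
    if (rc != 0) {
        return rc;
    }

    /* If rate limiting is configured, the first channel created for the
     * bdev is adopted as the QoS channel and the QoS poller starts on the
     * calling thread -- no private QoS-only channel is allocated anymore. */
    ch = spdk_bdev_get_io_channel(desc);
    if (ch == NULL) {
        spdk_bdev_close(desc);
        return -ENOMEM;
    }

    /* ... submit I/O here; QoS queues and paces it per timeslice ... */

    spdk_put_io_channel(ch);

    /* Closing the last descriptor now also shuts QoS down
     * (see the new logic in spdk_bdev_close() below). */
    spdk_bdev_close(desc);
    return 0;
}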


@@ -999,10 +999,77 @@ spdk_bdev_channel_poll_qos(void *arg)
return -1;
}
static void
_spdk_bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
{
struct spdk_bdev_shared_resource *shared_resource;
if (!ch) {
return;
}
if (ch->channel) {
spdk_put_io_channel(ch->channel);
}
assert(ch->io_outstanding == 0);
shared_resource = ch->shared_resource;
if (shared_resource) {
assert(ch->io_outstanding == 0);
assert(shared_resource->ref > 0);
shared_resource->ref--;
if (shared_resource->ref == 0) {
assert(shared_resource->io_outstanding == 0);
spdk_put_io_channel(spdk_io_channel_from_ctx(shared_resource->mgmt_ch));
TAILQ_REMOVE(&shared_resource->mgmt_ch->shared_resources, shared_resource, link);
free(shared_resource);
}
}
}
/* Caller must hold bdev->mutex. */
static int
-_spdk_bdev_channel_create(struct spdk_bdev_channel *ch, void *io_device)
+_spdk_bdev_enable_qos(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch)
{
struct spdk_bdev_qos *qos = bdev->qos;
/* Rate limiting on this bdev enabled */
if (qos) {
if (qos->ch == NULL) {
struct spdk_io_channel *io_ch;
SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Selecting channel %p as QoS channel for bdev %s on thread %p\n", ch,
bdev->name, spdk_get_thread());
/* No qos channel has been selected, so set one up */
/* Take another reference to ch */
io_ch = spdk_get_io_channel(__bdev_to_io_dev(bdev));
qos->ch = ch;
qos->thread = spdk_io_channel_get_thread(io_ch);
TAILQ_INIT(&qos->queued);
spdk_bdev_qos_update_max_ios_per_timeslice(qos);
qos->io_submitted_this_timeslice = 0;
qos->poller = spdk_poller_register(spdk_bdev_channel_poll_qos,
qos,
SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
}
ch->flags |= BDEV_CH_QOS_ENABLED;
}
return 0;
}
static int
spdk_bdev_channel_create(void *io_device, void *ctx_buf)
{
struct spdk_bdev *bdev = __bdev_from_io_dev(io_device);
struct spdk_bdev_channel *ch = ctx_buf;
struct spdk_io_channel *mgmt_io_ch;
struct spdk_bdev_mgmt_channel *mgmt_ch;
struct spdk_bdev_shared_resource *shared_resource;
@@ -1049,104 +1116,6 @@ _spdk_bdev_channel_create(struct spdk_bdev_channel *ch, void *io_device)
ch->flags = 0;
ch->shared_resource = shared_resource;
return 0;
}
static void
_spdk_bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
{
struct spdk_bdev_shared_resource *shared_resource;
if (!ch) {
return;
}
if (ch->channel) {
spdk_put_io_channel(ch->channel);
}
assert(ch->io_outstanding == 0);
shared_resource = ch->shared_resource;
if (shared_resource) {
assert(ch->io_outstanding == 0);
assert(shared_resource->ref > 0);
shared_resource->ref--;
if (shared_resource->ref == 0) {
assert(shared_resource->io_outstanding == 0);
spdk_put_io_channel(spdk_io_channel_from_ctx(shared_resource->mgmt_ch));
TAILQ_REMOVE(&shared_resource->mgmt_ch->shared_resources, shared_resource, link);
free(shared_resource);
}
}
}
/* Caller must hold bdev->mutex. */
static int
spdk_bdev_qos_channel_create(struct spdk_bdev *bdev)
{
assert(bdev->qos->ch == NULL);
assert(bdev->qos->thread == NULL);
bdev->qos->ch = calloc(1, sizeof(struct spdk_bdev_channel));
if (!bdev->qos->ch) {
return -1;
}
bdev->qos->thread = spdk_get_thread();
if (!bdev->qos->thread) {
free(bdev->qos->ch);
bdev->qos->ch = NULL;
return -1;
}
if (_spdk_bdev_channel_create(bdev->qos->ch, __bdev_to_io_dev(bdev)) != 0) {
free(bdev->qos->ch);
bdev->qos->ch = NULL;
bdev->qos->thread = NULL;
return -1;
}
TAILQ_INIT(&bdev->qos->queued);
bdev->qos->ch->flags |= BDEV_CH_QOS_ENABLED;
spdk_bdev_qos_update_max_ios_per_timeslice(bdev->qos);
bdev->qos->poller = spdk_poller_register(spdk_bdev_channel_poll_qos,
bdev->qos,
SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
return 0;
}
/* Caller must hold bdev->mutex */
static int
_spdk_bdev_enable_qos(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch)
{
/* Rate limiting on this bdev enabled */
if (bdev->qos) {
if (bdev->qos->ch == NULL) {
if (spdk_bdev_qos_channel_create(bdev) != 0) {
return -1;
}
}
ch->flags |= BDEV_CH_QOS_ENABLED;
}
return 0;
}
static int
spdk_bdev_channel_create(void *io_device, void *ctx_buf)
{
struct spdk_bdev *bdev = __bdev_from_io_dev(io_device);
struct spdk_bdev_channel *ch = ctx_buf;
if (_spdk_bdev_channel_create(ch, io_device) != 0) {
_spdk_bdev_channel_destroy_resource(ch);
return -1;
}
#ifdef SPDK_CONFIG_VTUNE
{
char *name;
@@ -1171,8 +1140,6 @@ spdk_bdev_channel_create(void *io_device, void *ctx_buf)
return -1;
}
-bdev->channel_count++;
pthread_mutex_unlock(&bdev->mutex);
return 0;
@@ -1230,32 +1197,16 @@ _spdk_bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
}
}
static void
_spdk_bdev_channel_destroy(struct spdk_bdev_channel *ch)
{
struct spdk_bdev_mgmt_channel *mgmt_ch;
struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
mgmt_ch = shared_resource->mgmt_ch;
_spdk_bdev_abort_queued_io(&ch->queued_resets, ch);
_spdk_bdev_abort_queued_io(&shared_resource->nomem_io, ch);
_spdk_bdev_abort_buf_io(&mgmt_ch->need_buf_small, ch);
_spdk_bdev_abort_buf_io(&mgmt_ch->need_buf_large, ch);
_spdk_bdev_channel_destroy_resource(ch);
}
static void
spdk_bdev_qos_channel_destroy(void *cb_arg)
{
struct spdk_bdev_qos *qos = cb_arg;
-_spdk_bdev_channel_destroy(qos->ch);
+spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
spdk_poller_unregister(&qos->poller);
-free(qos->ch);
SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Free QoS %p.\n", qos);
free(qos);
}
@@ -1264,8 +1215,9 @@ spdk_bdev_qos_destroy(struct spdk_bdev *bdev)
{
/*
* Cleanly shutting down the QoS poller is tricky, because
-* during the asynchronous operation the user could open a
-* new channel, spawning a new QoS poller.
+* during the asynchronous operation the user could open
+* a new descriptor and create a new channel, spawning
+* a new QoS poller.
*
* The strategy is to create a new QoS structure here and swap it
* in. The shutdown path then continues to refer to the old one
@@ -1308,21 +1260,20 @@ static void
spdk_bdev_channel_destroy(void *io_device, void *ctx_buf)
{
struct spdk_bdev_channel *ch = ctx_buf;
-struct spdk_bdev *bdev = ch->bdev;
+struct spdk_bdev_mgmt_channel *mgmt_ch;
+struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
-_spdk_bdev_channel_destroy(ch);
+SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Destroying channel %p for bdev %s on thread %p\n", ch, ch->bdev->name,
+spdk_get_thread());
-pthread_mutex_lock(&bdev->mutex);
-bdev->channel_count--;
-if (bdev->channel_count == 0 && bdev->qos && bdev->qos->ch != NULL) {
-if (spdk_bdev_qos_destroy(bdev)) {
-/* There isn't anything we can do to recover from here. Just let the
-* old QoS poller keep running. The QoS handling won't change
-* cores when the user allocates a new channel, but it won't break. */
-SPDK_ERRLOG("Unable to shut down QoS poller. It will continue running on the current thread.\n");
-}
-}
-pthread_mutex_unlock(&bdev->mutex);
+mgmt_ch = shared_resource->mgmt_ch;
+_spdk_bdev_abort_queued_io(&ch->queued_resets, ch);
+_spdk_bdev_abort_queued_io(&shared_resource->nomem_io, ch);
+_spdk_bdev_abort_buf_io(&mgmt_ch->need_buf_small, ch);
+_spdk_bdev_abort_buf_io(&mgmt_ch->need_buf_large, ch);
+_spdk_bdev_channel_destroy_resource(ch);
}
int
@@ -1909,6 +1860,9 @@ _spdk_bdev_reset_freeze_channel(struct spdk_io_channel_iter *i)
struct spdk_bdev_channel *channel;
struct spdk_bdev_mgmt_channel *mgmt_channel;
struct spdk_bdev_shared_resource *shared_resource;
bdev_io_tailq_t tmp_queued;
TAILQ_INIT(&tmp_queued);
ch = spdk_io_channel_iter_get_channel(i);
channel = spdk_io_channel_get_ctx(ch);
@@ -1917,34 +1871,26 @@ _spdk_bdev_reset_freeze_channel(struct spdk_io_channel_iter *i)
channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
if ((channel->flags & BDEV_CH_QOS_ENABLED) != 0) {
/* The QoS object is always valid and readable while
* the channel flag is set, so the lock here should not
* be necessary. We're not in the fast path though, so
* just take it anyway. */
pthread_mutex_lock(&channel->bdev->mutex);
if (channel->bdev->qos->ch == channel) {
TAILQ_SWAP(&channel->bdev->qos->queued, &tmp_queued, spdk_bdev_io, link);
}
pthread_mutex_unlock(&channel->bdev->mutex);
}
_spdk_bdev_abort_queued_io(&shared_resource->nomem_io, channel);
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_small, channel);
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_large, channel);
_spdk_bdev_abort_queued_io(&tmp_queued, channel);
spdk_for_each_channel_continue(i, 0);
}
static void
_spdk_bdev_reset_freeze_qos_channel(void *ctx)
{
struct spdk_bdev *bdev = ctx;
struct spdk_bdev_mgmt_channel *mgmt_channel = NULL;
struct spdk_bdev_channel *qos_channel = bdev->qos->ch;
struct spdk_bdev_shared_resource *shared_resource = NULL;
if (qos_channel) {
shared_resource = qos_channel->shared_resource;
mgmt_channel = shared_resource->mgmt_ch;
qos_channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
_spdk_bdev_abort_queued_io(&shared_resource->nomem_io, qos_channel);
_spdk_bdev_abort_queued_io(&bdev->qos->queued, qos_channel);
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_small, qos_channel);
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_large, qos_channel);
}
}
static void
_spdk_bdev_start_reset(void *ctx)
{
@@ -2002,12 +1948,6 @@ spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
_spdk_bdev_channel_start_reset(channel);
-/* Explicitly handle the QoS bdev channel as no IO channel associated */
-if (bdev->qos && bdev->qos->thread) {
-spdk_thread_send_msg(bdev->qos->thread,
-_spdk_bdev_reset_freeze_qos_channel, bdev);
-}
return 0;
}
@@ -2256,17 +2196,6 @@ _spdk_bdev_io_complete(void *ctx)
bdev_io->caller_ctx);
}
static void
_spdk_bdev_unfreeze_qos_channel(void *ctx)
{
struct spdk_bdev *bdev = ctx;
if (bdev->qos->ch) {
bdev->qos->ch->flags &= ~BDEV_CH_RESET_IN_PROGRESS;
assert(TAILQ_EMPTY(&bdev->qos->ch->queued_resets));
}
}
static void
_spdk_bdev_reset_complete(struct spdk_io_channel_iter *i, int status)
{
@@ -2317,12 +2246,6 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
pthread_mutex_unlock(&bdev->mutex);
if (unlock_channels) {
-/* Explicitly handle the QoS bdev channel as no IO channel associated */
-if (bdev->qos && bdev->qos->thread) {
-spdk_thread_send_msg(bdev->qos->thread,
-_spdk_bdev_unfreeze_qos_channel, bdev);
-}
spdk_for_each_channel(__bdev_to_io_dev(bdev), _spdk_bdev_unfreeze_channel,
bdev_io, _spdk_bdev_reset_complete);
return;
@@ -2777,10 +2700,13 @@ spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_
return -ENOMEM;
}
SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Opening descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
spdk_get_thread());
pthread_mutex_lock(&bdev->mutex);
if (write && bdev->claim_module) {
-SPDK_INFOLOG(SPDK_LOG_BDEV, "Could not open %s - already claimed\n", bdev->name);
+SPDK_ERRLOG("Could not open %s - already claimed\n", bdev->name);
free(desc);
pthread_mutex_unlock(&bdev->mutex);
return -EPERM;
@@ -2805,11 +2731,27 @@ spdk_bdev_close(struct spdk_bdev_desc *desc)
struct spdk_bdev *bdev = desc->bdev;
bool do_unregister = false;
SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Closing descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
spdk_get_thread());
pthread_mutex_lock(&bdev->mutex);
TAILQ_REMOVE(&bdev->open_descs, desc, link);
free(desc);
/* If no more descriptors, kill QoS channel */
if (bdev->qos && TAILQ_EMPTY(&bdev->open_descs)) {
SPDK_DEBUGLOG(SPDK_LOG_BDEV, "Closed last descriptor for bdev %s on thread %p. Stopping QoS.\n",
bdev->name, spdk_get_thread());
if (spdk_bdev_qos_destroy(bdev)) {
/* There isn't anything we can do to recover here. Just let the
* old QoS poller keep running. The QoS handling won't change
* cores when the user allocates a new channel, but it won't break. */
SPDK_ERRLOG("Unable to shut down QoS poller. It will continue running on the current thread.\n");
}
}
if (bdev->status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->open_descs)) {
do_unregister = true;
}
@@ -2983,10 +2925,9 @@ _spdk_bdev_disable_qos_done(void *cb_arg)
pthread_mutex_unlock(&bdev->mutex);
_spdk_bdev_abort_queued_io(&qos->queued, qos->ch);
-_spdk_bdev_channel_destroy(qos->ch);
+spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
spdk_poller_unregister(&qos->poller);
-free(qos->ch);
free(qos);
_spdk_bdev_set_qos_limit_done(ctx, 0);
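
Note how the teardown paths above never touch QoS state from the wrong thread: spdk_bdev_qos_destroy() swaps a fresh QoS object into the bdev under bdev->mutex, then forwards destruction of the old one to the thread that owns its poller. A minimal sketch of that swap-and-forward pattern follows; the names old_qos_ctx, qos_destroy_on_owner_thread, and retire_old_qos are hypothetical, while spdk_thread_send_msg(), spdk_poller_unregister(), and spdk_put_io_channel() are the real SPDK primitives used above.

#include <stdlib.h>
#include "spdk/io_channel.h"

/* Hypothetical condensation of the pattern used by spdk_bdev_qos_destroy()
 * and spdk_bdev_qos_channel_destroy() above. */
struct old_qos_ctx {
    struct spdk_poller *poller;
    struct spdk_io_channel *qos_io_ch; /* extra reference taken when QoS was enabled */
};

static void
qos_destroy_on_owner_thread(void *arg)
{
    struct old_qos_ctx *old_qos = arg;

    /* Runs on the QoS thread, so it is safe to stop the poller here and
     * drop the channel reference that the enable path took. */
    spdk_poller_unregister(&old_qos->poller);
    spdk_put_io_channel(old_qos->qos_io_ch);
    free(old_qos);
}

static void
retire_old_qos(struct spdk_thread *qos_thread, struct old_qos_ctx *old_qos)
{
    /* The caller has already swapped a fresh QoS object into the bdev under
     * bdev->mutex, so nothing else can reach old_qos once this is sent. */
    spdk_thread_send_msg(qos_thread, qos_destroy_on_owner_thread, old_qos);
}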


@@ -684,6 +684,7 @@ basic_qos(void)
/* Close the descriptor, which should stop the qos channel */
spdk_bdev_close(g_desc);
poll_threads();
CU_ASSERT(bdev->qos->ch == NULL);
spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
@@ -699,8 +700,8 @@ basic_qos(void)
bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
-/* Confirm that the qos tracking was re-enabled */
-CU_ASSERT(bdev->qos->ch != NULL);
+/* Confirm that the qos thread is now thread 1 */
+CU_ASSERT(bdev->qos->ch == bdev_ch[1]);
/* Tear down the channels */
set_thread(0);
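
The updated assertions exercise this rebinding across simulated threads. A condensed, hypothetical variant of the re-enable check is sketched below: qos_rebind_check is not a real test, set_thread()/poll_threads() come from SPDK's unit-test scaffolding (test/common/lib/ut_multithread.c), and the internal names (struct spdk_bdev_channel, BDEV_CH_QOS_ENABLED, bdev->qos) are visible only because bdev_ut.c includes bdev.c directly.

#include "spdk_cunit.h"

/* Hypothetical condensation of the re-enable check in basic_qos() above:
 * once the last descriptor closes, QoS state is torn down; reopening and
 * creating a channel on another thread rebinds QoS to that channel. */
static void
qos_rebind_check(struct spdk_bdev *bdev)
{
    struct spdk_bdev_desc *desc = NULL;
    struct spdk_io_channel *io_ch;
    struct spdk_bdev_channel *bdev_ch;

    set_thread(1); /* run the following calls on simulated thread 1 */
    CU_ASSERT(spdk_bdev_open(bdev, true, NULL, NULL, &desc) == 0);
    io_ch = spdk_bdev_get_io_channel(desc);
    bdev_ch = spdk_io_channel_get_ctx(io_ch);

    /* The first channel created after reopening becomes the QoS channel. */
    CU_ASSERT(bdev_ch->flags == BDEV_CH_QOS_ENABLED);
    CU_ASSERT(bdev->qos->ch == bdev_ch);

    spdk_put_io_channel(io_ch);
    spdk_bdev_close(desc);
    poll_threads(); /* let the asynchronous teardown messages run */
}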