blob: defer calling unload callback until dev is destroyed
Fixes a condition where blobstore was prematurely calling the application callback on spdk_bs_unload(); if the application reacted too quickly, memory associated with the blobstore's internal channels could be touched after it had been freed. The unload completion is now stashed in the blobstore and invoked only after the bs dev has been destroyed.

To avoid requiring application changes because of how g_devlist_mutex is held, the mutex is no longer held while calling _spdk_io_device_attempt_free(): the app unload CB is called from that function and may want to call spdk_io_device_unregister() from its unload CB. The lock is now taken and released strictly around the list it is protecting, which allows the CB made from _spdk_io_device_attempt_free() to be called without issue.

Change-Id: Ib451cfe6b33ea0c3f9e66c86785316f9d88837c7
Signed-off-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-on: https://review.gerrithub.io/377872
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
parent 4affd37f2b
commit 130d278adf
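To make the shape of the fix easier to follow before reading the hunks, here is a minimal, self-contained C sketch of the deferral pattern: the unload path stashes the completion callback and error code instead of calling them, and the final free path invokes them once the device is gone. All names below (fake_bs, deferred_cpl, fake_bs_unload, fake_bs_free, app_unload_done) are invented for illustration and are not SPDK APIs.

/*
 * Minimal standalone sketch of the "defer the completion" pattern.
 * The types and names are simplified stand-ins, not the real SPDK structures.
 */
#include <stdio.h>
#include <stddef.h>

typedef void (*unload_cb_fn)(void *cb_arg, int bserrno);

enum cpl_type { CPL_TYPE_NONE, CPL_TYPE_BASIC };

struct deferred_cpl {
        enum cpl_type type;
        unload_cb_fn cb;
        void *cb_arg;
};

struct fake_bs {
        struct deferred_cpl unload_cpl; /* stashed at unload time */
        int unload_err;                 /* stashed status code */
};

/* Unload path: instead of calling the callback now, tuck it away. */
static void
fake_bs_unload(struct fake_bs *bs, unload_cb_fn cb, void *cb_arg, int bserrno)
{
        bs->unload_cpl.type = CPL_TYPE_BASIC;
        bs->unload_cpl.cb = cb;
        bs->unload_cpl.cb_arg = cb_arg;
        bs->unload_err = bserrno;
        /* ... metadata would be written out and channels released here ... */
}

/* Final teardown path: only now is it safe to tell the application. */
static void
fake_bs_free(struct fake_bs *bs)
{
        /* A NONE type means nothing was deferred, so this is a nop. */
        if (bs->unload_cpl.type != CPL_TYPE_NONE) {
                bs->unload_cpl.cb(bs->unload_cpl.cb_arg, bs->unload_err);
                bs->unload_cpl.type = CPL_TYPE_NONE;
        }
}

static void
app_unload_done(void *cb_arg, int bserrno)
{
        (void)cb_arg;
        printf("unload complete, rc=%d\n", bserrno);
}

int
main(void)
{
        struct fake_bs bs = { .unload_cpl = { .type = CPL_TYPE_NONE } };

        fake_bs_unload(&bs, app_unload_done, NULL, 0);
        fake_bs_free(&bs); /* the application callback fires only here */
        return 0;
}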
@@ -44,7 +44,6 @@
 #include "spdk_internal/log.h"
 
 #include "blobstore.h"
-#include "request.h"
 
 #define BLOB_CRC32C_INITIAL 0xffffffffUL
 
@@ -1342,6 +1341,12 @@ _spdk_bs_dev_destroy(void *io_device)
 
         spdk_bit_array_free(&bs->used_md_pages);
         spdk_bit_array_free(&bs->used_clusters);
+        /*
+         * If this function is called for any reason except a successful unload,
+         * the unload_cpl type will be NONE and this will be a nop.
+         */
+        spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err);
+
         free(bs);
 }
 
@@ -1833,6 +1838,14 @@ _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 
         spdk_dma_free(ctx->super);
 
+        /*
+         * We need to defer calling spdk_bs_call_cpl() until after
+         * dev destruction, so tuck these away for later use.
+         */
+        ctx->bs->unload_err = bserrno;
+        memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
+        seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
+
         spdk_bs_sequence_finish(seq, bserrno);
 
         _spdk_bs_free(ctx->bs);
@@ -39,6 +39,8 @@
 #include "spdk/queue.h"
 #include "spdk/util.h"
 
+#include "request.h"
+
 /* In Memory Data Structures
  *
  * The following data structures exist only in memory.
@@ -156,6 +158,9 @@ struct spdk_blob_store {
 
         spdk_blob_id super_blob;
 
+        struct spdk_bs_cpl unload_cpl;
+        int unload_err;
+
         TAILQ_HEAD(, spdk_blob) blobs;
 };
 
@@ -73,6 +73,9 @@ spdk_bs_call_cpl(struct spdk_bs_cpl *cpl, int bserrno)
                                  cpl->u.nested_seq.parent,
                                  bserrno);
                 break;
+        case SPDK_BS_CPL_TYPE_NONE:
+                /* this completion's callback is handled elsewhere */
+                break;
         }
 }
 
@@ -39,6 +39,7 @@
 #include "spdk/blob.h"
 
 enum spdk_bs_cpl_type {
+        SPDK_BS_CPL_TYPE_NONE,
         SPDK_BS_CPL_TYPE_BS_BASIC,
         SPDK_BS_CPL_TYPE_BS_HANDLE,
         SPDK_BS_CPL_TYPE_BLOB_BASIC,
@@ -241,6 +241,7 @@ _spdk_io_device_attempt_free(struct io_device *dev)
         struct spdk_thread *thread;
         struct spdk_io_channel *ch;
 
+        pthread_mutex_lock(&g_devlist_mutex);
         TAILQ_FOREACH(thread, &g_threads, tailq) {
                 TAILQ_FOREACH(ch, &thread->io_channels, tailq) {
                         if (ch->dev == dev) {
@@ -248,10 +249,12 @@ _spdk_io_device_attempt_free(struct io_device *dev)
                                  * device still exists. Defer deletion
                                  * until it is removed.
                                  */
+                                pthread_mutex_unlock(&g_devlist_mutex);
                                 return;
                         }
                 }
         }
+        pthread_mutex_unlock(&g_devlist_mutex);
 
         if (dev->unregister_cb) {
                 dev->unregister_cb(dev->io_device);
@@ -287,9 +290,8 @@ spdk_io_device_unregister(void *io_device, spdk_io_device_unregister_cb unregister_cb)
         dev->unregister_cb = unregister_cb;
         dev->unregistered = true;
         TAILQ_REMOVE(&g_io_devices, dev, tailq);
-        _spdk_io_device_attempt_free(dev);
-
         pthread_mutex_unlock(&g_devlist_mutex);
+        _spdk_io_device_attempt_free(dev);
 }
 
 struct spdk_io_channel *
@@ -377,15 +379,13 @@ _spdk_put_io_channel(void *arg)
         ch->destroy_cb(ch->dev->io_device, spdk_io_channel_get_ctx(ch));
 
         pthread_mutex_lock(&g_devlist_mutex);
-
         TAILQ_REMOVE(&ch->thread->io_channels, ch, tailq);
+        pthread_mutex_unlock(&g_devlist_mutex);
 
         if (ch->dev->unregistered) {
                 _spdk_io_device_attempt_free(ch->dev);
         }
         free(ch);
-
-        pthread_mutex_unlock(&g_devlist_mutex);
 }
 
 void
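The io_channel hunks above all apply the locking rule described in the commit message: g_devlist_mutex is held strictly around the list it protects, and callbacks run only after it has been released so they may re-enter the channel API. Below is a small, self-contained sketch of that discipline under invented names (g_list_lock, g_items, remove_and_notify, on_removed); it is not SPDK code, only the pattern.

/*
 * Standalone sketch: hold the mutex only while modifying the shared list,
 * and invoke the user callback only after dropping it, so the callback can
 * call back into list APIs without deadlocking.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct item {
        void (*cb)(void *ctx);
        void *ctx;
        TAILQ_ENTRY(item) link;
};

static TAILQ_HEAD(, item) g_items = TAILQ_HEAD_INITIALIZER(g_items);
static pthread_mutex_t g_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void
remove_and_notify(struct item *it)
{
        /* Lock held strictly around the list manipulation... */
        pthread_mutex_lock(&g_list_lock);
        TAILQ_REMOVE(&g_items, it, link);
        pthread_mutex_unlock(&g_list_lock);

        /* ...so the callback may safely re-enter the list API. */
        if (it->cb) {
                it->cb(it->ctx);
        }
        free(it);
}

static void
on_removed(void *ctx)
{
        printf("item %s removed\n", (const char *)ctx);
}

int
main(void)
{
        static char name[] = "demo";
        struct item *it = calloc(1, sizeof(*it));

        if (it == NULL) {
                return 1;
        }
        it->cb = on_removed;
        it->ctx = name;

        pthread_mutex_lock(&g_list_lock);
        TAILQ_INSERT_TAIL(&g_items, it, link);
        pthread_mutex_unlock(&g_list_lock);

        remove_and_notify(it);
        return 0;
}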
@@ -939,12 +939,10 @@ bs_load(void)
 }
 
 /*
- * Create a blobstore and then unload it, while delaying all scheduled operations
- * until after spdk_bs_unload call has finished. This ensures the memory associated
- * with the internal blobstore channels is not touched after it is freed.
+ * Create a blobstore and then unload it.
  */
 static void
-bs_unload_delayed(void)
+bs_unload(void)
 {
         struct spdk_bs_dev *dev;
 
@@ -954,17 +952,10 @@ bs_unload_delayed(void)
         CU_ASSERT(g_bserrno == 0);
         SPDK_CU_ASSERT_FATAL(g_bs != NULL);
 
-        g_scheduler_delay = true;
-
         g_bserrno = -1;
         spdk_bs_unload(g_bs, bs_op_complete, NULL);
         CU_ASSERT(g_bserrno == 0);
         g_bs = NULL;
-
-        _bs_flush_scheduler();
-        CU_ASSERT(TAILQ_EMPTY(&g_scheduled_ops));
-
-        g_scheduler_delay = false;
 }
 
 /*
@@ -1295,7 +1286,7 @@ int main(int argc, char **argv)
                 CU_add_test(suite, "blob_iter", blob_iter) == NULL ||
                 CU_add_test(suite, "blob_xattr", blob_xattr) == NULL ||
                 CU_add_test(suite, "bs_load", bs_load) == NULL ||
-                CU_add_test(suite, "bs_unload_delayed", bs_unload_delayed) == NULL ||
+                CU_add_test(suite, "bs_unload", bs_unload) == NULL ||
                 CU_add_test(suite, "bs_cluster_sz", bs_cluster_sz) == NULL ||
                 CU_add_test(suite, "bs_resize_md", bs_resize_md) == NULL ||
                 CU_add_test(suite, "blob_serialize", blob_serialize) == NULL ||
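For readers unfamiliar with the test harness touched by the last hunk, here is a minimal, self-contained CUnit program showing how CU_add_test() binds a test name string to a test function, which is all the rename above changes. The suite name, test function, and assertion are invented for illustration.

#include <CUnit/Basic.h>

/* Illustrative test function; stands in for bs_unload() in the real suite. */
static void
example_test(void)
{
        CU_ASSERT(1 + 1 == 2);
}

int
main(void)
{
        CU_pSuite suite;
        unsigned int num_failures;

        if (CU_initialize_registry() != CUE_SUCCESS) {
                return CU_get_error();
        }

        suite = CU_add_suite("example", NULL, NULL);
        if (suite == NULL ||
            /* Renaming a test means changing this name/function pair. */
            CU_add_test(suite, "example_test", example_test) == NULL) {
                CU_cleanup_registry();
                return CU_get_error();
        }

        CU_basic_set_mode(CU_BRM_VERBOSE);
        CU_basic_run_tests();
        num_failures = CU_get_number_of_failures();
        CU_cleanup_registry();
        return num_failures;
}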