blob: Add a persistent, power-fail safe block allocator

This is the initial commit for the "blobstore", a lightweight,
highly parallel, persistent, power-fail safe block allocator.

Documentation will be added in future patches.

Change-Id: I20a4daf899f1215d396f7931c3ec9a2e2bb269d0
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Author: Ben Walker <benjamin.walker@intel.com>
Date:   2017-03-20 11:26:50 -07:00
Commit: d89352a95c (parent e2b330e989)

13 changed files with 4717 additions and 1 deletion

include/spdk/blob.h (new file)
@@ -0,0 +1,267 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* Blob Storage System
*
* The blob storage system, or the blobstore for short, is a low level
* library for placing opaque blobs of data onto a storage device such
* that scattered physical blocks on the storage device appear as a
* single, contiguous storage region. These blobs are also persistent,
* which means they are rediscoverable after reboot or power loss.
*
* The blobstore is designed to be very high performance, and thus has
* a few general rules regarding thread safety to avoid taking locks
* in the I/O path. Functions starting with the prefix "spdk_bs_md" must only
* be called from the metadata thread, of which there is only one at a time.
* The user application can declare which thread is the metadata thread by
* calling \ref spdk_bs_register_md_thread, but by default it is the thread
* that was used to create the blobstore initially. The metadata thread can
* be changed at run time by first unregistering
* (\ref spdk_bs_unregister_md_thread) and then re-registering. Registering
* a thread as the metadata thread is expensive and should be avoided.
*
* Functions starting with the prefix "spdk_bs_io" are passed a channel
* as an argument, and channels may only be used from the thread they were
* created on. See \ref spdk_bs_alloc_io_channel.
*
* Functions not starting with one of those two prefixes are thread safe
* and may be called from any thread at any time.
*
* The blob store returns errors using negated POSIX errno values, either
* returned in the callback or as a return value. An errno value of 0 means
* success.
*/
#ifndef SPDK_BLOB_H
#define SPDK_BLOB_H
#include <stddef.h>
#include <stdint.h>
typedef uint64_t spdk_blob_id;
#define SPDK_BLOBID_INVALID (uint64_t)-1
struct spdk_blob_store;
struct spdk_io_channel;
struct spdk_blob;
struct spdk_xattr_names;
typedef void (*spdk_bs_op_complete)(void *cb_arg, int bserrno);
typedef void (*spdk_bs_op_with_handle_complete)(void *cb_arg, struct spdk_blob_store *bs,
int bserrno);
typedef void (*spdk_blob_op_complete)(void *cb_arg, int bserrno);
typedef void (*spdk_blob_op_with_id_complete)(void *cb_arg, spdk_blob_id blobid, int bserrno);
typedef void (*spdk_blob_op_with_handle_complete)(void *cb_arg, struct spdk_blob *blb, int bserrno);
/* Calls to function pointers of this type must obey all of the normal
rules for channels. The channel passed to this completion must match
the channel the operation was initiated on. */
typedef void (*spdk_bs_dev_cpl)(struct spdk_io_channel *channel,
void *cb_arg, int bserrno);
struct spdk_bs_dev_cb_args {
spdk_bs_dev_cpl cb_fn;
struct spdk_io_channel *channel;
void *cb_arg;
/*
* Blobstore device implementations can use this for scratch space for any data
* structures needed to translate the function arguments to the required format
* for the backing store.
*/
uint8_t scratch[32];
};
struct spdk_bs_dev {
/* Create a new channel which is a software construct that is used
* to submit I/O. */
struct spdk_io_channel *(*create_channel)(struct spdk_bs_dev *dev);
/* Destroy a previously created channel */
void (*destroy_channel)(struct spdk_bs_dev *dev, struct spdk_io_channel *channel);
/* Destroy this blobstore device. Applications must not destroy the blobstore
* device directly; instead, the blobstore will destroy it via this function
* pointer once all references to it have been released, from the context of
* the unload callback.
*/
void (*destroy)(struct spdk_bs_dev *dev);
void (*read)(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args);
void (*write)(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args);
void (*flush)(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct spdk_bs_dev_cb_args *cb_args);
void (*unmap)(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args);
uint64_t blockcnt;
uint32_t blocklen; /* In bytes */
};
struct spdk_bs_opts {
uint32_t cluster_sz; /* In bytes. Must be multiple of page size. */
uint32_t num_md_pages; /* Count of the number of pages reserved for metadata */
uint32_t max_md_ops; /* Maximum simultaneous metadata operations */
};
/* Initialize an spdk_bs_opts structure to the default blobstore option values. */
void spdk_bs_opts_init(struct spdk_bs_opts *opts);
/* Load a blob store from the given device. If no blob store is present, the operation
* fails and the error is reported through the callback (with no valid blob store handle). */
void spdk_bs_load(struct spdk_bs_dev *dev,
spdk_bs_op_with_handle_complete cb_fn, void *cb_arg);
/* Initialize a blob store on the given disk. Destroys all data present on the device. */
void spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts,
spdk_bs_op_with_handle_complete cb_fn, void *cb_arg);
/* Flush all volatile data to disk and destroy in-memory structures. */
void spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg);
/* Set the given blob as the super blob. The super blob id is persisted with the
* blob store and can be retrieved with spdk_bs_get_super(), including immediately
* after the blob store is next loaded via spdk_bs_load().
*/
void spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
spdk_bs_op_complete cb_fn, void *cb_arg);
/* Open the super blob. */
void spdk_bs_get_super(struct spdk_blob_store *bs,
spdk_blob_op_with_id_complete cb_fn, void *cb_arg);
/* Get the cluster size in bytes. Blob sizes (e.g. for spdk_bs_md_resize_blob()) are expressed in clusters. */
uint64_t spdk_bs_get_cluster_size(struct spdk_blob_store *bs);
/* Get the page size in bytes. This is the write and read granularity of blobs. */
uint64_t spdk_bs_get_page_size(struct spdk_blob_store *bs);
/* Get the number of free clusters. */
uint64_t spdk_bs_free_cluster_count(struct spdk_blob_store *bs);
/* Register the current thread as the metadata thread. All functions beginning with
* the prefix "spdk_bs_md" must be called only from this thread.
*/
int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
/* Unregister the current thread as the metadata thread. This allows a different
* thread to be registered.
*/
int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
/* Return the blobid */
spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob);
/* Return the number of pages allocated to the blob */
uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob);
/* Return the number of clusters allocated to the blob */
uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob);
/* Create a new blob. A new blob has no clusters allocated initially; use spdk_bs_md_resize_blob() to allocate clusters. */
void spdk_bs_md_create_blob(struct spdk_blob_store *bs,
spdk_blob_op_with_id_complete cb_fn, void *cb_arg);
/* Delete an existing blob. */
void spdk_bs_md_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
spdk_blob_op_complete cb_fn, void *cb_arg);
/* Open a blob */
void spdk_bs_md_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
/* Resize a blob to 'sz' clusters.
*
* These changes are not persisted to disk until
* spdk_bs_md_sync_blob() is called. */
int spdk_bs_md_resize_blob(struct spdk_blob *blob, size_t sz);
/* Sync a blob.
*
* Make a blob persistent. This applies to open, resize, set xattr,
* and remove xattr. These operations will not be persistent until
* the blob has been synced.
*
* I/O operations (read/write) are synced independently. See
* spdk_bs_io_flush_channel().
*/
void spdk_bs_md_sync_blob(struct spdk_blob *blob,
spdk_blob_op_complete cb_fn, void *cb_arg);
/* Close a blob. This will automatically sync. */
void spdk_bs_md_close_blob(struct spdk_blob **blob, spdk_blob_op_complete cb_fn, void *cb_arg);
/* Allocate a channel for submitting I/O to the blob store. Channels may only be
* used from the thread on which they were allocated. */
struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs,
uint32_t priority, uint32_t max_ops);
/* Free a previously allocated I/O channel. */
void spdk_bs_free_io_channel(struct spdk_io_channel *channel);
/* Force all previously completed operations on this channel to be persistent. */
void spdk_bs_io_flush_channel(struct spdk_io_channel *channel,
spdk_blob_op_complete cb_fn, void *cb_arg);
/* Write data to a blob. Offset is in pages from the beginning of the blob. */
void spdk_bs_io_write_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
void *payload, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg);
/* Read data from a blob. Offset is in pages from the beginning of the blob. */
void spdk_bs_io_read_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
void *payload, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg);
/* Iterate through all blobs */
void spdk_bs_md_iter_first(struct spdk_blob_store *bs,
spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
void spdk_bs_md_iter_next(struct spdk_blob_store *bs, struct spdk_blob **blob,
spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
int spdk_blob_md_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
uint16_t value_len);
int spdk_blob_md_remove_xattr(struct spdk_blob *blob, const char *name);
int spdk_bs_md_get_xattr_value(struct spdk_blob *blob, const char *name,
const void **value, size_t *value_len);
int spdk_bs_md_get_xattr_names(struct spdk_blob *blob,
struct spdk_xattr_names **names);
uint32_t spdk_xattr_names_get_count(struct spdk_xattr_names *names);
const char *spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index);
void spdk_xattr_names_free(struct spdk_xattr_names *names);
#endif /* SPDK_BLOB_H */
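
The struct spdk_bs_dev interface above is the blobstore's only view of storage, so it is worth seeing how small a backend can be. What follows is a rough sketch of a memory-backed device that completes every callback inline; the ut_dev_* names, buffer size, and dummy channel pointer are all hypothetical (the unit tests later in this commit pull in a comparable RAM-backed device from bs_dev_common.c), and a real backend would complete operations asynchronously from its own I/O completion path.

#include <stdlib.h>
#include <string.h>
#include "spdk/blob.h"

#define UT_DEV_BLOCKLEN 4096
#define UT_DEV_BLOCKCNT (16 * 1024)	/* 64 MiB backing buffer */

static uint8_t *g_dev_buffer;

static struct spdk_io_channel *
ut_dev_create_channel(struct spdk_bs_dev *dev)
{
	/* A real device would allocate a per-thread channel; this sketch only
	 * needs a non-NULL token because it never dereferences the channel. */
	return (struct spdk_io_channel *)0x1;
}

static void
ut_dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
}

static void
ut_dev_destroy(struct spdk_bs_dev *dev)
{
	free(g_dev_buffer);
}

static void
ut_dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	    uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
{
	memcpy(payload, &g_dev_buffer[lba * dev->blocklen], lba_count * dev->blocklen);
	/* Complete inline; the channel passed back must match the one in cb_args. */
	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

static void
ut_dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	     uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
{
	memcpy(&g_dev_buffer[lba * dev->blocklen], payload, lba_count * dev->blocklen);
	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

static void
ut_dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	     struct spdk_bs_dev_cb_args *cb_args)
{
	/* Nothing to flush for a memory-backed device. */
	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

static void
ut_dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	     uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
{
	memset(&g_dev_buffer[lba * dev->blocklen], 0, lba_count * dev->blocklen);
	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

static void
ut_init_dev(struct spdk_bs_dev *dev)
{
	g_dev_buffer = calloc(UT_DEV_BLOCKCNT, UT_DEV_BLOCKLEN);
	dev->create_channel = ut_dev_create_channel;
	dev->destroy_channel = ut_dev_destroy_channel;
	dev->destroy = ut_dev_destroy;
	dev->read = ut_dev_read;
	dev->write = ut_dev_write;
	dev->flush = ut_dev_flush;
	dev->unmap = ut_dev_unmap;
	dev->blockcnt = UT_DEV_BLOCKCNT;
	dev->blocklen = UT_DEV_BLOCKLEN;
}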

lib/blob/Makefile (new file)
@@ -0,0 +1,41 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
CFLAGS += $(ENV_CFLAGS)
C_SRCS = blobstore.c request.c
LIBNAME = blob
include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk
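
With the library building as above, here is a sketch of the typical metadata-thread flow through the public API declared in include/spdk/blob.h: initialize a blobstore on a device, create and open a blob, resize it, sync the metadata, then close and unload. It mirrors the unit tests at the end of this commit. The global completion variables follow the same pattern the tests use, the backing struct spdk_bs_dev is assumed to come from something like the device sketch above, and for brevity each call is written as if the previous callback had already completed, which only holds for an inline-completing device.

#include "spdk/blob.h"

static struct spdk_blob_store *g_bs;
static struct spdk_blob *g_blob;
static spdk_blob_id g_blobid;
static int g_bserrno;

static void
bs_handle_cb(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_id_cb(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_handle_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	g_blob = blob;
	g_bserrno = bserrno;
}

static void
op_cb(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
blobstore_example(struct spdk_bs_dev *dev)
{
	/* Initialize a fresh blobstore; this destroys any data on the device.
	 * NULL opts selects the defaults (see spdk_bs_opts_init()). */
	spdk_bs_init(dev, NULL, bs_handle_cb, NULL);

	/* These "spdk_bs_md" calls must run on the metadata thread. */
	spdk_bs_md_create_blob(g_bs, blob_id_cb, NULL);
	spdk_bs_md_open_blob(g_bs, g_blobid, blob_handle_cb, NULL);

	/* A new blob owns zero clusters; grow it, tag it, and persist the
	 * metadata with a sync. */
	spdk_bs_md_resize_blob(g_blob, 5);
	spdk_blob_md_set_xattr(g_blob, "name", "example", sizeof("example"));
	spdk_bs_md_sync_blob(g_blob, op_cb, NULL);

	spdk_bs_md_close_blob(&g_blob, op_cb, NULL);
	spdk_bs_unload(g_bs, op_cb, NULL);
}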

lib/blob/blobstore.c (new file, 2335 lines; diff too large to display)

lib/blob/blobstore.h (new file)
@@ -0,0 +1,370 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SPDK_BLOBSTORE_H
#define SPDK_BLOBSTORE_H
#include "spdk/assert.h"
#include "spdk/blob.h"
#include "spdk/queue.h"
#include "spdk/util.h"
/* In Memory Data Structures
*
* The following data structures exist only in memory.
*/
#define SPDK_BLOB_OPTS_CLUSTER_SZ (1024 * 1024)
#define SPDK_BLOB_OPTS_NUM_MD_PAGES UINT32_MAX
#define SPDK_BLOB_OPTS_MAX_MD_OPS 32
struct spdk_xattr {
/* TODO: reorder for best packing */
uint32_t index;
char *name;
void *value;
uint16_t value_len;
TAILQ_ENTRY(spdk_xattr) link;
};
/* The mutable part of the blob data that is sync'd to
* disk. The data in here is both mutable and persistent.
*/
struct spdk_blob_mut_data {
/* Number of data clusters in the blob */
uint64_t num_clusters;
/* Array of LBAs, each the starting LBA of a cluster, in
* the order the clusters appear in the blob.
*/
uint64_t *clusters;
/* The size of the clusters array. This is greater than or
* equal to 'num_clusters'.
*/
size_t cluster_array_size;
/* Number of metadata pages */
uint32_t num_pages;
/* Array of page offsets into the metadata region, in
* the order of the metadata page sequence.
*/
uint32_t *pages;
};
enum spdk_blob_state {
/* The in-memory version of the blob does not match the on-disk
* version.
*/
SPDK_BLOB_STATE_DIRTY,
/* The in-memory version of the blob matches the on-disk
* version.
*/
SPDK_BLOB_STATE_CLEAN,
/* The in-memory state is being synchronized with the on-disk
* blob state. */
SPDK_BLOB_STATE_LOADING,
/* The disk state is being synchronized with the current
* blob state.
*/
SPDK_BLOB_STATE_SYNCING,
};
struct spdk_blob {
struct spdk_blob_store *bs;
uint32_t open_ref;
spdk_blob_id id;
enum spdk_blob_state state;
/* Two copies of the mutable data. One is a version
* that matches the last known data on disk (clean).
* The other (active) is the current data. Syncing
* a blob makes the clean match the active.
*/
struct spdk_blob_mut_data clean;
struct spdk_blob_mut_data active;
/* TODO: The xattrs are mutable, but we don't want to be
* copying them unnecessarily. Figure this out.
*/
TAILQ_HEAD(, spdk_xattr) xattrs;
TAILQ_ENTRY(spdk_blob) link;
};
struct spdk_blob_store {
uint64_t md_start; /* Offset from beginning of disk, in pages */
uint32_t md_len; /* Count, in pages */
struct spdk_io_channel *md_channel;
struct spdk_bs_dev *dev;
struct spdk_bit_array *used_md_pages;
struct spdk_bit_array *used_clusters;
uint32_t cluster_sz;
uint64_t total_clusters;
uint64_t num_free_clusters;
uint32_t pages_per_cluster;
uint32_t max_md_ops;
spdk_blob_id super_blob;
TAILQ_HEAD(, spdk_blob) blobs;
};
struct spdk_bs_channel {
struct spdk_bs_request_set *req_mem;
TAILQ_HEAD(, spdk_bs_request_set) reqs;
struct spdk_blob_store *bs;
struct spdk_bs_dev *dev;
struct spdk_io_channel *dev_channel;
};
/* On-Disk Data Structures
*
* The following data structures exist on disk.
*/
#define SPDK_BS_VERSION 1
#pragma pack(push, 1)
#define SPDK_MD_MASK_TYPE_USED_PAGES 0
#define SPDK_MD_MASK_TYPE_USED_CLUSTERS 1
struct spdk_bs_md_mask {
uint8_t type;
uint32_t length; /* In bits */
uint8_t mask[0];
};
#define SPDK_MD_DESCRIPTOR_TYPE_PADDING 0
#define SPDK_MD_DESCRIPTOR_TYPE_EXTENT 1
#define SPDK_MD_DESCRIPTOR_TYPE_XATTR 2
struct spdk_blob_md_descriptor_xattr {
uint8_t type;
uint32_t length;
uint16_t name_length;
uint16_t value_length;
char name[0];
/* String name immediately followed by string value. */
};
struct spdk_blob_md_descriptor_extent {
uint8_t type;
uint32_t length;
struct {
uint32_t cluster_idx;
uint32_t length; /* In units of clusters */
} extents[0];
};
struct spdk_blob_md_descriptor {
uint8_t type;
uint32_t length;
};
#define SPDK_INVALID_MD_PAGE UINT32_MAX
struct spdk_blob_md_page {
spdk_blob_id id;
uint32_t sequence_num;
uint32_t reserved0;
/* Descriptors here */
uint64_t descriptors[509];
uint32_t next;
uint32_t crc;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_md_page) == 0x1000, "Invalid md page size");
#define SPDK_BS_SUPER_BLOCK_SIG "SPDKBLOB"
struct spdk_bs_super_block {
uint8_t signature[8];
uint32_t version;
uint32_t length;
uint32_t clean; /* If there was a clean shutdown, this is 1. */
spdk_blob_id super_blob;
uint32_t cluster_size; /* In bytes */
uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */
uint32_t used_page_mask_len; /* Count, in pages */
uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */
uint32_t used_cluster_mask_len; /* Count, in pages */
uint32_t md_start; /* Offset from beginning of disk, in pages */
uint32_t md_len; /* Count, in pages */
uint8_t reserved[4040];
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block) == 0x1000, "Invalid super block size");
#pragma pack(pop)
/* Unit Conversions
*
* The blobstore works with several different units:
* - Byte: Self explanatory.
* - LBA: A logical block on the backing storage device.
* - Page: The read/write unit for blobs and metadata. Pages are
* 4 KiB, so a page number is an offset into a blob in units of 4 KiB.
* - Cluster Index: The disk is divided into a sequential list of
* clusters; a cluster index is an offset, in clusters, from the
* beginning of the disk.
*
* NOTE: These conversions all act on simple magnitudes, not with any sort
* of knowledge about the blobs themselves. For instance, converting
* a page to an lba with the conversion function below simply converts
* a number of pages to an equivalent number of lbas, but that
* lba certainly isn't the right lba that corresponds to a page offset
* for a particular blob.
*/
static inline uint64_t
_spdk_bs_byte_to_lba(struct spdk_blob_store *bs, uint64_t length)
{
assert(length % bs->dev->blocklen == 0);
return length / bs->dev->blocklen;
}
static inline uint64_t
_spdk_bs_lba_to_byte(struct spdk_blob_store *bs, uint64_t lba)
{
return lba * bs->dev->blocklen;
}
static inline uint64_t
_spdk_bs_page_to_lba(struct spdk_blob_store *bs, uint64_t page)
{
return page * sizeof(struct spdk_blob_md_page) / bs->dev->blocklen;
}
static inline uint32_t
_spdk_bs_lba_to_page(struct spdk_blob_store *bs, uint64_t lba)
{
uint64_t lbas_per_page;
lbas_per_page = sizeof(struct spdk_blob_md_page) / bs->dev->blocklen;
assert(lba % lbas_per_page == 0);
return lba / lbas_per_page;
}
static inline uint64_t
_spdk_bs_cluster_to_page(struct spdk_blob_store *bs, uint32_t cluster)
{
return cluster * bs->pages_per_cluster;
}
static inline uint32_t
_spdk_bs_page_to_cluster(struct spdk_blob_store *bs, uint64_t page)
{
assert(page % bs->pages_per_cluster == 0);
return page / bs->pages_per_cluster;
}
static inline uint64_t
_spdk_bs_cluster_to_lba(struct spdk_blob_store *bs, uint32_t cluster)
{
return cluster * (bs->cluster_sz / bs->dev->blocklen);
}
static inline uint32_t
_spdk_bs_lba_to_cluster(struct spdk_blob_store *bs, uint64_t lba)
{
assert(lba % (bs->cluster_sz / bs->dev->blocklen) == 0);
return lba / (bs->cluster_sz / bs->dev->blocklen);
}
/* End basic conversions */
static inline uint32_t
_spdk_bs_blobid_to_page(spdk_blob_id id)
{
return id & 0xFFFFFFFF;
}
/* Given a page offset into a blob, look up the LBA for the
* start of that page.
*/
static inline uint64_t
_spdk_bs_blob_page_to_lba(struct spdk_blob *blob, uint32_t page)
{
uint64_t lba;
uint32_t pages_per_cluster;
pages_per_cluster = blob->bs->pages_per_cluster;
assert(page < blob->active.num_clusters * pages_per_cluster);
lba = blob->active.clusters[page / pages_per_cluster];
lba += _spdk_bs_page_to_lba(blob->bs, page % pages_per_cluster);
return lba;
}
/* Given a page offset into a blob, look up the number of pages until the
* next cluster boundary.
*/
static inline uint32_t
_spdk_bs_num_pages_to_cluster_boundary(struct spdk_blob *blob, uint32_t page)
{
uint32_t pages_per_cluster;
pages_per_cluster = blob->bs->pages_per_cluster;
return pages_per_cluster - (page % pages_per_cluster);
}
#endif
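
The conversion helpers above are pure arithmetic, so a concrete set of numbers makes them easy to sanity-check. The sketch below assumes a 512-byte-block device and the default 1 MiB cluster size; pages are 4 KiB since sizeof(struct spdk_blob_md_page) is asserted to be 0x1000. The partially initialized structs exist only to feed the inline functions and are not how a blobstore is ever constructed in practice.

#include <assert.h>
#include "blobstore.h"	/* private header; illustration only */

static void
conversion_example(void)
{
	struct spdk_bs_dev dev = { .blockcnt = 2 * 1024 * 1024, .blocklen = 512 };
	struct spdk_blob_store bs = {
		.dev = &dev,
		.cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ,			/* 1 MiB */
		.pages_per_cluster = SPDK_BLOB_OPTS_CLUSTER_SZ / 0x1000,	/* 256 */
	};

	/* One 4 KiB page spans eight 512-byte LBAs. */
	assert(_spdk_bs_page_to_lba(&bs, 1) == 8);
	assert(_spdk_bs_lba_to_page(&bs, 8) == 1);

	/* Cluster 3 begins at page 3 * 256 = 768 and at LBA 3 * 2048 = 6144. */
	assert(_spdk_bs_cluster_to_page(&bs, 3) == 768);
	assert(_spdk_bs_cluster_to_lba(&bs, 3) == 6144);

	/* Bytes to LBAs is a straight division by the block length. */
	assert(_spdk_bs_byte_to_lba(&bs, 1024 * 1024) == 2048);
}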

lib/blob/request.c (new file)
@@ -0,0 +1,342 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "blobstore.h"
#include "request.h"
#include "spdk/io_channel.h"
#include "spdk/queue.h"
#include "spdk_internal/log.h"
void
spdk_bs_call_cpl(struct spdk_bs_cpl *cpl, int bserrno)
{
switch (cpl->type) {
case SPDK_BS_CPL_TYPE_BS_BASIC:
cpl->u.bs_basic.cb_fn(cpl->u.bs_basic.cb_arg,
bserrno);
break;
case SPDK_BS_CPL_TYPE_BS_HANDLE:
cpl->u.bs_handle.cb_fn(cpl->u.bs_handle.cb_arg,
cpl->u.bs_handle.bs,
bserrno);
break;
case SPDK_BS_CPL_TYPE_BLOB_BASIC:
cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg,
bserrno);
break;
case SPDK_BS_CPL_TYPE_BLOBID:
cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg,
cpl->u.blobid.blobid,
bserrno);
break;
case SPDK_BS_CPL_TYPE_BLOB_HANDLE:
cpl->u.blob_handle.cb_fn(cpl->u.blob_handle.cb_arg,
cpl->u.blob_handle.blob,
bserrno);
break;
case SPDK_BS_CPL_TYPE_NESTED_SEQUENCE:
cpl->u.nested_seq.cb_fn(cpl->u.nested_seq.cb_arg,
cpl->u.nested_seq.parent,
bserrno);
break;
}
}
static void
spdk_bs_request_set_complete(struct spdk_bs_request_set *set)
{
struct spdk_bs_cpl cpl = set->cpl;
int bserrno = set->bserrno;
TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);
spdk_bs_call_cpl(&cpl, bserrno);
}
static void
spdk_bs_sequence_completion(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
struct spdk_bs_request_set *set = cb_arg;
set->bserrno = bserrno;
set->u.sequence.cb_fn((spdk_bs_sequence_t *)set, set->u.sequence.cb_arg, bserrno);
}
spdk_bs_sequence_t *
spdk_bs_sequence_start(struct spdk_io_channel *_channel,
struct spdk_bs_cpl *cpl)
{
struct spdk_bs_channel *channel;
struct spdk_bs_request_set *set;
channel = spdk_io_channel_get_ctx(_channel);
set = TAILQ_FIRST(&channel->reqs);
if (!set) {
return NULL;
}
TAILQ_REMOVE(&channel->reqs, set, link);
set->cpl = *cpl;
set->bserrno = 0;
set->channel = channel;
set->cb_args.cb_fn = spdk_bs_sequence_completion;
set->cb_args.cb_arg = set;
set->cb_args.channel = channel->dev_channel;
return (spdk_bs_sequence_t *)set;
}
void
spdk_bs_sequence_read(spdk_bs_sequence_t *seq, void *payload,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Reading %u blocks from LBA %lu\n", lba_count, lba);
set->u.sequence.cb_fn = cb_fn;
set->u.sequence.cb_arg = cb_arg;
channel->dev->read(channel->dev, channel->dev_channel, payload, lba, lba_count,
&set->cb_args);
}
void
spdk_bs_sequence_write(spdk_bs_sequence_t *seq, void *payload,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Writing %u blocks to LBA %lu\n", lba_count, lba);
set->u.sequence.cb_fn = cb_fn;
set->u.sequence.cb_arg = cb_arg;
channel->dev->write(channel->dev, channel->dev_channel, payload, lba, lba_count,
&set->cb_args);
}
void
spdk_bs_sequence_flush(spdk_bs_sequence_t *seq,
spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Flushing\n");
set->u.sequence.cb_fn = cb_fn;
set->u.sequence.cb_arg = cb_arg;
channel->dev->flush(channel->dev, channel->dev_channel,
&set->cb_args);
}
void
spdk_bs_sequence_unmap(spdk_bs_sequence_t *seq,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Unmapping %u blocks at LBA %lu\n", lba_count, lba);
set->u.sequence.cb_fn = cb_fn;
set->u.sequence.cb_arg = cb_arg;
channel->dev->unmap(channel->dev, channel->dev_channel, lba, lba_count,
&set->cb_args);
}
void
spdk_bs_sequence_finish(spdk_bs_sequence_t *seq, int bserrno)
{
if (bserrno != 0) {
seq->bserrno = bserrno;
}
spdk_bs_request_set_complete((struct spdk_bs_request_set *)seq);
}
static void
spdk_bs_batch_completion(struct spdk_io_channel *_channel,
void *cb_arg, int bserrno)
{
struct spdk_bs_request_set *set = cb_arg;
set->u.batch.outstanding_ops--;
if (bserrno != 0) {
set->bserrno = bserrno;
}
if (set->u.batch.outstanding_ops == 0 && set->u.batch.batch_closed) {
if (set->u.batch.cb_fn) {
set->cb_args.cb_fn = spdk_bs_sequence_completion;
set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, bserrno);
} else {
spdk_bs_request_set_complete(set);
}
}
}
spdk_bs_batch_t *
spdk_bs_batch_open(struct spdk_io_channel *_channel,
struct spdk_bs_cpl *cpl)
{
struct spdk_bs_channel *channel;
struct spdk_bs_request_set *set;
channel = spdk_io_channel_get_ctx(_channel);
set = TAILQ_FIRST(&channel->reqs);
if (!set) {
return NULL;
}
TAILQ_REMOVE(&channel->reqs, set, link);
set->cpl = *cpl;
set->bserrno = 0;
set->channel = channel;
set->u.batch.cb_fn = NULL;
set->u.batch.cb_arg = NULL;
set->u.batch.outstanding_ops = 0;
set->u.batch.batch_closed = 0;
set->cb_args.cb_fn = spdk_bs_batch_completion;
set->cb_args.cb_arg = set;
set->cb_args.channel = channel->dev_channel;
return (spdk_bs_batch_t *)set;
}
void
spdk_bs_batch_read(spdk_bs_batch_t *batch, void *payload,
uint64_t lba, uint32_t lba_count)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Reading %u blocks from LBA %lu\n", lba_count, lba);
set->u.batch.outstanding_ops++;
channel->dev->read(channel->dev, channel->dev_channel, payload, lba, lba_count,
&set->cb_args);
}
void
spdk_bs_batch_write(spdk_bs_batch_t *batch, void *payload,
uint64_t lba, uint32_t lba_count)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Writing %u blocks to LBA %lu\n", lba_count, lba);
set->u.batch.outstanding_ops++;
channel->dev->write(channel->dev, channel->dev_channel, payload, lba, lba_count,
&set->cb_args);
}
void
spdk_bs_batch_flush(spdk_bs_batch_t *batch)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Flushing\n");
set->u.batch.outstanding_ops++;
channel->dev->flush(channel->dev, channel->dev_channel,
&set->cb_args);
}
void
spdk_bs_batch_unmap(spdk_bs_batch_t *batch,
uint64_t lba, uint32_t lba_count)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Unmapping %u blocks at LBA %lu\n", lba_count, lba);
set->u.batch.outstanding_ops++;
channel->dev->unmap(channel->dev, channel->dev_channel, lba, lba_count,
&set->cb_args);
}
void
spdk_bs_batch_close(spdk_bs_batch_t *batch)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)batch;
set->u.batch.batch_closed = 1;
if (set->u.batch.outstanding_ops == 0) {
if (set->u.batch.cb_fn) {
set->cb_args.cb_fn = spdk_bs_sequence_completion;
set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, set->bserrno);
} else {
spdk_bs_request_set_complete(set);
}
}
}
spdk_bs_batch_t *
spdk_bs_sequence_to_batch(spdk_bs_sequence_t *seq, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
set->u.batch.cb_fn = cb_fn;
set->u.batch.cb_arg = cb_arg;
set->u.batch.outstanding_ops = 0;
set->u.batch.batch_closed = 0;
set->cb_args.cb_fn = spdk_bs_batch_completion;
return set;
}
SPDK_LOG_REGISTER_TRACE_FLAG("blob_rw", SPDK_TRACE_BLOB_RW);
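
A sequence is the serial counterpart of a batch: each step is issued from the completion callback of the previous one, and spdk_bs_sequence_finish() fires the user-visible completion captured in the spdk_bs_cpl at start time. The sketch below shows that pattern with a hypothetical two-step write; the channel is assumed to have been allocated with spdk_bs_alloc_io_channel(), and none of the example_* names exist in this commit.

#include <errno.h>
#include "request.h"

static void
example_write_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	/* Both steps are done (or one failed); complete the stored spdk_bs_cpl. */
	spdk_bs_sequence_finish(seq, bserrno);
}

static void
example_write_second(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	void *payload = cb_arg;

	if (bserrno != 0) {
		spdk_bs_sequence_finish(seq, bserrno);
		return;
	}
	/* Second serial step: write the same buffer to the next 8 LBAs. */
	spdk_bs_sequence_write(seq, payload, 8, 8, example_write_done, NULL);
}

static void
example_two_writes(struct spdk_io_channel *channel, void *payload,
		   spdk_blob_op_complete user_cb, void *user_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = user_cb;
	cpl.u.blob_basic.cb_arg = user_arg;

	seq = spdk_bs_sequence_start(channel, &cpl);
	if (seq == NULL) {
		user_cb(user_arg, -ENOMEM);
		return;
	}
	/* First step; its completion callback issues the second step. */
	spdk_bs_sequence_write(seq, payload, 0, 8, example_write_second, payload);
}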

lib/blob/request.h (new file)
@@ -0,0 +1,171 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SPDK_BS_REQUEST_H
#define SPDK_BS_REQUEST_H
#include <stdint.h>
#include "spdk/blob.h"
enum spdk_bs_cpl_type {
SPDK_BS_CPL_TYPE_BS_BASIC,
SPDK_BS_CPL_TYPE_BS_HANDLE,
SPDK_BS_CPL_TYPE_BLOB_BASIC,
SPDK_BS_CPL_TYPE_BLOBID,
SPDK_BS_CPL_TYPE_BLOB_HANDLE,
SPDK_BS_CPL_TYPE_NESTED_SEQUENCE,
};
struct spdk_bs_request_set;
/* Use a sequence to submit a set of requests serially */
typedef struct spdk_bs_request_set spdk_bs_sequence_t;
/* Use a batch to submit a set of requests in parallel */
typedef struct spdk_bs_request_set spdk_bs_batch_t;
typedef void (*spdk_bs_nested_seq_complete)(void *cb_arg, spdk_bs_sequence_t *parent, int bserrno);
struct spdk_bs_cpl {
enum spdk_bs_cpl_type type;
union {
struct {
spdk_bs_op_complete cb_fn;
void *cb_arg;
} bs_basic;
struct {
spdk_bs_op_with_handle_complete cb_fn;
void *cb_arg;
struct spdk_blob_store *bs;
} bs_handle;
struct {
spdk_blob_op_complete cb_fn;
void *cb_arg;
} blob_basic;
struct {
spdk_blob_op_with_id_complete cb_fn;
void *cb_arg;
spdk_blob_id blobid;
} blobid;
struct {
spdk_blob_op_with_handle_complete cb_fn;
void *cb_arg;
struct spdk_blob *blob;
} blob_handle;
struct {
spdk_bs_nested_seq_complete cb_fn;
void *cb_arg;
spdk_bs_sequence_t *parent;
} nested_seq;
} u;
};
typedef void (*spdk_bs_sequence_cpl)(spdk_bs_sequence_t *sequence,
void *cb_arg, int bserrno);
/* A generic request set. Can be a sequence or a batch. */
struct spdk_bs_request_set {
struct spdk_bs_cpl cpl;
int bserrno;
struct spdk_bs_channel *channel;
struct spdk_bs_dev_cb_args cb_args;
union {
struct {
spdk_bs_sequence_cpl cb_fn;
void *cb_arg;
} sequence;
struct {
uint32_t outstanding_ops;
uint32_t batch_closed;
spdk_bs_sequence_cpl cb_fn;
void *cb_arg;
} batch;
} u;
TAILQ_ENTRY(spdk_bs_request_set) link;
};
void spdk_bs_call_cpl(struct spdk_bs_cpl *cpl, int bserrno);
spdk_bs_sequence_t *spdk_bs_sequence_start(struct spdk_io_channel *channel,
struct spdk_bs_cpl *cpl);
void spdk_bs_sequence_read(spdk_bs_sequence_t *seq, void *payload,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg);
void spdk_bs_sequence_write(spdk_bs_sequence_t *seq, void *payload,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg);
void spdk_bs_sequence_flush(spdk_bs_sequence_t *seq,
spdk_bs_sequence_cpl cb_fn, void *cb_arg);
void spdk_bs_sequence_unmap(spdk_bs_sequence_t *seq,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg);
void spdk_bs_sequence_finish(spdk_bs_sequence_t *seq, int bserrno);
spdk_bs_batch_t *spdk_bs_batch_open(struct spdk_io_channel *channel,
struct spdk_bs_cpl *cpl);
void spdk_bs_batch_read(spdk_bs_batch_t *batch, void *payload,
uint64_t lba, uint32_t lba_count);
void spdk_bs_batch_write(spdk_bs_batch_t *batch, void *payload,
uint64_t lba, uint32_t lba_count);
void spdk_bs_batch_flush(spdk_bs_batch_t *batch);
void spdk_bs_batch_unmap(spdk_bs_batch_t *batch,
uint64_t lba, uint32_t lba_count);
void spdk_bs_batch_close(spdk_bs_batch_t *batch);
spdk_bs_batch_t *spdk_bs_sequence_to_batch(spdk_bs_sequence_t *seq,
spdk_bs_sequence_cpl cb_fn,
void *cb_arg);
#endif
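
Where a sequence serializes its steps, a batch issues them all up front and completes exactly once, after every outstanding operation has finished and spdk_bs_batch_close() has been called. Below is a sketch of unmapping a large LBA range in parallel chunks, under the same assumptions as the sequence example (a channel from spdk_bs_alloc_io_channel(), hypothetical example_* naming).

#include <errno.h>
#include "request.h"

/* Unmap 'lba_count' blocks starting at 'lba' in chunks of up to 1024 LBAs.
 * The user callback runs once, after every chunk has completed. */
static void
example_unmap_range(struct spdk_io_channel *channel, uint64_t lba, uint64_t lba_count,
		    spdk_bs_op_complete user_cb, void *user_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_batch_t *batch;
	uint32_t chunk;

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = user_cb;
	cpl.u.bs_basic.cb_arg = user_arg;

	batch = spdk_bs_batch_open(channel, &cpl);
	if (batch == NULL) {
		user_cb(user_arg, -ENOMEM);
		return;
	}

	while (lba_count > 0) {
		chunk = lba_count > 1024 ? 1024 : (uint32_t)lba_count;
		spdk_bs_batch_unmap(batch, lba, chunk);
		lba += chunk;
		lba_count -= chunk;
	}

	/* Nothing completes until the batch is closed, even if every unmap
	 * has already finished. */
	spdk_bs_batch_close(batch);
}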

lib/Makefile (modified)
@@ -34,7 +34,7 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
-DIRS-y = bdev env event log json jsonrpc nvme nvmf scsi ioat util
+DIRS-y = bdev blob env event log json jsonrpc nvme nvmf scsi ioat util
ifeq ($(OS),Linux)
DIRS-y += iscsi
endif

test/lib/blob/Makefile (new file)
@@ -0,0 +1,43 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = blob_ut
.PHONY: all clean $(DIRS-y)
all: $(DIRS-y)
clean: $(DIRS-y)
include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk

test/lib/blob/blob_ut/.gitignore (new file)
@@ -0,0 +1 @@
blob_ut

test/lib/blob/blob_ut/Makefile (new file)
@@ -0,0 +1,55 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
APP = blob_ut
C_SRCS := blob_ut.c
CFLAGS += -I$(SPDK_ROOT_DIR)/lib/blob -I$(SPDK_ROOT_DIR)/test
SPDK_LIB_LIST = util log
LIBS += $(SPDK_LIB_LINKER_ARGS) -lcunit
all : $(APP)
$(APP) : $(OBJS) $(SPDK_LIB_FILES)
$(LINK_C)
clean :
$(CLEAN_C) $(APP)
include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk

test/lib/blob/blob_ut/blob_ut.c (new file)
@@ -0,0 +1,973 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include "spdk_cunit.h"
#include "spdk/blob.h"
#include "lib/test_env.c"
#include "../bs_dev_common.c"
#include "blobstore.c"
#include "request.c"
struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob;
int g_bserrno;
struct spdk_xattr_names *g_names;
int g_done;
static void
bs_op_complete(void *cb_arg, int bserrno)
{
g_bserrno = bserrno;
}
static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
int bserrno)
{
g_bs = bs;
g_bserrno = bserrno;
}
static void
blob_op_complete(void *cb_arg, int bserrno)
{
g_bserrno = bserrno;
}
static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
g_blobid = blobid;
g_bserrno = bserrno;
}
static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
g_blob = blb;
g_bserrno = bserrno;
}
static void
blob_init(void)
{
struct spdk_bs_dev dev;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_super(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
spdk_blob_id blobid;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
/* Get the super blob without having set one */
spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == -ENOENT);
CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
/* Create a blob */
spdk_bs_md_create_blob(bs,
blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
/* Set the blob as the super blob */
spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Get the super blob */
spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(blobid == g_blobid);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_open(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_blob *blob;
spdk_blob_id blobid, blobid2;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
blobid2 = spdk_blob_get_id(blob);
CU_ASSERT(blobid == blobid2);
/* Try to open the blob again. It should succeed. */
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(blob == g_blob);
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(blob == NULL);
/*
* Close the blob a second time, releasing the second reference. This
* should succeed.
*/
blob = g_blob;
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/*
* Try to open the blob again. It should succeed. This tests the case
* where the blob is opened, closed, then re-opened.
*/
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_delete(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
spdk_blob_id blobid;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
/* Create a blob and then delete it. */
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid > 0);
blobid = g_blobid;
spdk_bs_md_delete_blob(bs, blobid, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Try to open the blob */
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == -ENOENT);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_resize(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_blob *blob;
spdk_blob_id blobid;
uint64_t free_clusters;
int rc;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
free_clusters = spdk_bs_free_cluster_count(bs);
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
blobid = g_blobid;
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
/* The blob started at 0 clusters. Resize it to be 5. */
rc = spdk_bs_md_resize_blob(blob, 5);
CU_ASSERT(rc == 0);
CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
/* Shrink the blob to 3 clusters. This will not actually release
* the old clusters until the blob is synced.
*/
rc = spdk_bs_md_resize_blob(blob, 3);
CU_ASSERT(rc == 0);
/* Verify there are still 5 clusters in use */
CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
spdk_bs_md_sync_blob(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Now there are only 3 clusters in use */
CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
rc = spdk_bs_md_resize_blob(blob, 10);
CU_ASSERT(rc == 0);
CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
spdk_bs_md_delete_blob(bs, blobid, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
channel_ops(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_io_channel *channel;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
channel = spdk_bs_alloc_io_channel(bs, SPDK_IO_PRIORITY_DEFAULT, 32);
CU_ASSERT(channel != NULL);
spdk_bs_free_io_channel(channel);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_write(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_blob *blob;
struct spdk_io_channel *channel;
spdk_blob_id blobid;
uint64_t pages_per_cluster;
uint8_t payload[10 * 4096];
int rc;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
channel = spdk_bs_alloc_io_channel(bs, SPDK_IO_PRIORITY_DEFAULT, 32);
CU_ASSERT(channel != NULL);
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
/* Write to a blob with 0 size */
spdk_bs_io_write_blob(blob, channel, payload, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == -EINVAL);
/* Resize the blob */
rc = spdk_bs_md_resize_blob(blob, 5);
CU_ASSERT(rc == 0);
/* Write to the blob */
spdk_bs_io_write_blob(blob, channel, payload, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Write starting beyond the end */
spdk_bs_io_write_blob(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
NULL);
CU_ASSERT(g_bserrno == -EINVAL);
/* Write starting at a valid location but going off the end */
spdk_bs_io_write_blob(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
blob_op_complete, NULL);
CU_ASSERT(g_bserrno == -EINVAL);
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
spdk_bs_free_io_channel(channel);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_read(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_blob *blob;
struct spdk_io_channel *channel;
spdk_blob_id blobid;
uint64_t pages_per_cluster;
uint8_t payload[10 * 4096];
int rc;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
channel = spdk_bs_alloc_io_channel(bs, SPDK_IO_PRIORITY_DEFAULT, 32);
CU_ASSERT(channel != NULL);
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
/* Read from a blob with 0 size */
spdk_bs_io_read_blob(blob, channel, payload, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == -EINVAL);
/* Resize the blob */
rc = spdk_bs_md_resize_blob(blob, 5);
CU_ASSERT(rc == 0);
/* Read from the blob */
spdk_bs_io_read_blob(blob, channel, payload, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Read starting beyond the end */
spdk_bs_io_read_blob(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
NULL);
CU_ASSERT(g_bserrno == -EINVAL);
/* Read starting at a valid location but going off the end */
spdk_bs_io_read_blob(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
blob_op_complete, NULL);
CU_ASSERT(g_bserrno == -EINVAL);
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
spdk_bs_free_io_channel(channel);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_rw_verify(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_blob *blob;
struct spdk_io_channel *channel;
spdk_blob_id blobid;
uint8_t payload_read[10 * 4096];
uint8_t payload_write[10 * 4096];
int rc;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
channel = spdk_bs_alloc_io_channel(bs, SPDK_IO_PRIORITY_DEFAULT, 32);
CU_ASSERT(channel != NULL);
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
rc = spdk_bs_md_resize_blob(blob, 32);
CU_ASSERT(rc == 0);
memset(payload_write, 0xE5, sizeof(payload_write));
spdk_bs_io_write_blob(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
memset(payload_read, 0x00, sizeof(payload_read));
spdk_bs_io_read_blob(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
spdk_bs_free_io_channel(channel);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_iter(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_blob *blob;
spdk_blob_id blobid;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
spdk_bs_md_iter_first(bs, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_blob == NULL);
CU_ASSERT(g_bserrno == -ENOENT);
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_md_iter_first(bs, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_blob != NULL);
CU_ASSERT(g_bserrno == 0);
blob = g_blob;
CU_ASSERT(spdk_blob_get_id(blob) == blobid);
spdk_bs_md_iter_next(bs, &blob, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_blob == NULL);
CU_ASSERT(g_bserrno == -ENOENT);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_xattr(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_blob *blob;
spdk_blob_id blobid;
uint64_t length;
int rc;
const void *value;
size_t value_len;
struct spdk_xattr_names *names;
init_dev(&dev);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
rc = spdk_blob_md_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
CU_ASSERT(rc == 0);
length = 2345;
rc = spdk_blob_md_set_xattr(blob, "length", &length, sizeof(length));
CU_ASSERT(rc == 0);
/* Overwrite "length" xattr. */
length = 3456;
rc = spdk_blob_md_set_xattr(blob, "length", &length, sizeof(length));
CU_ASSERT(rc == 0);
value = NULL;
rc = spdk_bs_md_get_xattr_value(blob, "length", &value, &value_len);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(value != NULL);
CU_ASSERT(*(uint64_t *)value == length);
CU_ASSERT(value_len == 8);
rc = spdk_bs_md_get_xattr_value(blob, "foobar", &value, &value_len);
CU_ASSERT(rc == -ENOENT);
names = NULL;
rc = spdk_bs_md_get_xattr_names(blob, &names);
CU_ASSERT(rc == 0);
CU_ASSERT(names != NULL);
CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
CU_ASSERT(!strcmp(spdk_xattr_names_get_name(names, 0), "name") ||
!strcmp(spdk_xattr_names_get_name(names, 1), "name"));
CU_ASSERT(!strcmp(spdk_xattr_names_get_name(names, 0), "length") ||
!strcmp(spdk_xattr_names_get_name(names, 1), "length"));
spdk_xattr_names_free(names);
rc = spdk_blob_md_remove_xattr(blob, "name");
CU_ASSERT(rc == 0);
rc = spdk_blob_md_remove_xattr(blob, "foobar");
CU_ASSERT(rc == -ENOENT);
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}

static void
bs_load(void)
{
struct spdk_bs_dev dev;
spdk_blob_id blobid;
struct spdk_blob *blob;
uint64_t length;
int rc;
const void *value;
size_t value_len;
init_dev(&dev);
/* Initialize a new blob store */
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
/* Create a blob */
spdk_bs_md_create_blob(g_bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_md_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
/* Set some xattrs */
rc = spdk_blob_md_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
CU_ASSERT(rc == 0);
length = 2345;
rc = spdk_blob_md_set_xattr(blob, "length", &length, sizeof(length));
CU_ASSERT(rc == 0);
/* Resize the blob */
rc = spdk_bs_md_resize_blob(blob, 10);
CU_ASSERT(rc == 0);
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
blob = NULL;
g_blob = NULL;
g_blobid = SPDK_BLOBID_INVALID;
/* Unload the blob store */
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
g_blob = NULL;
g_blobid = 0;
/* Load an existing blob store */
spdk_bs_load(&dev, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
spdk_bs_md_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
/* Get the xattrs */
value = NULL;
rc = spdk_bs_md_get_xattr_value(blob, "length", &value, &value_len);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(value != NULL);
CU_ASSERT(*(uint64_t *)value == length);
CU_ASSERT(value_len == 8);
rc = spdk_bs_md_get_xattr_value(blob, "foobar", &value, &value_len);
CU_ASSERT(rc == -ENOENT);
CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
blob = NULL;
g_blob = NULL;
g_blobid = SPDK_BLOBID_INVALID;
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}

/*
* Create a blobstore with a cluster size different from the default, and ensure
* that the setting persists across an unload/reload cycle.
*/
static void
bs_cluster_sz(void)
{
struct spdk_bs_dev dev;
struct spdk_bs_opts opts;
uint32_t cluster_sz;
init_dev(&dev);
spdk_bs_opts_init(&opts);
opts.cluster_sz *= 2;
cluster_sz = opts.cluster_sz;
/* Initialize a new blob store */
spdk_bs_init(&dev, &opts, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
CU_ASSERT(spdk_bs_get_cluster_size(g_bs) == cluster_sz);
/* Unload the blob store */
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
g_blob = NULL;
g_blobid = 0;
/* Load an existing blob store */
spdk_bs_load(&dev, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
CU_ASSERT(spdk_bs_get_cluster_size(g_bs) == cluster_sz);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}

/*
* Test resizing of the metadata blob. This requires creating enough blobs
* so that one cluster is not enough to fit the metadata for those blobs.
* To induce this condition to happen more quickly, we reduce the cluster
* size to 16KB, which means only 4 4KB blob metadata pages can fit.
*/
static void
bs_resize_md(void)
{
const int CLUSTER_PAGE_COUNT = 4;
const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
struct spdk_bs_dev dev;
struct spdk_bs_opts opts;
uint32_t cluster_sz;
spdk_blob_id blobids[NUM_BLOBS];
int i;
init_dev(&dev);
spdk_bs_opts_init(&opts);
opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
cluster_sz = opts.cluster_sz;
/* Initialize a new blob store */
spdk_bs_init(&dev, &opts, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
CU_ASSERT(spdk_bs_get_cluster_size(g_bs) == cluster_sz);
for (i = 0; i < NUM_BLOBS; i++) {
g_bserrno = -1;
g_blobid = SPDK_BLOBID_INVALID;
spdk_bs_md_create_blob(g_bs,
blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobids[i] = g_blobid;
}
/* Unload the blob store */
g_bserrno = -1;
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Load an existing blob store */
g_bserrno = -1;
g_bs = NULL;
spdk_bs_load(&dev, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
CU_ASSERT(spdk_bs_get_cluster_size(g_bs) == cluster_sz);
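/* Every blob created before the reload should still be openable */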
for (i = 0; i < NUM_BLOBS; i++) {
g_bserrno = -1;
g_blob = NULL;
spdk_bs_md_open_blob(g_bs, blobids[i], blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
g_bserrno = -1;
spdk_bs_md_close_blob(&g_blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
}
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}

/* Try to hit all of the corner cases associated with serializing
* a blob to disk
*/
static void
blob_serialize(void)
{
struct spdk_bs_dev dev;
struct spdk_bs_opts opts;
struct spdk_blob_store *bs;
spdk_blob_id blobid[2];
struct spdk_blob *blob[2];
uint64_t i;
char *value;
int rc;
init_dev(&dev);
/* Initialize a new blobstore with very small clusters */
spdk_bs_opts_init(&opts);
opts.cluster_sz = dev.blocklen * 8;
spdk_bs_init(&dev, &opts, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
/* Create and open two blobs */
for (i = 0; i < 2; i++) {
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid[i] = g_blobid;
/* Open a blob */
spdk_bs_md_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob[i] = g_blob;
/* Set a fairly large xattr on both blobs to eat up
* metadata space
*/
value = calloc(dev.blocklen - 64, sizeof(char));
SPDK_CU_ASSERT_FATAL(value != NULL);
memset(value, i, dev.blocklen / 2);
rc = spdk_blob_md_set_xattr(blob[i], "name", value, dev.blocklen - 64);
CU_ASSERT(rc == 0);
free(value);
}
/* Resize the blobs, alternating between them one cluster at a time.
* This thwarts run-length encoding and will cause the extents to
* spill over.
*/
for (i = 0; i < 6; i++) {
rc = spdk_bs_md_resize_blob(blob[i % 2], (i / 2) + 1);
CU_ASSERT(rc == 0);
}
for (i = 0; i < 2; i++) {
spdk_bs_md_sync_blob(blob[i], blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
}
/* Close the blobs */
for (i = 0; i < 2; i++) {
spdk_bs_md_close_blob(&blob[i], blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
}
/* Unload the blobstore */
spdk_bs_unload(bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
g_blob = NULL;
g_blobid = 0;
bs = NULL;
/* Load an existing blob store */
spdk_bs_load(&dev, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
for (i = 0; i < 2; i++) {
blob[i] = NULL;
spdk_bs_md_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob[i] = g_blob;
CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
spdk_bs_md_close_blob(&blob[i], blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
}
spdk_bs_unload(bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}

int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
unsigned int num_failures;
if (CU_initialize_registry() != CUE_SUCCESS) {
return CU_get_error();
}
suite = CU_add_suite("blob", NULL, NULL);
if (suite == NULL) {
CU_cleanup_registry();
return CU_get_error();
}
if (
CU_add_test(suite, "blob_init", blob_init) == NULL ||
CU_add_test(suite, "blob_open", blob_open) == NULL ||
CU_add_test(suite, "blob_delete", blob_delete) == NULL ||
CU_add_test(suite, "blob_resize", blob_resize) == NULL ||
CU_add_test(suite, "channel_ops", channel_ops) == NULL ||
CU_add_test(suite, "blob_super", blob_super) == NULL ||
CU_add_test(suite, "blob_write", blob_write) == NULL ||
CU_add_test(suite, "blob_read", blob_read) == NULL ||
CU_add_test(suite, "blob_rw_verify", blob_rw_verify) == NULL ||
CU_add_test(suite, "blob_iter", blob_iter) == NULL ||
CU_add_test(suite, "blob_xattr", blob_xattr) == NULL ||
CU_add_test(suite, "bs_load", bs_load) == NULL ||
CU_add_test(suite, "bs_cluster_sz", bs_cluster_sz) == NULL ||
CU_add_test(suite, "bs_resize_md", bs_resize_md) == NULL ||
CU_add_test(suite, "blob_serialize", blob_serialize) == NULL
) {
CU_cleanup_registry();
return CU_get_error();
}
g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
spdk_allocate_thread();
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();
num_failures = CU_get_number_of_failures();
CU_cleanup_registry();
spdk_free_thread();
free(g_dev_buffer);
return num_failures;
}

View File

@ -0,0 +1,116 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
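
/* A trivial RAM-backed spdk_bs_dev implementation. It lets the blobstore
* unit tests above run entirely in memory, with no real storage device.
*/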
#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
#define DEV_BUFFER_BLOCKLEN (4096)
#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
uint8_t *g_dev_buffer;

static struct spdk_io_channel *
dev_create_channel(struct spdk_bs_dev *dev)
{
return NULL;
}

static void
dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
}

static void
dev_destroy(struct spdk_bs_dev *dev)
{
}

static void
dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args)
{
uint64_t offset, length;
offset = lba * DEV_BUFFER_BLOCKLEN;
length = lba_count * DEV_BUFFER_BLOCKLEN;
SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
memcpy(payload, &g_dev_buffer[offset], length);
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

static void
dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args)
{
uint64_t offset, length;
offset = lba * DEV_BUFFER_BLOCKLEN;
length = lba_count * DEV_BUFFER_BLOCKLEN;
SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
memcpy(&g_dev_buffer[offset], payload, length);
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

static void
dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct spdk_bs_dev_cb_args *cb_args)
{
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

static void
dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args)
{
uint64_t offset, length;
offset = lba * DEV_BUFFER_BLOCKLEN;
length = lba_count * DEV_BUFFER_BLOCKLEN;
SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
memset(&g_dev_buffer[offset], 0, length);
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

static void
init_dev(struct spdk_bs_dev *dev)
{
dev->create_channel = dev_create_channel;
dev->destroy_channel = dev_destroy_channel;
dev->destroy = dev_destroy;
dev->read = dev_read;
dev->write = dev_write;
dev->flush = dev_flush;
dev->unmap = dev_unmap;
dev->blockcnt = DEV_BUFFER_BLOCKCNT;
dev->blocklen = DEV_BUFFER_BLOCKLEN;
}
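
A note on the test structure: every blobstore call in the tests above is asynchronous, yet each test asserts on g_bserrno, g_bs, g_blob, or g_blobid immediately after the call. That works only because this RAM-backed spdk_bs_dev completes every request inline by invoking cb_args->cb_fn() before returning, so each operation finishes before control returns to the test. The completion helpers referenced throughout the tests are defined earlier in the unit-test file and are not reproduced in this excerpt; as a hedged sketch (the signatures are inferred from how the tests use them and may differ in detail from blob.h), they most likely just copy their callback arguments into the globals that the assertions check:

/*
* Hedged sketch only: these globals and helpers are assumed to be declared
* near the top of the unit-test file and are not part of this excerpt.
*/
static struct spdk_blob_store *g_bs;
static struct spdk_blob *g_blob;
static spdk_blob_id g_blobid;
static int g_bserrno;

static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}

static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

bs_op_complete presumably follows the same pattern as blob_op_complete. Against a real, truly asynchronous spdk_bs_dev this assert-right-after-the-call style would not be valid; callers would have to continue their work from within the completion callbacks instead.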

View File

@ -5,6 +5,8 @@
set -xe
$valgrind test/lib/blob/blob_ut/blob_ut
$valgrind test/lib/nvme/unit/nvme_c/nvme_ut
$valgrind test/lib/nvme/unit/nvme_ctrlr_c/nvme_ctrlr_ut
$valgrind test/lib/nvme/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut