nvme: No longer abstract away pthread calls

pthreads are widely supported and available on every
platform we currently foresee porting to. To simplify
the code, use the pthread API directly rather than
abstracting it away.

Change-Id: I822f9c10910020719e94cce6fca4e1600a2d9f2a
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Ben Walker 2016-08-08 10:03:52 -07:00
parent 1f75a72781
commit 888014289c
9 changed files with 97 additions and 129 deletions

View File

@ -34,7 +34,7 @@
#include "nvme_internal.h"
struct nvme_driver _g_nvme_driver = {
.lock = NVME_MUTEX_INITIALIZER,
.lock = PTHREAD_MUTEX_INITIALIZER,
.init_ctrlrs = TAILQ_HEAD_INITIALIZER(_g_nvme_driver.init_ctrlrs),
.attached_ctrlrs = TAILQ_HEAD_INITIALIZER(_g_nvme_driver.attached_ctrlrs),
};
@ -69,13 +69,13 @@ nvme_attach(void *devhandle)
int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
nvme_mutex_lock(&g_nvme_driver->lock);
pthread_mutex_lock(&g_nvme_driver->lock);
nvme_ctrlr_destruct(ctrlr);
TAILQ_REMOVE(&g_nvme_driver->attached_ctrlrs, ctrlr, tailq);
nvme_free(ctrlr);
nvme_mutex_unlock(&g_nvme_driver->lock);
pthread_mutex_unlock(&g_nvme_driver->lock);
return 0;
}
@ -204,7 +204,7 @@ spdk_nvme_probe(void *cb_ctx, spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb a
struct nvme_enum_ctx enum_ctx;
struct spdk_nvme_ctrlr *ctrlr, *ctrlr_tmp;
nvme_mutex_lock(&g_nvme_driver->lock);
pthread_mutex_lock(&g_nvme_driver->lock);
enum_ctx.probe_cb = probe_cb;
enum_ctx.cb_ctx = cb_ctx;
@ -226,9 +226,9 @@ spdk_nvme_probe(void *cb_ctx, spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb a
* the functions it calls (in particular nvme_ctrlr_set_num_qpairs())
* can assume it is held.
*/
nvme_mutex_unlock(&g_nvme_driver->lock);
pthread_mutex_unlock(&g_nvme_driver->lock);
start_rc = nvme_ctrlr_process_init(ctrlr);
nvme_mutex_lock(&g_nvme_driver->lock);
pthread_mutex_lock(&g_nvme_driver->lock);
if (start_rc) {
/* Controller failed to initialize. */
@ -251,15 +251,15 @@ spdk_nvme_probe(void *cb_ctx, spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb a
* Unlock while calling attach_cb() so the user can call other functions
* that may take the driver lock, like nvme_detach().
*/
nvme_mutex_unlock(&g_nvme_driver->lock);
pthread_mutex_unlock(&g_nvme_driver->lock);
attach_cb(cb_ctx, ctrlr->devhandle, ctrlr, &ctrlr->opts);
nvme_mutex_lock(&g_nvme_driver->lock);
pthread_mutex_lock(&g_nvme_driver->lock);
break;
}
}
}
nvme_mutex_unlock(&g_nvme_driver->lock);
pthread_mutex_unlock(&g_nvme_driver->lock);
return rc;
}

View File

@ -118,7 +118,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
return NULL;
}
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
/*
* Get the first available qpair structure.
@ -126,7 +126,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
qpair = TAILQ_FIRST(&ctrlr->free_io_qpairs);
if (qpair == NULL) {
/* No free queue IDs */
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return NULL;
}
@ -142,13 +142,13 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
* spdk_nvme_ctrlr_create_qpair() failed, so the qpair structure is still unused.
* Exit here so we don't insert it into the active_io_qpairs list.
*/
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return NULL;
}
TAILQ_REMOVE(&ctrlr->free_io_qpairs, qpair, tailq);
TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return qpair;
}
@ -166,42 +166,42 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
ctrlr = qpair->ctrlr;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
/* Delete the I/O submission queue and then the completion queue */
status.done = false;
rc = nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_completion_poll_cb, &status);
if (rc != 0) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
while (status.done == false) {
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -1;
}
status.done = false;
rc = nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
if (rc != 0) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
while (status.done == false) {
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -1;
}
TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
TAILQ_INSERT_HEAD(&ctrlr->free_io_qpairs, qpair, tailq);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
}
@ -522,7 +522,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
uint32_t i;
struct spdk_nvme_qpair *qpair;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
if (ctrlr->is_resetting || ctrlr->is_failed) {
/*
@ -530,7 +530,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
* immediately since there is no need to kick off another
* reset in these cases.
*/
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
}
@ -568,7 +568,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
ctrlr->is_resetting = false;
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
@ -1077,6 +1077,23 @@ nvme_ctrlr_free_bars(struct spdk_nvme_ctrlr *ctrlr)
return rc;
}
/*
 * Initialize *mtx as a recursive mutex (PTHREAD_MUTEX_RECURSIVE),
 * so the same thread may lock it multiple times.
 *
 * Returns 0 on success, -1 if either the attribute object or the
 * mutex itself could not be initialized. The temporary attribute
 * object is always destroyed before returning.
 */
static inline int
pthread_mutex_init_recursive(pthread_mutex_t *mtx)
{
	pthread_mutexattr_t attr;
	int ret = -1;

	if (pthread_mutexattr_init(&attr) != 0) {
		return -1;
	}

	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) == 0 &&
	    pthread_mutex_init(mtx, &attr) == 0) {
		ret = 0;
	}

	pthread_mutexattr_destroy(&attr);
	return ret;
}
int
nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
@ -1117,7 +1134,7 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
TAILQ_INIT(&ctrlr->free_io_qpairs);
TAILQ_INIT(&ctrlr->active_io_qpairs);
nvme_mutex_init_recursive(&ctrlr->ctrlr_lock);
pthread_mutex_init_recursive(&ctrlr->ctrlr_lock);
return 0;
}
@ -1147,7 +1164,7 @@ nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
nvme_qpair_destroy(&ctrlr->adminq);
nvme_ctrlr_free_bars(ctrlr);
nvme_mutex_destroy(&ctrlr->ctrlr_lock);
pthread_mutex_destroy(&ctrlr->ctrlr_lock);
}
int
@ -1162,9 +1179,9 @@ spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
int32_t num_completions;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
num_completions = spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return num_completions;
}
@ -1245,9 +1262,9 @@ spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
if (res)
return res;
while (status.done == false) {
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "spdk_nvme_ctrlr_attach_ns failed!\n");
@ -1270,9 +1287,9 @@ spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
if (res)
return res;
while (status.done == false) {
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "spdk_nvme_ctrlr_detach_ns failed!\n");
@ -1293,9 +1310,9 @@ spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_dat
if (res)
return 0;
while (status.done == false) {
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "spdk_nvme_ctrlr_create_ns failed!\n");
@ -1322,9 +1339,9 @@ spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
if (res)
return res;
while (status.done == false) {
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "spdk_nvme_ctrlr_delete_ns failed!\n");
@ -1347,9 +1364,9 @@ spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
if (res)
return res;
while (status.done == false) {
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "spdk_nvme_ctrlr_format failed!\n");
@ -1392,9 +1409,9 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
return res;
while (status.done == false) {
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "spdk_nvme_ctrlr_fw_image_download failed!\n");
@ -1418,9 +1435,9 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
return res;
while (status.done == false) {
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");

View File

@ -62,10 +62,10 @@ spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -73,7 +73,7 @@ spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
@ -233,11 +233,11 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ctrlr_list),
cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -248,7 +248,7 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
@ -260,11 +260,11 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ctrlr_list),
cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -275,7 +275,7 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
@ -287,11 +287,11 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ns_data),
cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -301,7 +301,7 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
@ -313,10 +313,10 @@ nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_null(cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -327,7 +327,7 @@ nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
@ -338,10 +338,10 @@ nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_null(cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -351,7 +351,7 @@ nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_
memcpy(&cmd->cdw10, format, sizeof(uint32_t));
nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
}
@ -365,10 +365,10 @@ spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_null(cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -379,7 +379,7 @@ spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
cmd->cdw12 = cdw12;
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
@ -393,10 +393,10 @@ spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_null(cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -406,7 +406,7 @@ spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
cmd->cdw11 = cdw11;
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
@ -444,10 +444,10 @@ spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(payload, payload_size, cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -458,7 +458,7 @@ spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page
cmd->cdw10 |= log_page;
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
@ -491,10 +491,10 @@ nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_null(cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -503,7 +503,7 @@ nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
@ -518,11 +518,11 @@ nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
pthread_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(payload, size,
cb_fn, cb_arg);
if (req == NULL) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
}
@ -532,7 +532,7 @@ nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
cmd->cdw11 = offset >> 2;
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}

View File

@ -303,29 +303,4 @@ nvme_pci_enumerate(int (*enum_cb)(void *enum_ctx, struct spdk_pci_device *pci_de
#endif /* !SPDK_CONFIG_PCIACCESS */
/* Thin aliases mapping the nvme mutex API directly onto pthreads. */
typedef pthread_mutex_t nvme_mutex_t;
#define nvme_mutex_init(x) pthread_mutex_init((x), NULL)
#define nvme_mutex_destroy(x) pthread_mutex_destroy((x))
#define nvme_mutex_lock pthread_mutex_lock
#define nvme_mutex_unlock pthread_mutex_unlock
#define NVME_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER

/*
 * Initialize *mtx as a recursive mutex (PTHREAD_MUTEX_RECURSIVE),
 * so the same thread may lock it multiple times.
 *
 * Returns 0 on success, -1 if either the attribute object or the
 * mutex itself could not be initialized. The temporary attribute
 * object is always destroyed before returning.
 */
static inline int
nvme_mutex_init_recursive(nvme_mutex_t *mtx)
{
	pthread_mutexattr_t attr;
	int ret = -1;

	if (pthread_mutexattr_init(&attr) != 0) {
		return -1;
	}

	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) == 0 &&
	    pthread_mutex_init(mtx, &attr) == 0) {
		ret = 0;
	}

	pthread_mutexattr_destroy(&attr);
	return ret;
}
#endif /* __NVME_IMPL_H__ */

View File

@ -37,6 +37,7 @@
#include "spdk/nvme.h"
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
@ -419,7 +420,7 @@ struct spdk_nvme_ctrlr {
void *aer_cb_arg;
/** guards access to the controller itself, including admin queues */
nvme_mutex_t ctrlr_lock;
pthread_mutex_t ctrlr_lock;
struct spdk_nvme_qpair adminq;
@ -452,7 +453,7 @@ struct spdk_nvme_ctrlr {
};
struct nvme_driver {
nvme_mutex_t lock;
pthread_mutex_t lock;
TAILQ_HEAD(, spdk_nvme_ctrlr) init_ctrlrs;
TAILQ_HEAD(, spdk_nvme_ctrlr) attached_ctrlrs;
};

View File

@ -55,9 +55,9 @@ int nvme_ns_identify_update(struct spdk_nvme_ns *ns)
}
while (status.done == false) {
nvme_mutex_lock(&ns->ctrlr->ctrlr_lock);
pthread_mutex_lock(&ns->ctrlr->ctrlr_lock);
spdk_nvme_qpair_process_completions(&ns->ctrlr->adminq, 0);
nvme_mutex_unlock(&ns->ctrlr->ctrlr_lock);
pthread_mutex_unlock(&ns->ctrlr->ctrlr_lock);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "nvme_identify_namespace failed\n");

View File

@ -36,7 +36,7 @@
#include "nvme/nvme_ctrlr.c"
struct nvme_driver _g_nvme_driver = {
.lock = NVME_MUTEX_INITIALIZER,
.lock = PTHREAD_MUTEX_INITIALIZER,
};
static uint16_t g_pci_vendor_id;

View File

@ -128,29 +128,4 @@ nvme_pcicfg_get_bar_addr_len(void *devhandle, uint32_t bar, uint64_t *addr, uint
*size = 0;
}
/* Thin aliases mapping the nvme mutex API directly onto pthreads. */
typedef pthread_mutex_t nvme_mutex_t;
#define nvme_mutex_init(x) pthread_mutex_init((x), NULL)
#define nvme_mutex_destroy(x) pthread_mutex_destroy((x))
#define nvme_mutex_lock pthread_mutex_lock
#define nvme_mutex_unlock pthread_mutex_unlock
#define NVME_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER

/*
 * Initialize *mtx as a recursive mutex (PTHREAD_MUTEX_RECURSIVE),
 * so the same thread may lock it multiple times.
 *
 * Returns 0 on success, -1 if either the attribute object or the
 * mutex itself could not be initialized. The temporary attribute
 * object is always destroyed before returning.
 */
static inline int
nvme_mutex_init_recursive(nvme_mutex_t *mtx)
{
	pthread_mutexattr_t attr;
	int ret = -1;

	if (pthread_mutexattr_init(&attr) != 0) {
		return -1;
	}

	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) == 0 &&
	    pthread_mutex_init(mtx, &attr) == 0) {
		ret = 0;
	}

	pthread_mutexattr_destroy(&attr);
	return ret;
}
#endif /* __NVME_IMPL_H__ */

View File

@ -39,7 +39,7 @@
#include "nvme/nvme_qpair.c"
struct nvme_driver _g_nvme_driver = {
.lock = NVME_MUTEX_INITIALIZER,
.lock = PTHREAD_MUTEX_INITIALIZER,
};
int32_t spdk_nvme_retry_count = 1;