nvmf: Allocate all rdma requests up front.
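
Previously the transport posted a single receive buffer at connect
time and allocated the remaining queue entries only after the CONNECT
capsule arrived. Allocate the full queue depth of requests up front in
allocate_rdma_conn() instead. This removes the CONNECT special case
from the request release path and makes the conn_init transport
callback unnecessary.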

Change-Id: Ia7fdb6994b8c167840d7335a2dfcad3ce6171d3a
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Ben Walker 2016-07-22 10:36:02 -07:00
parent 989859bbe1
commit 9d9dc8452c
3 changed files with 29 additions and 90 deletions

View File

@@ -105,13 +105,18 @@ struct spdk_nvmf_rdma {
static struct spdk_nvmf_rdma g_rdma = { };
static struct spdk_nvmf_rdma_request *alloc_rdma_req(struct spdk_nvmf_conn *conn);
static int nvmf_post_rdma_recv(struct spdk_nvmf_conn *conn, struct spdk_nvmf_request *req);
static void free_rdma_req(struct spdk_nvmf_rdma_request *rdma_req);
static struct spdk_nvmf_rdma_conn *
allocate_rdma_conn(struct rdma_cm_id *id, uint16_t queue_depth)
{
struct spdk_nvmf_rdma_conn *rdma_conn;
struct spdk_nvmf_conn *conn;
int rc;
int rc, i;
struct ibv_qp_init_attr attr;
struct spdk_nvmf_rdma_request *rdma_req;
rdma_conn = calloc(1, sizeof(struct spdk_nvmf_rdma_conn));
if (rdma_conn == NULL) {
@@ -166,9 +171,32 @@ allocate_rdma_conn(struct rdma_cm_id *id, uint16_t queue_depth)
conn->transport = &spdk_nvmf_transport_rdma;
id->context = conn;
for (i = 0; i < rdma_conn->queue_depth; i++) {
rdma_req = alloc_rdma_req(conn);
if (rdma_req == NULL) {
goto alloc_error;
}
SPDK_TRACELOG(SPDK_TRACE_RDMA, "rdma_req %p: req %p, rsp %p\n",
rdma_req, &rdma_req->req,
rdma_req->req.rsp);
if (nvmf_post_rdma_recv(conn, &rdma_req->req)) {
SPDK_ERRLOG("Unable to post connection rx desc\n");
goto alloc_error;
}
STAILQ_INSERT_TAIL(&rdma_conn->rdma_reqs, rdma_req, link);
}
return rdma_conn;
alloc_error:
while ((rdma_req = STAILQ_FIRST(&rdma_conn->rdma_reqs)) != NULL) {
/* Pop the head before freeing; STAILQ_FOREACH would read the link of a freed element. */
STAILQ_REMOVE_HEAD(&rdma_conn->rdma_reqs, link);
free_rdma_req(rdma_req);
}
if (rdma_conn->cq) {
ibv_destroy_cq(rdma_conn->cq);
}
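
For context on the allocation loop above: nvmf_post_rdma_recv hands each request's capsule buffer to the NIC as a receive work request, so a command can land in it as soon as the queue pair is live. A minimal sketch of that operation against the libibverbs API (post_capsule_recv and its parameters are illustrative names, not SPDK's):

#include <stdint.h>
#include <infiniband/verbs.h>

/* Sketch only: post one receive WR for a command capsule.
 * buf/length/lkey stand in for the request's registered buffer. */
static int
post_capsule_recv(struct ibv_qp *qp, void *buf, uint32_t length,
		  uint32_t lkey, uint64_t wr_id)
{
	struct ibv_sge sge = {
		.addr = (uintptr_t)buf,
		.length = length,
		.lkey = lkey,
	};
	struct ibv_recv_wr wr = {
		.wr_id = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ibv_recv_wr *bad_wr = NULL;

	/* A nonzero return means the WR was rejected by the verbs layer. */
	return ibv_post_recv(qp, &wr, &bad_wr);
}
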
@@ -214,16 +242,6 @@ free_rdma_req(struct spdk_nvmf_rdma_request *rdma_req)
rte_free(rdma_req);
}
static void
spdk_nvmf_rdma_free_req(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(req->conn);
struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req);
STAILQ_REMOVE(&rdma_conn->rdma_reqs, rdma_req, spdk_nvmf_rdma_request, link);
free_rdma_req(rdma_req);
}
static void
spdk_nvmf_rdma_free_reqs(struct spdk_nvmf_conn *conn)
{
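
free_rdma_req releases the tracker with rte_free, which implies alloc_rdma_req carves each request out of DPDK-managed memory the NIC can DMA into. A hedged sketch of the allocation side (the helper name and the req.conn linkage are assumptions; the real function presumably also sets up the capsule buffers):

#include <rte_malloc.h>

/* Hypothetical counterpart to free_rdma_req, for illustration only. */
static struct spdk_nvmf_rdma_request *
sketch_alloc_rdma_req(struct spdk_nvmf_conn *conn)
{
	struct spdk_nvmf_rdma_request *rdma_req;

	/* rte_zmalloc returns zeroed memory from the DPDK heap. */
	rdma_req = rte_zmalloc("nvmf_rdma_req", sizeof(*rdma_req), 0);
	if (rdma_req == NULL) {
		return NULL;
	}
	rdma_req->req.conn = conn; /* assumed back-pointer */
	return rdma_req;
}
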
@@ -514,21 +532,6 @@ static int
spdk_nvmf_rdma_request_release(struct spdk_nvmf_conn *conn,
struct spdk_nvmf_request *req)
{
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
struct spdk_nvmf_capsule_cmd *capsule = &req->cmd->nvmf_cmd;
if (capsule->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
/* Special case: connect is always the first capsule and new
* work queue entries are allocated in response to this command.
* Instead of re-posting this entry, just free it.
*/
spdk_nvmf_rdma_free_req(req);
return 0;
}
}
if (nvmf_post_rdma_recv(conn, req)) {
SPDK_ERRLOG("Unable to re-post rx descriptor\n");
return -1;
@@ -537,48 +540,10 @@ spdk_nvmf_rdma_request_release(struct spdk_nvmf_conn *conn,
return 0;
}
static int
spdk_nvmf_rdma_alloc_reqs(struct spdk_nvmf_conn *conn)
{
struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(conn);
struct spdk_nvmf_rdma_request *rdma_req;
int i;
for (i = 0; i < rdma_conn->queue_depth; i++) {
rdma_req = alloc_rdma_req(conn);
if (rdma_req == NULL) {
goto fail;
}
SPDK_TRACELOG(SPDK_TRACE_RDMA, "rdma_req %p: req %p, rsp %p\n",
rdma_req, &rdma_req->req,
rdma_req->req.rsp);
if (nvmf_post_rdma_recv(conn, &rdma_req->req)) {
SPDK_ERRLOG("Unable to post connection rx desc\n");
goto fail;
}
STAILQ_INSERT_TAIL(&rdma_conn->rdma_reqs, rdma_req, link);
}
return 0;
fail:
STAILQ_FOREACH(rdma_req, &rdma_conn->rdma_reqs, link) {
STAILQ_REMOVE(&rdma_conn->rdma_reqs, rdma_req, spdk_nvmf_rdma_request, link);
free_rdma_req(rdma_req);
}
return -ENOMEM;
}
static int
nvmf_rdma_connect(struct rdma_cm_event *event)
{
struct spdk_nvmf_rdma_conn *rdma_conn = NULL;
struct spdk_nvmf_conn *conn;
struct spdk_nvmf_rdma_request *rdma_req;
struct ibv_device_attr ibdev_attr;
struct rdma_conn_param *rdma_param = NULL;
struct rdma_conn_param ctrlr_event_data;
@@ -651,18 +616,6 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
goto err1;
}
conn = &rdma_conn->conn;
/* Allocate 1 buffer suitable for the CONNECT capsule.
* Once that is received, the full queue depth will be allocated.
*/
rdma_req = alloc_rdma_req(conn);
if (nvmf_post_rdma_recv(conn, &rdma_req->req)) {
SPDK_ERRLOG("Unable to post connection rx desc\n");
goto err1;
}
STAILQ_INSERT_TAIL(&rdma_conn->rdma_reqs, rdma_req, link);
/* Add this RDMA connection to the global list until a CONNECT capsule
* is received. */
TAILQ_INSERT_TAIL(&g_pending_conns, rdma_conn, link);
@@ -1183,7 +1136,6 @@ const struct spdk_nvmf_transport spdk_nvmf_transport_rdma = {
.req_complete = spdk_nvmf_rdma_request_complete,
.conn_init = spdk_nvmf_rdma_alloc_reqs,
.conn_fini = nvmf_rdma_conn_cleanup,
.conn_poll = nvmf_check_rdma_completions,

View File

@@ -310,14 +310,6 @@ nvmf_handle_connect(spdk_event_t event)
spdk_nvmf_session_connect(conn, connect, connect_data, response);
if (conn->transport->conn_init(conn)) {
SPDK_ERRLOG("Transport connection initialization failed\n");
nvmf_disconnect(conn->sess, conn);
req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
spdk_nvmf_request_complete(req);
return;
}
SPDK_TRACELOG(SPDK_TRACE_NVMF, "connect capsule response: cntlid = 0x%04x\n",
response->status_code_specific.success.cntlid);

View File

@@ -70,11 +70,6 @@ struct spdk_nvmf_transport {
*/
int (*req_complete)(struct spdk_nvmf_request *req);
/*
* Initialize resources for a new connection.
*/
int (*conn_init)(struct spdk_nvmf_conn *conn);
/*
* Deinitialize a connection.
*/
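
With conn_init gone from this table, the core only calls into a transport to poll for completions, complete requests, and tear a connection down. A rough sketch of how a caller might drive the remaining hooks (simplified; not the actual SPDK core code, and the negative-return convention for conn_poll is an assumption):

/* Simplified sketch of a caller driving the remaining callbacks. */
static void
poll_and_reap(struct spdk_nvmf_conn *conn)
{
	if (conn->transport->conn_poll(conn) < 0) {
		/* Treat a negative poll result as fatal and clean up. */
		conn->transport->conn_fini(conn);
	}
}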