nvmf: Delay allocating rdma requests until CONNECT capsule.

The queue type and queue depth are not known until
the CONNECT capsule is processed. Delay allocating
more than one recv WQE until then.

Change-Id: I0e68c24bc3d6f37043946de6c2cbcb3198cd5d1b
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Author: Ben Walker <benjamin.walker@intel.com>
Date:   2016-06-28 09:51:15 -07:00
Parent: ed3e30bb07
Commit: 554543168f
3 changed files with 44 additions and 7 deletions
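
The shape of the change is a two-phase allocation: post one receive big enough for a CONNECT capsule, then grow the pool to the negotiated depth once the capsule is processed. A minimal standalone sketch of that flow follows; every name in it (sketch_conn, sketch_post_initial_recv, sketch_on_connect) is a stand-in, not SPDK's API.

/* Two-phase allocation sketch with hypothetical stand-in types.
 * Phase 1 runs at RDMA connection time; phase 2 runs when the
 * CONNECT capsule arrives and reveals the negotiated queue depth. */
#include <stdint.h>
#include <stdio.h>

struct sketch_conn {
	uint16_t queue_depth;	/* unknown until CONNECT is processed */
	uint16_t posted_recvs;	/* recv WQEs currently outstanding */
};

/* Phase 1: exactly one recv WQE, big enough for a CONNECT capsule. */
static void sketch_post_initial_recv(struct sketch_conn *conn)
{
	conn->posted_recvs = 1;
}

/* Phase 2: SQSIZE in the CONNECT capsule is a 0's-based value, so the
 * full depth is sqsize + 1; post the remaining recv WQEs. */
static void sketch_on_connect(struct sketch_conn *conn, uint16_t sqsize)
{
	conn->queue_depth = (uint16_t)(sqsize + 1);
	while (conn->posted_recvs < conn->queue_depth) {
		conn->posted_recvs++;	/* stands in for posting a real recv */
	}
}

int main(void)
{
	struct sketch_conn conn = {0};

	sketch_post_initial_recv(&conn);
	sketch_on_connect(&conn, 127);	/* host asked for a depth of 128 */
	printf("posted %u of %u recv WQEs\n",
	       (unsigned)conn.posted_recvs, (unsigned)conn.queue_depth);
	return 0;
}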

@@ -155,6 +155,15 @@ free_rdma_req(struct spdk_nvmf_rdma_request *rdma_req)
 	rte_free(rdma_req);
 }
+
+void
+spdk_nvmf_rdma_free_req(struct spdk_nvmf_request *req)
+{
+	struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req);
+
+	STAILQ_REMOVE(&req->conn->rdma.rdma_reqs, rdma_req, spdk_nvmf_rdma_request, link);
+	free_rdma_req(rdma_req);
+}
 
 void
 spdk_nvmf_rdma_free_reqs(struct spdk_nvmf_conn *conn)
 {
@@ -270,8 +279,10 @@ nvmf_rdma_conn_cleanup(struct spdk_nvmf_conn *conn)
 static void
 nvmf_trace_ibv_sge(struct ibv_sge *sg_list)
 {
-	SPDK_TRACELOG(SPDK_TRACE_RDMA, "local addr %p length 0x%x lkey 0x%x\n",
-		      (void *)sg_list->addr, sg_list->length, sg_list->lkey);
+	if (sg_list) {
+		SPDK_TRACELOG(SPDK_TRACE_RDMA, "local addr %p length 0x%x lkey 0x%x\n",
+			      (void *)sg_list->addr, sg_list->length, sg_list->lkey);
+	}
 }
 
 static void
@@ -494,6 +505,7 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
 	struct spdk_nvmf_fabric_intf *fabric_intf;
 	struct rdma_cm_id *conn_id;
 	struct spdk_nvmf_conn *conn;
+	struct spdk_nvmf_rdma_request *rdma_req;
 	struct ibv_device_attr ibdev_attr;
 	struct sockaddr_in *addr;
 	struct rdma_conn_param *host_event_data = NULL;
@@ -591,13 +603,15 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
 	STAILQ_INIT(&conn->rdma.rdma_reqs);
 
-	/* Allocate Buffers */
-	rc = spdk_nvmf_rdma_alloc_reqs(conn);
-	if (rc) {
-		SPDK_ERRLOG("Unable to allocate connection RDMA requests\n");
+	/* Allocate 1 buffer suitable for the CONNECT capsule.
+	 * Once that is received, the full queue depth will be allocated.
+	 */
+	rdma_req = alloc_rdma_req(conn);
+	if (nvmf_post_rdma_recv(conn, &rdma_req->req)) {
+		SPDK_ERRLOG("Unable to post connection rx desc\n");
 		goto err1;
 	}
-	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "RDMA requests allocated\n");
+	STAILQ_INSERT_TAIL(&conn->rdma.rdma_reqs, rdma_req, link);
 
 	rc = spdk_nvmf_startup_conn(conn);
 	if (rc) {
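
nvmf_post_rdma_recv above ultimately boils down to an ibv_post_recv call on the connection's queue pair. For reference, a self-contained sketch of posting one receive at the verbs level; post_one_recv is a hypothetical helper, and the QP and registered buffer (with its lkey) are assumed to be set up elsewhere.

/* Verbs-level sketch of posting a single receive. */
#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

static int post_one_recv(struct ibv_qp *qp, void *buf, uint32_t len,
			 uint32_t lkey, uint64_t wr_id)
{
	struct ibv_sge sge;
	struct ibv_recv_wr wr;
	struct ibv_recv_wr *bad_wr = NULL;

	sge.addr = (uintptr_t)buf;	/* start of the registered buffer */
	sge.length = len;
	sge.lkey = lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = wr_id;	/* echoed back in the completion */
	wr.sg_list = &sge;
	wr.num_sge = 1;

	/* Nonzero means the recv queue is full or the QP is in the
	 * wrong state; the caller tears the connection down. */
	return ibv_post_recv(qp, &wr, &bad_wr);
}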

@@ -81,6 +81,7 @@ int spdk_nvmf_rdma_request_release(struct spdk_nvmf_conn *conn,
 int nvmf_rdma_init(void);
 int spdk_nvmf_rdma_alloc_reqs(struct spdk_nvmf_conn *conn);
 void spdk_nvmf_rdma_free_reqs(struct spdk_nvmf_conn *conn);
+void spdk_nvmf_rdma_free_req(struct spdk_nvmf_request *req);
 void nvmf_rdma_conn_cleanup(struct spdk_nvmf_conn *conn);
 int nvmf_acceptor_start(void);

@@ -73,6 +73,21 @@ spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
 int
 spdk_nvmf_request_release(struct spdk_nvmf_request *req)
 {
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvmf_capsule_cmd *capsule;
+
+	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
+		capsule = &req->cmd->nvmf_cmd;
+		if (capsule->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
+			/* Special case: connect is always the first capsule and new
+			 * work queue entries are allocated in response to this command.
+			 * Instead of re-posting this entry, just free it.
+			 */
+			spdk_nvmf_rdma_free_req(req);
+			return 0;
+		}
+	}
+
 	return spdk_nvmf_rdma_request_release(req->conn, req);
 }
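
The capsule test above hinges on two constants fixed by the NVMe over Fabrics spec: the Fabrics command opcode (0x7f) and the Connect fctype (0x01). A standalone illustration, using a stand-in command struct (stub_cmd and is_connect_capsule are hypothetical names) rather than SPDK's types:

/* The opcode and fctype values come from the NVMe-oF spec;
 * the struct layout here is illustrative only. */
#include <stdbool.h>
#include <stdint.h>

#define OPC_FABRIC	0x7f	/* Fabrics command opcode */
#define FCTYPE_CONNECT	0x01	/* Connect fabrics command type */

struct stub_cmd {
	uint8_t opc;
	uint8_t fctype;
};

static bool is_connect_capsule(const struct stub_cmd *cmd)
{
	return cmd->opc == OPC_FABRIC && cmd->fctype == FCTYPE_CONNECT;
}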
@@ -433,6 +448,13 @@ nvmf_process_connect(struct spdk_nvmf_request *req)
 		}
 	}
 
+	/* Allocate RDMA reqs according to the queue depth and conn type */
+	if (spdk_nvmf_rdma_alloc_reqs(conn)) {
+		SPDK_ERRLOG("Unable to allocate sufficient RDMA work requests\n");
+		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+		return true;
+	}
+
 	SPDK_TRACELOG(SPDK_TRACE_NVMF, "connect capsule response: cntlid = 0x%04x\n",
 		      response->status_code_specific.success.cntlid);
 	return true;
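
At this point spdk_nvmf_rdma_alloc_reqs can size its pool from the CONNECT parameters: QID 0 identifies the admin queue, everything else an I/O queue, and SQSIZE is a 0's-based value. A hypothetical sketch of that sizing decision; the helper name and the cap constants are illustrative, not taken from SPDK.

/* Hypothetical sizing helper: clamp the 0's-based SQSIZE from the
 * CONNECT capsule against a per-queue-type maximum. */
#include <stdint.h>

#define SKETCH_MAX_ADMIN_QUEUE_DEPTH	32u
#define SKETCH_MAX_IO_QUEUE_DEPTH	128u

static uint16_t sketch_queue_depth(uint16_t qid, uint16_t sqsize)
{
	uint16_t requested = (uint16_t)(sqsize + 1);	/* SQSIZE is 0's based */
	uint16_t cap = (qid == 0) ? SKETCH_MAX_ADMIN_QUEUE_DEPTH
				  : SKETCH_MAX_IO_QUEUE_DEPTH;

	return requested < cap ? requested : cap;
}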