nvmf: factor out RDMA-specific request completion

Move toward making request.c transport agnostic.

Change-Id: I25fbe74fff21a5c23138e1a6e2d40bc6a4a984ec
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Daniel Verkamp 2016-06-24 16:39:53 -07:00
parent e067f3302d
commit 2e550d5165
3 changed files with 47 additions and 40 deletions

View File

@ -326,13 +326,13 @@ nvmf_post_rdma_read(struct spdk_nvmf_conn *conn,
return (rc); return (rc);
} }
int static int
nvmf_post_rdma_write(struct spdk_nvmf_conn *conn, nvmf_post_rdma_write(struct spdk_nvmf_conn *conn,
struct nvme_qp_tx_desc *tx_desc) struct nvmf_request *req)
{ {
struct ibv_send_wr wr, *bad_wr = NULL; struct ibv_send_wr wr, *bad_wr = NULL;
struct nvme_qp_rx_desc *rx_desc = tx_desc->req_state.rx_desc; struct nvme_qp_tx_desc *tx_desc = req->tx_desc;
struct nvmf_request *req = &tx_desc->req_state; struct nvme_qp_rx_desc *rx_desc = req->rx_desc;
int rc; int rc;
if (rx_desc == NULL) { if (rx_desc == NULL) {
@ -354,12 +354,12 @@ nvmf_post_rdma_write(struct spdk_nvmf_conn *conn,
return (rc); return (rc);
} }
int static int
nvmf_post_rdma_send(struct spdk_nvmf_conn *conn, nvmf_post_rdma_send(struct spdk_nvmf_conn *conn,
struct nvme_qp_tx_desc *tx_desc) struct nvmf_request *req)
{ {
struct ibv_send_wr wr, *bad_wr = NULL; struct ibv_send_wr wr, *bad_wr = NULL;
struct nvmf_request *req = &tx_desc->req_state; struct nvme_qp_tx_desc *tx_desc = req->tx_desc;
struct nvme_qp_rx_desc *rx_desc = req->rx_desc; struct nvme_qp_rx_desc *rx_desc = req->rx_desc;
int rc; int rc;
@ -388,6 +388,37 @@ nvmf_post_rdma_send(struct spdk_nvmf_conn *conn,
return (rc); return (rc);
} }
/*
 * Complete an NVMe-oF request over the RDMA transport:
 * if the command succeeded and has controller-to-host data,
 * first post an RDMA Write for the data, then post an RDMA Send
 * carrying the response capsule.
 *
 * Returns 0 on success, -1 if either post fails (the tx descriptor
 * is deactivated on the failure path).
 */
int
spdk_nvmf_rdma_request_complete(struct spdk_nvmf_conn *conn, struct nvmf_request *req)
{
struct nvme_qp_tx_desc *tx_desc = req->tx_desc;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
int ret;
/* Was the command successful? */
if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
/* Need to transfer data via RDMA Write */
ret = nvmf_post_rdma_write(conn, req);
if (ret) {
SPDK_ERRLOG("Unable to post rdma write tx descriptor\n");
goto command_fail;
}
}
/* Send the response capsule back to the host via RDMA Send. */
ret = nvmf_post_rdma_send(conn, req);
if (ret) {
SPDK_ERRLOG("Unable to send response capsule\n");
goto command_fail;
}
return 0;
/* On any post failure, release the tx descriptor so it can be reused. */
command_fail:
nvmf_deactive_tx_desc(tx_desc);
return -1;
}
int int
nvmf_post_rdma_recv(struct spdk_nvmf_conn *conn, nvmf_post_rdma_recv(struct spdk_nvmf_conn *conn,
struct nvme_qp_rx_desc *rx_desc) struct nvme_qp_rx_desc *rx_desc)

View File

@ -81,12 +81,10 @@ struct nvme_qp_tx_desc {
int nvmf_post_rdma_read(struct spdk_nvmf_conn *conn, int nvmf_post_rdma_read(struct spdk_nvmf_conn *conn,
struct nvmf_request *req); struct nvmf_request *req);
int nvmf_post_rdma_write(struct spdk_nvmf_conn *conn,
struct nvme_qp_tx_desc *tx_desc);
int nvmf_post_rdma_recv(struct spdk_nvmf_conn *conn, int nvmf_post_rdma_recv(struct spdk_nvmf_conn *conn,
struct nvme_qp_rx_desc *rx_desc); struct nvme_qp_rx_desc *rx_desc);
int nvmf_post_rdma_send(struct spdk_nvmf_conn *conn, int spdk_nvmf_rdma_request_complete(struct spdk_nvmf_conn *conn,
struct nvme_qp_tx_desc *tx_desc); struct nvmf_request *req);
int nvmf_process_pending_rdma(struct spdk_nvmf_conn *conn); int nvmf_process_pending_rdma(struct spdk_nvmf_conn *conn);
int nvmf_rdma_init(void); int nvmf_rdma_init(void);
void nvmf_rdma_conn_cleanup(struct spdk_nvmf_conn *conn); void nvmf_rdma_conn_cleanup(struct spdk_nvmf_conn *conn);

View File

@ -50,29 +50,13 @@
int int
spdk_nvmf_request_complete(struct nvmf_request *req) spdk_nvmf_request_complete(struct nvmf_request *req)
{ {
struct nvme_qp_tx_desc *tx_desc = req->tx_desc; struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
struct spdk_nvme_cpl *response;
int ret;
response = &req->rsp->nvme_cpl;
/* Was the command successful */
if (response->status.sc == SPDK_NVME_SC_SUCCESS &&
req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
/* data to be copied to host via memory RDMA */
ret = nvmf_post_rdma_write(tx_desc->conn, tx_desc);
if (ret) {
SPDK_ERRLOG("Unable to post rdma write tx descriptor\n");
goto command_fail;
}
}
/* Now send back the response */
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send nvme cmd capsule response\n"); SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send nvme cmd capsule response\n");
response->sqid = 0; response->sqid = 0;
response->status.p = 0; response->status.p = 0;
response->sqhd = tx_desc->conn->sq_head; response->sqhd = req->conn->sq_head;
response->cid = req->cid; response->cid = req->cid;
SPDK_TRACELOG(SPDK_TRACE_NVMF, SPDK_TRACELOG(SPDK_TRACE_NVMF,
@ -80,17 +64,12 @@ spdk_nvmf_request_complete(struct nvmf_request *req)
response->cdw0, response->rsvd1, response->sqhd, response->sqid, response->cid, response->cdw0, response->rsvd1, response->sqhd, response->sqid, response->cid,
*(uint16_t *)&response->status); *(uint16_t *)&response->status);
ret = nvmf_post_rdma_send(tx_desc->conn, req->tx_desc); if (spdk_nvmf_rdma_request_complete(req->conn, req)) {
if (ret) { SPDK_ERRLOG("Transport request completion error!\n");
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n"); return -1;
goto command_fail;
} }
return ret; return 0;
command_fail:
nvmf_deactive_tx_desc(tx_desc);
return ret;
} }
static int static int
@ -658,8 +637,7 @@ spdk_nvmf_request_prep_data(struct nvmf_request *req,
void *in_cap_data, uint32_t in_cap_len, void *in_cap_data, uint32_t in_cap_len,
void *bb, uint32_t bb_len) void *bb, uint32_t bb_len)
{ {
struct nvme_qp_tx_desc *tx_desc = req->tx_desc; struct spdk_nvmf_conn *conn = req->conn;
struct spdk_nvmf_conn *conn = tx_desc->conn;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
enum spdk_nvme_data_transfer xfer; enum spdk_nvme_data_transfer xfer;
int ret; int ret;