rdma: Correct WR type checking
The previous patch ce6b8a1313 added the incorrect assumption
that every WC of RDMA_WR_TYPE_DATA type must point to an rdma_req
with the IBV_WC_RDMA_READ opcode, since RDMA_WRITE operations are
non-signaled. That assumption does not hold: when the QP enters the
error state, all outstanding WRs, signaled or not, generate WCs.
Revert that part of the problematic patch.
Change-Id: I8d270c5313ebfe1ec44a338820a62f085996eb8f
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1334
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
commit 5e2101ceb2
parent 3509fc12df
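
For context, a minimal sketch of the verbs behavior the commit message describes. This is illustrative code, not part of the commit, and the helper name post_unsignaled_write is hypothetical: an RDMA_WRITE posted without IBV_SEND_SIGNALED produces no completion on success, but once the QP enters the error state every outstanding WR is flushed with a WC (status IBV_WC_WR_FLUSH_ERR), which is why the poller cannot assume that every data-WR completion is a read.

	#include <infiniband/verbs.h>

	/* Hypothetical helper: post a non-signaled RDMA_WRITE. On success the
	 * HCA generates no WC for this WR, but if the QP moves to the error
	 * state the WR is flushed and a WC is generated anyway -- the case
	 * whose handling this commit fixes. */
	static int
	post_unsignaled_write(struct ibv_qp *qp, struct ibv_sge *sge,
			      uint64_t remote_addr, uint32_t rkey, uint64_t wr_id)
	{
		struct ibv_send_wr wr = {
			.wr_id = wr_id,
			.sg_list = sge,
			.num_sge = 1,
			.opcode = IBV_WR_RDMA_WRITE,
			.send_flags = 0,	/* no IBV_SEND_SIGNALED */
		};
		struct ibv_send_wr *bad_wr = NULL;

		wr.wr.rdma.remote_addr = remote_addr;
		wr.wr.rdma.rkey = rkey;

		return ibv_post_send(qp, &wr, &bad_wr);
	}
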
@@ -3915,13 +3915,12 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
 
 			assert(rdma_req->num_outstanding_data_wr > 0);
-			assert(rdma_req->data.wr.opcode == IBV_WR_RDMA_READ);
 
 			rqpair->current_send_depth--;
-			rqpair->current_read_depth--;
 			rdma_req->num_outstanding_data_wr--;
 			if (!wc[i].status) {
 				assert(wc[i].opcode == IBV_WC_RDMA_READ);
+				rqpair->current_read_depth--;
 				/* wait for all outstanding reads associated with the same rdma_req to complete before proceeding. */
 				if (rdma_req->num_outstanding_data_wr == 0) {
 					rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
@@ -3929,9 +3928,14 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 				}
 			} else {
 				/* If the data transfer fails still force the queue into the error state,
-				 * in case of RDMA_READ, we need to force the request into a completed state */
-				if (rdma_req->num_outstanding_data_wr == 0) {
-					rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+				 * if we were performing an RDMA_READ, we need to force the request into a
+				 * completed state since it wasn't linked to a send. However, in the RDMA_WRITE
+				 * case, we should wait for the SEND to complete. */
+				if (rdma_req->data.wr.opcode == IBV_WR_RDMA_READ) {
+					rqpair->current_read_depth--;
+					if (rdma_req->num_outstanding_data_wr == 0) {
+						rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+					}
 				}
 			}
 			break;
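
A note on the error path in the second hunk: for a failed completion, most fields of struct ibv_wc, including opcode, are not valid, so the operation type has to be recovered from the WR that was posted (here rdma_req->data.wr.opcode) rather than from wc[i].opcode. A minimal sketch of a drain loop making the same distinction (illustrative only; drain_cq and the batch size are not SPDK code):

	#include <infiniband/verbs.h>

	/* Hypothetical CQ drain loop. Once a QP is in the error state, every
	 * outstanding WR -- signaled or not -- completes with a flush error,
	 * so non-signaled writes surface here too. wc.opcode is only valid
	 * on success; a failed WC must be classified via the WR tracked by
	 * its wr_id. */
	static void
	drain_cq(struct ibv_cq *cq)
	{
		struct ibv_wc wc[32];
		int n, i;

		while ((n = ibv_poll_cq(cq, 32, wc)) > 0) {
			for (i = 0; i < n; i++) {
				if (wc[i].status != IBV_WC_SUCCESS) {
					/* flushed/failed WR: look at the WR we
					 * posted (via wc[i].wr_id), not wc[i].opcode */
					continue;
				}
				/* success: wc[i].opcode is valid here */
			}
		}
	}
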