diff --git a/lib/nvmf/fc.c b/lib/nvmf/fc.c
index f4f7e10d4..d33f0c353 100644
--- a/lib/nvmf/fc.c
+++ b/lib/nvmf/fc.c
@@ -330,52 +330,80 @@ nvmf_fc_handle_assoc_deletion(void *arg)
 				      fc_conn->fc_assoc->assoc_id, false, true, NULL, NULL);
 }
 
-static int
-nvmf_fc_create_req_mempool(struct spdk_nvmf_fc_hwqp *hwqp)
+void
+nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
 {
-	uint32_t i;
-	struct spdk_nvmf_fc_request *fc_req;
+	free(fc_conn->pool_memory);
+	fc_conn->pool_memory = NULL;
+}
 
-	TAILQ_INIT(&hwqp->free_reqs);
-	TAILQ_INIT(&hwqp->in_use_reqs);
+int
+nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
+{
+	uint32_t i, qd;
+	struct spdk_nvmf_fc_pooled_request *req;
 
-	hwqp->fc_reqs_buf = calloc(hwqp->rq_size, sizeof(struct spdk_nvmf_fc_request));
-	if (hwqp->fc_reqs_buf == NULL) {
-		SPDK_ERRLOG("create fc request pool failed\n");
-		return -ENOMEM;
+	/*
+	 * Create number of fc-requests to be more than the actual SQ size.
+	 * This is to handle race conditions where the target driver may send
+	 * back a RSP and before the target driver gets to process the CQE
+	 * for the RSP, the initiator may have sent a new command.
+	 * Depending on the load on the HWQP, there is a slim possibility
+	 * that the target reaps the RQE corresponding to the new
+	 * command before processing the CQE corresponding to the RSP.
+	 */
+	qd = fc_conn->max_queue_depth * 2;
+
+	STAILQ_INIT(&fc_conn->pool_queue);
+	fc_conn->pool_memory = calloc((fc_conn->max_queue_depth * 2),
+				      sizeof(struct spdk_nvmf_fc_request));
+	if (!fc_conn->pool_memory) {
+		SPDK_ERRLOG("create fc req ring objects failed\n");
+		goto error;
 	}
+	fc_conn->pool_size = qd;
+	fc_conn->pool_free_elems = qd;
 
-	for (i = 0; i < hwqp->rq_size; i++) {
-		fc_req = hwqp->fc_reqs_buf + i;
+	/* Initialise value in ring objects and link the objects */
+	for (i = 0; i < qd; i++) {
+		req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory +
+				i * sizeof(struct spdk_nvmf_fc_request));
 
-		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
-		TAILQ_INSERT_TAIL(&hwqp->free_reqs, fc_req, link);
+		STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link);
 	}
-	return 0;
+	return 0;
+
+error:
+	nvmf_fc_free_conn_reqpool(fc_conn);
+	return -1;
 }
 
 static inline struct spdk_nvmf_fc_request *
-nvmf_fc_hwqp_alloc_fc_request(struct spdk_nvmf_fc_hwqp *hwqp)
+nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn)
 {
 	struct spdk_nvmf_fc_request *fc_req;
+	struct spdk_nvmf_fc_pooled_request *pooled_req;
+	struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp;
 
-	if (TAILQ_EMPTY(&hwqp->free_reqs)) {
+	pooled_req = STAILQ_FIRST(&fc_conn->pool_queue);
+	if (!pooled_req) {
 		SPDK_ERRLOG("Alloc request buffer failed\n");
 		return NULL;
 	}
+	STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link);
+	fc_conn->pool_free_elems -= 1;
 
-	fc_req = TAILQ_FIRST(&hwqp->free_reqs);
-	TAILQ_REMOVE(&hwqp->free_reqs, fc_req, link);
-
+	fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
+	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
+	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
+	TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
 	TAILQ_INIT(&fc_req->abort_cbs);
 	return fc_req;
 }
 
 static inline void
-nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_request *fc_req)
+nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
 {
 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
 		/* Log an error for debug purpose. */
@@ -385,8 +413,11 @@ nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc
 	/* set the magic to mark req as no longer valid. */
 	fc_req->magic = 0xDEADBEEF;
 
-	TAILQ_REMOVE(&hwqp->in_use_reqs, fc_req, link);
-	TAILQ_INSERT_HEAD(&hwqp->free_reqs, fc_req, link);
+	TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
+	TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);
+
+	STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
+	fc_conn->pool_free_elems += 1;
 }
 
 struct spdk_nvmf_fc_conn *
@@ -418,10 +449,7 @@ nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *h
 	/* clear counters */
 	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));
 
-	if (&fc_port->ls_queue != hwqp) {
-		nvmf_fc_create_req_mempool(hwqp);
-	}
-
+	TAILQ_INIT(&hwqp->in_use_reqs);
 	TAILQ_INIT(&hwqp->connection_list);
 	TAILQ_INIT(&hwqp->sync_cbs);
 	TAILQ_INIT(&hwqp->ls_pending_queue);
@@ -843,17 +871,9 @@ static void
 nvmf_fc_port_cleanup(void)
 {
 	struct spdk_nvmf_fc_port *fc_port, *tmp;
-	struct spdk_nvmf_fc_hwqp *hwqp;
-	uint32_t i;
 
 	TAILQ_FOREACH_SAFE(fc_port, &g_spdk_nvmf_fc_port_list, link, tmp) {
 		TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);
-		for (i = 0; i < fc_port->num_io_queues; i++) {
-			hwqp = &fc_port->io_queues[i];
-			if (hwqp->fc_reqs_buf) {
-				free(hwqp->fc_reqs_buf);
-			}
-		}
 		free(fc_port);
 	}
 }
@@ -1111,21 +1131,26 @@ nvmf_fc_request_abort_complete(void *arg1)
 	struct spdk_nvmf_fc_request *fc_req =
 		(struct spdk_nvmf_fc_request *)arg1;
 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
+	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;
 
-	/* Request abort completed. Notify all the callbacks */
-	TAILQ_FOREACH_SAFE(ctx, &fc_req->abort_cbs, link, tmp) {
-		/* Notify */
-		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
-		/* Remove */
-		TAILQ_REMOVE(&fc_req->abort_cbs, ctx, link);
-		/* free */
-		free(ctx);
-	}
+	/* Make a copy of the cb list from fc_req */
+	TAILQ_INIT(&abort_cbs);
+	TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);
 
 	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
 		       fc_req_state_strs[fc_req->state]);
 
 	_nvmf_fc_request_free(fc_req);
+
+	/* Request abort completed. Notify all the callbacks */
+	TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) {
+		/* Notify */
+		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
+		/* Remove */
+		TAILQ_REMOVE(&abort_cbs, ctx, link);
+		/* free */
+		free(ctx);
+	}
 }
 
 void
@@ -1353,7 +1378,7 @@ nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_
 	}
 
 	/* allocate a request buffer */
-	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);
+	fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn);
 	if (fc_req == NULL) {
 		return -ENOMEM;
 	}
@@ -1413,7 +1438,7 @@ _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
 	fc_req->req.iovcnt = 0;
 
 	/* Free Fc request */
-	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);
+	nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req);
 }
 
 void
@@ -2153,7 +2178,6 @@ nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
 		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
 
 		hwqp->hwqp_id = i;
 		hwqp->queues = args->io_queues[i];
-		hwqp->rq_size = args->io_queue_size;
 		nvmf_fc_init_hwqp(fc_port, hwqp);
 	}
diff --git a/lib/nvmf/fc_ls.c b/lib/nvmf/fc_ls.c
index 27e2faa0b..b15a7609c 100644
--- a/lib/nvmf/fc_ls.c
+++ b/lib/nvmf/fc_ls.c
@@ -187,16 +187,6 @@ nvmf_fc_ls_format_rjt(void *buf, uint16_t buflen, uint8_t ls_cmd,
 static inline void
 nvmf_fc_ls_free_association(struct spdk_nvmf_fc_association *assoc)
 {
-	struct spdk_nvmf_fc_conn *fc_conn;
-
-	/* return the q slots of the conns for the association */
-	TAILQ_FOREACH(fc_conn, &assoc->avail_fc_conns, assoc_avail_link) {
-		if (fc_conn->conn_id != NVMF_FC_INVALID_CONN_ID) {
-			nvmf_fc_release_conn(fc_conn->hwqp, fc_conn->conn_id,
-					     fc_conn->max_queue_depth);
-		}
-	}
-
 	/* free assocation's send disconnect buffer */
 	if (assoc->snd_disconn_bufs) {
 		nvmf_fc_free_srsr_bufs(assoc->snd_disconn_bufs);
@@ -229,10 +219,6 @@ nvmf_fc_ls_alloc_connections(struct spdk_nvmf_fc_association *assoc,
 	for (i = 0; i < nvmf_transport->opts.max_qpairs_per_ctrlr; i++) {
 		fc_conn = assoc->conns_buf + (i * sizeof(struct spdk_nvmf_fc_conn));
-		fc_conn->conn_id = NVMF_FC_INVALID_CONN_ID;
-		fc_conn->qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
-		fc_conn->qpair.transport = nvmf_transport;
-
 		TAILQ_INSERT_TAIL(&assoc->avail_fc_conns, fc_conn, assoc_avail_link);
 	}
 
@@ -285,6 +271,7 @@ nvmf_fc_ls_new_association(uint32_t s_id,
 	assoc->tgtport = tgtport;
 	assoc->rport = rport;
 	assoc->subsystem = subsys;
+	assoc->nvmf_transport = nvmf_transport;
 	assoc->assoc_state = SPDK_NVMF_FC_OBJECT_CREATED;
 	memcpy(assoc->host_id, a_cmd->hostid, FCNVME_ASSOC_HOSTID_LEN);
 	memcpy(assoc->host_nqn, a_cmd->hostnqn, SPDK_NVME_NQN_FIELD_SIZE);
@@ -337,7 +324,11 @@ nvmf_fc_ls_new_connection(struct spdk_nvmf_fc_association *assoc, uint16_t qid,
 	/* Remove from avail list and add to in use. */
 	TAILQ_REMOVE(&assoc->avail_fc_conns, fc_conn, assoc_avail_link);
+	memset(fc_conn, 0, sizeof(struct spdk_nvmf_fc_conn));
+
+	/* Add conn to association's connection list */
 	TAILQ_INSERT_TAIL(&assoc->fc_conns, fc_conn, assoc_link);
+	assoc->conn_count++;
 
 	if (qid == 0) {	/* AdminQ connection. */
@@ -346,13 +337,18 @@ nvmf_fc_ls_new_connection(struct spdk_nvmf_fc_association *assoc, uint16_t qid,
 
 	fc_conn->qpair.qid = qid;
 	fc_conn->qpair.sq_head_max = sq_size;
+	fc_conn->qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
+	fc_conn->qpair.transport = assoc->nvmf_transport;
 	TAILQ_INIT(&fc_conn->qpair.outstanding);
+
+	fc_conn->conn_id = NVMF_FC_INVALID_CONN_ID;
 	fc_conn->esrp_ratio = esrp_ratio;
 	fc_conn->fc_assoc = assoc;
 	fc_conn->s_id = assoc->s_id;
 	fc_conn->d_id = assoc->tgtport->d_id;
 	fc_conn->rpi = rpi;
 	fc_conn->max_queue_depth = sq_size + 1;
+	TAILQ_INIT(&fc_conn->in_use_reqs);
 
 	/* save target port trid in connection (for subsystem
 	 * listener validation in fabric connect command)
@@ -367,6 +363,7 @@ nvmf_fc_ls_new_connection(struct spdk_nvmf_fc_association *assoc, uint16_t qid,
 static inline void
 nvmf_fc_ls_free_connection(struct spdk_nvmf_fc_conn *fc_conn)
 {
+	nvmf_fc_free_conn_reqpool(fc_conn);
 	TAILQ_INSERT_TAIL(&fc_conn->fc_assoc->avail_fc_conns, fc_conn, assoc_avail_link);
 }
 
@@ -548,6 +545,7 @@ nvmf_fc_ls_add_conn_failure(
 					     FCNVME_RJT_RC_INSUFF_RES,
 					     FCNVME_RJT_EXP_NONE, 0);
 
+	TAILQ_REMOVE(&assoc->fc_conns, fc_conn, assoc_link);
 	nvmf_fc_ls_free_connection(fc_conn);
 	if (aq_conn) {
 		nvmf_fc_del_assoc_from_tgt_port(assoc);
@@ -572,16 +570,19 @@ nvmf_fc_ls_add_conn_to_poller(
 		      "assoc_id 0x%lx conn_id 0x%lx\n",
 		      assoc->assoc_id, fc_conn->conn_id);
 
+	/* Create fc_req pool for this connection */
+	if (nvmf_fc_create_conn_reqpool(fc_conn)) {
+		SPDK_ERRLOG("allocate fc_req pool failed\n");
+		goto error;
+	}
+
 	opd = calloc(1, sizeof(struct nvmf_fc_ls_op_ctx));
 	if (!opd) {
 		SPDK_ERRLOG("allocate api data for add conn op failed\n");
-		nvmf_fc_ls_add_conn_failure(assoc, ls_rqst, fc_conn, aq_conn);
-		return;
+		goto error;
 	}
 
-	/* insert conn in association's connection list */
 	api_data = &opd->u.add_conn;
-	assoc->conn_count++;
 
 	api_data->args.fc_conn = fc_conn;
 	api_data->args.cb_info.cb_thread = spdk_get_thread();
@@ -597,6 +598,10 @@ nvmf_fc_ls_add_conn_to_poller(
 	/* Let the nvmf_tgt decide which pollgroup to use. */
 	fc_conn->create_opd = opd;
 	spdk_nvmf_tgt_new_qpair(ls_rqst->nvmf_tgt, &fc_conn->qpair);
+	return;
+error:
+	nvmf_fc_free_conn_reqpool(fc_conn);
+	nvmf_fc_ls_add_conn_failure(assoc, ls_rqst, fc_conn, aq_conn);
 }
 
 /* Delete association functions */
diff --git a/lib/nvmf/nvmf_fc.h b/lib/nvmf/nvmf_fc.h
index e9704d8bf..72409a024 100644
--- a/lib/nvmf/nvmf_fc.h
+++ b/lib/nvmf/nvmf_fc.h
@@ -240,6 +240,23 @@ struct spdk_nvmf_fc_conn {
 	/* for hwqp's connection list */
 	TAILQ_ENTRY(spdk_nvmf_fc_conn) link;
 
+	/* for hwqp's rport connection list link */
+	TAILQ_ENTRY(spdk_nvmf_fc_conn) rport_link;
+
+	/* Per connection fc_req pool */
+	STAILQ_HEAD(, spdk_nvmf_fc_pooled_request) pool_queue;
+
+	/* Memory for the fc_req pool objects */
+	struct spdk_nvmf_fc_pooled_request *pool_memory;
+
+	/* Pool size */
+	uint32_t pool_size;
+
+	/* Current free elem in pool */
+	uint32_t pool_free_elems;
+
+	TAILQ_HEAD(, spdk_nvmf_fc_request) in_use_reqs;
+
 	/* New QP create context. */
 	struct nvmf_fc_ls_op_ctx *create_opd;
 };
 
@@ -277,7 +294,6 @@ struct spdk_nvmf_fc_hwqp {
 	uint32_t lcore_id;   /* core hwqp is running on (for tracing purposes only) */
 	struct spdk_thread *thread;  /* thread hwqp is running on */
 	uint32_t hwqp_id;    /* A unique id (per physical port) for a hwqp */
-	uint32_t rq_size;    /* receive queue size */
 	spdk_nvmf_fc_lld_hwqp_t queues;    /* vendor HW queue set */
 	struct spdk_nvmf_fc_port *fc_port; /* HW port structure for these queues */
 	struct spdk_nvmf_fc_poll_group *fgroup;
@@ -286,8 +302,6 @@ struct spdk_nvmf_fc_hwqp {
 	TAILQ_HEAD(, spdk_nvmf_fc_conn) connection_list;
 	uint32_t num_conns; /* number of connections to queue */
 
-	struct spdk_nvmf_fc_request *fc_reqs_buf;
-	TAILQ_HEAD(, spdk_nvmf_fc_request) free_reqs;
 	TAILQ_HEAD(, spdk_nvmf_fc_request) in_use_reqs;
 
 	struct spdk_nvmf_fc_errors counters;
@@ -346,13 +360,16 @@ struct spdk_nvmf_fc_request {
 	uint32_t s_id;
 	uint32_t d_id;
 	TAILQ_ENTRY(spdk_nvmf_fc_request) link;
-	STAILQ_ENTRY(spdk_nvmf_fc_request) pending_link;
+	TAILQ_ENTRY(spdk_nvmf_fc_request) conn_link;
 	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;
 };
 
 SPDK_STATIC_ASSERT(!offsetof(struct spdk_nvmf_fc_request, req),
 		   "FC request and NVMF request address don't match.");
 
+struct spdk_nvmf_fc_pooled_request {
+	STAILQ_ENTRY(spdk_nvmf_fc_pooled_request) pool_link;
+};
 
 /*
  * NVMF FC Association
  */
@@ -363,6 +380,7 @@ struct spdk_nvmf_fc_association {
 	uint64_t assoc_id;
 	struct spdk_nvmf_fc_nport *tgtport;
 	struct spdk_nvmf_fc_remote_port_info *rport;
 	struct spdk_nvmf_subsystem *subsystem;
+	struct spdk_nvmf_transport *nvmf_transport;
 	enum spdk_nvmf_fc_object_state assoc_state;
 	char host_id[FCNVME_ASSOC_HOSTID_LEN];
@@ -978,4 +996,8 @@ bool nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
 
 int nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *req);
 
+int nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn);
+
+void nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn);
+
 #endif
diff --git a/test/unit/lib/nvmf/fc.c/fc_ut.c b/test/unit/lib/nvmf/fc.c/fc_ut.c
index 0f588641f..07f6dafa3 100644
--- a/test/unit/lib/nvmf/fc.c/fc_ut.c
+++ b/test/unit/lib/nvmf/fc.c/fc_ut.c
@@ -225,8 +225,6 @@ DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
 DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
 	    (struct spdk_nvmf_fc_hwqp *queues, uint32_t num_queues, uint64_t conn_id), NULL);
 
-DEFINE_STUB_V(nvmf_fc_release_conn, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
-				     uint32_t sq_size));
 
 DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
 		struct spdk_nvmf_fc_hwqp *io_queues, uint32_t num_io_queues,
diff --git a/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c b/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c
index 6f5bc42ac..8c3fee1de 100644
--- a/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c
+++ b/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c
@@ -74,7 +74,6 @@ static struct spdk_nvmf_transport_opts g_nvmf_transport_opts = {
 	.max_qpairs_per_ctrlr = 4,
 	.max_aq_depth = 32,
 };
-static uint32_t g_hw_queue_depth = 1024;
 static struct spdk_nvmf_subsystem g_nvmf_subsystem;
 
 void nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
@@ -143,39 +142,33 @@ spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_
 void
 spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
 {
-	uint32_t i;
 	struct spdk_nvmf_fc_conn *fc_conn;
-	struct spdk_nvmf_fc_hwqp *hwqp = NULL, *sel_hwqp = NULL;
+	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
 	struct spdk_nvmf_fc_port *fc_port;
+	static int hwqp_idx = 0;
 
 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
 	api_data = &fc_conn->create_opd->u.add_conn;
 
-	/* Pick a hwqp with least load */
 	fc_port = fc_conn->fc_assoc->tgtport->fc_port;
-	for (i = 0; i < fc_port->num_io_queues; i ++) {
-		hwqp = &fc_port->io_queues[i];
-		if (!sel_hwqp || (hwqp->rq_size > sel_hwqp->rq_size)) {
-			sel_hwqp = hwqp;
-		}
-	}
+	hwqp = &fc_port->io_queues[hwqp_idx];
 
-	if (!nvmf_fc_assign_conn_to_hwqp(sel_hwqp,
+	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
 					 &fc_conn->conn_id,
 					 fc_conn->max_queue_depth)) {
 		goto err;
 	}
 
-	fc_conn->hwqp = sel_hwqp;
+	fc_conn->hwqp = hwqp;
 
 	/* If this is for ADMIN connection, then update assoc ID. */
 	if (fc_conn->qpair.qid == 0) {
 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
 	}
 
-	nvmf_fc_poller_api_func(sel_hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
-
+	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
+	hwqp_idx++;
 	return;
err:
 	nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
@@ -196,6 +189,17 @@ nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
 	return NULL;
 }
 
+void
+nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
+{
+}
+
+int
+nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
+{
+	return 0;
+}
+
 /*
  * LLD functions
  */
@@ -213,20 +217,14 @@ nvmf_fc_assign_conn_to_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
 {
 	SPDK_DEBUGLOG(nvmf_fc_ls, "Assign connection to HWQP\n");
-
-	if (hwqp->rq_size < sq_size) {
-		return false; /* queue has no space for this connection */
-	}
-
-	hwqp->rq_size -= sq_size;
 	hwqp->num_conns++;
 
 	/* create connection ID */
 	*conn_id = nvmf_fc_gen_conn_id(hwqp->hwqp_id, hwqp);
 
 	SPDK_DEBUGLOG(nvmf_fc_ls,
-		      "New connection assigned to HWQP%d (free %d), conn_id 0x%lx\n",
-		      hwqp->hwqp_id, hwqp->rq_size, *conn_id);
+		      "New connection assigned to HWQP%d, conn_id 0x%lx\n",
+		      hwqp->hwqp_id, *conn_id);
 	return true;
 }
 
@@ -237,13 +235,6 @@ nvmf_fc_get_hwqp_from_conn_id(struct spdk_nvmf_fc_hwqp *queues,
 	return &queues[(conn_id & 0xff) % num_queues];
 }
 
-void
-nvmf_fc_release_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
-		     uint32_t sq_size)
-{
-	hwqp->rq_size += sq_size;
-}
-
 struct spdk_nvmf_fc_srsr_bufs *
 nvmf_fc_alloc_srsr_bufs(size_t rqst_len, size_t rsp_len)
 {
@@ -293,7 +284,6 @@ enum _test_run_type {
 static uint32_t g_test_run_type = 0;
 static uint64_t g_curr_assoc_id = 0;
 static uint16_t g_create_conn_test_cnt = 0;
-static uint16_t g_max_assoc_conn_test = 0;
 static int g_last_rslt = 0;
 static bool g_spdk_nvmf_fc_xmt_srsr_req = false;
 static struct spdk_nvmf_fc_remote_port_info g_rem_port;
@@ -430,12 +420,6 @@ run_disconn_test(struct spdk_nvmf_fc_nport *tgt_port,
 	poll_thread(0);
 }
 
-static void
-disconnect_assoc_cb(void *cb_data, uint32_t err)
-{
-	CU_ASSERT(err == 0);
-}
-
 static int
 handle_ca_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst, bool max_assoc_test)
 {
@@ -516,8 +500,6 @@ handle_cc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
 					  FCNVME_RJT_RC_INV_PARAM);
 				CU_ASSERT(rjt->rjt.reason_explanation ==
 					  FCNVME_RJT_EXP_INV_Q_ID);
-			} else if (!g_max_assoc_conn_test) {
-				CU_FAIL("Unexpected reject response create connection");
 			}
 		} else {
 			CU_FAIL("Unexpected response code for create connection");
@@ -632,8 +614,6 @@ static struct spdk_nvmf_fc_port g_fc_port = {
 
 static struct spdk_nvmf_fc_nport g_tgt_port;
 
-static uint64_t assoc_id[1024];
-
 #define FC_LS_UT_MAX_IO_QUEUES 16
 struct spdk_nvmf_fc_hwqp g_fc_hwqp[FC_LS_UT_MAX_IO_QUEUES];
 struct spdk_nvmf_fc_poll_group g_fgroup[FC_LS_UT_MAX_IO_QUEUES];
@@ -690,7 +670,6 @@ ls_tests_init(void)
 		hwqp->thread = NULL;
 		hwqp->fc_port = &g_fc_port;
 		hwqp->num_conns = 0;
-		hwqp->rq_size = g_hw_queue_depth;
 		TAILQ_INIT(&hwqp->connection_list);
 		TAILQ_INIT(&hwqp->in_use_reqs);
 
@@ -777,63 +756,6 @@ invalid_connection_test(void)
 	run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, 1);
 }
 
-static void
-create_max_aq_conns_test(void)
-{
-	/* run test to create max. associations with max. connections */
-	uint32_t i, j;
-	uint32_t create_assoc_test_cnt = 0;
-
-	setup_polling_threads();
-	g_max_assoc_conn_test = 1;
-	g_last_rslt = 0;
-	while (1) {
-		g_test_run_type = TEST_RUN_TYPE_CREATE_MAX_ASSOC;
-		run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
-		if (g_last_rslt == 0) {
-			assoc_id[create_assoc_test_cnt++] = g_curr_assoc_id;
-			g_test_run_type = TEST_RUN_TYPE_CREATE_CONN;
-			for (j = 1; j < g_nvmf_transport.opts.max_qpairs_per_ctrlr; j++) {
-				if (g_last_rslt == 0) {
-					run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, (uint16_t) j);
-				}
-			}
-		} else {
-			break;
-		}
-	}
-
-	if (g_last_rslt == LAST_RSLT_STOP_TEST) {
-		uint32_t ma = (((g_hw_queue_depth / g_nvmf_transport.opts.max_queue_depth) *
-				(g_fc_port.num_io_queues - 1))) /
-			      (g_nvmf_transport.opts.max_qpairs_per_ctrlr - 1);
-		if (create_assoc_test_cnt < ma) {
-			printf("(%d assocs - should be %d) ", create_assoc_test_cnt, ma);
-			CU_FAIL("Didn't create max. associations");
-		} else {
-			printf("(%d assocs.) ", create_assoc_test_cnt);
-		}
-		g_last_rslt = 0;
-	}
-
-	for (i = 0; i < create_assoc_test_cnt; i++) {
-		int ret;
-		g_spdk_nvmf_fc_xmt_srsr_req = false;
-		ret = nvmf_fc_delete_association(&g_tgt_port, from_be64(&assoc_id[i]), true, false,
-						 disconnect_assoc_cb, 0);
-		CU_ASSERT(ret == 0);
-		poll_thread(0);
-
-#if (NVMF_FC_LS_SEND_LS_DISCONNECT == 1)
-		if (ret == 0) {
-			/* check that LS disconnect was sent */
-			CU_ASSERT(g_spdk_nvmf_fc_xmt_srsr_req);
-		}
-#endif
-	}
-	g_max_assoc_conn_test = 0;
-}
-
 static void
 xmt_ls_rsp_failure_test(void)
 {
@@ -934,8 +856,6 @@ usage(const char *program_name)
 	spdk_log_usage(stdout, "-t");
 	printf(" -i value - Number of IO Queues (default: %u)\n",
 	       g_fc_port.num_io_queues);
-	printf(" -d value - HW queue depth (default: %u)\n",
-	       g_hw_queue_depth);
 	printf(" -q value - SQ size (default: %u)\n",
 	       g_nvmf_transport_opts.max_queue_depth);
 	printf(" -c value - Connection count (default: %u)\n",
@@ -986,14 +906,6 @@ int main(int argc, char **argv)
 		case 'u':
 			test = (int)spdk_strtol(optarg, 10);
 			break;
-		case 'd':
-			val = spdk_strtol(optarg, 10);
-			if (val < 16) {
-				fprintf(stderr, "HW queue depth must be at least 16\n");
-				return -EINVAL;
-			}
-			g_hw_queue_depth = (uint32_t)val;
-			break;
		case 'i':
 			val = spdk_strtol(optarg, 10);
 			if (val < 2) {
@@ -1028,7 +940,6 @@ int main(int argc, char **argv)
 		CU_ADD_TEST(suite, invalid_connection_test);
 		CU_ADD_TEST(suite, disconnect_bad_assoc_test);
-		CU_ADD_TEST(suite, create_max_aq_conns_test);
 		CU_ADD_TEST(suite, xmt_ls_rsp_failure_test);
 
 	} else {
@@ -1043,9 +954,6 @@ int main(int argc, char **argv)
 		case 3:
 			CU_ADD_TEST(suite, invalid_connection_test);
 			break;
-		case 4:
-			CU_ADD_TEST(suite, create_max_aq_conns_test);
-			break;
 		case 5:
 			CU_ADD_TEST(suite, xmt_ls_rsp_failure_test);
 			break;
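
The core data-structure change in this patch is easiest to see in isolation: each connection now owns a request pool carved out of a single calloc() block sized at twice the SQ depth, with a small overlay struct used only while an element sits on an STAILQ free list (allocation pops the head, free pushes back). The sketch below is an illustrative, standalone rendering of that pattern, not code from the patch; the conn_pool, pool_entry, and request names are hypothetical, and it assumes, as the patch does implicitly, that a request object is at least as large as one list pointer.

/*
 * Illustrative sketch (not part of the patch): a per-connection request pool
 * built the same way nvmf_fc_create_conn_reqpool() builds one -- a single
 * calloc'd block sized at 2x the queue depth, threaded onto an STAILQ free
 * list.  Names here are hypothetical.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct request {
	uint64_t xchg_id;	/* stand-in payload fields */
	uint32_t state;
	uint8_t  cmd_iu[64];
};

/* Overlay used only while an element sits on the free list. */
struct pool_entry {
	STAILQ_ENTRY(pool_entry) link;
};

_Static_assert(sizeof(struct request) >= sizeof(struct pool_entry),
	       "request must be large enough to hold the free-list link");

struct conn_pool {
	STAILQ_HEAD(, pool_entry) free_list;
	struct request *memory;	/* one allocation backs every element */
	uint32_t size;
	uint32_t free_elems;
};

static int
conn_pool_create(struct conn_pool *pool, uint32_t queue_depth)
{
	/* 2x the SQ depth, to absorb the RSP-vs-new-command race described
	 * in the patch comment. */
	uint32_t i, qd = queue_depth * 2;

	STAILQ_INIT(&pool->free_list);
	pool->memory = calloc(qd, sizeof(struct request));
	if (pool->memory == NULL) {
		return -ENOMEM;
	}
	pool->size = qd;
	pool->free_elems = qd;

	for (i = 0; i < qd; i++) {
		STAILQ_INSERT_TAIL(&pool->free_list,
				   (struct pool_entry *)&pool->memory[i], link);
	}
	return 0;
}

static struct request *
conn_pool_get(struct conn_pool *pool)
{
	struct pool_entry *e = STAILQ_FIRST(&pool->free_list);

	if (e == NULL) {
		return NULL;	/* pool exhausted */
	}
	STAILQ_REMOVE_HEAD(&pool->free_list, link);
	pool->free_elems--;

	/* The entry's memory doubles as the request itself. */
	memset(e, 0, sizeof(struct request));
	return (struct request *)e;
}

static void
conn_pool_put(struct conn_pool *pool, struct request *req)
{
	STAILQ_INSERT_HEAD(&pool->free_list, (struct pool_entry *)req, link);
	pool->free_elems++;
}

int
main(void)
{
	struct conn_pool pool;
	struct request *r;

	if (conn_pool_create(&pool, 32) != 0) {
		return 1;
	}
	r = conn_pool_get(&pool);
	printf("free elements after get: %u\n", (unsigned)pool.free_elems);	/* 63 */
	conn_pool_put(&pool, r);
	printf("free elements after put: %u\n", (unsigned)pool.free_elems);	/* 64 */
	free(pool.memory);
	return 0;
}

Sizing the pool at twice the queue depth follows the rationale in the patch's own comment: the initiator may send a new command before the target has processed the CQE for a just-sent response, so for a short window the connection can need more outstanding request objects than its SQ depth.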