idxd: Remove idxd_group altogether

The driver always creates a single group containing all of the engines
and a single work queue.

Change-Id: I83f170f966abbd141304c49bd75ffe4608f5ad03
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11533
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Author: Ben Walker <benjamin.walker@intel.com>, 2022-02-03 16:34:45 -07:00
Committed-by: Tomasz Zawadzki
Parent: 9de35e7fc8
Commit: 7dfe90df60
4 changed files with 36 additions and 119 deletions
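
The core of the change is in idxd_group_config() in the user-space driver: instead of calloc'ing a struct idxd_group and hanging it off the device, the function now fills a stack-local struct idxd_grpcfg describing the one group the driver ever uses (work queue 0 plus every engine) and writes it straight to the hardware group table. Below is a minimal, compilable sketch of that pattern; the struct layouts, field widths, and helper names are simplified stand-ins for the real idxd register definitions, not the driver's own types.

#include <inttypes.h>
#include <stdio.h>

/* Simplified stand-ins for the idxd config records referenced in the
 * diff below; the real definitions live in the driver's register headers. */
union group_flags {
        struct {
                uint32_t read_buffers_allowed : 8;
        };
        uint32_t raw;
};

struct grpcfg {
        uint64_t wqs[4];        /* bitmap of work queues assigned to the group */
        uint64_t engines;       /* bitmap of engines assigned to the group */
        union group_flags flags;
};

/* The refactor in a nutshell: build the one group the driver ever uses
 * (work queue 0 plus every engine) in a stack-local struct, write it to
 * the device, and keep nothing allocated on the heap afterwards. */
static struct grpcfg
build_single_group(uint32_t num_engines, uint32_t read_bufs)
{
        struct grpcfg grpcfg = {};
        uint32_t i;

        grpcfg.wqs[0] = 1;
        grpcfg.flags.read_buffers_allowed = read_bufs;
        for (i = 0; i < num_engines; i++) {
                grpcfg.engines |= (1ULL << i);
        }
        return grpcfg;
}

int
main(void)
{
        struct grpcfg cfg = build_single_group(4, 8);

        printf("wqs[0]=0x%" PRIx64 " engines=0x%" PRIx64 "\n",
               cfg.wqs[0], cfg.engines);
        return 0;
}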


@@ -119,20 +119,6 @@ struct pci_dev_id {
int device_id;
};
struct idxd_group {
struct spdk_idxd_device *idxd;
struct idxd_grpcfg grpcfg;
struct pci_dev_id pcidev;
int num_engines;
int num_wqs;
int id;
uint8_t tokens_allowed;
bool use_token_limit;
uint8_t tokens_reserved;
int tc_a;
int tc_b;
};
/*
* This struct wraps the hardware completion record which is 32 bytes in
* size and must be 32 byte aligned.
@@ -167,8 +153,6 @@ struct spdk_idxd_device {
uint32_t total_wq_size;
uint32_t chan_per_device;
pthread_mutex_t num_channels_lock;
struct idxd_group *groups;
};
void idxd_impl_register(struct spdk_idxd_impl *impl);


@@ -242,31 +242,6 @@ kernel_idxd_device_destruct(struct spdk_idxd_device *idxd)
free(idxd);
}
/*
* Build work queue (WQ) config based on getting info from the device combined
* with the defined configuration. Once built, it is written to the device.
*/
static int
kernel_idxd_wq_config(struct spdk_kernel_idxd_device *kernel_idxd)
{
uint32_t i;
struct spdk_idxd_device *idxd = &kernel_idxd->idxd;
/* initialize the group */
idxd->groups = calloc(g_kernel_dev_cfg.num_groups, sizeof(struct idxd_group));
if (idxd->groups == NULL) {
SPDK_ERRLOG("Failed to allocate group memory\n");
return -ENOMEM;
}
for (i = 0; i < g_kernel_dev_cfg.num_groups; i++) {
idxd->groups[i].idxd = idxd;
idxd->groups[i].id = i;
}
return 0;
}
static int _kernel_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb, int dev_id);
static int
@@ -346,8 +321,6 @@ _kernel_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb, int dev_id)
goto end;
}
kernel_idxd_wq_config(kernel_idxd);
attach_cb(cb_ctx, &kernel_idxd->idxd);
SPDK_NOTICELOG("Successfully got an kernel device=%p\n", kernel_idxd);
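
On the kernel driver path, the removed kernel_idxd_wq_config() never touched hardware at all; it only calloc'ed an idxd_group array and numbered the entries, so dropping it removes an allocation, an error path, and the matching free. A rough, compilable sketch of what that removed step amounted to (types are simplified stand-ins, not the driver's):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the driver's types. */
struct idxd_group;
struct idxd_device { struct idxd_group *groups; };
struct idxd_group { struct idxd_device *idxd; int id; };

/* What the removed kernel_idxd_wq_config() boiled down to: allocate and
 * number idxd->groups, which nothing on the kernel path ever read. */
static int
old_kernel_wq_config(struct idxd_device *idxd, unsigned int num_groups)
{
        unsigned int i;

        idxd->groups = calloc(num_groups, sizeof(struct idxd_group));
        if (idxd->groups == NULL) {
                fprintf(stderr, "Failed to allocate group memory\n");
                return -1;
        }
        for (i = 0; i < num_groups; i++) {
                idxd->groups[i].idxd = idxd;
                idxd->groups[i].id = (int)i;
        }
        return 0;
}

int
main(void)
{
        struct idxd_device dev = { NULL };

        /* After this commit, the probe path skips this step and calls
         * attach_cb() directly. */
        if (old_kernel_wq_config(&dev, 1) == 0) {
                printf("allocated group id %d (never used)\n", dev.groups[0].id);
                free(dev.groups);
        }
        return 0;
}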


@@ -170,11 +170,6 @@ idxd_reset_dev(struct spdk_idxd_device *idxd)
return rc;
}
/*
* Build group config based on getting info from the device combined
* with the defined configuration. Once built, it is written to the
* device.
*/
static int
idxd_group_config(struct spdk_idxd_device *idxd)
{
@@ -184,7 +179,9 @@ idxd_group_config(struct spdk_idxd_device *idxd)
union idxd_enginecap_register enginecap;
union idxd_wqcap_register wqcap;
union idxd_offsets_register table_offsets;
struct idxd_grptbl *grptbl;
struct idxd_grpcfg grpcfg = {};
groupcap.raw = spdk_mmio_read_8(&user_idxd->registers->groupcap.raw);
enginecap.raw = spdk_mmio_read_8(&user_idxd->registers->enginecap.raw);
@@ -194,65 +191,45 @@ idxd_group_config(struct spdk_idxd_device *idxd)
return -ENOTSUP;
}
assert(groupcap.num_groups >= 1);
idxd->groups = calloc(1, sizeof(struct idxd_group));
if (idxd->groups == NULL) {
SPDK_ERRLOG("Failed to allocate group memory\n");
return -ENOMEM;
}
/* Build one group with all of the engines and a single work queue. */
grpcfg.wqs[0] = 1;
grpcfg.flags.read_buffers_allowed = groupcap.read_bufs;
for (i = 0; i < enginecap.num_engines; i++) {
idxd->groups->grpcfg.engines |= (1 << i);
grpcfg.engines |= (1 << i);
}
idxd->groups->grpcfg.wqs[0] = 0x1;
idxd->groups->grpcfg.flags.read_buffers_allowed = groupcap.read_bufs;
idxd->groups->idxd = idxd;
idxd->groups->id = 0;
table_offsets.raw[0] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[0]);
table_offsets.raw[1] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[1]);
grptbl = (struct idxd_grptbl *)((uint8_t *)user_idxd->registers + (table_offsets.grpcfg *
IDXD_TABLE_OFFSET_MULT));
/* GRPWQCFG, work queues config */
spdk_mmio_write_8((uint64_t *)&grptbl->group[0].wqs[0], idxd->groups->grpcfg.wqs[0]);
/* Write the group we've configured */
spdk_mmio_write_8(&grptbl->group[0].wqs[0], grpcfg.wqs[0]);
spdk_mmio_write_8(&grptbl->group[0].wqs[1], 0);
spdk_mmio_write_8(&grptbl->group[0].wqs[2], 0);
spdk_mmio_write_8(&grptbl->group[0].wqs[3], 0);
spdk_mmio_write_8(&grptbl->group[0].engines, grpcfg.engines);
spdk_mmio_write_4(&grptbl->group[0].flags.raw, grpcfg.flags.raw);
/* GRPENGCFG, engine config */
spdk_mmio_write_8((uint64_t *)&grptbl->group[0].engines, idxd->groups->grpcfg.engines);
/* GRPFLAGS, flags config */
spdk_mmio_write_8((uint64_t *)&grptbl->group[0].flags, idxd->groups->grpcfg.flags.raw);
/*
* Now write the other groups to zero them out
*/
/* Write zeroes to the rest of the groups */
for (i = 1 ; i < groupcap.num_groups; i++) {
/* GRPWQCFG, work queues config */
spdk_mmio_write_8((uint64_t *)&grptbl->group[i].wqs[0], 0UL);
/* GRPENGCFG, engine config */
spdk_mmio_write_8((uint64_t *)&grptbl->group[i].engines, 0UL);
/* GRPFLAGS, flags config */
spdk_mmio_write_8((uint64_t *)&grptbl->group[i].flags, 0UL);
spdk_mmio_write_8(&grptbl->group[i].wqs[0], 0L);
spdk_mmio_write_8(&grptbl->group[i].wqs[1], 0L);
spdk_mmio_write_8(&grptbl->group[i].wqs[2], 0L);
spdk_mmio_write_8(&grptbl->group[i].wqs[3], 0L);
spdk_mmio_write_8(&grptbl->group[i].engines, 0L);
spdk_mmio_write_4(&grptbl->group[i].flags.raw, 0L);
}
return 0;
}
/*
* Build work queue (WQ) config based on getting info from the device combined
* with the defined configuration. Once built, it is written to the device.
*/
static int
idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
{
uint32_t j;
uint32_t i;
struct spdk_idxd_device *idxd = &user_idxd->idxd;
uint32_t wq_size;
union idxd_wqcap_register wqcap;
union idxd_offsets_register table_offsets;
struct idxd_wqtbl *wqtbl;
@@ -260,12 +237,10 @@ idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw);
wq_size = wqcap.total_wq_size;
/* If this fires, something in the hardware spec has changed. */
assert(sizeof(wqtbl->wq[0]) == 1 << (WQCFG_SHIFT + wqcap.wqcfg_size));
SPDK_DEBUGLOG(idxd, "Total ring slots available space 0x%x, so per work queue is 0x%x\n",
wqcap.total_wq_size, wq_size);
SPDK_DEBUGLOG(idxd, "Total ring slots available 0x%x\n", wqcap.total_wq_size);
idxd->total_wq_size = wqcap.total_wq_size;
/* Spread the channels we allow per device based on the total number of WQE to try
@@ -279,25 +254,19 @@ idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
wqtbl = (struct idxd_wqtbl *)((uint8_t *)user_idxd->registers + (table_offsets.wqcfg *
IDXD_TABLE_OFFSET_MULT));
/* Per spec we need to read in existing values first so we don't zero out something we
* didn't touch when we write the cfg register out below.
*/
for (j = 0 ; j < (sizeof(union idxd_wqcfg) / sizeof(uint32_t)); j++) {
wqcfg.raw[j] = spdk_mmio_read_4(&wqtbl->wq[0].raw[j]);
for (i = 0 ; i < SPDK_COUNTOF(wqtbl->wq[0].raw); i++) {
wqcfg.raw[i] = spdk_mmio_read_4(&wqtbl->wq[0].raw[i]);
}
wqcfg.wq_size = wq_size;
wqcfg.wq_size = wqcap.total_wq_size;
wqcfg.mode = WQ_MODE_DEDICATED;
wqcfg.max_batch_shift = LOG2_WQ_MAX_BATCH;
wqcfg.max_xfer_shift = LOG2_WQ_MAX_XFER;
wqcfg.wq_state = WQ_ENABLED;
wqcfg.priority = WQ_PRIORITY_1;
/*
* Now write the work queue config to the device for configured queues
*/
for (j = 0 ; j < (sizeof(union idxd_wqcfg) / sizeof(uint32_t)); j++) {
spdk_mmio_write_4(&wqtbl->wq[0].raw[j], wqcfg.raw[j]);
for (i = 0; i < SPDK_COUNTOF(wqtbl->wq[0].raw); i++) {
spdk_mmio_write_4(&wqtbl->wq[0].raw[i], wqcfg.raw[i]);
}
return 0;
@@ -378,7 +347,6 @@ idxd_device_configure(struct spdk_user_idxd_device *user_idxd)
err_wq_enable:
err_device_enable:
err_wq_cfg:
free(idxd->groups);
err_group_cfg:
err_reset:
idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR);
@@ -396,7 +364,6 @@ user_idxd_device_destruct(struct spdk_idxd_device *idxd)
idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR);
idxd_unmap_pci_bar(idxd, IDXD_WQ_BAR);
free(idxd->groups);
spdk_pci_device_detach(user_idxd->device);
free(user_idxd);
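
Two details of the new user-space code are worth calling out: the group table is now written through typed struct idxd_grptbl members (8-byte writes for the WQ and engine bitmaps, a 4-byte write for the flags) instead of casts from byte offsets, and the WQ config keeps the spec-required read-modify-write of the per-WQ raw dwords before writing the record back. A self-contained sketch of that read-modify-write pattern against an in-memory register block follows; the mmio helpers and the WQCFG layout here are simplified placeholders for the SPDK and idxd definitions.

#include <stdint.h>
#include <stdio.h>

#define COUNTOF(a) (sizeof(a) / sizeof((a)[0]))

/* Simplified WQCFG record: a handful of fields viewed as raw dwords. */
union wqcfg {
        struct {
                uint32_t wq_size;
                uint32_t mode;
                uint32_t priority;
                uint32_t state;
        };
        uint32_t raw[8];
};

static uint32_t mmio_read_4(volatile uint32_t *addr) { return *addr; }
static void mmio_write_4(volatile uint32_t *addr, uint32_t val) { *addr = val; }

int
main(void)
{
        union wqcfg wqcfg = {};
        union wqcfg wq_table[1] = {};   /* stands in for wqtbl->wq[0] */
        uint32_t i;

        /* Per spec, read the existing record first so fields this code
         * does not set are preserved when the record is written back. */
        for (i = 0; i < COUNTOF(wq_table[0].raw); i++) {
                wqcfg.raw[i] = mmio_read_4(&wq_table[0].raw[i]);
        }

        wqcfg.wq_size = 128;    /* wqcap.total_wq_size in the driver */
        wqcfg.mode = 1;         /* dedicated mode */

        for (i = 0; i < COUNTOF(wq_table[0].raw); i++) {
                mmio_write_4(&wq_table[0].raw[i], wqcfg.raw[i]);
        }

        printf("wq_size in table: %u\n", wq_table[0].wq_size);
        return 0;
}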


@@ -105,7 +105,6 @@ static int
test_idxd_wq_config(void)
{
struct spdk_user_idxd_device user_idxd = {};
struct spdk_idxd_device *idxd = &user_idxd.idxd;
uint32_t wq_size, i, j;
int rc;
struct idxd_wqtbl *wqtbl;
@@ -113,9 +112,6 @@ test_idxd_wq_config(void)
user_idxd.registers = calloc(1, FAKE_REG_SIZE);
SPDK_CU_ASSERT_FATAL(user_idxd.registers != NULL);
idxd->groups = calloc(1, sizeof(struct idxd_group));
SPDK_CU_ASSERT_FATAL(idxd->groups != NULL);
user_idxd.registers->wqcap.total_wq_size = TOTAL_WQE_SIZE;
user_idxd.registers->wqcap.num_wqs = 1;
user_idxd.registers->gencap.max_batch_shift = LOG2_WQ_MAX_BATCH;
@@ -142,7 +138,6 @@ test_idxd_wq_config(void)
}
free(user_idxd.registers);
free(idxd->groups);
return 0;
}
@@ -156,7 +151,7 @@ test_idxd_group_config(void)
uint64_t engines[MAX_ARRAY_SIZE] = {};
union idxd_group_flags flags[MAX_ARRAY_SIZE] = {};
int rc, i;
uint64_t base_offset;
struct idxd_grptbl *grptbl;
user_idxd.registers = calloc(1, FAKE_REG_SIZE);
SPDK_CU_ASSERT_FATAL(user_idxd.registers != NULL);
@@ -167,16 +162,15 @@ test_idxd_group_config(void)
user_idxd.registers->groupcap.read_bufs = MAX_TOKENS;
user_idxd.registers->offsets.grpcfg = GRP_CFG_OFFSET;
grptbl = (struct idxd_grptbl *)((uint8_t *)user_idxd.registers +
(user_idxd.registers->offsets.grpcfg * IDXD_TABLE_OFFSET_MULT));
rc = idxd_group_config(idxd);
CU_ASSERT(rc == 0);
for (i = 0 ; i < user_idxd.registers->groupcap.num_groups; i++) {
base_offset = (user_idxd.registers->offsets.grpcfg * IDXD_TABLE_OFFSET_MULT) + i * 64;
wqs[i] = spdk_mmio_read_8((uint64_t *)((uint8_t *)user_idxd.registers + base_offset));
engines[i] = spdk_mmio_read_8((uint64_t *)((uint8_t *)user_idxd.registers + base_offset +
CFG_ENGINE_OFFSET));
flags[i].raw = spdk_mmio_read_8((uint64_t *)((uint8_t *)user_idxd.registers + base_offset +
CFG_FLAG_OFFSET));
wqs[i] = spdk_mmio_read_8(&grptbl->group[i].wqs[0]);
engines[i] = spdk_mmio_read_8(&grptbl->group[i].engines);
flags[i].raw = spdk_mmio_read_4(&grptbl->group[i].flags.raw);
}
/* wqe and engine arrays are indexed by group id and are bitmaps of assigned elements. */
CU_ASSERT(wqs[0] == 0x1);
@@ -184,7 +178,6 @@ test_idxd_group_config(void)
CU_ASSERT(flags[0].read_buffers_allowed == MAX_TOKENS);
/* groups allocated by code under test. */
free(idxd->groups);
free(user_idxd.registers);
return 0;
}
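
The same simplification shows up in the unit test: the heap-allocated idxd->groups the test had to create and free is gone, and the group table is read back through a struct idxd_grptbl pointer rather than base_offset arithmetic with CFG_ENGINE_OFFSET and CFG_FLAG_OFFSET. A small sketch of that typed read-back against a fake register buffer (layout simplified, not the real idxd_grptbl):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Simplified per-group record and group table. */
struct group_regs {
        uint64_t wqs[4];
        uint64_t engines;
        uint32_t flags;
        uint32_t reserved[5];   /* pad the record out to 64 bytes */
};

struct grptbl {
        struct group_regs group[4];
};

int
main(void)
{
        void *regs = calloc(1, 4096);   /* fake MMIO BAR, like the test's FAKE_REG_SIZE buffer */
        uint32_t grpcfg_offset = 0x400; /* offsets.grpcfg * IDXD_TABLE_OFFSET_MULT in the test */
        struct grptbl *grptbl = (struct grptbl *)((uint8_t *)regs + grpcfg_offset);

        /* What idxd_group_config() writes for the single group. */
        grptbl->group[0].wqs[0] = 0x1;
        grptbl->group[0].engines = 0xf;

        /* Read back through the table pointer, as the updated test does,
         * instead of computing byte offsets by hand. */
        assert(grptbl->group[0].wqs[0] == 0x1);
        assert(grptbl->group[0].engines == 0xf);

        free(regs);
        return 0;
}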