lib/reduce: use global zero buffer instead of zeroing scratch buffer
Eliminates need for memset on parts of our scratch buffer.

Signed-off-by: paul luse <paul.e.luse@intel.com>
Change-Id: I00b7213d3c15562ceeda4d7a3ac2bb7cfd41bf66
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/460010
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent 074fa32636
commit a755fb5f14
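The idea in isolation: when a write covers only part of a chunk, the write path has to supply zeroes for the uncovered head and tail of the chunk. Before this change, those regions were memset() to zero in the per-request scratch (decomp) buffer on every request; after it, the corresponding iovec entries simply point at one shared, read-only zero buffer that is allocated once. Below is a minimal standalone C sketch contrasting the two approaches; all names are hypothetical and calloc stands in for the DMA-able spdk_zmalloc() allocation, so treat it as an illustration of the technique rather than SPDK code.

/* Build the iovec for a chunk that is only partially covered by a write.
 * "Old" zeroes the scratch buffer head/tail with memset(); "new" points
 * those entries at one shared zero buffer instead. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

static uint8_t *g_zero_buf;	/* allocated once, read-only, shared */

/* Old scheme: per-request memset of the unwritten regions of 'scratch'. */
static int
build_iov_old(struct iovec *iov, uint8_t *scratch, uint8_t *payload,
	      size_t head, size_t len, size_t tail)
{
	int cnt = 0;

	if (head != 0) {
		memset(scratch, 0, head);
		iov[cnt].iov_base = scratch;
		iov[cnt++].iov_len = head;
	}
	iov[cnt].iov_base = payload;
	iov[cnt++].iov_len = len;
	if (tail != 0) {
		memset(scratch + head + len, 0, tail);
		iov[cnt].iov_base = scratch + head + len;
		iov[cnt++].iov_len = tail;
	}
	return cnt;
}

/* New scheme: no memset, head/tail entries reference g_zero_buf directly. */
static int
build_iov_new(struct iovec *iov, uint8_t *payload,
	      size_t head, size_t len, size_t tail)
{
	int cnt = 0;

	if (head != 0) {
		iov[cnt].iov_base = g_zero_buf;
		iov[cnt++].iov_len = head;
	}
	iov[cnt].iov_base = payload;
	iov[cnt++].iov_len = len;
	if (tail != 0) {
		iov[cnt].iov_base = g_zero_buf;
		iov[cnt++].iov_len = tail;
	}
	return cnt;
}

int
main(void)
{
	const size_t chunk = 16 * 1024;
	uint8_t *scratch = malloc(chunk);
	uint8_t payload[4096];
	struct iovec iov[3];

	/* calloc stands in for a DMA-able allocation (spdk_zmalloc in SPDK). */
	g_zero_buf = calloc(1, chunk);
	if (scratch == NULL || g_zero_buf == NULL) {
		return 1;
	}
	memset(payload, 0xab, sizeof(payload));

	/* 4 KiB write into the middle of a 16 KiB chunk: 4 KiB head, 8 KiB tail. */
	printf("old iovcnt: %d\n", build_iov_old(iov, scratch, payload, 4096, 4096, 8192));
	printf("new iovcnt: %d\n", build_iov_new(iov, payload, 4096, 4096, 8192));

	free(scratch);
	free(g_zero_buf);
	return 0;
}

The diff below makes this change in the reduce write path and adds the bookkeeping needed to allocate and free the shared zero buffer.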
@@ -163,6 +163,8 @@ struct spdk_reduce_vol {
 
 static void _start_readv_request(struct spdk_reduce_vol_request *req);
 static void _start_writev_request(struct spdk_reduce_vol_request *req);
+static uint8_t *g_zero_buf;
+static int g_vol_count = 0;
 
 /*
  * Allocate extra metadata chunks and corresponding backing io units to account for
@@ -397,6 +399,26 @@ _init_load_cleanup(struct spdk_reduce_vol *vol, struct reduce_init_load_ctx *ctx
 	}
 }
 
+static int
+_alloc_zero_buff(struct spdk_reduce_vol *vol)
+{
+	int rc = 0;
+
+	/* The zero buffer is shared between all volumes and just used
+	 * for reads so allocate one global instance here if not already
+	 * allocated when another vol init'd or loaded.
+	 */
+	if (g_vol_count++ == 0) {
+		g_zero_buf = spdk_zmalloc(vol->params.chunk_size,
+					  64, NULL, SPDK_ENV_LCORE_ID_ANY,
+					  SPDK_MALLOC_DMA);
+		if (g_zero_buf == NULL) {
+			rc = -ENOMEM;
+		}
+	}
+	return rc;
+}
+
 static void
 _init_write_super_cpl(void *cb_arg, int reduce_errno)
 {
@@ -410,6 +432,13 @@ _init_write_super_cpl(void *cb_arg, int reduce_errno)
 		return;
 	}
 
+	rc = _alloc_zero_buff(init_ctx->vol);
+	if (rc != 0) {
+		init_ctx->cb_fn(init_ctx->cb_arg, NULL, rc);
+		_init_load_cleanup(init_ctx->vol, init_ctx);
+		return;
+	}
+
 	init_ctx->cb_fn(init_ctx->cb_arg, init_ctx->vol, reduce_errno);
 	/* Only clean up the ctx - the vol has been passed to the application
 	 * for use now that initialization was successful.
@@ -693,6 +722,11 @@ _load_read_super_and_path_cpl(void *cb_arg, int reduce_errno)
 		}
 	}
 
+	rc = _alloc_zero_buff(vol);
+	if (rc) {
+		goto error;
+	}
+
 	load_ctx->cb_fn(load_ctx->cb_arg, vol, 0);
 	/* Only clean up the ctx - the vol has been passed to the application
 	 * for use now that volume load was successful.
@@ -781,6 +815,9 @@ spdk_reduce_vol_unload(struct spdk_reduce_vol *vol,
 		return;
 	}
 
+	if (--g_vol_count == 0) {
+		spdk_free(g_zero_buf);
+	}
 	_init_load_cleanup(vol, NULL);
 	cb_fn(cb_arg, 0);
 }
@@ -1398,12 +1435,11 @@ _start_writev_request(struct spdk_reduce_vol_request *req)
 	lb_per_chunk = vol->logical_blocks_per_chunk;
 	req->decomp_iovcnt = 0;
 
-	/* Note: we must zero out parts of req->decomp_buf not specified by this write operation. */
+	/* Note: point to our zero buf for offset into the chunk. */
 	chunk_offset = req->offset % lb_per_chunk;
 	if (chunk_offset != 0) {
-		memset(req->decomp_buf, 0, chunk_offset * lbsize);
 		ttl_len += chunk_offset * lbsize;
-		req->decomp_iov[0].iov_base = req->decomp_buf;
+		req->decomp_iov[0].iov_base = g_zero_buf;
 		req->decomp_iov[0].iov_len = ttl_len;
 		req->decomp_iovcnt = 1;
 	}
@@ -1419,8 +1455,7 @@ _start_writev_request(struct spdk_reduce_vol_request *req)
 	chunk_offset += req->length;
 	if (chunk_offset != lb_per_chunk) {
 		remainder = (lb_per_chunk - chunk_offset) * lbsize;
-		memset(req->decomp_buf + ttl_len, 0, remainder);
-		req->decomp_iov[req->decomp_iovcnt].iov_base = req->decomp_buf + ttl_len;
+		req->decomp_iov[req->decomp_iovcnt].iov_base = g_zero_buf;
 		req->decomp_iov[req->decomp_iovcnt].iov_len = remainder;
 		ttl_len += req->decomp_iov[req->decomp_iovcnt].iov_len;
 		req->decomp_iovcnt++;
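The new _alloc_zero_buff() and the unload hunk pair the shared buffer with a simple reference count (g_vol_count): the first volume to be initialized or loaded allocates it, and the last volume to unload frees it. Here is a minimal standalone sketch of that pattern with hypothetical names, using calloc/free in place of spdk_zmalloc/spdk_free; it illustrates the bookkeeping only and is not the SPDK implementation.

/* One shared buffer for all volumes: created on the first get, released
 * when the last user puts it back. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint8_t *g_zero_buf;
static int g_vol_count;

static int
zero_buf_get(size_t chunk_size)
{
	if (g_vol_count++ == 0) {
		/* calloc stands in for spdk_zmalloc(..., SPDK_MALLOC_DMA). */
		g_zero_buf = calloc(1, chunk_size);
		if (g_zero_buf == NULL) {
			g_vol_count--;
			return -ENOMEM;
		}
	}
	return 0;
}

static void
zero_buf_put(void)
{
	if (--g_vol_count == 0) {
		free(g_zero_buf);
		g_zero_buf = NULL;
	}
}

int
main(void)
{
	/* Two "volumes" share one buffer; it is freed only on the last put. */
	if (zero_buf_get(16 * 1024) != 0 || zero_buf_get(16 * 1024) != 0) {
		return 1;
	}
	zero_buf_put();
	printf("still allocated after first put: %s\n", g_zero_buf ? "yes" : "no");
	zero_buf_put();
	printf("still allocated after second put: %s\n", g_zero_buf ? "yes" : "no");
	return 0;
}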