reduce: add intermediate buffers for compression operations

We will actually use these in future patches.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I730ef90da6d93922dd17a5ac16594e66132a5f11

Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/449086
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
commit 09aff2d0fd (parent ce95c099a8)
Author: Jim Harris <james.r.harris@intel.com>
Date:   2019-03-22 07:35:43 -07:00

@@ -97,6 +97,15 @@ struct spdk_reduce_vol_request {
 	 */
 	uint8_t			*decomp_buf;
 	struct iovec		*decomp_buf_iov;
+	/**
+	 * Scratch buffer used for compressed chunk.  This is used for:
+	 * 1) destination buffer for compression operations
+	 * 2) source buffer for decompression operations
+	 * 3) data buffer when writing compressed chunk to disk
+	 * 4) data buffer when reading compressed chunk from disk
+	 */
+	uint8_t			*comp_buf;
+	struct iovec		*comp_buf_iov;
 	struct iovec		*iov;
 	struct spdk_reduce_vol	*vol;
 	int			type;
@@ -308,7 +317,10 @@ _allocate_vol_requests(struct spdk_reduce_vol *vol)
 	struct spdk_reduce_vol_request *req;
 	int i;
 
-	vol->buf_mem = spdk_dma_malloc(REDUCE_NUM_VOL_REQUESTS * vol->params.chunk_size, 64, NULL);
+	/* Allocate 2x since we need buffers for both read/write and compress/decompress
+	 * intermediate buffers.
+	 */
+	vol->buf_mem = spdk_dma_malloc(2 * REDUCE_NUM_VOL_REQUESTS * vol->params.chunk_size, 64, NULL);
 	if (vol->buf_mem == NULL) {
 		return -ENOMEM;
 	}
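
The doubled buf_mem allocation interleaves one decompress slot and one compress slot per request; an illustrative sketch of the resulting layout (not part of the patch itself):

/*
 * buf_mem after this change: 2 * REDUCE_NUM_VOL_REQUESTS slots of
 * vol->params.chunk_size bytes each, interleaved per request:
 *
 *   | decomp[0] | comp[0] | decomp[1] | comp[1] | ... | decomp[N-1] | comp[N-1] |
 *
 * Request i's pair lives at byte offsets (2*i) * chunk_size and
 * (2*i + 1) * chunk_size.
 */
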
@ -320,8 +332,11 @@ _allocate_vol_requests(struct spdk_reduce_vol *vol)
return -ENOMEM; return -ENOMEM;
} }
/* Allocate 2x since we need iovs for both read/write and compress/decompress intermediate
* buffers.
*/
vol->buf_iov_mem = calloc(REDUCE_NUM_VOL_REQUESTS, vol->buf_iov_mem = calloc(REDUCE_NUM_VOL_REQUESTS,
sizeof(struct iovec) * vol->backing_io_units_per_chunk); 2 * sizeof(struct iovec) * vol->backing_io_units_per_chunk);
if (vol->buf_iov_mem == NULL) { if (vol->buf_iov_mem == NULL) {
free(vol->request_mem); free(vol->request_mem);
spdk_dma_free(vol->buf_mem); spdk_dma_free(vol->buf_mem);
@@ -333,8 +348,10 @@ _allocate_vol_requests(struct spdk_reduce_vol *vol)
 	for (i = 0; i < REDUCE_NUM_VOL_REQUESTS; i++) {
 		req = &vol->request_mem[i];
 		TAILQ_INSERT_HEAD(&vol->free_requests, req, tailq);
-		req->decomp_buf_iov = &vol->buf_iov_mem[i * vol->backing_io_units_per_chunk];
-		req->decomp_buf = vol->buf_mem + i * vol->params.chunk_size;
+		req->decomp_buf_iov = &vol->buf_iov_mem[(2 * i) * vol->backing_io_units_per_chunk];
+		req->decomp_buf = vol->buf_mem + (2 * i) * vol->params.chunk_size;
+		req->comp_buf_iov = &vol->buf_iov_mem[(2 * i + 1) * vol->backing_io_units_per_chunk];
+		req->comp_buf = vol->buf_mem + (2 * i + 1) * vol->params.chunk_size;
 	}
 
 	return 0;
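
Putting the two allocations together, request i owns slot 2*i (decompress) and slot 2*i + 1 (compress) in both pools. Below is a minimal, self-contained C sketch of the same carving; NUM_REQUESTS, CHUNK_SIZE, IO_UNITS, and struct my_request are hypothetical stand-ins for REDUCE_NUM_VOL_REQUESTS, vol->params.chunk_size, vol->backing_io_units_per_chunk, and struct spdk_reduce_vol_request, and plain malloc()/calloc() stands in for spdk_dma_malloc():

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/uio.h>

#define NUM_REQUESTS	4	/* stand-in for REDUCE_NUM_VOL_REQUESTS */
#define CHUNK_SIZE	4096	/* stand-in for vol->params.chunk_size */
#define IO_UNITS	8	/* stand-in for vol->backing_io_units_per_chunk */

struct my_request {
	uint8_t		*decomp_buf;
	struct iovec	*decomp_buf_iov;
	uint8_t		*comp_buf;
	struct iovec	*comp_buf_iov;
};

int
main(void)
{
	/* Both pools are doubled: one decomp slot plus one comp slot per request. */
	uint8_t *buf_mem = malloc(2 * NUM_REQUESTS * CHUNK_SIZE);
	struct iovec *buf_iov_mem = calloc(NUM_REQUESTS, 2 * sizeof(struct iovec) * IO_UNITS);
	struct my_request reqs[NUM_REQUESTS];
	int i;

	assert(buf_mem != NULL && buf_iov_mem != NULL);
	for (i = 0; i < NUM_REQUESTS; i++) {
		/* Even slots serve decompressed data, odd slots compressed data. */
		reqs[i].decomp_buf = buf_mem + (2 * i) * CHUNK_SIZE;
		reqs[i].comp_buf = buf_mem + (2 * i + 1) * CHUNK_SIZE;
		reqs[i].decomp_buf_iov = &buf_iov_mem[(2 * i) * IO_UNITS];
		reqs[i].comp_buf_iov = &buf_iov_mem[(2 * i + 1) * IO_UNITS];
	}

	/* Neighboring slots abut exactly and never overlap. */
	assert(reqs[1].decomp_buf - reqs[0].comp_buf == CHUNK_SIZE);
	assert(reqs[1].decomp_buf_iov - reqs[0].comp_buf_iov == IO_UNITS);

	free(buf_iov_mem);
	free(buf_mem);
	return 0;
}

Interleaving keeps each request's two buffers adjacent within the single allocation, so no per-request allocation or bookkeeping beyond the two base pointers is needed.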