lib/reduce: close hole in compress optimization

If a write operation comes in and is not a read-modify-write, but
we don't end up compressing the data (because of either an error or
a decision that the compression ratio isn't good enough), then we
not only need to copy the data to the host buffers but also need to
zero any offset and/or remainder in the decomp scratch buffer.

Change-Id: Ifb2235507826f9ef1110dd9dbaf88045d8979e7c
Signed-off-by: paul luse <paul.e.luse@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/463337
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
This commit is contained in:
paul luse 2019-07-26 12:03:21 -04:00 committed by Darek Stojaczyk
parent bc315deae3
commit c2d441226a

View File

@ -118,6 +118,7 @@ struct spdk_reduce_vol_request {
uint8_t *comp_buf; uint8_t *comp_buf;
struct iovec *comp_buf_iov; struct iovec *comp_buf_iov;
struct iovec *iov; struct iovec *iov;
bool rmw;
struct spdk_reduce_vol *vol; struct spdk_reduce_vol *vol;
int type; int type;
int reduce_errno; int reduce_errno;
@ -1045,7 +1046,7 @@ _reduce_vol_write_chunk(struct spdk_reduce_vol_request *req, reduce_request_fn n
{ {
struct spdk_reduce_vol *vol = req->vol; struct spdk_reduce_vol *vol = req->vol;
uint32_t i; uint32_t i;
uint64_t chunk_offset; uint64_t chunk_offset, remainder, total_len = 0;
uint8_t *buf; uint8_t *buf;
int j; int j;
@ -1067,11 +1068,29 @@ _reduce_vol_write_chunk(struct spdk_reduce_vol_request *req, reduce_request_fn n
/* if the chunk is uncompressed we need to copy the data from the host buffers. */ /* if the chunk is uncompressed we need to copy the data from the host buffers. */
if (req->chunk_is_compressed == false) { if (req->chunk_is_compressed == false) {
chunk_offset = req->offset % vol->logical_blocks_per_chunk; chunk_offset = req->offset % vol->logical_blocks_per_chunk;
buf = req->decomp_buf + chunk_offset * vol->params.logical_block_size; buf = req->decomp_buf;
total_len = chunk_offset * vol->params.logical_block_size;
/* zero any offset into chunk */
if (req->rmw == false && chunk_offset) {
memset(buf, 0, total_len);
}
buf += total_len;
/* copy the data */
for (j = 0; j < req->iovcnt; j++) { for (j = 0; j < req->iovcnt; j++) {
memcpy(buf, req->iov[j].iov_base, req->iov[j].iov_len); memcpy(buf, req->iov[j].iov_base, req->iov[j].iov_len);
buf += req->iov[j].iov_len; buf += req->iov[j].iov_len;
total_len += req->iov[j].iov_len;
} }
/* zero any remainder */
remainder = vol->params.chunk_size - total_len;
total_len += remainder;
if (req->rmw == false && remainder) {
memset(buf, 0, remainder);
}
assert(total_len == vol->params.chunk_size);
} }
for (i = 0; i < req->num_io_units; i++) { for (i = 0; i < req->num_io_units; i++) {
@ -1454,6 +1473,7 @@ _start_writev_request(struct spdk_reduce_vol_request *req)
/* Read old chunk, then overwrite with data from this write /* Read old chunk, then overwrite with data from this write
* operation. * operation.
*/ */
req->rmw = true;
_reduce_vol_read_chunk(req, _write_read_done); _reduce_vol_read_chunk(req, _write_read_done);
return; return;
} }
@ -1461,6 +1481,7 @@ _start_writev_request(struct spdk_reduce_vol_request *req)
lbsize = vol->params.logical_block_size; lbsize = vol->params.logical_block_size;
req->decomp_iovcnt = 0; req->decomp_iovcnt = 0;
req->rmw = false;
/* Note: point to our zero buf for offset into the chunk. */ /* Note: point to our zero buf for offset into the chunk. */
chunk_offset = req->offset % vol->logical_blocks_per_chunk; chunk_offset = req->offset % vol->logical_blocks_per_chunk;