reduce: prepend rw to request buf and buf_iov
We will need separate temporary buffers for compress/decompress. A single
temporary buffer won't do, because a user's read/write operation may not
cover a full chunk - so we need one buffer for reading/writing the
compressed data, and another buffer for the uncompressed data.

For now, just prepend "rw" to the existing fields, to signify that these
fields are used for the read/write operations to the backing storage
device. Future patches will add the additional fields for the buffers
that hold the uncompressed data.

Note: vol->buf_mem and vol->buf_iov_mem do not get the "rw" prefix,
because they will back the compress/decompress temporary buffers as
well. We will simply double the size of these buffers when allocating
them for the volume.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I7df56ab72769f4689f3abac3354446e9cf78d423
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/449085
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
This commit is contained in:
parent 502ab5b66d · commit 20145fd714
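The core constraint the message describes - decompression needs a source
buffer and a separate destination buffer, while the user's I/O may touch
only part of a chunk - is easiest to see in a sub-chunk read/modify/write.
Below is a minimal, self-contained sketch of that flow. All names here
(rw_buf, decomp_buf, the memcpy "codec") are hypothetical stand-ins for
illustration, not SPDK's API:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	#define CHUNK_SIZE 16384

	/* Hypothetical stand-ins for a real codec; a memcpy "compressor"
	 * keeps the sketch runnable without a compression library. */
	static size_t compress_chunk(const uint8_t *src, uint8_t *dst)
	{
		memcpy(dst, src, CHUNK_SIZE);
		return CHUNK_SIZE;
	}

	static void decompress_chunk(const uint8_t *src, size_t len, uint8_t *dst)
	{
		memcpy(dst, src, len);
	}

	int main(void)
	{
		uint8_t disk[CHUNK_SIZE] = {0};            /* fake backing storage */
		uint8_t *rw_buf = malloc(CHUNK_SIZE);      /* compressed bytes to/from disk */
		uint8_t *decomp_buf = malloc(CHUNK_SIZE);  /* uncompressed chunk */
		const char user_data[] = "sub-chunk write";

		/* 1) Read the compressed chunk from disk into the rw buffer. */
		memcpy(rw_buf, disk, CHUNK_SIZE);
		/* 2) Decompress. Source and destination must be different
		 *    buffers - one scratch buffer cannot serve both roles. */
		decompress_chunk(rw_buf, CHUNK_SIZE, decomp_buf);
		/* 3) Merge the user's partial write into the uncompressed chunk. */
		memcpy(decomp_buf + 512, user_data, sizeof(user_data));
		/* 4) Recompress into the rw buffer and write it back out. */
		size_t clen = compress_chunk(decomp_buf, rw_buf);
		memcpy(disk, rw_buf, clen);

		free(rw_buf);
		free(decomp_buf);
		return 0;
	}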
@@ -89,12 +89,14 @@ struct spdk_reduce_chunk_map {
 
 struct spdk_reduce_vol_request {
 	/**
-	 * Scratch buffer used for read/modify/write operations on
-	 * I/Os less than a full chunk size, and as the intermediate
-	 * buffer for compress/decompress operations.
+	 * Scratch buffer used for uncompressed chunk. This is used for:
+	 * 1) source buffer for compression operations
+	 * 2) destination buffer for decompression operations
+	 * 3) data buffer when writing uncompressed chunk to disk
+	 * 4) data buffer when reading uncompressed chunk from disk
 	 */
-	uint8_t				*buf;
-	struct iovec			*buf_iov;
+	uint8_t				*decomp_buf;
+	struct iovec			*decomp_buf_iov;
 	struct iovec			*iov;
 	struct spdk_reduce_vol		*vol;
 	int				type;
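As background on the plumbing this hunk touches: struct iovec is the POSIX
scatter/gather descriptor consumed by readv(2)/writev(2), and the request
keeps an array of them so one contiguous chunk buffer can be handed to
vectored I/O calls. A standalone example of the mechanism, unrelated to
the SPDK code itself:

	#include <stdio.h>
	#include <string.h>
	#include <sys/uio.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Two discontiguous buffers written with one vectored syscall. */
		char hdr[] = "chunk-header:";
		char payload[] = "payload\n";
		struct iovec iov[2] = {
			{ .iov_base = hdr,     .iov_len = strlen(hdr) },
			{ .iov_base = payload, .iov_len = strlen(payload) },
		};
		int fd = open("/tmp/iov_demo", O_CREAT | O_WRONLY | O_TRUNC, 0644);

		if (fd < 0) {
			return 1;
		}
		printf("wrote %zd bytes\n", writev(fd, iov, 2));
		close(fd);
		return 0;
	}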
@@ -331,8 +333,8 @@ _allocate_vol_requests(struct spdk_reduce_vol *vol)
 	for (i = 0; i < REDUCE_NUM_VOL_REQUESTS; i++) {
 		req = &vol->request_mem[i];
 		TAILQ_INSERT_HEAD(&vol->free_requests, req, tailq);
-		req->buf_iov = &vol->buf_iov_mem[i * vol->backing_io_units_per_chunk];
-		req->buf = vol->buf_mem + i * vol->params.chunk_size;
+		req->decomp_buf_iov = &vol->buf_iov_mem[i * vol->backing_io_units_per_chunk];
+		req->decomp_buf = vol->buf_mem + i * vol->params.chunk_size;
 	}
 
 	return 0;
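This hunk hands each request a chunk-sized slice of the single vol->buf_mem
region. The sketch below shows that slicing, including the doubling the
commit message promises so each request can get a second buffer. The
second-slice layout and the rw_buf naming are assumptions for illustration,
not necessarily what a later SPDK patch does:

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/uio.h>

	#define NUM_REQUESTS       16
	#define CHUNK_SIZE         16384
	#define IO_UNITS_PER_CHUNK 4

	struct request {
		uint8_t		*rw_buf;         /* compressed data to/from backing dev */
		uint8_t		*decomp_buf;     /* uncompressed chunk */
		struct iovec	*rw_buf_iov;
		struct iovec	*decomp_buf_iov;
	};

	int main(void)
	{
		/* One allocation, doubled so every request gets two
		 * chunk-sized slices (and two iovec arrays). */
		uint8_t *buf_mem = malloc(2 * (size_t)NUM_REQUESTS * CHUNK_SIZE);
		struct iovec *buf_iov_mem = calloc(2 * NUM_REQUESTS * IO_UNITS_PER_CHUNK,
						   sizeof(struct iovec));
		struct request reqs[NUM_REQUESTS];

		for (int i = 0; i < NUM_REQUESTS; i++) {
			reqs[i].decomp_buf = buf_mem + (size_t)i * CHUNK_SIZE;
			reqs[i].rw_buf = buf_mem + (size_t)(NUM_REQUESTS + i) * CHUNK_SIZE;
			reqs[i].decomp_buf_iov = &buf_iov_mem[i * IO_UNITS_PER_CHUNK];
			reqs[i].rw_buf_iov = &buf_iov_mem[(NUM_REQUESTS + i) * IO_UNITS_PER_CHUNK];
		}

		free(buf_mem);
		free(buf_iov_mem);
		return 0;
	}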
@@ -931,14 +933,14 @@ _issue_backing_ops(struct spdk_reduce_vol_request *req, struct spdk_reduce_vol *
 	req->backing_cb_args.cb_fn = next_fn;
 	req->backing_cb_args.cb_arg = req;
 	for (i = 0; i < vol->backing_io_units_per_chunk; i++) {
-		req->buf_iov[i].iov_base = req->buf + i * vol->params.backing_io_unit_size;
-		req->buf_iov[i].iov_len = vol->params.backing_io_unit_size;
+		req->decomp_buf_iov[i].iov_base = req->decomp_buf + i * vol->params.backing_io_unit_size;
+		req->decomp_buf_iov[i].iov_len = vol->params.backing_io_unit_size;
 		if (is_write) {
-			vol->backing_dev->writev(vol->backing_dev, &req->buf_iov[i], 1,
+			vol->backing_dev->writev(vol->backing_dev, &req->decomp_buf_iov[i], 1,
						 req->chunk->io_unit_index[i] * vol->backing_lba_per_io_unit,
						 vol->backing_lba_per_io_unit, &req->backing_cb_args);
 		} else {
-			vol->backing_dev->readv(vol->backing_dev, &req->buf_iov[i], 1,
+			vol->backing_dev->readv(vol->backing_dev, &req->decomp_buf_iov[i], 1,
						 req->chunk->io_unit_index[i] * vol->backing_lba_per_io_unit,
						 vol->backing_lba_per_io_unit, &req->backing_cb_args);
 		}
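This hunk carves the chunk buffer into one iovec per backing io unit and
issues a separate vectored operation per unit, because a chunk's io units
may land at scattered disk locations. A simplified, synchronous model of
that loop - the mock device, the fixed sizes, and the made-up
io_unit_index values are all illustrative; real SPDK backing devices are
asynchronous and take a completion callback:

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/uio.h>

	#define IO_UNIT_SIZE       4096
	#define IO_UNITS_PER_CHUNK 4

	/* Simplified model of a backing device exposing a vectored write hook. */
	struct backing_dev {
		void (*writev)(struct backing_dev *dev, struct iovec *iov,
			       int iovcnt, uint64_t lba);
	};

	static void mock_writev(struct backing_dev *dev, struct iovec *iov,
				int iovcnt, uint64_t lba)
	{
		(void)dev;
		for (int i = 0; i < iovcnt; i++) {
			printf("write %zu bytes at lba %llu\n",
			       iov[i].iov_len, (unsigned long long)lba);
		}
	}

	int main(void)
	{
		struct backing_dev dev = { .writev = mock_writev };
		uint8_t chunk[IO_UNIT_SIZE * IO_UNITS_PER_CHUNK] = {0};
		struct iovec iov[IO_UNITS_PER_CHUNK];
		uint64_t io_unit_index[IO_UNITS_PER_CHUNK] = { 7, 3, 12, 5 }; /* scattered */
		uint64_t lba_per_io_unit = IO_UNIT_SIZE / 512;

		/* One iovec per io unit, one write per unit at its own location. */
		for (int i = 0; i < IO_UNITS_PER_CHUNK; i++) {
			iov[i].iov_base = chunk + i * IO_UNIT_SIZE;
			iov[i].iov_len = IO_UNIT_SIZE;
			dev.writev(&dev, &iov[i], 1,
				   io_unit_index[i] * lba_per_io_unit);
		}
		return 0;
	}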
@@ -997,7 +999,7 @@ _write_read_done(void *_req, int reduce_errno)
 	}
 
 	chunk_offset = req->offset % req->vol->logical_blocks_per_chunk;
-	buf = req->buf + chunk_offset * req->vol->params.logical_block_size;
+	buf = req->decomp_buf + chunk_offset * req->vol->params.logical_block_size;
 	for (i = 0; i < req->iovcnt; i++) {
 		memcpy(buf, req->iov[i].iov_base, req->iov[i].iov_len);
 		buf += req->iov[i].iov_len;
@@ -1029,7 +1031,7 @@ _read_read_done(void *_req, int reduce_errno)
 	}
 
 	chunk_offset = req->offset % req->vol->logical_blocks_per_chunk;
-	buf = req->buf + chunk_offset * req->vol->params.logical_block_size;
+	buf = req->decomp_buf + chunk_offset * req->vol->params.logical_block_size;
 	for (i = 0; i < req->iovcnt; i++) {
 		memcpy(req->iov[i].iov_base, buf, req->iov[i].iov_len);
 		buf += req->iov[i].iov_len;
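The two hunks above are mirror images: the write path gathers the user's
iovecs into the scratch buffer at the I/O's byte offset within the chunk,
and the read path scatters the buffer back out into the user's iovecs.
The two loops extracted as standalone helpers (names hypothetical):

	#include <stdint.h>
	#include <string.h>
	#include <sys/uio.h>

	/* Write path: copy user iovecs into the uncompressed chunk buffer. */
	static void gather_iovs(uint8_t *decomp_buf, size_t chunk_offset_bytes,
				const struct iovec *iov, int iovcnt)
	{
		uint8_t *buf = decomp_buf + chunk_offset_bytes;

		for (int i = 0; i < iovcnt; i++) {
			memcpy(buf, iov[i].iov_base, iov[i].iov_len);
			buf += iov[i].iov_len;
		}
	}

	/* Read path: the same walk with source and destination swapped. */
	static void scatter_iovs(const uint8_t *decomp_buf, size_t chunk_offset_bytes,
				 const struct iovec *iov, int iovcnt)
	{
		const uint8_t *buf = decomp_buf + chunk_offset_bytes;

		for (int i = 0; i < iovcnt; i++) {
			memcpy(iov[i].iov_base, buf, iov[i].iov_len);
			buf += iov[i].iov_len;
		}
	}

	int main(void)
	{
		uint8_t chunk[4096] = {0};
		char a[8] = "user-io", b[8];
		struct iovec wr = { .iov_base = a, .iov_len = sizeof(a) };
		struct iovec rd = { .iov_base = b, .iov_len = sizeof(b) };

		gather_iovs(chunk, 512, &wr, 1);   /* user data in at offset 512 */
		scatter_iovs(chunk, 512, &rd, 1);  /* and back out again */
		return 0;
	}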
@@ -1169,7 +1171,7 @@ _start_writev_request(struct spdk_reduce_vol_request *req)
 		return;
 	}
 
-	buf = req->buf;
+	buf = req->decomp_buf;
 	lbsize = vol->params.logical_block_size;
 	lb_per_chunk = vol->logical_blocks_per_chunk;
 	/* Note: we must zero out parts of req->buf not specified by this write operation. */
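The trailing note in this hunk matters for correctness: any bytes of the
uncompressed chunk buffer that the user's write does not cover must be
cleared before compression, or stale memory gets compressed into the
stored chunk. A literal sketch of that zeroing - the helper name and
signature are assumptions; the real loop body sits outside this hunk:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/* Zero the regions of the chunk buffer this write does not cover.
	 * Assumes io_offset + io_len <= chunk_size. */
	static void zero_uncovered_regions(uint8_t *decomp_buf, size_t chunk_size,
					   size_t io_offset, size_t io_len)
	{
		memset(decomp_buf, 0, io_offset);                /* before the I/O */
		memset(decomp_buf + io_offset + io_len, 0,
		       chunk_size - io_offset - io_len);         /* after the I/O */
	}

	int main(void)
	{
		uint8_t buf[4096];

		zero_uncovered_regions(buf, sizeof(buf), 512, 1024);
		return 0;
	}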