Two small fixes for the DM cache target:
- fix corruption with >2TB fast device due to truncation bug - fix access beyond end of origin device due to a partial block -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQEcBAABAgAGBQJTIxzQAAoJEMUj8QotnQNa2AkIAMXpgGCvxAtuE072ARf2kwxx qtTtdYms/r1ss8keOgACSDgcrn+NMjk4IP4+s54Iz7tViEgIF148gvqgbSeHwNzk Vy49aIbVPj8GIqFxuFeS03DnBZ/loEWiLtiHG2BgKnNRy1NmLvGwLoYsrrn9RHWG AAa4a2cCW/BcrYMJoZSYhlPEEPPvoqSpWTVJr1rjTkmQAAZSsOzsBdklkrEQtHzW Sle8/OKp4tYwBmXml5XsfAPG+pNRkd7HizIRkJ0roP5iIWHLdnpGUvboKYdu1rVp rxEl/nQeXzd8zDsUNvoRQs8CLmtEhBBj0h+L+P2Y3Aj7fNg67ismt25mplQWZsY= =/XEY -----END PGP SIGNATURE----- Merge tag 'dm-3.14-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm Pull device-mapper fixes from Mike Snitzer: "Two small fixes for the DM cache target: - fix corruption with >2TB fast device due to truncation bug - fix access beyond end of origin device due to a partial block" * tag 'dm-3.14-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: dm cache: fix access beyond end of origin device dm cache: fix truncation bug when copying a block to/from >2TB fast device
This commit is contained in:
commit
0c01b45257
1 changed file with 5 additions and 6 deletions
|
@@ -979,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
|
|||
int r;
|
||||
struct dm_io_region o_region, c_region;
|
||||
struct cache *cache = mg->cache;
|
||||
sector_t cblock = from_cblock(mg->cblock);
|
||||
|
||||
o_region.bdev = cache->origin_dev->bdev;
|
||||
o_region.count = cache->sectors_per_block;
|
||||
|
||||
c_region.bdev = cache->cache_dev->bdev;
|
||||
c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
|
||||
c_region.sector = cblock * cache->sectors_per_block;
|
||||
c_region.count = cache->sectors_per_block;
|
||||
|
||||
if (mg->writeback || mg->demote) {
|
||||
|
@@ -2464,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
|
|||
bool discarded_block;
|
||||
struct dm_bio_prison_cell *cell;
|
||||
struct policy_result lookup_result;
|
||||
struct per_bio_data *pb;
|
||||
struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
|
||||
|
||||
if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
|
||||
if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
|
||||
/*
|
||||
* This can only occur if the io goes to a partial block at
|
||||
* the end of the origin device. We don't cache these.
|
||||
* Just remap to the origin and carry on.
|
||||
*/
|
||||
remap_to_origin_clear_discard(cache, bio, block);
|
||||
remap_to_origin(cache, bio);
|
||||
return DM_MAPIO_REMAPPED;
|
||||
}
|
||||
|
||||
pb = init_per_bio_data(bio, pb_data_size);
|
||||
|
||||
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
|
||||
defer_bio(cache, bio);
|
||||
return DM_MAPIO_SUBMITTED;
|
||||
|
|
Loading…
Add table
Reference in a new issue