dm: add snapshot of dm-req-crypt

This snapshot is taken as of msm-3.18 commit:
5684450d70 ("Promotion of kernel.lnx.3.18-151201").
dm-req-crypt is necessary for full disk encryption.

Signed-off-by: Gilad Broner <gbroner@codeaurora.org>
Author:    Gilad Broner <gbroner@codeaurora.org>
Date:      2016-03-02 14:24:34 +02:00
Committer: David Keitel
Commit:    c3b854ad6c (parent: ca838eec7c)

10 changed files with 1481 additions and 3 deletions

block/blk-core.c

@@ -1475,6 +1475,9 @@ void __blk_put_request(struct request_queue *q, struct request *req)
-	/* this is a bio leak */
-	WARN_ON(req->bio != NULL);
+	/* this is a bio leak if the bio is not tagged with BIO_DONTFREE */
+	WARN_ON(req->bio && !bio_flagged(req->bio, BIO_DONTFREE));
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -2647,6 +2650,15 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
blk_account_io_completion(req, nr_bytes);
total_bytes = 0;
/*
 * If this bio is flagged, a request-based dm target still needs to
 * post-process it, so do not end the bios or the request here; the
 * dm layer takes care of that.
 */
if (bio_flagged(req->bio, BIO_DONTFREE))
return false;
while (req->bio) {
struct bio *bio = req->bio;
unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
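
For context, the dm side is expected to tag each bio of a clone before dispatch so that blk_update_request() bails out early, as above. Below is a minimal sketch of that tagging, assuming 3.18-era bio flag handling; mark_clone_dontfree() is a hypothetical helper name, not part of this commit.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical helper: mark every bio of a cloned request with
 * BIO_DONTFREE so that blk_update_request() returns early and the
 * dm layer can post-process and complete the bios itself.
 */
static void mark_clone_dontfree(struct request *clone)
{
	struct bio *bio;

	for (bio = clone->bio; bio; bio = bio->bi_next)
		bio->bi_flags |= 1UL << BIO_DONTFREE;
}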

block/blk-merge.c

@@ -467,6 +467,93 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL(blk_rq_map_sg);
/*
 * Map a request to a scatterlist without merging physically
 * contiguous blocks. Returns the number of sg entries set up.
 * The caller must make sure the sg list can hold
 * rq->nr_phys_segments entries.
 */
int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq,
struct scatterlist *sglist)
{
struct bio_vec bvec, bvprv = { NULL };
struct req_iterator iter;
struct scatterlist *sg;
int nsegs, cluster = 0;
nsegs = 0;
/*
* for each bio in rq
*/
sg = NULL;
rq_for_each_segment(bvec, rq, iter) {
__blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
&nsegs, &cluster);
} /* segments in rq */
if (!sg)
return nsegs;
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
unsigned int pad_len =
(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
sg->length += pad_len;
rq->extra_len += pad_len;
}
if (q->dma_drain_size && q->dma_drain_needed(rq)) {
if (rq->cmd_flags & REQ_WRITE)
memset(q->dma_drain_buffer, 0, q->dma_drain_size);
sg->page_link &= ~0x02;
sg = sg_next(sg);
sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
q->dma_drain_size,
((unsigned long)q->dma_drain_buffer) &
(PAGE_SIZE - 1));
nsegs++;
rq->extra_len += q->dma_drain_size;
}
if (sg)
sg_mark_end(sg);
return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg_no_cluster);
/**
* blk_bio_map_sg - map a bio to a scatterlist
* @q: request_queue in question
* @bio: bio being mapped
* @sglist: scatterlist being mapped
*
* Note:
* Caller must make sure sg can hold bio->bi_phys_segments entries
*
* Will return the number of sg entries setup
*/
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
struct scatterlist *sg = NULL;
int nsegs;
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
bio->bi_next = next;
if (sg)
sg_mark_end(sg);
WARN_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);
static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
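
As a usage sketch (not part of this commit), a driver that hands a request to a crypto engine one physically contiguous block at a time could call the new helper as follows; map_for_crypto_engine() is a hypothetical caller, with the sg table sized per the rule in the comment above.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical caller: map @rq without clustering so that each
 * physically contiguous block gets its own sg entry. The table must
 * hold rq->nr_phys_segments entries, per the helper's contract.
 */
static int map_for_crypto_engine(struct request_queue *q,
				 struct request *rq,
				 struct scatterlist *sgl)
{
	sg_init_table(sgl, rq->nr_phys_segments);
	return blk_rq_map_sg_no_cluster(q, rq, sgl);
}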

block/blk.h

@@ -214,7 +214,6 @@ int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next);
-void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

drivers/md/Kconfig

@@ -267,6 +267,23 @@ config DM_CRYPT
If unsure, say N.
config DM_REQ_CRYPT
tristate "Req Crypt target support"
depends on BLK_DEV_DM
select XTS
select CRYPTO_XTS
---help---
This request-based device-mapper target allows you to create a
device that transparently encrypts the data on it. You'll need to
enable the ciphers you are going to use in the crypto API
configuration.

dm-req-crypt operates on whole requests (larger payloads than
individual bios), which makes better use of crypto hardware.

To compile this code as a module, choose M here: the module will
be called dm-req-crypt.

If unsure, say N.
config DM_SNAPSHOT
tristate "Snapshot target"
depends on BLK_DEV_DM

drivers/md/Makefile

@@ -59,6 +59,7 @@ obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
obj-$(CONFIG_DM_REQ_CRYPT) += dm-req-crypt.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o

drivers/md/dm-req-crypt.c: new file, 1339 lines (diff suppressed because it is too large)

drivers/md/dm.c

@@ -1150,7 +1150,7 @@ static void free_rq_clone(struct request *clone)
* Must be called without clone's queue lock held,
* see end_clone_request() for more details.
*/
-static void dm_end_request(struct request *clone, int error)
+void dm_end_request(struct request *clone, int error)
{
int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
@@ -1343,7 +1343,7 @@ static void dm_complete_request(struct request *rq, int error)
* Target's rq_end_io() function isn't called.
* This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
*/
-static void dm_kill_unmapped_request(struct request *rq, int error)
+void dm_kill_unmapped_request(struct request *rq, int error)
{
rq->cmd_flags |= REQ_FAILED;
dm_complete_request(rq, error);
@@ -1802,6 +1802,13 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
dm_complete_request(rq, r);
}
void dm_dispatch_request(struct request *rq)
{
struct dm_rq_target_io *tio = tio_from_request(rq);
dm_dispatch_clone_request(tio->clone, rq);
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{

include/linux/blk_types.h

@@ -134,6 +134,12 @@ struct bio {
*/
#define BIO_RESET_BITS 13
#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */
/*
 * Added for request-based dm targets that need to perform post
 * processing. This flag ensures blk_update_request() does not free
 * the bios or the request; that is done at the dm level instead.
 */
#define BIO_DONTFREE 14
#define BIO_INLINECRYPT 15
/*

include/linux/blkdev.h

@@ -796,6 +796,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
extern void blk_recalc_rq_segments(struct request *rq);
extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
@@ -1008,6 +1009,8 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern int blk_rq_map_sg_no_cluster
(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

include/linux/device-mapper.h

@@ -601,4 +601,11 @@ static inline unsigned long to_bytes(sector_t n)
return (n << SECTOR_SHIFT);
}
/*-----------------------------------------------------------------
 * Helpers for block layer and dm core operations
 *-----------------------------------------------------------------
 */
void dm_dispatch_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
void dm_end_request(struct request *clone, int error);
#endif /* _LINUX_DEVICE_MAPPER_H */
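
For illustration, a request-based target would use these helpers roughly as follows. This is a minimal sketch, not code from dm-req-crypt: target_post_process_done() is a hypothetical name, and the post-processing (e.g. encryption) step itself is elided.

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * Hypothetical completion hook: once the target has finished
 * post-processing a request's payload, either fail the original
 * request or push it down to the underlying queue.
 * dm_end_request(clone, error) would similarly complete a clone
 * that has already been dispatched.
 */
static void target_post_process_done(struct request *rq, int error)
{
	if (error) {
		/* processing failed: complete rq with the error */
		dm_kill_unmapped_request(rq, error);
		return;
	}

	/* hand the processed request to the underlying device */
	dm_dispatch_request(rq);
}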