block/fs: keep track of the task that dirtied the page
Background writes happen in the context of a background thread. It is very useful to identify the actual task that generated the request instead of the background task that submitted the request. Hence, keep track of the task when a page gets dirtied, and dump this task info while tracing. Not all the pages in the bio are dirtied by the same task, but most likely they will be, since the sectors accessed on the device must be adjacent. Change-Id: I6afba85a2063dd3350a0141ba87cf8440ce9f777 Signed-off-by: Venkat Gopalakrishnan <venkatg@codeaurora.org> [venkatg@codeaurora.org: Fixed trivial merge conflicts] Signed-off-by: Venkat Gopalakrishnan <venkatg@codeaurora.org>
This commit is contained in:
parent
a27d5a2f83
commit
014929f975
4 changed files with 85 additions and 15 deletions
|
@ -2093,6 +2093,7 @@ EXPORT_SYMBOL(generic_make_request);
|
||||||
*/
|
*/
|
||||||
blk_qc_t submit_bio(int rw, struct bio *bio)
|
blk_qc_t submit_bio(int rw, struct bio *bio)
|
||||||
{
|
{
|
||||||
|
struct task_struct *tsk = current;
|
||||||
bio->bi_rw |= rw;
|
bio->bi_rw |= rw;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -2116,8 +2117,18 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
|
||||||
|
|
||||||
if (unlikely(block_dump)) {
|
if (unlikely(block_dump)) {
|
||||||
char b[BDEVNAME_SIZE];
|
char b[BDEVNAME_SIZE];
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Not all the pages in the bio are dirtied by the
|
||||||
|
* same task but most likely it will be, since the
|
||||||
|
* sectors accessed on the device must be adjacent.
|
||||||
|
*/
|
||||||
|
if (bio->bi_io_vec && bio->bi_io_vec->bv_page &&
|
||||||
|
bio->bi_io_vec->bv_page->tsk_dirty)
|
||||||
|
tsk = bio->bi_io_vec->bv_page->tsk_dirty;
|
||||||
|
|
||||||
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
|
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
|
||||||
current->comm, task_pid_nr(current),
|
tsk->comm, task_pid_nr(tsk),
|
||||||
(rw & WRITE) ? "WRITE" : "READ",
|
(rw & WRITE) ? "WRITE" : "READ",
|
||||||
(unsigned long long)bio->bi_iter.bi_sector,
|
(unsigned long long)bio->bi_iter.bi_sector,
|
||||||
bdevname(bio->bi_bdev, b),
|
bdevname(bio->bi_bdev, b),
|
||||||
|
|
|
@ -641,6 +641,8 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
|
||||||
account_page_dirtied(page, mapping, memcg);
|
account_page_dirtied(page, mapping, memcg);
|
||||||
radix_tree_tag_set(&mapping->page_tree,
|
radix_tree_tag_set(&mapping->page_tree,
|
||||||
page_index(page), PAGECACHE_TAG_DIRTY);
|
page_index(page), PAGECACHE_TAG_DIRTY);
|
||||||
|
/* Save the task that is dirtying this page */
|
||||||
|
page->tsk_dirty = current;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&mapping->tree_lock, flags);
|
spin_unlock_irqrestore(&mapping->tree_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
|
@ -207,6 +207,8 @@ struct page {
|
||||||
not kmapped, ie. highmem) */
|
not kmapped, ie. highmem) */
|
||||||
#endif /* WANT_PAGE_VIRTUAL */
|
#endif /* WANT_PAGE_VIRTUAL */
|
||||||
|
|
||||||
|
struct task_struct *tsk_dirty; /* task that sets this page dirty */
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMCHECK
|
#ifdef CONFIG_KMEMCHECK
|
||||||
/*
|
/*
|
||||||
* kmemcheck wants to track the status of each byte in a page; this
|
* kmemcheck wants to track the status of each byte in a page; this
|
||||||
|
|
|
@ -199,9 +199,9 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
|
||||||
* blk_io_trace structure and places it in a per-cpu subbuffer.
|
* blk_io_trace structure and places it in a per-cpu subbuffer.
|
||||||
*/
|
*/
|
||||||
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
|
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
|
||||||
int rw, u32 what, int error, int pdu_len, void *pdu_data)
|
int rw, u32 what, int error, int pdu_len,
|
||||||
|
void *pdu_data, struct task_struct *tsk)
|
||||||
{
|
{
|
||||||
struct task_struct *tsk = current;
|
|
||||||
struct ring_buffer_event *event = NULL;
|
struct ring_buffer_event *event = NULL;
|
||||||
struct ring_buffer *buffer = NULL;
|
struct ring_buffer *buffer = NULL;
|
||||||
struct blk_io_trace *t;
|
struct blk_io_trace *t;
|
||||||
|
@ -708,18 +708,33 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
|
||||||
unsigned int nr_bytes, u32 what)
|
unsigned int nr_bytes, u32 what)
|
||||||
{
|
{
|
||||||
struct blk_trace *bt = q->blk_trace;
|
struct blk_trace *bt = q->blk_trace;
|
||||||
|
struct task_struct *tsk = current;
|
||||||
|
|
||||||
if (likely(!bt))
|
if (likely(!bt))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Use the bio context for all events except ISSUE and
|
||||||
|
* COMPLETE events.
|
||||||
|
*
|
||||||
|
* Not all the pages in the bio are dirtied by the same task but
|
||||||
|
* most likely it will be, since the sectors accessed on the device
|
||||||
|
* must be adjacent.
|
||||||
|
*/
|
||||||
|
if (!((what == BLK_TA_ISSUE) || (what == BLK_TA_COMPLETE)) &&
|
||||||
|
bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
|
||||||
|
rq->bio->bi_io_vec->bv_page &&
|
||||||
|
rq->bio->bi_io_vec->bv_page->tsk_dirty)
|
||||||
|
tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
|
||||||
|
|
||||||
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
|
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
|
||||||
what |= BLK_TC_ACT(BLK_TC_PC);
|
what |= BLK_TC_ACT(BLK_TC_PC);
|
||||||
__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
|
__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
|
||||||
what, rq->errors, rq->cmd_len, rq->cmd);
|
what, rq->errors, rq->cmd_len, rq->cmd, tsk);
|
||||||
} else {
|
} else {
|
||||||
what |= BLK_TC_ACT(BLK_TC_FS);
|
what |= BLK_TC_ACT(BLK_TC_FS);
|
||||||
__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
|
__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
|
||||||
rq->cmd_flags, what, rq->errors, 0, NULL);
|
rq->cmd_flags, what, rq->errors, 0, NULL, tsk);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -771,12 +786,25 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
|
||||||
u32 what, int error)
|
u32 what, int error)
|
||||||
{
|
{
|
||||||
struct blk_trace *bt = q->blk_trace;
|
struct blk_trace *bt = q->blk_trace;
|
||||||
|
struct task_struct *tsk = current;
|
||||||
|
|
||||||
if (likely(!bt))
|
if (likely(!bt))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Not all the pages in the bio are dirtied by the same task but
|
||||||
|
* most likely it will be, since the sectors accessed on the device
|
||||||
|
* must be adjacent.
|
||||||
|
*/
|
||||||
|
if (bio_has_data(bio) && bio->bi_io_vec && bio->bi_io_vec->bv_page &&
|
||||||
|
bio->bi_io_vec->bv_page->tsk_dirty)
|
||||||
|
tsk = bio->bi_io_vec->bv_page->tsk_dirty;
|
||||||
|
|
||||||
|
if (!error && !bio_flagged(bio, BIO_UPTODATE))
|
||||||
|
error = EIO;
|
||||||
|
|
||||||
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
|
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
|
||||||
bio->bi_rw, what, error, 0, NULL);
|
bio->bi_rw, what, error, 0, NULL, tsk);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void blk_add_trace_bio_bounce(void *ignore,
|
static void blk_add_trace_bio_bounce(void *ignore,
|
||||||
|
@ -824,7 +852,8 @@ static void blk_add_trace_getrq(void *ignore,
|
||||||
struct blk_trace *bt = q->blk_trace;
|
struct blk_trace *bt = q->blk_trace;
|
||||||
|
|
||||||
if (bt)
|
if (bt)
|
||||||
__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
|
__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0,
|
||||||
|
NULL, current);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -840,7 +869,7 @@ static void blk_add_trace_sleeprq(void *ignore,
|
||||||
|
|
||||||
if (bt)
|
if (bt)
|
||||||
__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
|
__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
|
||||||
0, 0, NULL);
|
0, 0, NULL, current);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -849,7 +878,8 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
|
||||||
struct blk_trace *bt = q->blk_trace;
|
struct blk_trace *bt = q->blk_trace;
|
||||||
|
|
||||||
if (bt)
|
if (bt)
|
||||||
__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
|
__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL,
|
||||||
|
current);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
|
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
|
||||||
|
@ -866,7 +896,8 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
|
||||||
else
|
else
|
||||||
what = BLK_TA_UNPLUG_TIMER;
|
what = BLK_TA_UNPLUG_TIMER;
|
||||||
|
|
||||||
__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
|
__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu,
|
||||||
|
current);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -875,13 +906,19 @@ static void blk_add_trace_split(void *ignore,
|
||||||
unsigned int pdu)
|
unsigned int pdu)
|
||||||
{
|
{
|
||||||
struct blk_trace *bt = q->blk_trace;
|
struct blk_trace *bt = q->blk_trace;
|
||||||
|
struct task_struct *tsk = current;
|
||||||
|
|
||||||
if (bt) {
|
if (bt) {
|
||||||
__be64 rpdu = cpu_to_be64(pdu);
|
__be64 rpdu = cpu_to_be64(pdu);
|
||||||
|
|
||||||
|
if (bio_has_data(bio) && bio->bi_io_vec &&
|
||||||
|
bio->bi_io_vec->bv_page &&
|
||||||
|
bio->bi_io_vec->bv_page->tsk_dirty)
|
||||||
|
tsk = bio->bi_io_vec->bv_page->tsk_dirty;
|
||||||
|
|
||||||
__blk_add_trace(bt, bio->bi_iter.bi_sector,
|
__blk_add_trace(bt, bio->bi_iter.bi_sector,
|
||||||
bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
|
bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
|
||||||
bio->bi_error, sizeof(rpdu), &rpdu);
|
bio->bi_error, sizeof(rpdu), &rpdu, tsk);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -904,6 +941,7 @@ static void blk_add_trace_bio_remap(void *ignore,
|
||||||
{
|
{
|
||||||
struct blk_trace *bt = q->blk_trace;
|
struct blk_trace *bt = q->blk_trace;
|
||||||
struct blk_io_trace_remap r;
|
struct blk_io_trace_remap r;
|
||||||
|
struct task_struct *tsk = current;
|
||||||
|
|
||||||
if (likely(!bt))
|
if (likely(!bt))
|
||||||
return;
|
return;
|
||||||
|
@ -912,9 +950,14 @@ static void blk_add_trace_bio_remap(void *ignore,
|
||||||
r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev);
|
r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev);
|
||||||
r.sector_from = cpu_to_be64(from);
|
r.sector_from = cpu_to_be64(from);
|
||||||
|
|
||||||
|
if (bio_has_data(bio) && bio->bi_io_vec &&
|
||||||
|
bio->bi_io_vec->bv_page &&
|
||||||
|
bio->bi_io_vec->bv_page->tsk_dirty)
|
||||||
|
tsk = bio->bi_io_vec->bv_page->tsk_dirty;
|
||||||
|
|
||||||
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
|
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
|
||||||
bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
|
bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
|
||||||
sizeof(r), &r);
|
sizeof(r), &r, tsk);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -937,6 +980,7 @@ static void blk_add_trace_rq_remap(void *ignore,
|
||||||
{
|
{
|
||||||
struct blk_trace *bt = q->blk_trace;
|
struct blk_trace *bt = q->blk_trace;
|
||||||
struct blk_io_trace_remap r;
|
struct blk_io_trace_remap r;
|
||||||
|
struct task_struct *tsk = current;
|
||||||
|
|
||||||
if (likely(!bt))
|
if (likely(!bt))
|
||||||
return;
|
return;
|
||||||
|
@ -945,9 +989,14 @@ static void blk_add_trace_rq_remap(void *ignore,
|
||||||
r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
|
r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
|
||||||
r.sector_from = cpu_to_be64(from);
|
r.sector_from = cpu_to_be64(from);
|
||||||
|
|
||||||
|
if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
|
||||||
|
rq->bio->bi_io_vec->bv_page &&
|
||||||
|
rq->bio->bi_io_vec->bv_page->tsk_dirty)
|
||||||
|
tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
|
||||||
|
|
||||||
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
|
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
|
||||||
rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
|
rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
|
||||||
sizeof(r), &r);
|
sizeof(r), &r, tsk);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -966,16 +1015,22 @@ void blk_add_driver_data(struct request_queue *q,
|
||||||
void *data, size_t len)
|
void *data, size_t len)
|
||||||
{
|
{
|
||||||
struct blk_trace *bt = q->blk_trace;
|
struct blk_trace *bt = q->blk_trace;
|
||||||
|
struct task_struct *tsk = current;
|
||||||
|
|
||||||
if (likely(!bt))
|
if (likely(!bt))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
|
||||||
|
rq->bio->bi_io_vec->bv_page &&
|
||||||
|
rq->bio->bi_io_vec->bv_page->tsk_dirty)
|
||||||
|
tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
|
||||||
|
|
||||||
if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
|
if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
|
||||||
__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
|
__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
|
||||||
BLK_TA_DRV_DATA, rq->errors, len, data);
|
BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
|
||||||
else
|
else
|
||||||
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
|
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
|
||||||
BLK_TA_DRV_DATA, rq->errors, len, data);
|
BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(blk_add_driver_data);
|
EXPORT_SYMBOL_GPL(blk_add_driver_data);
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue