From a27d5a2f83bba21a78f45abe63ada896e7b4fc31 Mon Sep 17 00:00:00 2001
From: Gilad Broner
Date: Tue, 31 Mar 2015 16:17:25 +0300
Subject: [PATCH] block: test-iosched: fix spinlock recursion

spin_lock_irq() / spin_unlock_irq() are used, so interrupts are
unconditionally enabled after unlocking the spinlock. However, there is
no guarantee that they were enabled before the lock was taken. Use the
proper irqsave / irqrestore variants instead, which save and restore
the interrupt state. Without this change, a spinlock recursion on the
SCSI request completion path is possible if a completion interrupt
occurs while the scheduler is used for UFS testing.

Change-Id: I25a9bf6faaa2bbfedc807111fbcb32276cccea2f
Signed-off-by: Gilad Broner
---
 block/test-iosched.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/block/test-iosched.c b/block/test-iosched.c
index cf50436d2d17..2a0be42dd930 100644
--- a/block/test-iosched.c
+++ b/block/test-iosched.c
@@ -167,6 +167,7 @@ int test_iosched_add_unique_test_req(struct test_iosched *tios,
 	struct request *rq;
 	int rw_flags;
 	struct test_request *test_rq;
+	unsigned long flags;
 
 	if (!tios)
 		return -ENODEV;
@@ -230,10 +231,10 @@ int test_iosched_add_unique_test_req(struct test_iosched *tios,
 		"%s: added request %d to the test requests list, type = %d",
 		__func__, test_rq->req_id, req_unique);
 
-	spin_lock_irq(tios->req_q->queue_lock);
+	spin_lock_irqsave(tios->req_q->queue_lock, flags);
 	list_add_tail(&test_rq->queuelist, &tios->test_queue);
 	tios->test_count++;
-	spin_unlock_irq(tios->req_q->queue_lock);
+	spin_unlock_irqrestore(tios->req_q->queue_lock, flags);
 
 	return 0;
 }
@@ -408,14 +409,15 @@ int test_iosched_add_wr_rd_test_req(struct test_iosched *tios,
 	int pattern, rq_end_io_fn *end_req_io)
 {
 	struct test_request *test_rq = NULL;
+	unsigned long flags;
 
 	test_rq = test_iosched_create_test_req(tios, is_err_expcted,
 			direction, start_sec, num_bios, pattern, end_req_io);
 	if (test_rq) {
-		spin_lock_irq(tios->req_q->queue_lock);
+		spin_lock_irqsave(tios->req_q->queue_lock, flags);
 		list_add_tail(&test_rq->queuelist, &tios->test_queue);
 		tios->test_count++;
-		spin_unlock_irq(tios->req_q->queue_lock);
+		spin_unlock_irqrestore(tios->req_q->queue_lock, flags);
 		return 0;
 	}
 	return -ENODEV;
@@ -1106,6 +1108,7 @@ static int test_init_queue(struct request_queue *q, struct elevator_type *e)
 	const char *blk_dev_name;
 	int ret;
 	bool found = false;
+	unsigned long flags;
 
 	eq = elevator_alloc(q, e);
 	if (!eq)
@@ -1168,9 +1171,9 @@ static int test_init_queue(struct request_queue *q, struct elevator_type *e)
 		}
 	}
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irqsave(q->queue_lock, flags);
 	q->elevator = eq;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	return 0;
 
@@ -1209,14 +1212,16 @@ static void test_exit_queue(struct elevator_queue *e)
 void test_iosched_add_urgent_req(struct test_iosched *tios,
 	struct test_request *test_rq)
 {
+	unsigned long flags;
+
 	if (!tios)
 		return;
 
-	spin_lock_irq(&tios->lock);
+	spin_lock_irqsave(&tios->lock, flags);
 	test_rq->rq->cmd_flags |= REQ_URGENT;
 	list_add_tail(&test_rq->queuelist, &tios->urgent_queue);
 	tios->urgent_count++;
-	spin_unlock_irq(&tios->lock);
+	spin_unlock_irqrestore(&tios->lock, flags);
 }
 EXPORT_SYMBOL(test_iosched_add_urgent_req);
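
Note (illustrative, not part of the patch): a minimal sketch of the
failure mode the commit message describes and why the irqsave /
irqrestore variants fix it. The lock and function names below are
hypothetical; only the locking pattern mirrors the patch.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/*
	 * May be called from a context that has already disabled
	 * interrupts (e.g. under another irq-disabling lock on the
	 * request completion path).
	 */
	static void demo_add_item(void)
	{
		unsigned long flags;

		/*
		 * spin_unlock_irq() would unconditionally re-enable
		 * interrupts here, even if the caller had them disabled.
		 * A completion interrupt could then fire and try to take
		 * a lock the interrupted context still holds, recursing
		 * on the spinlock. irqsave/irqrestore save the caller's
		 * interrupt state on entry and restore exactly that
		 * state on exit.
		 */
		spin_lock_irqsave(&demo_lock, flags);
		/* ... critical section: shared lists and counters ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}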