summaryrefslogtreecommitdiffstats
path: root/drivers/block/null_blk.c
diff options
context:
space:
mode:
authorArianna Avanzini2015-12-01 11:48:18 +0100
committerJens Axboe2015-12-01 18:52:10 +0100
commitcf8ecc5a8455266f8d516426b2acd36f9bdfa061 (patch)
tree81f99d356c0adb2f806782aa491097009c1616cc /drivers/block/null_blk.c
parentnull_blk: set a separate timer for each command (diff)
downloadkernel-qcow2-linux-cf8ecc5a8455266f8d516426b2acd36f9bdfa061.tar.gz
kernel-qcow2-linux-cf8ecc5a8455266f8d516426b2acd36f9bdfa061.tar.xz
kernel-qcow2-linux-cf8ecc5a8455266f8d516426b2acd36f9bdfa061.zip
null_blk: guarantee device restart in all irq modes
In single-queue (block layer) mode, the function null_rq_prep_fn stops the device if alloc_cmd fails. Then, once stopped, the device must be restarted on the next command completion, so that the request(s) for which alloc_cmd failed can be requeued. Otherwise the device hangs. Unfortunately, device restart is currently performed only for delayed completions, i.e., in irqmode==2. This fact causes hangs, for the above reasons, with the other irqmodes in combination with single-queue block layer. This commit addresses this issue by making sure that, if stopped, the device is properly restarted for all irqmodes on completions. Signed-off-by: Paolo Valente <paolo.valente@unimore.it> Signed-off-by: Arianna Avanzini <avanzini@google.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block/null_blk.c')
-rw-r--r--drivers/block/null_blk.c27
1 file changed, 15 insertions, 12 deletions
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 08932f5ea9f3..cf656198836c 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -217,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
+ struct request_queue *q = NULL;
+
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, 0);
@@ -227,27 +229,28 @@ static void end_cmd(struct nullb_cmd *cmd)
break;
case NULL_Q_BIO:
bio_endio(cmd->bio);
- break;
+ goto free_cmd;
}
- free_cmd(cmd);
-}
-
-static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
-{
- struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);
- struct request_queue *q = NULL;
-
if (cmd->rq)
q = cmd->rq->q;
+ /* Restart queue if needed, as we are freeing a tag */
if (q && !q->mq_ops && blk_queue_stopped(q)) {
- spin_lock(q->queue_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
if (blk_queue_stopped(q))
blk_start_queue(q);
- spin_unlock(q->queue_lock);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
- end_cmd(cmd);
+free_cmd:
+ free_cmd(cmd);
+}
+
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
+{
+ end_cmd(container_of(timer, struct nullb_cmd, timer));
return HRTIMER_NORESTART;
}