git.hungrycats.org Git - linux/commitdiff
block: don't use blocking queue entered for recursive bio submits
author: Jens Axboe <axboe@kernel.dk>
Sat, 2 Jun 2018 20:04:07 +0000 (14:04 -0600)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 22 Jul 2018 13:16:06 +0000 (15:16 +0200)
commit cd4a4ae4683dc2e09380118e205e057896dcda2b upstream.

If we end up splitting a bio and the queue goes away between
the initial submission and the later split submission, then we
can block forever in blk_queue_enter() waiting for the reference
to drop to zero. This will never happen, since we already hold
a reference.

Mark a split bio as already having entered the queue, so we can
just use the live non-blocking queue enter variant.

Thanks to Tetsuo Handa for the analysis.

Reported-by: syzbot+c4f9cebf9d651f6e54de@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
block/blk-core.c
block/blk-merge.c
include/linux/blk_types.h

index b559b9d4f1a2b86848bfc7d33debfd2ab3edd3ae..47ab2d9d02d94395abf51a7a7ad6dd4c6fde3702 100644 (file)
@@ -2392,7 +2392,9 @@ blk_qc_t generic_make_request(struct bio *bio)
 
        if (bio->bi_opf & REQ_NOWAIT)
                flags = BLK_MQ_REQ_NOWAIT;
-       if (blk_queue_enter(q, flags) < 0) {
+       if (bio_flagged(bio, BIO_QUEUE_ENTERED))
+               blk_queue_enter_live(q);
+       else if (blk_queue_enter(q, flags) < 0) {
                if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
                        bio_wouldblock_error(bio);
                else
index 782940c65d8a7c44ff24248f6e280e82b0a5bc9e..481dc02668f939fec259c5fdd9850fca3f7e68bc 100644 (file)
@@ -210,6 +210,16 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
                /* there isn't chance to merge the splitted bio */
                split->bi_opf |= REQ_NOMERGE;
 
+               /*
+                * Since we're recursing into make_request here, ensure
+                * that we mark this bio as already having entered the queue.
+                * If not, and the queue is going away, we can get stuck
+                * forever on waiting for the queue reference to drop. But
+                * that will never happen, as we're already holding a
+                * reference to it.
+                */
+               bio_set_flag(*bio, BIO_QUEUE_ENTERED);
+
                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                generic_make_request(*bio);
index 17b18b91ebac91a68244b51ca57cd086fd2d60a5..1602bf4ab4cdb273cdeb2c0256250f8c573b546e 100644 (file)
@@ -186,6 +186,8 @@ struct bio {
                                 * throttling rules. Don't do it again. */
 #define BIO_TRACE_COMPLETION 10        /* bio_endio() should trace the final completion
                                 * of this bio. */
+#define BIO_QUEUE_ENTERED 11   /* can use blk_queue_enter_live() */
+
 /* See BVEC_POOL_OFFSET below before adding new flags */
 
 /*