git.hungrycats.org Git - linux/commitdiff
io_uring: used cached copies of sq->dropped and cq->overflow
authorJens Axboe <axboe@kernel.dk>
Fri, 25 Oct 2019 16:04:25 +0000 (10:04 -0600)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 29 Oct 2019 08:22:30 +0000 (09:22 +0100)
[ Upstream commit 498ccd9eda49117c34e0041563d0da6ac40e52b8 ]

We currently use the ring values directly, but that can lead to issues
if the application is malicious and changes these values on our behalf.
Created in-kernel cached versions of them, and just overwrite the user
side when we update them. This is similar to how we treat the sq/cq
ring tail/head updates.

Reported-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
fs/io_uring.c

index 976ca7becfe81a135a3098e176e46708fa09183d..ed223c33dd898de5c04568c97e284dc68a0b6688 100644 (file)
@@ -221,6 +221,7 @@ struct io_ring_ctx {
                unsigned                sq_entries;
                unsigned                sq_mask;
                unsigned                sq_thread_idle;
+               unsigned                cached_sq_dropped;
                struct io_uring_sqe     *sq_sqes;
 
                struct list_head        defer_list;
@@ -237,6 +238,7 @@ struct io_ring_ctx {
                /* CQ ring */
                struct io_cq_ring       *cq_ring;
                unsigned                cached_cq_tail;
+               atomic_t                cached_cq_overflow;
                unsigned                cq_entries;
                unsigned                cq_mask;
                struct wait_queue_head  cq_wait;
@@ -431,7 +433,8 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
        if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
                return false;
 
-       return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
+       return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped
+                                       + atomic_read(&ctx->cached_cq_overflow);
 }
 
 static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -511,9 +514,8 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
                WRITE_ONCE(cqe->res, res);
                WRITE_ONCE(cqe->flags, 0);
        } else {
-               unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
-
-               WRITE_ONCE(ctx->cq_ring->overflow, overflow + 1);
+               WRITE_ONCE(ctx->cq_ring->overflow,
+                               atomic_inc_return(&ctx->cached_cq_overflow));
        }
 }
 
@@ -2272,7 +2274,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 
        /* drop invalid entries */
        ctx->cached_sq_head++;
-       ring->dropped++;
+       ctx->cached_sq_dropped++;
+       WRITE_ONCE(ring->dropped, ctx->cached_sq_dropped);
        return false;
 }