[SRU,Kinetic] io_uring/msg_ring: fix missing lock on overflow for IOPOLL

Message ID 20230614104237.1948590-6-cascardo@canonical.com
State New
Series [SRU,Kinetic] io_uring/msg_ring: fix missing lock on overflow for IOPOLL

Commit Message

Thadeu Lima de Souza Cascardo June 14, 2023, 10:42 a.m. UTC
From: Jens Axboe <axboe@kernel.dk>

If the target ring is configured with IOPOLL, then we always need to hold
the target ring uring_lock before posting CQEs. We could just grab it
unconditionally, but since we don't expect many target rings to be of this
type, make grabbing the uring_lock conditional on the ring type.

Link: https://lore.kernel.org/io-uring/Y8krlYa52%2F0YGqkg@ip-172-31-85-199.ec2.internal/
Reported-by: Xingyuan Mo <hdthky0@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(backported from commit e12d7a46f65ae4b7d58a5e0c1cbfa825cf8d830d)
[cascardo: introduce io_double_lock_ctx and use it in io_msg_ring around
 the equivalent open coded version of io_post_aux_cqe]
CVE-2023-2430
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
---
 io_uring/io_uring.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
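
The trylock-or-punt rule described in the commit message can be sketched in
isolation. The snippet below is a minimal userspace analogue, not kernel code:
ring_ctx, double_lock_target(), double_unlock_target() and the iopoll field are
hypothetical names standing in for io_ring_ctx, io_double_lock_ctx(),
io_double_unlock_ctx() and IORING_SETUP_IOPOLL, and pthread mutexes stand in
for the kernel's uring_lock mutex. The actual change is the hunk in the Patch
section below.

/*
 * Userspace sketch of the locking rule the patch enforces (illustrative
 * only). If the caller already holds its own ring's lock, it may only
 * trylock the target ring's lock; on failure it must back off (-EAGAIN,
 * i.e. punt to io-wq in the real code) instead of blocking, or two rings
 * messaging each other could deadlock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct ring_ctx {
	pthread_mutex_t uring_lock;
	bool iopoll;            /* stands in for IORING_SETUP_IOPOLL */
};

/* Returns 0 on success, -EAGAIN if the caller must retry without its lock. */
static int double_lock_target(struct ring_ctx *target, bool source_locked)
{
	if (!target->iopoll)
		return 0;       /* no target lock needed to post a CQE */

	if (source_locked) {
		/* Holding the source lock: never block on the target lock. */
		if (pthread_mutex_trylock(&target->uring_lock) != 0)
			return -EAGAIN;
		return 0;
	}
	/* Source lock not held: safe to block. */
	pthread_mutex_lock(&target->uring_lock);
	return 0;
}

static void double_unlock_target(struct ring_ctx *target)
{
	if (target->iopoll)
		pthread_mutex_unlock(&target->uring_lock);
}

int main(void)
{
	struct ring_ctx target = {
		.uring_lock = PTHREAD_MUTEX_INITIALIZER,
		.iopoll = true,
	};

	/* Caller already holds its own ring lock: only a trylock is allowed. */
	if (double_lock_target(&target, true) == 0) {
		/* ... post the CQE to the target ring here ... */
		double_unlock_target(&target);
	}
	return 0;
}

The design choice mirrored here is that whether blocking is allowed depends
only on whether the submitter already holds its own ring's lock; in the patch
that is the !(issue_flags & IO_URING_F_UNLOCKED) check in io_double_lock_ctx(),
and a failed trylock surfaces as -EAGAIN so the request is retried from io-wq,
where no source lock is held.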

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cd0b96050ded..5e3e26cb513d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -5049,6 +5049,28 @@  static int io_msg_ring_prep(struct io_kiocb *req,
 	return 0;
 }
 
+static void io_double_unlock_ctx(struct io_ring_ctx *octx)
+{
+	mutex_unlock(&octx->uring_lock);
+}
+
+static int io_double_lock_ctx(struct io_ring_ctx *octx,
+			      unsigned int issue_flags)
+{
+	/*
+	 * To ensure proper ordering between the two ctxs, we can only
+	 * attempt a trylock on the target. If that fails and we already have
+	 * the source ctx lock, punt to io-wq.
+	 */
+	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+		if (!mutex_trylock(&octx->uring_lock))
+			return -EAGAIN;
+		return 0;
+	}
+	mutex_lock(&octx->uring_lock);
+	return 0;
+}
+
 static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *target_ctx;
@@ -5063,6 +5085,10 @@  static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 	ret = -EOVERFLOW;
 	target_ctx = req->file->private_data;
 
+	if (target_ctx->flags & IORING_SETUP_IOPOLL)
+		if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+			return -EAGAIN;
+
 	spin_lock(&target_ctx->completion_lock);
 	filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
 	io_commit_cqring(target_ctx);
@@ -5073,6 +5099,9 @@  static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 		ret = 0;
 	}
 
+	if (target_ctx->flags & IORING_SETUP_IOPOLL)
+		io_double_unlock_ctx(target_ctx);
+
 done:
 	if (ret < 0)
 		req_set_fail(req);