diff mbox series

[T,SRU] timerfd: Protect the might cancel mechanism proper

Message ID 20171002140751.16779-2-shrirang.bagul@canonical.com
State New
Headers show
Series [T,SRU] timerfd: Protect the might cancel mechanism proper | expand

Commit Message

Shrirang Bagul Oct. 2, 2017, 2:07 p.m. UTC
From: Thomas Gleixner <tglx@linutronix.de>

The handling of the might_cancel queueing is not properly protected, so
parallel operations on the file descriptor can race with each other and
lead to list corruptions or use after free.

Protect the context for these operations with a separate lock.

The wait queue lock cannot be reused for this because that would create a
lock inversion scenario vs. the cancel lock. Replacing might_cancel with an
atomic (atomic_t or atomic bit) does not help either because it still can
race vs. the actual list operation.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: "linux-fsdevel@vger.kernel.org"
Cc: syzkaller <syzkaller@googlegroups.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: linux-fsdevel@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701311521430.3457@nanos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

This fixes CVE-2017-10661

(cherry picked from commit 1e38da300e1e395a15048b0af1e5305bd91402f6)
Signed-off-by: Shrirang Bagul <shrirang.bagul@canonical.com>
---
 fs/timerfd.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

Comments

Stefan Bader Oct. 5, 2017, 6:47 a.m. UTC | #1
On 02.10.2017 16:07, Shrirang Bagul wrote:
> From: Thomas Gleixner <tglx@linutronix.de>
> 
> The handling of the might_cancel queueing is not properly protected, so
> parallel operations on the file descriptor can race with each other and
> lead to list corruptions or use after free.
> 
> Protect the context for these operations with a separate lock.
> 
> The wait queue lock cannot be reused for this because that would create a
> lock inversion scenario vs. the cancel lock. Replacing might_cancel with an
> atomic (atomic_t or atomic bit) does not help either because it still can
> race vs. the actual list operation.
> 
> Reported-by: Dmitry Vyukov <dvyukov@google.com>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Cc: "linux-fsdevel@vger.kernel.org"
> Cc: syzkaller <syzkaller@googlegroups.com>
> Cc: Al Viro <viro@zeniv.linux.org.uk>
> Cc: linux-fsdevel@vger.kernel.org
> Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701311521430.3457@nanos
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> 
> This fixes CVE-2017-10661
> 
> (cherry picked from commit 1e38da300e1e395a15048b0af1e5305bd91402f6)
> Signed-off-by: Shrirang Bagul <shrirang.bagul@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>

> ---
>  fs/timerfd.c | 17 ++++++++++++++---
>  1 file changed, 14 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/timerfd.c b/fs/timerfd.c
> index 929312180dd0..4ed3c8c0c24c 100644
> --- a/fs/timerfd.c
> +++ b/fs/timerfd.c
> @@ -39,6 +39,7 @@ struct timerfd_ctx {
>  	int clockid;
>  	struct rcu_head rcu;
>  	struct list_head clist;
> +	spinlock_t cancel_lock;
>  	bool might_cancel;
>  };
>  
> @@ -111,7 +112,7 @@ void timerfd_clock_was_set(void)
>  	rcu_read_unlock();
>  }
>  
> -static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
> +static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
>  {
>  	if (ctx->might_cancel) {
>  		ctx->might_cancel = false;
> @@ -121,6 +122,13 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
>  	}
>  }
>  
> +static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
> +{
> +	spin_lock(&ctx->cancel_lock);
> +	__timerfd_remove_cancel(ctx);
> +	spin_unlock(&ctx->cancel_lock);
> +}
> +
>  static bool timerfd_canceled(struct timerfd_ctx *ctx)
>  {
>  	if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
> @@ -131,6 +139,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx)
>  
>  static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
>  {
> +	spin_lock(&ctx->cancel_lock);
>  	if ((ctx->clockid == CLOCK_REALTIME ||
>  	     ctx->clockid == CLOCK_REALTIME_ALARM) &&
>  	    (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
> @@ -140,9 +149,10 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
>  			list_add_rcu(&ctx->clist, &cancel_list);
>  			spin_unlock(&cancel_lock);
>  		}
> -	} else if (ctx->might_cancel) {
> -		timerfd_remove_cancel(ctx);
> +	} else {
> +		__timerfd_remove_cancel(ctx);
>  	}
> +	spin_unlock(&ctx->cancel_lock);
>  }
>  
>  static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
> @@ -325,6 +335,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
>  		return -ENOMEM;
>  
>  	init_waitqueue_head(&ctx->wqh);
> +	spin_lock_init(&ctx->cancel_lock);
>  	ctx->clockid = clockid;
>  
>  	if (isalarm(ctx))
>
Kleber Sacilotto de Souza Oct. 5, 2017, 8:49 a.m. UTC | #2
On 10/02/2017 04:07 PM, Shrirang Bagul wrote:
> From: Thomas Gleixner <tglx@linutronix.de>
>
> The handling of the might_cancel queueing is not properly protected, so
> parallel operations on the file descriptor can race with each other and
> lead to list corruptions or use after free.
>
> Protect the context for these operations with a separate lock.
>
> The wait queue lock cannot be reused for this because that would create a
> lock inversion scenario vs. the cancel lock. Replacing might_cancel with an
> atomic (atomic_t or atomic bit) does not help either because it still can
> race vs. the actual list operation.
>
> Reported-by: Dmitry Vyukov <dvyukov@google.com>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Cc: "linux-fsdevel@vger.kernel.org"
> Cc: syzkaller <syzkaller@googlegroups.com>
> Cc: Al Viro <viro@zeniv.linux.org.uk>
> Cc: linux-fsdevel@vger.kernel.org
> Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701311521430.3457@nanos
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
>
> This fixes CVE-2017-10661

Above line should be simply:

CVE-2017-10661

>
> (cherry picked from commit 1e38da300e1e395a15048b0af1e5305bd91402f6)
> Signed-off-by: Shrirang Bagul <shrirang.bagul@canonical.com>

Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>

> ---
>  fs/timerfd.c | 17 ++++++++++++++---
>  1 file changed, 14 insertions(+), 3 deletions(-)
>
> diff --git a/fs/timerfd.c b/fs/timerfd.c
> index 929312180dd0..4ed3c8c0c24c 100644
> --- a/fs/timerfd.c
> +++ b/fs/timerfd.c
> @@ -39,6 +39,7 @@ struct timerfd_ctx {
>  	int clockid;
>  	struct rcu_head rcu;
>  	struct list_head clist;
> +	spinlock_t cancel_lock;
>  	bool might_cancel;
>  };
>
> @@ -111,7 +112,7 @@ void timerfd_clock_was_set(void)
>  	rcu_read_unlock();
>  }
>
> -static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
> +static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
>  {
>  	if (ctx->might_cancel) {
>  		ctx->might_cancel = false;
> @@ -121,6 +122,13 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
>  	}
>  }
>
> +static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
> +{
> +	spin_lock(&ctx->cancel_lock);
> +	__timerfd_remove_cancel(ctx);
> +	spin_unlock(&ctx->cancel_lock);
> +}
> +
>  static bool timerfd_canceled(struct timerfd_ctx *ctx)
>  {
>  	if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
> @@ -131,6 +139,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx)
>
>  static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
>  {
> +	spin_lock(&ctx->cancel_lock);
>  	if ((ctx->clockid == CLOCK_REALTIME ||
>  	     ctx->clockid == CLOCK_REALTIME_ALARM) &&
>  	    (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
> @@ -140,9 +149,10 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
>  			list_add_rcu(&ctx->clist, &cancel_list);
>  			spin_unlock(&cancel_lock);
>  		}
> -	} else if (ctx->might_cancel) {
> -		timerfd_remove_cancel(ctx);
> +	} else {
> +		__timerfd_remove_cancel(ctx);
>  	}
> +	spin_unlock(&ctx->cancel_lock);
>  }
>
>  static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
> @@ -325,6 +335,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
>  		return -ENOMEM;
>
>  	init_waitqueue_head(&ctx->wqh);
> +	spin_lock_init(&ctx->cancel_lock);
>  	ctx->clockid = clockid;
>
>  	if (isalarm(ctx))
>
Thadeu Lima de Souza Cascardo Oct. 9, 2017, 7:43 p.m. UTC | #3
Applied to trusty/master-next branch.

CVE line fixed up.

Thanks.
Cascardo.

Applied-to: trusty/master-next
diff mbox series

Patch

diff --git a/fs/timerfd.c b/fs/timerfd.c
index 929312180dd0..4ed3c8c0c24c 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -39,6 +39,7 @@  struct timerfd_ctx {
 	int clockid;
 	struct rcu_head rcu;
 	struct list_head clist;
+	spinlock_t cancel_lock;
 	bool might_cancel;
 };
 
@@ -111,7 +112,7 @@  void timerfd_clock_was_set(void)
 	rcu_read_unlock();
 }
 
-static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
 {
 	if (ctx->might_cancel) {
 		ctx->might_cancel = false;
@@ -121,6 +122,13 @@  static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
 	}
 }
 
+static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+{
+	spin_lock(&ctx->cancel_lock);
+	__timerfd_remove_cancel(ctx);
+	spin_unlock(&ctx->cancel_lock);
+}
+
 static bool timerfd_canceled(struct timerfd_ctx *ctx)
 {
 	if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
@@ -131,6 +139,7 @@  static bool timerfd_canceled(struct timerfd_ctx *ctx)
 
 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 {
+	spin_lock(&ctx->cancel_lock);
 	if ((ctx->clockid == CLOCK_REALTIME ||
 	     ctx->clockid == CLOCK_REALTIME_ALARM) &&
 	    (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
@@ -140,9 +149,10 @@  static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 			list_add_rcu(&ctx->clist, &cancel_list);
 			spin_unlock(&cancel_lock);
 		}
-	} else if (ctx->might_cancel) {
-		timerfd_remove_cancel(ctx);
+	} else {
+		__timerfd_remove_cancel(ctx);
 	}
+	spin_unlock(&ctx->cancel_lock);
 }
 
 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
@@ -325,6 +335,7 @@  SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
 		return -ENOMEM;
 
 	init_waitqueue_head(&ctx->wqh);
+	spin_lock_init(&ctx->cancel_lock);
 	ctx->clockid = clockid;
 
 	if (isalarm(ctx))