[v5,1/6] kexec: move locking into do_kexec_load

Message ID 20210727144859.4150043-2-arnd@kernel.org
State New
Series compat: remove compat_alloc_user_space

Commit Message

Arnd Bergmann July 27, 2021, 2:48 p.m. UTC
From: Arnd Bergmann <arnd@arndb.de>

The locking is the same between the native and compat versions of
sys_kexec_load(), so it can be done in the common implementation
to reduce duplication.

Co-developed-by: Eric Biederman <ebiederm@xmission.com>
Co-developed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
---
 kernel/kexec.c | 44 ++++++++++++++++----------------------------
 1 file changed, 16 insertions(+), 28 deletions(-)
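
For orientation, here is an abridged sketch of the resulting structure,
condensed from the diff below (segment loading and image installation
elided): locking now happens once, in the common helper, rather than in
each syscall entry point.

static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
			 struct kexec_segment *segments, unsigned long flags)
{
	int ret;

	/* Single lock site, shared by the native and compat syscalls. */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		goto out_unlock;

	/* ... load segments and install the image, as in the diff ... */

out_unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}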

Comments

Eric W. Biederman July 28, 2021, 4:09 p.m. UTC | #1
Arnd Bergmann <arnd@kernel.org> writes:

> From: Arnd Bergmann <arnd@arndb.de>
>
> The locking is the same between the native and compat versions of
> sys_kexec_load(), so it can be done in the common implementation
> to reduce duplication.

Acked-by: "Eric W. Biederman" <ebiederm@xmission.com>


Patch

diff --git a/kernel/kexec.c b/kernel/kexec.c
index c82c6c06f051..9c7aef8f4bb6 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -110,6 +110,17 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 	unsigned long i;
 	int ret;
 
+	/*
+	 * Because we write directly to the reserved memory region when loading
+	 * crash kernels, we need a mutex here to prevent multiple crash kernels
+	 * from attempting to load simultaneously, and to prevent a crash kernel
+	 * from loading over the top of an in-use crash kernel.
+	 *
+	 * KISS: always take the mutex.
+	 */
+	if (!mutex_trylock(&kexec_mutex))
+		return -EBUSY;
+
 	if (flags & KEXEC_ON_CRASH) {
 		dest_image = &kexec_crash_image;
 		if (kexec_crash_image)
@@ -121,7 +132,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 	if (nr_segments == 0) {
 		/* Uninstall image */
 		kimage_free(xchg(dest_image, NULL));
-		return 0;
+		ret = 0;
+		goto out_unlock;
 	}
 	if (flags & KEXEC_ON_CRASH) {
 		/*
@@ -134,7 +146,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 
 	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
 	if (flags & KEXEC_PRESERVE_CONTEXT)
 		image->preserve_context = 1;
@@ -171,6 +183,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 		arch_kexec_protect_crashkres();
 
 	kimage_free(image);
+out_unlock:
+	mutex_unlock(&kexec_mutex);
 	return ret;
 }
 
@@ -247,21 +261,8 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
 		return -EINVAL;
 
-	/* Because we write directly to the reserved memory
-	 * region when loading crash kernels we need a mutex here to
-	 * prevent multiple crash  kernels from attempting to load
-	 * simultaneously, and to prevent a crash kernel from loading
-	 * over the top of a in use crash kernel.
-	 *
-	 * KISS: always take the mutex.
-	 */
-	if (!mutex_trylock(&kexec_mutex))
-		return -EBUSY;
-
 	result = do_kexec_load(entry, nr_segments, segments, flags);
 
-	mutex_unlock(&kexec_mutex);
-
 	return result;
 }
 
@@ -301,21 +302,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 			return -EFAULT;
 	}
 
-	/* Because we write directly to the reserved memory
-	 * region when loading crash kernels we need a mutex here to
-	 * prevent multiple crash  kernels from attempting to load
-	 * simultaneously, and to prevent a crash kernel from loading
-	 * over the top of a in use crash kernel.
-	 *
-	 * KISS: always take the mutex.
-	 */
-	if (!mutex_trylock(&kexec_mutex))
-		return -EBUSY;
-
 	result = do_kexec_load(entry, nr_segments, ksegments, flags);
 
-	mutex_unlock(&kexec_mutex);
-
 	return result;
 }
 #endif
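
The pattern the patch introduces (trylock so a concurrent loader fails
fast with -EBUSY, plus a single unlock exit reached by goto) translates
directly to userspace. Below is a minimal self-contained analogue using
pthreads; the names do_load and load_lock are hypothetical, invented
here for illustration, not kernel code.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t load_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of the kernel pattern: refuse to block on contention, and
 * funnel every exit taken after the trylock through one unlock site. */
static int do_load(int nr_segments)
{
	int ret;

	/* pthread_mutex_trylock() returns nonzero (EBUSY) if already held. */
	if (pthread_mutex_trylock(&load_lock))
		return -EBUSY;

	if (nr_segments == 0) {
		ret = 0;		/* "uninstall" path, as in the patch */
		goto out_unlock;
	}

	ret = 0;			/* real load work would go here */

out_unlock:
	pthread_mutex_unlock(&load_lock);
	return ret;
}

int main(void)
{
	printf("do_load(1) = %d\n", do_load(1));	/* prints 0 */
	return 0;
}

(Compile with -pthread.)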