arch: um: implement memory protection
diff mbox series

Message ID 20190823225831.23517-1-johannes@sipsolutions.net
State Superseded
Headers show
Series
  • arch: um: implement memory protection
Related show

Commit Message

Johannes Berg Aug. 23, 2019, 10:58 p.m. UTC
From: Johannes Berg <johannes.berg@intel.com>

Implement memory protection, namely
 * ARCH_HAS_SET_MEMORY
 * ARCH_HAS_STRICT_KERNEL_RWX
 * ARCH_HAS_STRICT_MODULE_RWX

The .text section is marked as RO.
The rodata is marked as RO & NX.
The remaining data is marked NX.

Move the .kstrtab to be covered by the NX.

Note that you can now enable CONFIG_DEBUG_RODATA_TEST,
but it's broken as the fixup is broken - it'll just
crash with

 Kernel panic - not syncing: Segfault with no mm

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
---
 arch/um/Kconfig               |  3 ++
 arch/um/include/asm/pgtable.h |  2 +
 arch/um/kernel/dyn.lds.S      |  4 +-
 arch/um/kernel/mem.c          | 76 +++++++++++++++++++++++++++++++++++
 4 files changed, 83 insertions(+), 2 deletions(-)

Comments

Anton Ivanov Aug. 25, 2019, 6:30 a.m. UTC | #1
On 23/08/2019 23:58, Johannes Berg wrote:
> From: Johannes Berg <johannes.berg@intel.com>
> 
> Implement memory protection, namely
>   * ARCH_HAS_SET_MEMORY
>   * ARCH_HAS_STRICT_KERNEL_RWX
>   * ARCH_HAS_STRICT_MODULE_RWX
> 
> The .text section is marked as RO.
> The rodata is marked as RO & NX.
> The remaining data is marked NX.
> 
> Move the .kstrtab to be covered by the NX.
> 
> Note that you can now enable CONFIG_DEBUG_RODATA_TEST,
> but it's broken as the fixup is broken - it'll just
> crash with
> 
>   Kernel panic - not syncing: Segfault with no mm
> 
> Signed-off-by: Johannes Berg <johannes.berg@intel.com>
> ---
>   arch/um/Kconfig               |  3 ++
>   arch/um/include/asm/pgtable.h |  2 +
>   arch/um/kernel/dyn.lds.S      |  4 +-
>   arch/um/kernel/mem.c          | 76 +++++++++++++++++++++++++++++++++++
>   4 files changed, 83 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/um/Kconfig b/arch/um/Kconfig
> index 3c3adfc486f2..e84264be26c9 100644
> --- a/arch/um/Kconfig
> +++ b/arch/um/Kconfig
> @@ -18,6 +18,9 @@ config UML
>   	select GENERIC_CLOCKEVENTS
>   	select HAVE_GCC_PLUGINS
>   	select TTY # Needed for line.c
> +	select ARCH_HAS_SET_MEMORY
> +	select ARCH_HAS_STRICT_KERNEL_RWX
> +	select ARCH_HAS_STRICT_MODULE_RWX
>   
>   config MMU
>   	bool
> diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
> index b377df76cc28..0e6cda3516c6 100644
> --- a/arch/um/include/asm/pgtable.h
> +++ b/arch/um/include/asm/pgtable.h
> @@ -17,6 +17,8 @@
>   #define _PAGE_USER	0x040
>   #define _PAGE_ACCESSED	0x080
>   #define _PAGE_DIRTY	0x100
> +#define _PAGE_RO	0x200
> +#define _PAGE_NX	0x400
>   /* If _PAGE_PRESENT is clear, we use these: */
>   #define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
>   				   pte_present gives true */
> diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
> index c69d69ee96be..da6b42793e0a 100644
> --- a/arch/um/kernel/dyn.lds.S
> +++ b/arch/um/kernel/dyn.lds.S
> @@ -89,10 +89,10 @@ SECTIONS
>       KEEP (*(.fini))
>     } =0x90909090
>   
> -  .kstrtab : { *(.kstrtab) }
> -
>     #include <asm/common.lds.S>
>   
> +  .kstrtab : { *(.kstrtab) }
> +
>     __init_begin = .;
>     init.data : { INIT_DATA }
>     __init_end = .;
> diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
> index f256be1d77bd..7ce445bfa58f 100644
> --- a/arch/um/kernel/mem.c
> +++ b/arch/um/kernel/mem.c
> @@ -10,6 +10,7 @@
>   #include <linux/mm.h>
>   #include <linux/swap.h>
>   #include <linux/slab.h>
> +#include <asm/set_memory.h>
>   #include <asm/fixmap.h>
>   #include <asm/page.h>
>   #include <as-layout.h>
> @@ -37,6 +38,22 @@ int kmalloc_ok = 0;
>   /* Used during early boot */
>   static unsigned long brk_end;
>   
> +void mark_rodata_ro(void)
> +{
> +	unsigned long text_start = PFN_ALIGN(_text);
> +	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
> +	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
> +	unsigned long all_end = PFN_ALIGN(&__bss_stop);
> +
> +	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
> +	       (rodata_end - text_start) >> 10);
> +
> +	set_memory_ro(text_start,
> +		      (rodata_end - text_start) >> PAGE_SHIFT);
> +	set_memory_nx(rodata_start,
> +		      (all_end - rodata_start) >> PAGE_SHIFT);
> +}
> +
>   void __init mem_init(void)
>   {
>   	/* clear the zero-page */
> @@ -225,3 +242,62 @@ void *uml_kmalloc(int size, int flags)
>   {
>   	return kmalloc(size, flags);
>   }
> +
> +struct page_change_data {
> +	u32 set, clear;
> +};
> +
> +static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> +{
> +	struct page_change_data *cdata = data;
> +	pte_t pte = READ_ONCE(*ptep);
> +
> +	pte_clear_bits(pte, cdata->clear);
> +	pte_set_bits(pte, cdata->set);
> +
> +	set_pte(ptep, pte);
> +
> +	os_protect_memory((void *)addr, PAGE_SIZE,
> +			  1, !(pte.pte & _PAGE_RO), !(pte.pte & _PAGE_NX));
> +	return 0;
> +}
> +
> +static int change_memory_common(unsigned long addr, int numpages,
> +				u32 set, u32 clear)
> +{
> +	unsigned long start = addr & PAGE_MASK;
> +	unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
> +	unsigned long size = end - start;
> +	struct page_change_data data;
> +
> +	WARN_ON_ONCE(start != addr);
> +
> +	if (!size)
> +		return 0;
> +
> +	data.set = set;
> +	data.clear = clear;
> +
> +	return apply_to_page_range(&init_mm, start, size, change_page_range,
> +				   &data);
> +}
> +
> +int set_memory_ro(unsigned long addr, int numpages)
> +{
> +	return change_memory_common(addr, numpages, _PAGE_RO, 0);
> +}
> +
> +int set_memory_rw(unsigned long addr, int numpages)
> +{
> +	return change_memory_common(addr, numpages, 0, _PAGE_RO);
> +}
> +
> +int set_memory_nx(unsigned long addr, int numpages)
> +{
> +	return change_memory_common(addr, numpages, _PAGE_NX, 0);
> +}
> +
> +int set_memory_x(unsigned long addr, int numpages)
> +{
> +	return change_memory_common(addr, numpages, 0, _PAGE_NX);
> +}
> 

I will try to have a look at this one and the other patches in the 
coming week.

Brgds,
Johannes Berg Aug. 25, 2019, 7:41 a.m. UTC | #2
On Sat, 2019-08-24 at 00:58 +0200, Johannes Berg wrote:
> 
> Note that you can now enable CONFIG_DEBUG_RODATA_TEST,
> but it's broken as the fixup is broken - it'll just
> crash with
> 
>  Kernel panic - not syncing: Segfault with no mm

It just occurred to me that I need to check what happens if it's
actually userspace taking the fault or something, maybe if it
erroneously tries to copy_to_user() with a read-only kernel address or
so.

Don't want to cause a panic() in that case, after all.

IOW, I'd feel much happier about this if I could make
CONFIG_DEBUG_RODATA_TEST work after all, but I gave up after an hour or
so of poking the exception handlers and trying to figure out how I could
make fixup tables work on UML. It might mean doing copy_from_user() in
assembly, which would be annoying.

johannes
Anton Ivanov Aug. 27, 2019, 5:15 a.m. UTC | #3
On 25/08/2019 08:41, Johannes Berg wrote:
> On Sat, 2019-08-24 at 00:58 +0200, Johannes Berg wrote:
>>
>> Note that you can now enable CONFIG_DEBUG_RODATA_TEST,
>> but it's broken as the fixup is broken - it'll just
>> crash with
>>
>>   Kernel panic - not syncing: Segfault with no mm
> 
> It just occurred to me that I need to check what happens if it's
> actually userspace taking the fault or something, maybe if it
> erroneously tries to copy_to_user() with a read-only kernel address or
> so.
> 
> Don't want to cause a panic() in that case, after all.
> 
> IOW, I'd feel much happier about this if I could make
> CONFIG_DEBUG_RODATA_TEST work after all, but I gave up after an hour or
> so of poking the exception handlers and trying to figure out how I could
> make fixup tables work on UML. It might mean doing copy_from_user() in

Copy from/to user in UML have embedded tlb lookups. I did a couple of 
tweaks on them earlier in the year. I do not think they are a feasible 
target for a rewrite in asm without redoing once more the actual tlb.

That one is tough, in fact very tough. I think I have squeezed nearly 
everything that could be squeezed out of the current design, any further 
improvements can come only from a redesign from the whole uml mm and/or 
trying to integrate host ksm by default.

A.

> assembly, which would be annoying.
> 
> johannes
> 
> 
> _______________________________________________
> linux-um mailing list
> linux-um@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-um
>
Johannes Berg Aug. 27, 2019, 7:27 a.m. UTC | #4
On Tue, 2019-08-27 at 06:15 +0100, Anton Ivanov wrote:

> > IOW, I'd feel much happier about this if I could make
> > CONFIG_DEBUG_RODATA_TEST work after all, but I gave up after an hour or
> > so of poking the exception handlers and trying to figure out how I could
> > make fixup tables work on UML. It might mean doing copy_from_user() in
> 
> Copy from/to user in UML have embedded tlb lookups. I did a couple of 
> tweaks on them earlier in the year. I do not think they are a feasible 
> target for a rewrite in asm without redoing once more the actual tlb.
> 
> 
> That one is tough, in fact very tough. I think I have squeezed nearly 
> everything that could be squeezed out of the current design, any further 
> improvements can come only from a redesign from the whole uml mm and/or 
> trying to integrate host ksm by default.

OK.

So strictly speaking, obviously it doesn't need to be implemented in
assembly. However, it'd be the most obvious way of inserting exception
handling.

Note that also, it really would only need for copy_chunk_from_user() to
be done in assembly. All the other stuff around it will not take an
exception.

So I suppose we just need to allow copy_chunk_from_user() to return
-EFAULT, and then we can do exception fixup in just that little portion.

I'm not really sure how to do even that though - I'm not sure I see how
setting UPT_IP() in arch/x86/um/fault.c has any effect?

johannes

Patch
diff mbox series

diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 3c3adfc486f2..e84264be26c9 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -18,6 +18,9 @@  config UML
 	select GENERIC_CLOCKEVENTS
 	select HAVE_GCC_PLUGINS
 	select TTY # Needed for line.c
+	select ARCH_HAS_SET_MEMORY
+	select ARCH_HAS_STRICT_KERNEL_RWX
+	select ARCH_HAS_STRICT_MODULE_RWX
 
 config MMU
 	bool
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index b377df76cc28..0e6cda3516c6 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -17,6 +17,8 @@ 
 #define _PAGE_USER	0x040
 #define _PAGE_ACCESSED	0x080
 #define _PAGE_DIRTY	0x100
+#define _PAGE_RO	0x200
+#define _PAGE_NX	0x400
 /* If _PAGE_PRESENT is clear, we use these: */
 #define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
 				   pte_present gives true */
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index c69d69ee96be..da6b42793e0a 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -89,10 +89,10 @@  SECTIONS
     KEEP (*(.fini))
   } =0x90909090
 
-  .kstrtab : { *(.kstrtab) }
-
   #include <asm/common.lds.S>
 
+  .kstrtab : { *(.kstrtab) }
+
   __init_begin = .;
   init.data : { INIT_DATA }
   __init_end = .;
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index f256be1d77bd..7ce445bfa58f 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -10,6 +10,7 @@ 
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
+#include <asm/set_memory.h>
 #include <asm/fixmap.h>
 #include <asm/page.h>
 #include <as-layout.h>
@@ -37,6 +38,22 @@  int kmalloc_ok = 0;
 /* Used during early boot */
 static unsigned long brk_end;
 
+void mark_rodata_ro(void)
+{
+	unsigned long text_start = PFN_ALIGN(_text);
+	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
+	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
+	unsigned long all_end = PFN_ALIGN(&__bss_stop);
+
+	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+	       (rodata_end - text_start) >> 10);
+
+	set_memory_ro(text_start,
+		      (rodata_end - text_start) >> PAGE_SHIFT);
+	set_memory_nx(rodata_start,
+		      (all_end - rodata_start) >> PAGE_SHIFT);
+}
+
 void __init mem_init(void)
 {
 	/* clear the zero-page */
@@ -225,3 +242,62 @@  void *uml_kmalloc(int size, int flags)
 {
 	return kmalloc(size, flags);
 }
+
+struct page_change_data {
+	u32 set, clear;
+};
+
+static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
+{
+	struct page_change_data *cdata = data;
+	pte_t pte = READ_ONCE(*ptep);
+
+	pte_clear_bits(pte, cdata->clear);
+	pte_set_bits(pte, cdata->set);
+
+	set_pte(ptep, pte);
+
+	os_protect_memory((void *)addr, PAGE_SIZE,
+			  1, !(pte.pte & _PAGE_RO), !(pte.pte & _PAGE_NX));
+	return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+				u32 set, u32 clear)
+{
+	unsigned long start = addr & PAGE_MASK;
+	unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
+	unsigned long size = end - start;
+	struct page_change_data data;
+
+	WARN_ON_ONCE(start != addr);
+
+	if (!size)
+		return 0;
+
+	data.set = set;
+	data.clear = clear;
+
+	return apply_to_page_range(&init_mm, start, size, change_page_range,
+				   &data);
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages, _PAGE_RO, 0);
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages, 0, _PAGE_RO);
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages, _PAGE_NX, 0);
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages, 0, _PAGE_NX);
+}