@@ -13,6 +13,9 @@ typedef struct mm_context {
struct mm_id id;
struct uml_arch_mm_context arch;
struct page *stub_pages[2];
+#ifndef CONFIG_MMU
+ unsigned long end_brk;
+#endif
} mm_context_t;
extern void __switch_mm(struct mm_id * mm_idp);
@@ -6,6 +6,7 @@
#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H
+#ifdef CONFIG_MMU
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
@@ -75,4 +76,11 @@ extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
+#else
+#include <asm-generic/mmu_context.h>
+
+extern void force_flush_all(void);
+
+#endif /* CONFIG_MMU */
+
#endif
@@ -7,6 +7,8 @@
#ifndef __UM_PAGE_H
#define __UM_PAGE_H
+#ifdef CONFIG_MMU
+
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
@@ -120,4 +122,17 @@ extern unsigned long uml_physmem;
#define __HAVE_ARCH_GATE_AREA 1
#endif
+#else /* CONFIG_MMU */
+#define CONFIG_KERNEL_RAM_BASE_ADDRESS memory_start
+#include <asm-generic/page.h>
+
+#define __va_space (8*1024*1024)
+
+#ifndef __ASSEMBLY__
+#include <mem.h>
+void free_mem(void);
+#endif
+
+#endif /* !CONFIG_MMU */
+
#endif /* __UM_PAGE_H */
@@ -21,6 +21,8 @@
#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
pte_present gives true */
+#ifdef CONFIG_MMU
+
#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
@@ -323,4 +325,28 @@ do { \
__flush_tlb_one((vaddr)); \
} while (0)
+#else /* CONFIG_MMU */
+
+#include <asm-generic/pgtable-nopud.h>
+
+#define swapper_pg_dir ((pgd_t *)0)
+#define PAGE_KERNEL __pgprot(0)
+#define PGDIR_SHIFT 21
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define VMALLOC_START 0
+#define VMALLOC_END 0xffffffff
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long *empty_zero_page;
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern unsigned long end_iomem;
+
+#endif /* !CONFIG_MMU */
+
#endif
@@ -21,6 +21,7 @@
#define __addr_range_nowrap(addr, size) \
((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
+#ifdef CONFIG_MMU
extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
@@ -46,4 +47,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
uaccess_kernel());
}
+#else
+#include <asm-generic/uaccess.h>
+#endif /* CONFIG_MMU */
+
#endif
@@ -16,8 +16,13 @@ struct lkl_jmp_buf {
* These operations must be provided by a host library or by the application
* itself.
*
+ * @mem_alloc - allocate a block of memory of the given size in bytes
+ * @mem_free - free a block previously allocated with @mem_alloc
+ *
*/
struct lkl_host_operations {
+ void *(*mem_alloc)(unsigned long mem);
+ void (*mem_free)(void *mem);
};
void lkl_bug(const char *fmt, ...);
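For reference, a host library or the application embedding the kernel would back these two hooks with its own allocator. A minimal user-space sketch, assuming an ordinary hosted libc; the host_mem_alloc, host_mem_free and lkl_host_ops names below are illustrative and not defined by this patch:

/* include whichever header exports struct lkl_host_operations */
#include "lkl_host.h"
#include <stdlib.h>

/* back the hooks with plain malloc()/free() */
static void *host_mem_alloc(unsigned long size)
{
	return malloc(size);
}

static void host_mem_free(void *mem)
{
	free(mem);
}

struct lkl_host_operations lkl_host_ops = {
	.mem_alloc = host_mem_alloc,
	.mem_free = host_mem_free,
};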
new file mode 100644
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+
+unsigned long memory_start, memory_end;
+EXPORT_SYMBOL(memory_start);
+static unsigned long _memory_start, mem_size;
+
+unsigned long *empty_zero_page;
+
+/* XXX: unused */
+unsigned long long highmem;
+int iomem_size;
+int kmalloc_ok = 1;
+
+void __init setup_physmem(unsigned long start, unsigned long reserve_end,
+ unsigned long mem_sz, unsigned long long _highmem)
+{
+ mem_size = mem_sz;
+
+ _memory_start = (unsigned long)lkl_ops->mem_alloc(mem_size);
+ memory_start = _memory_start;
+ WARN_ON(!memory_start);
+ memory_end = memory_start + mem_size;
+
+ /* page-align the start of the host allocation and trim the size */
+ if (PAGE_ALIGN(memory_start) != memory_start) {
+ mem_size -= PAGE_ALIGN(memory_start) - memory_start;
+ memory_start = PAGE_ALIGN(memory_start);
+ mem_size = (mem_size / PAGE_SIZE) * PAGE_SIZE;
+ }
+ pr_info("memblock address range: 0x%lx - 0x%lx\n", memory_start,
+ memory_start + mem_size);
+ /*
+ * Hand all of the memory over to memblock; mem_init() will release
+ * it to the page allocator later via memblock_free_all().
+ */
+ max_low_pfn = virt_to_pfn(memory_end);
+ min_low_pfn = virt_to_pfn(memory_start);
+ memblock_add(memory_start, mem_size);
+
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+ /* put every page into a single ZONE_NORMAL zone */
+ {
+ unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+ free_area_init(zones_size);
+ }
+}
+
+void __init mem_init(void)
+{
+ max_mapnr = (((unsigned long)high_memory) - PAGE_OFFSET) >> PAGE_SHIFT;
+ /* this will put all memory onto the freelists */
+ totalram_pages_add(memblock_free_all());
+ pr_info("Memory available: %luk/%luk RAM\n",
+ (nr_free_pages() << PAGE_SHIFT) >> 10, mem_size >> 10);
+}
+
+/*
+ * In our case __init memory is not part of the page allocator so there is
+ * nothing to free.
+ */
+void free_initmem(void)
+{
+}
+
+void free_mem(void)
+{
+ lkl_ops->mem_free((void *)_memory_start);
+}
+
+void *uml_kmalloc(int size, int flags)
+{
+ return kmalloc(size, flags);
+}
+
+void __init mem_total_pages(unsigned long physmem, unsigned long iomem,
+ unsigned long _highmem)
+{
+ /* nothing to do: setup_physmem() already accounts for all memory */
+}
+
+void __init paging_init(void)
+{
+ /* nothing to do: the single memory zone is initialized in setup_physmem() */
+}
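As a side note, the alignment step in setup_physmem() above simply drops the unaligned head of the host allocation and rounds the remaining size down to whole pages. A stand-alone sketch of that arithmetic, assuming 4 KiB pages and made-up example values:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long start = 0x100123UL;	/* unaligned host allocation */
	unsigned long size = 64UL << 20;	/* 64 MiB requested */

	if (PAGE_ALIGN(start) != start) {
		size -= PAGE_ALIGN(start) - start;	/* drop the unaligned head */
		start = PAGE_ALIGN(start);
		size = (size / PAGE_SIZE) * PAGE_SIZE;	/* round down to whole pages */
	}

	printf("usable range: 0x%lx - 0x%lx (%lu pages)\n",
	       start, start + size, size / PAGE_SIZE);
	return 0;
}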