[v2,3/6] translate-all.c: Compute L1 page table properties at runtime

Message ID 1466528974-12183-4-git-send-email-peter.maydell@linaro.org
State New

Commit Message

Peter Maydell June 21, 2016, 5:09 p.m. UTC
From: Vijaya Kumar K <Vijaya.Kumar@caviumnetworks.com>

Stop computing the L1 page mapping table properties statically
at compile time via macros that depend on TARGET_PAGE_BITS.
Drop the V_L1_SIZE, V_L1_SHIFT and V_L1_BITS macros and replace
them with variables that are computed once, at an early stage
of VM boot.

Removing this compile-time dependency is a step towards making
TARGET_PAGE_BITS dynamic.
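
For illustration (the address-space and page sizes below are
assumed for concreteness, not taken from this patch): with
L1_MAP_ADDR_SPACE_BITS = 48 and 4K target pages
(TARGET_PAGE_BITS = 12), the new page_table_config_init()
computes

    v_l1_bits   = (48 - 12) % 10 = 6    /* >= V_L1_MIN_BITS, kept */
    v_l1_size   = 1 << 6 = 64
    v_l1_shift  = 48 - 12 - 6 = 30
    v_l2_levels = 30 / 10 - 1 = 2

With 64K pages (TARGET_PAGE_BITS = 16) the remainder is 2, below
V_L1_MIN_BITS, so V_L2_BITS is folded in: v_l1_bits = 12,
v_l1_size = 4096, v_l1_shift = 20, v_l2_levels = 1. In both cases
v_l1_shift is a multiple of V_L2_BITS, as the new asserts check;
the worst case is v_l1_bits = V_L2_BITS + 3, which is why l1_map
is statically sized at V_L1_MAX_SIZE.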

Signed-off-by: Vijaya Kumar K <vijayak@cavium.com>
Message-id: 1465808915-4887-4-git-send-email-vijayak@caviumnetworks.com
[PMM:
 assert(v_l1_shift % V_L2_BITS == 0)
 cache v_l2_levels
 initialize from page_init() rather than vl.c
 minor code style fixes]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 translate-all.c | 67 +++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 44 insertions(+), 23 deletions(-)

Comments

Richard Henderson June 22, 2016, 6:56 a.m. UTC | #1
On 06/21/2016 10:09 AM, Peter Maydell wrote:
> @@ -825,8 +846,8 @@ static void page_flush_tb(void)
>  {
>      int i;
>
> -    for (i = 0; i < V_L1_SIZE; i++) {
> -        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
> +    for (i = 0; i < v_l1_size; i++) {
> +        page_flush_tb_1(v_l2_levels, l1_map + i);
>      }
>  }
>
> @@ -1850,9 +1871,9 @@ int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
>      data.start = -1u;
>      data.prot = 0;
>
> -    for (i = 0; i < V_L1_SIZE; i++) {
> -        int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
> -                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
> +    for (i = 0; i < v_l1_size; i++) {
> +        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
> +        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);

It would probably be good to cache v_l1_size in a local, so that we don't keep 
re-reading it around the loop when we know it doesn't change.

Otherwise,

Reviewed-by: Richard Henderson <rth@twiddle.net>


r~
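
[For reference, a minimal sketch of the suggested change against
the patched walk_memory_regions() loop; the local name l1_sz is
illustrative, not part of the posted series, and the same tweak
applies to the loop in page_flush_tb(). The (target_ulong) cast
from the pre-patch code is retained here for safety:

    /* Cache the loop bound in a local so the global is read once,
     * not re-loaded from memory on every iteration.
     */
    int l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = (target_ulong)i
                            << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels,
                                       l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }
]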

Patch

diff --git a/translate-all.c b/translate-all.c
index 3f402df..e59efdb 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -97,25 +97,24 @@  typedef struct PageDesc {
 #define V_L2_BITS 10
 #define V_L2_SIZE (1 << V_L2_BITS)
 
-/* The bits remaining after N lower levels of page tables.  */
-#define V_L1_BITS_REM \
-    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
-
-#if V_L1_BITS_REM < 4
-#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
-#else
-#define V_L1_BITS  V_L1_BITS_REM
-#endif
-
-#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
-
-#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
-
 uintptr_t qemu_host_page_size;
 intptr_t qemu_host_page_mask;
 
-/* The bottom level has pointers to PageDesc */
-static void *l1_map[V_L1_SIZE];
+/*
+ * L1 Mapping properties
+ */
+static int v_l1_size;
+static int v_l1_shift;
+static int v_l2_levels;
+
+/* The bottom level has pointers to PageDesc, and is indexed by
+ * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
+ */
+#define V_L1_MIN_BITS 4
+#define V_L1_MAX_BITS (V_L2_BITS + 3)
+#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
+
+static void *l1_map[V_L1_MAX_SIZE];
 
 /* code generation context */
 TCGContext tcg_ctx;
@@ -125,6 +124,26 @@  TCGContext tcg_ctx;
 __thread int have_tb_lock;
 #endif
 
+static void page_table_config_init(void)
+{
+    uint32_t v_l1_bits;
+
+    assert(TARGET_PAGE_BITS);
+    /* The bits remaining after N lower levels of page tables.  */
+    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
+    if (v_l1_bits < V_L1_MIN_BITS) {
+        v_l1_bits += V_L2_BITS;
+    }
+
+    v_l1_size = 1 << v_l1_bits;
+    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
+    v_l2_levels = v_l1_shift / V_L2_BITS - 1;
+
+    assert(v_l1_bits <= V_L1_MAX_BITS);
+    assert(v_l1_shift % V_L2_BITS == 0);
+    assert(v_l2_levels >= 0);
+}
+
 void tb_lock(void)
 {
 #ifdef CONFIG_USER_ONLY
@@ -330,6 +349,8 @@  void page_size_init(void)
 static void page_init(void)
 {
     page_size_init();
+    page_table_config_init();
+
 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
     {
 #ifdef HAVE_KINFO_GETVMMAP
@@ -406,10 +427,10 @@  static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
     int i;
 
     /* Level 1.  Always allocated.  */
-    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
+    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
 
     /* Level 2..N-1.  */
-    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
+    for (i = v_l2_levels; i > 0; i--) {
         void **p = atomic_rcu_read(lp);
 
         if (p == NULL) {
@@ -825,8 +846,8 @@  static void page_flush_tb(void)
 {
     int i;
 
-    for (i = 0; i < V_L1_SIZE; i++) {
-        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
+    for (i = 0; i < v_l1_size; i++) {
+        page_flush_tb_1(v_l2_levels, l1_map + i);
     }
 }
 
@@ -1850,9 +1871,9 @@  int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
     data.start = -1u;
     data.prot = 0;
 
-    for (i = 0; i < V_L1_SIZE; i++) {
-        int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
-                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
+    for (i = 0; i < v_l1_size; i++) {
+        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
+        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
         if (rc != 0) {
             return rc;
         }