diff mbox series

[v2,3/3] target/ppc: Check page dir/table base alignment

Message ID 20220624171653.143740-4-leandro.lupori@eldorado.org.br
State New
Headers show
Series ppc: Check for bad Radix configs | expand

Commit Message

Leandro Lupori June 24, 2022, 5:16 p.m. UTC
Check if each page dir/table base address is properly aligned and
log a guest error if not, as real hardware behaves incorrectly in
this case.

Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br>
---
 target/ppc/mmu-radix64.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

Comments

Richard Henderson June 24, 2022, 6:04 p.m. UTC | #1
On 6/24/22 10:16, Leandro Lupori wrote:
> Check if each page dir/table base address is properly aligned and
> log a guest error if not, as real hardware behave incorrectly in
> this case.
> 
> Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br>
> ---
>   target/ppc/mmu-radix64.c | 15 +++++++++++++++
>   1 file changed, 15 insertions(+)
> 
> diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
> index 339cf5b4d8..1e7d932893 100644
> --- a/target/ppc/mmu-radix64.c
> +++ b/target/ppc/mmu-radix64.c
> @@ -280,6 +280,14 @@ static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
>       *psize -= *nls;
>       if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
>           *nls = pde & R_PDE_NLS;
> +
> +        if ((pde & R_PDE_NLB) & MAKE_64BIT_MASK(0, *nls + 3)) {
> +            qemu_log_mask(LOG_GUEST_ERROR,
> +                "%s: misaligned page dir/table base: 0x%"VADDR_PRIx
> +                " page dir size: 0x"TARGET_FMT_lx"\n",
> +                __func__, (pde & R_PDE_NLB), BIT(*nls + 3));
> +        }
> +
>           index = eaddr >> (*psize - *nls);       /* Shift */
>           index &= ((1UL << *nls) - 1);           /* Mask */
>           *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));

In your response to my question on v1, you said that it appears that the cpu ignores bits 
*nls+3. This isn't ignoring them -- it's including [nls+2 : nls] into pte_addr.

It would be better to compute this as

     index = ...
     index &= ...
     *pte_addr = ...
     if (*pte_addr & 7) {
         qemu_log(...);
     }


r~
Fabiano Rosas June 24, 2022, 6:47 p.m. UTC | #2
Leandro Lupori <leandro.lupori@eldorado.org.br> writes:

> Check if each page dir/table base address is properly aligned and
> log a guest error if not, as real hardware behave incorrectly in
> this case.

I think the commit message could be clearer, something like:

According to PowerISA 3.1B, Book III 6.7.6 programming note, the page
directory base addresses are expected to be aligned to their size. Real
hardware seems to rely on that and will access the wrong address if they
are misaligned. This results in a translation failure even if the page
tables seem to be properly populated.

Let's make sure we capture this assumption in the code to help anyone
implementing page tables.

>
> Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br>
> ---
>  target/ppc/mmu-radix64.c | 15 +++++++++++++++
>  1 file changed, 15 insertions(+)
>
> diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
> index 339cf5b4d8..1e7d932893 100644
> --- a/target/ppc/mmu-radix64.c
> +++ b/target/ppc/mmu-radix64.c
> @@ -280,6 +280,14 @@ static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
>      *psize -= *nls;
>      if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
>          *nls = pde & R_PDE_NLS;
> +
> +        if ((pde & R_PDE_NLB) & MAKE_64BIT_MASK(0, *nls + 3)) {
> +            qemu_log_mask(LOG_GUEST_ERROR,
> +                "%s: misaligned page dir/table base: 0x%"VADDR_PRIx
> +                " page dir size: 0x"TARGET_FMT_lx"\n",
> +                __func__, (pde & R_PDE_NLB), BIT(*nls + 3));
> +        }
> +
>          index = eaddr >> (*psize - *nls);       /* Shift */
>          index &= ((1UL << *nls) - 1);           /* Mask */
>          *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
> @@ -295,6 +303,13 @@ static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
>      uint64_t index, pde, rpn, mask;
>      int level = 0;
>  
> +    if (base_addr & MAKE_64BIT_MASK(0, nls + 3)) {
> +        qemu_log_mask(LOG_GUEST_ERROR,
> +            "%s: misaligned page dir base: 0x%"VADDR_PRIx
> +            " page dir size: 0x"TARGET_FMT_lx"\n",
> +            __func__, base_addr, BIT(nls + 3));
> +    }
> +
>      index = eaddr >> (*psize - nls);    /* Shift */
>      index &= ((1UL << nls) - 1);       /* Mask */
>      *pte_addr = base_addr + (index * sizeof(pde));
Leandro Lupori June 24, 2022, 8:10 p.m. UTC | #3
On 6/24/22 15:04, Richard Henderson wrote:
> 
> On 6/24/22 10:16, Leandro Lupori wrote:
>> Check if each page dir/table base address is properly aligned and
>> log a guest error if not, as real hardware behave incorrectly in
>> this case.
>>
>> Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br>
>> ---
>>   target/ppc/mmu-radix64.c | 15 +++++++++++++++
>>   1 file changed, 15 insertions(+)
>>
>> diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
>> index 339cf5b4d8..1e7d932893 100644
>> --- a/target/ppc/mmu-radix64.c
>> +++ b/target/ppc/mmu-radix64.c
>> @@ -280,6 +280,14 @@ static int ppc_radix64_next_level(AddressSpace 
>> *as, vaddr eaddr,
>>       *psize -= *nls;
>>       if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
>>           *nls = pde & R_PDE_NLS;
>> +
>> +        if ((pde & R_PDE_NLB) & MAKE_64BIT_MASK(0, *nls + 3)) {
>> +            qemu_log_mask(LOG_GUEST_ERROR,
>> +                "%s: misaligned page dir/table base: 0x%"VADDR_PRIx
>> +                " page dir size: 0x"TARGET_FMT_lx"\n",
>> +                __func__, (pde & R_PDE_NLB), BIT(*nls + 3));
>> +        }
>> +
>>           index = eaddr >> (*psize - *nls);       /* Shift */
>>           index &= ((1UL << *nls) - 1);           /* Mask */
>>           *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
> 
> In your response to my question on v1, you said that it appears that the 
> cpu ignores bits
> *nls+3. This isn't ignoring them -- it's including [nls+2 : nls] into 
> pte_addr.
> 
> It would be better to compute this as
> 
>      index = ...
>      index &= ...
>      *pte_addr = ...
>      if (*pte_addr & 7) {
>          qemu_log(...);
>      }
> 

Right, I wanted to warn about the invalid alignment but I ended up 
forgetting to make QEMU match the CPU behavior.

The CPU seems to ignore bits [nls+2 : 0] of NLB.

The multiplication of index by sizeof(pde) discards the 3 lower bits and 
it's not possible for NLB to have its 8 lower bits set, as these are 
used for NLS plus some reserved bits in the PDE.
Then we need to make sure that bits [nls+2 : 8] of NLB are also 0.

So maybe something like this would do it:

     index = eaddr >> (*psize - *nls);       /* Shift */
     index &= ((1UL << *nls) - 1);           /* Mask */
     *pte_addr = pde & R_PDE_NLB;
     mask = MAKE_64BIT_MASK(0, *nls + 3);
     if (*pte_addr & mask) {
         qemu_log(...);
         *pte_addr &= ~mask;
     }
     *pte_addr += index * sizeof(pde);

Thanks,
Leandro

> 
> r~
diff mbox series

Patch

diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 339cf5b4d8..1e7d932893 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -280,6 +280,14 @@  static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
     *psize -= *nls;
     if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
         *nls = pde & R_PDE_NLS;
+
+        if ((pde & R_PDE_NLB) & MAKE_64BIT_MASK(0, *nls + 3)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                "%s: misaligned page dir/table base: 0x%"VADDR_PRIx
+                " page dir size: 0x"TARGET_FMT_lx"\n",
+                __func__, (pde & R_PDE_NLB), BIT(*nls + 3));
+        }
+
         index = eaddr >> (*psize - *nls);       /* Shift */
         index &= ((1UL << *nls) - 1);           /* Mask */
         *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
@@ -295,6 +303,13 @@  static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
     uint64_t index, pde, rpn, mask;
     int level = 0;
 
+    if (base_addr & MAKE_64BIT_MASK(0, nls + 3)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+            "%s: misaligned page dir base: 0x%"VADDR_PRIx
+            " page dir size: 0x"TARGET_FMT_lx"\n",
+            __func__, base_addr, BIT(nls + 3));
+    }
+
     index = eaddr >> (*psize - nls);    /* Shift */
     index &= ((1UL << nls) - 1);       /* Mask */
     *pte_addr = base_addr + (index * sizeof(pde));