
[5/7] target/ppc: Rework ppc_radix64_walk_tree() for partition-scoped translation

Message ID 20200330094946.24678-6-clg@kaod.org
State New
Series target/ppc: Add support for Radix partition-scoped translation

Commit Message

Cédric Le Goater March 30, 2020, 9:49 a.m. UTC
The ppc_radix64_walk_tree() routine walks through the nested radix
tables to look for a PTE.

Split it in two and introduce a new routine ppc_radix64_next_level()
which we will use for partition-scoped Radix translation when
translating the process tree addresses.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
---
 target/ppc/mmu-radix64.c | 50 ++++++++++++++++++++++++++--------------
 1 file changed, 33 insertions(+), 17 deletions(-)

Comments

Greg Kurz March 30, 2020, 5 p.m. UTC | #1
On Mon, 30 Mar 2020 11:49:44 +0200
Cédric Le Goater <clg@kaod.org> wrote:

> The ppc_radix64_walk_tree() routine walks through the nested radix
> tables to look for a PTE.
> 
> Split it two and introduce a new routine ppc_radix64_next_level()

Split it in two...

> which we will use for partition-scoped Radix translation when
> translating the process tree addresses.
> 
> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> Signed-off-by: Cédric Le Goater <clg@kaod.org>
> ---
>  target/ppc/mmu-radix64.c | 50 ++++++++++++++++++++++++++--------------
>  1 file changed, 33 insertions(+), 17 deletions(-)
> 
> diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
> index b4e6abcd2d35..136498111f60 100644
> --- a/target/ppc/mmu-radix64.c
> +++ b/target/ppc/mmu-radix64.c
> @@ -162,44 +162,60 @@ static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
>      }
>  }
>  
> -static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
> -                                      uint64_t base_addr, uint64_t nls,
> -                                      hwaddr *raddr, int *psize,
> -                                      int *fault_cause, hwaddr *pte_addr)
> +static uint64_t ppc_radix64_next_level(PowerPCCPU *cpu, vaddr eaddr,
> +                                       uint64_t *pte_addr, uint64_t *nls,
> +                                       int *psize, int *fault_cause)
>  {
>      CPUState *cs = CPU(cpu);
>      uint64_t index, pde;
>  
> -    if (nls < 5) { /* Directory maps less than 2**5 entries */
> +    if (*nls < 5) { /* Directory maps less than 2**5 entries */
>          *fault_cause |= DSISR_R_BADCONFIG;
>          return 0;
>      }
>  

I think this should stay in the caller...

>      /* Read page <directory/table> entry from guest address space */
> -    index = eaddr >> (*psize - nls); /* Shift */
> -    index &= ((1UL << nls) - 1); /* Mask */
> -    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
> -    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
> +    pde = ldq_phys(cs->as, *pte_addr);
> +    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
>          *fault_cause |= DSISR_NOPTE;
>          return 0;
>      }
>  
> -    *psize -= nls;
> +    *psize -= *nls;
> +    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
> +        *nls = pde & R_PDE_NLS;
> +        index = eaddr >> (*psize - *nls);       /* Shift */
> +        index &= ((1UL << *nls) - 1);           /* Mask */
> +        *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
> +    }
> +    return pde;
> +}
> +
> +static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
> +                                      uint64_t base_addr, uint64_t nls,
> +                                      hwaddr *raddr, int *psize,
> +                                      int *fault_cause, hwaddr *pte_addr)
> +{
> +    uint64_t index, pde;
> +
> +    index = eaddr >> (*psize - nls);    /* Shift */
> +    index &= ((1UL << nls) - 1);       /* Mask */
> +    *pte_addr = base_addr + (index * sizeof(pde));
> +    do {

... here. So that we have a well established "bad config" path at
the root level, just like the current code has.

Since the ppc_radix64_walk_tree() now calls ppc_radix64_next_level()
in a loop instead of recursing, and since ppc_radix64_next_level()
returns the nls value for the next level, it really makes sense to
have this check in ppc_radix64_walk_tree() and maybe put an assert
in ppc_radix64_next_level().
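
Something like this, maybe (an untested sketch against mmu-radix64.c, just
to illustrate the idea; this is not code from the series):

static uint64_t ppc_radix64_next_level(PowerPCCPU *cpu, vaddr eaddr,
                                       uint64_t *pte_addr, uint64_t *nls,
                                       int *psize, int *fault_cause)
{
    CPUState *cs = CPU(cpu);
    uint64_t index, pde;

    /* The caller is expected to have validated the directory size */
    assert(*nls >= 5);

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(cs->as, *pte_addr);
    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 0;
    }

    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls);       /* Shift */
        index &= ((1UL << *nls) - 1);           /* Mask */
        *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
    }
    return pde;
}

static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
                                      uint64_t base_addr, uint64_t nls,
                                      hwaddr *raddr, int *psize,
                                      int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde;

    if (nls < 5) { /* Root directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 0;
    }

    index = eaddr >> (*psize - nls);    /* Shift */
    index &= ((1UL << nls) - 1);        /* Mask */
    *pte_addr = base_addr + (index * sizeof(pde));

    do {
        pde = ppc_radix64_next_level(cpu, eaddr, pte_addr, &nls, psize,
                                     fault_cause);
        /* nls now holds the size of the next directory level */
        if ((pde & R_PTE_VALID) && !(pde & R_PTE_LEAF) && nls < 5) {
            *fault_cause |= DSISR_R_BADCONFIG;
            return 0;
        }
    } while ((pde & R_PTE_VALID) && !(pde & R_PTE_LEAF));

    /* Did we find a valid leaf? */
    if ((pde & R_PTE_VALID) && (pde & R_PTE_LEAF)) {
        uint64_t rpn = pde & R_PTE_RPN;
        uint64_t mask = (1UL << *psize) - 1;

        /* Or high bits of rpn and low bits of ea to form whole real addr */
        *raddr = (rpn & ~mask) | (eaddr & mask);
    }

    return pde;
}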

> +        pde = ppc_radix64_next_level(cpu, eaddr, pte_addr, &nls, psize,
> +                                     fault_cause);
> +    } while ((pde & R_PTE_VALID) && !(pde & R_PTE_LEAF));
>  
> -    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
> -    if (pde & R_PTE_LEAF) {
> +    /* Did we find a valid leaf? */
> +    if ((pde & R_PTE_VALID) && (pde & R_PTE_LEAF)) {
>          uint64_t rpn = pde & R_PTE_RPN;
>          uint64_t mask = (1UL << *psize) - 1;
>  
>          /* Or high bits of rpn and low bits to ea to form whole real addr */
>          *raddr = (rpn & ~mask) | (eaddr & mask);
> -        *pte_addr = base_addr + (index * sizeof(pde));
> -        return pde;
>      }
>  
> -    /* Next Level of Radix Tree */
> -    return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
> -                                 raddr, psize, fault_cause, pte_addr);
> +    return pde;
>  }
>  
>  static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
Cédric Le Goater March 31, 2020, 9:10 a.m. UTC | #2
On 3/30/20 7:00 PM, Greg Kurz wrote:
> On Mon, 30 Mar 2020 11:49:44 +0200
> Cédric Le Goater <clg@kaod.org> wrote:
> 
>> The ppc_radix64_walk_tree() routine walks through the nested radix
>> tables to look for a PTE.
>>
>> Split it two and introduce a new routine ppc_radix64_next_level()
> 
> Split it in two...
> 
>> which we will use for partition-scoped Radix translation when
>> translating the process tree addresses.
>>
>> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
>> Signed-off-by: Cédric Le Goater <clg@kaod.org>
>> ---
>>  target/ppc/mmu-radix64.c | 50 ++++++++++++++++++++++++++--------------
>>  1 file changed, 33 insertions(+), 17 deletions(-)
>>
>> diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
>> index b4e6abcd2d35..136498111f60 100644
>> --- a/target/ppc/mmu-radix64.c
>> +++ b/target/ppc/mmu-radix64.c
>> @@ -162,44 +162,60 @@ static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
>>      }
>>  }
>>  
>> -static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
>> -                                      uint64_t base_addr, uint64_t nls,
>> -                                      hwaddr *raddr, int *psize,
>> -                                      int *fault_cause, hwaddr *pte_addr)
>> +static uint64_t ppc_radix64_next_level(PowerPCCPU *cpu, vaddr eaddr,
>> +                                       uint64_t *pte_addr, uint64_t *nls,
>> +                                       int *psize, int *fault_cause)
>>  {
>>      CPUState *cs = CPU(cpu);
>>      uint64_t index, pde;
>>  
>> -    if (nls < 5) { /* Directory maps less than 2**5 entries */
>> +    if (*nls < 5) { /* Directory maps less than 2**5 entries */
>>          *fault_cause |= DSISR_R_BADCONFIG;
>>          return 0;
>>      }
>>  
> 
> I think this should stay in the caller...
> 
>>      /* Read page <directory/table> entry from guest address space */
>> -    index = eaddr >> (*psize - nls); /* Shift */
>> -    index &= ((1UL << nls) - 1); /* Mask */
>> -    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
>> -    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
>> +    pde = ldq_phys(cs->as, *pte_addr);
>> +    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
>>          *fault_cause |= DSISR_NOPTE;
>>          return 0;
>>      }
>>  
>> -    *psize -= nls;
>> +    *psize -= *nls;
>> +    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
>> +        *nls = pde & R_PDE_NLS;
>> +        index = eaddr >> (*psize - *nls);       /* Shift */
>> +        index &= ((1UL << *nls) - 1);           /* Mask */
>> +        *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
>> +    }
>> +    return pde;
>> +}
>> +
>> +static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
>> +                                      uint64_t base_addr, uint64_t nls,
>> +                                      hwaddr *raddr, int *psize,
>> +                                      int *fault_cause, hwaddr *pte_addr)
>> +{
>> +    uint64_t index, pde;
>> +
>> +    index = eaddr >> (*psize - nls);    /* Shift */
>> +    index &= ((1UL << nls) - 1);       /* Mask */
>> +    *pte_addr = base_addr + (index * sizeof(pde));
>> +    do {
> 
> ... here. So that we have a well established "bad config" path at
> the root level, just like the current code has.
> 
> Since the ppc_radix64_walk_tree() now calls ppc_radix64_next_level()
> in a loop instead of recursing, and since ppc_radix64_next_level()
> returns the nls value for the next level, it really makes sense to
> have this check in ppc_radix64_walk_tree() and maybe put an assert
> in ppc_radix64_next_level().

ppc_radix64_next_level() also covers the needs of partition-scoped
translation, which has to translate each process table address. See PATCH 7.
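
To illustrate, the process-tree walk there cannot simply call
ppc_radix64_walk_tree(), because each level's pte_addr is a guest real
address that must first go through the partition-scoped tree. Roughly
(simplified sketch only; the helper name and arguments below are indicative
of what PATCH 7 adds, not the exact code):

    do {
        /*
         * pte_addr is a guest real address: translate it through the
         * partition-scoped tree first (indicative call, see PATCH 7)
         */
        if (ppc_radix64_partition_scoped_xlate(cpu, rwx, eaddr, pte_addr,
                                               pate, &h_raddr, &h_prot,
                                               &h_psize, &fault_cause)) {
            return 1;
        }
        pte_addr = h_raddr;

        /* then load this level of the process-scoped tree */
        pte = ppc_radix64_next_level(cpu, eaddr, &pte_addr, &nls, &psize,
                                     &fault_cause);
    } while ((pte & R_PTE_VALID) && !(pte & R_PTE_LEAF));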

I'd rather not duplicate more code and would leave it as it is.
 
Thanks,

C.

> 
>> +        pde = ppc_radix64_next_level(cpu, eaddr, pte_addr, &nls, psize,
>> +                                     fault_cause);
>> +    } while ((pde & R_PTE_VALID) && !(pde & R_PTE_LEAF));
>>  
>> -    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
>> -    if (pde & R_PTE_LEAF) {
>> +    /* Did we find a valid leaf? */
>> +    if ((pde & R_PTE_VALID) && (pde & R_PTE_LEAF)) {
>>          uint64_t rpn = pde & R_PTE_RPN;
>>          uint64_t mask = (1UL << *psize) - 1;
>>  
>>          /* Or high bits of rpn and low bits to ea to form whole real addr */
>>          *raddr = (rpn & ~mask) | (eaddr & mask);
>> -        *pte_addr = base_addr + (index * sizeof(pde));
>> -        return pde;
>>      }
>>  
>> -    /* Next Level of Radix Tree */
>> -    return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
>> -                                 raddr, psize, fault_cause, pte_addr);
>> +    return pde;
>>  }
>>  
>>  static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
>
Greg Kurz March 31, 2020, 9:49 a.m. UTC | #3
On Tue, 31 Mar 2020 11:10:20 +0200
Cédric Le Goater <clg@kaod.org> wrote:

> On 3/30/20 7:00 PM, Greg Kurz wrote:
> > On Mon, 30 Mar 2020 11:49:44 +0200
> > Cédric Le Goater <clg@kaod.org> wrote:
> > 
> >> The ppc_radix64_walk_tree() routine walks through the nested radix
> >> tables to look for a PTE.
> >>
> >> Split it two and introduce a new routine ppc_radix64_next_level()
> > 
> > Split it in two...
> > 
> >> which we will use for partition-scoped Radix translation when
> >> translating the process tree addresses.
> >>
> >> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> >> Signed-off-by: Cédric Le Goater <clg@kaod.org>
> >> ---
> >>  target/ppc/mmu-radix64.c | 50 ++++++++++++++++++++++++++--------------
> >>  1 file changed, 33 insertions(+), 17 deletions(-)
> >>
> >> diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
> >> index b4e6abcd2d35..136498111f60 100644
> >> --- a/target/ppc/mmu-radix64.c
> >> +++ b/target/ppc/mmu-radix64.c
> >> @@ -162,44 +162,60 @@ static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
> >>      }
> >>  }
> >>  
> >> -static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
> >> -                                      uint64_t base_addr, uint64_t nls,
> >> -                                      hwaddr *raddr, int *psize,
> >> -                                      int *fault_cause, hwaddr *pte_addr)
> >> +static uint64_t ppc_radix64_next_level(PowerPCCPU *cpu, vaddr eaddr,
> >> +                                       uint64_t *pte_addr, uint64_t *nls,
> >> +                                       int *psize, int *fault_cause)
> >>  {
> >>      CPUState *cs = CPU(cpu);
> >>      uint64_t index, pde;
> >>  
> >> -    if (nls < 5) { /* Directory maps less than 2**5 entries */
> >> +    if (*nls < 5) { /* Directory maps less than 2**5 entries */
> >>          *fault_cause |= DSISR_R_BADCONFIG;
> >>          return 0;
> >>      }
> >>  
> > 
> > I think this should stay in the caller...
> > 
> >>      /* Read page <directory/table> entry from guest address space */
> >> -    index = eaddr >> (*psize - nls); /* Shift */
> >> -    index &= ((1UL << nls) - 1); /* Mask */
> >> -    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
> >> -    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
> >> +    pde = ldq_phys(cs->as, *pte_addr);
> >> +    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
> >>          *fault_cause |= DSISR_NOPTE;
> >>          return 0;
> >>      }
> >>  
> >> -    *psize -= nls;
> >> +    *psize -= *nls;
> >> +    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
> >> +        *nls = pde & R_PDE_NLS;
> >> +        index = eaddr >> (*psize - *nls);       /* Shift */
> >> +        index &= ((1UL << *nls) - 1);           /* Mask */
> >> +        *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
> >> +    }
> >> +    return pde;
> >> +}
> >> +
> >> +static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
> >> +                                      uint64_t base_addr, uint64_t nls,
> >> +                                      hwaddr *raddr, int *psize,
> >> +                                      int *fault_cause, hwaddr *pte_addr)
> >> +{
> >> +    uint64_t index, pde;
> >> +
> >> +    index = eaddr >> (*psize - nls);    /* Shift */
> >> +    index &= ((1UL << nls) - 1);       /* Mask */
> >> +    *pte_addr = base_addr + (index * sizeof(pde));
> >> +    do {
> > 
> > ... here. So that we have a well established "bad config" path at
> > the root level, just like the current code has.
> > 
> > Since the ppc_radix64_walk_tree() now calls ppc_radix64_next_level()
> > in a loop instead of recursing, and since ppc_radix64_next_level()
> > returns the nls value for the next level, it really makes sense to
> > have this check in ppc_radix64_walk_tree() and maybe put an assert
> > in ppc_radix64_next_level().
> 
> ppc_radix64_next_level() also covers the needs of partition-scoped 
> translation which translates each process table address. See PATCH 7.
> 
> I rather not duplicate more code and leave it as it is.
>  

These patches change the behaviour of some existing paths in a
non-trivial way. I'll wait for the respin and try to review again.

> Thanks,
> 
> C.
> 
> > 
> >> +        pde = ppc_radix64_next_level(cpu, eaddr, pte_addr, &nls, psize,
> >> +                                     fault_cause);
> >> +    } while ((pde & R_PTE_VALID) && !(pde & R_PTE_LEAF));
> >>  
> >> -    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
> >> -    if (pde & R_PTE_LEAF) {
> >> +    /* Did we find a valid leaf? */
> >> +    if ((pde & R_PTE_VALID) && (pde & R_PTE_LEAF)) {
> >>          uint64_t rpn = pde & R_PTE_RPN;
> >>          uint64_t mask = (1UL << *psize) - 1;
> >>  
> >>          /* Or high bits of rpn and low bits to ea to form whole real addr */
> >>          *raddr = (rpn & ~mask) | (eaddr & mask);
> >> -        *pte_addr = base_addr + (index * sizeof(pde));
> >> -        return pde;
> >>      }
> >>  
> >> -    /* Next Level of Radix Tree */
> >> -    return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
> >> -                                 raddr, psize, fault_cause, pte_addr);
> >> +    return pde;
> >>  }
> >>  
> >>  static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
> > 
>

Patch

diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index b4e6abcd2d35..136498111f60 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -162,44 +162,60 @@  static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
     }
 }
 
-static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
-                                      uint64_t base_addr, uint64_t nls,
-                                      hwaddr *raddr, int *psize,
-                                      int *fault_cause, hwaddr *pte_addr)
+static uint64_t ppc_radix64_next_level(PowerPCCPU *cpu, vaddr eaddr,
+                                       uint64_t *pte_addr, uint64_t *nls,
+                                       int *psize, int *fault_cause)
 {
     CPUState *cs = CPU(cpu);
     uint64_t index, pde;
 
-    if (nls < 5) { /* Directory maps less than 2**5 entries */
+    if (*nls < 5) { /* Directory maps less than 2**5 entries */
         *fault_cause |= DSISR_R_BADCONFIG;
         return 0;
     }
 
     /* Read page <directory/table> entry from guest address space */
-    index = eaddr >> (*psize - nls); /* Shift */
-    index &= ((1UL << nls) - 1); /* Mask */
-    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
-    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
+    pde = ldq_phys(cs->as, *pte_addr);
+    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
         *fault_cause |= DSISR_NOPTE;
         return 0;
     }
 
-    *psize -= nls;
+    *psize -= *nls;
+    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
+        *nls = pde & R_PDE_NLS;
+        index = eaddr >> (*psize - *nls);       /* Shift */
+        index &= ((1UL << *nls) - 1);           /* Mask */
+        *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
+    }
+    return pde;
+}
+
+static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
+                                      uint64_t base_addr, uint64_t nls,
+                                      hwaddr *raddr, int *psize,
+                                      int *fault_cause, hwaddr *pte_addr)
+{
+    uint64_t index, pde;
+
+    index = eaddr >> (*psize - nls);    /* Shift */
+    index &= ((1UL << nls) - 1);       /* Mask */
+    *pte_addr = base_addr + (index * sizeof(pde));
+    do {
+        pde = ppc_radix64_next_level(cpu, eaddr, pte_addr, &nls, psize,
+                                     fault_cause);
+    } while ((pde & R_PTE_VALID) && !(pde & R_PTE_LEAF));
 
-    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
-    if (pde & R_PTE_LEAF) {
+    /* Did we find a valid leaf? */
+    if ((pde & R_PTE_VALID) && (pde & R_PTE_LEAF)) {
         uint64_t rpn = pde & R_PTE_RPN;
         uint64_t mask = (1UL << *psize) - 1;
 
         /* Or high bits of rpn and low bits to ea to form whole real addr */
         *raddr = (rpn & ~mask) | (eaddr & mask);
-        *pte_addr = base_addr + (index * sizeof(pde));
-        return pde;
     }
 
-    /* Next Level of Radix Tree */
-    return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
-                                 raddr, psize, fault_cause, pte_addr);
+    return pde;
 }
 
 static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)