
[U-Boot,v4] arm: cache: always flush cache line size for page table

Message ID 20160807174301.23482-1-stefan@agner.ch
State Superseded
Delegated to: Tom Rini

Commit Message

Stefan Agner Aug. 7, 2016, 5:43 p.m. UTC
From: Stefan Agner <stefan.agner@toradex.com>

The page table is maintained by the CPU, hence it is safe to always
align the cache flush to whole cache lines. This makes it possible to
use mmu_page_table_flush for a single page table entry, e.g. when
configuring only small regions through mmu_set_region_dcache_behaviour.

Signed-off-by: Stefan Agner <stefan.agner@toradex.com>
Tested-by: Fabio Estevam <fabio.estevam@nxp.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
---

Changes in v4:
- Fixed spelling mistake for real

Changes in v3:
- Fixed spelling mistake

Changes in v2:
- Move cache line alignment from mmu_page_table_flush to
  mmu_set_region_dcache_behaviour

 arch/arm/lib/cache-cp15.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
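
For reference, the rounding the patch introduces can be sketched as a small
stand-alone C program; the cache line size, the example address and the
printf harness below are illustrative only and not taken from U-Boot:

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_SIZE 64u	/* assumed line size for the example */

/* Round an address down to the start of its cache line. */
static uintptr_t cacheline_floor(uintptr_t addr)
{
	return addr & ~((uintptr_t)CACHELINE_SIZE - 1);
}

/* Round an address up to the next line boundary (like U-Boot's ALIGN()). */
static uintptr_t cacheline_ceil(uintptr_t addr)
{
	return (addr + CACHELINE_SIZE - 1) & ~((uintptr_t)CACHELINE_SIZE - 1);
}

int main(void)
{
	/* Pretend a single 4-byte section descriptor sits at this address. */
	uintptr_t entry = 0x8000f004u;

	/* The flushed range covers whole cache lines even though the
	 * descriptor itself is smaller and not line aligned. */
	printf("flush 0x%lx..0x%lx\n",
	       (unsigned long)cacheline_floor(entry),
	       (unsigned long)cacheline_ceil(entry + sizeof(uint32_t)));
	return 0;
}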

Comments

Heiko Schocher Aug. 8, 2016, 4:22 a.m. UTC | #1
Hello Stefan,

On 07.08.2016 at 19:43, Stefan Agner wrote:
> From: Stefan Agner <stefan.agner@toradex.com>
>
> The page table is maintained by the CPU, hence it is safe to always
> align the cache flush to whole cache lines. This makes it possible to
> use mmu_page_table_flush for a single page table entry, e.g. when
> configuring only small regions through mmu_set_region_dcache_behaviour.
>
> Signed-off-by: Stefan Agner <stefan.agner@toradex.com>
> Tested-by: Fabio Estevam <fabio.estevam@nxp.com>
> Reviewed-by: Simon Glass <sjg@chromium.org>
> ---
>
> Changes in v4:
> - Fixed spelling mistake for real
>
> Changes in v3:
> - Fixed spelling mistake
>
> Changes in v2:
> - Move cache line alignment from mmu_page_table_flush to
>    mmu_set_region_dcache_behaviour
>
>   arch/arm/lib/cache-cp15.c | 13 ++++++++++++-
>   1 file changed, 12 insertions(+), 1 deletion(-)

Reviewed-by: Heiko Schocher <hs@denx.de>

bye,
Heiko
>
> diff --git a/arch/arm/lib/cache-cp15.c b/arch/arm/lib/cache-cp15.c
> index 1121dc3..488094e 100644
> --- a/arch/arm/lib/cache-cp15.c
> +++ b/arch/arm/lib/cache-cp15.c
> @@ -62,6 +62,7 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
>   				     enum dcache_option option)
>   {
>   	u32 *page_table = (u32 *)gd->arch.tlb_addr;
> +	phys_addr_t startpt, stoppt;
>   	unsigned long upto, end;
>
>   	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
> @@ -70,7 +71,17 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
>   	      option);
>   	for (upto = start; upto < end; upto++)
>   		set_section_dcache(upto, option);
> -	mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]);
> +
> +	/*
> +	 * Make sure range is cache line aligned
> +	 * Only CPU maintains page tables, hence it is safe to always
> +	 * flush complete cache lines...
> +	 */
> +	startpt = (phys_addr_t)&page_table[start];
> +	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
> +	stoppt = (phys_addr_t)&page_table[end];
> +	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
> +	mmu_page_table_flush(startpt, stoppt);
>   }
>
>   __weak void dram_bank_mmu_setup(int bank)
>
Lokesh Vutla Aug. 8, 2016, 6:10 a.m. UTC | #2
Hi,

On Sunday 07 August 2016 11:13 PM, Stefan Agner wrote:
> From: Stefan Agner <stefan.agner@toradex.com>
> 
> The page table is maintained by the CPU, hence it is safe to always
> align the cache flush to whole cache lines. This makes it possible to
> use mmu_page_table_flush for a single page table entry, e.g. when
> configuring only small regions through mmu_set_region_dcache_behaviour.
> 
> Signed-off-by: Stefan Agner <stefan.agner@toradex.com>
> Tested-by: Fabio Estevam <fabio.estevam@nxp.com>
> Reviewed-by: Simon Glass <sjg@chromium.org>
> ---

I get the following warnings when CONFIG_PHYS_64BIT is enabled on ARM
platforms (dra7xx_evm_defconfig):

arch/arm/lib/cache-cp15.c: In function 'mmu_set_region_dcache_behaviour':
arch/arm/lib/cache-cp15.c:80:12: warning: cast from pointer to integer
of different size [-Wpointer-to-int-cast]
  startpt = (phys_addr_t)&page_table[start];
            ^
arch/arm/lib/cache-cp15.c:82:11: warning: cast from pointer to integer
of different size [-Wpointer-to-int-cast]
  stoppt = (phys_addr_t)&page_table[end];
           ^
arch/arm/lib/cache-cp15.c: In function 'mmu_set_region_dcache_behaviour':
arch/arm/lib/cache-cp15.c:80:12: warning: cast from pointer to integer
of different size [-Wpointer-to-int-cast]
  startpt = (phys_addr_t)&page_table[start];
            ^
arch/arm/lib/cache-cp15.c:82:11: warning: cast from pointer to integer
of different size [-Wpointer-to-int-cast]
  stoppt = (phys_addr_t)&page_table[end];

Thanks and regards,
Lokesh
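
The warnings stem from a width mismatch: with CONFIG_PHYS_64BIT enabled,
phys_addr_t is 64 bits wide while pointers in the 32-bit build are 32 bits,
so the direct cast goes from a pointer to an integer of a different size.
A stripped-down reconstruction of the situation (the typedef and table below
only mimic what the config option selects; they are not U-Boot's headers):

#include <stdint.h>

typedef uint64_t phys_addr_t;	/* what CONFIG_PHYS_64BIT effectively selects */

static uint32_t page_table[4096];

/* On a 32-bit target this triggers -Wpointer-to-int-cast:
 * a 32-bit pointer is cast straight to a 64-bit integer. */
phys_addr_t warns(unsigned long idx)
{
	return (phys_addr_t)&page_table[idx];
}

/* Going through uintptr_t (or unsigned long) first keeps the widths
 * matched and silences the warning. */
phys_addr_t quiet(unsigned long idx)
{
	return (phys_addr_t)(uintptr_t)&page_table[idx];
}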

> 
> Changes in v4:
> - Fixed spelling mistake for real
> 
> Changes in v3:
> - Fixed spelling mistake
> 
> Changes in v2:
> - Move cache line alignment from mmu_page_table_flush to
>   mmu_set_region_dcache_behaviour
> 
>  arch/arm/lib/cache-cp15.c | 13 ++++++++++++-
>  1 file changed, 12 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm/lib/cache-cp15.c b/arch/arm/lib/cache-cp15.c
> index 1121dc3..488094e 100644
> --- a/arch/arm/lib/cache-cp15.c
> +++ b/arch/arm/lib/cache-cp15.c
> @@ -62,6 +62,7 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
>  				     enum dcache_option option)
>  {
>  	u32 *page_table = (u32 *)gd->arch.tlb_addr;
> +	phys_addr_t startpt, stoppt;
>  	unsigned long upto, end;
>  
>  	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
> @@ -70,7 +71,17 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
>  	      option);
>  	for (upto = start; upto < end; upto++)
>  		set_section_dcache(upto, option);
> -	mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]);
> +
> +	/*
> +	 * Make sure range is cache line aligned
> +	 * Only CPU maintains page tables, hence it is safe to always
> +	 * flush complete cache lines...
> +	 */
> +	startpt = (phys_addr_t)&page_table[start];
> +	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
> +	stoppt = (phys_addr_t)&page_table[end];
> +	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
> +	mmu_page_table_flush(startpt, stoppt);
>  }
>  
>  __weak void dram_bank_mmu_setup(int bank)
>
Stefan Agner Aug. 8, 2016, 7:43 a.m. UTC | #3
On 2016-08-07 23:10, Lokesh Vutla wrote:
> Hi,
> 
> On Sunday 07 August 2016 11:13 PM, Stefan Agner wrote:
>> From: Stefan Agner <stefan.agner@toradex.com>
>>
>> The page table is maintained by the CPU, hence it is safe to always
>> align the cache flush to whole cache lines. This makes it possible to
>> use mmu_page_table_flush for a single page table entry, e.g. when
>> configuring only small regions through mmu_set_region_dcache_behaviour.
>>
>> Signed-off-by: Stefan Agner <stefan.agner@toradex.com>
>> Tested-by: Fabio Estevam <fabio.estevam@nxp.com>
>> Reviewed-by: Simon Glass <sjg@chromium.org>
>> ---
> 
> I get the following warnings when CONFIG_PHYS_64BIT is enabled on ARM
> platforms (dra7xx_evm_defconfig):

Hm, do I see this right: is this otherwise a 32-bit architecture? Does
that work without LPAE? What is the page table size in this case?

> 
> arch/arm/lib/cache-cp15.c: In function 'mmu_set_region_dcache_behaviour':
> arch/arm/lib/cache-cp15.c:80:12: warning: cast from pointer to integer
> of different size [-Wpointer-to-int-cast]
>   startpt = (phys_addr_t)&page_table[start];
>             ^

Hm, maybe we should keep using unsigned long inside this function then.
Added Thierry to CC since he introduced phys_addr_t in the function
signature.

--
Stefan
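
A minimal stand-alone sketch of that suggestion, with CONFIG_SYS_CACHELINE_SIZE,
ALIGN() and mmu_page_table_flush() mocked for illustration (this is not the
actual U-Boot code, nor necessarily the final form of the fix):

#include <stdio.h>

#define CONFIG_SYS_CACHELINE_SIZE	64UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int page_table[4096];	/* stand-in for gd->arch.tlb_addr */

static void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	printf("flush 0x%lx..0x%lx\n", start, stop);	/* stub */
}

static void flush_entries(unsigned long start, unsigned long end)
{
	unsigned long startpt, stoppt;

	/* Same rounding as the patch, but the descriptor addresses are
	 * kept in unsigned long, which always matches the pointer width,
	 * so no -Wpointer-to-int-cast is triggered. */
	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}

int main(void)
{
	flush_entries(0x800, 0x801);	/* e.g. a single section entry */
	return 0;
}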

> arch/arm/lib/cache-cp15.c:82:11: warning: cast from pointer to integer
> of different size [-Wpointer-to-int-cast]
>   stoppt = (phys_addr_t)&page_table[end];
>            ^
> arch/arm/lib/cache-cp15.c: In function 'mmu_set_region_dcache_behaviour':
> arch/arm/lib/cache-cp15.c:80:12: warning: cast from pointer to integer
> of different size [-Wpointer-to-int-cast]
>   startpt = (phys_addr_t)&page_table[start];
>             ^
> arch/arm/lib/cache-cp15.c:82:11: warning: cast from pointer to integer
> of different size [-Wpointer-to-int-cast]
>   stoppt = (phys_addr_t)&page_table[end];
> 
> Thanks and regards,
> Lokesh
> 
>>
>> Changes in v4:
>> - Fixed spelling mistake for real
>>
>> Changes in v3:
>> - Fixed spelling mistake
>>
>> Changes in v2:
>> - Move cache line alignment from mmu_page_table_flush to
>>   mmu_set_region_dcache_behaviour
>>
>>  arch/arm/lib/cache-cp15.c | 13 ++++++++++++-
>>  1 file changed, 12 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arm/lib/cache-cp15.c b/arch/arm/lib/cache-cp15.c
>> index 1121dc3..488094e 100644
>> --- a/arch/arm/lib/cache-cp15.c
>> +++ b/arch/arm/lib/cache-cp15.c
>> @@ -62,6 +62,7 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
>>  				     enum dcache_option option)
>>  {
>>  	u32 *page_table = (u32 *)gd->arch.tlb_addr;
>> +	phys_addr_t startpt, stoppt;
>>  	unsigned long upto, end;
>>
>>  	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
>> @@ -70,7 +71,17 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
>>  	      option);
>>  	for (upto = start; upto < end; upto++)
>>  		set_section_dcache(upto, option);
>> -	mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]);
>> +
>> +	/*
>> +	 * Make sure range is cache line aligned
>> +	 * Only CPU maintains page tables, hence it is safe to always
>> +	 * flush complete cache lines...
>> +	 */
>> +	startpt = (phys_addr_t)&page_table[start];
>> +	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
>> +	stoppt = (phys_addr_t)&page_table[end];
>> +	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
>> +	mmu_page_table_flush(startpt, stoppt);
>>  }
>>
>>  __weak void dram_bank_mmu_setup(int bank)
>>
Tom Rini Aug. 14, 2016, 8:01 p.m. UTC | #4
On Mon, Aug 08, 2016 at 12:43:03AM -0700, Stefan Agner wrote:
> On 2016-08-07 23:10, Lokesh Vutla wrote:
> > Hi,
> > 
> > On Sunday 07 August 2016 11:13 PM, Stefan Agner wrote:
> >> From: Stefan Agner <stefan.agner@toradex.com>
> >>
> >> The page table is maintained by the CPU, hence it is safe to always
> >> align the cache flush to whole cache lines. This makes it possible to
> >> use mmu_page_table_flush for a single page table entry, e.g. when
> >> configuring only small regions through mmu_set_region_dcache_behaviour.
> >>
> >> Signed-off-by: Stefan Agner <stefan.agner@toradex.com>
> >> Tested-by: Fabio Estevam <fabio.estevam@nxp.com>
> >> Reviewed-by: Simon Glass <sjg@chromium.org>
> >> ---
> > 
> > I get the following warnings when CONFIG_PHYS_64BIT is enabled on ARM
> > platforms (dra7xx_evm_defconfig):
> 
> Hm, do I see this right: is this otherwise a 32-bit architecture? Does
> that work without LPAE? What is the page table size in this case?

It's a 32-bit architecture with LPAE, IIRC, yes.
Stefan Agner Aug. 15, 2016, 3:44 a.m. UTC | #5
On 2016-08-14 13:01, Tom Rini wrote:
> On Mon, Aug 08, 2016 at 12:43:03AM -0700, Stefan Agner wrote:
>> On 2016-08-07 23:10, Lokesh Vutla wrote:
>> > Hi,
>> >
>> > On Sunday 07 August 2016 11:13 PM, Stefan Agner wrote:
>> >> From: Stefan Agner <stefan.agner@toradex.com>
>> >>
>> >> The page table is maintained by the CPU, hence it is safe to always
>> >> align the cache flush to whole cache lines. This makes it possible to
>> >> use mmu_page_table_flush for a single page table entry, e.g. when
>> >> configuring only small regions through mmu_set_region_dcache_behaviour.
>> >>
>> >> Signed-off-by: Stefan Agner <stefan.agner@toradex.com>
>> >> Tested-by: Fabio Estevam <fabio.estevam@nxp.com>
>> >> Reviewed-by: Simon Glass <sjg@chromium.org>
>> >> ---
>> >
>> > I get the following warnings when CONFIG_PHYS_64BIT is enabled on ARM
>> > platforms (dra7xx_evm_defconfig):
>>
>> Hm, do I see this right: is this otherwise a 32-bit architecture? Does
>> that work without LPAE? What is the page table size in this case?
> 
> It's a 32-bit architecture with LPAE, IIRC, yes.

Hm, looking at other functions such as set_section_dcache,
page_table is cast to a u64 * there, hence I guess this is wrong in
mmu_set_region_dcache_behaviour. In fact, that makes it seem unsafe to
use this function as is...

--
Stefan
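
To illustrate the mismatch: LPAE's long-descriptor format uses 64-bit
entries, so indexing the table as u32 * computes the wrong entry address
and hence the wrong flush range. A tiny sketch with illustrative numbers
(not U-Boot code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long section = 0x123;	/* arbitrary section index */

	/* Byte offset of the descriptor for this section inside the table:
	 * non-LPAE short descriptors are 32 bits, LPAE descriptors 64 bits,
	 * so a u32-based index lands in the middle of the wrong entry. */
	printf("non-LPAE (u32 entries): offset 0x%lx\n",
	       (unsigned long)(section * sizeof(uint32_t)));
	printf("LPAE     (u64 entries): offset 0x%lx\n",
	       (unsigned long)(section * sizeof(uint64_t)));
	return 0;
}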

Patch

diff --git a/arch/arm/lib/cache-cp15.c b/arch/arm/lib/cache-cp15.c
index 1121dc3..488094e 100644
--- a/arch/arm/lib/cache-cp15.c
+++ b/arch/arm/lib/cache-cp15.c
@@ -62,6 +62,7 @@  void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 				     enum dcache_option option)
 {
 	u32 *page_table = (u32 *)gd->arch.tlb_addr;
+	phys_addr_t startpt, stoppt;
 	unsigned long upto, end;
 
 	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
@@ -70,7 +71,17 @@  void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 	      option);
 	for (upto = start; upto < end; upto++)
 		set_section_dcache(upto, option);
-	mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]);
+
+	/*
+	 * Make sure range is cache line aligned
+	 * Only CPU maintains page tables, hence it is safe to always
+	 * flush complete cache lines...
+	 */
+	startpt = (phys_addr_t)&page_table[start];
+	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
+	stoppt = (phys_addr_t)&page_table[end];
+	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
+	mmu_page_table_flush(startpt, stoppt);
 }
 
 __weak void dram_bank_mmu_setup(int bank)