
[U-Boot,05/15] x86: Add basic cache operations

Message ID 1351051486-6980-6-git-send-email-sjg@chromium.org
State Superseded, archived
Delegated to: Simon Glass

Commit Message

Simon Glass Oct. 24, 2012, 4:04 a.m. UTC
From: Stefan Reinauer <reinauer@chromium.org>

Add functions to enable/disable the data cache.

Signed-off-by: Stefan Reinauer <reinauer@chromium.org>
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 arch/x86/cpu/interrupts.c    |    8 +------
 arch/x86/include/asm/cache.h |   47 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 7 deletions(-)

Comments

Graeme Russ Oct. 24, 2012, 4:44 a.m. UTC | #1
Hi Simon,

On Wed, Oct 24, 2012 at 3:04 PM, Simon Glass <sjg@chromium.org> wrote:
> From: Stefan Reinauer <reinauer@chromium.org>
>
> Add functions to enable/disable the data cache.
>
> Signed-off-by: Stefan Reinauer <reinauer@chromium.org>
> Signed-off-by: Simon Glass <sjg@chromium.org>
> ---
>  arch/x86/cpu/interrupts.c    |    8 +------
>  arch/x86/include/asm/cache.h |   47 ++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 48 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/cpu/interrupts.c b/arch/x86/cpu/interrupts.c
> index 43ec3f8..710b653 100644
> --- a/arch/x86/cpu/interrupts.c
> +++ b/arch/x86/cpu/interrupts.c
> @@ -28,6 +28,7 @@
>   */
>
>  #include <common.h>
> +#include <asm/cache.h>
>  #include <asm/interrupt.h>
>  #include <asm/io.h>
>  #include <asm/processor-flags.h>
> @@ -50,13 +51,6 @@
>   */
>  static unsigned long __force_order;
>
> -static inline unsigned long read_cr0(void)
> -{
> -       unsigned long val;
> -       asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
> -       return val;
> -}
> -

Happy to move read_cr0 out of interrupts.c

>  static inline unsigned long read_cr2(void)
>  {
>         unsigned long val;

But please move them all...

> diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
> index 87c9e0b..9836856 100644
> --- a/arch/x86/include/asm/cache.h
> +++ b/arch/x86/include/asm/cache.h
> @@ -32,4 +32,51 @@
>  #define ARCH_DMA_MINALIGN      64
>  #endif
>
> +/* The memory clobber prevents GCC from reordering reads and writes
> + * of CR0
> + */
> +static inline unsigned long read_cr0(void)
> +{
> +       unsigned long cr0;
> +
> +       asm volatile ("movl %%cr0, %0" : "=r" (cr0) : : "memory");
> +       return cr0;
> +}
> +
> +static inline void write_cr0(unsigned long cr0)
> +{
> +       asm volatile ("movl %0, %%cr0" : : "r" (cr0) : "memory");
> +}

...to another header (control_registers.h)

> +
> +static inline void wbinvd(void)
> +{
> +       asm volatile ("wbinvd" : : : "memory");
> +}
> +
> +static inline void invd(void)
> +{
> +       asm volatile("invd" : : : "memory");
> +}
> +
> +static inline void enable_cache(void)
> +{
> +       unsigned long cr0;
> +
> +       cr0 = read_cr0();
> +       cr0 &= 0x9fffffff;
> +       write_cr0(cr0);
> +}
> +
> +static inline void disable_cache(void)
> +{
> +       /* Disable and write back the cache */
> +       unsigned long cr0;
> +
> +       cr0 = read_cr0();
> +       cr0 |= 0x40000000;
> +       wbinvd();
> +       write_cr0(cr0);
> +       wbinvd();
> +}

These already exist in cpu.c as weak functions (some systems need to
do funky things when fiddling with caching, like updating MTRRs)
implemented with pure inline assembly. Please fix up those
implementations to use the proper cr0 accessors.

> +
>  #endif /* __X86_CACHE_H__ */
> --
> 1.7.7.3
>

Regards,

Graeme

Simon Glass Dec. 1, 2012, 9:40 p.m. UTC | #2
Hi Graeme,

On Tue, Oct 23, 2012 at 9:44 PM, Graeme Russ <graeme.russ@gmail.com> wrote:
> Hi Simon,
>
> On Wed, Oct 24, 2012 at 3:04 PM, Simon Glass <sjg@chromium.org> wrote:
>> From: Stefan Reinauer <reinauer@chromium.org>
>>
>> Add functions to enable/disable the data cache.
>>
>> Signed-off-by: Stefan Reinauer <reinauer@chromium.org>
>> Signed-off-by: Simon Glass <sjg@chromium.org>
>> ---
>>  arch/x86/cpu/interrupts.c    |    8 +------
>>  arch/x86/include/asm/cache.h |   47 ++++++++++++++++++++++++++++++++++++++++++
>>  2 files changed, 48 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/x86/cpu/interrupts.c b/arch/x86/cpu/interrupts.c
>> index 43ec3f8..710b653 100644
>> --- a/arch/x86/cpu/interrupts.c
>> +++ b/arch/x86/cpu/interrupts.c
>> @@ -28,6 +28,7 @@
>>   */
>>
>>  #include <common.h>
>> +#include <asm/cache.h>
>>  #include <asm/interrupt.h>
>>  #include <asm/io.h>
>>  #include <asm/processor-flags.h>
>> @@ -50,13 +51,6 @@
>>   */
>>  static unsigned long __force_order;
>>
>> -static inline unsigned long read_cr0(void)
>> -{
>> -       unsigned long val;
>> -       asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
>> -       return val;
>> -}
>> -
>
> Happy to move read_cr0 out of interrupts.c
>
>>  static inline unsigned long read_cr2(void)
>>  {
>>         unsigned long val;
>
> But please move them all...
>
>> diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
>> index 87c9e0b..9836856 100644
>> --- a/arch/x86/include/asm/cache.h
>> +++ b/arch/x86/include/asm/cache.h
>> @@ -32,4 +32,51 @@
>>  #define ARCH_DMA_MINALIGN      64
>>  #endif
>>
>> +/* The memory clobber prevents GCC from reordering reads and writes
>> + * of CR0
>> + */
>> +static inline unsigned long read_cr0(void)
>> +{
>> +       unsigned long cr0;
>> +
>> +       asm volatile ("movl %%cr0, %0" : "=r" (cr0) : : "memory");
>> +       return cr0;
>> +}
>> +
>> +static inline void write_cr0(unsigned long cr0)
>> +{
>> +       asm volatile ("movl %0, %%cr0" : : "r" (cr0) : "memory");
>> +}
>
> ...to another header (control_registers.h)

Will do, that makes sense. I will move the debug registers also.
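
In concrete terms, something along these lines (a sketch only; the
header name and exact contents will be settled in the follow-up patch,
and the debug-register accessors would follow the same pattern):

/* arch/x86/include/asm/control_registers.h (name provisional) */
#ifndef __X86_CONTROL_REGISTERS_H__
#define __X86_CONTROL_REGISTERS_H__

/* The memory clobber stops GCC from reordering these accesses */
static inline unsigned long read_cr0(void)
{
	unsigned long cr0;

	asm volatile ("movl %%cr0, %0" : "=r" (cr0) : : "memory");
	return cr0;
}

static inline void write_cr0(unsigned long cr0)
{
	asm volatile ("movl %0, %%cr0" : : "r" (cr0) : "memory");
}

/* read_cr2() and the other control/debug register accessors currently
 * in interrupts.c would move here as well.
 */

#endif /* __X86_CONTROL_REGISTERS_H__ */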

>
>> +
>> +static inline void wbinvd(void)
>> +{
>> +       asm volatile ("wbinvd" : : : "memory");
>> +}
>> +
>> +static inline void invd(void)
>> +{
>> +       asm volatile("invd" : : : "memory");
>> +}
>> +
>> +static inline void enable_cache(void)
>> +{
>> +       unsigned long cr0;
>> +
>> +       cr0 = read_cr0();
>> +       cr0 &= 0x9fffffff;
>> +       write_cr0(cr0);
>> +}
>> +
>> +static inline void disable_cache(void)
>> +{
>> +       /* Disable and write back the cache */
>> +       unsigned long cr0;
>> +
>> +       cr0 = read_cr0();
>> +       cr0 |= 0x40000000;
>> +       wbinvd();
>> +       write_cr0(cr0);
>> +       wbinvd();
>> +}
>
> These already exist in cpu.c as weak functions (some systems need to
> do funky things when fiddling with caching, like updating MTRRs)
> implemented with pure inline assembly. Please fix up those
> implementations to use the proper cr0 accessors.

OK, I will send a new patch for this.
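
To be concrete, the cpu.c side would end up looking something like this
(a sketch; the exact function names and the weak-function mechanism used
there need to be checked first, and the weak linkage would be kept so
boards can still override them):

void dcache_enable(void)
{
	/* Clear CR0.CD and CR0.NW to turn the data cache back on */
	write_cr0(read_cr0() & 0x9fffffff);
}

void dcache_disable(void)
{
	unsigned long cr0;

	/* Set CR0.CD, flushing before and after so nothing is lost */
	cr0 = read_cr0() | 0x40000000;
	wbinvd();
	write_cr0(cr0);
	wbinvd();
}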

>
>> +
>>  #endif /* __X86_CACHE_H__ */
>> --
>> 1.7.7.3
>>
>
> Regards,
>
> Graeme

Regards,
Simon

Patch

diff --git a/arch/x86/cpu/interrupts.c b/arch/x86/cpu/interrupts.c
index 43ec3f8..710b653 100644
--- a/arch/x86/cpu/interrupts.c
+++ b/arch/x86/cpu/interrupts.c
@@ -28,6 +28,7 @@ 
  */
 
 #include <common.h>
+#include <asm/cache.h>
 #include <asm/interrupt.h>
 #include <asm/io.h>
 #include <asm/processor-flags.h>
@@ -50,13 +51,6 @@ 
  */
 static unsigned long __force_order;
 
-static inline unsigned long read_cr0(void)
-{
-	unsigned long val;
-	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
 static inline unsigned long read_cr2(void)
 {
 	unsigned long val;
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 87c9e0b..9836856 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -32,4 +32,51 @@ 
 #define ARCH_DMA_MINALIGN	64
 #endif
 
+/* The memory clobber prevents GCC from reordering reads and writes
+ * of CR0
+ */
+static inline unsigned long read_cr0(void)
+{
+	unsigned long cr0;
+
+	asm volatile ("movl %%cr0, %0" : "=r" (cr0) : : "memory");
+	return cr0;
+}
+
+static inline void write_cr0(unsigned long cr0)
+{
+	asm volatile ("movl %0, %%cr0" : : "r" (cr0) : "memory");
+}
+
+static inline void wbinvd(void)
+{
+	asm volatile ("wbinvd" : : : "memory");
+}
+
+static inline void invd(void)
+{
+	asm volatile("invd" : : : "memory");
+}
+
+static inline void enable_cache(void)
+{
+	unsigned long cr0;
+
+	cr0 = read_cr0();
+	cr0 &= 0x9fffffff;
+	write_cr0(cr0);
+}
+
+static inline void disable_cache(void)
+{
+	/* Disable and write back the cache */
+	unsigned long cr0;
+
+	cr0 = read_cr0();
+	cr0 |= 0x40000000;
+	wbinvd();
+	write_cr0(cr0);
+	wbinvd();
+}
+
 #endif /* __X86_CACHE_H__ */
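
A note on the CR0 masks used in enable_cache() and disable_cache()
above: bit 30 (0x40000000) is CD, the cache-disable bit, and bit 29
(0x20000000) is NW, not-write-through, so masking with 0x9fffffff
clears both. Assuming asm/processor-flags.h carries the usual
X86_CR0_CD and X86_CR0_NW definitions (as in the Linux header of the
same name, which interrupts.c already includes), the same helpers could
be written with symbolic names:

static inline void enable_cache(void)
{
	/* Clear CD and NW to re-enable caching */
	write_cr0(read_cr0() & ~(X86_CR0_CD | X86_CR0_NW));
}

static inline void disable_cache(void)
{
	/* Set CD; flush before and after so no dirty lines are lost */
	unsigned long cr0 = read_cr0() | X86_CR0_CD;

	wbinvd();
	write_cr0(cr0);
	wbinvd();
}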