@@ -101,6 +101,36 @@ int print_cpuinfo(void)
}
#endif /* CONFIG_DISPLAY_CPUINFO */
+#ifdef CONFIG_ARCH_EARLY_INIT_R
+int arch_early_init_r(void)
+{
+#ifndef CONFIG_SYS_DCACHE_OFF
+ uint32_t mask = ~(SZ_1M - 1);
+ uint32_t phys = mem_malloc_start & mask;
+ uint32_t stop = (mem_malloc_end + (SZ_1M - 1)) & mask;
+ uint32_t size = stop - phys;
+ uint32_t virt = CONFIG_CONSISTENT_DMA_START & mask;
+ uint32_t *sect_table = (uint32_t *)gd->arch.tlb_addr;
+
+ if (!mmu_enabled())
+ return 0;
+
+ if (size > (CONFIG_CONSISTENT_DMA_END - virt))
+ panic("malloc size is too large for dma buffer\n");
+
+ while (size > 0) {
+ sect_table[virt >> 20] = phys | (3 << 10) | DCACHE_OFF;
+ virt += SZ_1M;
+ phys += SZ_1M;
+ size -= SZ_1M;
+ }
+
+	mmu_page_table_flush((uint32_t)&sect_table[(CONFIG_CONSISTENT_DMA_START & mask) >> 20], (uint32_t)&sect_table[virt >> 20]);
+#endif /* !CONFIG_SYS_DCACHE_OFF */
+ return 0;
+}
+#endif /* CONFIG_ARCH_EARLY_INIT_R */
+
int cleanup_before_linux(void)
{
/*
@@ -7,6 +7,18 @@
#ifndef _ASM_CONFIG_H_
#define _ASM_CONFIG_H_
+#if defined(CONFIG_SOC_FARADAY) && !defined(CONFIG_SYS_DCACHE_OFF)
+#ifndef CONFIG_ARCH_EARLY_INIT_R
+#define CONFIG_ARCH_EARLY_INIT_R
+#endif
+#ifndef CONFIG_CONSISTENT_DMA_START
+#define CONFIG_CONSISTENT_DMA_START 0xff000000
+#endif
+#ifndef CONFIG_CONSISTENT_DMA_END
+#define CONFIG_CONSISTENT_DMA_END 0xfff00000
+#endif
+#endif /* CONFIG_SOC_FARADAY && !CONFIG_SYS_DCACHE_OFF */
+
#define CONFIG_LMB
#define CONFIG_SYS_BOOT_RAMDISK_HIGH
#endif
@@ -8,6 +8,10 @@
#ifndef __ASM_ARM_DMA_MAPPING_H
#define __ASM_ARM_DMA_MAPPING_H
+#if defined(CONFIG_SOC_FARADAY) && !defined(CONFIG_SYS_DCACHE_OFF)
+#include <malloc.h>
+#endif /* CONFIG_SOC_FARADAY && !CONFIG_SYS_DCACHE_OFF */
+
enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
@@ -16,13 +20,46 @@ enum dma_data_direction {
static void *dma_alloc_coherent(size_t len, unsigned long *handle)
{
+#if defined(CONFIG_SOC_FARADAY) && !defined(CONFIG_SYS_DCACHE_OFF)
+	uint32_t ofs;
+	void *mem;
+	len = (len + ARCH_DMA_MINALIGN - 1) & ~(size_t)(ARCH_DMA_MINALIGN - 1);
+	mem = memalign(ARCH_DMA_MINALIGN, len);
+	if (handle)
+		*handle = (unsigned long)mem;
+	if (mem && mmu_enabled()) {
+		invalidate_dcache_range((ulong)mem, (ulong)mem + len);
+		ofs = (uint32_t)mem - (mem_malloc_start & 0xfff00000);
+		mem = (void *)(CONFIG_CONSISTENT_DMA_START + ofs);
+ }
+
+ return mem;
+#else /* CONFIG_SOC_FARADAY && !CONFIG_SYS_DCACHE_OFF */
*handle = (unsigned long)memalign(ARCH_DMA_MINALIGN, len);
+#endif /* CONFIG_SOC_FARADAY && !CONFIG_SYS_DCACHE_OFF */
return (void *)*handle;
}
static inline unsigned long dma_map_single(volatile void *vaddr, size_t len,
enum dma_data_direction dir)
{
+#if defined(CONFIG_SOC_FARADAY) && !defined(CONFIG_SYS_DCACHE_OFF)
+ if (mmu_enabled()) {
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_TO_DEVICE:
+ flush_dcache_range((ulong)vaddr,
+ (ulong)vaddr + len);
+ break;
+
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range((ulong)vaddr,
+ (ulong)vaddr + len);
+ break;
+ }
+ return virt_to_phys((void *)vaddr);
+ }
+#endif /* CONFIG_SOC_FARADAY && !CONFIG_SYS_DCACHE_OFF */
return (unsigned long)vaddr;
}
@@ -28,6 +28,9 @@
#if 0 /* XXX###XXX */
#include <asm/arch/hardware.h>
#endif /* XXX###XXX */
+#if defined(CONFIG_SOC_FARADAY) && !defined(CONFIG_SYS_DCACHE_OFF)
+#include <common.h>
+#endif
static inline void sync(void)
{
@@ -57,9 +60,21 @@ static inline void unmap_physmem(void *vaddr, unsigned long flags)
}
-static inline phys_addr_t virt_to_phys(void * vaddr)
+static inline phys_addr_t virt_to_phys(void *vaddr)
{
- return (phys_addr_t)(vaddr);
+#if defined(CONFIG_SOC_FARADAY) && !defined(CONFIG_SYS_DCACHE_OFF)
+ DECLARE_GLOBAL_DATA_PTR;
+ u32 *sect_table = (u32 *)gd->arch.tlb_addr;
+ phys_addr_t phys = (phys_addr_t)vaddr;
+
+ if (!vaddr || !mmu_enabled())
+ return phys;
+
+ phys = sect_table[(u32)vaddr >> 20] & 0xfff00000;
+ return phys + ((phys_addr_t)vaddr & 0x000fffff);
+#else /* CONFIG_SOC_FARADAY && !CONFIG_SYS_DCACHE_OFF */
+ return (phys_addr_t)vaddr;
+#endif /* CONFIG_SOC_FARADAY && !CONFIG_SYS_DCACHE_OFF */
}
/*