//=============================================================================
#include <cyg/infra/cyg_type.h>
#include <cyg/hal/hal_soc.h>            // Variant specific hardware definitions

//-----------------------------------------------------------------------------
// Cache dimensions

// Data cache
#define HAL_DCACHE_SIZE         0x4000  // 16KB Size of data cache in bytes
#define HAL_DCACHE_LINE_SIZE    32      // Size of a data cache line
#define HAL_DCACHE_WAYS         64      // Associativity of the cache

// Instruction cache
#define HAL_ICACHE_SIZE         0x4000  // Size of cache in bytes
#define HAL_ICACHE_LINE_SIZE    32      // Size of a cache line
#define HAL_ICACHE_WAYS         64      // Associativity of the cache

// Number of sets = total size / (line size * associativity)
#define HAL_DCACHE_SETS (HAL_DCACHE_SIZE / (HAL_DCACHE_LINE_SIZE*HAL_DCACHE_WAYS))
#define HAL_ICACHE_SETS (HAL_ICACHE_SIZE / (HAL_ICACHE_LINE_SIZE*HAL_ICACHE_WAYS))

// Use the ARM9 clean-DCache-by-index mechanism; STEP/LIMIT define the
// index walk used by the generic ARM9 HAL.
// NOTE(review): WAYS=64 (giving SETS=8) is unusual for an ARM9 core
// (typically 4-way) -- confirm against the core's TRM.
#define CYGHWR_HAL_ARM_ARM9_CLEAN_DCACHE_INDEX
#define CYGHWR_HAL_ARM_ARM9_CLEAN_DCACHE_INDEX_STEP  0x20
#define CYGHWR_HAL_ARM_ARM9_CLEAN_DCACHE_INDEX_LIMIT 0x100
//-----------------------------------------------------------------------------
// Global control of data cache

// Enable the data cache.
// Sets bits 0-2 (M: MMU, A: alignment faults, C: DCache) of the CP15
// control register, so enabling the DCache also ensures the MMU and
// alignment checking are on.
#define HAL_DCACHE_ENABLE_L1()                                          \
CYG_MACRO_START                                                         \
    asm volatile (                                                      \
        "mrc p15, 0, r1, c1, c0, 0;"                                    \
        "orr r1, r1, #0x0007;"  /* enable DCache (also ensures   */     \
                                /* the MMU and alignment faults  */     \
                                /* are enabled)                  */     \
        "mcr p15, 0, r1, c1, c0, 0"                                     \
        :                                                               \
        :                                                               \
        : "r1" /* Clobber list */                                       \
        );                                                              \
CYG_MACRO_END
// Disable the data cache.
// Invalidates the whole DCache (c7,c6,0), then clears bit 2 (C) of the
// CP15 control register; MMU and alignment checking are left enabled.
// NOTE(review): c7,c6,0 invalidates without cleaning, so dirty lines
// are discarded -- call HAL_DCACHE_SYNC_L1() first if data must survive.
#define HAL_DCACHE_DISABLE_L1()                                         \
CYG_MACRO_START                                                         \
    asm volatile (                                                      \
        "mov r1, #0;"                                                   \
        "mcr p15, 0, r1, c7, c6, 0;"  /* invalidate data cache */       \
        "mrc p15, 0, r1, c1, c0, 0;"                                    \
        "bic r1, r1, #0x0004;"        /* disable DCache */              \
                                      /* but not MMU and alignment faults */ \
        "mcr p15, 0, r1, c1, c0, 0"                                     \
        :                                                               \
        :                                                               \
        : "r1" /* Clobber list */                                       \
        );                                                              \
CYG_MACRO_END
// Invalidate the entire data cache and both TLBs.
// WARNING: this macro can discard dirty cache lines (no clean is done).
#define HAL_DCACHE_INVALIDATE_ALL_L1()                                  \
CYG_MACRO_START                                                         \
    asm volatile (                                                      \
        "mov r0, #0;"                                                   \
        "mcr p15, 0, r0, c7, c6, 0;"  /* flush d-cache */               \
        "mcr p15, 0, r0, c8, c7, 0;"  /* flush i+d-TLBs */              \
        :                                                               \
        :                                                               \
        : "r0","memory" /* clobber list */                              \
        );                                                              \
CYG_MACRO_END
// Synchronize the contents of the data cache with memory:
// clean+invalidate the entire DCache (c7,c14,0), then drain the write
// buffer and issue a data memory barrier so all writes reach RAM.
#define HAL_DCACHE_SYNC_L1()                                            \
CYG_MACRO_START                                                         \
    asm volatile (                                                      \
        "nop; "                                                         \
        "nop; "                                                         \
        "nop; "                                                         \
        "nop; "                                                         \
        "nop; "                                                         \
        "nop; "                                                         \
        "mov r0, #0x0;"                                                 \
        "mcr p15, 0, r0, c7, c14, 0;"  /* clean, invalidate Dcache */   \
        "mcr p15, 0, r0, c7, c10, 4;"  /* drain the write buffer */     \
        "mcr p15, 0, r0, c7, c10, 5;"  /* data memory barrier */        \
        :                                                               \
        :                                                               \
        : "r0" /* Clobber list */                                       \
        );                                                              \
CYG_MACRO_END
// Query the state of the data cache.
// Reads the CP15 control register into _state_: nonzero iff bit 2 (C,
// the DCache enable) is set.
#define HAL_DCACHE_IS_ENABLED(_state_)                                  \
CYG_MACRO_START                                                         \
    register int reg;                                                   \
    asm volatile (                                                      \
        "nop; "                                                         \
        "nop; "                                                         \
        "nop; "                                                         \
        "nop; "                                                         \
        "nop; "                                                         \
        "mrc p15, 0, %0, c1, c0, 0;"                                    \
        : "=r"(reg)                                                     \
        :                                                               \
        );                                                              \
    (_state_) = (0 != (4 & reg));  /* Bit 2 is DCache enable */         \
CYG_MACRO_END
//-----------------------------------------------------------------------------
// Global control of Instruction cache

// Enable the instruction cache.
// Sets bit 12 (I: ICache) plus bits 0-1 (M: MMU, A: alignment faults)
// of the CP15 control register.
#define HAL_ICACHE_ENABLE_L1()                                          \
CYG_MACRO_START                                                         \
    asm volatile (                                                      \
        "mrc p15, 0, r1, c1, c0, 0;"                                    \
        "orr r1, r1, #0x1000;"                                          \
        "orr r1, r1, #0x0003;"  /* enable ICache (also ensures   */     \
                                /* that MMU and alignment faults */     \
                                /* are enabled)                  */     \
        "mcr p15, 0, r1, c1, c0, 0"                                     \
        :                                                               \
        :                                                               \
        : "r1" /* Clobber list */                                       \
        );                                                              \
CYG_MACRO_END
// Query the state of the instruction cache.
// Reads the CP15 control register into _state_: nonzero iff bit 12 (I,
// the ICache enable) is set.
#define HAL_ICACHE_IS_ENABLED(_state_)                                  \
CYG_MACRO_START                                                         \
    register cyg_uint32 reg;                                            \
    asm volatile (                                                      \
        "mrc p15, 0, %0, c1, c0, 0"                                     \
        : "=r"(reg)                                                     \
        :                                                               \
        );                                                              \
                                                                        \
    (_state_) = (0 != (0x1000 & reg));  /* Bit 12 is ICache enable */   \
CYG_MACRO_END
// Disable the instruction cache.
// Clears bit 12 (I) of the CP15 control register, then invalidates the
// ICache and the prefetch buffer; the trailing nops cover instructions
// that may already have been fetched through the cache.
#define HAL_ICACHE_DISABLE_L1()                                         \
CYG_MACRO_START                                                         \
    asm volatile (                                                      \
        "mrc p15, 0, r1, c1, c0, 0;"                                    \
        "bic r1, r1, #0x1000;"        /* disable ICache (but not MMU, etc) */ \
        "mcr p15, 0, r1, c1, c0, 0;"                                    \
        "mov r1, #0;"                                                   \
        "mcr p15, 0, r1, c7, c5, 0;"  /* flush ICache */                \
        "mcr p15, 0, r1, c7, c5, 4;"  /* flush prefetch buffer */       \
        "nop;"  /* next few instructions may be via cache */            \
        "nop;"                                                          \
        "nop;"                                                          \
        "nop;"                                                          \
        "nop;"                                                          \
        "nop"                                                           \
        :                                                               \
        :                                                               \
        : "r1" /* Clobber list */                                       \
        );                                                              \
CYG_MACRO_END
// Invalidate the entire instruction cache.
// Also flushes the instruction TLB and the prefetch buffer.
// (Discarding dirty lines is not an issue for the ICache.)
#define HAL_ICACHE_INVALIDATE_ALL_L1()                                  \
CYG_MACRO_START                                                         \
    asm volatile (                                                      \
        "mov r1, #0;"                                                   \
        "mcr p15, 0, r1, c7, c5, 0;"  /* flush ICache */                \
        "mcr p15, 0, r1, c8, c5, 0;"  /* flush ITLB only */             \
        "mcr p15, 0, r1, c7, c5, 4;"  /* flush prefetch buffer */       \
        "nop;"  /* next few instructions may be via cache */            \
        "nop;"                                                          \
        "nop;"                                                          \
        "nop;"                                                          \
        "nop;"                                                          \
        "nop;"                                                          \
        :                                                               \
        :                                                               \
        : "r1" /* Clobber list */                                       \
        );                                                              \
CYG_MACRO_END
// Synchronize the contents of the instruction cache with memory
// (which includes flushing out pending writes): push data to RAM via
// the DCache sync, then discard the now-possibly-stale ICache contents.
#define HAL_ICACHE_SYNC()                                               \
CYG_MACRO_START                                                         \
    HAL_DCACHE_SYNC();            /* ensure data gets to RAM */         \
    HAL_ICACHE_INVALIDATE_ALL();  /* forget all we know */              \
CYG_MACRO_END
// Query the state of the L2 cache: bit 0 of the L2CC control register.
#define HAL_L2CACHE_IS_ENABLED(_state_) \
    (_state_ = readl(L2CC_BASE_ADDR + L2_CACHE_CTL_REG) & 1)
#ifdef L2CC_ENABLED
// Enable the L2 cache controller.
#define HAL_ENABLE_L2()                                                 \
{                                                                       \
    writel(1, L2CC_BASE_ADDR + L2_CACHE_CTL_REG);                       \
}

// Disable the L2 cache controller.
#define HAL_DISABLE_L2()                                                \
{                                                                       \
    writel(0, L2CC_BASE_ADDR + L2_CACHE_CTL_REG);                       \
}

// Drain the L2: start a cache sync and busy-wait until it completes.
// No-op when the L2 is disabled.
#define HAL_SYNC_L2()                                                   \
{                                                                       \
    if ((readl(L2CC_BASE_ADDR + L2_CACHE_CTL_REG) & 1) != 0) {          \
        writel(0, L2CC_BASE_ADDR + L2_CACHE_SYNC_REG);                  \
        while ((readl(L2CC_BASE_ADDR + L2_CACHE_SYNC_REG) & 1) == 1);   \
    }                                                                   \
}

// Invalidate all L2 ways (0xFF way mask), wait for completion, then
// sync. No-op when the L2 is disabled.
#define HAL_INVALIDATE_L2()                                             \
{                                                                       \
    if ((readl(L2CC_BASE_ADDR + L2_CACHE_CTL_REG) & 1) != 0) {          \
        writel(0xFF, L2CC_BASE_ADDR + L2_CACHE_INV_WAY_REG);            \
        while ((readl(L2CC_BASE_ADDR + L2_CACHE_INV_WAY_REG) & 0xFF) != 0); \
        HAL_SYNC_L2();                                                  \
    }                                                                   \
}

// Clean and invalidate all L2 ways (0xFF way mask), wait for
// completion, then sync. No-op when the L2 is disabled.
#define HAL_CLEAN_INVALIDATE_L2()                                       \
{                                                                       \
    if ((readl(L2CC_BASE_ADDR + L2_CACHE_CTL_REG) & 1) != 0) {          \
        writel(0xFF, L2CC_BASE_ADDR + L2_CACHE_CLEAN_INV_WAY_REG);      \
        while ((readl(L2CC_BASE_ADDR + L2_CACHE_CLEAN_INV_WAY_REG) & 0xFF) != 0); \
        HAL_SYNC_L2();                                                  \
    }                                                                   \
}
#else // L2CC_ENABLED
/*********************** Exported macros *******************/
// NOTE(review): these combined L1+L2 macros call HAL_ENABLE_L2() etc.,
// which are only defined in the #ifdef L2CC_ENABLED branch above.  This
// #else branch therefore looks inverted (or relies on empty L2 stubs
// provided elsewhere) -- confirm against the variant HAL before relying
// on a !L2CC_ENABLED build.
#define HAL_DCACHE_ENABLE() {                                           \
    HAL_DCACHE_ENABLE_L1();                                             \
    HAL_ENABLE_L2();                                                    \
}

#define HAL_DCACHE_DISABLE() {                                          \
    HAL_DCACHE_DISABLE_L1();                                            \
    HAL_DISABLE_L2();                                                   \
}

#define HAL_DCACHE_INVALIDATE_ALL() {                                   \
    HAL_DCACHE_INVALIDATE_ALL_L1();                                     \
    HAL_CLEAN_INVALIDATE_L2();                                          \
}

// Sync both levels; the L2 is cleaned+invalidated rather than merely
// synced so no stale L2 contents survive.
#define HAL_DCACHE_SYNC() {                                             \
    HAL_DCACHE_SYNC_L1();                                               \
    /* don't just call HAL_SYNC_L2() */                                 \
    HAL_CLEAN_INVALIDATE_L2();                                          \
}

#define HAL_ICACHE_INVALIDATE_ALL() {                                   \
    HAL_ICACHE_INVALIDATE_ALL_L1();                                     \
    HAL_CLEAN_INVALIDATE_L2();                                          \
}

#define HAL_ICACHE_DISABLE() {                                          \
    HAL_ICACHE_DISABLE_L1();                                            \
}

#define HAL_ICACHE_ENABLE() {                                           \
    HAL_ICACHE_ENABLE_L1();                                             \
}
#endif // L2CC_ENABLED
/*
 * Translation Table Base Bit Masks
 */
#define ARM_TRANSLATION_TABLE_MASK      0xFFFFC000

/*
 * Domain Access Control Bit Masks: two bits per domain (0..15).
 * 0x0 = no access, 0x1 = client (permissions checked),
 * 0x3 = manager (permissions not checked).
 */
#define ARM_ACCESS_TYPE_NO_ACCESS(domain_num)  (0x0 << (domain_num)*2)
#define ARM_ACCESS_TYPE_CLIENT(domain_num)     (0x1 << (domain_num)*2)
#define ARM_ACCESS_TYPE_MANAGER(domain_num)    (0x3 << (domain_num)*2)
// First-level descriptor: fault entry (id bits == 0x0).
struct ARM_MMU_FIRST_LEVEL_FAULT {
    unsigned int id : 2;    // descriptor type, ARM_MMU_FIRST_LEVEL_FAULT_ID
    unsigned int sbz : 30;  // should be zero
};
#define ARM_MMU_FIRST_LEVEL_FAULT_ID 0x0
// First-level descriptor: coarse page-table entry (id bits == 0x1).
struct ARM_MMU_FIRST_LEVEL_PAGE_TABLE {
    unsigned int id : 2;             // descriptor type, ..._PAGE_TABLE_ID
    unsigned int imp : 2;            // implementation defined
    unsigned int domain : 4;         // domain number
    unsigned int sbz : 1;            // should be zero
    unsigned int base_address : 23;  // page-table base (upper address bits)
};
#define ARM_MMU_FIRST_LEVEL_PAGE_TABLE_ID 0x1
// First-level descriptor: 1MB section entry (id bits == 0x2).
struct ARM_MMU_FIRST_LEVEL_SECTION {
    unsigned int id : 2;             // descriptor type, ..._SECTION_ID
    unsigned int b : 1;              // bufferable
    unsigned int c : 1;              // cacheable
    unsigned int imp : 1;            // implementation defined
    unsigned int domain : 4;         // domain number
    unsigned int sbz0 : 1;           // should be zero
    unsigned int ap : 2;             // access permissions
    unsigned int sbz1 : 8;           // should be zero
    unsigned int base_address : 12;  // section base, address bits [31:20]
};
#define ARM_MMU_FIRST_LEVEL_SECTION_ID 0x2
// First-level descriptor: reserved entry (id bits == 0x3).
struct ARM_MMU_FIRST_LEVEL_RESERVED {
    unsigned int id : 2;    // descriptor type, ..._RESERVED_ID
    unsigned int sbz : 30;  // should be zero
};
#define ARM_MMU_FIRST_LEVEL_RESERVED_ID 0x3

// Address of the table_index'th 4-byte first-level descriptor in the
// translation table at ttb_base.
#define ARM_MMU_FIRST_LEVEL_DESCRIPTOR_ADDRESS(ttb_base, table_index) \
    (unsigned long *)((unsigned long)(ttb_base) + ((table_index) << 2))

// First-level translation table: 4096 entries * 4 bytes = 16KB.
#define ARM_FIRST_LEVEL_PAGE_TABLE_SIZE 0x4000
// Install one 1MB section mapping: build a section descriptor mapping
// virtual_base -> actual_base (both in units of 1MB sections, i.e.
// address >> 20) with the given cacheable/bufferable/ap attributes in
// domain 0, and store it into the translation table at ttb_base.
#define ARM_MMU_SECTION(ttb_base, actual_base, virtual_base,            \
                        cacheable, bufferable, perm)                    \
    CYG_MACRO_START                                                     \
        register union ARM_MMU_FIRST_LEVEL_DESCRIPTOR desc;             \
                                                                        \
        desc.word = 0;                                                  \
        desc.section.id = ARM_MMU_FIRST_LEVEL_SECTION_ID;               \
        desc.section.domain = 0;                                        \
        desc.section.c = (cacheable);                                   \
        desc.section.b = (bufferable);                                  \
        desc.section.ap = (perm);                                       \
        desc.section.base_address = (actual_base);                      \
        *ARM_MMU_FIRST_LEVEL_DESCRIPTOR_ADDRESS(ttb_base, (virtual_base)) \
            = desc.word;                                                \
    CYG_MACRO_END

// Map `size` consecutive 1MB sections starting at abase -> vbase.
// A variable named `ttb_base` must be in scope at the expansion site.
#define X_ARM_MMU_SECTION(abase,vbase,size,cache,buff,access)           \
    {                                                                   \
        int i; int j = abase; int k = vbase;                            \
        for (i = size; i > 0 ; i--,j++,k++) {                           \
            ARM_MMU_SECTION(ttb_base, j, k, cache, buff, access);       \
        }                                                               \
    }
union ARM_MMU_FIRST_LEVEL_DESCRIPTOR {
- unsigned long word;
- struct ARM_MMU_FIRST_LEVEL_FAULT fault;
- struct ARM_MMU_FIRST_LEVEL_PAGE_TABLE page_table;
- struct ARM_MMU_FIRST_LEVEL_SECTION section;
- struct ARM_MMU_FIRST_LEVEL_RESERVED reserved;
+ unsigned long word;
+ struct ARM_MMU_FIRST_LEVEL_FAULT fault;
+ struct ARM_MMU_FIRST_LEVEL_PAGE_TABLE page_table;
+ struct ARM_MMU_FIRST_LEVEL_SECTION section;
+ struct ARM_MMU_FIRST_LEVEL_RESERVED reserved;
};
// Values for the C (cacheable) and B (bufferable) section attribute bits.
#define ARM_UNCACHEABLE    0
#define ARM_CACHEABLE      1
#define ARM_UNBUFFERABLE   0
#define ARM_BUFFERABLE     1

// AP (access permission) field encodings, named privileged_user.
// NOTE(review): the three RO variants all use encoding 0 (same as
// NONE_NONE); read-only behaviour then depends on the CP15 S/R bits --
// confirm this is intended.
#define ARM_ACCESS_PERM_NONE_NONE  0
#define ARM_ACCESS_PERM_RO_NONE    0
#define ARM_ACCESS_PERM_RO_RO      0
#define ARM_ACCESS_PERM_RW_NONE    1
#define ARM_ACCESS_PERM_RW_RO      2
#define ARM_ACCESS_PERM_RW_RW      3
/*
 * Initialization for the Domain Access Control Register:
 * domain 0 is manager (no permission checks), all other domains are
 * no-access.
 */
#define ARM_ACCESS_DACR_DEFAULT (       \
    ARM_ACCESS_TYPE_MANAGER(0)    |     \
    ARM_ACCESS_TYPE_NO_ACCESS(1)  |     \
    ARM_ACCESS_TYPE_NO_ACCESS(2)  |     \
    ARM_ACCESS_TYPE_NO_ACCESS(3)  |     \
    ARM_ACCESS_TYPE_NO_ACCESS(4)  |     \
    ARM_ACCESS_TYPE_NO_ACCESS(5)  |     \
    ARM_ACCESS_TYPE_NO_ACCESS(6)  |     \
    ARM_ACCESS_TYPE_NO_ACCESS(7)  |     \
    ARM_ACCESS_TYPE_NO_ACCESS(8)  |     \
    ARM_ACCESS_TYPE_NO_ACCESS(9)  |     \
    ARM_ACCESS_TYPE_NO_ACCESS(10) |     \
    ARM_ACCESS_TYPE_NO_ACCESS(11) |     \
    ARM_ACCESS_TYPE_NO_ACCESS(12) |     \
    ARM_ACCESS_TYPE_NO_ACCESS(13) |     \
    ARM_ACCESS_TYPE_NO_ACCESS(14) |     \
    ARM_ACCESS_TYPE_NO_ACCESS(15) )

#if 0  // currently unused; kept for reference
/*
 * Translate the virtual address of RAM space to a physical address.
 * Dependent on the mapping established by hal_mmu_init: low addresses
 * alias into SDRAM at 0x40000000, and the 0x08000000 bit selects the
 * uncacheable alias.
 */
static unsigned long __inline__ hal_virt_to_phy(unsigned long virt)
{
    if (virt < 0x08000000) {
        return virt | 0x40000000;
    }
    if ((virt & 0xF0000000) == 0x40000000) {
        return virt & ~0x08000000;
    }
    return virt;
}

/*
 * Return the uncacheable alias of a physical SDRAM address.
 */
static unsigned long __inline__ hal_ioremap_nocache(unsigned long phy)
{
    /* 0x48000000~0x48FFFFFF is uncacheable memory space mapped to SDRAM */
    if ((phy & 0xF0000000) == 0x40000000) {
        phy |= 0x08000000;
    }
    return phy;
}
#endif
// ------------------------------------------------------------------------
#endif // ifndef CYGONCE_HAL_MM_H