#ifndef _ASM_POWERPC_PAGE_32_H
#define _ASM_POWERPC_PAGE_32_H

#include <asm/cache.h>

#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
#endif
#endif

/* Default protection flags for 32-bit data/heap mappings. */
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32

#ifdef CONFIG_NOT_COHERENT_CACHE
/* DMA buffers must be cache-line aligned when the cache is not DMA-coherent. */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
#endif

#ifdef CONFIG_PTE_64BIT
#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
#else
#define PTE_FLAGS_OFFSET	0
#endif

/* log2 of the number of PTEs in a page table page. */
#ifdef CONFIG_PPC_256K_PAGES
#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2 - 2)	/* 1/4 of a page */
#else
#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2)	/* full page */
#endif

#ifndef __ASSEMBLY__
/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#else
typedef unsigned long pte_basic_t;
#endif

/*
 * Clear page using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 */
static inline void clear_page(void *addr)
{
	unsigned int i;

	for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
		dcbz(addr);
}
extern void copy_page(void *to, void *from);

#include <asm-generic/getorder.h>

/* log2 of the size (in bytes) of a PGD/PTE entry. */
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_32_H */