#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of the current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/* We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* The actual HW switching method differs between the various
	 * sub-architectures.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	switch_slb(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif

}

#define deactivate_mm(tsk, mm)			do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of the current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
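
/*
 * Illustrative sketch only (not part of this header): on CONFIG_PPC_BOOK3S_64
 * the out-of-line init_new_context()/destroy_context() pair declared above is
 * expected to wrap the low-level __init_new_context()/__destroy_context()
 * context-id allocator roughly as follows.  The mm->context.id field and the
 * error handling shown here are assumptions made for the example, not
 * definitions provided by this file.
 *
 *	int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 *	{
 *		int index = __init_new_context();	// allocate a context id
 *
 *		if (index < 0)
 *			return index;			// allocation failed, e.g. -ENOMEM
 *
 *		mm->context.id = index;			// remember the id for this mm
 *		return 0;
 *	}
 *
 *	void destroy_context(struct mm_struct *mm)
 *	{
 *		__destroy_context(mm->context.id);	// release the id on teardown
 *	}
 */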