/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

/*
 * Initialize the architecture-specific part of a new mm_struct:
 * reset the attach count and the deferred-flush flag, set up the
 * address-space-control-element (ASCE) bits for the new address space
 * (region-third table type on 64-bit), inherit extended page table
 * (pgste) mode from the creating mm, and initialize the top-level
 * region/segment table.  Returns 0 (cannot fail).
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
	if (current->mm && current->mm->context.alloc_pgste) {
		/*
		 * alloc_pgste indicates, that any NEW context will be created
		 * with extended page tables. The old context is unchanged. The
		 * page table allocation and the page table operations will
		 * look at has_pgste to distinguish normal and extended page
		 * tables. The only way to create extended page tables is to
		 * set alloc_pgste and then create a new context (e.g. dup_mm).
		 * The page table allocation is called after init_new_context
		 * and if has_pgste is set, it will create extended page
		 * tables.
		 */
		mm->context.has_pgste = 1;
		mm->context.alloc_pgste = 1;
	} else {
		mm->context.has_pgste = 0;
		mm->context.alloc_pgste = 0;
	}
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}

/* No per-mm context state needs tearing down on s390. */
#define destroy_context(mm)             do { } while (0)

/*
 * LOAD CONTROL opcode: "lctl" loads 32-bit control registers on 31-bit
 * kernels, "lctlg" loads the full 64-bit control registers on 64-bit.
 */
#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif

/*
 * Make mm's page tables the current user address space: publish the
 * ASCE (asce_bits | physical address of the top-level table) in the
 * lowcore, load it into the control register selected by the kernel's
 * addressing mode — CR1 (primary space) unless running in home-space
 * mode, then CR13 (home space) — and restore the task's uaccess
 * address space limit.  tsk is unused here.
 */
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
	pgd_t *pgd = mm->pgd;

	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
	if (user_mode != HOME_SPACE_MODE) {
		/* Load primary space page table origin. */
		asm volatile(LCTL_OPCODE" 1,1,%0\n"
			     : : "m" (S390_lowcore.user_asce) );
	} else
		/* Load home space page table origin. */
		asm volatile(LCTL_OPCODE" 13,13,%0"
			     : : "m" (S390_lowcore.user_asce) );
	set_fs(current->thread.mm_segment);
}

/*
 * Switch this CPU from prev's address space to next's: mark the CPU in
 * next's cpumask, install next's ASCE via update_mm(), move the attach
 * count from prev to next, and perform any TLB flush that was deferred
 * (flush_mm set) while next was not attached to any CPU.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	update_mm(next, tsk);
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
	atomic_inc(&next->context.attach_count);
	/* Check for TLBs not flushed yet */
	if (next->context.flush_mm)
		__tlb_flush_mm(next);
}

/* No lazy-TLB bookkeeping needed on s390. */
#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate next for the current task; identical to a full switch_mm(). */
static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

/*
 * Called when an mm is duplicated (fork).  On 64-bit, if the parent's
 * address space limit is below the new mm's, shrink the new mm's
 * region table down to the parent's limit so the child does not end up
 * with a larger address space than the parent had.
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
	if (oldmm->context.asce_limit < mm->context.asce_limit)
		crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}

/* Nothing to do when an mm's mappings are torn down. */
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

#endif /* __S390_MMU_CONTEXT_H */