#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/kmsan-checks.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		struct encoded_page **pages = batch->encoded_pages;

		do {
			/*
			 * Limit the number of pages freed per iteration, so
			 * that cond_resched() runs regularly even when
			 * PAGE_SIZE > 4K makes the batches large.
			 */
			unsigned int nr = min(512U, batch->nr);

			free_pages_and_swap_cache(pages, nr);
			pages += nr;
			batch->nr -= nr;

			cond_resched();
		} while (batch->nr);
	}
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}
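
/*
 * Illustrative sketch: once the on-stack batch and one allocated batch have
 * filled up, the gather list built by tlb_next_batch() looks like:
 *
 *	tlb->local  --->  batch A  --->  batch B  <--- tlb->active
 *
 * tlb_batch_pages_flush() frees the gathered pages of every batch and rewinds
 * tlb->active to &tlb->local; tlb_batch_list_free() then hands the batch
 * pages themselves (A and B) back to the page allocator.
 */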

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	batch->encoded_pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page));

	return false;
}

#endif /* MMU_GATHER_NO_GATHER */
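
/*
 * For illustration: callers normally reach __tlb_remove_page_size() through
 * wrappers in <asm-generic/tlb.h>, roughly along these lines:
 *
 *	static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 *						struct page *page, int page_size)
 *	{
 *		if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
 *			tlb_flush_mmu(tlb);
 *	}
 *
 * i.e. a "true" return value means "no more room" and triggers an immediate
 * flush that empties the batches before gathering continues.
 */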

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
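
/*
 * Illustrative interleaving (a sketch of the scheme described above, assuming
 * a software walker that disables IRQs, which holds off RCU-sched grace
 * periods):
 *
 *	CPU 0 (munmap)                     CPU 1 (gup_fast)
 *	--------------                     ----------------
 *	                                   local_irq_disable()
 *	pmd_clear(pmd)                     reads the old pmd value,
 *	tlb_remove_table(tlb, table)         may still walk the old table
 *	  -> call_rcu(...)
 *	                                   local_irq_enable()
 *	grace period elapses  <--- cannot complete before CPU 1 enables IRQs
 *	  -> __tlb_remove_table(table)     only now is the table freed
 */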

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

static void tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
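
/*
 * For reference, the batch that tlb_remove_table() fills above is sized to
 * exactly one page; <asm-generic/tlb.h> defines it roughly as:
 *
 *	struct mmu_table_batch {
 *		struct rcu_head		rcu;	(RCU_TABLE_FREE only)
 *		unsigned int		nr;
 *		void			*tables[];
 *	};
 *
 *	#define MAX_TABLE_BATCH \
 *		((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
 *
 * which is why __tlb_remove_table_free() can return the whole batch with a
 * single free_page().
 */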

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			     bool fullmm)
{
	/*
	 * struct mmu_gather contains 7 1-bit fields packed into a 32-bit
	 * unsigned int value. The remaining 25 bits remain uninitialized
	 * and are never used, but KMSAN updates the origin for them in
	 * zap_pXX_range() in mm/memory.c, thus creating very long origin
	 * chains. This is technically correct, but consumes too much memory.
	 * Unpoisoning the whole structure will prevent creating such chains.
	 */
	kmsan_unpoison_memory(tlb, sizeof(*tlb));
	tlb->mm = mm;
	tlb->fullmm = fullmm;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr = 0;
	tlb->local.max = ARRAY_SIZE(tlb->__pages);
	tlb->active = &tlb->local;
	tlb->batch_count = 0;
#endif

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, false);
}

/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, true);
}
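
/*
 * For illustration, a typical tear-down caller (cf. unmap_region() in
 * mm/mmap.c) drives this API roughly as follows:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	unmap_vmas(&tlb, ...);		gathers pages while clearing PTEs
 *	free_pgtables(&tlb, ...);	gathers the page-table pages
 *	tlb_finish_mmu(&tlb);		flushes the TLB, then frees everything
 */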

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
	/*
	 * If parallel threads do PTE changes on the same range under a
	 * non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and be left with stale TLB entries. So flush the TLB forcefully
	 * if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; these
	 * need to force flush everything in the given range. Otherwise we
	 * may be left with stale TLB entries on architectures, e.g. aarch64,
	 * that can specify which level of the TLB to flush.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by
		 * avoiding multiple CPUs spamming TLBI messages at the
		 * same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}
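
/*
 * Sketch of the race the mm_tlb_flush_nested() path above guards against:
 * two threads zap overlapping ranges (e.g. madvise(MADV_DONTNEED)) with
 * mmap_lock held for read:
 *
 *	CPU 0                              CPU 1
 *	clears PTEs, gathers pages         sees the already-cleared PTEs,
 *	                                     so its gathered range stays empty
 *	                                   tlb_finish_mmu()
 *	                                     without the forced flush it would
 *	                                     return with stale TLB entries
 *	                                     still live
 *	tlb_finish_mmu()
 */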