/*
 * Page table allocation functions
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
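
/*
 * Region and segment tables (crst tables) have 2048 entries of 8 bytes
 * each, i.e. they span four pages, hence the order-2 allocation.
 */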
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}
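
/*
 * The asce_limit encodes the number of page table levels: 1UL << 31
 * stands for 2 levels, 1UL << 42 for 3 levels, 1UL << 53 for 4 levels
 * and -PAGE_SIZE (the top of the address space) for 5 levels.
 */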
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	BUG_ON(mm->context.asce_limit < (1UL << 42));
	if (end >= TASK_SIZE_MAX)
		return -ENOMEM;
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == (1UL << 42)) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = 1UL << 31;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}
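
/*
 * A 4K page can hold two 2K page tables. Bits 0 and 1 of
 * page->_mapcount track which 2K halves are in use, bits 4 and 5 mark
 * halves that were freed with page_table_free_rcu() and are waiting
 * for the grace period to expire. atomic_xor_bits() flips these bits
 * locklessly.
 */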
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.pgtable_lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.pgtable_lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.pgtable_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.pgtable_lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.pgtable_lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.pgtable_lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}
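
/*
 * The table pointer passed to tlb_remove_table() carries the fragment
 * in its two low bits: 1 or 2 selects the lower or upper 2K half, 3
 * means a full 4K page table with pgstes, 0 a crst table.
 * __tlb_remove_table() decodes this to decide what to free.
 */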
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.pgtable_lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.pgtable_lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}
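
/*
 * Tables queued with tlb_remove_table() are collected in an
 * mmu_table_batch and released via call_rcu_sched() once the batch is
 * flushed. If no batch page can be allocated, tlb_remove_table_one()
 * synchronizes with an IPI instead.
 */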
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}
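
/*
 * The batch page is allocated opportunistically with GFP_NOWAIT; if
 * that fails, the table is released through the synchronous fallback
 * path instead of blocking for memory.
 */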
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}