/*
 *  arch/s390/mm/pgalloc.c
 *
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

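/*
 * The vm.allocate_pgste sysctl forces new mm contexts to allocate full
 * 4K page tables with PGSTEs, which is what KVM needs to back guests.
 * proc_dointvec_minmax (rather than plain proc_dointvec, which ignores
 * extra1/extra2) keeps the value clamped to 0 or 1.
 */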
static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

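/*
 * CRST = combined region and segment table. Region and segment tables
 * hold 2048 eight-byte entries, i.e. 16K, hence the order-2 (four page)
 * allocations below.
 */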
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

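/*
 * Runs on every CPU via on_each_cpu(): CPUs currently executing in the
 * upgraded mm reload the user ASCE so the new top-level table takes
 * effect, and every CPU flushes its local TLB.
 */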
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

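/*
 * Grow the address space from 3 to 4 page table levels: allocate a new
 * region-second table, let it point to the old top level via
 * pgd_populate(), then switch mm->pgd and the ASCE over to it. The
 * address space limit moves from 4TB (1UL << 42) to 8PB (1UL << 53).
 */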
int crst_table_upgrade(struct mm_struct *mm)
{
	unsigned long *table, *pgd;

	/* upgrade should only happen from 3 to 4 levels */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;

	spin_lock_bh(&mm->page_table_lock);
	pgd = (unsigned long *) mm->pgd;
	crst_table_init(table, _REGION2_ENTRY_EMPTY);
	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
	mm->pgd = (pgd_t *) table;
	mm->context.asce_limit = 1UL << 53;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
	mm->task_size = mm->context.asce_limit;
	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}

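/*
 * Shrink a 3-level address space to 2 levels for 31-bit compat tasks:
 * the top-level table is replaced by the segment table its first entry
 * points to, and the ASCE limit drops to 2GB (1UL << 31).
 */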
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = 1UL << 31;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	mm->task_size = mm->context.asce_limit;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

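/*
 * Atomically XOR @bits into @v with a cmpxchg loop and return the new
 * value. Used below to flip the allocation/pending-free bits in
 * page->_mapcount without holding a lock.
 */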
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
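/*
 * An s390 page table is 2K (256 entries of 8 bytes), so two of them fit
 * into one 4K page unless PGSTEs are needed, in which case the upper 2K
 * holds the PGSTEs. For the split case, page->_mapcount encodes the
 * state of the two fragments:
 *
 *	bit 0/1: lower/upper 2K fragment is allocated
 *	bit 4/5: lower/upper 2K fragment awaits an RCU grace period
 *		 (see page_table_free_rcu and __tlb_remove_table)
 *
 * Pages with at least one free fragment sit on mm->context.pgtable_list.
 */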
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.list_lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.list_lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.list_lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.list_lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.list_lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

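/*
 * Deferred variant of page_table_free() for page tables that may still
 * be reachable by a concurrent lockless walker: the table is queued on
 * the mmu_gather batch and only released after a grace period. Which
 * 2K fragment (or full PGSTE page, tag 3) is being freed is encoded in
 * the low bits of the address handed to tlb_remove_table().
 */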
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.list_lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

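/*
 * The grace period has expired: decode the tag bits from the table
 * address and actually release the memory. A 2K fragment frees the
 * page only once its sibling fragment is gone as well.
 */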
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd or pud */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

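/*
 * Tables queued via tlb_remove_table() are collected in a batch page
 * and handed to call_rcu_sched() in one go. If no batch page can be
 * allocated, tlb_remove_table_one() falls back to an IPI broadcast that
 * synchronizes with IRQ-disabled walkers before freeing immediately.
 */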
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}