/*
 * This file contains common routines for dealing with the freeing of
 * page tables.
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;

/* A page-sized batch of page tables queued for freeing after an RCU
 * grace period; tables[] is a flexible array filling out the rest of
 * the page. */
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

/* Number of pgtable_free_t entries that fit in the remainder of the
 * batch page. */
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))

static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	/* Run a no-op on every CPU and wait for completion, so that no
	 * CPU can still be speculatively walking this page table when
	 * we free it synchronously below. */
	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(pgf);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	/* If no other CPU can be using this mm, nobody else can be
	 * walking its page tables, so free immediately rather than
	 * deferring through RCU. */
	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	/* Otherwise queue the page table on this CPU's batch,
	 * allocating a fresh batch page if needed. */
	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
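
/*
 * Illustrative sketch (not part of the original file): roughly how the
 * mm core is expected to drive the routines above during an unmap or
 * address-space teardown. tlb_gather_mmu(), tlb_finish_mmu() and the
 * arch tlb_flush() hook are real APIs of this era; the skeleton below
 * is an assumption for illustration only, not the actual call site.
 *
 *	struct mmu_gather *tlb;
 *
 *	tlb = tlb_gather_mmu(mm, 1);
 *	...
 *	pgtable_free_tlb(tlb, pgf);	// batches, or frees immediately
 *	...
 *	tlb_finish_mmu(tlb, start, end);	// arch tlb_flush() is
 *						// expected to call
 *						// pte_free_finish()
 *
 * pte_free_finish() must run before the gather ends and preemption is
 * re-enabled, since the partially filled batch pointer is per-CPU.
 */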