/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are allocated as contiguous kernel memory using gfp
 * allocation.  This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig (an
 *   illustrative sketch follows this comment).
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined.  It's
 *   not compatible with PER_CPU_KM.  EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported.  When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is a power-of-two multiple of
 *   PAGE_SIZE.  Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   the chunk size is not so aligned.  percpu-km code will whine
 *   about it (a worked example follows pcpu_verify_alloc_info()
 *   below).
 */
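
/*
 * An illustrative sketch of the Kconfig side, hedged - the exact
 * symbol definition and its placement vary by tree and architecture:
 *
 *	config NEED_PER_CPU_KM
 *		depends on !SMP
 *		bool
 *		default y
 *
 * When the symbol is set, the percpu core pulls in this file instead
 * of the page-mapped percpu-vm backend.
 */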

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>

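/*
 * For orientation, a hedged sketch of the caller side (the exact call
 * sites live in the percpu core, not in this file):
 *
 *	chunk = pcpu_create_chunk();
 *	...
 *	pcpu_populate_chunk(chunk, off, size);	// hand out zeroed space
 *	...
 *	pcpu_depopulate_chunk(chunk, off, size);// reclaim, a no-op here
 *
 * The functions below are the kernel-memory backed implementations of
 * those hooks.
 */
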
/*
 * Population is trivial for percpu-km: the whole chunk is physically
 * backed from the moment it is created, so all that is left to do is
 * to zero the requested region in every possible CPU's unit.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	return 0;
}

/*
 * Depopulation is a no-op - the contiguous backing pages are only
 * returned when the whole chunk is destroyed.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	/* nada */
}

static struct pcpu_chunk *pcpu_create_chunk(void)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	int i;

	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	/* back the whole chunk with a single contiguous allocation */
	pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	/* bias base_addr so that unit 0 starts at the allocated pages */
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
	return chunk;
}
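
/*
 * A hedged sketch of the base_addr bias above.  pcpu_chunk_addr() and
 * pcpu_unit_offsets[] belong to the percpu core, not to this file; for
 * the unit of the first CPU in the single group, pcpu_unit_offsets[]
 * equals pcpu_group_offsets[0], so
 *
 *	pcpu_chunk_addr(chunk, cpu, 0)
 *		== chunk->base_addr + pcpu_unit_offsets[cpu]
 *		== page_address(pages)
 *
 * i.e. the first unit lands at the start of the allocation and the
 * remaining units follow at unit_size intervals inside it.
 */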

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (chunk && chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}

/* chunk memory comes straight from the kernel linear mapping */
static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		printk(KERN_CRIT "percpu: can't handle more than one group\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n",
		       alloc_pages - nr_pages);

	return 0;
}
109