xref: /linux/kernel/smp.c (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44)
/*
 * Generic helpers for SMP IPI calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

enum {
	CSD_FLAG_WAIT		= 0x01,
	CSD_FLAG_ALLOC		= 0x02,
};

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	cpumask_t cpumask;
	struct rcu_head rcu_head;
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};

static int __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
	return 0;
}
early_initcall(init_call_single_data);

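/*
 * Spin until the IPI handler(s) clear CSD_FLAG_WAIT, i.e. until ->func
 * has completed on the target CPU(s).
 */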
static void csd_flag_wait(struct call_single_data *data)
{
	/* Wait for response */
	do {
		/*
		 * We need to see the flags store in the IPI handler
		 */
		smp_mb();
		if (!(data->flags & CSD_FLAG_WAIT))
			break;
		cpu_relax();
	} while (1);
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}
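
/*
 * Illustrative sketch of how callers in this file use generic_exec_single():
 * the csd is filled in first, with CSD_FLAG_WAIT set when the caller needs
 * to block until the function has run (names below are placeholders).
 *
 *	struct call_single_data csd = {
 *		.flags	= CSD_FLAG_WAIT,
 *		.func	= some_func,
 *		.info	= some_info,
 *	};
 *
 *	generic_exec_single(target_cpu, &csd);
 */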

static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);

	kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpu_isset(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpu_clear(cpu, data->cpumask);
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		if (refs)
			continue;

		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * serialize stores to data with the flag clear
			 * and wakeup
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		} else
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Need to see other stores to list head for checking whether
	 * list is empty without holding q->lock
	 */
	smp_mb();
	while (!list_empty(&q->list)) {
		unsigned int data_flags;

		spin_lock(&q->lock);
		list_replace_init(&q->list, &list);
		spin_unlock(&q->lock);

		while (!list_empty(&list)) {
			struct call_single_data *data;

			data = list_entry(list.next, struct call_single_data,
						list);
			list_del(&data->list);

			/*
			 * 'data' can be invalid after this call if flags == 0
			 * (when called through generic_exec_single()), so save
			 * them away before making the call.
			 */
			data_flags = data->flags;

			data->func(data->info);

			if (data_flags & CSD_FLAG_WAIT) {
				smp_wmb();
				data->flags &= ~CSD_FLAG_WAIT;
			} else if (data_flags & CSD_FLAG_ALLOC)
				kfree(data);
		}
		/*
		 * See comment on outer loop
		 */
		smp_mb();
	}
}
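
/*
 * Illustrative sketch (handler names are hypothetical): architecture code
 * wires its function-call IPI vectors to the two generic handlers above,
 * with interrupts disabled in hardirq context, roughly like this:
 *
 *	void arch_call_function_interrupt(void)
 *	{
 *		irq_enter();
 *		generic_smp_call_function_interrupt();
 *		irq_exit();
 *	}
 *
 *	void arch_call_function_single_interrupt(void)
 *	{
 *		irq_enter();
 *		generic_smp_call_function_single_interrupt();
 *		irq_exit();
 *	}
 *
 * The matching arch_send_call_function_ipi() and
 * arch_send_call_function_single_ipi() hooks raise those vectors on the
 * target CPUs.
 */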

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		struct call_single_data *data = NULL;

		if (!wait) {
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
		}
		if (!data) {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	}

	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
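
/*
 * Example (illustrative only; do_work() is a hypothetical callback):
 *
 *	static void do_work(void *info)
 *	{
 *		printk(KERN_INFO "do_work on CPU %d\n", smp_processor_id());
 *	}
 *
 * Running do_work(NULL) on CPU 2 and waiting for it to complete:
 *
 *	smp_call_function_single(2, do_work, NULL, 1);
 *
 * The callback runs in hardirq context on the remote CPU, so it must be
 * fast and must not sleep.
 */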

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside other
 * structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}
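
/*
 * Example (illustrative only; struct my_dev and its helpers are
 * hypothetical): embedding the call_single_data in a larger structure
 * avoids any allocation on the fast path.
 *
 *	struct my_dev {
 *		struct call_single_data csd;
 *		int value;
 *	};
 *
 *	static void my_remote_update(void *info)
 *	{
 *		struct my_dev *dev = info;
 *
 *		dev->value++;
 *	}
 *
 *	static void kick_cpu(struct my_dev *dev, int cpu)
 *	{
 *		dev->csd.func = my_remote_update;
 *		dev->csd.info = dev;
 *		dev->csd.flags = 0;
 *		__smp_call_function_single(cpu, &dev->csd);
 *	}
 *
 * With flags == 0 the call does not wait, so the caller must not reuse
 * the csd until the IPI has been processed on the target CPU.
 */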

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of CPUs to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
			   int wait)
{
	struct call_function_data d;
	struct call_function_data *data = NULL;
	cpumask_t allbutself;
	unsigned long flags;
	int cpu, num_cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	cpu = smp_processor_id();
	allbutself = cpu_online_map;
	cpu_clear(cpu, allbutself);
	cpus_and(mask, mask, allbutself);
	num_cpus = cpus_weight(mask);

	/*
	 * If zero CPUs, return. If just a single CPU, turn this request
	 * into a targeted single call instead since it's faster.
	 */
	if (!num_cpus)
		return 0;
	else if (num_cpus == 1) {
		cpu = first_cpu(mask);
		return smp_call_function_single(cpu, func, info, wait);
	}

	if (!wait) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (data)
			data->csd.flags = CSD_FLAG_ALLOC;
	}
	if (!data) {
		data = &d;
		data->csd.flags = CSD_FLAG_WAIT;
		wait = 1;
	}

	spin_lock_init(&data->lock);
	data->csd.func = func;
	data->csd.info = info;
	data->refs = num_cpus;
	data->cpumask = mask;

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi(mask);

	/* optionally wait for the CPUs to complete */
	if (wait)
		csd_flag_wait(&data->csd);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
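
/*
 * Example (illustrative only; the functions below are hypothetical):
 * run flush_local_state() on CPUs 0-3 and wait for completion. The
 * calling CPU is filtered out of the mask internally.
 *
 *	static void flush_local_state(void *info)
 *	{
 *		printk(KERN_INFO "flushing on CPU %d\n", smp_processor_id());
 *	}
 *
 *	static void flush_first_four(void)
 *	{
 *		cpumask_t mask = CPU_MASK_NONE;
 *		int cpu;
 *
 *		for (cpu = 0; cpu < 4; cpu++)
 *			cpu_set(cpu, mask);
 *
 *		preempt_disable();
 *		smp_call_function_mask(mask, flush_local_state, NULL, 1);
 *		preempt_enable();
 *	}
 *
 * Preemption is disabled around the call, as the comment above requires.
 */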

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target CPU calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	int ret;

	preempt_disable();
	ret = smp_call_function_mask(cpu_online_map, func, info, wait);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(smp_call_function);
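
/*
 * Example (illustrative only; drain_caches() is a hypothetical callback).
 * smp_call_function() only targets the *other* online CPUs, so a caller
 * that also needs the work done locally typically invokes the function
 * directly as well:
 *
 *	static void drain_caches(void *info)
 *	{
 *		printk(KERN_INFO "draining on CPU %d\n", smp_processor_id());
 *	}
 *
 *	static void drain_everywhere(void)
 *	{
 *		smp_call_function(drain_caches, NULL, 1);
 *		drain_caches(NULL);
 *	}
 */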
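/*
 * These helpers let architecture code hold call_function_lock around
 * operations that must not race with smp_call_function_mask() adding
 * entries to call_function_queue, e.g. while a CPU is being brought
 * online.
 */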
void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}