xref: /linux/kernel/smp.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

enum {
	CSD_FLAG_WAIT		= 0x01,
	CSD_FLAG_ALLOC		= 0x02,
};

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	cpumask_t cpumask;
	struct rcu_head rcu_head;
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};

static int __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
	return 0;
}
early_initcall(init_call_single_data);

static void csd_flag_wait(struct call_single_data *data)
{
	/* Wait for response */
	do {
		/*
		 * We need to see the store to ->flags done by the IPI handler
		 */
		smp_mb();
		if (!(data->flags & CSD_FLAG_WAIT))
			break;
		cpu_relax();
	} while (1);
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * An IPI is needed only if the queue was empty; otherwise the target
	 * CPU already has one pending for the earlier entries and will drain
	 * the whole list.
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}
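
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the calling contract described above -- ->func, ->info and ->flags are
 * filled in before the element is queued, and CSD_FLAG_WAIT makes the call
 * synchronous so that on-stack data stays valid.  The names
 * example_cpu_id_func() and example_exec_on() are hypothetical.
 */
static void example_cpu_id_func(void *info)
{
	/* Runs on the target CPU, in IPI context with interrupts disabled */
	*(int *)info = smp_processor_id();
}

static void example_exec_on(int cpu, int *result)
{
	struct call_single_data csd;

	csd.flags = CSD_FLAG_WAIT;	/* on-stack data, so we must wait */
	csd.func = example_cpu_id_func;
	csd.info = result;
	/*
	 * A real caller (see smp_call_function_single() below) also pins
	 * itself with get_cpu() and checks that @cpu is online first.
	 */
	generic_exec_single(cpu, &csd);
}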

static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);

	kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpu_isset(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpu_clear(cpu, data->cpumask);
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		if (refs)
			continue;

		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * serialize stores to data with the flag clear
			 * and wakeup
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		}
		if (data->csd.flags & CSD_FLAG_ALLOC)
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}
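
/*
 * Illustrative sketch (editor's example, not taken from any particular
 * architecture): roughly how the arch-level vector behind
 * arch_send_call_function_ipi() is expected to enter the handler above.
 * The entry point name is hypothetical; the essential points are that the
 * IPI is acknowledged by the arch code and that
 * generic_smp_call_function_interrupt() runs with interrupts disabled.
 * irq_enter()/irq_exit() come from <linux/hardirq.h>.
 */
void example_arch_call_function_ipi_entry(void)
{
	/* arch-specific acknowledgement of the IPI would go here */
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}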

/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Need to see other stores to list head for checking whether
	 * list is empty without holding q->lock
	 */
	smp_mb();
	while (!list_empty(&q->list)) {
		unsigned int data_flags;

		spin_lock(&q->lock);
		list_replace_init(&q->list, &list);
		spin_unlock(&q->lock);

		while (!list_empty(&list)) {
			struct call_single_data *data;

			data = list_entry(list.next, struct call_single_data,
						list);
			list_del(&data->list);

			/*
			 * 'data' can be invalid after this call if its
			 * flags == 0 (when called through
			 * generic_exec_single()), so save the flags away
			 * before making the call.
			 */
			data_flags = data->flags;

			data->func(data->info);

			if (data_flags & CSD_FLAG_WAIT) {
				smp_wmb();
				data->flags &= ~CSD_FLAG_WAIT;
			} else if (data_flags & CSD_FLAG_ALLOC)
				kfree(data);
		}
		/*
		 * See comment on outer loop
		 */
		smp_mb();
	}
}
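
/*
 * Illustrative sketch (editor's example): the single-target counterpart of
 * the hypothetical entry point sketched above, i.e. the vector behind
 * arch_send_call_function_single_ipi().  Again the name is made up; the
 * generic code only requires that the handler is called with interrupts
 * disabled.
 */
void example_arch_call_function_single_ipi_entry(void)
{
	/* arch-specific acknowledgement of the IPI would go here */
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}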

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/*
	 * Prevent preemption and reschedule on another processor, as well
	 * as CPU removal.
	 */
	int me = get_cpu();
	int err = 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
		struct call_single_data *data = NULL;

		if (!wait) {
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
		}
		if (!data) {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();
	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
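
/*
 * Usage sketch (editor's example, not part of the original file): read a
 * value that is only meaningful on a particular CPU by running a helper
 * there synchronously.  read_remote_cpu_id() and remote_cpu_id_fn() are
 * hypothetical names.
 */
static void remote_cpu_id_fn(void *info)
{
	*(int *)info = smp_processor_id();
}

static int read_remote_cpu_id(int cpu)
{
	int id = -1;

	/* wait == 1: returns only after remote_cpu_id_fn() ran on @cpu */
	if (smp_call_function_single(cpu, remote_cpu_id_fn, &id, 1))
		return -1;	/* @cpu was not online */

	return id;
}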

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and set-up data structure
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside other
 * structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}
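
/*
 * Usage sketch (editor's example): embedding the call_single_data in a
 * caller-owned structure, which is the use case the comment above refers
 * to.  struct example_work, example_work_fn() and example_kick_cpu() are
 * hypothetical.  With flags == 0 the call is fire-and-forget, so the
 * caller must keep the structure alive until example_work_fn() has run.
 */
struct example_work {
	struct call_single_data csd;
	unsigned long payload;
};

static void example_work_fn(void *info)
{
	struct example_work *work = info;

	work->payload++;	/* runs on the target CPU */
}

static void example_kick_cpu(int cpu, struct example_work *work)
{
	work->csd.flags = 0;	/* caller owns the memory: no WAIT, no ALLOC */
	work->csd.func = example_work_fn;
	work->csd.info = work;
	__smp_call_function_single(cpu, &work->csd);
}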

/* Dummy function */
static void quiesce_dummy(void *unused)
{
}

/*
 * Ensure stack based data used in call function mask is safe to free.
 *
 * This is needed by smp_call_function_mask when using on-stack data, because
 * a single call function queue is shared by all CPUs, and any CPU may pick up
 * the data item on the queue at any time before it is deleted. So we need to
 * ensure that all CPUs have transitioned through a quiescent state after
 * this call.
 *
 * This is a very slow function, implemented by sending synchronous IPIs to
 * all possible CPUs. For this reason, we have to alloc data rather than use
 * stack based data even in the case of synchronous calls. The stack based
 * data is then just used for deadlock/oom fallback which will be very rare.
 *
 * If a faster scheme can be made, we could go back to preferring stack based
 * data -- the data allocation/free is non-zero cost.
 */
static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
{
	struct call_single_data data;
	int cpu;

	data.func = quiesce_dummy;
	data.info = NULL;

	for_each_cpu_mask(cpu, mask) {
		data.flags = CSD_FLAG_WAIT;
		generic_exec_single(cpu, &data);
	}
}

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
			   int wait)
{
	struct call_function_data d;
	struct call_function_data *data = NULL;
	cpumask_t allbutself;
	unsigned long flags;
	int cpu, num_cpus;
	int slowpath = 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	cpu = smp_processor_id();
	allbutself = cpu_online_map;
	cpu_clear(cpu, allbutself);
	cpus_and(mask, mask, allbutself);
	num_cpus = cpus_weight(mask);

	/*
	 * If zero CPUs, return. If just a single CPU, turn this request
	 * into a targeted single call instead since it's faster.
	 */
	if (!num_cpus)
		return 0;
	else if (num_cpus == 1) {
		cpu = first_cpu(mask);
		return smp_call_function_single(cpu, func, info, wait);
	}

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (data) {
		data->csd.flags = CSD_FLAG_ALLOC;
		if (wait)
			data->csd.flags |= CSD_FLAG_WAIT;
	} else {
		data = &d;
		data->csd.flags = CSD_FLAG_WAIT;
		wait = 1;
		slowpath = 1;
	}

	spin_lock_init(&data->lock);
	data->csd.func = func;
	data->csd.info = info;
	data->refs = num_cpus;
	data->cpumask = mask;

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi(mask);

	/* optionally wait for the CPUs to complete */
	if (wait) {
		csd_flag_wait(&data->csd);
		if (unlikely(slowpath))
			smp_call_function_mask_quiesce_stack(mask);
	}

	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
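
/*
 * Usage sketch (editor's example): running a function on an explicit set
 * of CPUs.  As the comment above notes, interrupts must be enabled and
 * preemption disabled around the call.  example_flush() and
 * example_flush_cpus() are hypothetical.
 */
static void example_flush(void *info)
{
	/* e.g. flush some per-CPU state; must be fast and must not sleep */
}

static void example_flush_cpus(cpumask_t mask)
{
	preempt_disable();
	/* wait == 1: return only after example_flush() ran on every CPU in mask */
	smp_call_function_mask(mask, example_flush, NULL, 1);
	preempt_enable();
}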

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target CPUs call @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	int ret;

	preempt_disable();
	ret = smp_call_function_mask(cpu_online_map, func, info, wait);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(smp_call_function);
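
/*
 * Usage sketch (editor's example): the common "run on every other CPU"
 * pattern.  smp_call_function() only targets the other online CPUs, so
 * the caller usually runs the function locally as well; the on_each_cpu()
 * helper wraps this same pattern.  example_sync_one() and
 * example_sync_all() are hypothetical.
 */
static void example_sync_one(void *info)
{
	/* per-CPU work; fast, non-blocking, may run in IPI context */
}

static void example_sync_all(void)
{
	unsigned long flags;

	/* wait == 1: all other online CPUs have run example_sync_one() on return */
	smp_call_function(example_sync_one, NULL, 1);

	/* run it on the local CPU too, as on_each_cpu() would */
	local_irq_save(flags);
	example_sync_one(NULL);
	local_irq_restore(flags);
}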

void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}