/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time by letting
various independent hardware delays and discovery operations run decoupled
rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously and out of order, while their externally visible parts
still happen sequentially and in order (not unlike how out-of-order
CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before performing a globally visible operation, such as registering
device numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled prior to the operation corresponding
with the cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
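
/*
 * A minimal usage sketch of the pattern described above (illustrative
 * only; the my_* names are hypothetical, only the async_* calls are part
 * of this API):
 *
 *	static void my_dev_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_slow_hardware_discovery(dev);	// may run out of order
 *
 *		// order the externally visible part behind everything
 *		// that was scheduled before us
 *		async_synchronize_cookie(cookie);
 *		my_register_device_numbers(dev);
 *	}
 *
 *	static int __init my_driver_init(void)
 *	{
 *		async_schedule(my_dev_probe, &my_dev);
 *
 *		// only needed when sharing global resources with code
 *		// that does not use the async machinery
 *		async_synchronize_full();
 *		return 0;
 *	}
 */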

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct async_domain	*running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct async_domain *running)
{
	struct async_entry *entry;

	if (!list_empty(&running->domain)) {
		entry = list_first_entry(&running->domain, typeof(*entry), list);
		return entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list)
		if (entry->running == running)
			return entry->cookie;

	return next_cookie;	/* "infinity" value */
}

static async_cookie_t lowest_in_progress(struct async_domain *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * run the given async entry: move it to its running domain, execute it,
 * then tear it down and wake up any waiters
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *running = entry->running;

	/* 1) move self to the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_move_tail(&entry->list, &running->domain);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (running->registered && --running->count == 0)
		list_del_init(&running->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (running->registered && running->count++ == 0)
		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the synchronization domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * The synchronization domain is specified via the running queue @running;
 * @running may then be used in the async_synchronize_*_domain() functions
 * to wait within that domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct async_domain *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
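
/*
 * A minimal sketch of using a private synchronization domain so that a
 * later wait only covers this subsystem's own work (the my_* names are
 * hypothetical).  ASYNC_DOMAIN_EXCLUSIVE() keeps the domain out of
 * async_synchronize_full(), so it is only ever waited on explicitly:
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(my_domain);
 *
 *	static void my_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		my_probe(data);
 *	}
 *
 *	static void my_probe_all(struct my_dev *devs, int n)
 *	{
 *		int i;
 *
 *		for (i = 0; i < n; i++)
 *			async_schedule_domain(my_probe_one, &devs[i],
 *					      &my_domain);
 *
 *		// wait for this domain only, not for unrelated async work
 *		async_synchronize_full_domain(&my_domain);
 *	}
 */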

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		async_synchronize_cookie_domain(next_cookie, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->domain));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
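
/*
 * A minimal sketch of the lifetime of a registered domain, e.g. in a
 * module that can be unloaded (the my_* names are hypothetical):
 *
 *	static ASYNC_DOMAIN(my_domain);	// visible to async_synchronize_full()
 *
 *	static void my_exit(void)
 *	{
 *		// make sure the domain is idle ...
 *		async_synchronize_full_domain(&my_domain);
 *		// ... then remove it from async_synchronize_full()'s view
 *		async_unregister_domain(&my_domain);
 *	}
 */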

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running that were
 * submitted prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!running)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
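
/*
 * A minimal sketch of how a caller might use current_is_async(), e.g. to
 * avoid waiting on async work from inside an async worker, which could
 * deadlock against itself (the my_* name is hypothetical):
 *
 *	if (current_is_async())
 *		my_defer_work();		// already running as async work
 *	else
 *		async_synchronize_full();	// safe to wait here
 */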