// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running various independent hardware delay and discovery operations
decoupled and not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order
(not unlike how out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding to the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
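
/*
 * For illustration only (not compiled as part of this file): a minimal
 * sketch of the pattern described above, using the <linux/async.h> API.
 * The my_*() helpers, struct my_dev, my_devs[] and NR_MY_DEVS are
 * hypothetical stand-ins for driver code.
 *
 * Each probe does its slow hardware setup out of order, then calls
 * async_synchronize_cookie() with its own cookie so that the externally
 * visible registration step happens in scheduling order:
 *
 *	static void my_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *d = data;
 *
 *		my_slow_hw_init(d);
 *		async_synchronize_cookie(cookie);
 *		my_register_device(d);
 *	}
 *
 * The init function schedules all probes and, because it shares global
 * resources with synchronous init code, does a full synchronization
 * before returning:
 *
 *	static int __init my_init(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < NR_MY_DEVS; i++)
 *			async_schedule(my_probe_one, &my_devs[i]);
 *
 *		async_synchronize_full();
 *		return 0;
 *	}
 */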

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/ktime.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);
static struct workqueue_struct *async_wq;

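/*
 * One scheduled async function call, linked on its domain's pending list
 * and, if the domain is registered, on the global pending list.
 */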
struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

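/* Time elapsed since @start, in rough microseconds (ns >> 10, i.e. /1024) */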
static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();
	return ktime_to_ns(ktime_sub(now, start)) >> 10;
}

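/*
 * Return the cookie of the oldest pending entry in @domain (%NULL means all
 * registered domains), or ASYNC_COOKIE_MAX when nothing is pending. Entries
 * are queued in increasing cookie order, so the list head holds the minimum.
 */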
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * Run the function attached to this work item, then remove the entry from
 * the pending lists, free it, and wake up anyone synchronizing.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling  %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

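/*
 * Allocate the next cookie for @entry, queue it on @domain's pending list
 * (and on the global pending list when the domain is registered), then hand
 * it to the async workqueue on or near @node.
 */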
static async_cookie_t __async_schedule_node_domain(async_func_t func,
						   void *data, int node,
						   struct async_domain *domain,
						   struct async_entry *entry)
{
	async_cookie_t newcookie;
	unsigned long flags;

	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, async_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}

	return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
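
/*
 * Illustration (hypothetical names): a subsystem that wants to wait only on
 * its own work can declare a private domain and use the *_domain() variants:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	cookie = async_schedule_domain(my_probe_one, &my_devs[0], &my_domain);
 *	...
 *	async_synchronize_full_domain(&my_domain);
 */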

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);

/**
 * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
 * @func: function to execute asynchronously
 * @dev: device argument to be passed to function
 *
 * @dev is used as both the argument for the function and to provide NUMA
 * context for where to run the function.
 *
 * If the asynchronous execution of @func is scheduled successfully, return
 * true. Otherwise, do nothing and return false, unlike async_schedule_dev(),
 * which falls back to running the function synchronously.
 */
bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
{
	struct async_entry *entry;

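	/* unlike async_schedule_node_domain(), may sleep (GFP_KERNEL) */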
	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);

	/* Give up if there is no memory or too much work. */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		return false;
	}

	__async_schedule_node_domain(func, dev, dev_to_node(dev),
				     &async_dfl_domain, entry);
	return true;
}

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);

void __init async_init(void)
{
	/*
	 * Async can schedule a number of interdependent work items. However,
	 * unbound workqueues can handle only up to min_active interdependent
	 * work items. The default min_active of 8 isn't sufficient for async
	 * and can lead to stalls. Let's use a dedicated workqueue with raised
	 * min_active.
	 */
	async_wq = alloc_workqueue("async", WQ_UNBOUND, 0);
	BUG_ON(!async_wq);
	workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE);
}