xref: /linux/kernel/async.c (revision f8bcb061ea013a9b39a071b9dd9f6ea0aa2caf72)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time by
performing various independent hardware delays and discovery operations
decoupled from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously and out of order, while their externally visible
effects still happen sequentially and in order (not unlike how
out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before performing a globally visible operation, such as registering
device numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations scheduled prior to the operation corresponding to that
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This maintains strict ordering between the
asynchronous and synchronous parts of the kernel. (See the usage sketch
below.)

*/
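
/*
 * Illustrative usage sketch (not part of this file's code): a driver
 * schedules its probe routine with async_schedule(), serializes the
 * globally visible step with async_synchronize_cookie(), and its init
 * function waits with async_synchronize_full() because it shares state
 * with non-async code. All my_* names are hypothetical (my_dev being a
 * hypothetical static device instance); async_schedule() itself is a
 * wrapper declared in <linux/async.h>.
 *
 *	static void my_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_dev_reset_and_wait(dev);	// slow, independent hardware work
 *
 *		// Externally visible part: wait for everything scheduled
 *		// before us so device numbers are assigned in order.
 *		async_synchronize_cookie(cookie);
 *		my_dev_register(dev);
 *	}
 *
 *	static int __init my_driver_init(void)
 *	{
 *		async_schedule(my_probe, &my_dev);
 *		// Shares global resources with non-async drivers: wait here.
 *		async_synchronize_full();
 *		return 0;
 *	}
 */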

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

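/*
 * async_done is woken after every completed entry; entry_count is the
 * number of outstanding entries and is checked against MAX_WORK to decide
 * whether newly scheduled work must run synchronously instead.
 */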
static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

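/*
 * Elapsed time since @start in (approximate) microseconds: nanoseconds
 * shifted right by 10, i.e. divided by 1024 rather than 1000, which is
 * close enough for the debug prints that use it.
 */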
static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();
	return ktime_to_ns(ktime_sub(now, start)) >> 10;
}

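/*
 * Return the cookie of the oldest entry still pending in @domain, or across
 * all registered domains when @domain is NULL; ASYNC_COOKIE_MAX if nothing
 * is pending.
 */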
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * Run the async entry embedded in @work, then remove it from the pending
 * lists and free it.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling  %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

/**
 * async_schedule_node_domain - NUMA-specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: synchronization domain to queue the work into
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* out of memory or too busy: run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);

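/*
 * Illustrative sketch of domain-scoped use (the my_* names are hypothetical;
 * ASYNC_DOMAIN_EXCLUSIVE() is declared in <linux/async.h>): work queued into
 * a private domain can be waited for on its own, without also waiting for
 * unrelated async work elsewhere.
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(my_domain);
 *
 *	static void my_async_work(void *data, async_cookie_t cookie)
 *	{
 *		// ... slow, independent work ...
 *	}
 *
 *	static void my_setup(void *data)
 *	{
 *		async_schedule_node_domain(my_async_work, data, NUMA_NO_NODE,
 *					   &my_domain);
 *		// Waits only for work queued into my_domain.
 *		async_synchronize_full_domain(&my_domain);
 *	}
 */
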
/**
 * async_schedule_node - NUMA-specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
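
/*
 * Illustrative sketch (hypothetical caller): code that may itself be running
 * as an async worker can check current_is_async() before blocking on the
 * async machinery, since e.g. async_synchronize_full() would otherwise wait
 * for the caller's own, still-pending entry:
 *
 *	if (!current_is_async())
 *		async_synchronize_full();
 */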
309