// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time by
decoupling various independent hardware delay and discovery operations
so that they are not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously and out of order, while the externally visible parts of
these operations still happen sequentially and in order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before performing a globally visible operation, such as registering
device numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled prior to the operation corresponding to
the cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before
returning from its init function. This maintains strict ordering
between the asynchronous and synchronous parts of the kernel.

*/
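
/*
 * A minimal usage sketch of the pattern described above. The driver
 * names (my_probe, my_slow_discovery, etc.) are hypothetical; only the
 * async_*() calls are part of this API:
 *
 *	static void my_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_slow_discovery(dev);		// may run out of order
 *
 *		// the externally visible part must happen in order
 *		async_synchronize_cookie(cookie);
 *		my_register_device_numbers(dev);
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule(my_probe, &my_dev_instance);
 *		// my_init shares global state with non-async code
 *		async_synchronize_full();
 *		return 0;
 *	}
 */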

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/ktime.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);
static struct workqueue_struct *async_wq;

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();
	return ktime_to_ns(ktime_sub(now, start)) >> 10; /* approximately microseconds */
}

static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * run a single entry: call its function, then unlink and free it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling  %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule_node_domain(async_func_t func,
						   void *data, int node,
						   struct async_domain *domain,
						   struct async_entry *entry)
{
	async_cookie_t newcookie;
	unsigned long flags;

	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, async_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}

	return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
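
/*
 * For example, a subsystem can keep its probes in a private
 * synchronization domain so that waiting for them does not entangle
 * unrelated async work (a sketch; my_domain, my_probe and my_data are
 * hypothetical):
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_domain(my_probe, my_data, &my_domain);
 *	...
 *	async_synchronize_full_domain(&my_domain);
 */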

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);
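
/*
 * For instance, to run work near the memory of the device it operates
 * on (a sketch; my_fn and my_data are placeholders):
 *
 *	cookie = async_schedule_node(my_fn, my_data, dev_to_node(dev));
 */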

/**
 * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
 * @func: function to execute asynchronously
 * @dev: device argument to be passed to function
 *
 * @dev is used as both the argument for the function and to provide NUMA
 * context for where to run the function.
 *
 * If the asynchronous execution of @func is scheduled successfully, return
 * true. Otherwise, do nothing and return false, unlike async_schedule_dev(),
 * which would run the function synchronously in that case.
 */
bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
{
	struct async_entry *entry;

	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);

	/* Give up if there is no memory or too much work. */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		return false;
	}

	__async_schedule_node_domain(func, dev, dev_to_node(dev),
				     &async_dfl_domain, entry);
	return true;
}
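
/*
 * A typical caller falls back to a synchronous call when scheduling
 * fails (a sketch; my_resume_fn is hypothetical and, like many async
 * callbacks, would ignore the cookie argument passed here as 0):
 *
 *	if (!async_schedule_dev_nocall(my_resume_fn, dev))
 *		my_resume_fn(dev, 0);
 */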

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
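
/*
 * This matters e.g. for deadlock avoidance: a function running as an
 * async callback must not wait for all outstanding async work, because
 * that set includes itself (a sketch):
 *
 *	if (!current_is_async())
 *		async_synchronize_full();
 */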

void __init async_init(void)
{
	/*
	 * Async can schedule a number of interdependent work items. However,
	 * unbound workqueues can handle only up to min_active interdependent
	 * work items. The default min_active of 8 isn't sufficient for async
	 * and can lead to stalls. Let's use a dedicated workqueue with raised
	 * min_active.
	 */
	async_wq = alloc_workqueue("async", WQ_UNBOUND, 0);
	BUG_ON(!async_wq);
	workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE);
}
363