// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should, before doing a globally visible
operation such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function, before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.
*/

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

/* Next sequence cookie to hand out; only advanced under async_lock. */
static async_cookie_t next_cookie = 1;

/* If more than this many entries are pending, new work runs synchronously. */
#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
/* Protects next_cookie and both pending lists (global and per-domain). */
static DEFINE_SPINLOCK(async_lock);

/* One scheduled asynchronous call, queued until its worker runs it. */
struct async_entry {
	struct list_head	domain_list;	/* link in domain->pending */
	struct list_head	global_list;	/* link in async_global_pending */
	struct work_struct	work;		/* runs async_run_entry_fn */
	async_cookie_t		cookie;		/* ordering checkpoint */
	async_func_t		func;		/* function to call */
	void			*data;		/* opaque argument for func */
	struct async_domain	*domain;	/* owning synchronization domain */
};

/* Woken each time an entry completes; waited on by the synchronize calls. */
static DECLARE_WAIT_QUEUE_HEAD(async_done);

/* Number of entries currently pending; bounds queueing via MAX_WORK. */
static atomic_t entry_count;

/*
 * Elapsed time since @start, approximately in microseconds.
 * (ns >> 10 divides by 1024 rather than 1000 -- close enough for debug
 * prints and cheaper than a division.)
 */
static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();
	return ktime_to_ns(ktime_sub(now, start)) >> 10;
}

/*
 * Return the cookie of the oldest still-pending entry in @domain, or in
 * all registered domains when @domain is NULL.  Returns ASYNC_COOKIE_MAX
 * ("infinity") when nothing is pending, which lets any wait on a finite
 * cookie complete immediately.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	/* Lists are cookie-ordered, so the head is the lowest cookie. */
	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

/*
 * Common scheduling path: initialize the caller-allocated @entry, assign it
 * the next cookie, queue it on the domain (and, for registered domains, the
 * global) pending list, and hand it to the unbound workqueue near @node.
 */
static async_cookie_t __async_schedule_node_domain(async_func_t func,
						   void *data, int node,
						   struct async_domain *domain,
						   struct async_entry *entry)
{
	async_cookie_t newcookie;
	unsigned long flags;

	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis.
If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		/* Still consume a cookie so ordering stays consistent. */
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}

	return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	/* Delegate to the domain variant using the default domain. */
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	/* NULL domain means "wait on all registered domains". */
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	/* ASYNC_COOKIE_MAX == "infinity": wait for every pending entry. */
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	/*
	 * Sleep until every entry older than @cookie has completed; each
	 * completing entry wakes async_done (see async_run_entry_fn).
	 */
	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	/* Same as the domain variant, restricted to the default domain. */
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	/* An async worker is a workqueue worker running async_run_entry_fn. */
	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);