| /linux/fs/smb/server/ |
| H A D | ksmbd_work.c | 21 struct ksmbd_work *work = kmem_cache_zalloc(work_cache, KSMBD_DEFAULT_GFP); in ksmbd_alloc_work_struct() local 23 if (work) { in ksmbd_alloc_work_struct() 24 work->compound_fid = KSMBD_NO_FID; in ksmbd_alloc_work_struct() 25 work->compound_pfid = KSMBD_NO_FID; in ksmbd_alloc_work_struct() 26 INIT_LIST_HEAD(&work->request_entry); in ksmbd_alloc_work_struct() 27 INIT_LIST_HEAD(&work->async_request_entry); in ksmbd_alloc_work_struct() 28 INIT_LIST_HEAD(&work->fp_entry); in ksmbd_alloc_work_struct() 29 INIT_LIST_HEAD(&work->aux_read_list); in ksmbd_alloc_work_struct() 30 work->iov_alloc_cnt = 4; in ksmbd_alloc_work_struct() 31 work->iov = kzalloc_objs(struct kvec, work->iov_alloc_cnt, in ksmbd_alloc_work_struct() [all …]
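The ksmbd_alloc_work_struct() snippet above is the standard slab-cache pattern for per-request objects: zero-allocate from a dedicated kmem_cache, then set sentinel fields and initialize every list head before first use. A minimal sketch of the same pattern — the cache, struct, and sentinel below are illustrative, not the ksmbd definitions:

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>

struct my_work {
        struct list_head entry;
        int fid;                        /* -1 acts as a "no fid" sentinel, like KSMBD_NO_FID */
};

static struct kmem_cache *my_work_cache;

static int my_work_cache_init(void)
{
        my_work_cache = kmem_cache_create("my_work", sizeof(struct my_work),
                                          0, SLAB_HWCACHE_ALIGN, NULL);
        return my_work_cache ? 0 : -ENOMEM;
}

static struct my_work *my_work_alloc(void)
{
        struct my_work *w = kmem_cache_zalloc(my_work_cache, GFP_KERNEL);

        if (w) {
                w->fid = -1;
                INIT_LIST_HEAD(&w->entry);      /* lists must be valid before first use */
        }
        return w;
}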
|
| H A D | smb2pdu.h | 321 bool is_smb2_neg_cmd(struct ksmbd_work *work); 322 bool is_smb2_rsp(struct ksmbd_work *work); 324 u16 get_smb2_cmd_val(struct ksmbd_work *work); 325 void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err); 326 int init_smb2_rsp_hdr(struct ksmbd_work *work); 327 int smb2_allocate_rsp_buf(struct ksmbd_work *work); 328 bool is_chained_smb2_message(struct ksmbd_work *work); 329 int init_smb2_neg_rsp(struct ksmbd_work *work); 330 void smb2_set_err_rsp(struct ksmbd_work *work); 331 int smb2_check_user_session(struct ksmbd_work *work); [all …]
|
| H A D | ksmbd_work.h | 86 struct work_struct work; member 96 * @work: smb work containing response buffer 98 static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work) in ksmbd_resp_buf_next() argument 100 return work->response_buf + work->next_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_next() 105 * @work: smb work containing response buffer 107 static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work) in ksmbd_resp_buf_curr() argument 109 return work->response_buf + work->curr_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_curr() 114 * @work: smb work containing response buffer 116 static inline void *ksmbd_req_buf_next(struct ksmbd_work *work) in ksmbd_req_buf_next() argument 118 return work->request_buf + work->next_smb2_rcv_hdr_off + 4; in ksmbd_req_buf_next() [all …]
|
| H A D | smb_common.c | 153 * @work: smb work 159 int ksmbd_verify_smb_message(struct ksmbd_work *work) in ksmbd_verify_smb_message() argument 161 struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work); in ksmbd_verify_smb_message() 165 return ksmbd_smb2_check_message(work); in ksmbd_verify_smb_message() 167 hdr = smb_get_msg(work->request_buf); in ksmbd_verify_smb_message() 170 work->conn->outstanding_credits++; in ksmbd_verify_smb_message() 329 * @work: smb work containing smb header 333 static u16 get_smb1_cmd_val(struct ksmbd_work *work) in get_smb1_cmd_val() argument 340 * @work: smb work containing smb request 344 static int init_smb1_rsp_hdr(struct ksmbd_work *work) in init_smb1_rsp_hdr() argument [all …]
|
| /linux/drivers/gpu/drm/ |
| H A D | drm_flip_work.c | 47 static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task) in drm_flip_work_queue_task() argument 51 spin_lock_irqsave(&work->lock, flags); in drm_flip_work_queue_task() 52 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task() 53 spin_unlock_irqrestore(&work->lock, flags); in drm_flip_work_queue_task() 57 * drm_flip_work_queue - queue work 58 * @work: the flip-work 61 * Queues work, that will later be run (passed back to drm_flip_func_t 62 * func) on a work queue after drm_flip_work_commit() is called. 64 void drm_flip_work_queue(struct drm_flip_work *work, void *val) in drm_flip_work_queue() argument 71 drm_flip_work_queue_task(work, task); in drm_flip_work_queue() [all …]
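drm_flip_work exists to hand values produced in atomic/IRQ context (typically at flip-done time) to a function that needs process context. A hedged usage sketch of the init/queue/commit cycle; my_fb_put() is a hypothetical release helper:

#include <drm/drm_flip_work.h>
#include <linux/workqueue.h>

extern void my_fb_put(void *fb);        /* hypothetical buffer-release helper */

static struct drm_flip_work unref_work;

/* runs on the workqueue, once per queued value, after commit */
static void unref_worker(struct drm_flip_work *work, void *val)
{
        my_fb_put(val);
}

static void my_init(void)
{
        drm_flip_work_init(&unref_work, "fb unref", unref_worker);
}

/* typically called from the vblank/flip-done interrupt path */
static void my_flip_done(void *old_fb)
{
        drm_flip_work_queue(&unref_work, old_fb);       /* safe in atomic context */
        drm_flip_work_commit(&unref_work, system_unbound_wq);
}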
|
| /linux/include/trace/events/ |
| H A D | workqueue.h | 14 * workqueue_queue_work - called when a work gets queued 17 * @work: pointer to struct work_struct 19 * This event occurs when a work is queued immediately or once a 20 * delayed work is actually queued on a workqueue (ie: once the delay 26 struct work_struct *work), 28 TP_ARGS(req_cpu, pwq, work), 31 __field( void *, work ) 39 __entry->work = work; 40 __entry->function = work [all...] |
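For a TRACE_EVENT named workqueue_queue_work the build generates register_/unregister_trace_workqueue_queue_work() helpers, so built-in code (or a module, if the tracepoint is exported to it) can attach a probe. A hedged sketch — the exact type of req_cpu varies across kernel versions, and a probe always takes a leading private-data pointer:

#include <linux/module.h>
#include <trace/events/workqueue.h>

static void probe_queue_work(void *priv, int req_cpu,
                             struct pool_workqueue *pwq,
                             struct work_struct *work)
{
        pr_info("work %pS queued for cpu %d\n", work->func, req_cpu);
}

static int __init my_probe_init(void)
{
        return register_trace_workqueue_queue_work(probe_queue_work, NULL);
}

static void __exit my_probe_exit(void)
{
        unregister_trace_workqueue_queue_work(probe_queue_work, NULL);
        tracepoint_synchronize_unregister();    /* wait out in-flight probes */
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");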
| /linux/virt/kvm/ |
| H A D | async_pf.c | 45 static void async_pf_execute(struct work_struct *work) in async_pf_execute() argument 48 container_of(work, struct kvm_async_pf, work); in async_pf_execute() 63 * work item is fully processed. in async_pf_execute() 99 static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work) in kvm_flush_and_free_async_pf_work() argument 102 * The async #PF is "done", but KVM must wait for the work item itself, in kvm_flush_and_free_async_pf_work() 105 * after the last call to module_put(). Note, flushing the work item in kvm_flush_and_free_async_pf_work() 111 * need to be flushed (but sanity check that the work wasn't queued). in kvm_flush_and_free_async_pf_work() 113 if (work->wakeup_all) in kvm_flush_and_free_async_pf_work() 114 WARN_ON_ONCE(work in kvm_flush_and_free_async_pf_work() 124 struct kvm_async_pf *work = kvm_clear_async_pf_completion_queue() local 139 struct kvm_async_pf *work = kvm_clear_async_pf_completion_queue() local 155 struct kvm_async_pf *work; kvm_check_async_pf_completion() local 182 struct kvm_async_pf *work; kvm_setup_async_pf() local 218 struct kvm_async_pf *work; kvm_async_pf_wakeup_all() local [all...] |
| /linux/fs/btrfs/ |
| H A D | async-thread.c | 29 /* List head pointing to ordered work list */ 55 struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument 57 return work->wq->fs_info; in btrfs_work_owner() 163 * Hook for threshold which will be called before executing the work, 213 struct btrfs_work *work; in run_ordered_work() local 222 work = list_first_entry(list, struct btrfs_work, ordered_list); in run_ordered_work() 223 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work() 229 * updates from ordinary work function. in run_ordered_work() 235 * we leave the work item on the list as a barrier so in run_ordered_work() 236 * that later work items that are done don't have their in run_ordered_work() [all …]
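The run_ordered_work() comments describe the key invariant: work items may execute concurrently, but each item's ordered hook runs only once every earlier item is done, so an unfinished head item acts as a barrier. A generic re-implementation of that idea for illustration — this is not the btrfs API, just the same shape:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct ordered_item {
        struct work_struct work;
        struct list_head ordered_entry;         /* queue-order list */
        bool done;                              /* set when the unordered part finished */
        void (*ordered_fn)(struct ordered_item *item);
};

static LIST_HEAD(ordered_list);
static DEFINE_SPINLOCK(ordered_lock);

static void run_ordered(void)
{
        struct ordered_item *item;

        spin_lock(&ordered_lock);
        while (!list_empty(&ordered_list)) {
                item = list_first_entry(&ordered_list, struct ordered_item,
                                        ordered_entry);
                if (!item->done)        /* unfinished head = barrier for later items */
                        break;
                list_del(&item->ordered_entry);
                spin_unlock(&ordered_lock);
                item->ordered_fn(item); /* runs strictly in queueing order */
                spin_lock(&ordered_lock);
        }
        spin_unlock(&ordered_lock);
}

static void ordered_work_fn(struct work_struct *work)
{
        struct ordered_item *item = container_of(work, struct ordered_item, work);

        /* ... unordered part, possibly running in parallel with other items ... */
        spin_lock(&ordered_lock);
        item->done = true;
        spin_unlock(&ordered_lock);
        run_ordered();
}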
|
| /linux/LICENSES/dual/ |
| H A D | copyleft-next-0.3.1 | 26 of, publicly perform and publicly display My Work. 40 Legal Notices contained in My Work (to the extent they remain 47 If You Distribute a Derived Work, You must license the entire Derived 48 Work as a whole under this License, with prominent notice of such 50 separate Distribution of portions of the Derived Work. 52 If the Derived Work includes material licensed under the GPL, You may 53 instead license the Derived Work under the GPL. 57 When Distributing a Covered Work, You may not impose further 58 restrictions on the exercise of rights in the Covered Work granted under 64 However, You may Distribute a Covered Work incorporating material [all …]
|
| H A D | Apache-2.0 | 49 "Work" shall mean the work of authorship, whether in Source or Object form, 51 is included in or attached to the work (an example is provided in the 54 "Derivative Works" shall mean any work, whether in Source or Object form, 55 that is based on (or derived from) the Work and for which the editorial 57 a whole, an original work of authorship. For the purposes of this License, 59 merely link (or bind by name) to the interfaces of, the Work and Derivative 62 "Contribution" shall mean any work of authorship, including the original 63 version of the Work and any modifications or additions to that Work or 65 inclusion in the Work by the copyright owner or by an individual or Legal 72 and improving the Work, but excluding communication that is conspicuously [all …]
|
| /linux/tools/perf/ |
| H A D | builtin-kwork.c | 315 struct kwork_work *work; in work_search() local 319 work = container_of(node, struct kwork_work, node); in work_search() 320 cmp = work_cmp(sort_list, key, work); in work_search() 326 if (work->name == NULL) in work_search() 327 work->name = key->name; in work_search() 328 return work; in work_search() 362 struct kwork_work *work = zalloc(sizeof(*work)); in work_new() local 364 if (work == NULL) { in work_new() 365 pr_err("Failed to zalloc kwork work\ in work_new() 383 struct kwork_work *work = work_search(root, key, sort_list); work_findnew() local 409 profile_name_match(struct perf_kwork * kwork,struct kwork_work * work) profile_name_match() argument 420 profile_event_match(struct perf_kwork * kwork,struct kwork_work * work,struct perf_sample * sample) profile_event_match() argument 458 struct kwork_work *work, key; work_push_atom() local 516 struct kwork_work *work, key; work_pop_atom() local 551 struct kwork_work *work; find_work_by_id() local 579 report_update_exit_event(struct kwork_work * work,struct kwork_atom * atom,struct perf_sample * sample) report_update_exit_event() argument 618 struct kwork_work *work = NULL; report_exit_event() local 634 latency_update_entry_event(struct kwork_work * work,struct kwork_atom * atom,struct perf_sample * sample) latency_update_entry_event() argument 673 struct kwork_work *work = NULL; latency_entry_event() local 739 timehist_print_event(struct perf_kwork * kwork,struct kwork_work * work,struct kwork_atom * atom,struct perf_sample * sample,struct addr_location * al) timehist_print_event() argument 832 struct kwork_work *work = NULL; timehist_entry_event() local 853 struct kwork_work *work = NULL; timehist_exit_event() local 883 top_update_runtime(struct kwork_work * work,struct kwork_atom * atom,struct perf_sample * sample) top_update_runtime() argument 914 struct kwork_work *work, *sched_work; top_exit_event() local 945 struct kwork_work *work; top_sched_switch_event() local 1007 irq_work_init(struct perf_kwork * kwork,struct kwork_class * class,struct kwork_work * work,enum kwork_trace_type src_type __maybe_unused,struct evsel * evsel,struct perf_sample * sample,struct machine * machine __maybe_unused) irq_work_init() argument 1025 irq_work_name(struct kwork_work * work,char * buf,int len) irq_work_name() argument 1135 softirq_work_init(struct perf_kwork * kwork,struct kwork_class * class,struct kwork_work * work,enum kwork_trace_type src_type __maybe_unused,struct evsel * evsel,struct perf_sample * sample,struct machine * machine __maybe_unused) softirq_work_init() argument 1156 softirq_work_name(struct kwork_work * work,char * buf,int len) softirq_work_name() argument 1235 workqueue_work_init(struct perf_kwork * kwork __maybe_unused,struct kwork_class * class,struct kwork_work * work,enum kwork_trace_type src_type __maybe_unused,struct evsel * evsel,struct perf_sample * sample,struct machine * machine) workqueue_work_init() argument 1252 workqueue_work_name(struct kwork_work * work,char * buf,int len) workqueue_work_name() argument 1303 sched_work_init(struct perf_kwork * kwork __maybe_unused,struct kwork_class * class,struct kwork_work * work,enum kwork_trace_type src_type,struct evsel * evsel,struct perf_sample * sample,struct machine * machine __maybe_unused) sched_work_init() argument 1321 sched_work_name(struct kwork_work * work,char * buf,int len) sched_work_name() argument 1348 report_print_work(struct perf_kwork * kwork,struct kwork_work * work) report_print_work() argument 1639 top_print_work(struct perf_kwork * kwork __maybe_unused,struct kwork_work * work) top_print_work() argument 1839 process_skipped_events(struct perf_kwork * kwork,struct kwork_work * work) process_skipped_events() argument 1855 struct kwork_work *work = NULL; perf_kwork_add_work() local 1907 struct kwork_work *work; perf_kwork__report() local 2000 struct kwork_work *work; top_calc_total_runtime() local 2019 top_calc_idle_time(struct perf_kwork * kwork,struct kwork_work * work) top_calc_idle_time() argument 2031 top_calc_irq_runtime(struct perf_kwork * kwork,enum kwork_class_type type,struct kwork_work * work) top_calc_irq_runtime() argument 2045 top_subtract_irq_runtime(struct perf_kwork * kwork,struct kwork_work * work) top_subtract_irq_runtime() argument 2072 struct kwork_work *work; top_calc_cpu_usage() local 2101 top_calc_load_runtime(struct perf_kwork * kwork,struct kwork_work * work) top_calc_load_runtime() argument 2152 struct kwork_work *work; perf_kwork__top_report() local [all...] |
| /linux/include/linux/ |
| H A D | completion.h | 35 #define COMPLETION_INITIALIZER(work) \ argument 36 { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } 38 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ argument 39 (*({ init_completion_map(&(work), &(map)); &(work); })) 41 #define COMPLETION_INITIALIZER_ONSTACK(work) \ argument 42 (*({ init_completion(&work); &work; })) 46 * @work: identifier for the completion structure 52 #define DECLARE_COMPLETION(work) \ argument 53 struct completion work = COMPLETION_INITIALIZER(work) 62 * @work: identifier for the completion structure [all …]
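These initializers back the usual declare/wait/complete flow: one side blocks in wait_for_completion() until the other calls complete(). A minimal sketch pairing a completion with a work item:

#include <linux/completion.h>
#include <linux/workqueue.h>

static DECLARE_COMPLETION(setup_done);  /* expands to COMPLETION_INITIALIZER(setup_done) */

static void setup_fn(struct work_struct *work)
{
        /* ... perform the setup ... */
        complete(&setup_done);          /* wake one waiter */
}
static DECLARE_WORK(setup_work, setup_fn);

static void wait_for_setup(void)
{
        schedule_work(&setup_work);
        wait_for_completion(&setup_done);       /* sleeps until setup_fn() completes */
        /* reinit_completion(&setup_done) would be needed before reuse */
}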
|
| H A D | jump_label_ratelimit.h | 12 struct delayed_work work; member 18 struct delayed_work work; member 24 struct delayed_work work; member 28 __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout) 30 __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout) 33 __static_key_deferred_flush((x), &(x)->work) 37 struct delayed_work *work, 39 extern void __static_key_deferred_flush(void *key, struct delayed_work *work); 43 extern void jump_label_update_timeout(struct work_struct *work); 49 .work = __DELAYED_WORK_INITIALIZER((name).work, \ [all …]
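The delayed_work embedded in these structs is what makes the decrement deferred: static_key_slow_dec_deferred() batches disable-side code patching until the rate-limit window set by jump_label_rate_limit() expires. A hedged usage sketch; the key name and one-second window are arbitrary:

#include <linux/jump_label_ratelimit.h>
#include <linux/jiffies.h>

static struct static_key_deferred my_defer_key;

static void my_key_setup(void)
{
        jump_label_rate_limit(&my_defer_key, HZ);       /* at most one dec per second */
}

static void my_feature_enable(void)
{
        static_key_slow_inc(&my_defer_key.key);         /* enable takes effect immediately */
}

static void my_feature_disable(void)
{
        static_key_slow_dec_deferred(&my_defer_key);    /* patched later, via the work */
}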
|
| /linux/kernel/ |
| H A D | kthread.c | 979 * when they finish. There is defined a safe point for freezing when one work 988 struct kthread_work *work; in kthread_worker_fn() local 1011 work = NULL; in kthread_worker_fn() 1014 work = list_first_entry(&worker->work_list, in kthread_worker_fn() 1016 list_del_init(&work->node); in kthread_worker_fn() 1018 worker->current_work = work; in kthread_worker_fn() 1021 if (work) { in kthread_worker_fn() 1022 kthread_work_func_t func = work->func; in kthread_worker_fn() 1024 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn() 1025 work->func(work); in kthread_worker_fn() [all …]
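kthread_worker_fn() is the loop behind dedicated kthread workers: every queued kthread_work runs serially on one private thread, unlike items on shared workqueues. A minimal usage sketch, assuming the common create/queue/flush/destroy helpers:

#include <linux/kthread.h>
#include <linux/err.h>

static void my_work_fn(struct kthread_work *work)
{
        /* executes in the dedicated kthread, one item at a time */
}
static DEFINE_KTHREAD_WORK(my_kwork, my_work_fn);

static int my_run_once(void)
{
        struct kthread_worker *worker;

        worker = kthread_create_worker(0, "my-worker"); /* spawns the thread */
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        kthread_queue_work(worker, &my_kwork);
        kthread_flush_worker(worker);   /* wait for all queued work */
        kthread_destroy_worker(worker); /* flushes again, stops and frees */
        return 0;
}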
|
| H A D | workqueue.c | 18 * This is the generic async execution mechanism. Work items as are 21 * normal work items and the other for high priority ones) and some extra 251 PWQ_STAT_STARTED, /* work items started execution */ 252 PWQ_STAT_COMPLETED, /* work items completed execution */ 258 PWQ_STAT_RESCUED, /* linked work items executed by rescuer */ 282 * When pwq->nr_active >= max_active, new work item is queued to 286 * All work items marked with WORK_STRUCT_INACTIVE do not participate in 287 * nr_active and all work items in pwq->inactive_works are marked with 288 * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are 290 * pool->worklist or worker->scheduled. Those work itme 631 struct work_struct *work = addr; work_is_static_object() local 642 struct work_struct *work = addr; work_fixup_init() local 660 struct work_struct *work = addr; work_fixup_free() local 680 debug_work_activate(struct work_struct * work) debug_work_activate() argument 685 debug_work_deactivate(struct work_struct * work) debug_work_deactivate() argument 690 __init_work(struct work_struct * work,int onstack) __init_work() argument 699 destroy_work_on_stack(struct work_struct * work) destroy_work_on_stack() argument 705 destroy_delayed_work_on_stack(struct delayed_work * work) destroy_delayed_work_on_stack() argument 713 debug_work_activate(struct work_struct * work) debug_work_activate() argument 714 debug_work_deactivate(struct work_struct * work) debug_work_deactivate() argument 804 set_work_data(struct work_struct * work,unsigned long data) set_work_data() argument 810 set_work_pwq(struct work_struct * work,struct pool_workqueue * pwq,unsigned long flags) set_work_pwq() argument 817 set_work_pool_and_keep_pending(struct work_struct * work,int pool_id,unsigned long flags) set_work_pool_and_keep_pending() argument 824 set_work_pool_and_clear_pending(struct work_struct * work,int pool_id,unsigned long flags) set_work_pool_and_clear_pending() argument 872 get_work_pwq(struct work_struct * work) get_work_pwq() argument 897 get_work_pool(struct work_struct * work) get_work_pool() argument 1128 find_worker_executing_work(struct worker_pool * pool,struct work_struct * work) find_worker_executing_work() argument 1141 mayday_cursor_func(struct work_struct * work) mayday_cursor_func() argument 1161 move_linked_works(struct work_struct * work,struct list_head * head,struct work_struct ** nextp) move_linked_works() argument 1201 assign_work(struct work_struct * work,struct worker * worker,struct work_struct ** nextp) assign_work() argument 1303 struct work_struct *work = list_first_entry(&pool->worklist, kick_pool() local 1708 __pwq_activate_work(struct pool_workqueue * pwq,struct work_struct * work) __pwq_activate_work() argument 1821 struct work_struct *work = pwq_activate_first_inactive() local 1898 struct work_struct *work; node_activate_pending_pwq() local 2090 try_to_grab_pending(struct work_struct * work,u32 cflags,unsigned long * irq_flags) try_to_grab_pending() argument 2194 work_grab_pending(struct work_struct * work,u32 cflags,unsigned long * irq_flags) work_grab_pending() argument 2220 insert_work(struct pool_workqueue * pwq,struct work_struct * work,struct list_head * head,unsigned int extra_flags) insert_work() argument 2276 __queue_work(int cpu,struct workqueue_struct * wq,struct work_struct * work) __queue_work() argument 2393 clear_pending_if_disabled(struct work_struct * work) clear_pending_if_disabled() argument 2423 queue_work_on(int cpu,struct workqueue_struct * wq,struct work_struct * work) queue_work_on() argument 2491 queue_work_node(int node,struct workqueue_struct * wq,struct work_struct * work) queue_work_node() argument 2535 struct work_struct *work = &dwork->work; __queue_delayed_work() local 2591 struct work_struct *work = &dwork->work; queue_delayed_work_on() local 2665 struct work_struct *work = &rwork->work; queue_rcu_work() local 2988 idle_cull_fn(struct work_struct * work) idle_cull_fn() argument 3050 struct work_struct *work; pool_mayday_timeout() local 3200 process_one_work(struct worker * worker,struct work_struct * work) process_one_work() argument 3376 struct work_struct *work; process_scheduled_works() local 3461 struct work_struct *work = worker_thread() local 3489 struct work_struct *work, *n; assign_rescuer_work() local 3683 struct work_struct *work = bh_worker() local 3720 struct work_struct work; global() member 3725 drain_dead_softirq_workfn(struct work_struct * work) drain_dead_softirq_workfn() argument 3835 struct work_struct work; global() member 3840 wq_barrier_func(struct work_struct * work) wq_barrier_func() argument 4025 touch_work_lockdep_map(struct work_struct * work,struct workqueue_struct * wq) touch_work_lockdep_map() argument 4252 start_flush_work(struct work_struct * work,struct wq_barrier * barr,bool from_cancel) start_flush_work() argument 4308 __flush_work(struct work_struct * work,bool from_cancel) __flush_work() argument 4372 flush_work(struct work_struct * work) flush_work() argument 4439 __cancel_work(struct work_struct * work,u32 cflags) __cancel_work() argument 4458 __cancel_work_sync(struct work_struct * work,u32 cflags) __cancel_work_sync() argument 4485 cancel_work(struct work_struct * work) cancel_work() argument 4509 cancel_work_sync(struct work_struct * work) cancel_work_sync() argument 4564 disable_work(struct work_struct * work) disable_work() argument 4583 disable_work_sync(struct work_struct * work) disable_work_sync() argument 4599 enable_work(struct work_struct * work) enable_work() argument 4677 struct work_struct *work = per_cpu_ptr(works, cpu); schedule_on_each_cpu() local 5177 pwq_release_workfn(struct kthread_work * work) pwq_release_workfn() argument 6227 work_busy(struct work_struct * work) work_busy() argument 6374 pr_cont_work(bool comma,struct work_struct * work,struct pr_cont_work_struct * pcwsp) pr_cont_work() argument 6395 struct work_struct *work; show_pwq() local 6887 struct work_struct work; global() member 6893 work_for_cpu_fn(struct work_struct * work) work_for_cpu_fn() argument [all...] |
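Most entry points indexed above — queue_work_on(), flush_work(), cancel_work_sync(), disable_work() — are reached through one canonical pattern: embed a work_struct in your object, recover the container in the handler, and synchronize before teardown. A minimal sketch:

#include <linux/workqueue.h>

struct my_dev {
        struct work_struct irq_work;
        int pending_events;
};

static void my_irq_work_fn(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, irq_work);

        dev->pending_events = 0;        /* heavy lifting in sleepable context */
}

static void my_dev_init(struct my_dev *dev)
{
        INIT_WORK(&dev->irq_work, my_irq_work_fn);
}

static void my_dev_irq(struct my_dev *dev) /* hard-irq context is fine */
{
        dev->pending_events++;
        queue_work(system_wq, &dev->irq_work);  /* no-op if already pending */
}

static void my_dev_teardown(struct my_dev *dev)
{
        cancel_work_sync(&dev->irq_work);       /* neither queued nor running after this */
}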
| /linux/net/wireless/ |
| H A D | debugfs.c | 148 struct wiphy_work work; in wiphy_locked_debugfs_read() 164 struct wiphy_work *work) in wiphy_locked_debugfs_read() 166 struct debugfs_read_work *w = container_of(work, typeof(*w), work); in wiphy_locked_debugfs_read() 177 wiphy_work_cancel(w->wiphy, &w->work); in wiphy_locked_debugfs_read() 192 struct debugfs_read_work work = { 200 .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion), 204 .cancel_data = &work, 210 wiphy_work_init(&work.work, wiphy_locked_debugfs_read_wor 115 struct wiphy_work work; global() member 131 wiphy_locked_debugfs_read_work(struct wiphy * wiphy,struct wiphy_work * work) wiphy_locked_debugfs_read_work() argument 159 struct debugfs_read_work work = { wiphy_locked_debugfs_read() local 195 struct wiphy_work work; global() member 211 wiphy_locked_debugfs_write_work(struct wiphy * wiphy,struct wiphy_work * work) wiphy_locked_debugfs_write_work() argument 238 struct debugfs_write_work work = { wiphy_locked_debugfs_write() local [all...] |
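wiphy_work runs with the wiphy mutex held, which is why the debugfs handlers above bounce reads and writes into a wiphy_work and block on a completion. A hedged sketch of the basic init/queue/cancel cycle, assuming cfg80211's wiphy_work API:

#include <net/cfg80211.h>

struct my_ctx {
        struct wiphy_work work;
        struct wiphy *wiphy;
};

/* invoked with the wiphy mutex already held */
static void my_wiphy_work(struct wiphy *wiphy, struct wiphy_work *work)
{
        struct my_ctx *ctx = container_of(work, struct my_ctx, work);
        /* safe to touch protected wiphy state via ctx here */
}

static void my_kick(struct my_ctx *ctx)
{
        wiphy_work_init(&ctx->work, my_wiphy_work);
        wiphy_work_queue(ctx->wiphy, &ctx->work);
}

static void my_stop(struct my_ctx *ctx)
{
        wiphy_work_cancel(ctx->wiphy, &ctx->work);      /* caller holds the wiphy mutex */
}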
| /linux/LICENSES/deprecated/ |
| H A D | CC0-1.0 | 26 and subsequent owner(s) (each and all, an "owner") of an original work of 27 authorship and/or a database (each, a "Work"). 29 Certain owners wish to permanently relinquish those rights to a Work for 37 works, or to gain reputation or greater distribution for their Work in 42 associating CC0 with a Work (the "Affirmer"), to the extent that he or she 43 is an owner of Copyright and Related Rights in the Work, voluntarily 44 elects to apply CC0 to the Work and publicly distribute the Work under its 46 Work and the meaning and intended legal effect of CC0 on those rights. 48 1. Copyright and Related Rights. A Work made available under CC0 may be 54 communicate, and translate a Work; [all …]
|
| /linux/tools/testing/selftests/bpf/progs/ |
| H A D | task_work.c | 45 struct elem *work = value; in process_work() local 47 bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0); in process_work() 57 struct elem *work; in oncpu_hash_map() local 65 work = bpf_map_lookup_elem(&hmap, &key); in oncpu_hash_map() 66 if (!work) in oncpu_hash_map() 68 bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work); in oncpu_hash_map() 75 struct elem *work; in oncpu_array_map() 79 work = bpf_map_lookup_elem(&arrmap, &key); in oncpu_array_map() 80 if (!work) in oncpu_array_map() 76 struct elem *work; oncpu_array_map() local 91 struct elem *work; oncpu_lru_map() local [all...] |
| /linux/io_uring/ |
| H A D | io-wq.c | 68 struct delayed_work work; member 160 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument 162 return __io_get_work_hash(atomic_read(&work->flags)); in io_get_work_hash() 274 * If there's work to do, returns true with acct->lock acquired. If not, 308 * starting work or finishing work. In either case, if it does in io_acct_activate_free_worker() 309 * to go sleep, we'll kick off a new task for this work anyway. in io_acct_activate_free_worker() 326 * Most likely an attempt to queue unbounded work on an io_wq that in io_wq_create_worker() 415 * work item after we canceled in io_wq_exit_workers(). in io_queue_worker_create() 432 /* Defer if current and next work are both hashed to the same chain */ 433 static bool io_wq_hash_defer(struct io_wq_work *work, struct io_wq_acct *acct) in io_wq_hash_defer() argument [all …]
|
| /linux/drivers/infiniband/core/ |
| H A D | cm.c | 95 struct cm_work *work); 187 struct delayed_work work; member 198 struct cm_work work; member 269 static void cm_work_handler(struct work_struct *work); 710 __be32 remote_id = timewait_info->work.remote_id; in cm_insert_remote_id() 716 if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) in cm_insert_remote_id() 718 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) in cm_insert_remote_id() 744 if (be32_lt(remote_id, timewait_info->work.remote_id)) in cm_find_remote_id() 746 else if (be32_gt(remote_id, timewait_info->work.remote_id)) in cm_find_remote_id() 753 res = cm_acquire_id(timewait_info->work.local_id, in cm_find_remote_id() [all …]
|
| /linux/LICENSES/preferred/ |
| H A D | LGPL-2.1 | 94 work, a derivative of the original library. The ordinary General Public 127 follow. Pay close attention to the difference between a "work based on the 128 library" and a "work that uses the library". The former contains code 144 The "Library", below, refers to any such software library or work which 145 has been distributed under these terms. A "work based on the Library" 146 means either the Library or any derivative work under copyright law: 147 that is to say, a work containing the Library or a portion of it, either 152 "Source code" for a work means the preferred form of the work for making 161 program is covered only if its contents constitute a work based on the 177 thus forming a work based on the Library, and copy and distribute such [all …]
|
| H A D | LGPL-2.0 | 88 a textual and legal sense, the linked executable is a combined work, a 108 follow. Pay close attention to the difference between a "work based on the 109 library" and a "work that uses the library". The former contains code 128 The "Library", below, refers to any such software library or work which 129 has been distributed under these terms. A "work based on the Library" 130 means either the Library or any derivative work under copyright law: 131 that is to say, a work containing the Library or a portion of it, either 136 "Source code" for a work means the preferred form of the work for making 145 program is covered only if its contents constitute a work based on the 161 thus forming a work based on the Library, and copy and distribute such [all …]
|
| /linux/kernel/unwind/ |
| H A D | deferred.c | 152 struct unwind_work *work; in process_unwind_deferred() local 177 list_for_each_entry_srcu(work, &callbacks, list, in process_unwind_deferred() 179 if (test_bit(work->bit, &bits)) { in process_unwind_deferred() 180 work->func(work, &trace, cookie); in process_unwind_deferred() 182 info->cache->unwind_completed |= BIT(work->bit); in process_unwind_deferred() 201 task_work_cancel(task, &info->work); in unwind_deferred_task_exit() 206 * @work: Unwind descriptor requesting the trace 209 * Schedule a user space unwind to be done in task work before exiting the 218 * It's valid to call this function multiple times for the same @work within 229 int unwind_deferred_request(struct unwind_work *work, u64 *cookie) in unwind_deferred_request() argument [all …]
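A rough sketch of how a tracer might consume this API, pieced together from the snippet only: the callback shape mirrors the work->func(work, &trace, cookie) call in process_unwind_deferred(), while unwind_deferred_init(), the header name, and struct unwind_stacktrace are assumptions, not verified against the source:

#include <linux/unwind_deferred.h>      /* header name assumed */

/* callback shape taken from process_unwind_deferred() above */
static void my_unwind_cb(struct unwind_work *work,
                         struct unwind_stacktrace *trace, u64 cookie)
{
        /* consume the user-space stacktrace, keyed by cookie */
}

static struct unwind_work my_uwork;

static void my_tracer_init(void)
{
        unwind_deferred_init(&my_uwork, my_unwind_cb);  /* assumed helper */
}

static int my_event_hook(void)
{
        u64 cookie;

        /* From event context: defer the unwind; the callback runs from task
         * work before the task returns to user space. Repeated requests for
         * the same task yield the same cookie. */
        return unwind_deferred_request(&my_uwork, &cookie);
}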
|
| /linux/drivers/accessibility/speakup/ |
| H A D | selection.c | 20 struct work_struct work; member 25 static void __speakup_set_selection(struct work_struct *work) in __speakup_set_selection() argument 28 container_of(work, struct speakup_selection_work, work); in __speakup_set_selection() 58 .work = __WORK_INITIALIZER(speakup_sel_work.work, 65 * cancelling selection work. getting kref first establishes the in speakup_set_selection() 87 schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work); in speakup_set_selection() 96 cancel_work_sync(&speakup_sel_work.work); in speakup_cancel_selection() 97 /* setting to null so that if work fails to run and we cancel it, in speakup_cancel_selection() 106 static void __speakup_paste_selection(struct work_struct *work) in __speakup_paste_selection() argument 109 container_of(work, struct speakup_selection_work, work); in __speakup_paste_selection() [all …]
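The pattern here is an embedded work_struct initialized at build time with __WORK_INITIALIZER(), scheduled via schedule_work_on(WORK_CPU_UNBOUND, ...), and torn down with cancel_work_sync() before its payload is cleared. A generic sketch of the same idiom, with the payload simplified to a bare pointer:

#include <linux/workqueue.h>

struct sel_work {
        struct work_struct work;
        void *sel_data;         /* payload consumed by the handler */
};

static void do_selection(struct work_struct *work)
{
        struct sel_work *sw = container_of(work, struct sel_work, work);
        /* consume sw->sel_data in process context */
}

/* embedded work initialized at build time, like speakup_sel_work above */
static struct sel_work sel_work = {
        .work = __WORK_INITIALIZER(sel_work.work, do_selection),
};

static void kick_selection(void *data)
{
        sel_work.sel_data = data;
        schedule_work_on(WORK_CPU_UNBOUND, &sel_work.work);
}

static void cancel_selection(void)
{
        cancel_work_sync(&sel_work.work);
        sel_work.sel_data = NULL;       /* mirror the "clear after cancel" idiom above */
}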
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/lag/ |
| H A D | mpesw.c | 136 static void mlx5_mpesw_work(struct work_struct *work) in mlx5_mpesw_work() argument 138 struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work); in mlx5_mpesw_work() 167 struct mlx5_mpesw_work_st *work; in mlx5_lag_mpesw_queue_work() local 173 work = kzalloc_obj(*work); in mlx5_lag_mpesw_queue_work() 174 if (!work) in mlx5_lag_mpesw_queue_work() 177 INIT_WORK(&work->work, mlx5_mpesw_work); in mlx5_lag_mpesw_queue_work() 178 init_completion(&work->comp); in mlx5_lag_mpesw_queue_work() 179 work->op = op; in mlx5_lag_mpesw_queue_work() 180 work->lag = ldev; in mlx5_lag_mpesw_queue_work() 182 if (!queue_work(ldev->wq, &work->work)) { in mlx5_lag_mpesw_queue_work() [all …]
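mlx5_lag_mpesw_queue_work() shows a common "queue and wait" shape: pair the work item with a completion, and treat queue_work() returning false (item already pending) as an error. A generic sketch of that shape; all names are hypothetical:

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct my_op {
        struct work_struct work;
        struct completion comp;
        int result;
};

static void my_op_fn(struct work_struct *work)
{
        struct my_op *op = container_of(work, struct my_op, work);

        op->result = 0;                 /* ... perform the operation ... */
        complete(&op->comp);            /* release the submitter */
}

static int my_op_run(struct workqueue_struct *wq)
{
        struct my_op *op;
        int ret;

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op)
                return -ENOMEM;

        INIT_WORK(&op->work, my_op_fn);
        init_completion(&op->comp);

        if (!queue_work(wq, &op->work)) {       /* false: already queued */
                ret = -EINVAL;
                goto out;
        }
        wait_for_completion(&op->comp);
        ret = op->result;
out:
        kfree(op);
        return ret;
}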
|