/linux/fs/smb/server/
ksmbd_work.c

    ksmbd_alloc_work_struct():
            struct ksmbd_work *work = kmem_cache_zalloc(work_cache, KSMBD_DEFAULT_GFP);

            if (work) {
                    work->compound_fid = KSMBD_NO_FID;
                    work->compound_pfid = KSMBD_NO_FID;
                    INIT_LIST_HEAD(&work->request_entry);
                    INIT_LIST_HEAD(&work->async_request_entry);
                    INIT_LIST_HEAD(&work->fp_entry);
                    INIT_LIST_HEAD(&work->aux_read_list);
                    work->iov_alloc_cnt = 4;
                    work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
    [all …]
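The allocation above combines a zeroed slab-cache object with a kcalloc'd iovec array. A minimal sketch of the same pattern follows; the cache and item type (my_cache, struct my_item) are hypothetical, not part of ksmbd:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/uio.h>

    struct my_item {
            struct list_head entry;
            struct kvec *iov;
    };

    /* created once at init, e.g. my_cache = KMEM_CACHE(my_item, 0); */
    static struct kmem_cache *my_cache;

    static struct my_item *my_alloc_item(void)
    {
            /* zalloc: the object comes back fully zeroed */
            struct my_item *item = kmem_cache_zalloc(my_cache, GFP_KERNEL);

            if (!item)
                    return NULL;

            INIT_LIST_HEAD(&item->entry);
            /* kcalloc checks the n * size multiplication for overflow */
            item->iov = kcalloc(4, sizeof(struct kvec), GFP_KERNEL);
            if (!item->iov) {
                    kmem_cache_free(my_cache, item);
                    return NULL;
            }
            return item;
    }
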
ksmbd_work.h

    struct work_struct work;        /* member of struct ksmbd_work */

    /* The +4 below skips the 4-byte transport length field (RFC 1002
     * style header) that precedes each SMB2 header in these buffers. */
    static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
    {
            return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
    }

    static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work)
    {
            return work->response_buf + work->curr_smb2_rsp_hdr_off + 4;
    }

    static inline void *ksmbd_req_buf_next(struct ksmbd_work *work)
    {
            return work->request_buf + work->next_smb2_rcv_hdr_off + 4;
    }

    void ksmbd_free_work_struct(struct ksmbd_work *work);
    bool ksmbd_queue_work(struct ksmbd_work *work);
    int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
    [all …]

smb_common.c

    int ksmbd_verify_smb_message(struct ksmbd_work *work)
    {
            struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work);
            ...
            return ksmbd_smb2_check_message(work);
            ...
            hdr = work->request_buf;
            ...
            work->conn->outstanding_credits++;
    ...
    static u16 get_smb1_cmd_val(struct ksmbd_work *work)
    ...
    static int init_smb1_rsp_hdr(struct ksmbd_work *work)
    {
            struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
            struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
    ...
    static int smb1_check_user_session(struct ksmbd_work *work)
    [all …]

/linux/virt/kvm/
async_pf.c

    static void async_pf_execute(struct work_struct *work)
    {
            ... container_of(work, struct kvm_async_pf, work);
            ...
            /* ... work item is fully processed. */

    static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
    {
            /*
             * The async #PF is "done", but KVM must wait for the work item itself,
             * ... after the last call to module_put().  Note, flushing the work item
             * ... need to be flushed (but sanity check that the work wasn't queued).
             */
            if (work->wakeup_all)
                    WARN_ON_ONCE(work ...

    Further matches: local struct kvm_async_pf *work variables in
    kvm_clear_async_pf_completion_queue(), kvm_check_async_pf_completion(),
    kvm_setup_async_pf() and kvm_async_pf_wakeup_all().
    [all …]

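The flush-before-free discipline referenced in that comment is a general work_struct rule: the handler must have returned before the memory embedding the work item is released. A minimal sketch of the pattern; struct my_job and its functions are hypothetical, not from KVM:

    #include <linux/printk.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_job {
            struct work_struct work;
            int payload;
    };

    static void my_job_fn(struct work_struct *work)
    {
            /* recover the container from the embedded work_struct */
            struct my_job *job = container_of(work, struct my_job, work);

            pr_info("payload=%d\n", job->payload);
    }

    static void my_job_destroy(struct my_job *job)
    {
            /*
             * Wait for a possibly-running handler to finish first;
             * kfree() while my_job_fn() is still executing would be
             * a use-after-free.
             */
            flush_work(&job->work);
            kfree(job);
    }
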
/linux/include/trace/events/
workqueue.h

    /*
     * workqueue_queue_work - called when a work gets queued
     * @work: pointer to struct work_struct
     *
     * This event occurs when a work is queued immediately or once a
     * delayed work is actually queued on a workqueue (ie: once the delay
     * has been reached).
     */
    ...
                 struct work_struct *work),

        TP_ARGS(req_cpu, pwq, work),
        ...
                __field( void *, work )
        ...
                __entry->work = work;
                __entry->function = work ...
    [all …]

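For context, a TRACE_EVENT() definition pairs a prototype with a packed record layout and a format string. A minimal sketch follows; the "mysys" system and event are invented for illustration, and the usual trace-header placement rules apply (such a header would live under include/trace/events/):

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM mysys

    #if !defined(_TRACE_MYSYS_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_MYSYS_H

    #include <linux/tracepoint.h>
    #include <linux/workqueue.h>

    TRACE_EVENT(mysys_queue_work,

            TP_PROTO(int cpu, struct work_struct *work),

            TP_ARGS(cpu, work),

            /* layout of the record stored in the ring buffer */
            TP_STRUCT__entry(
                    __field(int,    cpu)
                    __field(void *, work)
            ),

            /* executed at the trace site to fill the record */
            TP_fast_assign(
                    __entry->cpu  = cpu;
                    __entry->work = work;
            ),

            TP_printk("cpu=%d work=%p", __entry->cpu, __entry->work)
    );

    #endif /* _TRACE_MYSYS_H */

    /* This part must be outside the include guard */
    #include <trace/define_trace.h>
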
/linux/include/linux/
completion.h

    #define COMPLETION_INITIALIZER(work) \
            { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

    #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
            (*({ init_completion_map(&(work), &(map)); &(work); }))

    #define COMPLETION_INITIALIZER_ONSTACK(work) \
            (*({ init_completion(&work); &work; }))

    #define DECLARE_COMPLETION(work) \
            struct completion work = COMPLETION_INITIALIZER(work)

    # define DECLARE_COMPLETION_ONSTACK(work) \
            struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
    [all …]

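A minimal sketch of how these declarations are used: one side blocks in wait_for_completion() until the other calls complete(). The thread function and names are hypothetical:

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>

    static int my_thread_fn(void *data)
    {
            struct completion *done = data;

            /* ... perform the deferred setup ... */
            complete(done);         /* wake the waiter exactly once */
            return 0;
    }

    static int my_start_and_wait(void)
    {
            /* on-stack is safe here because we wait before returning */
            DECLARE_COMPLETION_ONSTACK(done);
            struct task_struct *t;

            t = kthread_run(my_thread_fn, &done, "my-worker");
            if (IS_ERR(t))
                    return PTR_ERR(t);

            wait_for_completion(&done);
            return 0;
    }
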
workqueue.h

    #define work_data_bits(work) ((unsigned long *)(&(work)->data))
    ...
    struct work_struct work;        /* embedded member (delayed_work, rcu_work, ...) */
    ...
    static inline struct delayed_work *to_delayed_work(struct work_struct *work)
    {
            return container_of(work, struct delayed_work, work);
    }

    static inline struct rcu_work *to_rcu_work(struct work_struct *work)
    {
            return container_of(work, struct rcu_work, work);
    }
    ...
            .work = __WORK_INITIALIZER((n).work, (f)), \
    ...
    extern void __init_work(struct work_struct *work, int onstack);
    [all …]

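to_delayed_work() is how a handler gets from the work_struct the workqueue hands it back to the delayed_work, and from there to its own state. A hypothetical self-rearming poller as a sketch:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct my_poller {
            struct delayed_work dwork;
            int interval_ms;
    };

    static void my_poll_fn(struct work_struct *work)
    {
            /* work points at dwork.work; step back out to the container */
            struct delayed_work *dwork = to_delayed_work(work);
            struct my_poller *p = container_of(dwork, struct my_poller, dwork);

            /* ... poll the device ... */

            /* re-arm ourselves for the next interval */
            schedule_delayed_work(&p->dwork, msecs_to_jiffies(p->interval_ms));
    }

    static void my_poller_start(struct my_poller *p)
    {
            INIT_DELAYED_WORK(&p->dwork, my_poll_fn);
            schedule_delayed_work(&p->dwork, msecs_to_jiffies(p->interval_ms));
    }
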
jump_label_ratelimit.h

    struct delayed_work work;       /* member of the deferred-key structs */
    ...
            __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
    ...
            __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
    ...
            __static_key_deferred_flush((x), &(x)->work)
    ...
    extern void __static_key_slow_dec_deferred(struct static_key *key,
                                               struct delayed_work *work,
                                               unsigned long timeout);
    extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
    ...
    extern void jump_label_update_timeout(struct work_struct *work);
    ...
            .work = __DELAYED_WORK_INITIALIZER((name).work, \
    [all …]

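A sketch of how a rate-limited key is used: increments patch the branch in immediately, while decrements are batched through the embedded delayed_work so rapid enable/disable cycles do not thrash the text-patching machinery. my_feature_key and the helpers are invented for illustration:

    #include <linux/init.h>
    #include <linux/jiffies.h>
    #include <linux/jump_label_ratelimit.h>

    static struct static_key_deferred my_feature_key;

    static int __init my_feature_init(void)
    {
            /* coalesce decrements that land within one second */
            jump_label_rate_limit(&my_feature_key, HZ);
            return 0;
    }

    static void my_feature_get(void)
    {
            static_key_slow_inc(&my_feature_key.key);       /* immediate */
    }

    static void my_feature_put(void)
    {
            static_key_slow_dec_deferred(&my_feature_key);  /* batched */
    }

    static bool my_feature_active(void)
    {
            return static_key_false(&my_feature_key.key);
    }
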
/linux/tools/perf/
builtin-kwork.c

    work_search():
            struct kwork_work *work;
            ...
            work = container_of(node, struct kwork_work, node);
            cmp = work_cmp(sort_list, key, work);
            ...
            if (work->name == NULL)
                    work->name = key->name;
            return work;

    work_new():
            struct kwork_work *work = zalloc(sizeof(*work));

            if (work == NULL) {
            ...
            INIT_LIST_HEAD(&work->atom_list[i]);
            ...
            work->id = key->id;
    [all …]

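work_search() above walks a red-black tree, recovering each node's container with container_of(). The same lookup shape in a self-contained, hypothetical form (struct my_node and the integer key are invented):

    #include <linux/rbtree.h>

    struct my_node {
            struct rb_node node;
            unsigned long key;
    };

    static struct my_node *my_search(struct rb_root *root, unsigned long key)
    {
            struct rb_node *n = root->rb_node;

            while (n) {
                    /* recover the container from the embedded rb_node */
                    struct my_node *item = container_of(n, struct my_node, node);

                    if (key < item->key)
                            n = n->rb_left;
                    else if (key > item->key)
                            n = n->rb_right;
                    else
                            return item;    /* exact match */
            }
            return NULL;
    }
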
/linux/drivers/staging/octeon/
ethernet-rx.c

    static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
    {
            ...
            port = work->word0.pip.cn68xx.pknd;
            ...
            port = work->word1.cn38xx.ipprt;

            if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
            ...
            if (work->word2.snoip.err_code == 5 ||
                work->word2.snoip.err_code == 7) {
                    ...
                    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
                    ...
                    while (i < work->word1.len - 1) {
                    ...
                    work->packet_ptr.s.addr += i + 1;
                    work->word1.len -= i + 5;
    [all …]

/linux/kernel/
kthread.c

    kthread_worker_fn():
            struct kthread_work *work;
            ...
            work = NULL;
            ...
            work = list_first_entry(&worker->work_list,
                                    struct kthread_work, node);
            list_del_init(&work->node);
            ...
            worker->current_work = work;
            ...
            if (work) {
                    kthread_work_func_t func = work->func;

                    trace_sched_kthread_work_execute_start(work);
                    work->func(work);
                    ...
                    trace_sched_kthread_work_execute_end(work, func);
    [all …]

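kthread_worker_fn() is the loop behind dedicated kthread workers: items run sequentially in one thread rather than on the shared workqueue pool. A minimal usage sketch; all names are hypothetical:

    #include <linux/err.h>
    #include <linux/kthread.h>

    struct my_ctx {
            struct kthread_worker *worker;
            struct kthread_work work;
    };

    static void my_work_fn(struct kthread_work *work)
    {
            struct my_ctx *ctx = container_of(work, struct my_ctx, work);

            /* runs in the dedicated "my-worker" thread, one item at a time */
            (void)ctx;
    }

    static int my_ctx_init(struct my_ctx *ctx)
    {
            ctx->worker = kthread_create_worker(0, "my-worker");
            if (IS_ERR(ctx->worker))
                    return PTR_ERR(ctx->worker);

            kthread_init_work(&ctx->work, my_work_fn);
            kthread_queue_work(ctx->worker, &ctx->work);
            return 0;
    }

    static void my_ctx_exit(struct my_ctx *ctx)
    {
            kthread_flush_work(&ctx->work);
            kthread_destroy_worker(ctx->worker);    /* flushes, stops thread */
    }
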
workqueue.c

    work_is_static_object():
            struct work_struct *work = addr;

            return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));

    work_fixup_init():
            struct work_struct *work = addr;
            ...
            cancel_work_sync(work);
            debug_object_init(work, &work_debug_descr);

    work_fixup_free():
            struct work_struct *work = addr;
            ...
            cancel_work_sync(work);
            debug_object_free(work, &work_debug_descr);

    static inline void debug_work_activate(struct work_struct *work)
    {
            debug_object_activate(work, &work_debug_descr);
    [all …]

/linux/tools/testing/selftests/bpf/progs/
task_work.c

    process_work():
            struct elem *work = value;

            bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);

    oncpu_hash_map():
            struct elem *work;
            ...
            work = bpf_map_lookup_elem(&hmap, &key);
            if (!work)
            ...
            bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);

    oncpu_array_map():
            struct elem *work;
            ...
            work = bpf_map_lookup_elem(&arrmap, &key);
            if (!work)
            ...
            bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL);
    [all …]

task_work_fail.c

    process_work():
            struct elem *work = value;

            bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);

    mismatch_map():
            struct elem *work;
            ...
            work = bpf_map_lookup_elem(&arrmap, &key);
            if (!work)
            ...
            bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);

    map_null():
            struct elem *work;
            ...
            work = bpf_map_lookup_elem(&arrmap, &key);
            if (!work)
            ...
            bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL);

/linux/net/wireless/
debugfs.c

    struct wiphy_work work;         /* member of struct debugfs_read_work */

    wiphy_locked_debugfs_read_work(struct wiphy *wiphy, struct wiphy_work *work):
            struct debugfs_read_work *w = container_of(work, typeof(*w), work);
            ...
            wiphy_work_cancel(w->wiphy, &w->work);

    wiphy_locked_debugfs_read():
            struct debugfs_read_work work = {
                    ...
                    .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion),
                    ...
                    .cancel_data = &work,
            };
            ...
            wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work);

    Further matches: the same pattern on the write side, in
    wiphy_locked_debugfs_write_work() and wiphy_locked_debugfs_write()
    (struct debugfs_write_work).
    [all …]

/linux/drivers/accessibility/speakup/
selection.c

    struct work_struct work;        /* member of struct speakup_selection_work */

    static void __speakup_set_selection(struct work_struct *work)
    {
            ... = container_of(work, struct speakup_selection_work, work);
    ...
    .work = __WORK_INITIALIZER(speakup_sel_work.work,
    ...
    schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work);
    ...
    cancel_work_sync(&speakup_sel_work.work);

    static void __speakup_paste_selection(struct work_struct *work)
    {
            ... = container_of(work, struct speakup_selection_work, work);
    ...
    .work = __WORK_INITIALIZER(speakup_paste_work.work,
    ...
    schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
    [all …]

/linux/drivers/infiniband/core/
cm.c

    struct cm_work *work);          /* forward-declared handler parameter */
    ...
    struct delayed_work work;       /* member */
    ...
    struct cm_work work;            /* member */
    ...
    static void cm_work_handler(struct work_struct *work);

    cm_insert_remote_id():
            __be32 remote_id = timewait_info->work.remote_id;
            ...
            if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
            ...
            else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))

    cm_find_remote_id():
            if (be32_lt(remote_id, timewait_info->work.remote_id))
            ...
            else if (be32_gt(remote_id, timewait_info->work.remote_id))
            ...
            res = cm_acquire_id(timewait_info->work.local_id,
    [all …]

/linux/drivers/net/wireless/st/cw1200/
sta.h

    void cw1200_event_handler(struct work_struct *work);
    void cw1200_bss_loss_work(struct work_struct *work);
    void cw1200_bss_params_work(struct work_struct *work);
    void cw1200_keep_alive_work(struct work_struct *work);
    void cw1200_tx_failure_work(struct work_struct *work);
    ...
    void cw1200_join_timeout(struct work_struct *work);
    void cw1200_unjoin_work(struct work_struct *work);
    void cw1200_join_complete_work(struct work_struct *work);
    void cw1200_wep_key_work(struct work_struct *work);
    ...
    void cw1200_update_filtering_work(struct work_struct *work);
    [all …]

/linux/fs/
fs-writeback.c

    static void finish_writeback_work(struct wb_writeback_work *work)
    {
            struct wb_completion *done = work->done;

            if (work->auto_free)
                    kfree(work);
            ...

    wb_queue_work(..., struct wb_writeback_work *work):
            trace_writeback_queue(wb, work);

            if (work->done)
                    atomic_inc(&work->done->cnt);
            ...
            list_add_tail(&work->list, &wb->work_list);
            ...
            finish_writeback_work(work);
    [all …]

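The done->cnt increment per queued item is a fan-in pattern: each outstanding work item holds a count on a shared tracker and the waiter sleeps until the count drains. The sketch below shows only that shape with plain atomics and a waitqueue; it is a simplified illustration, not the actual wb_completion implementation:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct my_done {
            atomic_t cnt;
            wait_queue_head_t waitq;
    };

    static void my_done_init(struct my_done *done)
    {
            atomic_set(&done->cnt, 1);      /* +1 held by the waiter itself */
            init_waitqueue_head(&done->waitq);
    }

    static void my_done_get(struct my_done *done)   /* before queueing an item */
    {
            atomic_inc(&done->cnt);
    }

    static void my_done_put(struct my_done *done)   /* when an item finishes */
    {
            if (atomic_dec_and_test(&done->cnt))
                    wake_up_all(&done->waitq);
    }

    static void my_done_wait(struct my_done *done)
    {
            my_done_put(done);              /* drop the waiter's own count */
            wait_event(done->waitq, !atomic_read(&done->cnt));
    }
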
/linux/io_uring/
io-wq.c

    struct delayed_work work;       /* member */
    ...
    static inline unsigned int io_get_work_hash(struct io_wq_work *work)
    {
            return __io_get_work_hash(atomic_read(&work->flags));
    }
    ...
    static bool io_wq_hash_defer(struct io_wq_work *work, struct io_wq_acct *acct)
    {
            ...
            work_flags = atomic_read(&work->flags);

    io_get_next_work():
            struct io_wq_work *work, *tail;
            ...
            work = container_of(node, struct io_wq_work, list);
            ...
            work_flags = atomic_read(&work->flags);
            ...
            return work;
            ...
            return work;
    [all …]

/linux/lib/
once.c

    struct work_struct work;        /* member of struct once_work */

    once_deferred():
            struct once_work *work;

            work = container_of(w, struct once_work, work);
            BUG_ON(!static_key_enabled(work->key));
            static_branch_disable(work->key);
            module_put(work->module);
            kfree(work);

    once_disable_jump():
            ...
            INIT_WORK(&w->work, once_deferred);
            ...
            schedule_work(&w->work);

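once.c is the backend for the DO_ONCE() macro in <linux/once.h>: the callback runs on the first call only, after which the deferred work above patches the static branch out entirely. A usage sketch; the function and variable names are hypothetical:

    #include <linux/once.h>

    static int my_seed;

    static void my_expensive_init(int *seed)
    {
            *seed = 42;     /* e.g. one-time table or key setup */
    }

    static int my_fast_path(void)
    {
            /*
             * The first caller runs my_expensive_init(); later calls
             * skip it via the patched-out static branch.
             */
            DO_ONCE(my_expensive_init, &my_seed);
            return my_seed;
    }
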
/linux/kernel/bpf/
mmap_unlock_work.h

    bpf_mmap_unlock_get_irq_work():
            struct mmap_unlock_irq_work *work = NULL;
            ...
            work = this_cpu_ptr(&mmap_unlock_work);
            if (irq_work_is_busy(&work->irq_work)) {
            ...
            *work_ptr = work;

    static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
    {
            if (!work) {
            ...
            work->mm = mm;
            ...
            irq_work_queue(&work->irq_work);
    [all …]

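The mechanism being reused here is irq_work: a callback queued from a context too restricted to do the job directly (queueing only sets a flag and raises an IPI, so it is usable even from NMI) runs shortly afterwards from interrupt context. A minimal hypothetical sketch:

    #include <linux/irq_work.h>
    #include <linux/printk.h>

    static void my_deferred_fn(struct irq_work *w)
    {
            /* runs from a safe interrupt context soon after queueing */
            pr_info("deferred irq_work ran\n");
    }

    static struct irq_work my_work = IRQ_WORK_INIT(my_deferred_fn);

    static void my_restricted_context(void)
    {
            /* skip if the previous instance has not run yet */
            if (!irq_work_is_busy(&my_work))
                    irq_work_queue(&my_work);
    }
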
/linux/arch/sparc/kernel/
sun4d_smp.c

    smp4d_ipi_init():
            struct sun4d_ipi_work *work;
            ...
            work = &per_cpu(sun4d_ipi_work, cpu);
            work->single = work->msk = work->resched = 0;

    sun4d_ipi_interrupt():
            struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);

            if (work->single) {
                    work->single = 0;
            ...
            if (work->msk) {
                    work->msk = 0;
            ...
            if (work->resched) {
                    work->resched = 0;
    [all …]

leon_smp.c

    leon_ipi_init():
            struct leon_ipi_work *work;
            ...
            work = &per_cpu(leon_ipi_work, cpu);
            work->single = work->msk = work->resched = 0;

    leon_ipi_single():
            struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
            ...
            work->single = 1;

    leon_ipi_mask_one():
            struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
            ...
            work->msk = 1;

    leon_ipi_resched():
            struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
            ...
            work->resched = 1;

    leonsmp_ipi_interrupt():
            struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);
    [all …]

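Both the sun4d and leon files use the same shape: the sender writes a flag into the target CPU's per-CPU slot, then the target's IPI handler consumes only its own slot. A stripped-down hypothetical sketch of that pattern (the hardware IPI trigger itself is elided):

    #include <linux/percpu.h>

    struct my_ipi_work {
            int single;
            int resched;
    };

    static DEFINE_PER_CPU(struct my_ipi_work, my_ipi_work);

    static void my_ipi_send_single(int cpu)
    {
            /* write into the *target* CPU's slot */
            struct my_ipi_work *work = &per_cpu(my_ipi_work, cpu);

            work->single = 1;
            /* ... then trigger the hardware IPI for @cpu ... */
    }

    static void my_ipi_interrupt(void)
    {
            /* running on the target CPU: look only at our own slot */
            struct my_ipi_work *work = this_cpu_ptr(&my_ipi_work);

            if (work->single) {
                    work->single = 0;
                    /* ... handle the single-call IPI ... */
            }
    }
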
/linux/net/ceph/crush/
mapper.c

    bucket_perm_choose(..., struct crush_work_bucket *work, ...):
            if (work->perm_x != (__u32)x || work->perm_n == 0) {
                    ...
                    work->perm_x = x;
                    ...
                    work->perm[0] = s;
                    work->perm_n = 0xffff;  /* magic value, see below */
                    ...
                    work->perm[i] = i;
                    work->perm_n = 0;
            } else if (work->perm_n == 0xffff) {
                    ...
                    work->perm[i] = i;
                    work->perm[work->perm[0]] = 0;
    [all …]
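Here struct crush_work_bucket is per-lookup scratch space: it caches a permutation keyed by the input x and rebuilds only when x changes. The sketch below shows just that memoization shape, with a deterministic hash-driven shuffle standing in for CRUSH's incremental construction; it is an illustration, not the CRUSH algorithm, and all names are invented:

    #include <linux/jhash.h>
    #include <linux/types.h>

    #define MY_BUCKET_SIZE 8

    struct my_perm_cache {
            u32 perm_x;                     /* input the cache was built for */
            u32 perm_n;                     /* 0 means "cache invalid" */
            u32 perm[MY_BUCKET_SIZE];
    };

    static u32 my_perm_choose(struct my_perm_cache *work, u32 x, u32 pos)
    {
            u32 i;

            if (work->perm_x != x || work->perm_n == 0) {
                    /* rebuild: identity, then a shuffle derived from x */
                    work->perm_x = x;
                    for (i = 0; i < MY_BUCKET_SIZE; i++)
                            work->perm[i] = i;
                    for (i = MY_BUCKET_SIZE - 1; i > 0; i--) {
                            u32 j = jhash_2words(x, i, 0) % (i + 1);
                            u32 tmp = work->perm[i];

                            work->perm[i] = work->perm[j];
                            work->perm[j] = tmp;
                    }
                    work->perm_n = MY_BUCKET_SIZE;
            }
            /* repeated calls with the same x reuse the cached permutation */
            return work->perm[pos % MY_BUCKET_SIZE];
    }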
|