
Searched full:work (Results 1 – 25 of 4855) sorted by relevance

/linux/fs/smb/server/
ksmbd_work.c
21 struct ksmbd_work *work = kmem_cache_zalloc(work_cache, KSMBD_DEFAULT_GFP); in ksmbd_alloc_work_struct() local
23 if (work) { in ksmbd_alloc_work_struct()
24 work->compound_fid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
25 work->compound_pfid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
26 INIT_LIST_HEAD(&work->request_entry); in ksmbd_alloc_work_struct()
27 INIT_LIST_HEAD(&work->async_request_entry); in ksmbd_alloc_work_struct()
28 INIT_LIST_HEAD(&work->fp_entry); in ksmbd_alloc_work_struct()
29 INIT_LIST_HEAD(&work->aux_read_list); in ksmbd_alloc_work_struct()
30 work->iov_alloc_cnt = 4; in ksmbd_alloc_work_struct()
31 work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec), in ksmbd_alloc_work_struct()
[all …]
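The ksmbd_work.c hit above shows a common kernel pattern: a per-request work descriptor allocated from a dedicated slab cache, with its embedded list heads and a small iovec array initialised up front. A minimal sketch of that pattern, assuming hypothetical demo_* names and GFP_KERNEL allocations (this is not ksmbd code):

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/uio.h>
#include <linux/errno.h>

struct demo_work {
	struct list_head request_entry;
	struct kvec *iov;
	unsigned int iov_alloc_cnt;
};

static struct kmem_cache *demo_work_cache;

static int demo_cache_init(void)
{
	demo_work_cache = kmem_cache_create("demo_work", sizeof(struct demo_work),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	return demo_work_cache ? 0 : -ENOMEM;
}

static struct demo_work *demo_alloc_work_struct(void)
{
	/* zero-filled allocation from the slab cache */
	struct demo_work *work = kmem_cache_zalloc(demo_work_cache, GFP_KERNEL);

	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->request_entry);
	work->iov_alloc_cnt = 4;
	work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec), GFP_KERNEL);
	if (!work->iov) {
		kmem_cache_free(demo_work_cache, work);
		return NULL;
	}
	return work;
}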
server.c
88 * @work: smb work containing server thread information
92 static inline int check_conn_state(struct ksmbd_work *work) in check_conn_state() argument
96 if (ksmbd_conn_exiting(work->conn) || in check_conn_state()
97 ksmbd_conn_need_reconnect(work->conn)) { in check_conn_state()
98 rsp_hdr = smb_get_msg(work->response_buf); in check_conn_state()
108 static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn, in __process_request() argument
115 if (check_conn_state(work)) in __process_request()
118 if (ksmbd_verify_smb_message(work)) { in __process_request()
119 conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); in __process_request()
123 command = conn->ops->get_cmd_val(work); in __process_request()
[all …]
smb2pdu.h
327 bool is_smb2_neg_cmd(struct ksmbd_work *work);
328 bool is_smb2_rsp(struct ksmbd_work *work);
330 u16 get_smb2_cmd_val(struct ksmbd_work *work);
331 void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
332 int init_smb2_rsp_hdr(struct ksmbd_work *work);
333 int smb2_allocate_rsp_buf(struct ksmbd_work *work);
334 bool is_chained_smb2_message(struct ksmbd_work *work);
335 int init_smb2_neg_rsp(struct ksmbd_work *work);
336 void smb2_set_err_rsp(struct ksmbd_work *work);
337 int smb2_check_user_session(struct ksmbd_work *work);
[all …]
smb2pdu.c
44 static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) in __wbuf() argument
46 if (work->next_smb2_rcv_hdr_off) { in __wbuf()
47 *req = ksmbd_req_buf_next(work); in __wbuf()
48 *rsp = ksmbd_resp_buf_next(work); in __wbuf()
50 *req = smb_get_msg(work->request_buf); in __wbuf()
51 *rsp = smb_get_msg(work->response_buf); in __wbuf()
87 * @work: smb work
92 int smb2_get_ksmbd_tcon(struct ksmbd_work *work) in smb2_get_ksmbd_tcon() argument
94 struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work); in smb2_get_ksmbd_tcon()
105 if (xa_empty(&work->sess->tree_conns)) { in smb2_get_ksmbd_tcon()
[all …]
ksmbd_work.h
86 struct work_struct work; member
96 * @work: smb work containing response buffer
98 static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work) in ksmbd_resp_buf_next() argument
100 return work->response_buf + work->next_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_next()
105 * @work: smb work containing response buffer
107 static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work) in ksmbd_resp_buf_curr() argument
109 return work->response_buf + work->curr_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_curr()
114 * @work: smb work containing response buffer
116 static inline void *ksmbd_req_buf_next(struct ksmbd_work *work) in ksmbd_req_buf_next() argument
118 return work->request_buf + work->next_smb2_rcv_hdr_off + 4; in ksmbd_req_buf_next()
[all …]
smb_common.c
129 * @work: smb work
135 int ksmbd_verify_smb_message(struct ksmbd_work *work) in ksmbd_verify_smb_message() argument
137 struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work); in ksmbd_verify_smb_message()
141 return ksmbd_smb2_check_message(work); in ksmbd_verify_smb_message()
143 hdr = smb_get_msg(work->request_buf); in ksmbd_verify_smb_message()
146 work->conn->outstanding_credits++; in ksmbd_verify_smb_message()
305 * @work: smb work containing smb header
309 static u16 get_smb1_cmd_val(struct ksmbd_work *work) in get_smb1_cmd_val() argument
316 * @work: smb work containing smb request
320 static int init_smb1_rsp_hdr(struct ksmbd_work *work) in init_smb1_rsp_hdr() argument
[all …]
/linux/include/trace/events/
workqueue.h
14 * workqueue_queue_work - called when a work gets queued
17 * @work: pointer to struct work_struct
19 * This event occurs when a work is queued immediately or once a
20 * delayed work is actually queued on a workqueue (ie: once the delay
26 struct work_struct *work),
28 TP_ARGS(req_cpu, pwq, work),
31 __field( void *, work )
39 __entry->work = work;
40 __entry->function = work->func;
[all …]
/linux/virt/kvm/
async_pf.c
45 static void async_pf_execute(struct work_struct *work) in async_pf_execute() argument
48 container_of(work, struct kvm_async_pf, work); in async_pf_execute()
63 * work item is fully processed. in async_pf_execute()
99 static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work) in kvm_flush_and_free_async_pf_work() argument
102 * The async #PF is "done", but KVM must wait for the work item itself, in kvm_flush_and_free_async_pf_work()
105 * after the last call to module_put(). Note, flushing the work item in kvm_flush_and_free_async_pf_work()
111 * need to be flushed (but sanity check that the work wasn't queued). in kvm_flush_and_free_async_pf_work()
113 if (work->wakeup_all) in kvm_flush_and_free_async_pf_work()
114 WARN_ON_ONCE(work in kvm_flush_and_free_async_pf_work()
124 struct kvm_async_pf *work = … in kvm_clear_async_pf_completion_queue() local
139 struct kvm_async_pf *work = … in kvm_clear_async_pf_completion_queue() local
155 struct kvm_async_pf *work; in kvm_check_async_pf_completion() local
182 struct kvm_async_pf *work; in kvm_setup_async_pf() local
218 struct kvm_async_pf *work; in kvm_async_pf_wakeup_all() local
[all …]
/linux/LICENSES/dual/
copyleft-next-0.3.1
26 of, publicly perform and publicly display My Work.
40 Legal Notices contained in My Work (to the extent they remain
47 If You Distribute a Derived Work, You must license the entire Derived
48 Work as a whole under this License, with prominent notice of such
50 separate Distribution of portions of the Derived Work.
52 If the Derived Work includes material licensed under the GPL, You may
53 instead license the Derived Work under the GPL.
57 When Distributing a Covered Work, You may not impose further
58 restrictions on the exercise of rights in the Covered Work granted under
64 However, You may Distribute a Covered Work incorporating material
[all …]
Apache-2.0
49 "Work" shall mean the work of authorship, whether in Source or Object form,
51 is included in or attached to the work (an example is provided in the
54 "Derivative Works" shall mean any work, whether in Source or Object form,
55 that is based on (or derived from) the Work and for which the editorial
57 a whole, an original work of authorship. For the purposes of this License,
59 merely link (or bind by name) to the interfaces of, the Work and Derivative
62 "Contribution" shall mean any work of authorship, including the original
63 version of the Work and any modifications or additions to that Work or
65 inclusion in the Work by the copyright owner or by an individual or Legal
72 and improving the Work, but excluding communication that is conspicuously
[all …]
/linux/include/linux/
workqueue.h
3 * workqueue.h --- work queue handling for Linux.
21 * The first word is the work queue pointer and the flags rolled into
24 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) argument
27 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
28 WORK_STRUCT_INACTIVE_BIT, /* work item is inactive */
30 WORK_STRUCT_LINKED_BIT, /* next work is linked to this one */
67 * When a work item is off queue, the high bits encode off-queue flags
115 struct work_struct work; member
118 /* target workqueue and CPU ->timer uses to queue ->work */
124 struct work_struct work; member
[all …]
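The include/linux/workqueue.h hits above define struct work_struct, its WORK_STRUCT_* flag bits, and the embed-a-work_struct-in-your-own-struct pattern used by most results on this page. A minimal sketch of the usual lifecycle (embed, INIT_WORK(), schedule_work(), flush_work()), assuming hypothetical demo_* names:

#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct work_struct work;	/* embedded work item */
	int payload;
};

static struct demo_ctx demo;

static void demo_handler(struct work_struct *work)
{
	/* recover the containing object from the work_struct pointer */
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

	pr_info("demo work ran, payload=%d\n", ctx->payload);
}

static int __init demo_init(void)
{
	demo.payload = 42;
	INIT_WORK(&demo.work, demo_handler);
	schedule_work(&demo.work);	/* queue on the system workqueue */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo.work);		/* wait for the handler to finish */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");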
completion.h
35 #define COMPLETION_INITIALIZER(work) \ argument
36 { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
38 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ argument
39 (*({ init_completion_map(&(work), &(map)); &(work); }))
41 #define COMPLETION_INITIALIZER_ONSTACK(work) \ argument
42 (*({ init_completion(&work); &work; }))
46 * @work: identifier for the completion structure
52 #define DECLARE_COMPLETION(work) \ argument
53 struct completion work = COMPLETION_INITIALIZER(work)
62 * @work: identifier for the completion structure
[all …]
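completion.h's COMPLETION_INITIALIZER()/DECLARE_COMPLETION() macros shown above set up a one-shot synchronisation point built on a swait queue. A small sketch of the usual wait/complete pairing, assuming hypothetical demo_* names:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

/* statically initialised, equivalent to COMPLETION_INITIALIZER() above */
static DECLARE_COMPLETION(demo_done);

static int demo_thread(void *data)
{
	/* ... produce whatever the waiter needs ... */
	complete(&demo_done);		/* wake up the waiter */
	return 0;
}

static int demo_wait(void)
{
	struct task_struct *t = kthread_run(demo_thread, NULL, "demo");

	if (IS_ERR(t))
		return PTR_ERR(t);

	wait_for_completion(&demo_done);	/* block until complete() runs */
	return 0;
}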
jump_label_ratelimit.h
12 struct delayed_work work; member
18 struct delayed_work work; member
24 struct delayed_work work; member
28 __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
30 __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
33 __static_key_deferred_flush((x), &(x)->work)
37 struct delayed_work *work,
39 extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
43 extern void jump_label_update_timeout(struct work_struct *work);
49 .work = __DELAYED_WORK_INITIALIZER((name).work, \
[all …]
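jump_label_ratelimit.h embeds a struct delayed_work so the static-key decrement can be deferred by a timeout (see __DELAYED_WORK_INITIALIZER above). The underlying delayed-work pattern it relies on, sketched with hypothetical demo_* names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void demo_timeout_fn(struct work_struct *work);

/* a work_struct plus the timer used to queue it later */
static DECLARE_DELAYED_WORK(demo_dwork, demo_timeout_fn);

static void demo_timeout_fn(struct work_struct *work)
{
	pr_info("ran about 100 ms after being scheduled\n");
}

static void demo_arm(void)
{
	/* queue demo_timeout_fn() to run roughly 100 ms from now */
	schedule_delayed_work(&demo_dwork, msecs_to_jiffies(100));
}

static void demo_disarm(void)
{
	/* cancel if still pending, wait if already running */
	cancel_delayed_work_sync(&demo_dwork);
}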
/linux/tools/perf/
builtin-kwork.c
315 struct kwork_work *work; in work_search() local
319 work = container_of(node, struct kwork_work, node); in work_search()
320 cmp = work_cmp(sort_list, key, work); in work_search()
326 if (work->name == NULL) in work_search()
327 work->name = key->name; in work_search()
328 return work; in work_search()
362 struct kwork_work *work = zalloc(sizeof(*work)); in work_new() local
364 if (work == NULL) { in work_new()
365 pr_err("Failed to zalloc kwork work\n"); in work_new()
370 INIT_LIST_HEAD(&work->atom_list[i]); in work_new()
[all …]
/linux/drivers/staging/octeon/
ethernet-rx.c
59 * @work: Work queue entry pointing to the packet.
63 static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work) in cvm_oct_check_rcv_error() argument
68 port = work->word0.pip.cn68xx.pknd; in cvm_oct_check_rcv_error()
70 port = work->word1.cn38xx.ipprt; in cvm_oct_check_rcv_error()
72 if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) in cvm_oct_check_rcv_error()
81 if (work->word2.snoip.err_code == 5 || in cvm_oct_check_rcv_error()
82 work->word2.snoip.err_code == 7) { in cvm_oct_check_rcv_error()
99 cvmx_phys_to_ptr(work->packet_ptr.s.addr); in cvm_oct_check_rcv_error()
102 while (i < work->word1.len - 1) { in cvm_oct_check_rcv_error()
111 work->packet_ptr.s.addr += i + 1; in cvm_oct_check_rcv_error()
[all …]
/linux/kernel/
kthread.c
317 * functions which do some additional work in non-modular code such as
959 * when they finish. There is defined a safe point for freezing when one work
968 struct kthread_work *work; in kthread_worker_fn() local
991 work = NULL; in kthread_worker_fn()
994 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
996 list_del_init(&work->node); in kthread_worker_fn()
998 worker->current_work = work; in kthread_worker_fn()
1001 if (work) { in kthread_worker_fn()
1002 kthread_work_func_t func = work->func; in kthread_worker_fn()
1004 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn()
[all …]
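kthread_worker_fn() in the excerpt above is the loop a dedicated kthread runs: it pops kthread_work items off worker->work_list, records them in worker->current_work, and calls their functions. A hedged sketch of the client side of that API, with hypothetical demo_* names:

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/printk.h>

static void demo_kwork_fn(struct kthread_work *work)
{
	pr_info("executed in the dedicated worker thread\n");
}

static int demo_use_kthread_worker(void)
{
	struct kthread_worker *worker;
	struct kthread_work kwork;

	/* spawn a worker thread that runs kthread_worker_fn() */
	worker = kthread_create_worker(0, "demo_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&kwork, demo_kwork_fn);
	kthread_queue_work(worker, &kwork);	/* appended to worker->work_list */
	kthread_flush_work(&kwork);		/* wait for it to finish */

	kthread_destroy_worker(worker);
	return 0;
}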
workqueue.c
18 * This is the generic async execution mechanism. Work items as are
21 * normal work items and the other for high priority ones) and some extra
240 PWQ_STAT_STARTED, /* work items started execution */
241 PWQ_STAT_COMPLETED, /* work items completed execution */
247 PWQ_STAT_RESCUED, /* linked work items executed by rescuer */
271 * When pwq->nr_active >= max_active, new work item is queued to
275 * All work items marked with WORK_STRUCT_INACTIVE do not participate in
276 * nr_active and all work items in pwq->inactive_works are marked with
277 * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
279 * pool->worklist or worker->scheduled. Those work items are only struct
[all …]
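The workqueue.c comments above describe how, once pwq->nr_active reaches max_active, further work items are parked on pwq->inactive_works with WORK_STRUCT_INACTIVE set. A small sketch of the caller side that would exercise that path, using a workqueue created with max_active = 1 (names are illustrative):

#include <linux/workqueue.h>
#include <linux/errno.h>

static void demo_fn(struct work_struct *work)
{
	/* work item body */
}

static DECLARE_WORK(demo_a, demo_fn);
static DECLARE_WORK(demo_b, demo_fn);

static struct workqueue_struct *demo_wq;

static int demo_run(void)
{
	/* max_active = 1: at most one item from this queue executes at a time */
	demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 1);
	if (!demo_wq)
		return -ENOMEM;

	queue_work(demo_wq, &demo_a);
	queue_work(demo_wq, &demo_b);	/* may sit inactive until demo_a finishes */
	flush_workqueue(demo_wq);	/* wait for both */
	destroy_workqueue(demo_wq);
	return 0;
}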
/linux/net/wireless/
debugfs.c
148 struct wiphy_work work; in wiphy_locked_debugfs_read()
164 struct wiphy_work *work) in wiphy_locked_debugfs_read()
166 struct debugfs_read_work *w = container_of(work, typeof(*w), work); in wiphy_locked_debugfs_read()
177 wiphy_work_cancel(w->wiphy, &w->work); in wiphy_locked_debugfs_read()
192 struct debugfs_read_work work = {
200 .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion),
204 .cancel_data = &work,
210 wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work);
115 struct wiphy_work work; member
131 wiphy_locked_debugfs_read_work(struct wiphy *wiphy, struct wiphy_work *work) in wiphy_locked_debugfs_read_work() argument
159 struct debugfs_read_work work = { in wiphy_locked_debugfs_read() local
195 struct wiphy_work work; member
211 wiphy_locked_debugfs_write_work(struct wiphy *wiphy, struct wiphy_work *work) in wiphy_locked_debugfs_write_work() argument
238 struct debugfs_write_work work = { in wiphy_locked_debugfs_write() local
[all …]
/linux/LICENSES/deprecated/
CC0-1.0
26 and subsequent owner(s) (each and all, an "owner") of an original work of
27 authorship and/or a database (each, a "Work").
29 Certain owners wish to permanently relinquish those rights to a Work for
37 works, or to gain reputation or greater distribution for their Work in
42 associating CC0 with a Work (the "Affirmer"), to the extent that he or she
43 is an owner of Copyright and Related Rights in the Work, voluntarily
44 elects to apply CC0 to the Work and publicly distribute the Work under its
46 Work and the meaning and intended legal effect of CC0 on those rights.
48 1. Copyright and Related Rights. A Work made available under CC0 may be
54 communicate, and translate a Work;
[all …]
/linux/tools/testing/selftests/bpf/progs/
task_work.c
45 struct elem *work = value; in process_work() local
47 bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0); in process_work()
57 struct elem *work; in oncpu_hash_map() local
65 work = bpf_map_lookup_elem(&hmap, &key); in oncpu_hash_map()
66 if (!work) in oncpu_hash_map()
69 bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL); in oncpu_hash_map()
76 struct elem *work; in oncpu_array_map() local
80 work = bpf_map_lookup_elem(&arrmap, &key); in oncpu_array_map()
81 if (!work) in oncpu_array_map()
83 bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL); in oncpu_array_map()
[all …]
/linux/io_uring/
io-wq.c
66 struct delayed_work work; member
158 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument
160 return __io_get_work_hash(atomic_read(&work->flags)); in io_get_work_hash()
272 * If there's work to do, returns true with acct->lock acquired. If not,
306 * starting work or finishing work. In either case, if it does in io_acct_activate_free_worker()
307 * to go sleep, we'll kick off a new task for this work anyway. in io_acct_activate_free_worker()
324 * Most likely an attempt to queue unbounded work on an io_wq that in io_wq_create_worker()
413 * work item after we canceled in io_wq_exit_workers(). in io_queue_worker_create()
430 /* Defer if current and next work are both hashed to the same chain */
431 static bool io_wq_hash_defer(struct io_wq_work *work, struct io_wq_acct *acct) in io_wq_hash_defer() argument
[all …]
/linux/drivers/infiniband/core/
cm.c
95 struct cm_work *work);
187 struct delayed_work work; member
198 struct cm_work work; member
269 static void cm_work_handler(struct work_struct *work);
710 __be32 remote_id = timewait_info->work.remote_id; in cm_insert_remote_id()
716 if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) in cm_insert_remote_id()
718 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) in cm_insert_remote_id()
744 if (be32_lt(remote_id, timewait_info->work.remote_id)) in cm_find_remote_id()
746 else if (be32_gt(remote_id, timewait_info->work.remote_id)) in cm_find_remote_id()
753 res = cm_acquire_id(timewait_info->work.local_id, in cm_find_remote_id()
[all …]
/linux/LICENSES/preferred/
LGPL-2.1
94 work, a derivative of the original library. The ordinary General Public
127 follow. Pay close attention to the difference between a "work based on the
128 library" and a "work that uses the library". The former contains code
144 The "Library", below, refers to any such software library or work which
145 has been distributed under these terms. A "work based on the Library"
146 means either the Library or any derivative work under copyright law:
147 that is to say, a work containing the Library or a portion of it, either
152 "Source code" for a work means the preferred form of the work for making
161 program is covered only if its contents constitute a work based on the
177 thus forming a work based on the Library, and copy and distribute such
[all …]
LGPL-2.0
88 a textual and legal sense, the linked executable is a combined work, a
108 follow. Pay close attention to the difference between a "work based on the
109 library" and a "work that uses the library". The former contains code
128 The "Library", below, refers to any such software library or work which
129 has been distributed under these terms. A "work based on the Library"
130 means either the Library or any derivative work under copyright law:
131 that is to say, a work containing the Library or a portion of it, either
136 "Source code" for a work means the preferred form of the work for making
145 program is covered only if its contents constitute a work based on the
161 thus forming a work based on the Library, and copy and distribute such
[all …]
/linux/drivers/accessibility/speakup/
selection.c
20 struct work_struct work; member
25 static void __speakup_set_selection(struct work_struct *work) in __speakup_set_selection() argument
28 container_of(work, struct speakup_selection_work, work); in __speakup_set_selection()
58 .work = __WORK_INITIALIZER(speakup_sel_work.work,
65 * cancelling selection work. getting kref first establishes the in speakup_set_selection()
87 schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work); in speakup_set_selection()
96 cancel_work_sync(&speakup_sel_work.work); in speakup_cancel_selection()
97 /* setting to null so that if work fails to run and we cancel it, in speakup_cancel_selection()
106 static void __speakup_paste_selection(struct work_struct *work) in __speakup_paste_selection() argument
109 container_of(work, struct speakup_selection_work, work); in __speakup_paste_selection()
[all …]
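selection.c above initialises its work item statically with __WORK_INITIALIZER(), queues it with schedule_work_on(WORK_CPU_UNBOUND, ...), and tears it down with cancel_work_sync(). The same pattern in isolation, under hypothetical demo_* names:

#include <linux/workqueue.h>

static void demo_sel_fn(struct work_struct *work);

struct demo_sel_work {
	struct work_struct work;
	/* payload fields would live here */
};

/* statically initialised, like __WORK_INITIALIZER() in the excerpt above */
static struct demo_sel_work demo_sel = {
	.work = __WORK_INITIALIZER(demo_sel.work, demo_sel_fn),
};

static void demo_sel_fn(struct work_struct *work)
{
	struct demo_sel_work *s = container_of(work, struct demo_sel_work, work);

	(void)s;	/* use the payload here */
}

static void demo_kick(void)
{
	/* WORK_CPU_UNBOUND lets the workqueue pick any CPU */
	schedule_work_on(WORK_CPU_UNBOUND, &demo_sel.work);
}

static void demo_stop(void)
{
	cancel_work_sync(&demo_sel.work);
}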
