
Searched full:task (Results 1 – 25 of 2216) sorted by relevance

/linux/net/sunrpc/
sched.c
45 static void rpc_release_task(struct rpc_task *task);
68 bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status) in rpc_task_set_rpc_status() argument
70 if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0) in rpc_task_set_rpc_status()
76 rpc_task_timeout(const struct rpc_task *task) in rpc_task_timeout() argument
78 unsigned long timeout = READ_ONCE(task->tk_timeout); in rpc_task_timeout()
90 * Disable the timer for a given RPC task. Should be called with
95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument
97 if (list_empty(&task->u.tk_wait.timer_list)) in __rpc_disable_timer()
99 task->tk_timeout = 0; in __rpc_disable_timer()
100 list_del(&task->u.tk_wait.timer_list); in __rpc_disable_timer()
[all …]
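The rpc_task_set_rpc_status() hit above (line 70) relies on a common kernel idiom: cmpxchg() against an initial value of 0 so that only the first status value ever gets recorded. Below is a minimal sketch of that idiom in isolation; demo_task and demo_set_status_once() are invented names for illustration, not SUNRPC code.

```c
/*
 * Sketch of the "record only the first status" idiom; demo_task and
 * demo_set_status_once() are illustrative names, not SUNRPC code.
 */
#include <linux/atomic.h>
#include <linux/types.h>

struct demo_task {
	int status;	/* 0 means "no status recorded yet" */
};

static bool demo_set_status_once(struct demo_task *t, int status)
{
	/*
	 * cmpxchg() installs @status only while ->status is still 0, so
	 * the first caller wins and later callers are told they lost.
	 */
	return cmpxchg(&t->status, 0, status) == 0;
}
```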
clnt.c
53 static void call_start(struct rpc_task *task);
54 static void call_reserve(struct rpc_task *task);
55 static void call_reserveresult(struct rpc_task *task);
56 static void call_allocate(struct rpc_task *task);
57 static void call_encode(struct rpc_task *task);
58 static void call_decode(struct rpc_task *task);
59 static void call_bind(struct rpc_task *task);
60 static void call_bind_status(struct rpc_task *task);
61 static void call_transmit(struct rpc_task *task);
62 static void call_status(struct rpc_task *task);
907 struct rpc_task *task; rpc_cancel_tasks() local
1114 rpc_task_release_transport(struct rpc_task * task) rpc_task_release_transport() argument
1128 rpc_task_release_client(struct rpc_task * task) rpc_task_release_client() argument
1163 rpc_task_set_transport(struct rpc_task * task,struct rpc_clnt * clnt) rpc_task_set_transport() argument
1179 rpc_task_set_client(struct rpc_task * task,struct rpc_clnt * clnt) rpc_task_set_client() argument
1196 rpc_task_set_rpc_message(struct rpc_task * task,const struct rpc_message * msg) rpc_task_set_rpc_message() argument
1212 rpc_default_callback(struct rpc_task * task,void * data) rpc_default_callback() argument
1226 struct rpc_task *task; rpc_run_task() local
1255 struct rpc_task *task; rpc_call_sync() local
1292 struct rpc_task *task; rpc_call_async() local
1321 struct rpc_task *task; rpc_run_bc_task() local
1371 rpc_call_start(struct rpc_task * task) rpc_call_start() argument
1671 __rpc_restart_call(struct rpc_task * task,void (* action)(struct rpc_task *)) __rpc_restart_call() argument
1684 rpc_restart_call(struct rpc_task * task) rpc_restart_call() argument
1695 rpc_restart_call_prepare(struct rpc_task * task) rpc_restart_call_prepare() argument
1704 rpc_proc_name(const struct rpc_task * task) rpc_proc_name() argument
1718 __rpc_call_rpcerror(struct rpc_task * task,int tk_status,int rpc_status) __rpc_call_rpcerror() argument
1726 rpc_call_rpcerror(struct rpc_task * task,int status) rpc_call_rpcerror() argument
1738 call_start(struct rpc_task * task) call_start() argument
1762 call_reserve(struct rpc_task * task) call_reserve() argument
1775 call_reserveresult(struct rpc_task * task) call_reserveresult() argument
1815 call_retry_reserve(struct rpc_task * task) call_retry_reserve() argument
1826 call_refresh(struct rpc_task * task) call_refresh() argument
1838 call_refreshresult(struct rpc_task * task) call_refreshresult() argument
1879 call_allocate(struct rpc_task * task) call_allocate() argument
1928 rpc_task_need_encode(struct rpc_task * task) rpc_task_need_encode() argument
1937 rpc_xdr_encode(struct rpc_task * task) rpc_xdr_encode() argument
1963 call_encode(struct rpc_task * task) call_encode() argument
2013 rpc_task_transmitted(struct rpc_task * task) rpc_task_transmitted() argument
2019 rpc_task_handle_transmitted(struct rpc_task * task) rpc_task_handle_transmitted() argument
2029 call_bind(struct rpc_task * task) call_bind() argument
2054 call_bind_status(struct rpc_task * task) call_bind_status() argument
2137 call_connect(struct rpc_task * task) call_connect() argument
2167 call_connect_status(struct rpc_task * task) call_connect_status() argument
2265 call_transmit(struct rpc_task * task) call_transmit() argument
2290 call_transmit_status(struct rpc_task * task) call_transmit_status() argument
2358 call_bc_encode(struct rpc_task * task) call_bc_encode() argument
2369 call_bc_transmit(struct rpc_task * task) call_bc_transmit() argument
2382 call_bc_transmit_status(struct rpc_task * task) call_bc_transmit_status() argument
2441 call_status(struct rpc_task * task) call_status() argument
2519 rpc_check_timeout(struct rpc_task * task) rpc_check_timeout() argument
2581 call_decode(struct rpc_task * task) call_decode() argument
2650 rpc_encode_header(struct rpc_task * task,struct xdr_stream * xdr) rpc_encode_header() argument
2679 rpc_decode_header(struct rpc_task * task,struct xdr_stream * xdr) rpc_decode_header() argument
2848 rpc_null_call_prepare(struct rpc_task * task,void * data) rpc_null_call_prepare() argument
2889 struct rpc_task *task; rpc_ping() local
2914 struct rpc_task *task; rpc_ping_noreply() local
2930 rpc_cb_add_xprt_done(struct rpc_task * task,void * calldata) rpc_cb_add_xprt_done() argument
2965 struct rpc_task *task; rpc_clnt_test_and_add_xprt() local
3005 struct rpc_task *task; rpc_clnt_add_xprt_helper() local
3348 rpc_show_task(const struct rpc_clnt * clnt,const struct rpc_task * task) rpc_show_task() argument
3365 struct rpc_task *task; rpc_show_tasks() local
[all...]
xprt.c
75 static void xprt_request_init(struct rpc_task *task);
258 * @task: task that is requesting access to the transport
265 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_reserve_xprt() argument
267 struct rpc_rqst *req = task->tk_rqstp; in xprt_reserve_xprt()
270 if (task == xprt->snd_task) in xprt_reserve_xprt()
276 xprt->snd_task = task; in xprt_reserve_xprt()
279 trace_xprt_reserve_xprt(xprt, task); in xprt_reserve_xprt()
285 task->tk_status = -EAGAIN; in xprt_reserve_xprt()
286 if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task)) in xprt_reserve_xprt()
287 rpc_sleep_on_timeout(&xprt->sending, task, NULL, in xprt_reserve_xprt()
[all …]
/linux/include/asm-generic/
syscall.h
12 * and only when the caller is sure that the task of interest
23 * syscall_get_nr - find what system call a task is executing
24 * @task: task of interest, must be blocked
25 * @regs: task_pt_regs() of @task
27 * If @task is executing a system call or is at system call
29 * If @task is not executing a system call, i.e. it's blocked
36 * It's only valid to call this when @task is known to be blocked.
38 int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
41 * syscall_set_nr - change the system call a task is executing
42 * @task: task of interest, must be blocked
[all …]
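The kerneldoc excerpted above spells out the contract for the asm-generic syscall accessors: @task must be known to be blocked (or be the current task stopped at a tracing point), @regs is task_pt_regs(@task), and syscall_get_nr() returns -1 when the task is not inside a system call. A hedged kernel-side sketch follows; report_blocked_syscall() is an invented helper, not an in-tree function.

```c
/*
 * Illustrative only: per the kerneldoc contract above, @task must be
 * known to be blocked when this is called. report_blocked_syscall()
 * is not an in-tree function.
 */
#include <linux/sched.h>
#include <linux/printk.h>
#include <asm/syscall.h>	/* syscall_get_nr() */
#include <asm/processor.h>	/* task_pt_regs() on most architectures */

static void report_blocked_syscall(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	int nr = syscall_get_nr(task, regs);

	if (nr == -1)	/* documented value for "not in a system call" */
		pr_info("%s[%d] is blocked, but not in a system call\n",
			task->comm, task->pid);
	else
		pr_info("%s[%d] is blocked in system call %d\n",
			task->comm, task->pid, nr);
}
```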
/linux/drivers/gpu/drm/exynos/
exynos_drm_ipp.c
87 WARN_ON(ipp->task); in exynos_drm_ipp_unregister()
261 struct exynos_drm_ipp_task *task; in exynos_drm_ipp_task_alloc()
263 task = kzalloc(sizeof(*task), GFP_KERNEL); in exynos_drm_ipp_task_alloc()
264 if (!task) in exynos_drm_ipp_task_alloc()
267 task->dev = ipp->dev; in exynos_drm_ipp_task_alloc()
268 task->ipp = ipp; in exynos_drm_ipp_task_alloc()
271 task->src.rect.w = task->dst.rect.w = UINT_MAX; in exynos_drm_ipp_task_alloc()
272 task in exynos_drm_ipp_task_alloc()
260 struct exynos_drm_ipp_task *task; exynos_drm_ipp_task_alloc() local
313 exynos_drm_ipp_task_set(struct exynos_drm_ipp_task * task,struct drm_exynos_ioctl_ipp_commit * arg) exynos_drm_ipp_task_set() argument
395 exynos_drm_ipp_task_free(struct exynos_drm_ipp * ipp,struct exynos_drm_ipp_task * task) exynos_drm_ipp_task_free() argument
548 exynos_drm_ipp_check_format(struct exynos_drm_ipp_task * task,struct exynos_drm_ipp_buffer * buf,struct exynos_drm_ipp_buffer * src,struct exynos_drm_ipp_buffer * dst,bool rotate,bool swap) exynos_drm_ipp_check_format() argument
602 exynos_drm_ipp_task_check(struct exynos_drm_ipp_task * task) exynos_drm_ipp_task_check() argument
664 exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task * task,struct drm_file * filp) exynos_drm_ipp_task_setup_buffers() argument
695 exynos_drm_ipp_event_create(struct exynos_drm_ipp_task * task,struct drm_file * file_priv,uint64_t user_data) exynos_drm_ipp_event_create() argument
721 exynos_drm_ipp_event_send(struct exynos_drm_ipp_task * task) exynos_drm_ipp_event_send() argument
733 exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task * task) exynos_drm_ipp_task_cleanup() argument
749 struct exynos_drm_ipp_task *task = container_of(work, exynos_drm_ipp_cleanup_work() local
762 exynos_drm_ipp_task_done(struct exynos_drm_ipp_task * task,int ret) exynos_drm_ipp_task_done() argument
788 struct exynos_drm_ipp_task *task; exynos_drm_ipp_next_task() local
819 exynos_drm_ipp_schedule_task(struct exynos_drm_ipp * ipp,struct exynos_drm_ipp_task * task) exynos_drm_ipp_schedule_task() argument
831 exynos_drm_ipp_task_abort(struct exynos_drm_ipp * ipp,struct exynos_drm_ipp_task * task) exynos_drm_ipp_task_abort() argument
876 struct exynos_drm_ipp_task *task; exynos_drm_ipp_commit_ioctl() local
[all...]
/linux/drivers/scsi/aic94xx/
aic94xx_tmf.c
3 * Aic94xx Task Management Functions
214 static int asd_clear_nexus_tag(struct sas_task *task) in asd_clear_nexus_tag() argument
216 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; in asd_clear_nexus_tag()
217 struct asd_ascb *tascb = task->lldd_task; in asd_clear_nexus_tag()
221 memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8); in asd_clear_nexus_tag()
223 if (task->dev->tproto) in asd_clear_nexus_tag()
225 task->dev->lldd_dev); in asd_clear_nexus_tag()
229 static int asd_clear_nexus_index(struct sas_task *task) in asd_clear_nexus_index() argument
231 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; in asd_clear_nexus_index()
232 struct asd_ascb *tascb = task->lldd_task; in asd_clear_nexus_index()
[all …]
/linux/kernel/bpf/
task_iter.c
39 struct task_struct *task; in task_group_seq_get_next() local
46 task = get_pid_task(pid, PIDTYPE_TGID); in task_group_seq_get_next()
47 if (!task) in task_group_seq_get_next()
53 return task; in task_group_seq_get_next()
58 * same for task_seq_start() to pick up the correct task. in task_group_seq_get_next()
62 task = get_pid_task(pid, PIDTYPE_PID); in task_group_seq_get_next()
64 return task; in task_group_seq_get_next()
67 task = find_task_by_pid_ns(common->pid_visiting, common->ns); in task_group_seq_get_next()
68 if (!task) in task_group_seq_get_next()
72 task = __next_thread(task); in task_group_seq_get_next()
[all …]
/linux/drivers/media/i2c/
saa711x_regs.h
77 /* Task independent global settings */
87 /* Task A definition */
134 /* Task B definition */
361 /* Task independent global settings: R_80_GLOBAL_CNTL_1 to R_8F_STATUS_INFO_SCALER */
383 /* Task A definition: R_90_A_TASK_HANDLING_CNTL to R_BF_A_VERT_LUMA_PHASE_OFF_11 */
384 /* Task A: Basic settings and acquisition window definition */
386 "Task A: Task handling control"},
388 "Task A: X port formats and configuration"},
390 "Task A: X port input reference signal definition"},
392 "Task A: I port output formats and configuration"},
[all …]
/linux/drivers/scsi/
libiscsi.c
136 * @task: scsi command task
146 void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t, in iscsi_prep_data_out_pdu() argument
149 struct iscsi_conn *conn = task->conn; in iscsi_prep_data_out_pdu()
152 task->hdr_len = sizeof(struct iscsi_data); in iscsi_prep_data_out_pdu()
159 hdr->lun = task->lun; in iscsi_prep_data_out_pdu()
160 hdr->itt = task->hdr_itt; in iscsi_prep_data_out_pdu()
176 static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) in iscsi_add_hdr() argument
178 unsigned exp_len = task->hdr_len + len; in iscsi_add_hdr()
180 if (exp_len > task->hdr_max) { in iscsi_add_hdr()
186 task->hdr_len = exp_len; in iscsi_add_hdr()
[all …]
/linux/drivers/scsi/isci/
task.c
64 #include "task.h"
71 * @task: request to complete
72 * @response: response code for the completed task.
73 * @status: status code for the completed task.
76 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, in isci_task_refuse() argument
84 dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n", in isci_task_refuse()
85 __func__, task, response, status); in isci_task_refuse()
87 spin_lock_irqsave(&task->task_state_lock, flags); in isci_task_refuse()
89 task->task_status.resp = response; in isci_task_refuse()
90 task->task_status.stat = status; in isci_task_refuse()
[all …]
/linux/arch/powerpc/kernel/
signal.h
36 struct task_struct *task);
38 struct task_struct *task);
39 extern unsigned long copy_vsx_from_user(struct task_struct *task,
41 extern unsigned long copy_ckvsx_from_user(struct task_struct *task,
43 unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task);
44 unsigned long copy_ckfpr_to_user(void __user *to, struct task_struct *task);
45 unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from);
46 unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from);
48 #define unsafe_copy_fpr_to_user(to, task, label) do { \ argument
49 struct task_struct *__t = task; \
[all …]
/linux/scripts/gdb/linux/
tasks.py
4 # task & thread tools
39 for task in task_lists():
40 if int(task['pid']) == pid:
41 return task
46 """Find Linux task by PID and return the task_struct variable.
55 task = get_task_by_pid(pid)
56 if task:
57 return task.dereference()
59 raise gdb.GdbError("No task of PID " + str(pid))
72 gdb.write("{:>10} {:>12} {:>7}\n".format("TASK", "PI
86 get_thread_info(task) global() argument
103 invoke(self, task) global() argument
[all...]
/linux/kernel/livepatch/
transition.c
85 struct task_struct *g, *task;
122 for_each_process_thread(g, task) { in klp_complete_transition()
123 WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); in klp_complete_transition()
124 task->patch_state = KLP_TRANSITION_IDLE; in klp_complete_transition()
129 task = idle_task(cpu); in klp_complete_transition()
130 WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); in klp_complete_transition()
131 task->patch_state = KLP_TRANSITION_IDLE; in klp_complete_transition()
169 * Switch the patched state of the task to the set of functions in the target in klp_cancel_transition()
172 * NOTE: If task is not 'current', the caller must ensure the task i in klp_cancel_transition()
94 struct task_struct *g, *task; klp_complete_transition() local
184 klp_update_patch_state(struct task_struct * task) klp_update_patch_state() argument
263 klp_check_stack(struct task_struct * task,const char ** oldname) klp_check_stack() argument
293 klp_check_and_switch_task(struct task_struct * task,void * arg) klp_check_and_switch_task() argument
314 klp_try_switch_task(struct task_struct * task) klp_try_switch_task() argument
410 struct task_struct *g, *task; klp_send_signals() local
454 struct task_struct *g, *task; klp_try_complete_transition() local
532 struct task_struct *g, *task; klp_start_transition() local
575 struct task_struct *g, *task; klp_init_transition() local
651 struct task_struct *g, *task; klp_reverse_transition() local
730 struct task_struct *g, *task; klp_force_transition() local
[all...]
/linux/fs/proc/
base.c
114 * in /proc for a task before it execs a suid executable.
208 static int get_task_root(struct task_struct *task, struct path *root) in get_task_root() argument
212 task_lock(task); in get_task_root()
213 if (task->fs) { in get_task_root()
214 get_fs_root(task->fs, root); in get_task_root()
217 task_unlock(task); in get_task_root()
223 struct task_struct *task = get_proc_task(d_inode(dentry)); in proc_cwd_link() local
226 if (task) { in proc_cwd_link()
227 task_lock(task); in proc_cwd_link()
228 if (task->fs) { in proc_cwd_link()
[all …]
fd.c
28 struct task_struct *task; in seq_show() local
30 task = get_proc_task(m->private); in seq_show()
31 if (!task) in seq_show()
34 task_lock(task); in seq_show()
35 files = task->files; in seq_show()
51 task_unlock(task); in seq_show()
52 put_task_struct(task); in seq_show()
82 * that the current task has PTRACE_MODE_READ in addition to the normal
89 struct task_struct *task = get_proc_task(inode); in proc_fdinfo_permission() local
91 if (!task) in proc_fdinfo_permission()
[all …]
/linux/arch/arm64/mm/
gcs.c
136 * Apply the GCS mode configured for the specified task to the
139 void gcs_set_el0_mode(struct task_struct *task) in gcs_set_el0_mode() argument
143 if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE) in gcs_set_el0_mode()
146 if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_WRITE) in gcs_set_el0_mode()
149 if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_PUSH) in gcs_set_el0_mode()
155 void gcs_free(struct task_struct *task) in gcs_free() argument
160 if (!task->mm || task->mm != current->mm) in gcs_free()
163 if (task->thread.gcs_base) in gcs_free()
164 vm_munmap(task->thread.gcs_base, task->thread.gcs_size); in gcs_free()
166 task->thread.gcspr_el0 = 0; in gcs_free()
[all …]
/linux/drivers/md/dm-vdo/indexer/
radix-sort.c
45 struct task { struct
60 struct task *end_of_stack; argument
61 struct task insertion_list[256];
62 struct task stack[];
72 static inline void insert_key(const struct task task, sort_key_t *next) in insert_key() argument
78 while ((--next >= task.first_key) && in insert_key()
79 (compare(unsorted, next[0], task.offset, task.length) < 0)) in insert_key()
90 static inline void insertion_sort(const struct task task) in insertion_sort() argument
94 for (next = task.first_key + 1; next <= task.last_key; next++) in insertion_sort()
95 insert_key(task, next); in insertion_sort()
[all …]
/linux/include/rv/
ltl_monitor.h
12 #include <trace/events/task.h>
24 static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
25 static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);
27 static struct ltl_monitor *ltl_get_monitor(struct task_struct *task) in ltl_get_monitor() argument
29 return &task->rv[ltl_monitor_slot].ltl_mon; in ltl_get_monitor()
32 static void ltl_task_init(struct task_struct *task, bool task_creation) in ltl_task_init() argument
34 struct ltl_monitor *mon = ltl_get_monitor(task); in ltl_task_init()
41 ltl_atoms_init(task, mon, task_creation); in ltl_task_init()
42 ltl_atoms_fetch(task, mon); in ltl_task_init()
45 static void handle_task_newtask(void *data, struct task_struct *task, u64 flags) in handle_task_newtask() argument
[all …]
/linux/drivers/video/fbdev/
uvesafb.c
69 * find the kernel part of the task struct, copy the registers and
70 * the buffer contents and then complete the task.
75 struct uvesafb_ktask *task; in uvesafb_cn_callback() local
84 task = uvfb_tasks[msg->seq]; in uvesafb_cn_callback()
86 if (!task || msg->ack != task->ack) { in uvesafb_cn_callback()
94 if (task->t.buf_len < utask->buf_len || in uvesafb_cn_callback()
103 memcpy(&task->t, utask, sizeof(*utask)); in uvesafb_cn_callback()
105 if (task->t.buf_len && task->buf) in uvesafb_cn_callback()
106 memcpy(task->buf, utask + 1, task->t.buf_len); in uvesafb_cn_callback()
108 complete(task->done); in uvesafb_cn_callback()
[all …]
/linux/drivers/gpu/drm/lima/
lima_sched.c
115 int lima_sched_task_init(struct lima_sched_task *task, in lima_sched_task_init()
123 task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL); in lima_sched_task_init()
124 if (!task->bos) in lima_sched_task_init()
130 err = drm_sched_job_init(&task->base, &context->base, 1, vm, in lima_sched_task_init()
133 kfree(task->bos); in lima_sched_task_init()
137 drm_sched_job_arm(&task->base); in lima_sched_task_init()
139 task->num_bos = num_bos; in lima_sched_task_init()
140 task->vm = lima_vm_get(vm); in lima_sched_task_init()
145 void lima_sched_task_fini(struct lima_sched_task *task) in lima_sched_task_fini()
149 drm_sched_job_cleanup(&task in lima_sched_task_fini()
113 lima_sched_task_init(struct lima_sched_task * task,struct lima_sched_context * context,struct lima_bo ** bos,int num_bos,struct lima_vm * vm,u64 drm_client_id) lima_sched_task_init() argument
143 lima_sched_task_fini(struct lima_sched_task * task) lima_sched_task_fini() argument
173 lima_sched_context_queue_task(struct lima_sched_task * task) lima_sched_context_queue_task() argument
206 struct lima_sched_task *task = to_lima_task(job); lima_sched_run_job() local
271 lima_sched_build_error_task_list(struct lima_sched_task * task) lima_sched_build_error_task_list() argument
404 struct lima_sched_task *task = to_lima_task(job); lima_sched_timedout_job() local
475 struct lima_sched_task *task = to_lima_task(job); lima_sched_free_job() local
545 struct lima_sched_task *task = pipe->current_task; lima_sched_pipe_task_done() local
[all...]
/linux/tools/testing/selftests/bpf/progs/
rcu_read_lock.c
33 struct task_struct *task; in get_cgroup_id() local
36 task = bpf_get_current_task_btf(); in get_cgroup_id()
37 if (task->pid != target_pid) in get_cgroup_id()
42 cgroups = task->cgroups; in get_cgroup_id()
54 struct task_struct *task, *real_parent; in task_succ() local
58 task = bpf_get_current_task_btf(); in task_succ()
59 if (task->pid != target_pid) in task_succ()
64 real_parent = task->real_parent; in task_succ()
83 struct task_struct *task, *real_parent; in no_lock() local
86 task in no_lock()
95 struct task_struct *task, *real_parent; two_regions() local
114 struct task_struct *task, *real_parent; non_sleepable_1() local
130 struct task_struct *task, *real_parent; non_sleepable_2() local
149 struct task_struct *task, *real_parent, *gparent; task_acquire() local
177 struct task_struct *task; miss_lock() local
191 struct task_struct *task; miss_unlock() local
203 struct task_struct *task, *real_parent; non_sleepable_rcu_mismatch() local
221 struct task_struct *task, *real_parent; inproper_sleepable_helper() local
265 struct task_struct *task, *real_parent; nested_rcu_region() local
284 struct task_struct *task, *group_leader; task_trusted_non_rcuptr() local
298 struct task_struct *task, *real_parent; task_untrusted_rcuptr() local
312 struct task_struct *task, *real_parent; cross_rcu_region() local
[all...]
/linux/drivers/scsi/qedi/
qedi_fw_iscsi.h
31 * task context.
33 * @param task_params - Pointer to task parameters struct
37 * @param sgl_task_params - Pointer to SGL task params
49 * Request task context.
51 * @param task_params - Pointer to task parameters struct
53 * @param tx_sgl_task_params - Pointer to SGL task params
54 * @param rx_sgl_task_params - Pointer to SGL task params
62 * task context.
64 * @param task_params - Pointer to task parameters struct
66 * @param tx_sgl_task_params - Pointer to SGL task params
[all …]
/linux/Documentation/admin-guide/mm/
numa_memory_policy.rst
20 both cpusets and policies are applied to a task, the restrictions of the cpuset
44 Task/Process Policy
45 this is an optional, per-task policy. When defined for a
46 specific task, this policy controls all page allocations made
47 by or on behalf of the task that aren't controlled by a more
48 specific scope. If a task does not define a task policy, then
50 task policy "fall back" to the System Default Policy.
52 The task policy applies to the entire address space of a task. Thus,
54 [clone() w/o the CLONE_VM flag] and exec*(). This allows a parent task
55 to establish the task policy for a child task exec()'d from an
[all …]
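The documentation excerpt above covers the task/process scope of NUMA memory policy: a per-task policy that governs allocations by the task that are not covered by a more specific (e.g. per-VMA) policy. A minimal userspace sketch using set_mempolicy(2) via libnuma's <numaif.h> (link with -lnuma); binding to node 0 is just an example value.

```c
/*
 * Bind the calling task's memory policy to NUMA node 0. Illustrative
 * only; real code should build the nodemask from the actual topology.
 * Build: cc mempolicy_demo.c -lnuma
 */
#include <numaif.h>	/* set_mempolicy(), MPOL_BIND */
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = 1UL;	/* bit 0 => node 0 */

	/* Task policy: governs pages this task allocates from now on. */
	if (set_mempolicy(MPOL_BIND, &nodemask, 8 * sizeof(nodemask)) != 0) {
		perror("set_mempolicy");
		return 1;
	}
	printf("task policy: MPOL_BIND on node 0\n");
	return 0;
}
```

From a shell, `numactl --membind=0 <command>` establishes the same kind of task policy before exec()ing the command.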
/linux/tools/perf/util/bpf_skel/
kwork_top.bpf.c
105 static __always_inline void update_task_info(struct task_struct *task, __u32 cpu) in update_task_info() argument
108 .pid = task->pid, in update_task_info()
114 .tgid = task->tgid, in update_task_info()
115 .is_kthread = task->flags & PF_KTHREAD ? 1 : 0, in update_task_info()
117 BPF_CORE_READ_STR_INTO(&data.comm, task, comm); in update_task_info()
139 static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu) in on_sched_out() argument
144 pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0); in on_sched_out()
152 .pid = task->pid, in on_sched_out()
153 .task_p = (__u64)task, in on_sched_out()
157 update_task_info(task, cpu); in on_sched_out()
[all …]
/linux/arch/arm64/kernel/
fpsimd.c
60 * (a) for each task, we need to remember which CPU was the last one to have
61 * the task's FPSIMD state loaded into its FPSIMD registers;
62 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
69 * address of the userland FPSIMD state of the task that was loaded onto the CPU
75 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
79 * indicate whether or not the userland FPSIMD state of the current task is
82 * task. If the task is behaving as a VMM, then this is will be managed by
89 * called from softirq context, which will save the task's FPSIMD context back
91 * task's FPSIMD state from task context and thereby corrupting the state, it
92 * is necessary to protect any manipulation of a task's fpsimd_state or
[all …]
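The comment block excerpted above explains arm64's lazy FPSIMD bookkeeping: each task remembers the last CPU that loaded its register state, each CPU remembers which task's state it currently holds, and the registers are only trusted when the two records still agree. Below is a generic, hedged sketch of that mutual check; struct regctx, loaded_ctx and ctx_still_loaded() are invented names, not the kernel's fpsimd fields.

```c
/*
 * Generic sketch of the mutual "last owner" check described above.
 * struct regctx, loaded_ctx and ctx_still_loaded() are invented names;
 * the kernel's actual bookkeeping lives in fpsimd.c.
 */
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

struct regctx {
	int last_cpu;			/* CPU that last loaded this context */
};

static DEFINE_PER_CPU(struct regctx *, loaded_ctx);

/* Caller must keep preemption disabled so the CPU cannot change underneath. */
static bool ctx_still_loaded(struct regctx *ctx)
{
	/*
	 * Both directions must agree: this CPU believes it holds @ctx, and
	 * @ctx believes it was last loaded on this CPU. Either side going
	 * stale means the registers must be reloaded from memory.
	 */
	return this_cpu_read(loaded_ctx) == ctx &&
	       ctx->last_cpu == smp_processor_id();
}
```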
