Lines matching defs: t  (struct task_struct *t in the RCU-preempt code)
117 static void rcu_read_unlock_special(struct task_struct *t);
145 * to the head of the list won't block any grace period that is already
169 struct task_struct *t = current;
196 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
214 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
227 list_add(&t->rcu_node_entry, rnp->exp_tasks);
238 list_add(&t->rcu_node_entry, rnp->gp_tasks);
255 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
259 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
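The entries above (source lines 196-259) show the core data structure of preemptible RCU: a task preempted inside an RCU read-side critical section links itself onto the rcu_node's ->blkd_tasks list through the list_head embedded in task_struct (->rcu_node_entry), and ->gp_tasks / ->exp_tasks mark the first entry that still blocks the current normal or expedited grace period. Below is a minimal, standalone userspace sketch of that pattern; the list helpers are a simplified stand-in for <linux/list.h>, and struct task / struct rnode are toy types invented for this illustration, not the kernel's.

/*
 * Toy sketch, not kernel code: a task with an embedded list node is queued
 * on a per-node blkd_tasks list, and gp_tasks marks the oldest entry that
 * still blocks the current grace period.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *new, struct list_head *head)      /* insert at head */
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_add_tail(struct list_head *new, struct list_head *head) /* insert at tail */
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct task {                        /* toy stand-in for task_struct */
	int pid;
	struct list_head rcu_node_entry;
};

struct rnode {                       /* toy stand-in for struct rcu_node */
	struct list_head blkd_tasks;     /* blocked readers in this subtree      */
	struct list_head *gp_tasks;      /* first entry blocking the current GP  */
};

int main(void)
{
	struct rnode rnp;
	struct task a = { .pid = 1 }, b = { .pid = 2 };

	list_init(&rnp.blkd_tasks);
	rnp.gp_tasks = NULL;

	/* A reader that blocks the current grace period goes at the tail,
	 * so everything from gp_tasks to the tail blocks the current GP... */
	list_add_tail(&a.rcu_node_entry, &rnp.blkd_tasks);
	if (!rnp.gp_tasks)
		rnp.gp_tasks = &a.rcu_node_entry;

	/* ...while one added at the head cannot block a GP already in progress. */
	list_add(&b.rcu_node_entry, &rnp.blkd_tasks);

	/* Recover the task from its embedded node, container_of()-style. */
	struct task *first = (struct task *)((char *)rnp.gp_tasks -
					     offsetof(struct task, rcu_node_entry));
	printf("gp_tasks points at pid %d\n", first->pid);
	return 0;
}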
326 struct task_struct *t = current;
334 !t->rcu_read_unlock_special.b.blocked) {
339 t->rcu_read_unlock_special.b.blocked = true;
340 t->rcu_blocked_node = rnp;
348 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
350 t->pid,
356 rcu_preempt_deferred_qs(t);
432 struct task_struct *t = current;
437 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
438 rcu_read_unlock_special(t);
452 static struct list_head *rcu_next_node_entry(struct task_struct *t,
457 np = t->rcu_node_entry.next;
478 rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
496 * t->rcu_read_unlock_special cannot change.
498 special = t->rcu_read_unlock_special;
503 t->rcu_read_unlock_special.s = 0;
532 rnp = t->rcu_blocked_node;
534 WARN_ON_ONCE(rnp != t->rcu_blocked_node);
540 np = rcu_next_node_entry(t, rnp);
541 list_del_init(&t->rcu_node_entry);
542 t->rcu_blocked_node = NULL;
544 rnp->gp_seq, t->pid);
545 if (&t->rcu_node_entry == rnp->gp_tasks)
547 if (&t->rcu_node_entry == rnp->exp_tasks)
551 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
552 if (&t->rcu_node_entry == rnp->boost_tasks)
558 * we aren't waiting on any CPUs, report the quiescent state.
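The entries above (source lines 452-558) are the unlock side: rcu_next_node_entry() records the departing task's successor before list_del_init(), and the ->gp_tasks / ->exp_tasks / ->boost_tasks markers are advanced to that successor (or cleared) if they were pointing at the task being removed. Below is a toy sketch of that marker-advance pattern, in the same simplified list style as the previous sketch; the type and helper names are illustrative, not the kernel's.

/*
 * Toy sketch, not kernel code: record the successor first, then advance
 * the gp_tasks-style marker if it pointed at the entry being removed.
 * NULL means no remaining task blocks the grace period.
 */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	list_init(e);
}

struct rnode_toy {
	struct list_head blkd_tasks;     /* toy stand-in for rcu_node::blkd_tasks */
	struct list_head *gp_tasks;      /* marker: first entry blocking the GP   */
};

/* Toy analog of rcu_next_node_entry(): successor, or NULL at end of list. */
static struct list_head *next_node_entry(struct list_head *e, struct rnode_toy *rnp)
{
	struct list_head *np = e->next;

	return np == &rnp->blkd_tasks ? NULL : np;
}

static void dequeue_blocked_task(struct list_head *e, struct rnode_toy *rnp)
{
	struct list_head *np = next_node_entry(e, rnp);

	if (rnp->gp_tasks == e)          /* departing task was the marker */
		rnp->gp_tasks = np;      /* advance, or clear if last     */
	list_del_init(e);
}

int main(void)
{
	struct rnode_toy rnp;
	struct list_head a, b;

	list_init(&rnp.blkd_tasks);
	list_add_tail(&a, &rnp.blkd_tasks);
	list_add_tail(&b, &rnp.blkd_tasks);
	rnp.gp_tasks = &a;

	dequeue_blocked_task(&a, &rnp);
	printf("gp_tasks advanced to b? %s\n", rnp.gp_tasks == &b ? "yes" : "no");
	dequeue_blocked_task(&b, &rnp);
	printf("gp_tasks now NULL? %s\n", rnp.gp_tasks == NULL ? "yes" : "no");
	return 0;
}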
600 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
603 READ_ONCE(t->rcu_read_unlock_special.s)) &&
614 notrace void rcu_preempt_deferred_qs(struct task_struct *t)
618 if (!rcu_preempt_need_deferred_qs(t))
621 rcu_preempt_deferred_qs_irqrestore(t, flags);
661 * @t: The task being checked
668 static bool rcu_unlock_needs_exp_handling(struct task_struct *t,
678 * check because 't' might not be on the exp_tasks list at all - its
681 if (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks))
702 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node))
714 if (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled && t->rcu_blocked_node)
725 static void rcu_read_unlock_special(struct task_struct *t)
743 needs_exp = rcu_unlock_needs_exp_handling(t, rdp, rnp, irqs_were_disabled);
770 rcu_preempt_deferred_qs_irqrestore(t, flags);
784 struct task_struct *t;
793 t = container_of(rnp->gp_tasks, struct task_struct,
796 rnp->gp_seq, t->pid);
810 struct task_struct *t = current;
816 if (rcu_preempt_need_deferred_qs(t)) {
817 set_tsk_need_resched(t);
820 } else if (rcu_preempt_need_deferred_qs(t)) {
821 rcu_preempt_deferred_qs(t); /* Report deferred QS. */
832 !t->rcu_read_unlock_special.b.need_qs &&
834 t->rcu_read_unlock_special.b.need_qs = true;
847 struct task_struct *t = current;
852 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
895 pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
1032 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
1044 notrace void rcu_preempt_deferred_qs(struct task_struct *t)
1157 struct task_struct *t;
1187 * We boost task t by manufacturing an rt_mutex that appears to
1188 * be held by task t. We leave a pointer to that rt_mutex where
1189 * task t can find it, and task t will release the mutex when it
1192 * t's priority. (Thanks to tglx for suggesting this approach!)
1194 * Note that task t must acquire rnp->lock to remove itself from
1196 * nowhere else. We therefore are guaranteed that task t will
1199 * and task t's exiting its outermost RCU read-side critical
1202 t = container_of(tb, struct task_struct, rcu_node_entry);
1203 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
1205 /* Lock only for side effect: boosts task t's priority. */
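The comment block and code above (source lines 1187-1205) describe RCU priority boosting: the boost kthread proxy-locks rnp->boost_mtx on behalf of the preempted reader via rt_mutex_init_proxy_locked(), then blocks on that mutex, so the reader inherits the booster's priority until it releases the mutex on exiting its outermost read-side critical section. Userspace has no equivalent of proxy locking, so the sketch below only illustrates the underlying priority-inheritance idea with a PTHREAD_PRIO_INHERIT pthread mutex; the thread names are invented for the illustration. Build with -pthread; seeing an actual boost also requires real-time priorities and the privileges to set them.

/*
 * Userspace illustration only, not the kernel mechanism: whoever holds a
 * PTHREAD_PRIO_INHERIT mutex inherits the highest priority among the
 * threads blocked on it, roughly how the boosted reader runs at the
 * booster's priority until it unlocks.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t boost_mtx;

static void *preempted_reader(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&boost_mtx);   /* "reader" holds the PI mutex       */
	usleep(100 * 1000);               /* long read-side-like section       */
	pthread_mutex_unlock(&boost_mtx); /* unlock drops any inherited boost  */
	return NULL;
}

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_t tid;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&boost_mtx, &attr);

	pthread_create(&tid, NULL, preempted_reader, NULL);
	usleep(10 * 1000);                /* crude ordering: let the reader lock first */
	pthread_mutex_lock(&boost_mtx);   /* "booster" blocks; holder is boosted */
	pthread_mutex_unlock(&boost_mtx);

	pthread_join(tid, NULL);
	pthread_mutex_destroy(&boost_mtx);
	pthread_mutexattr_destroy(&attr);
	return 0;
}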
1256 * The ->boost_kthread_task is immortal, so we don't need to worry
1303 struct task_struct *t;
1308 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1310 if (WARN_ON_ONCE(IS_ERR(t)))
1314 rnp->boost_kthread_task = t;
1318 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1319 rcu_thread_affine_rnp(t, rnp);
1320 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
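The final entries (source lines 1303-1320) are the boost-kthread spawn path: the per-rcu_node kthread is created, given SCHED_FIFO priority with sched_setscheduler_nocheck(), affined to its node, and woken. A rough userspace analogue, offered only as a sketch, is a pthread created with an explicit SCHED_FIFO attribute; the priority value 1 and the boost_worker name are arbitrary choices for the illustration, and creating such a thread needs CAP_SYS_NICE or root (otherwise pthread_create() returns EPERM).

/*
 * Userspace sketch only: an explicitly-scheduled SCHED_FIFO thread,
 * loosely mirroring kthread_create() + sched_setscheduler_nocheck().
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

static void *boost_worker(void *arg)
{
	(void)arg;
	/* Would sleep until woken, then boost/deboost blocked readers. */
	return NULL;
}

int main(void)
{
	pthread_attr_t attr;
	struct sched_param sp = { .sched_priority = 1 };  /* arbitrary RT prio */
	pthread_t tid;
	int ret;

	pthread_attr_init(&attr);
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
	pthread_attr_setschedparam(&attr, &sp);

	ret = pthread_create(&tid, &attr, boost_worker, NULL);
	if (ret)
		fprintf(stderr, "pthread_create: %s\n", strerror(ret));
	else
		pthread_join(tid, NULL);

	pthread_attr_destroy(&attr);
	return 0;
}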