// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	unsigned long gp_seq;		/* Grace-period counter. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
	.gp_seq		= 0 - 300UL,
};
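
/*
 * Illustrative sketch, not part of the upstream file: the two tail
 * pointers carve the single callback list into a "done" segment and a
 * "still waiting" segment.  With callbacks A and B queued and only A's
 * grace period complete:
 *
 *	rcucblist -> A -> B -> NULL
 *	donetail == &A->next	(A is ready to invoke)
 *	curtail  == &B->next	(B still awaits a grace period)
 *
 * When the list is empty, both tail pointers reference rcucblist
 * itself, which is exactly what the static initializer above sets up.
 * The unusual initial gp_seq value (0 - 300UL) starts the counter just
 * below zero, apparently so that sequence-number wraparound in the
 * polled grace-period APIs gets exercised early in boot.
 */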

void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu_hurry);
}
EXPORT_SYMBOL(rcu_barrier);
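
/*
 * Usage sketch, illustrative only (foo_exit() is hypothetical): code
 * that posts callbacks referencing unloadable code or soon-to-be-freed
 * data must wait for those callbacks before tearing down:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// ... stop posting new callbacks ...
 *		rcu_barrier();	// wait for in-flight callbacks to run
 *		// ... now safe to free callback-reachable resources ...
 *	}
 *
 * Note that rcu_barrier() waits only for callbacks already posted; if
 * none are pending, it need not wait for a grace period at all.
 */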

/* Record an RCU quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	local_irq_restore(flags);
}
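
/*
 * Illustrative before/after, not part of the upstream file: with
 * callbacks A and B queued and neither yet done,
 *
 *	before:	donetail == &rcucblist, curtail == &B->next
 *	after:	donetail == curtail == &B->next
 *
 * so a single quiescent state promotes every queued callback to "done"
 * and raises RCU_SOFTIRQ so that rcu_process_callbacks() will invoke
 * them.  Advancing gp_seq by two keeps its bottom bit clear, in the
 * style of Tree RCU's rcu_seq grace-period-sequence encoding.
 */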

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/*
 * Reclaim the specified callback, either by invoking it (for non-kvfree
 * cases) or freeing it directly (for kvfree).  Return true if kvfreeing,
 * false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	debug_rcu_head_callback(head);
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
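
/*
 * Illustrative sketch of the offset encoding, using a hypothetical
 * struct foo: kvfree_rcu(p, rh) stores no real callback.  Instead it
 * encodes offsetof(struct foo, rh) in head->func, and that offset is
 * small enough for __is_kvfree_rcu_offset() to distinguish it from a
 * kernel text address:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		char payload[64];
 *	};
 *
 *	// head->func == (rcu_callback_t)offsetof(struct foo, rh), so
 *	// "(void *)head - offset" above recovers the pointer that was
 *	// originally passed to kvfree_rcu(), ready for kvfree().
 */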

/* Invoke the RCU callbacks whose grace period has elapsed.  */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}
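
/*
 * Illustrative splice, not part of the upstream file: with done
 * callback A and still-waiting callback B,
 *
 *	before:	rcucblist -> A -> B, donetail == &A->next
 *	after:	list -> A -> NULL (detached for invocation)
 *		rcucblist -> B (still waiting for a grace period)
 *
 * The detached callbacks are then invoked with bottom halves disabled
 * but interrupts enabled, so a long callback list does not add to
 * interrupt latency.
 */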

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, other than
 * let the polled APIs know that another grace period elapsed.
 *
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
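
/*
 * Usage sketch, illustrative only (struct foo, foo_lock, and p are
 * hypothetical): the classic remove-then-reclaim update pattern:
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&p->list);		// unpublish the element
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();		// wait out pre-existing readers
 *	kfree(p);			// no reader can now hold a reference
 *
 * On Tiny RCU the wait is nearly free: being able to call
 * synchronize_rcu() at all proves that this lone CPU is not inside a
 * read-side critical section, so only the polled-API counter advances.
 */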

static void tiny_rcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	static atomic_t doublefrees;
	unsigned long flags;

	if (debug_rcu_head_queue(head)) {
		if (atomic_inc_return(&doublefrees) < 4) {
			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
			mem_dump_obj(head);
		}

		if (!__is_kvfree_rcu_offset((unsigned long)head->func))
			WRITE_ONCE(head->func, tiny_rcu_leak_callback);
		return;
	}

	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
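
/*
 * Usage sketch, illustrative only (struct foo and foo_reclaim() are
 * hypothetical): the callback typically recovers its enclosing
 * structure with container_of() and frees it:
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	list_del_rcu(&p->list);
 *	call_rcu(&p->rcu, foo_reclaim);
 *
 * Unlike synchronize_rcu(), call_rcu() never blocks, so it is usable
 * from interrupt and bottom-half context.
 */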

/*
 * Store a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);

/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
	unsigned long gp_seq = get_state_synchronize_rcu();

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
	return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);

/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
	return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
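
/*
 * Usage sketch of the polled grace-period API, illustrative only
 * (p and foo_reclaim() are hypothetical):
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_rcu();
 *	// ... do other work ...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(p);			// grace period already over
 *	else
 *		call_rcu(&p->rcu, foo_reclaim);	// fall back to a callback
 *
 * Because gp_seq advances by two per grace period, any later value that
 * differs from the cookie proves that at least one full grace period
 * has elapsed in between.
 */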

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	if (head)
		kasan_record_aux_stack_noalloc(ptr);

	__kvfree_call_rcu(head, ptr);
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
#endif
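
/*
 * Usage sketch, illustrative only (struct foo is hypothetical):
 * kvfree_call_rcu() is normally reached through the kvfree_rcu()
 * macro, which encodes the rcu_head offset instead of a callback:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rh;
 *	};
 *
 *	kvfree_rcu(p, rh);	// frees p after a grace period
 *
 * Under CONFIG_KASAN_GENERIC, the wrapper above also records the
 * current call stack as auxiliary information for ptr, improving KASAN
 * reports if the memory is touched after being freed.
 */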

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}