xref: /linux/include/linux/srcutiny.h (revision a6fc88b22bc8d12ad52e8412c667ec0f5bf055af)
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *	tiny variant.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#ifndef _LINUX_SRCU_TINY_H
#define _LINUX_SRCU_TINY_H

#include <linux/irq_work_types.h>
#include <linux/swait.h>
16 
/*
 * Tiny SRCU per-srcu_struct state: a pair of reader nesting counters
 * (one per reader slot, selected by bit 0x2 of srcu_idx), grace-period
 * bookkeeping, and a singly linked callback list threaded through
 * srcu_cb_head/srcu_cb_tail.
 */
struct srcu_struct {
	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
	u8 srcu_gp_running;		/* GP workqueue running? */
	u8 srcu_gp_waiting;		/* GP waiting for readers? */
	unsigned long srcu_idx;		/* Current reader array element in bit 0x2. */
	unsigned long srcu_idx_max;	/* Furthest future srcu_idx request. */
	struct swait_queue_head srcu_wq;
					/* Last srcu_read_unlock() wakes GP. */
	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
	struct work_struct srcu_work;	/* For driving grace periods. */
	struct irq_work srcu_irq_work;	/* Defer schedule_work() to irq work. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* Lockdep state for this srcu_struct. */
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
33 
/* Workqueue handler (see __WORK_INITIALIZER() use below) that drives grace periods. */
void srcu_drive_gp(struct work_struct *wp);
/* irq_work handler backing srcu_struct::srcu_irq_work. */
void srcu_tiny_irq_work(struct irq_work *irq_work);
36 
/*
 * Static initializer for a Tiny SRCU srcu_struct.  The three trailing
 * parameters exist only for signature compatibility with Tree SRCU's
 * initializer and are ignored, as their names indicate.
 */
#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored, ____ignored)	\
{									\
	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
	.srcu_cb_tail = &name.srcu_cb_head,				\
	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
	.srcu_irq_work = { .func = srcu_tiny_irq_work },		\
	__SRCU_DEP_MAP_INIT(name)					\
}
45 
/*
 * This odd _STATIC_ arrangement is needed for API compatibility with
 * Tree SRCU, which needs some per-CPU data.
 *
 * Tiny SRCU has only one reader flavor, so all of the _FAST and
 * _FAST_UPDOWN definers below expand to the same srcu_struct
 * definition as the plain versions.
 */
#define DEFINE_SRCU(name) \
	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
#define DEFINE_STATIC_SRCU(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
#define DEFINE_SRCU_FAST(name) DEFINE_SRCU(name)
#define DEFINE_STATIC_SRCU_FAST(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
#define DEFINE_SRCU_FAST_UPDOWN(name) DEFINE_SRCU(name)
#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
60 
// Dummy structure for srcu_notifier_head.
struct srcu_usage { };
#define __SRCU_USAGE_INIT(name) { }
// The _fast init entry points map straight onto the plain ones, since
// Tiny SRCU's fast read paths funnel into __srcu_read_lock() anyway.
#define __init_srcu_struct_fast __init_srcu_struct
#define __init_srcu_struct_fast_updown __init_srcu_struct
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#define init_srcu_struct_fast init_srcu_struct
#define init_srcu_struct_fast_updown init_srcu_struct
#endif // #ifndef CONFIG_DEBUG_LOCK_ALLOC

void synchronize_srcu(struct srcu_struct *ssp);
72 
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Can be invoked from irq/bh handlers, but the matching
 * __srcu_read_unlock() must be in the same handler instance.  Returns an
 * index that must be passed to the matching srcu_read_unlock().
 */
static inline int __srcu_read_lock(struct srcu_struct *ssp)
	__acquires_shared(ssp)
{
	int idx;

	preempt_disable();  // Needed for PREEMPT_LAZY
	// Extract bit 0x2 of srcu_idx + 1 to select the 0/1 reader slot.
	idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
	// Non-atomic increment is safe: preemption is disabled across it.
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
	preempt_enable();
	__acquire_shared(ssp);	// Annotation: tell static checkers we hold ssp's reader lock.
	return idx;
}
91 
92 struct srcu_ctr;
93 
94 static inline bool __srcu_ptr_to_ctr(struct srcu_struct *ssp, struct srcu_ctr __percpu *scpp)
95 {
96 	return (int)(intptr_t)(struct srcu_ctr __force __kernel *)scpp;
97 }
98 
99 static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
100 {
101 	return (struct srcu_ctr __percpu *)(intptr_t)idx;
102 }
103 
104 static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
105 	__acquires_shared(ssp)
106 {
107 	return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
108 }
109 
110 static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
111 	__releases_shared(ssp)
112 {
113 	__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
114 }
115 
116 static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
117 	__acquires_shared(ssp)
118 {
119 	return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
120 }
121 
122 static inline
123 void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
124 	__releases_shared(ssp)
125 {
126 	__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
127 }
128 
/* Tiny SRCU has no expedited machinery: fall back to synchronize_srcu(). */
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}

/*
 * Wait for pending callbacks.  NOTE(review): correctness relies on
 * synchronize_srcu() not returning before previously queued callbacks
 * have run -- confirm against kernel/rcu/srcutiny.c.
 */
static inline void srcu_barrier(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}

/* No-op in Tiny SRCU; provided for API compatibility with Tree SRCU. */
static inline void srcu_expedite_current(struct srcu_struct *ssp) { }
/* No-op: Tiny SRCU does not track per-reader flavors. */
#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0)
141 
/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
					    char *tt, char *tf)
{
	int idx;

	/* Same bit-0x2 slot extraction as __srcu_read_lock(); the
	 * data_race() wrappers mark these diagnostic-only lockless reads. */
	idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
	/* Print both slots' nesting counts (inactive slot first) plus
	 * current and furthest-requested grace-period indexes. */
	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
		 tt, tf, idx,
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
		 data_race(READ_ONCE(ssp->srcu_idx)),
		 data_race(READ_ONCE(ssp->srcu_idx_max)));
}
#endif