/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine-independent bits of the reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

/*
 * To support use of rmlock in CVs and msleep(), yet another list for the
 * priority tracker would be needed.  Using this lock for cv and msleep
 * also does not seem very useful.
 */

/*
 * Compiler-only memory barrier: the empty asm with a "memory" clobber
 * keeps the compiler from reordering memory accesses across it without
 * emitting any hardware fence instruction.
 */
static __inline void
compiler_memory_barrier(void)
{

	__asm __volatile("":::"memory");
}

static void	assert_rm(struct lock_object *lock, int what);
static void	lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#if 0
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

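/*
 * The lock class methods below are stubs: rmlocks cannot be used together
 * with the generic sleep primitives, so reaching any of them is a bug.
 */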
static void
assert_rm(struct lock_object *lock, int what)
{

	panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

	panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

	panic("unlock_rm called");
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{

	panic("owner_rm called");
}
#endif

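/*
 * Global spin mutex protecting the rm_activeReaders lists of all rmlocks.
 */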
static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu, so only the rmq_next linkage must remain
 * consistent at every step; rmq_prev is fixed up lazily.
 */
static inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers. */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

static inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

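/*
 * IPI handler, run on every CPU by smp_rendezvous() from _rm_wlock():
 * scan the local per-cpu tracker list and queue each active reader of
 * the given lock onto rm_activeReaders so the writer can wait for it.
 */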
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_noreadtoken = 1;
	LIST_INIT(&rm->rm_activeReaders);
	mtx_init(&rm->rm_lock, name, "rmlock_mtx", MTX_NOWITNESS);
	lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}
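
/*
 * Illustrative usage sketch (consumer code, not part of this file; the
 * rm_rlock()/rm_wlock() wrapper macros are assumed to come from
 * <sys/rmlock.h>):
 *
 *	static struct rmlock cfg_lock;
 *	rm_init(&cfg_lock, "cfg");
 *
 *	struct rm_priotracker tracker;
 *	rm_rlock(&cfg_lock, &tracker);		cheap per-cpu read path
 *	... read shared data ...
 *	rm_runlock(&cfg_lock, &tracker);
 *
 *	rm_wlock(&cfg_lock);			expensive: IPIs other CPUs
 *	... modify shared data ...
 *	rm_wunlock(&cfg_lock);
 */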

void
rm_destroy(struct rmlock *rm)
{

	mtx_destroy(&rm->rm_lock);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(struct rmlock *rm)
{

	return (mtx_owned(&rm->rm_lock));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

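/*
 * Read-lock slow path: entered from _rm_rlock() when either a preemption
 * is pending or the read token has been revoked by a writer
 * (rm_noreadtoken != 0).
 */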
static void
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct rm_queue *queue;
	struct rm_priotracker *atracker;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (0 == rm->rm_noreadtoken) {
		critical_exit();
		return;
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return;
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			atracker = (struct rm_priotracker *)queue;
			if ((atracker->rmp_rmlock == rm) &&
			    (atracker->rmp_thread == tracker->rmp_thread)) {
				mtx_lock_spin(&rm_spinlock);
				LIST_INSERT_HEAD(&rm->rm_activeReaders,
				    tracker, rmp_qentry);
				tracker->rmp_flags = RMPF_ONQUEUE;
				mtx_unlock_spin(&rm_spinlock);
				rm_tracker_add(pc, tracker);
				critical_exit();
				return;
			}
		}
	}

	sched_unpin();
	critical_exit();

	mtx_lock(&rm->rm_lock);
	rm->rm_noreadtoken = 0;
	critical_enter();

	pc = pcpu_find(curcpu);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	mtx_unlock(&rm->rm_lock);
}

void
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	td->td_critnest++;	/* critical_enter(); */

	compiler_memory_barrier();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	compiler_memory_barrier();

	td->td_critnest--;

	/*
	 * Fast path: OR the two common conditions together so that "no
	 * deferred preemption" and "read token available" are tested with
	 * a single conditional jump.
	 */
	if (0 == (td->td_owepreempt | rm->rm_noreadtoken))
		return;

	/* We do not have a read token and need to acquire one. */
	_rm_rlock_hard(rm, tracker);
}

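/*
 * Read-unlock slow path: entered from _rm_runlock() when a preemption is
 * pending or when rm_cleanIPI() queued this tracker on rm_activeReaders;
 * in the latter case dequeue it and, if a writer asked to be signalled,
 * wake the writer sleeping on the turnstile.
 */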
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

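/*
 * Write lock: revoke the read token under the internal mutex, IPI all
 * CPUs so rm_cleanIPI() collects their active readers of this lock, and
 * then sleep on a turnstile until every collected reader has drained.
 */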
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;

	mtx_lock(&rm->rm_lock);

	if (rm->rm_noreadtoken == 0) {
		/* Get all read tokens back. */
		rm->rm_noreadtoken = 1;

		/*
		 * Assumes rm->rm_noreadtoken update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous(smp_no_rendevous_barrier, rm_cleanIPI,
		    smp_no_rendevous_barrier, rm);
#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	mtx_unlock(&rm->rm_lock);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	curthread->td_locks--;
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
}

void
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);

	_rm_rlock(rm, tracker);

	LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, 0, file, line);

	curthread->td_locks++;
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	curthread->td_locks--;
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled
 * in the kernel; we may be called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

void
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_rlock(rm, tracker);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif
537