/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the public data structure and API definitions.
 */

#ifndef __LINUX_RT_MUTEX_H
#define __LINUX_RT_MUTEX_H

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/rbtree_types.h>
#include <linux/spinlock_types_raw.h>

extern int max_lock_depth;

struct rt_mutex_base {
	raw_spinlock_t		wait_lock;
	struct rb_root_cached	waiters;
	struct task_struct	*owner;
};

#define __RT_MUTEX_BASE_INITIALIZER(rtbasename)				\
{									\
	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock),	\
	.waiters = RB_ROOT_CACHED,					\
	.owner = NULL							\
}

/**
 * rt_mutex_base_is_locked - is the rtmutex locked
 * @lock: the mutex to be queried
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
{
	return READ_ONCE(lock->owner) != NULL;
}

#ifdef CONFIG_RT_MUTEXES
#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
#endif

extern void rt_mutex_base_init(struct rt_mutex_base *rtb);

/**
 * The rt_mutex structure
 *
 * @wait_lock:	spinlock to protect the structure
 * @waiters:	rbtree root to enqueue waiters in priority order;
 *		caches top-waiter (leftmost node).
 * @owner:	the mutex owner
 */
struct rt_mutex {
	struct rt_mutex_base	rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

struct rt_mutex_waiter;
struct hrtimer_sleeper;

#ifdef CONFIG_DEBUG_RT_MUTEXES
extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { }
#endif

#define rt_mutex_init(mutex) \
do { \
	static struct lock_class_key __key; \
	__rt_mutex_init(mutex, #mutex, &__key); \
} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)	\
	.dep_map = {					\
		.name = #mutexname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	}
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif

#define __RT_MUTEX_INITIALIZER(mutexname)				\
{									\
	.rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex),	\
	__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)			\
}

#define DEFINE_RT_MUTEX(mutexname) \
	struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)

extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
#define rt_mutex_lock_nest_lock(lock, nest_lock)			\
	do {								\
		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
		_rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	} while (0)

#else
extern void rt_mutex_lock(struct rt_mutex *lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
#endif

extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
extern int rt_mutex_lock_killable(struct rt_mutex *lock);
extern int rt_mutex_trylock(struct rt_mutex *lock);

extern void rt_mutex_unlock(struct rt_mutex *lock);

#endif
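
/*
 * Example usage (an illustrative sketch only, not part of the upstream
 * header): a statically defined rt_mutex serializing updates to a shared
 * counter from a context that may sleep. The identifiers example_lock,
 * example_count and example_inc are hypothetical.
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *	static int example_count;
 *
 *	static void example_inc(void)
 *	{
 *		rt_mutex_lock(&example_lock);
 *		example_count++;
 *		rt_mutex_unlock(&example_lock);
 *	}
 *
 * For locks embedded in dynamically allocated objects, rt_mutex_init()
 * performs the runtime initialization and sets up the lockdep class, while
 * rt_mutex_lock_interruptible() and rt_mutex_lock_killable() return -EINTR
 * instead of blocking indefinitely when a (fatal) signal arrives.
 */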