// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old;					\
		CODE								\
		if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,		\
						 &old.lock_count,		\
						 new.lock_count))) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
bool lockref_get_not_zero(struct lockref *lockref)
{
	bool retval = false;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return false;
	,
		return true;
	);

	spin_lock(&lockref->lock);
	if (lockref->count > 0) {
		lockref->count++;
		retval = true;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return -1.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
bool lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return true;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return false;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return true;
}
EXPORT_SYMBOL(lockref_put_or_lock);

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
bool lockref_get_not_dead(struct lockref *lockref)
{
	bool retval = false;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return false;
	,
		return true;
	);

	spin_lock(&lockref->lock);
	if (lockref->count >= 0) {
		lockref->count++;
		retval = true;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
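
/*
 * Illustrative usage sketch (not part of this file or of the lockref API):
 * the type "my_obj" and the helpers "my_obj_get"/"my_obj_put" below are
 * hypothetical names introduced only to show the typical pattern of pairing
 * the lockless fast paths above with a spinlock-protected teardown path.
 * Guarded out so it is never compiled.
 */
#if 0
#include <linux/slab.h>

struct my_obj {
	struct lockref ref;	/* spinlock and count packed in one 64-bit word */
	/* ... payload ... */
};

/* Take a reference unless the object has already been marked dead. */
static bool my_obj_get(struct my_obj *obj)
{
	return lockref_get_not_dead(&obj->ref);
}

/* Drop a reference; tear the object down when the last one goes away. */
static void my_obj_put(struct my_obj *obj)
{
	if (lockref_put_or_lock(&obj->ref))
		return;		/* fast path: count was > 1 and was decremented */

	/* Slow path: count was <= 1 and obj->ref.lock is now held. */
	lockref_mark_dead(&obj->ref);
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}
#endif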