/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * Unlike ordinary lock requests, these special requests are not blocked
 * while recovery is in progress.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED. The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * In addition, when a lock is already held in EX mode locally, a SHARED or
 * DEFERRED mode request with the LM_FLAG_ANY flag set will be granted.
 * (The LM_FLAG_ANY flag is currently only used for SHARED mode requests.)
 *
 * LM_FLAG_NODE_SCOPE
 * This holder agrees to share the lock within this node. In other words,
 * the glock is held in EX mode according to DLM, but local holders on the
 * same node can share it.
 */

#define LM_FLAG_TRY		0x0001
#define LM_FLAG_TRY_1CB		0x0002
#define LM_FLAG_NOEXP		0x0004
#define LM_FLAG_ANY		0x0008
#define LM_FLAG_NODE_SCOPE	0x0020
#define GL_ASYNC		0x0040
#define GL_EXACT		0x0080
#define GL_SKIP			0x0100
#define GL_NOPID		0x0200
#define GL_NOCACHE		0x0400
#define GL_NOBLOCK		0x0800
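
/*
 * Example (illustrative sketch only, not part of this interface): a caller
 * that must not block can combine LM_FLAG_TRY with a SHARED request.  Here
 * "gl" stands for a hypothetical glock pointer; gfs2_glock_nq_init(),
 * gfs2_glock_dq_uninit() and GLR_TRYFAILED are declared further down in
 * this header.
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
 *	if (error == GLR_TRYFAILED)
 *		return -EAGAIN;		(lock not immediately available)
 *	if (error)
 *		return error;
 *	... access the protected data under the SHARED lock ...
 *	gfs2_glock_dq_uninit(&gh);
 */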

/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_TRY_AGAIN
 * The trylock request failed.
 *
 * LM_OUT_DEADLOCK
 * The lock request failed because it would deadlock.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 *
 * LM_OUT_ERROR
 * The lock request timed out or failed.
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_TRY_AGAIN	0x00000020
#define LM_OUT_DEADLOCK		0x00000010
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13

#define GL_GLOCK_MAX_HOLD	(long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD	(long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD	(long)(10)
#define GL_GLOCK_HOLD_INCR	(long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR	(long)(HZ / 40)

struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};

struct gfs2_glock_aspace {
	struct gfs2_glock glock;
	struct address_space mapping;
};

static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_lockref.lock);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_lockref.lock);

	return gh;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			container_of(gl, struct gfs2_glock_aspace, glock);
		return &gla->mapping;
	}
	return NULL;
}

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops,
		   int create, struct gfs2_glock **glp);
struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
void gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_glock_put_async(struct gfs2_glock *gl);

void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
			u16 flags, struct gfs2_holder *gh,
			unsigned long ip);
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
				    u16 flags, struct gfs2_holder *gh) {
	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}

void gfs2_holder_reinit(unsigned int state, u16 flags,
			struct gfs2_holder *gh);
void gfs2_holder_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq(struct gfs2_holder *gh);
int gfs2_glock_poll(struct gfs2_holder *gh);
int gfs2_instantiate(struct gfs2_holder *gh);
int gfs2_glock_holder_ready(struct gfs2_holder *gh);
int gfs2_glock_wait(struct gfs2_holder *gh);
int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq(struct gfs2_holder *gh);
void gfs2_glock_dq_wait(struct gfs2_holder *gh);
void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags,
		      struct gfs2_holder *gh);
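
/*
 * Example (illustrative sketch only, not taken from a real caller): the
 * split holder lifecycle used with GL_ASYNC.  gfs2_glock_nq() queues the
 * request and returns without waiting for the grant; the caller later
 * waits with gfs2_glock_wait() or checks progress with gfs2_glock_poll().
 * "gl" is a hypothetical glock pointer.
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error)
 *		error = gfs2_glock_wait(&gh);
 *	if (!error) {
 *		... the glock is now held in EX ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */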

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
		     bool fsid);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {		\
			gfs2_dump_glock(NULL, gl, true);	\
			BUG(); } } while(0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } }	\
	while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } }	\
	while (0)

__printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, u16 flags,
				     struct gfs2_holder *gh)
{
	int error;

	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later);
void gfs2_cancel_delete_work(struct gfs2_glock *gl);
void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
void gfs2_glock_free(struct gfs2_glock *gl);
void gfs2_glock_free_later(struct gfs2_glock *gl);

int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_register_debugfs(void);
void gfs2_unregister_debugfs(void);

void glock_set_object(struct gfs2_glock *gl, void *object);
void glock_clear_object(struct gfs2_glock *gl, void *object);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
	gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
	return gh->gh_gl;
}

static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
	return !list_empty(&gh->gh_list);
}

void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);

static inline bool glock_needs_demote(struct gfs2_glock *gl)
{
	return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
		test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
}

#endif /* __GLOCK_DOT_H__ */