/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmcommon.h
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

#define DLM_HB_NODE_DOWN_PRI     (0xf000000)
#define DLM_HB_NODE_UP_PRI       (0x8000000)

#define DLM_LOCKID_NAME_MAX    32

#define DLM_DOMAIN_NAME_MAX_LEN        255
#define DLM_LOCK_RES_OWNER_UNKNOWN     O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL    5     // flush everything every 5 passes
#define DLM_THREAD_MS                  200   // flush at least every 200 ms

#define DLM_HASH_SIZE_DEFAULT   (1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES         1
#else
# define DLM_HASH_PAGES         (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE    (PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS        (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)

/* Intended to make it easier for us to switch out hash functions */
#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)

enum dlm_mle_type {
        DLM_MLE_BLOCK = 0,
        DLM_MLE_MASTER = 1,
        DLM_MLE_MIGRATION = 2,
        DLM_MLE_NUM_TYPES = 3,
};

struct dlm_master_list_entry {
        struct hlist_node master_hash_node;
        struct list_head hb_events;
        struct dlm_ctxt *dlm;
        spinlock_t spinlock;
        wait_queue_head_t wq;
        atomic_t woken;
        struct kref mle_refs;
        int inuse;
        unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        u8 master;
        u8 new_master;
        enum dlm_mle_type type;
        struct o2hb_callback_func mle_hb_up;
        struct o2hb_callback_func mle_hb_down;
        struct dlm_lock_resource *mleres;
        unsigned char mname[DLM_LOCKID_NAME_MAX];
        unsigned int mnamelen;
        unsigned int mnamehash;
};

enum dlm_ast_type {
        DLM_AST = 0,
        DLM_BAST = 1,
        DLM_ASTUNLOCK = 2,
};


#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
                         LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
                         LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME          "$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN      9

static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
        if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
            memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0)
                return 1;
        return 0;
}

#define DLM_RECO_STATE_ACTIVE    0x0001
#define DLM_RECO_STATE_FINALIZE  0x0002

struct dlm_recovery_ctxt
{
        struct list_head resources;
        struct list_head node_data;
        u8  new_master;
        u8  dead_node;
        u16 state;
        unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        wait_queue_head_t event;
};

enum dlm_ctxt_state {
        DLM_CTXT_NEW = 0,
        DLM_CTXT_JOINED = 1,
        DLM_CTXT_IN_SHUTDOWN = 2,
        DLM_CTXT_LEAVING = 3,
};

struct dlm_ctxt
{
        struct list_head list;
        struct hlist_head **lockres_hash;
        struct list_head dirty_list;
        struct list_head purge_list;
        struct list_head pending_asts;
        struct list_head pending_basts;
        struct list_head tracking_list;
        unsigned int purge_count;
        spinlock_t spinlock;
        spinlock_t ast_lock;
        spinlock_t track_lock;
        char *name;
        u8 node_num;
        u32 key;
        u8  joining_node;
        wait_queue_head_t dlm_join_events;
        unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        struct dlm_recovery_ctxt reco;
        spinlock_t master_lock;
        struct hlist_head **master_hash;
        struct list_head mle_hb_events;

        /* these give a really vague idea of the system load */
        atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
        atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
        atomic_t res_tot_count;
        atomic_t res_cur_count;

        struct dlm_debug_ctxt *dlm_debug_ctxt;
        struct dentry *dlm_debugfs_subroot;

        /* NOTE: Next three are protected by dlm_domain_lock */
        struct kref dlm_refs;
        enum dlm_ctxt_state dlm_state;
        unsigned int num_joins;

        struct o2hb_callback_func dlm_hb_up;
        struct o2hb_callback_func dlm_hb_down;
        struct task_struct *dlm_thread_task;
        struct task_struct *dlm_reco_thread_task;
        struct workqueue_struct *dlm_worker;
        wait_queue_head_t dlm_thread_wq;
        wait_queue_head_t dlm_reco_thread_wq;
        wait_queue_head_t ast_wq;
        wait_queue_head_t migration_wq;

        struct work_struct dispatched_work;
        struct list_head work_list;
        spinlock_t work_lock;
        struct list_head dlm_domain_handlers;
        struct list_head dlm_eviction_callbacks;

        /* The filesystem specifies this at domain registration.  We
         * cache it here to know what to tell other nodes. */
        struct dlm_protocol_version fs_locking_proto;
        /* This is the inter-dlm communication version */
        struct dlm_protocol_version dlm_locking_proto;
};

static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
{
        return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
                (i % DLM_BUCKETS_PER_PAGE);
}

static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
                                                 unsigned i)
{
        return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
                (i % DLM_BUCKETS_PER_PAGE);
}

/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

struct dlm_request_all_locks_priv
{
        u8 reco_master;
        u8 dead_node;
};

struct dlm_mig_lockres_priv
{
        struct dlm_lock_resource *lockres;
        u8 real_master;
        u8 extra_ref;
};

struct dlm_assert_master_priv
{
        struct dlm_lock_resource *lockres;
        u8 request_from;
        u32 flags;
        unsigned ignore_higher:1;
};

struct dlm_deref_lockres_priv
{
        struct dlm_lock_resource *deref_res;
        u8 deref_node;
};

struct dlm_work_item
{
        struct list_head list;
        dlm_workfunc_t *func;
        struct dlm_ctxt *dlm;
        void *data;
        union {
                struct dlm_request_all_locks_priv ral;
                struct dlm_mig_lockres_priv ml;
                struct dlm_assert_master_priv am;
                struct dlm_deref_lockres_priv dl;
        } u;
};

static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
                                      struct dlm_work_item *i,
                                      dlm_workfunc_t *f, void *data)
{
        memset(i, 0, sizeof(*i));
        i->func = f;
        INIT_LIST_HEAD(&i->list);
        i->data = data;
        i->dlm = dlm;  /* must have already done a dlm_grab on this! */
}



static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
                                          u8 node)
{
        assert_spin_locked(&dlm->spinlock);

        dlm->joining_node = node;
        wake_up(&dlm->dlm_join_events);
}

#define DLM_LOCK_RES_UNINITED             0x00000001
#define DLM_LOCK_RES_RECOVERING           0x00000002
#define DLM_LOCK_RES_READY                0x00000004
#define DLM_LOCK_RES_DIRTY                0x00000008
#define DLM_LOCK_RES_IN_PROGRESS          0x00000010
#define DLM_LOCK_RES_MIGRATING            0x00000020
#define DLM_LOCK_RES_DROPPING_REF         0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000
#define DLM_LOCK_RES_SETREF_INPROG        0x00002000

/* max milliseconds to wait to sync up a network failure with a node death */
#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

#define DLM_PURGE_INTERVAL_MS   (8 * 1000)

struct dlm_lock_resource
{
        /* WARNING: Please see the comment in dlm_init_lockres before
         * adding fields here. */
        struct hlist_node hash_node;
        struct qstr lockname;
        struct kref refs;

        /*
         * Please keep granted, converting, and blocked in this order,
         * as some funcs want to iterate over all lists.
         *
         * All four lists are protected by the hash's reference.
         */
        struct list_head granted;
        struct list_head converting;
        struct list_head blocked;
        struct list_head purge;

        /*
         * These two lists require you to hold an additional reference
         * while they are on the list.
         */
        struct list_head dirty;
        struct list_head recovering; // dlm_recovery_ctxt.resources list

        /* Added during init and removed during release */
        struct list_head tracking;      /* dlm->tracking_list */

        /* unused lock resources have their last_used stamped and are
         * put on a list for the dlm thread to run. */
        unsigned long    last_used;

        struct dlm_ctxt *dlm;

        unsigned migration_pending:1;
        atomic_t asts_reserved;
        spinlock_t spinlock;
        wait_queue_head_t wq;
        u8  owner;              // node which owns the lock resource, or unknown
        u16 state;
        char lvb[DLM_LVB_LEN];
        unsigned int inflight_locks;
        unsigned int inflight_assert_workers;
        unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

struct dlm_migratable_lock
{
        __be64 cookie;

        /* these 3 are just padding for the in-memory structure, but
         * list and flags are actually used when sent over the wire */
        __be16 pad1;
        u8 list;  // 0=granted, 1=converting, 2=blocked
        u8 flags;

        s8 type;
        s8 convert_type;
        s8 highest_blocked;
        u8 node;
};  // 16 bytes

struct dlm_lock
{
        struct dlm_migratable_lock ml;

        struct list_head list;
        struct list_head ast_list;
        struct list_head bast_list;
        struct dlm_lock_resource *lockres;
        spinlock_t spinlock;
        struct kref lock_refs;

        // ast and bast must be callable while holding a spinlock!
        dlm_astlockfunc_t *ast;
        dlm_bastlockfunc_t *bast;
        void *astdata;
        struct dlm_lockstatus *lksb;
        unsigned ast_pending:1,
                 bast_pending:1,
                 convert_pending:1,
                 lock_pending:1,
                 cancel_pending:1,
                 unlock_pending:1,
                 lksb_kernel_allocated:1;
};

enum dlm_lockres_list {
        DLM_GRANTED_LIST = 0,
        DLM_CONVERTING_LIST = 1,
        DLM_BLOCKED_LIST = 2,
};

static inline int dlm_lvb_is_empty(char *lvb)
{
        int i;

        for (i = 0; i < DLM_LVB_LEN; i++)
                if (lvb[i])
                        return 0;
        return 1;
}

static inline char *dlm_list_in_text(enum dlm_lockres_list idx)
{
        if (idx == DLM_GRANTED_LIST)
                return "granted";
        else if (idx == DLM_CONVERTING_LIST)
                return "converting";
        else if (idx == DLM_BLOCKED_LIST)
                return "blocked";
        else
                return "unknown";
}

static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
        struct list_head *ret = NULL;

        if (idx == DLM_GRANTED_LIST)
                ret = &res->granted;
        else if (idx == DLM_CONVERTING_LIST)
                ret = &res->converting;
        else if (idx == DLM_BLOCKED_LIST)
                ret = &res->blocked;
        else
                BUG();
        return ret;
}




struct dlm_node_iter
{
        unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        int curnode;
};


enum {
        DLM_MASTER_REQUEST_MSG          = 500,
        DLM_UNUSED_MSG1                 = 501,
        DLM_ASSERT_MASTER_MSG           = 502,
        DLM_CREATE_LOCK_MSG             = 503,
        DLM_CONVERT_LOCK_MSG            = 504,
        DLM_PROXY_AST_MSG               = 505,
        DLM_UNLOCK_LOCK_MSG             = 506,
        DLM_DEREF_LOCKRES_MSG           = 507,
        DLM_MIGRATE_REQUEST_MSG         = 508,
        DLM_MIG_LOCKRES_MSG             = 509,
        DLM_QUERY_JOIN_MSG              = 510,
        DLM_ASSERT_JOINED_MSG           = 511,
        DLM_CANCEL_JOIN_MSG             = 512,
        DLM_EXIT_DOMAIN_MSG             = 513,
        DLM_MASTER_REQUERY_MSG          = 514,
        DLM_LOCK_REQUEST_MSG            = 515,
        DLM_RECO_DATA_DONE_MSG          = 516,
        DLM_BEGIN_RECO_MSG              = 517,
        DLM_FINALIZE_RECO_MSG           = 518,
        DLM_QUERY_REGION                = 519,
        DLM_QUERY_NODEINFO              = 520,
        DLM_BEGIN_EXIT_DOMAIN_MSG       = 521,
};

struct dlm_reco_node_data
{
        int state;
        u8 node_num;
        struct list_head list;
};

enum {
        DLM_RECO_NODE_DATA_DEAD = -1,
        DLM_RECO_NODE_DATA_INIT = 0,
        DLM_RECO_NODE_DATA_REQUESTING = 1,
        DLM_RECO_NODE_DATA_REQUESTED = 2,
        DLM_RECO_NODE_DATA_RECEIVING = 3,
        DLM_RECO_NODE_DATA_DONE = 4,
        DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
};


enum {
        DLM_MASTER_RESP_NO = 0,
        DLM_MASTER_RESP_YES = 1,
        DLM_MASTER_RESP_MAYBE = 2,
        DLM_MASTER_RESP_ERROR = 3,
};


struct dlm_master_request
{
        u8 node_idx;
        u8 namelen;
        __be16 pad1;
        __be32 flags;

        u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_ASSERT_RESPONSE_REASSERT       0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF    0x00000002

#define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
#define DLM_ASSERT_MASTER_REQUERY          0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
struct dlm_assert_master
{
        u8 node_idx;
        u8 namelen;
        __be16 pad1;
        __be32 flags;

        u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MIGRATE_RESPONSE_MASTERY_REF   0x00000001

struct dlm_migrate_request
{
        u8 master;
        u8 new_master;
        u8 namelen;
        u8 pad1;
        __be32 pad2;
        u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_master_requery
{
        u8 pad1;
        u8 pad2;
        u8 node_idx;
        u8 namelen;
        __be32 pad3;
        u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MRES_RECOVERY   0x01
#define DLM_MRES_MIGRATION  0x02
#define DLM_MRES_ALL_DONE   0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible.  Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 * struct _dlm_lockres_page
 * {
 *      dlm_migratable_lockres mres;
 *      dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 *      u8 pad[DLM_MIG_LOCKRES_RESERVED];
 * };
 *
 * from ../cluster/tcp.h
 *    NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(net_msg))
 *    (roughly 4080 bytes)
 * and sizeof(dlm_migratable_lockres) = 112 bytes
 * and sizeof(dlm_migratable_lock) = 16 bytes
 *
 * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *     sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *        NET_MAX_PAYLOAD_BYTES
 *  (240 * 16) + 112 + 128 = 4080
 *
 * So a lockres would need more than 240 locks before it would
 * use more than one network packet to recover.  Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS   240

struct dlm_migratable_lockres
{
        u8 master;
        u8 lockname_len;
        u8 num_locks;       // locks sent in this structure
        u8 flags;
        __be32 total_locks; // locks to be sent for this migration cookie
        __be64 mig_cookie;  // cookie for this lockres migration
                            // or zero if not needed
        // 16 bytes
        u8 lockname[DLM_LOCKID_NAME_MAX];
        // 48 bytes
        u8 lvb[DLM_LVB_LEN];
        // 112 bytes
        struct dlm_migratable_lock ml[0];  // 16 bytes each, begins at byte 112
};
#define DLM_MIG_LOCKRES_MAX_LEN  \
        (sizeof(struct dlm_migratable_lockres) + \
         (sizeof(struct dlm_migratable_lock) * \
          DLM_MAX_MIGRATABLE_LOCKS) )

/* from above, 128 bytes
 * for some undetermined future use */
#define DLM_MIG_LOCKRES_RESERVED   (NET_MAX_PAYLOAD_BYTES - \
                                    DLM_MIG_LOCKRES_MAX_LEN)

struct dlm_create_lock
{
        __be64 cookie;

        __be32 flags;
        u8 pad1;
        u8 node_idx;
        s8 requested_type;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_convert_lock
{
        __be64 cookie;

        __be32 flags;
        u8 pad1;
        u8 node_idx;
        s8 requested_type;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];

        s8 lvb[0];
};
#define DLM_CONVERT_LOCK_MAX_LEN  (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)

struct dlm_unlock_lock
{
        __be64 cookie;

        __be32 flags;
        __be16 pad1;
        u8 node_idx;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];

        s8 lvb[0];
};
#define DLM_UNLOCK_LOCK_MAX_LEN  (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)

struct dlm_proxy_ast
{
        __be64 cookie;

        __be32 flags;
        u8 node_idx;
        u8 type;
        u8 blocked_type;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];

        s8 lvb[0];
};
#define DLM_PROXY_AST_MAX_LEN  (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)

#define DLM_MOD_KEY (0x666c6172)
enum dlm_query_join_response_code {
        JOIN_DISALLOW = 0,
        JOIN_OK = 1,
        JOIN_OK_NO_MAP = 2,
        JOIN_PROTOCOL_MISMATCH = 3,
};

struct dlm_query_join_packet {
        u8 code;        /* Response code.  dlm_minor and fs_minor
                           are only valid if this is JOIN_OK */
        u8 dlm_minor;   /* The minor version of the protocol the
                           dlm is speaking. */
        u8 fs_minor;    /* The minor version of the protocol the
                           filesystem is speaking. */
        u8 reserved;
};

union dlm_query_join_response {
        __be32 intval;
        struct dlm_query_join_packet packet;
};

struct dlm_lock_request
{
        u8 node_idx;
        u8 dead_node;
        __be16 pad1;
        __be32 pad2;
};

struct dlm_reco_data_done
{
        u8 node_idx;
        u8 dead_node;
        __be16 pad1;
        __be32 pad2;

        /* unused for now */
        /* eventually we can use this to attempt
         * lvb recovery based on each node's info */
        u8 reco_lvb[DLM_LVB_LEN];
};

struct dlm_begin_reco
{
        u8 node_idx;
        u8 dead_node;
        __be16 pad1;
        __be32 pad2;
};


#define BITS_PER_BYTE 8
#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)

struct dlm_query_join_request
{
        u8 node_idx;
        u8 pad1[2];
        u8 name_len;
        struct dlm_protocol_version dlm_proto;
        struct dlm_protocol_version fs_proto;
        u8 domain[O2NM_MAX_NAME_LEN];
        u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
};

struct dlm_assert_joined
{
        u8 node_idx;
        u8 pad1[2];
        u8 name_len;
        u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_cancel_join
{
        u8 node_idx;
        u8 pad1[2];
        u8 name_len;
        u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_query_region {
        u8 qr_node;
        u8 qr_numregions;
        u8 qr_namelen;
        u8 pad1;
        u8 qr_domain[O2NM_MAX_NAME_LEN];
        u8 qr_regions[O2HB_MAX_REGION_NAME_LEN * O2NM_MAX_REGIONS];
};

struct dlm_node_info {
        u8 ni_nodenum;
        u8 pad1;
        __be16 ni_ipv4_port;
        __be32 ni_ipv4_address;
};

struct dlm_query_nodeinfo {
        u8 qn_nodenum;
        u8 qn_numnodes;
        u8 qn_namelen;
        u8 pad1;
        u8 qn_domain[O2NM_MAX_NAME_LEN];
        struct dlm_node_info qn_nodes[O2NM_MAX_NODES];
};

struct dlm_exit_domain
{
        u8 node_idx;
        u8 pad1[3];
};

struct dlm_finalize_reco
{
        u8 node_idx;
        u8 dead_node;
        u8 flags;
        u8 pad1;
        __be32 pad2;
};

struct dlm_deref_lockres
{
        u32 pad1;
        u16 pad2;
        u8 node_idx;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];
};

static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
        enum dlm_status status = DLM_NORMAL;

        assert_spin_locked(&res->spinlock);

        if (res->state & DLM_LOCK_RES_RECOVERING)
                status = DLM_RECOVERING;
        else if (res->state & DLM_LOCK_RES_MIGRATING)
                status = DLM_MIGRATING;
        else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
                status = DLM_FORWARD;

        return status;
}

static inline u8 dlm_get_lock_cookie_node(u64 cookie)
{
        u8 ret;

        cookie >>= 56;
        ret = (u8)(cookie & 0xffULL);
        return ret;
}

static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
{
        unsigned long long ret;

        ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
        return ret;
}

struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
                               struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

void dlm_lock_attach_lockres(struct dlm_lock *lock,
                             struct dlm_lock_resource *res);

int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
                            void **ret_data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
                             void **ret_data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
                          void **ret_data);

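/*
 * Illustrative note (not taken from the dlm code itself; the variable
 * names below are examples only): the two cookie helpers above treat a
 * lock cookie as an 8-bit node number packed into the top byte with a
 * 56-bit sequence number below it.  A cookie composed as
 *
 *      u64 cookie = ((u64)node << 56) | (seq & 0x00ffffffffffffffULL);
 *
 * therefore round-trips through those helpers:
 *
 *      dlm_get_lock_cookie_node(cookie) == node
 *      dlm_get_lock_cookie_seq(cookie)  == (seq & 0x00ffffffffffffffULL)
 */
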
void dlm_revert_pending_convert(struct dlm_lock_resource *res,
                                struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
                             struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
                            void **ret_data);
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
                               struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
                               struct dlm_lock *lock);

int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);

void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                            struct dlm_lock_resource *res);
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
        /* This is called on every lookup, so it might be worth
         * inlining. */
        kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
                                                     const char *name,
                                                     unsigned int len,
                                                     unsigned int hash);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
                                                const char *name,
                                                unsigned int len,
                                                unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
                                              const char *name,
                                              unsigned int len);

int dlm_is_host_down(int errno);

struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
                                                 const char *lockid,
                                                 int namelen,
                                                 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
                                          const char *name,
                                          unsigned int namelen);

void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res, int bit);
void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
                                  struct dlm_lock_resource *res, int bit);

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res);
void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res);

void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
                                        struct dlm_lock_resource *res);

void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
                      struct dlm_lock_resource *res,
                      struct dlm_lock *lock);
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
                      struct dlm_lock_resource *res,
                      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
                       struct dlm_lock_resource *res,
                       struct dlm_lock *lock,
                       int blocked_type);
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
                           struct dlm_lock_resource *res,
                           struct dlm_lock *lock,
                           int msg_type,
                           int blocked_type, int flags);
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      struct dlm_lock *lock,
                                      int blocked_type)
{
        return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
                                      blocked_type, 0);
}

static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_lock *lock,
                                     int flags)
{
        return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
                                      0, flags);
}

void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);


int dlm_nm_init(struct dlm_ctxt *dlm);
int dlm_heartbeat_init(struct dlm_ctxt *dlm);
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_finish_migration(struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res,
                         u8 old_master);
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
                              void **ret_data);
void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                              void **ret_data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
                                void **ret_data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                            void **ret_data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
                                  void **ret_data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
                           void **ret_data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
                              void **ret_data);
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                          u8 nodenum, u8 *real_master);


int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
                               struct dlm_lock_resource *res,
                               int ignore_higher,
                               u8 request_from,
                               u32 flags);


int dlm_send_one_lockres(struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res,
                         struct dlm_migratable_lockres *mres,
                         u8 send_to,
                         u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop in function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);

/* will exit holding res->spinlock, but may drop in function */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
        __dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
                                          DLM_LOCK_RES_RECOVERING|
                                          DLM_LOCK_RES_MIGRATING));
}

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);

/* create/destroy slab caches */
int dlm_init_master_caches(void);
void dlm_destroy_master_caches(void);

int dlm_init_lock_cache(void);
void dlm_destroy_lock_cache(void);

int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
                           u8 dead_node);
void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);

static inline const char * dlm_lock_mode_name(int mode)
{
        switch (mode) {
        case LKM_EXMODE:
                return "EX";
        case LKM_PRMODE:
                return "PR";
        case LKM_NLMODE:
                return "NL";
        }
        return "UNKNOWN";
}


static inline int dlm_lock_compatible(int existing, int request)
{
        /* NO_LOCK compatible with all */
        if (request == LKM_NLMODE ||
            existing == LKM_NLMODE)
                return 1;

        /* EX incompatible with all non-NO_LOCK */
        if (request == LKM_EXMODE)
                return 0;

        /* request must be PR, which is compatible with PR */
        if (existing == LKM_PRMODE)
                return 1;

        return 0;
}

static inline int dlm_lock_on_list(struct list_head *head,
                                   struct dlm_lock *lock)
{
        struct dlm_lock *tmplock;

        list_for_each_entry(tmplock, head, list) {
                if (tmplock == lock)
                        return 1;
        }
        return 0;
}


static inline enum dlm_status dlm_err_to_dlm_status(int err)
{
        enum dlm_status ret;

        if (err == -ENOMEM)
                ret = DLM_SYSERR;
        else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
                ret = DLM_NOLOCKMGR;
        else if (err == -EINVAL)
                ret = DLM_BADPARAM;
        else if (err == -ENAMETOOLONG)
                ret = DLM_IVBUFLEN;
        else
                ret = DLM_BADARGS;
        return ret;
}


static inline void dlm_node_iter_init(unsigned long *map,
                                      struct dlm_node_iter *iter)
{
        memcpy(iter->node_map, map, sizeof(iter->node_map));
        iter->curnode = -1;
}

static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
        int bit;

        bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
        if (bit >= O2NM_MAX_NODES) {
                iter->curnode = O2NM_MAX_NODES;
                return -ENOENT;
        }
        iter->curnode = bit;
        return bit;
}

static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
                                         struct dlm_lock_resource *res,
                                         u8 owner)
{
        assert_spin_locked(&res->spinlock);

        res->owner = owner;
}

static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
                                            struct dlm_lock_resource *res,
                                            u8 owner)
{
        assert_spin_locked(&res->spinlock);

        if (owner != res->owner)
                dlm_set_lockres_owner(dlm, res, owner);
}

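/*
 * Illustrative use of the node iterator helpers above (a sketch only;
 * 'dlm' and the choice of bitmap are hypothetical, and a real caller
 * must hold whatever lock protects the bitmap it snapshots):
 *
 *      struct dlm_node_iter iter;
 *      int node;
 *
 *      dlm_node_iter_init(dlm->domain_map, &iter);
 *      while ((node = dlm_node_iter_next(&iter)) >= 0) {
 *              ... do something with 'node' ...
 *      }
 *
 * dlm_node_iter_next() returns -ENOENT once every bit set in the
 * snapshot taken by dlm_node_iter_init() has been visited.
 */
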
#endif /* DLMCOMMON_H */