1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmast.c 5 * 6 * AST and BAST functionality for local and remote nodes 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/slab.h> 32 #include <linux/highmem.h> 33 #include <linux/utsname.h> 34 #include <linux/init.h> 35 #include <linux/sysctl.h> 36 #include <linux/random.h> 37 #include <linux/blkdev.h> 38 #include <linux/socket.h> 39 #include <linux/inet.h> 40 #include <linux/spinlock.h> 41 42 43 #include "cluster/heartbeat.h" 44 #include "cluster/nodemanager.h" 45 #include "cluster/tcp.h" 46 #include "cluster/endian.h" 47 48 #include "dlmapi.h" 49 #include "dlmcommon.h" 50 51 #define MLOG_MASK_PREFIX ML_DLM 52 #include "cluster/masklog.h" 53 54 static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 55 struct dlm_lock *lock); 56 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 57 58 /* Should be called as an ast gets queued to see if the new 59 * lock level will obsolete a pending bast. 60 * For example, if dlm_thread queued a bast for an EX lock that 61 * was blocking another EX, but before sending the bast the 62 * lock owner downconverted to NL, the bast is now obsolete. 63 * Only the ast should be sent. 64 * This is needed because the lock and convert paths can queue 65 * asts out-of-band (not waiting for dlm_thread) in order to 66 * allow for LKM_NOQUEUE to get immediate responses. */ 67 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 68 { 69 assert_spin_locked(&dlm->ast_lock); 70 assert_spin_locked(&lock->spinlock); 71 72 if (lock->ml.highest_blocked == LKM_IVMODE) 73 return 0; 74 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); 75 76 if (lock->bast_pending && 77 list_empty(&lock->bast_list)) 78 /* old bast already sent, ok */ 79 return 0; 80 81 if (lock->ml.type == LKM_EXMODE) 82 /* EX blocks anything left, any bast still valid */ 83 return 0; 84 else if (lock->ml.type == LKM_NLMODE) 85 /* NL blocks nothing, no reason to send any bast, cancel it */ 86 return 1; 87 else if (lock->ml.highest_blocked != LKM_EXMODE) 88 /* PR only blocks EX */ 89 return 1; 90 91 return 0; 92 } 93 94 static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 95 { 96 mlog_entry_void(); 97 98 BUG_ON(!dlm); 99 BUG_ON(!lock); 100 101 assert_spin_locked(&dlm->ast_lock); 102 if (!list_empty(&lock->ast_list)) { 103 mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n", 104 lock->ast_pending, lock->ml.type); 105 BUG(); 106 } 107 BUG_ON(!list_empty(&lock->ast_list)); 108 if (lock->ast_pending) 109 mlog(0, "lock has an ast getting flushed right now\n"); 110 111 /* putting lock on list, add a ref */ 112 dlm_lock_get(lock); 113 spin_lock(&lock->spinlock); 114 115 /* check to see if this ast obsoletes the bast */ 116 if (dlm_should_cancel_bast(dlm, lock)) { 117 struct dlm_lock_resource *res = lock->lockres; 118 mlog(0, "%s: cancelling bast for %.*s\n", 119 dlm->name, res->lockname.len, res->lockname.name); 120 lock->bast_pending = 0; 121 list_del_init(&lock->bast_list); 122 lock->ml.highest_blocked = LKM_IVMODE; 123 /* removing lock from list, remove a ref. guaranteed 124 * this won't be the last ref because of the get above, 125 * so res->spinlock will not be taken here */ 126 dlm_lock_put(lock); 127 /* free up the reserved bast that we are cancelling. 128 * guaranteed that this will not be the last reserved 129 * ast because *both* an ast and a bast were reserved 130 * to get to this point. the res->spinlock will not be 131 * taken here */ 132 dlm_lockres_release_ast(dlm, res); 133 } 134 list_add_tail(&lock->ast_list, &dlm->pending_asts); 135 lock->ast_pending = 1; 136 spin_unlock(&lock->spinlock); 137 } 138 139 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 140 { 141 mlog_entry_void(); 142 143 BUG_ON(!dlm); 144 BUG_ON(!lock); 145 146 spin_lock(&dlm->ast_lock); 147 __dlm_queue_ast(dlm, lock); 148 spin_unlock(&dlm->ast_lock); 149 } 150 151 152 static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 153 { 154 mlog_entry_void(); 155 156 BUG_ON(!dlm); 157 BUG_ON(!lock); 158 assert_spin_locked(&dlm->ast_lock); 159 160 BUG_ON(!list_empty(&lock->bast_list)); 161 if (lock->bast_pending) 162 mlog(0, "lock has a bast getting flushed right now\n"); 163 164 /* putting lock on list, add a ref */ 165 dlm_lock_get(lock); 166 spin_lock(&lock->spinlock); 167 list_add_tail(&lock->bast_list, &dlm->pending_basts); 168 lock->bast_pending = 1; 169 spin_unlock(&lock->spinlock); 170 } 171 172 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 173 { 174 mlog_entry_void(); 175 176 BUG_ON(!dlm); 177 BUG_ON(!lock); 178 179 spin_lock(&dlm->ast_lock); 180 __dlm_queue_bast(dlm, lock); 181 spin_unlock(&dlm->ast_lock); 182 } 183 184 static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 185 struct dlm_lock *lock) 186 { 187 struct dlm_lockstatus *lksb = lock->lksb; 188 BUG_ON(!lksb); 189 190 /* only updates if this node masters the lockres */ 191 if (res->owner == dlm->node_num) { 192 193 spin_lock(&res->spinlock); 194 /* check the lksb flags for the direction */ 195 if (lksb->flags & DLM_LKSB_GET_LVB) { 196 mlog(0, "getting lvb from lockres for %s node\n", 197 lock->ml.node == dlm->node_num ? "master" : 198 "remote"); 199 memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN); 200 } else if (lksb->flags & DLM_LKSB_PUT_LVB) { 201 mlog(0, "setting lvb from lockres for %s node\n", 202 lock->ml.node == dlm->node_num ? "master" : 203 "remote"); 204 memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN); 205 } 206 spin_unlock(&res->spinlock); 207 } 208 209 /* reset any lvb flags on the lksb */ 210 lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); 211 } 212 213 void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 214 struct dlm_lock *lock) 215 { 216 dlm_astlockfunc_t *fn; 217 struct dlm_lockstatus *lksb; 218 219 mlog_entry_void(); 220 221 lksb = lock->lksb; 222 fn = lock->ast; 223 BUG_ON(lock->ml.node != dlm->node_num); 224 225 dlm_update_lvb(dlm, res, lock); 226 (*fn)(lock->astdata); 227 } 228 229 230 int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 231 struct dlm_lock *lock) 232 { 233 int ret; 234 struct dlm_lockstatus *lksb; 235 int lksbflags; 236 237 mlog_entry_void(); 238 239 lksb = lock->lksb; 240 BUG_ON(lock->ml.node == dlm->node_num); 241 242 lksbflags = lksb->flags; 243 dlm_update_lvb(dlm, res, lock); 244 245 /* lock request came from another node 246 * go do the ast over there */ 247 ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags); 248 return ret; 249 } 250 251 void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 252 struct dlm_lock *lock, int blocked_type) 253 { 254 dlm_bastlockfunc_t *fn = lock->bast; 255 256 mlog_entry_void(); 257 BUG_ON(lock->ml.node != dlm->node_num); 258 259 (*fn)(lock->astdata, blocked_type); 260 } 261 262 263 264 int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data) 265 { 266 int ret; 267 unsigned int locklen; 268 struct dlm_ctxt *dlm = data; 269 struct dlm_lock_resource *res = NULL; 270 struct dlm_lock *lock = NULL; 271 struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; 272 char *name; 273 struct list_head *iter, *head=NULL; 274 u64 cookie; 275 u32 flags; 276 277 if (!dlm_grab(dlm)) { 278 dlm_error(DLM_REJECTED); 279 return DLM_REJECTED; 280 } 281 282 mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), 283 "Domain %s not fully joined!\n", dlm->name); 284 285 name = past->name; 286 locklen = past->namelen; 287 cookie = be64_to_cpu(past->cookie); 288 flags = be32_to_cpu(past->flags); 289 290 if (locklen > DLM_LOCKID_NAME_MAX) { 291 ret = DLM_IVBUFLEN; 292 mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n"); 293 goto leave; 294 } 295 296 if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == 297 (LKM_PUT_LVB|LKM_GET_LVB)) { 298 mlog(ML_ERROR, "both PUT and GET lvb specified\n"); 299 ret = DLM_BADARGS; 300 goto leave; 301 } 302 303 mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : 304 (flags & LKM_GET_LVB ? "get lvb" : "none")); 305 306 mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type); 307 308 if (past->type != DLM_AST && 309 past->type != DLM_BAST) { 310 mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu" 311 "name=%.*s\n", past->type, 312 dlm_get_lock_cookie_node(cookie), 313 dlm_get_lock_cookie_seq(cookie), 314 locklen, name); 315 ret = DLM_IVLOCKID; 316 goto leave; 317 } 318 319 res = dlm_lookup_lockres(dlm, name, locklen); 320 if (!res) { 321 mlog(ML_ERROR, "got %sast for unknown lockres! " 322 "cookie=%u:%llu, name=%.*s, namelen=%u\n", 323 past->type == DLM_AST ? "" : "b", 324 dlm_get_lock_cookie_node(cookie), 325 dlm_get_lock_cookie_seq(cookie), 326 locklen, name, locklen); 327 ret = DLM_IVLOCKID; 328 goto leave; 329 } 330 331 /* cannot get a proxy ast message if this node owns it */ 332 BUG_ON(res->owner == dlm->node_num); 333 334 mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name); 335 336 spin_lock(&res->spinlock); 337 if (res->state & DLM_LOCK_RES_RECOVERING) { 338 mlog(0, "responding with DLM_RECOVERING!\n"); 339 ret = DLM_RECOVERING; 340 goto unlock_out; 341 } 342 if (res->state & DLM_LOCK_RES_MIGRATING) { 343 mlog(0, "responding with DLM_MIGRATING!\n"); 344 ret = DLM_MIGRATING; 345 goto unlock_out; 346 } 347 /* try convert queue for both ast/bast */ 348 head = &res->converting; 349 lock = NULL; 350 list_for_each(iter, head) { 351 lock = list_entry (iter, struct dlm_lock, list); 352 if (be64_to_cpu(lock->ml.cookie) == cookie) 353 goto do_ast; 354 } 355 356 /* if not on convert, try blocked for ast, granted for bast */ 357 if (past->type == DLM_AST) 358 head = &res->blocked; 359 else 360 head = &res->granted; 361 362 list_for_each(iter, head) { 363 lock = list_entry (iter, struct dlm_lock, list); 364 if (be64_to_cpu(lock->ml.cookie) == cookie) 365 goto do_ast; 366 } 367 368 mlog(ML_ERROR, "got %sast for unknown lock! cookie=%u:%llu, " 369 "name=%.*s, namelen=%u\n", 370 past->type == DLM_AST ? "" : "b", 371 dlm_get_lock_cookie_node(cookie), 372 dlm_get_lock_cookie_seq(cookie), 373 locklen, name, locklen); 374 375 ret = DLM_NORMAL; 376 unlock_out: 377 spin_unlock(&res->spinlock); 378 goto leave; 379 380 do_ast: 381 ret = DLM_NORMAL; 382 if (past->type == DLM_AST) { 383 /* do not alter lock refcount. switching lists. */ 384 list_del_init(&lock->list); 385 list_add_tail(&lock->list, &res->granted); 386 mlog(0, "ast: adding to granted list... type=%d, " 387 "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); 388 if (lock->ml.convert_type != LKM_IVMODE) { 389 lock->ml.type = lock->ml.convert_type; 390 lock->ml.convert_type = LKM_IVMODE; 391 } else { 392 // should already be there.... 393 } 394 395 lock->lksb->status = DLM_NORMAL; 396 397 /* if we requested the lvb, fetch it into our lksb now */ 398 if (flags & LKM_GET_LVB) { 399 BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB)); 400 memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN); 401 } 402 } 403 spin_unlock(&res->spinlock); 404 405 if (past->type == DLM_AST) 406 dlm_do_local_ast(dlm, res, lock); 407 else 408 dlm_do_local_bast(dlm, res, lock, past->blocked_type); 409 410 leave: 411 412 if (res) 413 dlm_lockres_put(res); 414 415 dlm_put(dlm); 416 return ret; 417 } 418 419 420 421 int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 422 struct dlm_lock *lock, int msg_type, 423 int blocked_type, int flags) 424 { 425 int ret = 0; 426 struct dlm_proxy_ast past; 427 struct kvec vec[2]; 428 size_t veclen = 1; 429 int status; 430 431 mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n", 432 res->lockname.len, res->lockname.name, lock->ml.node, 433 msg_type, blocked_type); 434 435 memset(&past, 0, sizeof(struct dlm_proxy_ast)); 436 past.node_idx = dlm->node_num; 437 past.type = msg_type; 438 past.blocked_type = blocked_type; 439 past.namelen = res->lockname.len; 440 memcpy(past.name, res->lockname.name, past.namelen); 441 past.cookie = lock->ml.cookie; 442 443 vec[0].iov_len = sizeof(struct dlm_proxy_ast); 444 vec[0].iov_base = &past; 445 if (flags & DLM_LKSB_GET_LVB) { 446 mlog(0, "returning requested LVB data\n"); 447 be32_add_cpu(&past.flags, LKM_GET_LVB); 448 vec[1].iov_len = DLM_LVB_LEN; 449 vec[1].iov_base = lock->lksb->lvb; 450 veclen++; 451 } 452 453 ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, 454 lock->ml.node, &status); 455 if (ret < 0) 456 mlog_errno(ret); 457 else { 458 if (status == DLM_RECOVERING) { 459 mlog(ML_ERROR, "sent AST to node %u, it thinks this " 460 "node is dead!\n", lock->ml.node); 461 BUG(); 462 } else if (status == DLM_MIGRATING) { 463 mlog(ML_ERROR, "sent AST to node %u, it returned " 464 "DLM_MIGRATING!\n", lock->ml.node); 465 BUG(); 466 } else if (status != DLM_NORMAL) { 467 mlog(ML_ERROR, "AST to node %u returned %d!\n", 468 lock->ml.node, status); 469 /* ignore it */ 470 } 471 ret = 0; 472 } 473 return ret; 474 } 475