1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmast.c 5 * 6 * AST and BAST functionality for local and remote nodes 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/slab.h> 32 #include <linux/highmem.h> 33 #include <linux/utsname.h> 34 #include <linux/init.h> 35 #include <linux/sysctl.h> 36 #include <linux/random.h> 37 #include <linux/blkdev.h> 38 #include <linux/socket.h> 39 #include <linux/inet.h> 40 #include <linux/spinlock.h> 41 42 43 #include "cluster/heartbeat.h" 44 #include "cluster/nodemanager.h" 45 #include "cluster/tcp.h" 46 #include "cluster/endian.h" 47 48 #include "dlmapi.h" 49 #include "dlmcommon.h" 50 51 #define MLOG_MASK_PREFIX ML_DLM 52 #include "cluster/masklog.h" 53 54 static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 55 struct dlm_lock *lock); 56 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 57 58 /* Should be called as an ast gets queued to see if the new 59 * lock level will obsolete a pending bast. 60 * For example, if dlm_thread queued a bast for an EX lock that 61 * was blocking another EX, but before sending the bast the 62 * lock owner downconverted to NL, the bast is now obsolete. 63 * Only the ast should be sent. 64 * This is needed because the lock and convert paths can queue 65 * asts out-of-band (not waiting for dlm_thread) in order to 66 * allow for LKM_NOQUEUE to get immediate responses. */ 67 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 68 { 69 assert_spin_locked(&dlm->ast_lock); 70 assert_spin_locked(&lock->spinlock); 71 72 if (lock->ml.highest_blocked == LKM_IVMODE) 73 return 0; 74 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); 75 76 if (lock->bast_pending && 77 list_empty(&lock->bast_list)) 78 /* old bast already sent, ok */ 79 return 0; 80 81 if (lock->ml.type == LKM_EXMODE) 82 /* EX blocks anything left, any bast still valid */ 83 return 0; 84 else if (lock->ml.type == LKM_NLMODE) 85 /* NL blocks nothing, no reason to send any bast, cancel it */ 86 return 1; 87 else if (lock->ml.highest_blocked != LKM_EXMODE) 88 /* PR only blocks EX */ 89 return 1; 90 91 return 0; 92 } 93 94 static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 95 { 96 mlog_entry_void(); 97 98 BUG_ON(!dlm); 99 BUG_ON(!lock); 100 101 assert_spin_locked(&dlm->ast_lock); 102 if (!list_empty(&lock->ast_list)) { 103 mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n", 104 lock->ast_pending, lock->ml.type); 105 BUG(); 106 } 107 BUG_ON(!list_empty(&lock->ast_list)); 108 if (lock->ast_pending) 109 mlog(0, "lock has an ast getting flushed right now\n"); 110 111 /* putting lock on list, add a ref */ 112 dlm_lock_get(lock); 113 spin_lock(&lock->spinlock); 114 115 /* check to see if this ast obsoletes the bast */ 116 if (dlm_should_cancel_bast(dlm, lock)) { 117 struct dlm_lock_resource *res = lock->lockres; 118 mlog(0, "%s: cancelling bast for %.*s\n", 119 dlm->name, res->lockname.len, res->lockname.name); 120 lock->bast_pending = 0; 121 list_del_init(&lock->bast_list); 122 lock->ml.highest_blocked = LKM_IVMODE; 123 /* removing lock from list, remove a ref. guaranteed 124 * this won't be the last ref because of the get above, 125 * so res->spinlock will not be taken here */ 126 dlm_lock_put(lock); 127 /* free up the reserved bast that we are cancelling. 128 * guaranteed that this will not be the last reserved 129 * ast because *both* an ast and a bast were reserved 130 * to get to this point. the res->spinlock will not be 131 * taken here */ 132 dlm_lockres_release_ast(dlm, res); 133 } 134 list_add_tail(&lock->ast_list, &dlm->pending_asts); 135 lock->ast_pending = 1; 136 spin_unlock(&lock->spinlock); 137 } 138 139 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 140 { 141 mlog_entry_void(); 142 143 BUG_ON(!dlm); 144 BUG_ON(!lock); 145 146 spin_lock(&dlm->ast_lock); 147 __dlm_queue_ast(dlm, lock); 148 spin_unlock(&dlm->ast_lock); 149 } 150 151 152 static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 153 { 154 mlog_entry_void(); 155 156 BUG_ON(!dlm); 157 BUG_ON(!lock); 158 assert_spin_locked(&dlm->ast_lock); 159 160 BUG_ON(!list_empty(&lock->bast_list)); 161 if (lock->bast_pending) 162 mlog(0, "lock has a bast getting flushed right now\n"); 163 164 /* putting lock on list, add a ref */ 165 dlm_lock_get(lock); 166 spin_lock(&lock->spinlock); 167 list_add_tail(&lock->bast_list, &dlm->pending_basts); 168 lock->bast_pending = 1; 169 spin_unlock(&lock->spinlock); 170 } 171 172 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 173 { 174 mlog_entry_void(); 175 176 BUG_ON(!dlm); 177 BUG_ON(!lock); 178 179 spin_lock(&dlm->ast_lock); 180 __dlm_queue_bast(dlm, lock); 181 spin_unlock(&dlm->ast_lock); 182 } 183 184 static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 185 struct dlm_lock *lock) 186 { 187 struct dlm_lockstatus *lksb = lock->lksb; 188 BUG_ON(!lksb); 189 190 /* only updates if this node masters the lockres */ 191 if (res->owner == dlm->node_num) { 192 193 spin_lock(&res->spinlock); 194 /* check the lksb flags for the direction */ 195 if (lksb->flags & DLM_LKSB_GET_LVB) { 196 mlog(0, "getting lvb from lockres for %s node\n", 197 lock->ml.node == dlm->node_num ? "master" : 198 "remote"); 199 memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN); 200 } 201 /* Do nothing for lvb put requests - they should be done in 202 * place when the lock is downconverted - otherwise we risk 203 * racing gets and puts which could result in old lvb data 204 * being propagated. We leave the put flag set and clear it 205 * here. In the future we might want to clear it at the time 206 * the put is actually done. 207 */ 208 spin_unlock(&res->spinlock); 209 } 210 211 /* reset any lvb flags on the lksb */ 212 lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); 213 } 214 215 void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 216 struct dlm_lock *lock) 217 { 218 dlm_astlockfunc_t *fn; 219 struct dlm_lockstatus *lksb; 220 221 mlog_entry_void(); 222 223 lksb = lock->lksb; 224 fn = lock->ast; 225 BUG_ON(lock->ml.node != dlm->node_num); 226 227 dlm_update_lvb(dlm, res, lock); 228 (*fn)(lock->astdata); 229 } 230 231 232 int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 233 struct dlm_lock *lock) 234 { 235 int ret; 236 struct dlm_lockstatus *lksb; 237 int lksbflags; 238 239 mlog_entry_void(); 240 241 lksb = lock->lksb; 242 BUG_ON(lock->ml.node == dlm->node_num); 243 244 lksbflags = lksb->flags; 245 dlm_update_lvb(dlm, res, lock); 246 247 /* lock request came from another node 248 * go do the ast over there */ 249 ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags); 250 return ret; 251 } 252 253 void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 254 struct dlm_lock *lock, int blocked_type) 255 { 256 dlm_bastlockfunc_t *fn = lock->bast; 257 258 mlog_entry_void(); 259 BUG_ON(lock->ml.node != dlm->node_num); 260 261 (*fn)(lock->astdata, blocked_type); 262 } 263 264 265 266 int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data) 267 { 268 int ret; 269 unsigned int locklen; 270 struct dlm_ctxt *dlm = data; 271 struct dlm_lock_resource *res = NULL; 272 struct dlm_lock *lock = NULL; 273 struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; 274 char *name; 275 struct list_head *iter, *head=NULL; 276 u64 cookie; 277 u32 flags; 278 279 if (!dlm_grab(dlm)) { 280 dlm_error(DLM_REJECTED); 281 return DLM_REJECTED; 282 } 283 284 mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), 285 "Domain %s not fully joined!\n", dlm->name); 286 287 name = past->name; 288 locklen = past->namelen; 289 cookie = be64_to_cpu(past->cookie); 290 flags = be32_to_cpu(past->flags); 291 292 if (locklen > DLM_LOCKID_NAME_MAX) { 293 ret = DLM_IVBUFLEN; 294 mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n"); 295 goto leave; 296 } 297 298 if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == 299 (LKM_PUT_LVB|LKM_GET_LVB)) { 300 mlog(ML_ERROR, "both PUT and GET lvb specified\n"); 301 ret = DLM_BADARGS; 302 goto leave; 303 } 304 305 mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : 306 (flags & LKM_GET_LVB ? "get lvb" : "none")); 307 308 mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type); 309 310 if (past->type != DLM_AST && 311 past->type != DLM_BAST) { 312 mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu" 313 "name=%.*s\n", past->type, 314 dlm_get_lock_cookie_node(cookie), 315 dlm_get_lock_cookie_seq(cookie), 316 locklen, name); 317 ret = DLM_IVLOCKID; 318 goto leave; 319 } 320 321 res = dlm_lookup_lockres(dlm, name, locklen); 322 if (!res) { 323 mlog(ML_ERROR, "got %sast for unknown lockres! " 324 "cookie=%u:%llu, name=%.*s, namelen=%u\n", 325 past->type == DLM_AST ? "" : "b", 326 dlm_get_lock_cookie_node(cookie), 327 dlm_get_lock_cookie_seq(cookie), 328 locklen, name, locklen); 329 ret = DLM_IVLOCKID; 330 goto leave; 331 } 332 333 /* cannot get a proxy ast message if this node owns it */ 334 BUG_ON(res->owner == dlm->node_num); 335 336 mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name); 337 338 spin_lock(&res->spinlock); 339 if (res->state & DLM_LOCK_RES_RECOVERING) { 340 mlog(0, "responding with DLM_RECOVERING!\n"); 341 ret = DLM_RECOVERING; 342 goto unlock_out; 343 } 344 if (res->state & DLM_LOCK_RES_MIGRATING) { 345 mlog(0, "responding with DLM_MIGRATING!\n"); 346 ret = DLM_MIGRATING; 347 goto unlock_out; 348 } 349 /* try convert queue for both ast/bast */ 350 head = &res->converting; 351 lock = NULL; 352 list_for_each(iter, head) { 353 lock = list_entry (iter, struct dlm_lock, list); 354 if (be64_to_cpu(lock->ml.cookie) == cookie) 355 goto do_ast; 356 } 357 358 /* if not on convert, try blocked for ast, granted for bast */ 359 if (past->type == DLM_AST) 360 head = &res->blocked; 361 else 362 head = &res->granted; 363 364 list_for_each(iter, head) { 365 lock = list_entry (iter, struct dlm_lock, list); 366 if (be64_to_cpu(lock->ml.cookie) == cookie) 367 goto do_ast; 368 } 369 370 mlog(0, "got %sast for unknown lock! cookie=%u:%llu, " 371 "name=%.*s, namelen=%u\n", past->type == DLM_AST ? "" : "b", 372 dlm_get_lock_cookie_node(cookie), dlm_get_lock_cookie_seq(cookie), 373 locklen, name, locklen); 374 375 ret = DLM_NORMAL; 376 unlock_out: 377 spin_unlock(&res->spinlock); 378 goto leave; 379 380 do_ast: 381 ret = DLM_NORMAL; 382 if (past->type == DLM_AST) { 383 /* do not alter lock refcount. switching lists. */ 384 list_move_tail(&lock->list, &res->granted); 385 mlog(0, "ast: adding to granted list... type=%d, " 386 "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); 387 if (lock->ml.convert_type != LKM_IVMODE) { 388 lock->ml.type = lock->ml.convert_type; 389 lock->ml.convert_type = LKM_IVMODE; 390 } else { 391 // should already be there.... 392 } 393 394 lock->lksb->status = DLM_NORMAL; 395 396 /* if we requested the lvb, fetch it into our lksb now */ 397 if (flags & LKM_GET_LVB) { 398 BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB)); 399 memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN); 400 } 401 } 402 spin_unlock(&res->spinlock); 403 404 if (past->type == DLM_AST) 405 dlm_do_local_ast(dlm, res, lock); 406 else 407 dlm_do_local_bast(dlm, res, lock, past->blocked_type); 408 409 leave: 410 411 if (res) 412 dlm_lockres_put(res); 413 414 dlm_put(dlm); 415 return ret; 416 } 417 418 419 420 int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 421 struct dlm_lock *lock, int msg_type, 422 int blocked_type, int flags) 423 { 424 int ret = 0; 425 struct dlm_proxy_ast past; 426 struct kvec vec[2]; 427 size_t veclen = 1; 428 int status; 429 430 mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n", 431 res->lockname.len, res->lockname.name, lock->ml.node, 432 msg_type, blocked_type); 433 434 memset(&past, 0, sizeof(struct dlm_proxy_ast)); 435 past.node_idx = dlm->node_num; 436 past.type = msg_type; 437 past.blocked_type = blocked_type; 438 past.namelen = res->lockname.len; 439 memcpy(past.name, res->lockname.name, past.namelen); 440 past.cookie = lock->ml.cookie; 441 442 vec[0].iov_len = sizeof(struct dlm_proxy_ast); 443 vec[0].iov_base = &past; 444 if (flags & DLM_LKSB_GET_LVB) { 445 mlog(0, "returning requested LVB data\n"); 446 be32_add_cpu(&past.flags, LKM_GET_LVB); 447 vec[1].iov_len = DLM_LVB_LEN; 448 vec[1].iov_base = lock->lksb->lvb; 449 veclen++; 450 } 451 452 ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, 453 lock->ml.node, &status); 454 if (ret < 0) 455 mlog_errno(ret); 456 else { 457 if (status == DLM_RECOVERING) { 458 mlog(ML_ERROR, "sent AST to node %u, it thinks this " 459 "node is dead!\n", lock->ml.node); 460 BUG(); 461 } else if (status == DLM_MIGRATING) { 462 mlog(ML_ERROR, "sent AST to node %u, it returned " 463 "DLM_MIGRATING!\n", lock->ml.node); 464 BUG(); 465 } else if (status != DLM_NORMAL) { 466 mlog(ML_ERROR, "AST to node %u returned %d!\n", 467 lock->ml.node, status); 468 /* ignore it */ 469 } 470 ret = 0; 471 } 472 return ret; 473 } 474