/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmfs.c
 *
 * Code which implements the kernel side of a minimal userspace
 * interface to our DLM.  This file handles the virtual file system
 * used for communication with userspace.  Credit should go to ramfs,
 * which was a template for the fs side of this module.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

/* Simple VFS hooks based on: */
/*
 * Resizable simple ram filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/poll.h>

#include <asm/uaccess.h>

#include "stackglue.h"
#include "userdlm.h"
#include "dlmfsver.h"

#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"


static const struct super_operations dlmfs_ops;
static const struct file_operations dlmfs_file_operations;
static const struct inode_operations dlmfs_dir_inode_operations;
static const struct inode_operations dlmfs_root_inode_operations;
static const struct inode_operations dlmfs_file_inode_operations;
static struct kmem_cache *dlmfs_inode_cache;

struct workqueue_struct *user_dlm_worker;



/*
 * These are the ABI capabilities of dlmfs.
 *
 * Over time, dlmfs has added some features that were not part of the
 * initial ABI.  Unfortunately, some of these features are not detectable
 * via standard usage.  For example, Linux's default poll always returns
 * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
 * added poll support.  Instead, we provide this list of new capabilities.
 *
 * Capabilities is a read-only attribute.  We do it as a module parameter
 * so we can discover it whether dlmfs is built in, loaded, or even not
 * loaded.
 *
 * The ABI features are local to this machine's dlmfs mount.  This is
 * distinct from the locking protocol, which is concerned with inter-node
 * interaction.
 *
 * Capabilities:
 * - bast	: POLLIN against the file descriptor of a held lock
 *		  signifies a bast fired on the lock.
 */
#define DLMFS_CAPABILITIES "bast stackglue"
static int param_set_dlmfs_capabilities(const char *val,
					struct kernel_param *kp)
{
	printk(KERN_ERR "%s: readonly parameter\n", kp->name);
	return -EINVAL;
}
static int param_get_dlmfs_capabilities(char *buffer,
					struct kernel_param *kp)
{
	return strlcpy(buffer, DLMFS_CAPABILITIES,
		       strlen(DLMFS_CAPABILITIES) + 1);
}
module_param_call(capabilities, param_set_dlmfs_capabilities,
		  param_get_dlmfs_capabilities, NULL, 0444);
MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
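
/*
 * A minimal userspace sketch of capability discovery (illustration only,
 * not part of this file's build): a read-only module parameter with 0444
 * permissions is normally visible under sysfs.  The exact module name in
 * the path below is an assumption based on how ocfs2_dlmfs is usually
 * built, not something this file defines.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static int dlmfs_has_capability(const char *cap)
 *	{
 *		char buf[128];
 *		FILE *f = fopen("/sys/module/ocfs2_dlmfs/parameters/capabilities", "r");
 *
 *		if (!f)
 *			return 0;
 *		if (!fgets(buf, sizeof(buf), f))
 *			buf[0] = '\0';
 *		fclose(f);
 *		return strstr(buf, cap) != NULL;	// e.g. "bast"
 *	}
 *
 * A caller would check dlmfs_has_capability("bast") before relying on the
 * poll(2) semantics implemented further down in this file.
 */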

/*
 * decodes a set of open flags into a valid lock level and a set of flags.
 * returns < 0 if we have invalid flags
 * flags which mean something to us:
 * O_RDONLY -> PRMODE level
 * O_WRONLY -> EXMODE level
 *
 * O_NONBLOCK -> NOQUEUE
 */
static int dlmfs_decode_open_flags(int open_flags,
				   int *level,
				   int *flags)
{
	if (open_flags & (O_WRONLY|O_RDWR))
		*level = DLM_LOCK_EX;
	else
		*level = DLM_LOCK_PR;

	*flags = 0;
	if (open_flags & O_NONBLOCK)
		*flags |= DLM_LKF_NOQUEUE;

	return 0;
}

static int dlmfs_file_open(struct inode *inode,
			   struct file *file)
{
	int status, level, flags;
	struct dlmfs_filp_private *fp = NULL;
	struct dlmfs_inode_private *ip;

	if (S_ISDIR(inode->i_mode))
		BUG();

	mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
	     file->f_flags);

	status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
	if (status < 0)
		goto bail;

	/* We don't want to honor O_APPEND at read/write time as it
	 * doesn't make sense for LVB writes. */
	file->f_flags &= ~O_APPEND;

	fp = kmalloc(sizeof(*fp), GFP_NOFS);
	if (!fp) {
		status = -ENOMEM;
		goto bail;
	}
	fp->fp_lock_level = level;

	ip = DLMFS_I(inode);

	status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
	if (status < 0) {
		/* this is a strange error to return here, but we want
		 * userspace to be able to distinguish a valid lock
		 * request from one that simply couldn't be granted. */
		if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
			status = -ETXTBSY;
		kfree(fp);
		goto bail;
	}

	file->private_data = fp;
bail:
	return status;
}
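
/*
 * A hedged userspace sketch of the flag mapping above (illustration only):
 * the lock file path is a made-up example, and "/dlm" is only the
 * conventional mount point for ocfs2_dlmfs.
 *
 *	#include <fcntl.h>
 *	#include <errno.h>
 *
 *	// Try for an exclusive lock without queueing behind other holders.
 *	int fd = open("/dlm/mydomain/mylock",
 *		      O_CREAT | O_WRONLY | O_NONBLOCK, 0600);
 *	if (fd < 0 && errno == ETXTBSY) {
 *		// O_NONBLOCK mapped to DLM_LKF_NOQUEUE and the lock was busy.
 *	}
 *
 *	// A shared (PR) lock that waits until it is granted.
 *	int pr_fd = open("/dlm/mydomain/mylock", O_RDONLY);
 *
 * Closing the descriptor drops the lock (see dlmfs_file_release()).
 */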

static int dlmfs_file_release(struct inode *inode,
			      struct file *file)
{
	int level, status;
	struct dlmfs_inode_private *ip = DLMFS_I(inode);
	struct dlmfs_filp_private *fp = file->private_data;

	if (S_ISDIR(inode->i_mode))
		BUG();

	mlog(0, "close called on inode %lu\n", inode->i_ino);

	status = 0;
	if (fp) {
		level = fp->fp_lock_level;
		if (level != DLM_LOCK_IV)
			user_dlm_cluster_unlock(&ip->ip_lockres, level);

		kfree(fp);
		file->private_data = NULL;
	}

	return 0;
}

/*
 * We do ->setattr() just to override size changes.  Our size is the size
 * of the LVB and nothing else.
 */
static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
{
	int error;
	struct inode *inode = dentry->d_inode;

	attr->ia_valid &= ~ATTR_SIZE;
	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
{
	int event = 0;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct dlmfs_inode_private *ip = DLMFS_I(inode);

	poll_wait(file, &ip->ip_lockres.l_event, wait);

	spin_lock(&ip->ip_lockres.l_lock);
	if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
		event = POLLIN | POLLRDNORM;
	spin_unlock(&ip->ip_lockres.l_lock);

	return event;
}

static ssize_t dlmfs_file_read(struct file *filp,
			       char __user *buf,
			       size_t count,
			       loff_t *ppos)
{
	int bytes_left;
	ssize_t readlen, got;
	char *lvb_buf;
	struct inode *inode = filp->f_path.dentry->d_inode;

	mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
	     inode->i_ino, count, *ppos);

	if (*ppos >= i_size_read(inode))
		return 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	/* don't read past the lvb */
	if ((count + *ppos) > i_size_read(inode))
		readlen = i_size_read(inode) - *ppos;
	else
		readlen = count;

	lvb_buf = kmalloc(readlen, GFP_NOFS);
	if (!lvb_buf)
		return -ENOMEM;

	got = user_dlm_read_lvb(inode, lvb_buf, readlen);
	if (got) {
		BUG_ON(got != readlen);
		bytes_left = __copy_to_user(buf, lvb_buf, readlen);
		readlen -= bytes_left;
	} else
		readlen = 0;

	kfree(lvb_buf);

	*ppos = *ppos + readlen;

	mlog(0, "read %zd bytes\n", readlen);
	return readlen;
}

static ssize_t dlmfs_file_write(struct file *filp,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int bytes_left;
	ssize_t writelen;
	char *lvb_buf;
	struct inode *inode = filp->f_path.dentry->d_inode;

	mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
	     inode->i_ino, count, *ppos);

	if (*ppos >= i_size_read(inode))
		return -ENOSPC;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/* don't write past the lvb */
	if ((count + *ppos) > i_size_read(inode))
		writelen = i_size_read(inode) - *ppos;
	else
		writelen = count;

	lvb_buf = kmalloc(writelen, GFP_NOFS);
	if (!lvb_buf)
		return -ENOMEM;

	bytes_left = copy_from_user(lvb_buf, buf, writelen);
	writelen -= bytes_left;
	if (writelen)
		user_dlm_write_lvb(inode, lvb_buf, writelen);

	kfree(lvb_buf);

	*ppos = *ppos + writelen;
	mlog(0, "wrote %zd bytes\n", writelen);
	return writelen;
}
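
/*
 * A rough userspace sketch of the poll()/LVB semantics implemented above
 * (illustration only): lock_fd is assumed to be an already-held dlmfs lock
 * opened with a level that permits reading, the 64-byte buffer simply
 * mirrors this file's DLM_LVB_LEN-sized i_size, and the behavior requires
 * the "bast" capability.
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	char lvb[64];
 *	struct pollfd pfd = { .fd = lock_fd, .events = POLLIN };
 *
 *	// POLLIN on a held lock means a bast fired: another node wants it.
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		pread(lock_fd, lvb, sizeof(lvb), 0);	// snapshot the LVB
 *		close(lock_fd);				// drop the lock
 *	}
 */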

static void dlmfs_init_once(void *foo)
{
	struct dlmfs_inode_private *ip =
		(struct dlmfs_inode_private *) foo;

	ip->ip_conn = NULL;
	ip->ip_parent = NULL;

	inode_init_once(&ip->ip_vfs_inode);
}

static struct inode *dlmfs_alloc_inode(struct super_block *sb)
{
	struct dlmfs_inode_private *ip;

	ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
	if (!ip)
		return NULL;

	return &ip->ip_vfs_inode;
}

static void dlmfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
}

static void dlmfs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, dlmfs_i_callback);
}

static void dlmfs_evict_inode(struct inode *inode)
{
	int status;
	struct dlmfs_inode_private *ip;

	clear_inode(inode);

	mlog(0, "inode %lu\n", inode->i_ino);

	ip = DLMFS_I(inode);

	if (S_ISREG(inode->i_mode)) {
		status = user_dlm_destroy_lock(&ip->ip_lockres);
		if (status < 0)
			mlog_errno(status);
		iput(ip->ip_parent);
		goto clear_fields;
	}

	mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
	/* we must be a directory.  If required, let's unregister the
	 * dlm context now. */
	if (ip->ip_conn)
		user_dlm_unregister(ip->ip_conn);
clear_fields:
	ip->ip_parent = NULL;
	ip->ip_conn = NULL;
}

static struct backing_dev_info dlmfs_backing_dev_info = {
	.name		= "ocfs2-dlmfs",
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static struct inode *dlmfs_get_root_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);
	umode_t mode = S_IFDIR | 0755;
	struct dlmfs_inode_private *ip;

	if (inode) {
		ip = DLMFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, NULL, mode);
		inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inc_nlink(inode);

		inode->i_fop = &simple_dir_operations;
		inode->i_op = &dlmfs_root_inode_operations;
	}

	return inode;
}

static struct inode *dlmfs_get_inode(struct inode *parent,
				     struct dentry *dentry,
				     umode_t mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode *inode = new_inode(sb);
	struct dlmfs_inode_private *ip;

	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	inode_init_owner(inode, parent, mode);
	inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	ip = DLMFS_I(inode);
	ip->ip_conn = DLMFS_I(parent)->ip_conn;

	switch (mode & S_IFMT) {
	default:
		/* for now we don't support anything other than
		 * directories and regular files. */
		BUG();
		break;
	case S_IFREG:
		inode->i_op = &dlmfs_file_inode_operations;
		inode->i_fop = &dlmfs_file_operations;

		i_size_write(inode, DLM_LVB_LEN);

		user_dlm_lock_res_init(&ip->ip_lockres, dentry);

		/* released at clear_inode time; this ensures that we
		 * get to drop the dlm reference on each lock *before*
		 * we call the unregister code for releasing parent
		 * directories. */
		ip->ip_parent = igrab(parent);
		BUG_ON(!ip->ip_parent);
		break;
	case S_IFDIR:
		inode->i_op = &dlmfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* directory inodes start off with i_nlink ==
		 * 2 (for "." entry) */
		inc_nlink(inode);
		break;
	}
	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
/* SMP-safe */
static int dlmfs_mkdir(struct inode *dir,
		       struct dentry *dentry,
		       umode_t mode)
{
	int status;
	struct inode *inode = NULL;
	struct qstr *domain = &dentry->d_name;
	struct dlmfs_inode_private *ip;
	struct ocfs2_cluster_connection *conn;

	mlog(0, "mkdir %.*s\n", domain->len, domain->name);

	/* verify that we have a proper domain */
	if (domain->len >= GROUP_NAME_MAX) {
		status = -EINVAL;
		mlog(ML_ERROR, "invalid domain name for directory.\n");
		goto bail;
	}

	inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
	if (!inode) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	ip = DLMFS_I(inode);

	conn = user_dlm_register(domain);
	if (IS_ERR(conn)) {
		status = PTR_ERR(conn);
		mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
		     status, domain->len, domain->name);
		goto bail;
	}
	ip->ip_conn = conn;

	inc_nlink(dir);
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */

	status = 0;
bail:
	if (status < 0)
		iput(inode);
	return status;
}

static int dlmfs_create(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			bool excl)
{
	int status = 0;
	struct inode *inode;
	struct qstr *name = &dentry->d_name;

	mlog(0, "create %.*s\n", name->len, name->name);

	/* verify name is valid and doesn't contain any dlm reserved
	 * characters */
	if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
	    name->name[0] == '$') {
		status = -EINVAL;
		mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
		     name->name);
		goto bail;
	}

	inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
	if (!inode) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
bail:
	return status;
}

static int dlmfs_unlink(struct inode *dir,
			struct dentry *dentry)
{
	int status;
	struct inode *inode = dentry->d_inode;

	mlog(0, "unlink inode %lu\n", inode->i_ino);

	/* if there are no current holders, or none that are waiting
	 * to acquire a lock, this basically destroys our lockres. */
	status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
	if (status < 0) {
		mlog(ML_ERROR, "unlink %.*s, error %d from destroy\n",
		     dentry->d_name.len, dentry->d_name.name, status);
		goto bail;
	}
	status = simple_unlink(dir, dentry);
bail:
	return status;
}
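
/*
 * For reference, a hedged sketch of the userspace lifecycle served by the
 * three operations above (the mount point and names are examples only):
 *
 *	#include <sys/stat.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	mkdir("/dlm/mydomain", 0755);			// registers the domain
 *	int fd = open("/dlm/mydomain/mylock",		// creates the lockres and
 *		      O_CREAT | O_RDWR, 0600);		// acquires an EX lock
 *	// ... read/write the LVB, do work ...
 *	close(fd);					// drops the lock
 *	unlink("/dlm/mydomain/mylock");			// destroys the lockres
 *	rmdir("/dlm/mydomain");				// unregisters the domain
 */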

static int dlmfs_fill_super(struct super_block *sb,
			    void *data,
			    int silent)
{
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = DLMFS_MAGIC;
	sb->s_op = &dlmfs_ops;
	sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static const struct file_operations dlmfs_file_operations = {
	.open		= dlmfs_file_open,
	.release	= dlmfs_file_release,
	.poll		= dlmfs_file_poll,
	.read		= dlmfs_file_read,
	.write		= dlmfs_file_write,
	.llseek		= default_llseek,
};

static const struct inode_operations dlmfs_dir_inode_operations = {
	.create		= dlmfs_create,
	.lookup		= simple_lookup,
	.unlink		= dlmfs_unlink,
};

/* this way we can restrict mkdir to only the toplevel of the fs. */
static const struct inode_operations dlmfs_root_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= dlmfs_mkdir,
	.rmdir		= simple_rmdir,
};

static const struct super_operations dlmfs_ops = {
	.statfs		= simple_statfs,
	.alloc_inode	= dlmfs_alloc_inode,
	.destroy_inode	= dlmfs_destroy_inode,
	.evict_inode	= dlmfs_evict_inode,
	.drop_inode	= generic_delete_inode,
};

static const struct inode_operations dlmfs_file_inode_operations = {
	.getattr	= simple_getattr,
	.setattr	= dlmfs_file_setattr,
};

static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
}

static struct file_system_type dlmfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ocfs2_dlmfs",
	.mount		= dlmfs_mount,
	.kill_sb	= kill_litter_super,
};

static int __init init_dlmfs_fs(void)
{
	int status;
	int cleanup_inode = 0, cleanup_worker = 0;

	dlmfs_print_version();

	status = bdi_init(&dlmfs_backing_dev_info);
	if (status)
		return status;

	dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
				sizeof(struct dlmfs_inode_private),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
					SLAB_MEM_SPREAD),
				dlmfs_init_once);
	if (!dlmfs_inode_cache) {
		status = -ENOMEM;
		goto bail;
	}
	cleanup_inode = 1;

	user_dlm_worker = create_singlethread_workqueue("user_dlm");
	if (!user_dlm_worker) {
		status = -ENOMEM;
		goto bail;
	}
	cleanup_worker = 1;

	user_dlm_set_locking_protocol();
	status = register_filesystem(&dlmfs_fs_type);
bail:
	if (status) {
		if (cleanup_inode)
			kmem_cache_destroy(dlmfs_inode_cache);
		if (cleanup_worker)
			destroy_workqueue(user_dlm_worker);
		bdi_destroy(&dlmfs_backing_dev_info);
	} else
		printk("OCFS2 User DLM kernel interface loaded\n");
	return status;
}

static void __exit exit_dlmfs_fs(void)
{
	unregister_filesystem(&dlmfs_fs_type);

	flush_workqueue(user_dlm_worker);
	destroy_workqueue(user_dlm_worker);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(dlmfs_inode_cache);

	bdi_destroy(&dlmfs_backing_dev_info);
}

MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");

module_init(init_dlmfs_fs)
module_exit(exit_dlmfs_fs)