/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/dnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;


static kmem_cache_t *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT	(30*HZ)

static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}

static void
rpc_timeout_upcall_queue(void *data)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci = (struct rpc_inode *)data;
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}

int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			schedule_delayed_work(&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}

static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work(&rpci->queue_timeout);
		flush_scheduled_work();
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}
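
/*
 * Illustrative sketch (not part of this file): how a kernel-side producer,
 * e.g. an RPCSEC_GSS upcall, might hand a message to user space through
 * rpc_queue_upcall().  The pipe inode comes from rpc_mkpipe() below; the
 * message layout and the example_* names are hypothetical and only meant
 * to show the expected calling pattern.
 *
 *	static void example_destroy_msg(struct rpc_pipe_msg *msg)
 *	{
 *		// msg->errno carries -ETIMEDOUT/-EPIPE/-EAGAIN on failure
 *		kfree(msg->data);
 *		kfree(msg);
 *	}
 *
 *	static int example_send_upcall(struct dentry *pipe_dentry,
 *				       void *buf, size_t len)
 *	{
 *		struct rpc_pipe_msg *msg;
 *
 *		msg = kmalloc(sizeof(*msg), GFP_KERNEL);
 *		if (msg == NULL)
 *			return -ENOMEM;
 *		memset(msg, 0, sizeof(*msg));
 *		msg->data = buf;
 *		msg->len = len;
 *		// Queues on rpci->pipe and wakes readers; returns -EPIPE
 *		// once rpc_close_pipes() has cleared rpci->ops.
 *		return rpc_queue_upcall(pipe_dentry->d_inode, msg);
 *	}
 *
 * The destroy_msg callback registered in struct rpc_pipe_ops is what frees
 * the message when the queue is purged (see rpc_purge_list() above).
 */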

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}

static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}
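
/*
 * Illustrative sketch (not part of this file): the matching user-space side
 * of the pipe implemented by the file operations above.  A daemon (rpc.gssd,
 * rpc.idmapd or similar) opens the pipe file, waits for upcalls with poll(),
 * reads each request and writes back a reply, which ends up in ->downcall().
 * The path and buffer size below are only examples.
 *
 *	int fd = open("/path/to/rpc_pipefs/nfs/clnt00/some-pipe", O_RDWR);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[4096];
 *
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		ssize_t n = read(fd, buf, sizeof(buf));   // -> ops->upcall()
 *		if (n <= 0)
 *			break;
 *		// ... process the request, build a reply in buf ...
 *		write(fd, buf, reply_len);                // -> ops->downcall()
 *	}
 *
 * FIONREAD (see rpc_pipe_ioctl() below) reports how many upcall bytes are
 * currently queued, and POLLERR|POLLHUP is signalled once the kernel side
 * has closed the pipe.
 */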

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (!list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}

static struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};

static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %u.%u.%u.%u\n",
			NIPQUAD(clnt->cl_xprt->addr.sin_addr.s_addr));
	seq_printf(m, "protocol: %s\n",
			clnt->cl_xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
	return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			atomic_inc(&clnt->cl_users);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};


/*
 * We have a single directory with 1 node in it.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};
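
/*
 * Sketch of the resulting tree (for orientation only; the mount point is
 * chosen by user space, /var/lib/nfs/rpc_pipefs is merely the usual choice):
 *
 *	/                     <- rpc_fill_super()
 *	|-- lockd/
 *	|-- mount/
 *	|-- nfs/
 *	|   `-- clnt00/       <- rpc_mkdir(), e.g. one per rpc_clnt
 *	|       |-- info      <- rpc_info_operations (seq_file)
 *	|       `-- <pipe>    <- rpc_mkpipe(), e.g. a gss context pipe
 *	|-- portmap/
 *	`-- statd/
 */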

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	struct file_operations *i_fop;
	int mode;
};

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};

static int
rpc_get_mount(void)
{
	return simple_pin_fs("rpc_pipefs", &rpc_mount, &rpc_mount_count);
}

static void
rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}

static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	if (path[0] == '\0')
		return -ENOENT;
	if (rpc_get_mount()) {
		printk(KERN_WARNING "%s: %s failed to mount "
				"pseudofilesystem \n", __FILE__, __FUNCTION__);
		return -ENODEV;
	}
	nd->mnt = mntget(rpc_mount);
	nd->dentry = dget(rpc_mount->mnt_root);
	nd->last_type = LAST_ROOT;
	nd->flags = LOOKUP_PARENT;
	nd->depth = 0;

	if (path_walk(path, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}

static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blksize = PAGE_CACHE_SIZE;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch(mode & S_IFMT) {
		case S_IFDIR:
			inode->i_fop = &simple_dir_operations;
			inode->i_op = &simple_dir_inode_operations;
			inode->i_nlink++;
		default:
			break;
	}
	return inode;
}

/*
 * FIXME: This probably has races.
 */
static void
rpc_depopulate(struct dentry *parent)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock(&dir->i_mutex);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (dentry->d_inode) {
				rpc_close_pipes(dentry->d_inode);
				simple_unlink(dir, dentry);
			}
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}

static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			dir->i_nlink++;
		d_add(dentry, inode);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}

static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	dir->i_nlink++;
	inode_dir_notify(dir, DN_CREATE);
	rpc_get_mount();
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}

static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	shrink_dcache_parent(dentry);
	if (dentry->d_inode)
		rpc_close_pipes(dentry->d_inode);
	if ((error = simple_rmdir(dir, dentry)) != 0)
		return error;
	if (!error) {
		inode_dir_notify(dir, DN_DELETE);
		d_drop(dentry);
		rpc_put_mount();
	}
	return 0;
}

static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dir = nd->dentry->d_inode;
	mutex_lock(&dir->i_mutex);
	dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
	if (IS_ERR(dentry))
		goto out_err;
	if (dentry->d_inode) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(nd);
	return dentry;
}


struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}

int
rpc_rmdir(char *path)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, &nd)) != 0)
		return error;
	dir = nd.dentry->d_inode;
	mutex_lock(&dir->i_mutex);
	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_release;
	}
	rpc_depopulate(dentry);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
out_release:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return error;
}

struct dentry *
rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	inode = rpc_get_inode(dir->i_sb, S_IFSOCK | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	inode_dir_notify(dir, DN_CREATE);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, -ENOMEM);
	goto out;
}

int
rpc_unlink(char *path)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, &nd)) != 0)
		return error;
	dir = nd.dentry->d_inode;
	mutex_lock(&dir->i_mutex);
	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_release;
	}
	d_drop(dentry);
	if (dentry->d_inode) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
	}
	dput(dentry);
	inode_dir_notify(dir, DN_DELETE);
out_release:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return error;
}

/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969
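
/*
 * Illustrative sketch (not part of this file): how the exported helpers
 * above are typically combined by an RPC client or auth flavour.  The
 * directory/pipe names and the example_* ops are hypothetical.
 *
 *	static struct rpc_pipe_ops example_pipe_ops = {
 *		.upcall		= example_upcall,	// copy a queued msg to user space
 *		.downcall	= example_downcall,	// parse the daemon's reply
 *		.destroy_msg	= example_destroy_msg,
 *	};
 *
 *	dir = rpc_mkdir("nfs/clnt00", clnt);		// creates the dir plus "info"
 *	pipe = rpc_mkpipe("nfs/clnt00/example", clnt, &example_pipe_ops,
 *			  RPC_PIPE_WAIT_FOR_OPEN);
 *	...
 *	rpc_unlink("nfs/clnt00/example");
 *	rpc_rmdir("nfs/clnt00");
 *
 * RPC_PIPE_WAIT_FOR_OPEN lets rpc_queue_upcall() buffer messages for up to
 * RPC_UPCALL_TIMEOUT while no reader has the pipe open yet.
 */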
static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}

static struct super_block *
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super);
}

static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};

static void
init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&rpci->vfs_inode);
		rpci->private = NULL;
		rpci->nreaders = 0;
		rpci->nwriters = 0;
		INIT_LIST_HEAD(&rpci->in_upcall);
		INIT_LIST_HEAD(&rpci->pipe);
		rpci->pipelen = 0;
		init_waitqueue_head(&rpci->waitq);
		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
		rpci->ops = NULL;
	}
}

int register_rpc_pipefs(void)
{
	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
				init_once, NULL);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	register_filesystem(&rpc_pipe_fs_type);
	return 0;
}

void unregister_rpc_pipefs(void)
{
	if (kmem_cache_destroy(rpc_inode_cachep))
		printk(KERN_WARNING "RPC: unable to free inode cache\n");
	unregister_filesystem(&rpc_pipe_fs_type);
}
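
/*
 * Usage note (informational): register_rpc_pipefs() is meant to be called
 * from the sunrpc module's init path and unregister_rpc_pipefs() from its
 * exit path.  Once registered, user space typically mounts the filesystem
 * with something like
 *
 *	mount -t rpc_pipefs sunrpc /var/lib/nfs/rpc_pipefs
 *
 * so that daemons such as rpc.gssd can open the pipes created by
 * rpc_mkpipe() above.  The exact mount point is a user-space convention,
 * not something this file depends on.
 */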