/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/dnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;


static kmem_cache_t *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT (30*HZ)

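/*
 * Flush every message on @head: complete each one with @err, hand it to
 * @destroy_msg for destruction, then wake any readers sleeping on the
 * pipe's wait queue.  The caller has already unlinked these messages
 * from the pipe lists under inode->i_lock and dropped that lock.
 */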
static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}

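/*
 * Delayed-work callback, scheduled by rpc_queue_upcall() when a message
 * is queued on a pipe that nobody has opened for reading yet.  If there
 * is still no reader after RPC_UPCALL_TIMEOUT, the queued messages are
 * discarded with -ETIMEDOUT.
 */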
static void
rpc_timeout_upcall_queue(void *data)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci = (struct rpc_inode *)data;
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}

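/**
 * rpc_queue_upcall - queue an upcall message on an rpc_pipefs pipe
 * @inode: inode of a pipe created by rpc_mkpipe()
 * @msg: message to queue; it is destroyed through ops->destroy_msg()
 *	 once it has been consumed or discarded
 *
 * If the pipe has readers, or was created with RPC_PIPE_WAIT_FOR_OPEN,
 * the message is appended to the pipe and any process sleeping in
 * poll()/read() is woken.  In the wait-for-open case the delayed work
 * above discards the message with -ETIMEDOUT if no reader appears in
 * time.  Returns 0 on success, or -EPIPE if the pipe has been closed
 * down or has no reader and is not waiting for one.
 *
 * A caller typically does something along these lines (illustrative
 * sketch only; names and error handling vary by user).  On failure the
 * caller usually records the error in msg->errno itself:
 *
 *	msg->data = payload;
 *	msg->len = payload_len;
 *	status = rpc_queue_upcall(dentry->d_inode, msg);
 *	if (status < 0)
 *		msg->errno = status;
 */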
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}

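/*
 * Shut a pipe down before it is unlinked: flush all queued and
 * in-flight upcalls with -EPIPE, clear the ops and owner pointers so
 * that later opens, reads and writes fail, give the upper layer a final
 * ->release_pipe() callback, and wait for the upcall timeout work to
 * finish.
 */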
static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work(&rpci->queue_timeout);
		flush_workqueue(rpciod_workqueue);
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}

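/*
 * Hand the next queued upcall message to userspace.  A message may take
 * several read() calls to transfer: while partially copied it is parked
 * on the in_upcall list and remembered in filp->private_data, and the
 * ->upcall() callback is responsible for updating msg->copied.  Once the
 * whole message has been copied (or the upcall fails) it is destroyed.
 */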
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (!list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}

static struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};

static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %u.%u.%u.%u\n",
			NIPQUAD(clnt->cl_xprt->addr.sin_addr.s_addr));
	seq_printf(m, "protocol: %s\n",
			clnt->cl_xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
	return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			atomic_inc(&clnt->cl_users);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};


/*
 * The fixed set of directories created at the rpc_pipefs root.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};

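/*
 * Pin and return the rpc_pipefs mount point, mounting the filesystem on
 * first use.  Every successful rpc_get_mount() must be balanced by a
 * matching rpc_put_mount().
 */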
struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}

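/*
 * Resolve the parent directory of @path relative to the rpc_pipefs
 * root.  On success the nameidata holds references to the parent dentry
 * and the pinned mount, and nd->last names the final path component;
 * both references are dropped again by rpc_release_path().
 */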
static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	if (path[0] == '\0')
		return -ENOENT;
	nd->mnt = rpc_get_mount();
	if (IS_ERR(nd->mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
			       "pseudofilesystem\n", __FILE__, __FUNCTION__);
		return PTR_ERR(nd->mnt);
	}
	mntget(nd->mnt);
	nd->dentry = dget(rpc_mount->mnt_root);
	nd->last_type = LAST_ROOT;
	nd->flags = LOOKUP_PARENT;
	nd->depth = 0;

	if (path_walk(path, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}

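/*
 * Allocate and minimally initialize an rpc_pipefs inode.  Directories
 * get the library simple_dir_* operations; callers fill in i_ino and a
 * more specific i_fop where needed.
 */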
static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blksize = PAGE_CACHE_SIZE;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch(mode & S_IFMT) {
		case S_IFDIR:
			inode->i_fop = &simple_dir_operations;
			inode->i_op = &simple_dir_inode_operations;
			inode->i_nlink++;
		default:
			break;
	}
	return inode;
}

/*
 * FIXME: This probably has races.
 */
static void
rpc_depopulate(struct dentry *parent)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (dentry->d_inode) {
				rpc_close_pipes(dentry->d_inode);
				simple_unlink(dir, dentry);
			}
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}

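/*
 * Create the fixed entries described by @files (indices @start up to,
 * but not including, @eof) underneath @parent, propagating the parent's
 * ->private pointer to each new inode.  On failure the entries created
 * so far are left in place for the caller to clean up.
 */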
static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			dir->i_nlink++;
		d_add(dentry, inode);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}

static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	dir->i_nlink++;
	inode_dir_notify(dir, DN_CREATE);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}

static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	shrink_dcache_parent(dentry);
	if (dentry->d_inode)
		rpc_close_pipes(dentry->d_inode);
	if ((error = simple_rmdir(dir, dentry)) != 0)
		return error;
	if (!error) {
		inode_dir_notify(dir, DN_DELETE);
		d_drop(dentry);
	}
	return 0;
}

static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dir = nd->dentry->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
	if (IS_ERR(dentry))
		goto out_err;
	if (dentry->d_inode) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(nd);
	return dentry;
}


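/**
 * rpc_mkdir - create a client directory under the rpc_pipefs root
 * @path: path relative to the rpc_pipefs root
 * @rpc_client: RPC client to associate with the new directory
 *
 * Creates the directory, ties it to @rpc_client and populates it with
 * the per-client "info" file.  Returns a referenced dentry on success
 * or an ERR_PTR() on failure; the caller is responsible for dropping
 * the reference when the directory is torn down again.
 */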
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}

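/**
 * rpc_rmdir - remove a directory created with rpc_mkdir()
 * @path: path relative to the rpc_pipefs root
 *
 * Empties the directory (closing any pipes it still contains) and
 * removes it.  Returns 0 on success or a negative errno.
 */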
int
rpc_rmdir(char *path)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, &nd)) != 0)
		return error;
	dir = nd.dentry->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_release;
	}
	rpc_depopulate(dentry);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
out_release:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return error;
}

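/**
 * rpc_mkpipe - create an rpc_pipefs pipe for kernel/userspace upcalls
 * @path: path relative to the rpc_pipefs root
 * @private: opaque pointer stored in the pipe's rpc_inode
 * @ops: upcall/downcall/destroy_msg/release_pipe callbacks
 * @flags: RPC_PIPE_WAIT_FOR_OPEN to briefly hold upcalls for a reader
 *
 * Returns a referenced dentry for the new pipe, or an ERR_PTR().
 *
 * A user of the pipe does roughly the following (illustrative sketch
 * only; my_upcall() and friends stand in for the caller's callbacks):
 *
 *	static struct rpc_pipe_ops my_pipe_ops = {
 *		.upcall		= my_upcall,
 *		.downcall	= my_downcall,
 *		.destroy_msg	= my_destroy_msg,
 *	};
 *
 *	dentry = rpc_mkpipe(path, clnt, &my_pipe_ops, RPC_PIPE_WAIT_FOR_OPEN);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 */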
struct dentry *
rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	inode = rpc_get_inode(dir->i_sb, S_IFSOCK | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	inode_dir_notify(dir, DN_CREATE);
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, -ENOMEM);
	goto out;
}

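/**
 * rpc_unlink - remove a pipe or file created under rpc_pipefs
 * @path: path relative to the rpc_pipefs root
 *
 * Closes the pipe, flushing any pending upcalls with -EPIPE, before
 * unlinking the dentry.  Returns 0 on success or a negative errno.
 */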
int
rpc_unlink(char *path)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, &nd)) != 0)
		return error;
	dir = nd.dentry->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_release;
	}
	d_drop(dentry);
	if (dentry->d_inode) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
	}
	dput(dentry);
	inode_dir_notify(dir, DN_DELETE);
out_release:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return error;
}

/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969

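/*
 * Build the rpc_pipefs superblock: a single root directory populated
 * with the fixed set of subdirectories described in files[] above.
 */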
static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}

static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}

static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};

static void
init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&rpci->vfs_inode);
		rpci->private = NULL;
		rpci->nreaders = 0;
		rpci->nwriters = 0;
		INIT_LIST_HEAD(&rpci->in_upcall);
		INIT_LIST_HEAD(&rpci->pipe);
		rpci->pipelen = 0;
		init_waitqueue_head(&rpci->waitq);
		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
		rpci->ops = NULL;
	}
}

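/*
 * Called from the SUNRPC module's init and exit paths: set up the
 * rpc_inode slab cache and register (or unregister) the "rpc_pipefs"
 * filesystem type.
 */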
int register_rpc_pipefs(void)
{
	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once, NULL);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	register_filesystem(&rpc_pipe_fs_type);
	return 0;
}

void unregister_rpc_pipefs(void)
{
	if (kmem_cache_destroy(rpc_inode_cachep))
		printk(KERN_WARNING "RPC: unable to free inode cache\n");
	unregister_filesystem(&rpc_pipe_fs_type);
}