/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/dnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;


static struct kmem_cache *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT (30*HZ)

static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}

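/*
 * Delayed-work callback armed by rpc_queue_upcall() for pipes created
 * with RPC_PIPE_WAIT_FOR_OPEN: if no reader has opened the pipe by the
 * time it runs, drop the queued upcalls, completing each message with
 * -ETIMEDOUT.
 */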
static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci =
		container_of(work, struct rpc_inode, queue_timeout.work);
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}

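/**
 * rpc_queue_upcall - queue an upcall message for userspace to read
 * @inode: inode of the upcall pipe
 * @msg: message to queue
 *
 * Adds @msg to the pipe's queue if a reader currently has the pipe
 * open.  For pipes created with RPC_PIPE_WAIT_FOR_OPEN the message is
 * queued even without a reader, and a delayed work item times it out
 * after RPC_UPCALL_TIMEOUT.  Returns 0 on success, or -EPIPE if the
 * pipe is being shut down or cannot accept the message.
 */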
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}

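/*
 * Shut down a pipe: detach the rpc_pipe_ops, flush any queued or
 * partially read upcalls with -EPIPE, and wait for pending timeout
 * work to finish before clearing the inode's owner.
 */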
static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work(&rpci->queue_timeout);
		flush_workqueue(rpciod_workqueue);
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}

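/*
 * Hand the upcall message at the head of the pipe to userspace.  The
 * message is moved to the in_upcall list and cached in
 * filp->private_data, so a large message can be consumed over several
 * read() calls; it is destroyed once ops->upcall() reports an error or
 * the whole message has been copied.
 */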
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}

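/*
 * Pass data written by userspace straight to the kernel-side consumer
 * via ops->downcall().
 */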
static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}

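/*
 * The pipe is always writable; it becomes readable when upcall
 * messages are queued, and reports POLLERR|POLLHUP once the kernel
 * side has shut it down (rpci->ops == NULL).
 */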
static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_path.dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (!list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

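/*
 * FIONREAD reports how many upcall bytes remain to be read: the queued
 * messages plus the unread remainder of any partially read message.
 */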
static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}

static const struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};

static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
	return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			kref_get(&clnt->cl_kref);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static const struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};


/*
 * Fixed inode numbers for the directories that rpc_fill_super() creates
 * in the rpc_pipefs root.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};

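/**
 * rpc_get_mount - pin the internal rpc_pipefs mount
 *
 * Mounts rpc_pipefs on first use and takes a reference to the mount.
 * Returns the vfsmount on success or an ERR_PTR on failure; each
 * successful call must be paired with rpc_put_mount().
 */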
struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}

static int rpc_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations rpc_dentry_operations = {
	.d_delete = rpc_delete_dentry,
};

static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	if (path[0] == '\0')
		return -ENOENT;
	nd->mnt = rpc_get_mount();
	if (IS_ERR(nd->mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
			       "pseudofilesystem\n", __FILE__, __FUNCTION__);
		return PTR_ERR(nd->mnt);
	}
	mntget(nd->mnt);
	nd->dentry = dget(rpc_mount->mnt_root);
	nd->last_type = LAST_ROOT;
	nd->flags = LOOKUP_PARENT;
	nd->depth = 0;

	if (path_walk(path, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}

static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
		case S_IFDIR:
			inode->i_fop = &simple_dir_operations;
			inode->i_op = &simple_dir_inode_operations;
			inc_nlink(inode);
		default:
			break;
	}
	return inode;
}

/*
 * FIXME: This probably has races.
 */
static void
rpc_depopulate(struct dentry *parent, int start, int eof)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		if (!dentry->d_inode ||
				dentry->d_inode->i_ino < start ||
				dentry->d_inode->i_ino >= eof)
			continue;
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (S_ISREG(dentry->d_inode->i_mode))
				simple_unlink(dir, dentry);
			else if (S_ISDIR(dentry->d_inode->i_mode))
				simple_rmdir(dir, dentry);
			d_delete(dentry);
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}

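/*
 * Create the dentries and inodes described by a struct rpc_filelist
 * table under @parent.  The table index doubles as the inode number,
 * which is what rpc_depopulate() later keys on when tearing the
 * entries down again.
 */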
static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		dentry->d_op = &rpc_dentry_operations;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			inc_nlink(dir);
		d_add(dentry, inode);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}

static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	inc_nlink(dir);
	inode_dir_notify(dir, DN_CREATE);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}

static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	error = simple_rmdir(dir, dentry);
	if (!error)
		d_delete(dentry);
	return error;
}

/*
 * Look up @name under @parent prior to creating it.  Note: on success
 * this returns with the parent directory's i_mutex held; the caller is
 * responsible for dropping it.
 */
static struct dentry *
rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive)
{
	struct inode *dir = parent->d_inode;
	struct dentry *dentry;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(name, parent, len);
	if (IS_ERR(dentry))
		goto out_err;
	if (!dentry->d_inode)
		dentry->d_op = &rpc_dentry_operations;
	else if (exclusive) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	return dentry;
}

static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1);
	if (IS_ERR(dentry))
		rpc_release_path(nd);
	return dentry;
}


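/**
 * rpc_mkdir - create a directory in rpc_pipefs
 * @path: path of the new directory, relative to the rpc_pipefs root
 * @rpc_client: rpc client to associate with the directory
 *
 * Creates the directory and populates it with the standard "info"
 * file.  Returns the new dentry with an extra reference held (dropped
 * again by rpc_rmdir()), or an ERR_PTR on failure.
 */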
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}

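/**
 * rpc_rmdir - remove a directory created by rpc_mkdir()
 * @dentry: directory to remove
 *
 * Removes the "info" file and the directory itself, and drops the
 * reference taken by rpc_mkdir().
 */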
int
rpc_rmdir(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}

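/**
 * rpc_mkpipe - create an upcall/downcall pipe in rpc_pipefs
 * @parent: dentry of the directory to create the pipe in
 * @name: name of the pipe
 * @private: caller-private data attached to the pipe's inode
 * @ops: upcall/downcall/destroy callbacks for the pipe
 * @flags: pipe flags, e.g. RPC_PIPE_WAIT_FOR_OPEN
 *
 * If an identical pipe (same @private, @ops and @flags) already exists
 * under @parent, its kernel reference count is bumped and the existing
 * dentry is returned; a pipe with conflicting parameters yields
 * ERR_PTR(-EBUSY).  Every successful call must eventually be balanced
 * by rpc_unlink().
 *
 * Minimal usage sketch (illustrative only: the pipe name, the ops
 * table and the way the upcall message is prepared are made-up
 * placeholders, not part of this file):
 *
 *	dentry = rpc_mkpipe(parent, "example", clnt, &example_pipe_ops,
 *			    RPC_PIPE_WAIT_FOR_OPEN);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	rpc_queue_upcall(dentry->d_inode, msg);    (msg prepared by caller)
 *	...
 *	rpc_unlink(dentry);
 */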
struct dentry *
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_create(parent, name, strlen(name), 0);
	if (IS_ERR(dentry))
		return dentry;
	dir = parent->d_inode;
	if (dentry->d_inode) {
		/* Reuse an existing pipe only if it was set up identically;
		 * otherwise refuse without touching its reference count. */
		rpci = RPC_I(dentry->d_inode);
		if (rpci->private != private ||
				rpci->ops != ops ||
				rpci->flags != flags) {
			dput(dentry);
			dentry = ERR_PTR(-EBUSY);
			goto out;
		}
		rpci->nkern_readwriters++;
		goto out;
	}
	inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	rpci->nkern_readwriters = 1;
	inode_dir_notify(dir, DN_CREATE);
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
			__FILE__, __FUNCTION__, parent->d_name.name, name,
			-ENOMEM);
	goto out;
}

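/**
 * rpc_unlink - release a pipe created by rpc_mkpipe()
 * @dentry: dentry of the pipe
 *
 * Drops the kernel reference on the pipe; when the last reference goes
 * away the pipe is shut down and removed from rpc_pipefs.
 */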
int
rpc_unlink(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error = 0;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
		if (!error)
			d_delete(dentry);
	}
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}

/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969

static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}

static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}

static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};

static void
init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	inode_init_once(&rpci->vfs_inode);
	rpci->private = NULL;
	rpci->nreaders = 0;
	rpci->nwriters = 0;
	INIT_LIST_HEAD(&rpci->in_upcall);
	INIT_LIST_HEAD(&rpci->in_downcall);
	INIT_LIST_HEAD(&rpci->pipe);
	rpci->pipelen = 0;
	init_waitqueue_head(&rpci->waitq);
	INIT_DELAYED_WORK(&rpci->queue_timeout,
			    rpc_timeout_upcall_queue);
	rpci->ops = NULL;
}

int register_rpc_pipefs(void)
{
	int err;

	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once, NULL);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	err = register_filesystem(&rpc_pipe_fs_type);
	if (err) {
		kmem_cache_destroy(rpc_inode_cachep);
		return err;
	}

	return 0;
}

void unregister_rpc_pipefs(void)
{
	kmem_cache_destroy(rpc_inode_cachep);
	unregister_filesystem(&rpc_pipe_fs_type);
}