xref: /linux/net/sunrpc/rpc_pipe.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/dnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;


static struct kmem_cache *rpc_inode_cachep __read_mostly;

/* How long a queued upcall may wait for a reader before it is timed out */
#define RPC_UPCALL_TIMEOUT (30*HZ)

/*
 * Complete every message on @head with error @err via @destroy_msg, then
 * wake up anyone waiting on the pipe.
 */
static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}

/*
 * Delayed work: if no userspace reader has shown up in time, fail all
 * queued upcalls with -ETIMEDOUT.
 */
static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci =
		container_of(work, struct rpc_inode, queue_timeout.work);
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}

/*
 * Queue an upcall message on a pipe.  If no userspace reader has the pipe
 * open, the message is only accepted when RPC_PIPE_WAIT_FOR_OPEN is set,
 * in which case it may later be timed out by rpc_timeout_upcall_queue().
 * Returns 0 on success or -EPIPE.
 */
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}
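
/*
 * Example (illustrative sketch, not part of this file): a kernel-side
 * consumer such as an auth upcall mechanism wraps its request in a
 * struct rpc_pipe_msg and hands it to rpc_queue_upcall().  The names
 * my_request_buf, my_request_len, my_pipe_dentry and my_destroy_msg are
 * hypothetical placeholders.
 *
 *	msg->data = my_request_buf;
 *	msg->len = my_request_len;
 *	msg->copied = 0;
 *	res = rpc_queue_upcall(my_pipe_dentry->d_inode, msg);
 *	if (res < 0)
 *		my_destroy_msg(msg);
 *
 * A negative return (-EPIPE) means the pipe is closed, or has no readers
 * and RPC_PIPE_WAIT_FOR_OPEN is not set; on success the message stays
 * queued until a reader picks it up or the RPC_UPCALL_TIMEOUT fires, after
 * which ->destroy_msg() is called with msg->errno set accordingly.
 */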

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}

/*
 * Shut down a pipe: flush all queued and in-progress messages with -EPIPE,
 * detach the kernel-side ops and owner, and stop the upcall timeout work.
 */
static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work_sync(&rpci->queue_timeout);
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

/*
 * Opening a pipe fails with -ENXIO once the kernel side has shut it down
 * (rpci->ops == NULL); otherwise just account the new reader or writer.
 */
static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}

/*
 * On release, abort any partially read message and, when the last reader
 * goes away, fail all queued upcalls with -EAGAIN so their owners can
 * retry or give up.
 */
static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}

/*
 * Hand the upcall at the head of the pipe to userspace.  A partially
 * copied message is parked on filp->private_data (and on the in_upcall
 * list) so that a subsequent read() can continue where this one stopped.
 */
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_path.dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (!list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}

static const struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};
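
/*
 * Example (illustrative, not part of this file): the matching userspace
 * daemon typically opens the pipe file, waits for POLLIN, reads the
 * upcall and writes back a downcall.  The path, buffer and reply names
 * below are hypothetical.
 *
 *	int fd = open("/var/lib/nfs/rpc_pipefs/.../some_pipe", O_RDWR);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		n = read(fd, buf, sizeof(buf));
 *		build_reply(buf, n, reply, &reply_len);
 *		write(fd, reply, reply_len);
 *	}
 *
 * read() ends up in rpc_pipe_read() and the pipe's ->upcall() callback;
 * write() ends up in rpc_pipe_write() and ->downcall().
 */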

static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
	return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			kref_get(&clnt->cl_kref);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static const struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};
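
/*
 * Reading the per-client "info" file produces output in the format emitted
 * by rpc_show_info() above, along the lines of (values illustrative only):
 *
 *	RPC server: servername
 *	service: nfs (100003) version 3
 *	address: 192.168.0.1
 *	protocol: tcp
 */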


/*
 * Fixed inode numbers for the entries of the top-level directory.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};

/*
 * Pin the rpc_pipefs mount (creating an internal mount if necessary) and
 * return it; callers must balance this with rpc_put_mount().
 */
struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}
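
/*
 * Example (illustrative only): in-kernel callers that need a path into the
 * pipefs pin the mount for the duration of use and drop it afterwards,
 * much as rpc_lookup_parent() below does:
 *
 *	struct vfsmount *mnt = rpc_get_mount();
 *
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *	... look up or create objects under mnt->mnt_root ...
 *	rpc_put_mount();
 */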

static int rpc_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations rpc_dentry_operations = {
	.d_delete = rpc_delete_dentry,
};

static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	struct vfsmount *mnt;

	if (path[0] == '\0')
		return -ENOENT;

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
			       "pseudofilesystem \n", __FILE__, __FUNCTION__);
		return PTR_ERR(mnt);
	}

	if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}

static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch(mode & S_IFMT) {
		case S_IFDIR:
			inode->i_fop = &simple_dir_operations;
			inode->i_op = &simple_dir_inode_operations;
			inc_nlink(inode);
		default:
			break;
	}
	return inode;
}

/*
 * FIXME: This probably has races.
 */
static void
rpc_depopulate(struct dentry *parent, int start, int eof)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		if (!dentry->d_inode ||
				dentry->d_inode->i_ino < start ||
				dentry->d_inode->i_ino >= eof)
			continue;
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (S_ISREG(dentry->d_inode->i_mode))
				simple_unlink(dir, dentry);
			else if (S_ISDIR(dentry->d_inode->i_mode))
				simple_rmdir(dir, dentry);
			d_delete(dentry);
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}

/*
 * Instantiate the entries described by @files in directory @parent; the
 * table index doubles as the inode number.
 */
static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		dentry->d_op = &rpc_dentry_operations;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			inc_nlink(dir);
		d_add(dentry, inode);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}

static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	inc_nlink(dir);
	inode_dir_notify(dir, DN_CREATE);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}

static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;
	error = simple_rmdir(dir, dentry);
	if (!error)
		d_delete(dentry);
	return error;
}

static struct dentry *
rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive)
{
	struct inode *dir = parent->d_inode;
	struct dentry *dentry;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(name, parent, len);
	if (IS_ERR(dentry))
		goto out_err;
	if (!dentry->d_inode)
		dentry->d_op = &rpc_dentry_operations;
	else if (exclusive) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	return dentry;
}

static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1);
	if (IS_ERR(dentry))
		rpc_release_path(nd);
	return dentry;
}


/*
 * Create a directory (plus its "info" file) for an RPC client at the
 * given pipefs path.
 */
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}

/*
 * Remove a directory created by rpc_mkdir() together with its contents.
 */
int
rpc_rmdir(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
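
/*
 * Example (illustrative sketch): an RPC client creates its own directory
 * under one of the top-level program directories and removes it again on
 * shutdown.  The path format and variable names below are hypothetical.
 *
 *	snprintf(name, sizeof(name), "nfs/clnt%x", clid);
 *	dentry = rpc_mkdir(name, clnt);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	...
 *	rpc_rmdir(dentry);
 */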

/*
 * Create a pipe named @name under @parent, or re-attach to an existing
 * one.  A second kernel user may share a pipe only if it passes the same
 * private data, ops and flags; otherwise -EBUSY is returned.
 */
struct dentry *
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_create(parent, name, strlen(name), 0);
	if (IS_ERR(dentry))
		return dentry;
	dir = parent->d_inode;
	if (dentry->d_inode) {
		rpci = RPC_I(dentry->d_inode);
		if (rpci->private != private ||
				rpci->ops != ops ||
				rpci->flags != flags) {
			/* The pipe belongs to somebody else: don't claim a
			 * kernel reference on it. */
			dput(dentry);
			dentry = ERR_PTR(-EBUSY);
			goto out;
		}
		rpci->nkern_readwriters++;
		goto out;
	}
	inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	rpci->nkern_readwriters = 1;
	inode_dir_notify(dir, DN_CREATE);
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
			__FILE__, __FUNCTION__, parent->d_name.name, name,
			-ENOMEM);
	goto out;
}

/*
 * Drop one kernel user of a pipe; the pipe is closed and removed once the
 * last such user goes away.
 */
int
rpc_unlink(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error = 0;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
		if (!error)
			d_delete(dentry);
	}
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
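
/*
 * Example (illustrative sketch): pairing rpc_mkpipe() with rpc_unlink().
 * "my_ops", the callback names and the pipe name are hypothetical; the
 * same pipe may be created by several in-kernel users as long as private,
 * ops and flags match, and it only disappears once the last of them calls
 * rpc_unlink().
 *
 *	static struct rpc_pipe_ops my_ops = {
 *		.upcall		= my_upcall,
 *		.downcall	= my_downcall,
 *		.destroy_msg	= my_destroy_msg,
 *	};
 *
 *	dentry = rpc_mkpipe(clnt->cl_dentry, "my_pipe", clnt, &my_ops,
 *			    RPC_PIPE_WAIT_FOR_OPEN);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	...
 *	rpc_unlink(dentry);
 */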

/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969

static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}

static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}

static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};
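
/*
 * The filesystem registered above is typically mounted from userspace;
 * the mount point below is the usual convention, not a requirement of
 * this code:
 *
 *	mount -t rpc_pipefs sunrpc /var/lib/nfs/rpc_pipefs
 */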

static void
init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	inode_init_once(&rpci->vfs_inode);
	rpci->private = NULL;
	rpci->nreaders = 0;
	rpci->nwriters = 0;
	INIT_LIST_HEAD(&rpci->in_upcall);
	INIT_LIST_HEAD(&rpci->in_downcall);
	INIT_LIST_HEAD(&rpci->pipe);
	rpci->pipelen = 0;
	init_waitqueue_head(&rpci->waitq);
	INIT_DELAYED_WORK(&rpci->queue_timeout,
			    rpc_timeout_upcall_queue);
	rpci->ops = NULL;
}

int register_rpc_pipefs(void)
{
	int err;

	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	err = register_filesystem(&rpc_pipe_fs_type);
	if (err) {
		kmem_cache_destroy(rpc_inode_cachep);
		return err;
	}

	return 0;
}

void unregister_rpc_pipefs(void)
{
	kmem_cache_destroy(rpc_inode_cachep);
	unregister_filesystem(&rpc_pipe_fs_type);
}
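
/*
 * Example (illustrative only): register_rpc_pipefs()/unregister_rpc_pipefs()
 * are intended to be called once from the sunrpc module's init and exit
 * paths; a caller would pair them roughly as follows (error handling of
 * the surrounding init code omitted):
 *
 *	err = register_rpc_pipefs();
 *	if (err)
 *		goto out;
 *	...
 *	unregister_rpc_pipefs();
 */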
886