// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <linux/uaccess.h>

#include "spufs.h"

struct spufs_sb_info {
	bool debug;
};

static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;

static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
{
	return sb->s_fs_info;
}

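/*
 * Per-inode bookkeeping: each spufs inode carries an optional SPU context
 * and/or gang reference.  Inodes come from a dedicated slab cache, so the
 * embedded struct inode only needs inode_init_once() at slab-constructor
 * time (spufs_init_once below).
 */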
static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
	struct spufs_inode_info *ei;

	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->i_gang = NULL;
	ei->i_ctx = NULL;
	ei->i_openers = 0;

	return &ei->vfs_inode;
}

static void spufs_free_inode(struct inode *inode)
{
	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}

static void
spufs_init_once(void *p)
{
	struct spufs_inode_info *ei = p;

	inode_init_once(&ei->vfs_inode);
}

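/*
 * Allocate a fresh inode on the spufs superblock and stamp it with the
 * caller's fsuid/fsgid and the current time.  The caller fills in i_op,
 * i_fop and the file type bits of @mode.
 */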
static struct inode *
spufs_new_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		goto out;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
out:
	return inode;
}

static int
spufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
	      struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);

	if ((attr->ia_valid & ATTR_SIZE) &&
	    (attr->ia_size != inode->i_size))
		return -EINVAL;
	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}


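/*
 * Create one regular file inside a context directory.  The file is backed
 * by @fops from the spufs file tables and pinned to @ctx by taking an extra
 * context reference, which spufs_evict_inode() drops again.
 */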
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
		const struct file_operations *fops, umode_t mode,
		size_t size, struct spu_context *ctx)
{
	static const struct inode_operations spufs_file_iops = {
		.setattr = spufs_setattr,
	};
	struct inode *inode;
	int ret;

	ret = -ENOSPC;
	inode = spufs_new_inode(sb, S_IFREG | mode);
	if (!inode)
		goto out;

	ret = 0;
	inode->i_op = &spufs_file_iops;
	inode->i_fop = fops;
	inode->i_size = size;
	inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
	d_add(dentry, inode);
out:
	return ret;
}

static void
spufs_evict_inode(struct inode *inode)
{
	struct spufs_inode_info *ei = SPUFS_I(inode);
	clear_inode(inode);
	if (ei->i_ctx)
		put_spu_context(ei->i_ctx);
	if (ei->i_gang)
		put_spu_gang(ei->i_gang);
}

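/*
 * Tear down every file in a context directory.  Each positive dentry is
 * unhashed and unlinked under the directory's inode lock, so that a context
 * directory can be removed as one unit when its last opener goes away.
 */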
static void spufs_prune_dir(struct dentry *dir)
{
	struct dentry *dentry, *tmp;

	inode_lock(d_inode(dir));
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
		spin_lock(&dentry->d_lock);
		if (simple_positive(dentry)) {
			dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			simple_unlink(d_inode(dir), dentry);
			/* XXX: what was dcache_lock protecting here? Other
			 * filesystems (IB, configfs) release dcache_lock
			 * before unlink */
			dput(dentry);
		} else {
			spin_unlock(&dentry->d_lock);
		}
	}
	shrink_dcache_parent(dir);
	inode_unlock(d_inode(dir));
}

/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
	/* remove all entries */
	int res;
	spufs_prune_dir(dir);
	d_drop(dir);
	res = simple_rmdir(parent, dir);
	/* We have to give up the mm_struct */
	spu_forget(SPUFS_I(d_inode(dir))->i_ctx);
	return res;
}

static int spufs_fill_dir(struct dentry *dir,
		const struct spufs_tree_descr *files, umode_t mode,
		struct spu_context *ctx)
{
	while (files->name && files->name[0]) {
		int ret;
		struct dentry *dentry = d_alloc_name(dir, files->name);
		if (!dentry)
			return -ENOMEM;
		ret = spufs_new_file(dir->d_sb, dentry, files->ops,
					files->mode & mode, files->size, ctx);
		if (ret)
			return ret;
		files++;
	}
	return 0;
}

static int spufs_dir_close(struct inode *inode, struct file *file)
{
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_path.dentry;
	parent = d_inode(dir->d_parent);

	inode_lock_nested(parent, I_MUTEX_PARENT);
	ret = spufs_rmdir(parent, dir);
	inode_unlock(parent);
	WARN_ON(ret);

	return dcache_dir_close(inode, file);
}

const struct file_operations spufs_context_fops = {
	.open		= dcache_dir_open,
	.release	= spufs_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.iterate_shared	= dcache_readdir,
	.fsync		= noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);

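/*
 * Create a context directory: allocate the spu_context, wire it to the new
 * directory inode and populate the directory from the static file tables
 * (the nosched variant and, on debug mounts, the extra debug files).  Any
 * failure tears the directory down again via spufs_rmdir().
 */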
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
		umode_t mode)
{
	int ret;
	struct inode *inode;
	struct spu_context *ctx;

	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		return -ENOSPC;

	inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR);
	ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
	SPUFS_I(inode)->i_ctx = ctx;
	if (!ctx) {
		iput(inode);
		return -ENOSPC;
	}

	ctx->flags = flags;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	inode_lock(inode);

	dget(dentry);
	inc_nlink(dir);
	inc_nlink(inode);

	d_instantiate(dentry, inode);

	if (flags & SPU_CREATE_NOSCHED)
		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
					 mode, ctx);
	else
		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);

	if (!ret && spufs_get_sb_info(dir->i_sb)->debug)
		ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
				mode, ctx);

	if (ret)
		spufs_rmdir(dir, dentry);

	inode_unlock(inode);

	return ret;
}

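/*
 * Hand the freshly made context directory back to userspace as an open file
 * descriptor.  The descriptor uses spufs_context_fops, so closing it is what
 * eventually removes the context directory again (see spufs_dir_close()).
 */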
static int spufs_context_open(const struct path *path)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd_flags(0);
	if (ret < 0)
		return ret;

	filp = dentry_open(path, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		return PTR_ERR(filp);
	}

	filp->f_op = &spufs_context_fops;
	fd_install(ret, filp);
	return ret;
}

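/*
 * Validate an affinity request before a context is created in @gang:
 * affinity must be supported by the hardware, only one context per gang may
 * request memory affinity, and an SPU-affinity reference must name another
 * context of the same gang with a free slot next to it on a node with
 * enough unreserved SPUs.  Returns the (referenced) neighbor context, NULL
 * when no SPU neighbor was given, or an ERR_PTR on failure.
 */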
static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
						struct file *filp)
{
	struct spu_context *tmp, *neighbor, *err;
	int count, node;
	int aff_supp;

	aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
					struct spu, cbe_list))->aff_list);

	if (!aff_supp)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_GANG)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_AFFINITY_MEM &&
	    gang->aff_ref_ctx &&
	    gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
		return ERR_PTR(-EEXIST);

	if (gang->aff_flags & AFF_MERGED)
		return ERR_PTR(-EBUSY);

	neighbor = NULL;
	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (!filp || filp->f_op != &spufs_context_fops)
			return ERR_PTR(-EINVAL);

		neighbor = get_spu_context(
				SPUFS_I(file_inode(filp))->i_ctx);

		if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
		    !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
		    !list_entry(neighbor->aff_list.next, struct spu_context,
		    aff_list)->aff_head) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}

		if (gang != neighbor->gang) {
			err = ERR_PTR(-EINVAL);
			goto out_put_neighbor;
		}

		count = 1;
		list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
			count++;
		if (list_empty(&neighbor->aff_list))
			count++;

		for (node = 0; node < MAX_NUMNODES; node++) {
			if ((cbe_spu_info[node].n_spus - atomic_read(
				&cbe_spu_info[node].reserved_spus)) >= count)
				break;
		}

		if (node == MAX_NUMNODES) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}
	}

	return neighbor;

out_put_neighbor:
	put_spu_context(neighbor);
	return err;
}

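/*
 * Record the affinity decision on the new context: mark it as the gang's
 * memory-affinity reference and/or link it into the gang's affinity list
 * right next to the chosen neighbor, keeping aff_head pointing at the head
 * of the chain.
 */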
static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
					struct spu_context *neighbor)
{
	if (flags & SPU_CREATE_AFFINITY_MEM)
		ctx->gang->aff_ref_ctx = ctx;

	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (list_empty(&neighbor->aff_list)) {
			list_add_tail(&neighbor->aff_list,
				&ctx->gang->aff_list_head);
			neighbor->aff_head = 1;
		}

		if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
		    || list_entry(neighbor->aff_list.next, struct spu_context,
							aff_list)->aff_head) {
			list_add(&ctx->aff_list, &neighbor->aff_list);
		} else {
			list_add_tail(&ctx->aff_list, &neighbor->aff_list);
			if (neighbor->aff_head) {
				neighbor->aff_head = 0;
				ctx->aff_head = 1;
			}
		}

		if (!ctx->gang->aff_ref_ctx)
			ctx->gang->aff_ref_ctx = ctx;
	}
}

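/*
 * Create a context directory underneath @inode (the gang directory or the
 * spufs root): check permissions for the requested flags, resolve any
 * affinity request under the gang's aff_mutex, build the directory and
 * finally return it to the caller as an open file descriptor.
 */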
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
			struct vfsmount *mnt, int flags, umode_t mode,
			struct file *aff_filp)
{
	int ret;
	int affinity;
	struct spu_gang *gang;
	struct spu_context *neighbor;
	struct path path = {.mnt = mnt, .dentry = dentry};

	if ((flags & SPU_CREATE_NOSCHED) &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
	    == SPU_CREATE_ISOLATE)
		return -EINVAL;

	if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
		return -ENODEV;

	gang = NULL;
	neighbor = NULL;
	affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
	if (affinity) {
		gang = SPUFS_I(inode)->i_gang;
		if (!gang)
			return -EINVAL;
		mutex_lock(&gang->aff_mutex);
		neighbor = spufs_assert_affinity(flags, gang, aff_filp);
		if (IS_ERR(neighbor)) {
			ret = PTR_ERR(neighbor);
			goto out_aff_unlock;
		}
	}

	ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
	if (ret)
		goto out_aff_unlock;

	if (affinity) {
		spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
								neighbor);
		if (neighbor)
			put_spu_context(neighbor);
	}

	ret = spufs_context_open(&path);
	if (ret < 0)
		WARN_ON(spufs_rmdir(inode, dentry));

out_aff_unlock:
	if (affinity)
		mutex_unlock(&gang->aff_mutex);
	return ret;
}

static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int ret;
	struct inode *inode;
	struct spu_gang *gang;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	ret = 0;
	inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR);
	gang = alloc_spu_gang();
	SPUFS_I(inode)->i_ctx = NULL;
	SPUFS_I(inode)->i_gang = gang;
	if (!gang) {
		ret = -ENOMEM;
		goto out_iput;
	}

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	d_instantiate(dentry, inode);
	inc_nlink(dir);
	inc_nlink(d_inode(dentry));
	return ret;

out_iput:
	iput(inode);
out:
	return ret;
}

static int spufs_gang_open(const struct path *path)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd_flags(0);
	if (ret < 0)
		return ret;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	filp = dentry_open(path, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		return PTR_ERR(filp);
	}

	filp->f_op = &simple_dir_operations;
	fd_install(ret, filp);
	return ret;
}

static int spufs_create_gang(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, umode_t mode)
{
	struct path path = {.mnt = mnt, .dentry = dentry};
	int ret;

	ret = spufs_mkgang(inode, dentry, mode & 0777);
	if (!ret) {
		ret = spufs_gang_open(&path);
		if (ret < 0) {
			int err = simple_rmdir(inode, dentry);
			WARN_ON(err);
		}
	}
	return ret;
}


static struct file_system_type spufs_type;

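/*
 * Entry point used by the spu_create(2) system call: @path names the new
 * directory inside an existing spufs mount, @flags selects between plain
 * contexts, gangs and the NOSCHED/ISOLATE/affinity variants, and @filp
 * optionally carries the neighbor context for SPU affinity.
 *
 * Illustrative userspace sketch (not part of this file; it assumes wrappers
 * for the spu_create(2)/spu_run(2) syscalls, e.g. via syscall(2) or libspe,
 * and a spufs mount at /spu -- names are examples only):
 *
 *	int ctx = spu_create("/spu/myctx", 0, 0700);
 *	if (ctx >= 0) {
 *		unsigned int npc = 0;	// entry point in SPU local store
 *		spu_run(ctx, &npc, NULL);
 *		close(ctx);		// tears down /spu/myctx again
 *	}
 */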
long spufs_create(const struct path *path, struct dentry *dentry,
		unsigned int flags, umode_t mode, struct file *filp)
{
	struct inode *dir = d_inode(path->dentry);
	int ret;

	/* check if we are on spufs */
	if (path->dentry->d_sb->s_type != &spufs_type)
		return -EINVAL;

	/* don't accept undefined flags */
	if (flags & (~SPU_CREATE_FLAG_ALL))
		return -EINVAL;

	/* only threads can be underneath a gang */
	if (path->dentry != path->dentry->d_sb->s_root)
		if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang)
			return -EINVAL;

	mode &= ~current_umask();

	if (flags & SPU_CREATE_GANG)
		ret = spufs_create_gang(dir, dentry, path->mnt, mode);
	else
		ret = spufs_create_context(dir, dentry, path->mnt, flags, mode,
					    filp);
	if (ret >= 0)
		fsnotify_mkdir(dir, dentry);

	return ret;
}

/* File system initialization */
struct spufs_fs_context {
	kuid_t	uid;
	kgid_t	gid;
	umode_t	mode;
};

enum {
	Opt_uid, Opt_gid, Opt_mode, Opt_debug,
};

static const struct fs_parameter_spec spufs_fs_parameters[] = {
	fsparam_u32	("gid",				Opt_gid),
	fsparam_u32oct	("mode",			Opt_mode),
	fsparam_u32	("uid",				Opt_uid),
	fsparam_flag	("debug",			Opt_debug),
	{}
};

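/*
 * Mount options: "uid", "gid" and "mode" control ownership and permissions
 * of the spufs root, "debug" additionally exposes the debug file set in
 * every context directory (see spufs_mkdir()).
 *
 * Illustrative invocation (the mount point name is just an example):
 *
 *	mount -t spufs -o uid=0,gid=0,mode=0775,debug none /spu
 */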
static int spufs_show_options(struct seq_file *m, struct dentry *root)
{
	struct spufs_sb_info *sbi = spufs_get_sb_info(root->d_sb);
	struct inode *inode = root->d_inode;

	if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, inode->i_uid));
	if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, inode->i_gid));
	if ((inode->i_mode & S_IALLUGO) != 0775)
		seq_printf(m, ",mode=%o", inode->i_mode);
	if (sbi->debug)
		seq_puts(m, ",debug");
	return 0;
}

static int spufs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct spufs_fs_context *ctx = fc->fs_private;
	struct spufs_sb_info *sbi = fc->s_fs_info;
	struct fs_parse_result result;
	kuid_t uid;
	kgid_t gid;
	int opt;

	opt = fs_parse(fc, spufs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(uid))
			return invalf(fc, "Unknown uid");
		ctx->uid = uid;
		break;
	case Opt_gid:
		gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(gid))
			return invalf(fc, "Unknown gid");
		ctx->gid = gid;
		break;
	case Opt_mode:
		ctx->mode = result.uint_32 & S_IALLUGO;
		break;
	case Opt_debug:
		sbi->debug = true;
		break;
	}

	return 0;
}

static void spufs_exit_isolated_loader(void)
{
	free_pages((unsigned long) isolated_loader,
			get_order(isolated_loader_size));
}

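/*
 * Copy the isolated-mode loader out of the "/spu-isolation" device tree
 * node.  Contexts created with SPU_CREATE_ISOLATE refuse to start when no
 * loader was found (see spufs_create_context()).
 */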
static void __init
spufs_init_isolated_loader(void)
{
	struct device_node *dn;
	const char *loader;
	int size;

	dn = of_find_node_by_path("/spu-isolation");
	if (!dn)
		return;

	loader = of_get_property(dn, "loader", &size);
	of_node_put(dn);
	if (!loader)
		return;

	/* the loader must be aligned on a 16 byte boundary */
	isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!isolated_loader)
		return;

	isolated_loader_size = size;
	memcpy(isolated_loader, loader, size);
	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}

static int spufs_create_root(struct super_block *sb, struct fs_context *fc)
{
	struct spufs_fs_context *ctx = fc->fs_private;
	struct inode *inode;

	if (!spu_management_ops)
		return -ENODEV;

	inode = spufs_new_inode(sb, S_IFDIR | ctx->mode);
	if (!inode)
		return -ENOMEM;

	inode->i_uid = ctx->uid;
	inode->i_gid = ctx->gid;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	SPUFS_I(inode)->i_ctx = NULL;
	inc_nlink(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static const struct super_operations spufs_ops = {
	.alloc_inode	= spufs_alloc_inode,
	.free_inode	= spufs_free_inode,
	.statfs		= simple_statfs,
	.evict_inode	= spufs_evict_inode,
	.show_options	= spufs_show_options,
};

static int spufs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = SPUFS_MAGIC;
	sb->s_op = &spufs_ops;

	return spufs_create_root(sb, fc);
}

static int spufs_get_tree(struct fs_context *fc)
{
	return get_tree_single(fc, spufs_fill_super);
}

static void spufs_free_fc(struct fs_context *fc)
{
	kfree(fc->s_fs_info);
}

static const struct fs_context_operations spufs_context_ops = {
	.free		= spufs_free_fc,
	.parse_param	= spufs_parse_param,
	.get_tree	= spufs_get_tree,
};

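/*
 * Set up the per-mount state for the new mount API: spufs_fs_context holds
 * the pending uid/gid/mode options, while spufs_sb_info (later sb->s_fs_info)
 * remembers whether the debug file set was requested.  Defaults are the
 * mounting user's credentials and mode 0755.
 */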
static int spufs_init_fs_context(struct fs_context *fc)
{
	struct spufs_fs_context *ctx;
	struct spufs_sb_info *sbi;

	ctx = kzalloc(sizeof(struct spufs_fs_context), GFP_KERNEL);
	if (!ctx)
		goto nomem;

	sbi = kzalloc(sizeof(struct spufs_sb_info), GFP_KERNEL);
	if (!sbi)
		goto nomem_ctx;

	ctx->uid = current_uid();
	ctx->gid = current_gid();
	ctx->mode = 0755;

	fc->fs_private = ctx;
	fc->s_fs_info = sbi;
	fc->ops = &spufs_context_ops;
	return 0;

nomem_ctx:
	kfree(ctx);
nomem:
	return -ENOMEM;
}

static struct file_system_type spufs_type = {
	.owner = THIS_MODULE,
	.name = "spufs",
	.init_fs_context = spufs_init_fs_context,
	.parameters	= spufs_fs_parameters,
	.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("spufs");

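/*
 * Module init/exit: bail out early when the platform has no SPU management
 * ops, then bring up the inode cache, the SPU scheduler, the spu_create/
 * spu_run syscall hooks and the filesystem itself, and finally try to load
 * the isolated-mode loader.  spufs_exit() tears all of this down again.
 */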
static int __init spufs_init(void)
{
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
			sizeof(struct spufs_inode_info), 0,
			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, spufs_init_once);

	if (!spufs_inode_cache)
		goto out;
	ret = spu_sched_init();
	if (ret)
		goto out_cache;
	ret = register_spu_syscalls(&spufs_calls);
	if (ret)
		goto out_sched;
	ret = register_filesystem(&spufs_type);
	if (ret)
		goto out_syscalls;

	spufs_init_isolated_loader();

	return 0;

out_syscalls:
	unregister_spu_syscalls(&spufs_calls);
out_sched:
	spu_sched_exit();
out_cache:
	kmem_cache_destroy(spufs_inode_cache);
out:
	return ret;
}
module_init(spufs_init);

static void __exit spufs_exit(void)
{
	spu_sched_exit();
	spufs_exit_isolated_loader();
	unregister_spu_syscalls(&spufs_calls);
	unregister_filesystem(&spufs_type);
	kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");