/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	kuid_t   uid;
	kgid_t   gid;
	umode_t mode;
	long	nr_blocks;
	long	nr_inodes;
	struct hstate *hstate;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static struct backing_dev_info hugetlbfs_backing_dev_info = {
	.name		= "hugetlbfs",
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_err,	NULL},
};

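/*
 * Drop the reference held on each page in the pagevec (taken by the
 * pagevec lookup) and reset the pagevec so it can be refilled.
 */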
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * that the is_vm_hugetlb_page() tests below unmap_region() go
	 * the right way when do_mmap_pgoff() unwinds (may be important
	 * on powerpc and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	hugetlb_prefault_arch_hook(vma->vm_mm);
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		inode->i_size = len;
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

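/*
 * Copy up to min(size, count) bytes from a huge page into the user
 * buffer, one PAGE_CACHE_SIZE subpage at a time (kmap() only maps
 * individual small pages).  Returns the number of bytes copied, or
 * -EFAULT if nothing at all could be copied.
 */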
static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			char __user *buf, unsigned long count,
			unsigned long size)
{
	char *kaddr;
	unsigned long left, copied = 0;
	int i, chunksize;

	if (size > count)
		size = count;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		kaddr = kmap(&page[i]);
		left = __copy_to_user(buf, kaddr + offset, chunksize);
		kunmap(&page[i]);
		if (left) {
			copied += (chunksize - left);
			break;
		}
		offset = 0;
		size -= chunksize;
		buf += chunksize;
		copied += chunksize;
		i++;
	}
	return copied ? copied : -EFAULT;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
			      size_t len, loff_t *ppos)
{
	struct hstate *h = hstate_file(filp);
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = *ppos >> huge_page_shift(h);
	unsigned long offset = *ppos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	/* validate length */
	if (len == 0)
		goto out;

	for (;;) {
		struct page *page;
		unsigned long nr, ret;
		int ra;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			goto out;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			ret = len < nr ? len : nr;
			if (clear_user(buf, ret))
				ra = -EFAULT;
			else
				ra = 0;
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
			ret = ra;
			page_cache_release(page);
		}
		if (ra < 0) {
			if (retval == 0)
				retval = ra;
			goto out;
		}

		offset += ret;
		retval += ret;
		len -= ret;
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);

		/* short read or no more work */
		if ((ret != nr) || (len == 0))
			break;
	}
out:
	*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

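/*
 * hugetlbfs does not support write() through the page cache: write_begin
 * always fails, so the generic write path can never reach write_end, which
 * BUG()s if it is ever called.
 */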
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

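/*
 * Remove every page from lstart onwards from the page cache, then give
 * back the now unused reservation for the truncated range.
 */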
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (1) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			++next;
			truncate_huge_page(page);
			unlock_page(page);
			freed++;
		}
		huge_pagevec_release(&pvec);
	}
	BUG_ON(!lstart && mapping->nrpages);
	hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	truncate_hugepages(inode, 0);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* The root inode doesn't have a resv_map, so check before releasing it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

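/*
 * Walk every vma in the interval tree that maps the truncated range and
 * unmap the part of it that lies beyond the new end of file.
 */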
static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
		unsigned long v_offset;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < pgoff)
			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		unmap_hugepage_range(vma, vma->vm_start + v_offset,
				     vma->vm_end, NULL);
	}
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	mutex_lock(&mapping->i_mmap_mutex);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
	mutex_unlock(&mapping->i_mmap_mutex);
	truncate_hugepages(inode, offset);
	return 0;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = HUGETLBFS_I(inode);
		mpol_shared_policy_init(&info->policy, NULL);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_mutex.
 */
struct lock_class_key hugetlbfs_i_mmap_mutex_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
				&hugetlbfs_i_mmap_mutex_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->private_data = resv_map;
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(); later when we
		 * call mpol_free_shared_policy() it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

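/*
 * Migrate a huge page: move the page cache entry over to the new page,
 * then copy the contents and flags across.
 */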
static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(dentry->d_inode);

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

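/*
 * Inode accounting.  A negative free_inodes means no nr_inodes= limit was
 * given at mount time and no accounting is done; otherwise the count is
 * decremented on allocation and incremented on destruction.
 * hugetlbfs_dec_free_inodes() returns 0 once the limit is exhausted.
 */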
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage    = hugetlbfs_migrate_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read			= hugetlbfs_read,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode    = hugetlbfs_alloc_inode,
	.destroy_inode  = hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};

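/*
 * Parse the mount options into *pconfig.  Returns 0 on success or -EINVAL
 * on a malformed option.  size= is only converted into a block count after
 * the whole string has been parsed, so that the page size (and therefore
 * the hstate) is known first.
 */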
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long size = 0;
	enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			size = memparse(args[0].from, &rest);
			setsize = SIZE_STD;
			if (*rest == '%')
				setsize = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				printk(KERN_ERR
				"hugetlbfs: Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		default:
			printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
				 p);
			return -EINVAL;
			break;
		}
	}

	/* Do size after hstate is set up */
	if (setsize > NO_SIZE) {
		struct hstate *h = pconfig->hstate;
		if (setsize == SIZE_PERCENT) {
			size <<= huge_page_shift(h);
			size *= h->max_huge_pages;
			do_div(size, 100);
		}
		pconfig->nr_blocks = (size >> huge_page_shift(h));
	}

	return 0;

bad_val:
	printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
	       args[0].from, p);
	return -EINVAL;
}

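/*
 * Fill in the superblock: parse the mount options, set up the per-sb
 * accounting (huge page subpool and inode limit) and allocate the root
 * inode.
 */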
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.nr_blocks = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	if (config.nr_blocks != -1) {
		sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	if (sbinfo->spool)
		kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

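/*
 * SHM_HUGETLB segments may be created by tasks with CAP_IPC_LOCK or by
 * members of the group set in the hugetlb_shm_group sysctl.
 */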
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

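/*
 * Map a page size log2 value (0 selects the default huge page size) to an
 * index into the hstates[] array, or -1 if no hstate of that size exists.
 */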
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

static struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that size should be aligned to the proper hugepage size by the caller;
 * otherwise hugetlb_reserve_pages() reserves one less hugepage than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			printk_once(KERN_WARNING
				"%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	error = bdi_init(&hugetlbfs_backing_dev_info);
	if (error)
		return error;

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("hugetlb: Cannot mount internal hugetlbfs for "
				"page size %uK", ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	bdi_destroy(&hugetlbfs_backing_dev_info);
	return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
	struct hstate *h;
	int i;

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	i = 0;
	for_each_hstate(h)
		kern_unmount(hugetlbfs_vfsmount[i++]);
	unregister_filesystem(&hugetlbfs_fs_type);
	bdi_destroy(&hugetlbfs_backing_dev_info);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");