1 /*
2  * hugetlbpage-backed filesystem.  Based on ramfs.
3  *
4  * Nadia Yvette Chambers, 2002
5  *
6  * Copyright (C) 2002 Linus Torvalds.
7  * License: GPL
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/thread_info.h>
13 #include <asm/current.h>
14 #include <linux/falloc.h>
15 #include <linux/fs.h>
16 #include <linux/mount.h>
17 #include <linux/file.h>
18 #include <linux/kernel.h>
19 #include <linux/writeback.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/init.h>
23 #include <linux/string.h>
24 #include <linux/capability.h>
25 #include <linux/ctype.h>
26 #include <linux/backing-dev.h>
27 #include <linux/hugetlb.h>
28 #include <linux/pagevec.h>
29 #include <linux/fs_parser.h>
30 #include <linux/mman.h>
31 #include <linux/slab.h>
32 #include <linux/dnotify.h>
33 #include <linux/statfs.h>
34 #include <linux/security.h>
35 #include <linux/magic.h>
36 #include <linux/migrate.h>
37 #include <linux/uio.h>
38 
39 #include <linux/uaccess.h>
40 #include <linux/sched/mm.h>
41 
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/hugetlbfs.h>
44 
45 static const struct address_space_operations hugetlbfs_aops;
46 static const struct file_operations hugetlbfs_file_operations;
47 static const struct inode_operations hugetlbfs_dir_inode_operations;
48 static const struct inode_operations hugetlbfs_inode_operations;
49 
50 enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
51 
52 struct hugetlbfs_fs_context {
53 	struct hstate		*hstate;
54 	unsigned long long	max_size_opt;
55 	unsigned long long	min_size_opt;
56 	long			max_hpages;
57 	long			nr_inodes;
58 	long			min_hpages;
59 	enum hugetlbfs_size_type max_val_type;
60 	enum hugetlbfs_size_type min_val_type;
61 	kuid_t			uid;
62 	kgid_t			gid;
63 	umode_t			mode;
64 };
65 
66 int sysctl_hugetlb_shm_group;
67 
68 enum hugetlb_param {
69 	Opt_gid,
70 	Opt_min_size,
71 	Opt_mode,
72 	Opt_nr_inodes,
73 	Opt_pagesize,
74 	Opt_size,
75 	Opt_uid,
76 };
77 
78 static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
79 	fsparam_gid   ("gid",		Opt_gid),
80 	fsparam_string("min_size",	Opt_min_size),
81 	fsparam_u32oct("mode",		Opt_mode),
82 	fsparam_string("nr_inodes",	Opt_nr_inodes),
83 	fsparam_string("pagesize",	Opt_pagesize),
84 	fsparam_string("size",		Opt_size),
85 	fsparam_uid   ("uid",		Opt_uid),
86 	{}
87 };
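/*
 * For illustration only, a mount using the options above might look like
 * this (assuming 2 MiB huge pages are available on the system):
 *
 *   mount -t hugetlbfs -o pagesize=2M,size=50%,min_size=1G,uid=1000 \
 *         none /mnt/huge
 */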
88 
89 /*
90  * Mask used when checking the page offset value passed in via system
91  * calls.  This value will be converted to a loff_t which is signed.
92  * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
93  * value.  The extra bit (- 1 in the shift value) is to take the sign
94  * bit into account.
95  */
96 #define PGOFF_LOFFT_MAX \
97 	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
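/*
 * Illustration: on a 64-bit arch with 4 KiB base pages (PAGE_SHIFT == 12,
 * BITS_PER_LONG == 64), the mask above covers bits 51..63.  A vm_pgoff
 * with any of those bits set, e.g. 1UL << 51, corresponds to a byte
 * offset of at least 1UL << 63, which is negative when interpreted as a
 * loff_t.
 */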
98 
99 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
100 {
101 	struct inode *inode = file_inode(file);
102 	loff_t len, vma_len;
103 	int ret;
104 	struct hstate *h = hstate_file(file);
105 	vm_flags_t vm_flags;
106 
107 	/*
108 	 * vma address alignment (but not the pgoff alignment) has
109 	 * already been checked by prepare_hugepage_range().  If you add
110 	 * any error returns here, do so after setting VM_HUGETLB, so
111 	 * that the is_vm_hugetlb_page() tests below unmap_region() go
112 	 * the right way when do_mmap() unwinds (may be important on
113 	 * powerpc and ia64).
114 	 */
115 	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
116 	vma->vm_ops = &hugetlb_vm_ops;
117 
118 	/*
119 	 * The page-based offset in vm_pgoff could be large enough to
120 	 * overflow a loff_t when converted to a byte offset.  This can
121 	 * only happen on architectures where sizeof(loff_t) ==
122 	 * sizeof(unsigned long).  So, only check in those instances.
123 	 */
124 	if (sizeof(unsigned long) == sizeof(loff_t)) {
125 		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
126 			return -EINVAL;
127 	}
128 
129 	/* must be huge page aligned */
130 	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
131 		return -EINVAL;
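	/*
	 * Illustration: with 2 MiB huge pages and 4 KiB base pages, the
	 * check above reduces to (vma->vm_pgoff & 511), i.e. the mapping
	 * must start at a multiple of 512 base pages into the file.
	 */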
132 
133 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
134 	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
135 	/* check for overflow */
136 	if (len < vma_len)
137 		return -EINVAL;
138 
139 	inode_lock(inode);
140 	file_accessed(file);
141 
142 	ret = -ENOMEM;
143 
144 	vm_flags = vma->vm_flags;
145 	/*
146 	 * For SHM_HUGETLB, the pages are reserved in the shmget() call, so skip
147 	 * reserving here.  Note: the inode flag S_PRIVATE is set only for
148 	 * SHM hugetlbfs files.
149 	 */
150 	if (inode->i_flags & S_PRIVATE)
151 		vm_flags |= VM_NORESERVE;
152 
153 	if (hugetlb_reserve_pages(inode,
154 				vma->vm_pgoff >> huge_page_order(h),
155 				len >> huge_page_shift(h), vma,
156 				vm_flags) < 0)
157 		goto out;
158 
159 	ret = 0;
160 	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
161 		i_size_write(inode, len);
162 out:
163 	inode_unlock(inode);
164 
165 	return ret;
166 }
167 
168 /*
169  * Called under mmap_write_lock(mm).
170  */
171 
172 unsigned long
173 hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
174 			    unsigned long len, unsigned long pgoff,
175 			    unsigned long flags)
176 {
177 	unsigned long addr0 = 0;
178 	struct hstate *h = hstate_file(file);
179 
180 	if (len & ~huge_page_mask(h))
181 		return -EINVAL;
182 	if ((flags & MAP_FIXED) && (addr & ~huge_page_mask(h)))
183 		return -EINVAL;
184 	if (addr)
185 		addr0 = ALIGN(addr, huge_page_size(h));
186 
187 	return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff,
188 					    flags, 0);
189 }
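/*
 * Example: with 2 MiB huge pages, a non-MAP_FIXED hint address of
 * 0x20001000 is rounded up to 0x20200000 before the generic search runs,
 * while a MAP_FIXED request at that unaligned address fails with -EINVAL.
 */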
190 
191 /*
192  * Someone wants to read @bytes at @offset from a HWPOISON hugetlb @folio.
193  * Returns the maximum number of bytes one can read without touching the 1st raw
194  * HWPOISON page.
195  */
196 static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
197 		size_t bytes)
198 {
199 	struct page *page = folio_page(folio, offset / PAGE_SIZE);
200 	size_t safe_bytes;
201 
202 	if (is_raw_hwpoison_page_in_hugepage(page))
203 		return 0;
204 	/* Safe to read the remaining bytes in this page. */
205 	safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);
206 	page++;
207 
208 	/* Check each remaining page as long as we are not done yet. */
209 	for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, page++)
210 		if (is_raw_hwpoison_page_in_hugepage(page))
211 			break;
212 
213 	return min(safe_bytes, bytes);
214 }
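/*
 * Worked example (4 KiB base pages): if the third raw page of the folio
 * (bytes 8 KiB..12 KiB) is HWPOISON, a call with offset = 1024 and
 * bytes = 16384 accumulates the 3072 bytes left in page 0 plus the 4096
 * bytes of page 1, stops at the poisoned page 2, and returns
 * min(7168, 16384) = 7168.
 */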
215 
216 /*
217  * Support for read() - Find the page attached to f_mapping and copy out the
218  * data. This provides functionality similar to filemap_read().
219  */
220 static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
221 {
222 	struct file *file = iocb->ki_filp;
223 	struct hstate *h = hstate_file(file);
224 	struct address_space *mapping = file->f_mapping;
225 	struct inode *inode = mapping->host;
226 	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
227 	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
228 	unsigned long end_index;
229 	loff_t isize;
230 	ssize_t retval = 0;
231 
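	/*
	 * Example: with 2 MiB huge pages, a starting ki_pos of 5 MiB gives
	 * index = 2 (the third huge page) and offset = 1 MiB into it.
	 */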
232 	while (iov_iter_count(to)) {
233 		struct folio *folio;
234 		size_t nr, copied, want;
235 
236 		/* nr is the maximum number of bytes to copy from this page */
237 		nr = huge_page_size(h);
238 		isize = i_size_read(inode);
239 		if (!isize)
240 			break;
241 		end_index = (isize - 1) >> huge_page_shift(h);
242 		if (index > end_index)
243 			break;
244 		if (index == end_index) {
245 			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
246 			if (nr <= offset)
247 				break;
248 		}
249 		nr = nr - offset;
250 
251 		/* Find the folio */
252 		folio = filemap_lock_hugetlb_folio(h, mapping, index);
253 		if (IS_ERR(folio)) {
254 			/*
255 			 * We have a HOLE, zero out the user-buffer for the
256 			 * length of the hole or request.
257 			 */
258 			copied = iov_iter_zero(nr, to);
259 		} else {
260 			folio_unlock(folio);
261 
262 			if (!folio_test_hwpoison(folio))
263 				want = nr;
264 			else {
265 				/*
266 				 * Adjust how many bytes safe to read without
267 				 * touching the 1st raw HWPOISON page after
268 				 * offset.
269 				 */
270 				want = adjust_range_hwpoison(folio, offset, nr);
271 				if (want == 0) {
272 					folio_put(folio);
273 					retval = -EIO;
274 					break;
275 				}
276 			}
277 
278 			/*
279 			 * We have the folio, copy it to user space buffer.
280 			 */
281 			copied = copy_folio_to_iter(folio, offset, want, to);
282 			folio_put(folio);
283 		}
284 		offset += copied;
285 		retval += copied;
286 		if (copied != nr && iov_iter_count(to)) {
287 			if (!retval)
288 				retval = -EFAULT;
289 			break;
290 		}
291 		index += offset >> huge_page_shift(h);
292 		offset &= ~huge_page_mask(h);
293 	}
294 	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
295 	return retval;
296 }
297 
298 static int hugetlbfs_write_begin(const struct kiocb *iocb,
299 			struct address_space *mapping,
300 			loff_t pos, unsigned len,
301 			struct folio **foliop, void **fsdata)
302 {
303 	return -EINVAL;
304 }
305 
306 static int hugetlbfs_write_end(const struct kiocb *iocb,
307 			       struct address_space *mapping,
308 			       loff_t pos, unsigned len, unsigned copied,
309 			       struct folio *folio, void *fsdata)
310 {
311 	BUG();
312 	return -EINVAL;
313 }
314 
315 static void hugetlb_delete_from_page_cache(struct folio *folio)
316 {
317 	folio_clear_dirty(folio);
318 	folio_clear_uptodate(folio);
319 	filemap_remove_folio(folio);
320 }
321 
322 /*
323  * Called with i_mmap_rwsem held for inode-based vma maps.  This makes
324  * sure the vma (and vm_mm) will not go away.  We also hold the hugetlb
325  * fault mutex for the page in the mapping, so we cannot race with the
326  * page being faulted into the vma.
327  */
328 static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
329 				unsigned long addr, unsigned long pfn)
330 {
331 	pte_t *ptep, pte;
332 
333 	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
334 	if (!ptep)
335 		return false;
336 
337 	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
338 	if (huge_pte_none(pte) || !pte_present(pte))
339 		return false;
340 
341 	if (pte_pfn(pte) == pfn)
342 		return true;
343 
344 	return false;
345 }
346 
347 /*
348  * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
349  * No, because the interval tree returns us only those vmas
350  * which overlap the truncated area starting at pgoff,
351  * and no vma on a 32-bit arch can span beyond 4GB.
352  */
353 static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
354 {
355 	unsigned long offset = 0;
356 
357 	if (vma->vm_pgoff < start)
358 		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
359 
360 	return vma->vm_start + offset;
361 }
362 
363 static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
364 {
365 	unsigned long t_end;
366 
367 	if (!end)
368 		return vma->vm_end;
369 
370 	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
371 	if (t_end > vma->vm_end)
372 		t_end = vma->vm_end;
373 	return t_end;
374 }
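/*
 * Example: consider a vma covering [0x400000, 0x800000) with
 * vm_pgoff = 0x100 (file offset 1 MiB, 4 KiB base pages).  For a
 * truncation starting at pgoff 0x200, vma_offset_start() returns
 * 0x400000 + ((0x200 - 0x100) << 12) = 0x500000, the first virtual
 * address mapping beyond the new end of file.
 */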
375 
376 /*
377  * Called with hugetlb fault mutex held.  Therefore, no more mappings to
378  * this folio can be created while executing the routine.
379  */
380 static void hugetlb_unmap_file_folio(struct hstate *h,
381 					struct address_space *mapping,
382 					struct folio *folio, pgoff_t index)
383 {
384 	struct rb_root_cached *root = &mapping->i_mmap;
385 	struct hugetlb_vma_lock *vma_lock;
386 	unsigned long pfn = folio_pfn(folio);
387 	struct vm_area_struct *vma;
388 	unsigned long v_start;
389 	unsigned long v_end;
390 	pgoff_t start, end;
391 
392 	start = index * pages_per_huge_page(h);
393 	end = (index + 1) * pages_per_huge_page(h);
394 
395 	i_mmap_lock_write(mapping);
396 retry:
397 	vma_lock = NULL;
398 	vma_interval_tree_foreach(vma, root, start, end - 1) {
399 		v_start = vma_offset_start(vma, start);
400 		v_end = vma_offset_end(vma, end);
401 
402 		if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
403 			continue;
404 
405 		if (!hugetlb_vma_trylock_write(vma)) {
406 			vma_lock = vma->vm_private_data;
407 			/*
408 			 * If we cannot get the vma lock, we need to drop
409 			 * i_mmap_rwsem and take locks in order.  First,
410 			 * take a ref on the vma_lock structure so that
411 			 * we can be guaranteed it will not go away when
412 			 * dropping i_mmap_rwsem.
413 			 */
414 			kref_get(&vma_lock->refs);
415 			break;
416 		}
417 
418 		unmap_hugepage_range(vma, v_start, v_end, NULL,
419 				     ZAP_FLAG_DROP_MARKER);
420 		hugetlb_vma_unlock_write(vma);
421 	}
422 
423 	i_mmap_unlock_write(mapping);
424 
425 	if (vma_lock) {
426 		/*
427 		 * Wait on vma_lock.  We know it is still valid as we have
428 		 * a reference.  We must 'open code' vma locking as we do
429 		 * not know if vma_lock is still attached to vma.
430 		 */
431 		down_write(&vma_lock->rw_sema);
432 		i_mmap_lock_write(mapping);
433 
434 		vma = vma_lock->vma;
435 		if (!vma) {
436 			/*
437 			 * If lock is no longer attached to vma, then just
438 			 * unlock, drop our reference and retry looking for
439 			 * other vmas.
440 			 */
441 			up_write(&vma_lock->rw_sema);
442 			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
443 			goto retry;
444 		}
445 
446 		/*
447 		 * vma_lock is still attached to vma.  Check to see if vma
448 		 * still maps page and if so, unmap.
449 		 */
450 		v_start = vma_offset_start(vma, start);
451 		v_end = vma_offset_end(vma, end);
452 		if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
453 			unmap_hugepage_range(vma, v_start, v_end, NULL,
454 					     ZAP_FLAG_DROP_MARKER);
455 
456 		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
457 		hugetlb_vma_unlock_write(vma);
458 
459 		goto retry;
460 	}
461 }
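/*
 * Lock ordering note for the slow path above: the documented hugetlb
 * order is fault mutex -> vma_lock -> i_mmap_rwsem, which is why
 * i_mmap_rwsem is dropped before sleeping on vma_lock->rw_sema and
 * re-taken afterwards.
 */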
462 
463 static void
464 hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
465 		      zap_flags_t zap_flags)
466 {
467 	struct vm_area_struct *vma;
468 
469 	/*
470 	 * end == 0 indicates that the entire range after start should be
471 	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
472 	 * an inclusive "last".
473 	 */
474 	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
475 		unsigned long v_start;
476 		unsigned long v_end;
477 
478 		if (!hugetlb_vma_trylock_write(vma))
479 			continue;
480 
481 		v_start = vma_offset_start(vma, start);
482 		v_end = vma_offset_end(vma, end);
483 
484 		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);
485 
486 		/*
487 		 * Note that the vma lock only exists for shared/non-private
488 		 * vmas.  Therefore, the lock is not held when calling
489 		 * unmap_hugepage_range() for private vmas.
490 		 */
491 		hugetlb_vma_unlock_write(vma);
492 	}
493 }
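/*
 * Example: hugetlb_vmtruncate() passes end == 0 to unmap everything from
 * pgoff 'start' onward; the loop above turns that into the inclusive
 * interval [start, ULONG_MAX].
 */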
494 
495 /*
496  * Called with hugetlb fault mutex held.
497  * Returns true if page was actually removed, false otherwise.
498  */
499 static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
500 					struct address_space *mapping,
501 					struct folio *folio, pgoff_t index,
502 					bool truncate_op)
503 {
504 	bool ret = false;
505 
506 	/*
507 	 * If folio is mapped, it was faulted in after being
508 	 * unmapped in caller.  Unmap (again) while holding
509 	 * the fault mutex.  The mutex will prevent faults
510 	 * until we finish removing the folio.
511 	 */
512 	if (unlikely(folio_mapped(folio)))
513 		hugetlb_unmap_file_folio(h, mapping, folio, index);
514 
515 	folio_lock(folio);
516 	/*
517 	 * We must remove the folio from page cache before removing
518 	 * the region/reserve map (hugetlb_unreserve_pages).  In
519 	 * rare out-of-memory conditions, removal of the region/reserve
520 	 * map could fail.  Correspondingly, the subpool and global
521 	 * reserve usage count may need to be adjusted.
522 	 */
523 	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
524 	hugetlb_delete_from_page_cache(folio);
525 	ret = true;
526 	if (!truncate_op) {
527 		if (unlikely(hugetlb_unreserve_pages(inode, index,
528 							index + 1, 1)))
529 			hugetlb_fix_reserve_counts(inode);
530 	}
531 
532 	folio_unlock(folio);
533 	return ret;
534 }
535 
536 /*
537  * remove_inode_hugepages handles two distinct cases: truncation and hole
538  * punch.  There are subtle differences in operation for each case.
539  *
540  * truncation is indicated by end of range being LLONG_MAX
541  *	In this case, we first scan the range and release found pages.
542  *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
543  *	maps and global counts.  Page faults can race with truncation.
544  *	During faults, hugetlb_no_page() checks i_size before page allocation,
545  *	and again after obtaining page table lock.  It will 'back out'
546  *	allocations in the truncated range.
547  * hole punch is indicated if end is not LLONG_MAX
548  *	In the hole punch case we scan the range and release found pages.
549  *	Only when releasing a page is the associated region/reserve map
550 	 *	deleted.  Region/reserve map entries for ranges without associated
551 	 *	pages are not modified.  Page faults can race with hole punch.
552 	 *	This is indicated if we find a mapped page.
553 	 * Note: If the passed end of range value is beyond the end of file, but
554 	 * not LLONG_MAX, this routine still performs a hole punch operation.
555  */
556 static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
557 				   loff_t lend)
558 {
559 	struct hstate *h = hstate_inode(inode);
560 	struct address_space *mapping = &inode->i_data;
561 	const pgoff_t end = lend >> PAGE_SHIFT;
562 	struct folio_batch fbatch;
563 	pgoff_t next, index;
564 	int i, freed = 0;
565 	bool truncate_op = (lend == LLONG_MAX);
566 
567 	folio_batch_init(&fbatch);
568 	next = lstart >> PAGE_SHIFT;
569 	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
570 		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
571 			struct folio *folio = fbatch.folios[i];
572 			u32 hash = 0;
573 
574 			index = folio->index >> huge_page_order(h);
575 			hash = hugetlb_fault_mutex_hash(mapping, index);
576 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
577 
578 			/*
579 			 * Remove folio that was part of folio_batch.
580 			 */
581 			if (remove_inode_single_folio(h, inode, mapping, folio,
582 							index, truncate_op))
583 				freed++;
584 
585 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
586 		}
587 		folio_batch_release(&fbatch);
588 		cond_resched();
589 	}
590 
591 	if (truncate_op)
592 		(void)hugetlb_unreserve_pages(inode,
593 				lstart >> huge_page_shift(h),
594 				LONG_MAX, freed);
595 }
596 
597 static void hugetlbfs_evict_inode(struct inode *inode)
598 {
599 	struct resv_map *resv_map;
600 
601 	trace_hugetlbfs_evict_inode(inode);
602 	remove_inode_hugepages(inode, 0, LLONG_MAX);
603 
604 	/*
605 	 * Get the resv_map from the address space embedded in the inode.
606 	 * This is the address space which points to any resv_map allocated
607 	 * at inode creation time.  If this is a device special inode,
608 	 * i_mapping may not point to the original address space.
609 	 */
610 	resv_map = (struct resv_map *)(&inode->i_data)->i_private_data;
611 	/* Only regular and link inodes have associated reserve maps */
612 	if (resv_map)
613 		resv_map_release(&resv_map->refs);
614 	clear_inode(inode);
615 }
616 
617 static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
618 {
619 	pgoff_t pgoff;
620 	struct address_space *mapping = inode->i_mapping;
621 	struct hstate *h = hstate_inode(inode);
622 
623 	BUG_ON(offset & ~huge_page_mask(h));
624 	pgoff = offset >> PAGE_SHIFT;
625 
626 	i_size_write(inode, offset);
627 	i_mmap_lock_write(mapping);
628 	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
629 		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
630 				      ZAP_FLAG_DROP_MARKER);
631 	i_mmap_unlock_write(mapping);
632 	remove_inode_hugepages(inode, offset, LLONG_MAX);
633 }
634 
635 static void hugetlbfs_zero_partial_page(struct hstate *h,
636 					struct address_space *mapping,
637 					loff_t start,
638 					loff_t end)
639 {
640 	pgoff_t idx = start >> huge_page_shift(h);
641 	struct folio *folio;
642 
643 	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
644 	if (IS_ERR(folio))
645 		return;
646 
647 	start = start & ~huge_page_mask(h);
648 	end = end & ~huge_page_mask(h);
649 	if (!end)
650 		end = huge_page_size(h);
651 
652 	folio_zero_segment(folio, (size_t)start, (size_t)end);
653 
654 	folio_unlock(folio);
655 	folio_put(folio);
656 }
657 
658 static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
659 {
660 	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
661 	struct address_space *mapping = inode->i_mapping;
662 	struct hstate *h = hstate_inode(inode);
663 	loff_t hpage_size = huge_page_size(h);
664 	loff_t hole_start, hole_end;
665 
666 	/*
667 	 * hole_start and hole_end indicate the full pages within the hole.
668 	 */
669 	hole_start = round_up(offset, hpage_size);
670 	hole_end = round_down(offset + len, hpage_size);
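	/*
	 * Example: punching at offset = 1 MiB for len = 4 MiB with 2 MiB
	 * huge pages gives hole_start = 2 MiB and hole_end = 4 MiB: the
	 * partial ranges [1 MiB, 2 MiB) and [4 MiB, 5 MiB) are zeroed in
	 * place below, and only [2 MiB, 4 MiB) is removed from the file.
	 */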
671 
672 	inode_lock(inode);
673 
674 	/* protected by i_rwsem */
675 	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
676 		inode_unlock(inode);
677 		return -EPERM;
678 	}
679 
680 	i_mmap_lock_write(mapping);
681 
682 	/* If range starts before first full page, zero partial page. */
683 	if (offset < hole_start)
684 		hugetlbfs_zero_partial_page(h, mapping,
685 				offset, min(offset + len, hole_start));
686 
687 	/* Unmap users of full pages in the hole. */
688 	if (hole_end > hole_start) {
689 		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
690 			hugetlb_vmdelete_list(&mapping->i_mmap,
691 					      hole_start >> PAGE_SHIFT,
692 					      hole_end >> PAGE_SHIFT, 0);
693 	}
694 
695 	/* If range extends beyond last full page, zero partial page. */
696 	if ((offset + len) > hole_end && (offset + len) > hole_start)
697 		hugetlbfs_zero_partial_page(h, mapping,
698 				hole_end, offset + len);
699 
700 	i_mmap_unlock_write(mapping);
701 
702 	/* Remove full pages from the file. */
703 	if (hole_end > hole_start)
704 		remove_inode_hugepages(inode, hole_start, hole_end);
705 
706 	inode_unlock(inode);
707 
708 	return 0;
709 }
710 
711 static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
712 				loff_t len)
713 {
714 	struct inode *inode = file_inode(file);
715 	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
716 	struct address_space *mapping = inode->i_mapping;
717 	struct hstate *h = hstate_inode(inode);
718 	struct vm_area_struct pseudo_vma;
719 	struct mm_struct *mm = current->mm;
720 	loff_t hpage_size = huge_page_size(h);
721 	unsigned long hpage_shift = huge_page_shift(h);
722 	pgoff_t start, index, end;
723 	int error;
724 	u32 hash;
725 
726 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
727 		return -EOPNOTSUPP;
728 
729 	if (mode & FALLOC_FL_PUNCH_HOLE) {
730 		error = hugetlbfs_punch_hole(inode, offset, len);
731 		goto out_nolock;
732 	}
733 
734 	/*
735 	 * Default preallocate case.
736 	 * For this range, start is rounded down and end is rounded up,
737 	 * and both are converted to huge page offsets.
738 	 */
739 	start = offset >> hpage_shift;
740 	end = (offset + len + hpage_size - 1) >> hpage_shift;
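	/*
	 * Example: with 2 MiB huge pages, fallocate(offset = 1 MiB,
	 * len = 2 MiB) yields start = 0 and end = 2, so huge pages at
	 * indices 0 and 1 are preallocated.
	 */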
741 
742 	inode_lock(inode);
743 
744 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
745 	error = inode_newsize_ok(inode, offset + len);
746 	if (error)
747 		goto out;
748 
749 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
750 		error = -EPERM;
751 		goto out;
752 	}
753 
754 	/*
755 	 * Initialize a pseudo vma as this is required by the huge page
756 	 * allocation routines.
757 	 */
758 	vma_init(&pseudo_vma, mm);
759 	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
760 	pseudo_vma.vm_file = file;
761 
762 	for (index = start; index < end; index++) {
763 		/*
764 		 * This is supposed to be the vaddr where the page is being
765 		 * faulted in, but we have no vaddr here.
766 		 */
767 		struct folio *folio;
768 		unsigned long addr;
769 
770 		cond_resched();
771 
772 		/*
773 		 * fallocate(2) manpage permits EINTR; we may have been
774 		 * interrupted because we are using up too much memory.
775 		 */
776 		if (signal_pending(current)) {
777 			error = -EINTR;
778 			break;
779 		}
780 
781 		/* addr is the offset within the file (zero based) */
782 		addr = index * hpage_size;
783 
784 		/* mutex taken here, fault path and hole punch */
785 		hash = hugetlb_fault_mutex_hash(mapping, index);
786 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
787 
788 		/* See if already present in mapping to avoid alloc/free */
789 		folio = filemap_get_folio(mapping, index << huge_page_order(h));
790 		if (!IS_ERR(folio)) {
791 			folio_put(folio);
792 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
793 			continue;
794 		}
795 
796 		/*
797 		 * Allocate folio without setting the avoid_reserve argument.
798 		 * There certainly are no reserves associated with the
799 		 * pseudo_vma.  However, there could be shared mappings with
800 		 * reserves for the file at the inode level.  If we fallocate
801 		 * folios in these areas, we need to consume the reserves
802 		 * to keep reservation accounting consistent.
803 		 */
804 		folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
805 		if (IS_ERR(folio)) {
806 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
807 			error = PTR_ERR(folio);
808 			goto out;
809 		}
810 		folio_zero_user(folio, addr);
811 		__folio_mark_uptodate(folio);
812 		error = hugetlb_add_to_page_cache(folio, mapping, index);
813 		if (unlikely(error)) {
814 			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
815 			folio_put(folio);
816 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
817 			goto out;
818 		}
819 
820 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
821 
822 		folio_set_hugetlb_migratable(folio);
823 		/*
824 		 * folio_unlock because it was locked by hugetlb_add_to_page_cache();
825 		 * folio_put to drop the reference from alloc_hugetlb_folio().
826 		 */
827 		folio_unlock(folio);
828 		folio_put(folio);
829 	}
830 
831 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
832 		i_size_write(inode, offset + len);
833 	inode_set_ctime_current(inode);
834 out:
835 	inode_unlock(inode);
836 
837 out_nolock:
838 	trace_hugetlbfs_fallocate(inode, mode, offset, len, error);
839 	return error;
840 }
841 
842 static int hugetlbfs_setattr(struct mnt_idmap *idmap,
843 			     struct dentry *dentry, struct iattr *attr)
844 {
845 	struct inode *inode = d_inode(dentry);
846 	struct hstate *h = hstate_inode(inode);
847 	int error;
848 	unsigned int ia_valid = attr->ia_valid;
849 	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
850 
851 	error = setattr_prepare(idmap, dentry, attr);
852 	if (error)
853 		return error;
854 
855 	trace_hugetlbfs_setattr(inode, dentry, attr);
856 
857 	if (ia_valid & ATTR_SIZE) {
858 		loff_t oldsize = inode->i_size;
859 		loff_t newsize = attr->ia_size;
860 
861 		if (newsize & ~huge_page_mask(h))
862 			return -EINVAL;
863 		/* protected by i_rwsem */
864 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
865 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
866 			return -EPERM;
867 		hugetlb_vmtruncate(inode, newsize);
868 	}
869 
870 	setattr_copy(idmap, inode, attr);
871 	mark_inode_dirty(inode);
872 	return 0;
873 }
874 
875 static struct inode *hugetlbfs_get_root(struct super_block *sb,
876 					struct hugetlbfs_fs_context *ctx)
877 {
878 	struct inode *inode;
879 
880 	inode = new_inode(sb);
881 	if (inode) {
882 		inode->i_ino = get_next_ino();
883 		inode->i_mode = S_IFDIR | ctx->mode;
884 		inode->i_uid = ctx->uid;
885 		inode->i_gid = ctx->gid;
886 		simple_inode_init_ts(inode);
887 		inode->i_op = &hugetlbfs_dir_inode_operations;
888 		inode->i_fop = &simple_dir_operations;
889 		/* directory inodes start off with i_nlink == 2 (for "." entry) */
890 		inc_nlink(inode);
891 		lockdep_annotate_inode_mutex_key(inode);
892 	}
893 	return inode;
894 }
895 
896 /*
897  * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
898  * be taken from reclaim -- unlike regular filesystems. This needs an
899  * annotation because huge_pmd_share() does an allocation under hugetlb's
900  * i_mmap_rwsem.
901  */
902 static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
903 
904 static struct inode *hugetlbfs_get_inode(struct super_block *sb,
905 					struct mnt_idmap *idmap,
906 					struct inode *dir,
907 					umode_t mode, dev_t dev)
908 {
909 	struct inode *inode;
910 	struct resv_map *resv_map = NULL;
911 
912 	/*
913 	 * Reserve maps are only needed for inodes that can have associated
914 	 * page allocations.
915 	 */
916 	if (S_ISREG(mode) || S_ISLNK(mode)) {
917 		resv_map = resv_map_alloc();
918 		if (!resv_map)
919 			return NULL;
920 	}
921 
922 	inode = new_inode(sb);
923 	if (inode) {
924 		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
925 
926 		inode->i_ino = get_next_ino();
927 		inode_init_owner(idmap, inode, dir, mode);
928 		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
929 				&hugetlbfs_i_mmap_rwsem_key);
930 		inode->i_mapping->a_ops = &hugetlbfs_aops;
931 		simple_inode_init_ts(inode);
932 		inode->i_mapping->i_private_data = resv_map;
933 		info->seals = F_SEAL_SEAL;
934 		switch (mode & S_IFMT) {
935 		default:
936 			init_special_inode(inode, mode, dev);
937 			break;
938 		case S_IFREG:
939 			inode->i_op = &hugetlbfs_inode_operations;
940 			inode->i_fop = &hugetlbfs_file_operations;
941 			break;
942 		case S_IFDIR:
943 			inode->i_op = &hugetlbfs_dir_inode_operations;
944 			inode->i_fop = &simple_dir_operations;
945 
946 			/* directory inodes start off with i_nlink == 2 (for "." entry) */
947 			inc_nlink(inode);
948 			break;
949 		case S_IFLNK:
950 			inode->i_op = &page_symlink_inode_operations;
951 			inode_nohighmem(inode);
952 			break;
953 		}
954 		lockdep_annotate_inode_mutex_key(inode);
955 		trace_hugetlbfs_alloc_inode(inode, dir, mode);
956 	} else {
957 		if (resv_map)
958 			kref_put(&resv_map->refs, resv_map_release);
959 	}
960 
961 	return inode;
962 }
963 
964 /*
965  * File creation. Allocate an inode, and we're done..
966  */
967 static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
968 			   struct dentry *dentry, umode_t mode, dev_t dev)
969 {
970 	struct inode *inode;
971 
972 	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev);
973 	if (!inode)
974 		return -ENOSPC;
975 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
976 	d_instantiate(dentry, inode);
977 	dget(dentry);/* Extra count - pin the dentry in core */
978 	return 0;
979 }
980 
981 static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
982 				      struct dentry *dentry, umode_t mode)
983 {
984 	int retval = hugetlbfs_mknod(idmap, dir, dentry,
985 				     mode | S_IFDIR, 0);
986 	if (!retval)
987 		inc_nlink(dir);
988 	return ERR_PTR(retval);
989 }
990 
991 static int hugetlbfs_create(struct mnt_idmap *idmap,
992 			    struct inode *dir, struct dentry *dentry,
993 			    umode_t mode, bool excl)
994 {
995 	return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
996 }
997 
998 static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
999 			     struct inode *dir, struct file *file,
1000 			     umode_t mode)
1001 {
1002 	struct inode *inode;
1003 
1004 	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0);
1005 	if (!inode)
1006 		return -ENOSPC;
1007 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
1008 	d_tmpfile(file, inode);
1009 	return finish_open_simple(file, 0);
1010 }
1011 
1012 static int hugetlbfs_symlink(struct mnt_idmap *idmap,
1013 			     struct inode *dir, struct dentry *dentry,
1014 			     const char *symname)
1015 {
1016 	const umode_t mode = S_IFLNK|S_IRWXUGO;
1017 	struct inode *inode;
1018 	int error = -ENOSPC;
1019 
1020 	inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0);
1021 	if (inode) {
1022 		int l = strlen(symname)+1;
1023 		error = page_symlink(inode, symname, l);
1024 		if (!error) {
1025 			d_instantiate(dentry, inode);
1026 			dget(dentry);
1027 		} else
1028 			iput(inode);
1029 	}
1030 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
1031 
1032 	return error;
1033 }
1034 
1035 #ifdef CONFIG_MIGRATION
1036 static int hugetlbfs_migrate_folio(struct address_space *mapping,
1037 				struct folio *dst, struct folio *src,
1038 				enum migrate_mode mode)
1039 {
1040 	int rc;
1041 
1042 	rc = migrate_huge_page_move_mapping(mapping, dst, src);
1043 	if (rc)
1044 		return rc;
1045 
1046 	if (hugetlb_folio_subpool(src)) {
1047 		hugetlb_set_folio_subpool(dst,
1048 					hugetlb_folio_subpool(src));
1049 		hugetlb_set_folio_subpool(src, NULL);
1050 	}
1051 
1052 	folio_migrate_flags(dst, src);
1053 
1054 	return 0;
1055 }
1056 #else
1057 #define hugetlbfs_migrate_folio NULL
1058 #endif
1059 
1060 static int hugetlbfs_error_remove_folio(struct address_space *mapping,
1061 				struct folio *folio)
1062 {
1063 	return 0;
1064 }
1065 
1066 /*
1067  * Display the mount options in /proc/mounts.
1068  */
1069 static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
1070 {
1071 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
1072 	struct hugepage_subpool *spool = sbinfo->spool;
1073 	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
1074 	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
1075 	char mod;
1076 
1077 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
1078 		seq_printf(m, ",uid=%u",
1079 			   from_kuid_munged(&init_user_ns, sbinfo->uid));
1080 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
1081 		seq_printf(m, ",gid=%u",
1082 			   from_kgid_munged(&init_user_ns, sbinfo->gid));
1083 	if (sbinfo->mode != 0755)
1084 		seq_printf(m, ",mode=%o", sbinfo->mode);
1085 	if (sbinfo->max_inodes != -1)
1086 		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);
1087 
1088 	hpage_size /= 1024;
1089 	mod = 'K';
1090 	if (hpage_size >= 1024) {
1091 		hpage_size /= 1024;
1092 		mod = 'M';
1093 	}
1094 	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
1095 	if (spool) {
1096 		if (spool->max_hpages != -1)
1097 			seq_printf(m, ",size=%llu",
1098 				   (unsigned long long)spool->max_hpages << hpage_shift);
1099 		if (spool->min_hpages != -1)
1100 			seq_printf(m, ",min_size=%llu",
1101 				   (unsigned long long)spool->min_hpages << hpage_shift);
1102 	}
1103 	return 0;
1104 }
1105 
1106 static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1107 {
1108 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
1109 	struct hstate *h = hstate_inode(d_inode(dentry));
1110 	u64 id = huge_encode_dev(dentry->d_sb->s_dev);
1111 
1112 	buf->f_fsid = u64_to_fsid(id);
1113 	buf->f_type = HUGETLBFS_MAGIC;
1114 	buf->f_bsize = huge_page_size(h);
1115 	if (sbinfo) {
1116 		spin_lock(&sbinfo->stat_lock);
1117 		/* If no limits set, just report 0 or -1 for max/free/used
1118 		 * blocks, like simple_statfs() */
1119 		if (sbinfo->spool) {
1120 			long free_pages;
1121 
1122 			spin_lock_irq(&sbinfo->spool->lock);
1123 			buf->f_blocks = sbinfo->spool->max_hpages;
1124 			free_pages = sbinfo->spool->max_hpages
1125 				- sbinfo->spool->used_hpages;
1126 			buf->f_bavail = buf->f_bfree = free_pages;
1127 			spin_unlock_irq(&sbinfo->spool->lock);
1128 			buf->f_files = sbinfo->max_inodes;
1129 			buf->f_ffree = sbinfo->free_inodes;
1130 		}
1131 		spin_unlock(&sbinfo->stat_lock);
1132 	}
1133 	buf->f_namelen = NAME_MAX;
1134 	return 0;
1135 }
1136 
1137 static void hugetlbfs_put_super(struct super_block *sb)
1138 {
1139 	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
1140 
1141 	if (sbi) {
1142 		sb->s_fs_info = NULL;
1143 
1144 		if (sbi->spool)
1145 			hugepage_put_subpool(sbi->spool);
1146 
1147 		kfree(sbi);
1148 	}
1149 }
1150 
1151 static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
1152 {
1153 	if (sbinfo->free_inodes >= 0) {
1154 		spin_lock(&sbinfo->stat_lock);
1155 		if (unlikely(!sbinfo->free_inodes)) {
1156 			spin_unlock(&sbinfo->stat_lock);
1157 			return 0;
1158 		}
1159 		sbinfo->free_inodes--;
1160 		spin_unlock(&sbinfo->stat_lock);
1161 	}
1162 
1163 	return 1;
1164 }
1165 
1166 static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
1167 {
1168 	if (sbinfo->free_inodes >= 0) {
1169 		spin_lock(&sbinfo->stat_lock);
1170 		sbinfo->free_inodes++;
1171 		spin_unlock(&sbinfo->stat_lock);
1172 	}
1173 }
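/*
 * max_inodes/free_inodes are -1 when "nr_inodes" is not specified, so
 * the "free_inodes >= 0" checks make both helpers no-ops in the
 * unlimited case.
 */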
1174 
1175 
1176 static struct kmem_cache *hugetlbfs_inode_cachep;
1177 
1178 static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
1179 {
1180 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
1181 	struct hugetlbfs_inode_info *p;
1182 
1183 	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
1184 		return NULL;
1185 	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
1186 	if (unlikely(!p)) {
1187 		hugetlbfs_inc_free_inodes(sbinfo);
1188 		return NULL;
1189 	}
1190 	return &p->vfs_inode;
1191 }
1192 
1193 static void hugetlbfs_free_inode(struct inode *inode)
1194 {
1195 	trace_hugetlbfs_free_inode(inode);
1196 	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1197 }
1198 
1199 static void hugetlbfs_destroy_inode(struct inode *inode)
1200 {
1201 	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
1202 }
1203 
1204 static const struct address_space_operations hugetlbfs_aops = {
1205 	.write_begin	= hugetlbfs_write_begin,
1206 	.write_end	= hugetlbfs_write_end,
1207 	.dirty_folio	= noop_dirty_folio,
1208 	.migrate_folio  = hugetlbfs_migrate_folio,
1209 	.error_remove_folio	= hugetlbfs_error_remove_folio,
1210 };
1211 
1212 
1213 static void init_once(void *foo)
1214 {
1215 	struct hugetlbfs_inode_info *ei = foo;
1216 
1217 	inode_init_once(&ei->vfs_inode);
1218 }
1219 
1220 static const struct file_operations hugetlbfs_file_operations = {
1221 	.read_iter		= hugetlbfs_read_iter,
1222 	.mmap			= hugetlbfs_file_mmap,
1223 	.fsync			= noop_fsync,
1224 	.get_unmapped_area	= hugetlb_get_unmapped_area,
1225 	.llseek			= default_llseek,
1226 	.fallocate		= hugetlbfs_fallocate,
1227 	.fop_flags		= FOP_HUGE_PAGES,
1228 };
1229 
1230 static const struct inode_operations hugetlbfs_dir_inode_operations = {
1231 	.create		= hugetlbfs_create,
1232 	.lookup		= simple_lookup,
1233 	.link		= simple_link,
1234 	.unlink		= simple_unlink,
1235 	.symlink	= hugetlbfs_symlink,
1236 	.mkdir		= hugetlbfs_mkdir,
1237 	.rmdir		= simple_rmdir,
1238 	.mknod		= hugetlbfs_mknod,
1239 	.rename		= simple_rename,
1240 	.setattr	= hugetlbfs_setattr,
1241 	.tmpfile	= hugetlbfs_tmpfile,
1242 };
1243 
1244 static const struct inode_operations hugetlbfs_inode_operations = {
1245 	.setattr	= hugetlbfs_setattr,
1246 };
1247 
1248 static const struct super_operations hugetlbfs_ops = {
1249 	.alloc_inode    = hugetlbfs_alloc_inode,
1250 	.free_inode     = hugetlbfs_free_inode,
1251 	.destroy_inode  = hugetlbfs_destroy_inode,
1252 	.evict_inode	= hugetlbfs_evict_inode,
1253 	.statfs		= hugetlbfs_statfs,
1254 	.put_super	= hugetlbfs_put_super,
1255 	.show_options	= hugetlbfs_show_options,
1256 };
1257 
1258 /*
1259  * Convert size option passed from command line to number of huge pages
1260  * in the pool specified by hstate.  Size option could be in bytes
1261  * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
1262  */
1263 static long
1264 hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
1265 			 enum hugetlbfs_size_type val_type)
1266 {
1267 	if (val_type == NO_SIZE)
1268 		return -1;
1269 
1270 	if (val_type == SIZE_PERCENT) {
1271 		size_opt <<= huge_page_shift(h);
1272 		size_opt *= h->max_huge_pages;
1273 		do_div(size_opt, 100);
1274 	}
1275 
1276 	size_opt >>= huge_page_shift(h);
1277 	return size_opt;
1278 }
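/*
 * Example: on a 2 MiB hstate with a 1024-page pool, "size=50%" arrives
 * here as size_opt = 50 with SIZE_PERCENT and converts to
 * ((50 << 21) * 1024 / 100) >> 21 = 512 huge pages; "size=1G" arrives
 * as SIZE_STD and likewise converts to 512.
 */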
1279 
1280 /*
1281  * Parse one mount parameter.
1282  */
1283 static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
1284 {
1285 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
1286 	struct fs_parse_result result;
1287 	struct hstate *h;
1288 	char *rest;
1289 	unsigned long ps;
1290 	int opt;
1291 
1292 	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
1293 	if (opt < 0)
1294 		return opt;
1295 
1296 	switch (opt) {
1297 	case Opt_uid:
1298 		ctx->uid = result.uid;
1299 		return 0;
1300 
1301 	case Opt_gid:
1302 		ctx->gid = result.gid;
1303 		return 0;
1304 
1305 	case Opt_mode:
1306 		ctx->mode = result.uint_32 & 01777U;
1307 		return 0;
1308 
1309 	case Opt_size:
1310 		/* memparse() will accept a K/M/G without a digit */
1311 		if (!param->string || !isdigit(param->string[0]))
1312 			goto bad_val;
1313 		ctx->max_size_opt = memparse(param->string, &rest);
1314 		ctx->max_val_type = SIZE_STD;
1315 		if (*rest == '%')
1316 			ctx->max_val_type = SIZE_PERCENT;
1317 		return 0;
1318 
1319 	case Opt_nr_inodes:
1320 		/* memparse() will accept a K/M/G without a digit */
1321 		if (!param->string || !isdigit(param->string[0]))
1322 			goto bad_val;
1323 		ctx->nr_inodes = memparse(param->string, &rest);
1324 		return 0;
1325 
1326 	case Opt_pagesize:
1327 		ps = memparse(param->string, &rest);
1328 		h = size_to_hstate(ps);
1329 		if (!h) {
1330 			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
1331 			return -EINVAL;
1332 		}
1333 		ctx->hstate = h;
1334 		return 0;
1335 
1336 	case Opt_min_size:
1337 		/* memparse() will accept a K/M/G without a digit */
1338 		if (!param->string || !isdigit(param->string[0]))
1339 			goto bad_val;
1340 		ctx->min_size_opt = memparse(param->string, &rest);
1341 		ctx->min_val_type = SIZE_STD;
1342 		if (*rest == '%')
1343 			ctx->min_val_type = SIZE_PERCENT;
1344 		return 0;
1345 
1346 	default:
1347 		return -EINVAL;
1348 	}
1349 
1350 bad_val:
1351 	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
1352 		      param->string, param->key);
1353 }
1354 
1355 /*
1356  * Validate the parsed options.
1357  */
1358 static int hugetlbfs_validate(struct fs_context *fc)
1359 {
1360 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
1361 
1362 	/*
1363 	 * Use huge page pool size (in hstate) to convert the size
1364 	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
1365 	 */
1366 	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1367 						   ctx->max_size_opt,
1368 						   ctx->max_val_type);
1369 	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1370 						   ctx->min_size_opt,
1371 						   ctx->min_val_type);
1372 
1373 	/*
1374 	 * If max_size was specified, then min_size must be smaller
1375 	 */
1376 	if (ctx->max_val_type > NO_SIZE &&
1377 	    ctx->min_hpages > ctx->max_hpages) {
1378 		pr_err("Minimum size can not be greater than maximum size\n");
1379 		return -EINVAL;
1380 	}
1381 
1382 	return 0;
1383 }
1384 
1385 static int
1386 hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
1387 {
1388 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
1389 	struct hugetlbfs_sb_info *sbinfo;
1390 
1391 	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
1392 	if (!sbinfo)
1393 		return -ENOMEM;
1394 	sb->s_fs_info = sbinfo;
1395 	spin_lock_init(&sbinfo->stat_lock);
1396 	sbinfo->hstate		= ctx->hstate;
1397 	sbinfo->max_inodes	= ctx->nr_inodes;
1398 	sbinfo->free_inodes	= ctx->nr_inodes;
1399 	sbinfo->spool		= NULL;
1400 	sbinfo->uid		= ctx->uid;
1401 	sbinfo->gid		= ctx->gid;
1402 	sbinfo->mode		= ctx->mode;
1403 
1404 	/*
1405 	 * Allocate and initialize subpool if maximum or minimum size is
1406 	 * specified.  Any needed reservations (for minimum size) are taken
1407 	 * when the subpool is created.
1408 	 */
1409 	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
1410 		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
1411 						     ctx->max_hpages,
1412 						     ctx->min_hpages);
1413 		if (!sbinfo->spool)
1414 			goto out_free;
1415 	}
1416 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1417 	sb->s_blocksize = huge_page_size(ctx->hstate);
1418 	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
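	/*
	 * The block size of this filesystem is the huge page size:
	 * hugetlbfs_statfs() reports f_bsize = huge_page_size() and counts
	 * blocks in huge pages, not base pages.
	 */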
1419 	sb->s_magic = HUGETLBFS_MAGIC;
1420 	sb->s_op = &hugetlbfs_ops;
1421 	sb->s_d_flags = DCACHE_DONTCACHE;
1422 	sb->s_time_gran = 1;
1423 
1424 	/*
1425 	 * Due to the special and limited functionality of hugetlbfs, it does
1426 	 * not work well as a stacking filesystem.
1427 	 */
1428 	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
1429 	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
1430 	if (!sb->s_root)
1431 		goto out_free;
1432 	return 0;
1433 out_free:
1434 	kfree(sbinfo->spool);
1435 	kfree(sbinfo);
1436 	return -ENOMEM;
1437 }
1438 
1439 static int hugetlbfs_get_tree(struct fs_context *fc)
1440 {
1441 	int err = hugetlbfs_validate(fc);
1442 	if (err)
1443 		return err;
1444 	return get_tree_nodev(fc, hugetlbfs_fill_super);
1445 }
1446 
1447 static void hugetlbfs_fs_context_free(struct fs_context *fc)
1448 {
1449 	kfree(fc->fs_private);
1450 }
1451 
1452 static const struct fs_context_operations hugetlbfs_fs_context_ops = {
1453 	.free		= hugetlbfs_fs_context_free,
1454 	.parse_param	= hugetlbfs_parse_param,
1455 	.get_tree	= hugetlbfs_get_tree,
1456 };
1457 
1458 static int hugetlbfs_init_fs_context(struct fs_context *fc)
1459 {
1460 	struct hugetlbfs_fs_context *ctx;
1461 
1462 	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
1463 	if (!ctx)
1464 		return -ENOMEM;
1465 
1466 	ctx->max_hpages	= -1; /* No limit on size by default */
1467 	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
1468 	ctx->uid	= current_fsuid();
1469 	ctx->gid	= current_fsgid();
1470 	ctx->mode	= 0755;
1471 	ctx->hstate	= &default_hstate;
1472 	ctx->min_hpages	= -1; /* No default minimum size */
1473 	ctx->max_val_type = NO_SIZE;
1474 	ctx->min_val_type = NO_SIZE;
1475 	fc->fs_private = ctx;
1476 	fc->ops	= &hugetlbfs_fs_context_ops;
1477 	return 0;
1478 }
1479 
1480 static struct file_system_type hugetlbfs_fs_type = {
1481 	.name			= "hugetlbfs",
1482 	.init_fs_context	= hugetlbfs_init_fs_context,
1483 	.parameters		= hugetlb_fs_parameters,
1484 	.kill_sb		= kill_litter_super,
1485 	.fs_flags               = FS_ALLOW_IDMAP,
1486 };
1487 
1488 static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
1489 
1490 static int can_do_hugetlb_shm(void)
1491 {
1492 	kgid_t shm_group;
1493 	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1494 	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
1495 }
1496 
1497 static int get_hstate_idx(int page_size_log)
1498 {
1499 	struct hstate *h = hstate_sizelog(page_size_log);
1500 
1501 	if (!h)
1502 		return -1;
1503 	return hstate_index(h);
1504 }
1505 
1506 /*
1507  * Note that size should be aligned to the proper hugepage size on the caller
1508  * side; otherwise hugetlb_reserve_pages reserves one fewer hugepage than intended.
1509  */
1510 struct file *hugetlb_file_setup(const char *name, size_t size,
1511 				vm_flags_t acctflag, int creat_flags,
1512 				int page_size_log)
1513 {
1514 	struct inode *inode;
1515 	struct vfsmount *mnt;
1516 	int hstate_idx;
1517 	struct file *file;
1518 
1519 	hstate_idx = get_hstate_idx(page_size_log);
1520 	if (hstate_idx < 0)
1521 		return ERR_PTR(-ENODEV);
1522 
1523 	mnt = hugetlbfs_vfsmount[hstate_idx];
1524 	if (!mnt)
1525 		return ERR_PTR(-ENOENT);
1526 
1527 	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
1528 		struct ucounts *ucounts = current_ucounts();
1529 
1530 		if (user_shm_lock(size, ucounts)) {
1531 			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
1532 				current->comm, current->pid);
1533 			user_shm_unlock(size, ucounts);
1534 		}
1535 		return ERR_PTR(-EPERM);
1536 	}
1537 
1538 	file = ERR_PTR(-ENOSPC);
1539 	/* hugetlbfs_vfsmount[] mounts do not use idmapped mounts.  */
1540 	inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL,
1541 				    S_IFREG | S_IRWXUGO, 0);
1542 	if (!inode)
1543 		goto out;
1544 	if (creat_flags == HUGETLB_SHMFS_INODE)
1545 		inode->i_flags |= S_PRIVATE;
1546 
1547 	inode->i_size = size;
1548 	clear_nlink(inode);
1549 
1550 	if (hugetlb_reserve_pages(inode, 0,
1551 			size >> huge_page_shift(hstate_inode(inode)), NULL,
1552 			acctflag) < 0)
1553 		file = ERR_PTR(-ENOMEM);
1554 	else
1555 		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
1556 					&hugetlbfs_file_operations);
1557 	if (!IS_ERR(file))
1558 		return file;
1559 
1560 	iput(inode);
1561 out:
1562 	return file;
1563 }
1564 
1565 static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
1566 {
1567 	struct fs_context *fc;
1568 	struct vfsmount *mnt;
1569 
1570 	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
1571 	if (IS_ERR(fc)) {
1572 		mnt = ERR_CAST(fc);
1573 	} else {
1574 		struct hugetlbfs_fs_context *ctx = fc->fs_private;
1575 		ctx->hstate = h;
1576 		mnt = fc_mount_longterm(fc);
1577 		put_fs_context(fc);
1578 	}
1579 	if (IS_ERR(mnt))
1580 		pr_err("Cannot mount internal hugetlbfs for page size %luK",
1581 		       huge_page_size(h) / SZ_1K);
1582 	return mnt;
1583 }
1584 
1585 static int __init init_hugetlbfs_fs(void)
1586 {
1587 	struct vfsmount *mnt;
1588 	struct hstate *h;
1589 	int error;
1590 	int i;
1591 
1592 	if (!hugepages_supported()) {
1593 		pr_info("disabling because there are no supported hugepage sizes\n");
1594 		return -ENOTSUPP;
1595 	}
1596 
1597 	error = -ENOMEM;
1598 	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
1599 					sizeof(struct hugetlbfs_inode_info),
1600 					0, SLAB_ACCOUNT, init_once);
1601 	if (hugetlbfs_inode_cachep == NULL)
1602 		goto out;
1603 
1604 	error = register_filesystem(&hugetlbfs_fs_type);
1605 	if (error)
1606 		goto out_free;
1607 
1608 	/* default hstate mount is required */
1609 	mnt = mount_one_hugetlbfs(&default_hstate);
1610 	if (IS_ERR(mnt)) {
1611 		error = PTR_ERR(mnt);
1612 		goto out_unreg;
1613 	}
1614 	hugetlbfs_vfsmount[default_hstate_idx] = mnt;
1615 
1616 	/* other hstates are optional */
1617 	i = 0;
1618 	for_each_hstate(h) {
1619 		if (i == default_hstate_idx) {
1620 			i++;
1621 			continue;
1622 		}
1623 
1624 		mnt = mount_one_hugetlbfs(h);
1625 		if (IS_ERR(mnt))
1626 			hugetlbfs_vfsmount[i] = NULL;
1627 		else
1628 			hugetlbfs_vfsmount[i] = mnt;
1629 		i++;
1630 	}
1631 
1632 	return 0;
1633 
1634  out_unreg:
1635 	(void)unregister_filesystem(&hugetlbfs_fs_type);
1636  out_free:
1637 	kmem_cache_destroy(hugetlbfs_inode_cachep);
1638  out:
1639 	return error;
1640 }
1641 fs_initcall(init_hugetlbfs_fs)
1642