xref: /linux/mm/shmem.c (revision f850548ef88e5ff9e40bae9e1a7140bef0653e6b)
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *		 2000 Transmeta Corp.
6  *		 2000-2001 Christoph Rohland
7  *		 2000-2001 SAP AG
8  *		 2002 Red Hat Inc.
9  * Copyright (C) 2002-2005 Hugh Dickins.
10  * Copyright (C) 2002-2005 VERITAS Software Corporation.
11  * Copyright (C) 2004 Andi Kleen, SuSE Labs
12  *
13  * Extended attribute support for tmpfs:
14  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
15  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
16  *
17  * tiny-shmem:
18  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
19  *
20  * This file is released under the GPL.
21  */
22 
23 #include <linux/fs.h>
24 #include <linux/init.h>
25 #include <linux/vfs.h>
26 #include <linux/mount.h>
27 #include <linux/pagemap.h>
28 #include <linux/file.h>
29 #include <linux/mm.h>
30 #include <linux/module.h>
31 #include <linux/percpu_counter.h>
32 #include <linux/swap.h>
33 
34 static struct vfsmount *shm_mnt;
35 
36 #ifdef CONFIG_SHMEM
37 /*
38  * This virtual memory filesystem is heavily based on the ramfs. It
39  * extends ramfs by the ability to use swap and honor resource limits
40  * which makes it a completely usable filesystem.
41  */
42 
43 #include <linux/xattr.h>
44 #include <linux/exportfs.h>
45 #include <linux/posix_acl.h>
46 #include <linux/generic_acl.h>
47 #include <linux/mman.h>
48 #include <linux/string.h>
49 #include <linux/slab.h>
50 #include <linux/backing-dev.h>
51 #include <linux/shmem_fs.h>
52 #include <linux/writeback.h>
53 #include <linux/blkdev.h>
54 #include <linux/security.h>
55 #include <linux/swapops.h>
56 #include <linux/mempolicy.h>
57 #include <linux/namei.h>
58 #include <linux/ctype.h>
59 #include <linux/migrate.h>
60 #include <linux/highmem.h>
61 #include <linux/seq_file.h>
62 #include <linux/magic.h>
63 
64 #include <asm/uaccess.h>
65 #include <asm/div64.h>
66 #include <asm/pgtable.h>
67 
68 /*
69  * The maximum size of a shmem/tmpfs file is limited by the maximum size of
70  * its triple-indirect swap vector - see illustration at shmem_swp_entry().
71  *
72  * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
73  * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
74  * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
75  * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
76  *
77  * We use / and * instead of shifts in the definitions below, so that the swap
78  * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
79  */
80 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
81 #define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
82 
83 #define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
84 #define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
85 
86 #define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
87 #define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
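/*
 * Rough arithmetic behind the limits above, for illustration: with 4kB
 * pages and a 4-byte unsigned long (32-bit), ENTRIES_PER_PAGE = 4096/4 =
 * 1024, so SHMSWP_MAX_INDEX ~= (1024*1024/2) * 1025 ~= 5.4e8 pages, i.e.
 * just over 2TB of data.  With an 8-byte unsigned long (64-bit),
 * ENTRIES_PER_PAGE halves to 512 and the maximum index shrinks by roughly
 * a factor of eight, matching the "one eighth of that" note above.
 */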
88 
89 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
90 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
91 
92 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
93 #define SHMEM_PAGEIN	 VM_READ
94 #define SHMEM_TRUNCATE	 VM_WRITE
95 
96 /* Definition to limit shmem_truncate's steps between cond_rescheds */
97 #define LATENCY_LIMIT	 64
98 
99 /* Pretend that each entry is of this size in directory's i_size */
100 #define BOGO_DIRENT_SIZE 20
101 
102 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
103 enum sgp_type {
104 	SGP_READ,	/* don't exceed i_size, don't allocate page */
105 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
106 	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
107 	SGP_WRITE,	/* may exceed i_size, may allocate page */
108 };
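/*
 * For orientation, among the callers in this file: SGP_READ comes from the
 * read and partial-page-truncate paths, SGP_CACHE from shmem_fault and
 * shmem_readpage, SGP_DIRTY from reads done on behalf of a stacking
 * filesystem, and SGP_WRITE from shmem_write_begin.
 */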
109 
110 #ifdef CONFIG_TMPFS
111 static unsigned long shmem_default_max_blocks(void)
112 {
113 	return totalram_pages / 2;
114 }
115 
116 static unsigned long shmem_default_max_inodes(void)
117 {
118 	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
119 }
120 #endif
121 
122 static int shmem_getpage(struct inode *inode, unsigned long idx,
123 			 struct page **pagep, enum sgp_type sgp, int *type);
124 
125 static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
126 {
127 	/*
128 	 * The above definition of ENTRIES_PER_PAGE, and the use of
129 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
130 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
131 	 *
132 	 * Mobility flags are masked out as swap vectors cannot move
133 	 */
134 	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
135 				PAGE_CACHE_SHIFT-PAGE_SHIFT);
136 }
137 
138 static inline void shmem_dir_free(struct page *page)
139 {
140 	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
141 }
142 
143 static struct page **shmem_dir_map(struct page *page)
144 {
145 	return (struct page **)kmap_atomic(page, KM_USER0);
146 }
147 
148 static inline void shmem_dir_unmap(struct page **dir)
149 {
150 	kunmap_atomic(dir, KM_USER0);
151 }
152 
153 static swp_entry_t *shmem_swp_map(struct page *page)
154 {
155 	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
156 }
157 
158 static inline void shmem_swp_balance_unmap(void)
159 {
160 	/*
161 	 * When passing a pointer to an i_direct entry, to code which
162 	 * also handles indirect entries and so will shmem_swp_unmap,
163 	 * we must arrange for the preempt count to remain in balance.
164 	 * What kmap_atomic of a lowmem page does depends on config
165 	 * and architecture, so pretend to kmap_atomic some lowmem page.
166 	 */
167 	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
168 }
169 
170 static inline void shmem_swp_unmap(swp_entry_t *entry)
171 {
172 	kunmap_atomic(entry, KM_USER1);
173 }
174 
175 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
176 {
177 	return sb->s_fs_info;
178 }
179 
180 /*
181  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
182  * for shared memory and for shared anonymous (/dev/zero) mappings
183  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
184  * consistent with the pre-accounting of private mappings ...
185  */
186 static inline int shmem_acct_size(unsigned long flags, loff_t size)
187 {
188 	return (flags & VM_NORESERVE) ?
189 		0 : security_vm_enough_memory_kern(VM_ACCT(size));
190 }
191 
192 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
193 {
194 	if (!(flags & VM_NORESERVE))
195 		vm_unacct_memory(VM_ACCT(size));
196 }
197 
198 /*
199  * ... whereas tmpfs objects are accounted incrementally as
200  * pages are allocated, in order to allow huge sparse files.
201  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
202  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
203  */
204 static inline int shmem_acct_block(unsigned long flags)
205 {
206 	return (flags & VM_NORESERVE) ?
207 		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
208 }
209 
210 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
211 {
212 	if (flags & VM_NORESERVE)
213 		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
214 }
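/*
 * In other words: objects set up without VM_NORESERVE (SysV shm, shared
 * anonymous mappings) are charged for their whole size once, up front, and
 * never per block; regular tmpfs files are created with VM_NORESERVE (see
 * shmem_mknod) and are instead charged one page at a time through
 * shmem_acct_block().
 */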
215 
216 static const struct super_operations shmem_ops;
217 static const struct address_space_operations shmem_aops;
218 static const struct file_operations shmem_file_operations;
219 static const struct inode_operations shmem_inode_operations;
220 static const struct inode_operations shmem_dir_inode_operations;
221 static const struct inode_operations shmem_special_inode_operations;
222 static const struct vm_operations_struct shmem_vm_ops;
223 
224 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
225 	.ra_pages	= 0,	/* No readahead */
226 	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
227 };
228 
229 static LIST_HEAD(shmem_swaplist);
230 static DEFINE_MUTEX(shmem_swaplist_mutex);
231 
232 static void shmem_free_blocks(struct inode *inode, long pages)
233 {
234 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
235 	if (sbinfo->max_blocks) {
236 		percpu_counter_add(&sbinfo->used_blocks, -pages);
237 		spin_lock(&inode->i_lock);
238 		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
239 		spin_unlock(&inode->i_lock);
240 	}
241 }
242 
243 static int shmem_reserve_inode(struct super_block *sb)
244 {
245 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
246 	if (sbinfo->max_inodes) {
247 		spin_lock(&sbinfo->stat_lock);
248 		if (!sbinfo->free_inodes) {
249 			spin_unlock(&sbinfo->stat_lock);
250 			return -ENOSPC;
251 		}
252 		sbinfo->free_inodes--;
253 		spin_unlock(&sbinfo->stat_lock);
254 	}
255 	return 0;
256 }
257 
258 static void shmem_free_inode(struct super_block *sb)
259 {
260 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
261 	if (sbinfo->max_inodes) {
262 		spin_lock(&sbinfo->stat_lock);
263 		sbinfo->free_inodes++;
264 		spin_unlock(&sbinfo->stat_lock);
265 	}
266 }
267 
268 /**
269  * shmem_recalc_inode - recalculate the size of an inode
270  * @inode: inode to recalc
271  *
272  * We have to calculate the free blocks since the mm can drop
273  * undirtied hole pages behind our back.
274  *
275  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
276  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
277  *
278  * It has to be called with the spinlock held.
279  */
280 static void shmem_recalc_inode(struct inode *inode)
281 {
282 	struct shmem_inode_info *info = SHMEM_I(inode);
283 	long freed;
284 
285 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
286 	if (freed > 0) {
287 		info->alloced -= freed;
288 		shmem_unacct_blocks(info->flags, freed);
289 		shmem_free_blocks(inode, freed);
290 	}
291 }
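/*
 * For instance (made-up numbers): if info->alloced is 8 while the mapping
 * now holds 5 pages and 1 entry is on swap, then freed = 8 - 1 - 5 = 2,
 * and those two pages' worth of accounting is returned via
 * shmem_unacct_blocks() and shmem_free_blocks().
 */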
292 
293 /**
294  * shmem_swp_entry - find the swap vector position in the info structure
295  * @info:  info structure for the inode
296  * @index: index of the page to find
297  * @page:  optional page to add to the structure. Has to be preset to
298  *         all zeros
299  *
300  * If there is no space allocated yet it will return NULL when
301  * page is NULL, else it will use the page for the needed block,
302  * setting it to NULL on return to indicate that it has been used.
303  *
304  * The swap vector is organized the following way:
305  *
306  * There are SHMEM_NR_DIRECT entries directly stored in the
307  * shmem_inode_info structure. So small files do not need an additional
308  * allocation.
309  *
310  * For pages with index > SHMEM_NR_DIRECT there is the pointer
311  * i_indirect which points to a page which holds in the first half
312  * doubly indirect blocks, in the second half triple indirect blocks:
313  *
314  * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
315  * following layout (for SHMEM_NR_DIRECT == 16):
316  *
317  * i_indirect -> dir --> 16-19
318  * 	      |	     +-> 20-23
319  * 	      |
320  * 	      +-->dir2 --> 24-27
321  * 	      |	       +-> 28-31
322  * 	      |	       +-> 32-35
323  * 	      |	       +-> 36-39
324  * 	      |
325  * 	      +-->dir3 --> 40-43
326  * 	       	       +-> 44-47
327  * 	      	       +-> 48-51
328  * 	      	       +-> 52-55
329  */
330 static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
331 {
332 	unsigned long offset;
333 	struct page **dir;
334 	struct page *subdir;
335 
336 	if (index < SHMEM_NR_DIRECT) {
337 		shmem_swp_balance_unmap();
338 		return info->i_direct+index;
339 	}
340 	if (!info->i_indirect) {
341 		if (page) {
342 			info->i_indirect = *page;
343 			*page = NULL;
344 		}
345 		return NULL;			/* need another page */
346 	}
347 
348 	index -= SHMEM_NR_DIRECT;
349 	offset = index % ENTRIES_PER_PAGE;
350 	index /= ENTRIES_PER_PAGE;
351 	dir = shmem_dir_map(info->i_indirect);
352 
353 	if (index >= ENTRIES_PER_PAGE/2) {
354 		index -= ENTRIES_PER_PAGE/2;
355 		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
356 		index %= ENTRIES_PER_PAGE;
357 		subdir = *dir;
358 		if (!subdir) {
359 			if (page) {
360 				*dir = *page;
361 				*page = NULL;
362 			}
363 			shmem_dir_unmap(dir);
364 			return NULL;		/* need another page */
365 		}
366 		shmem_dir_unmap(dir);
367 		dir = shmem_dir_map(subdir);
368 	}
369 
370 	dir += index;
371 	subdir = *dir;
372 	if (!subdir) {
373 		if (!page || !(subdir = *page)) {
374 			shmem_dir_unmap(dir);
375 			return NULL;		/* need a page */
376 		}
377 		*dir = subdir;
378 		*page = NULL;
379 	}
380 	shmem_dir_unmap(dir);
381 	return shmem_swp_map(subdir) + offset;
382 }
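/*
 * Walking the example layout above (ENTRIES_PER_PAGE == 4, SHMEM_NR_DIRECT
 * == 16), a lookup of index 30 goes: 30 - 16 = 14, so offset = 14 % 4 = 2
 * and index = 14 / 4 = 3.  Since 3 >= ENTRIES_PER_PAGE/2 we are in the
 * triple-indirect half: dir advances to slot 2 + (3-2)/4 = 2 of i_indirect
 * (the dir2 page), the remaining index becomes (3-2) % 4 = 1, so dir2[1]
 * is the swap page covering 28-31, and entry 2 within it is page 30.
 */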
383 
384 static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
385 {
386 	long incdec = value? 1: -1;
387 
388 	entry->val = value;
389 	info->swapped += incdec;
390 	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
391 		struct page *page = kmap_atomic_to_page(entry);
392 		set_page_private(page, page_private(page) + incdec);
393 	}
394 }
395 
396 /**
397  * shmem_swp_alloc - get the position of the swap entry for the page.
398  * @info:	info structure for the inode
399  * @index:	index of the page to find
400  * @sgp:	check and recheck i_size? skip allocation?
401  *
402  * If the entry does not exist, allocate it.
403  */
404 static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
405 {
406 	struct inode *inode = &info->vfs_inode;
407 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
408 	struct page *page = NULL;
409 	swp_entry_t *entry;
410 
411 	if (sgp != SGP_WRITE &&
412 	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
413 		return ERR_PTR(-EINVAL);
414 
415 	while (!(entry = shmem_swp_entry(info, index, &page))) {
416 		if (sgp == SGP_READ)
417 			return shmem_swp_map(ZERO_PAGE(0));
418 		/*
419 		 * Test used_blocks against 1 less than max_blocks, since we have 1 data
420 		 * page (and perhaps indirect index pages) yet to allocate:
421 		 * a waste to allocate index if we cannot allocate data.
422 		 */
423 		if (sbinfo->max_blocks) {
424 			if (percpu_counter_compare(&sbinfo->used_blocks,
425 						sbinfo->max_blocks - 1) >= 0)
426 				return ERR_PTR(-ENOSPC);
427 			percpu_counter_inc(&sbinfo->used_blocks);
428 			spin_lock(&inode->i_lock);
429 			inode->i_blocks += BLOCKS_PER_PAGE;
430 			spin_unlock(&inode->i_lock);
431 		}
432 
433 		spin_unlock(&info->lock);
434 		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
435 		spin_lock(&info->lock);
436 
437 		if (!page) {
438 			shmem_free_blocks(inode, 1);
439 			return ERR_PTR(-ENOMEM);
440 		}
441 		if (sgp != SGP_WRITE &&
442 		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
443 			entry = ERR_PTR(-EINVAL);
444 			break;
445 		}
446 		if (info->next_index <= index)
447 			info->next_index = index + 1;
448 	}
449 	if (page) {
450 		/* another task gave its page, or truncated the file */
451 		shmem_free_blocks(inode, 1);
452 		shmem_dir_free(page);
453 	}
454 	if (info->next_index <= index && !IS_ERR(entry))
455 		info->next_index = index + 1;
456 	return entry;
457 }
458 
459 /**
460  * shmem_free_swp - free some swap entries in a directory
461  * @dir:        pointer to the directory
462  * @edir:       pointer after last entry of the directory
463  * @punch_lock: pointer to spinlock when needed for the holepunch case
464  */
465 static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
466 						spinlock_t *punch_lock)
467 {
468 	spinlock_t *punch_unlock = NULL;
469 	swp_entry_t *ptr;
470 	int freed = 0;
471 
472 	for (ptr = dir; ptr < edir; ptr++) {
473 		if (ptr->val) {
474 			if (unlikely(punch_lock)) {
475 				punch_unlock = punch_lock;
476 				punch_lock = NULL;
477 				spin_lock(punch_unlock);
478 				if (!ptr->val)
479 					continue;
480 			}
481 			free_swap_and_cache(*ptr);
482 			*ptr = (swp_entry_t){0};
483 			freed++;
484 		}
485 	}
486 	if (punch_unlock)
487 		spin_unlock(punch_unlock);
488 	return freed;
489 }
490 
491 static int shmem_map_and_free_swp(struct page *subdir, int offset,
492 		int limit, struct page ***dir, spinlock_t *punch_lock)
493 {
494 	swp_entry_t *ptr;
495 	int freed = 0;
496 
497 	ptr = shmem_swp_map(subdir);
498 	for (; offset < limit; offset += LATENCY_LIMIT) {
499 		int size = limit - offset;
500 		if (size > LATENCY_LIMIT)
501 			size = LATENCY_LIMIT;
502 		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
503 							punch_lock);
504 		if (need_resched()) {
505 			shmem_swp_unmap(ptr);
506 			if (*dir) {
507 				shmem_dir_unmap(*dir);
508 				*dir = NULL;
509 			}
510 			cond_resched();
511 			ptr = shmem_swp_map(subdir);
512 		}
513 	}
514 	shmem_swp_unmap(ptr);
515 	return freed;
516 }
517 
518 static void shmem_free_pages(struct list_head *next)
519 {
520 	struct page *page;
521 	int freed = 0;
522 
523 	do {
524 		page = container_of(next, struct page, lru);
525 		next = next->next;
526 		shmem_dir_free(page);
527 		freed++;
528 		if (freed >= LATENCY_LIMIT) {
529 			cond_resched();
530 			freed = 0;
531 		}
532 	} while (next);
533 }
534 
535 static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
536 {
537 	struct shmem_inode_info *info = SHMEM_I(inode);
538 	unsigned long idx;
539 	unsigned long size;
540 	unsigned long limit;
541 	unsigned long stage;
542 	unsigned long diroff;
543 	struct page **dir;
544 	struct page *topdir;
545 	struct page *middir;
546 	struct page *subdir;
547 	swp_entry_t *ptr;
548 	LIST_HEAD(pages_to_free);
549 	long nr_pages_to_free = 0;
550 	long nr_swaps_freed = 0;
551 	int offset;
552 	int freed;
553 	int punch_hole;
554 	spinlock_t *needs_lock;
555 	spinlock_t *punch_lock;
556 	unsigned long upper_limit;
557 
558 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
559 	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
560 	if (idx >= info->next_index)
561 		return;
562 
563 	spin_lock(&info->lock);
564 	info->flags |= SHMEM_TRUNCATE;
565 	if (likely(end == (loff_t) -1)) {
566 		limit = info->next_index;
567 		upper_limit = SHMEM_MAX_INDEX;
568 		info->next_index = idx;
569 		needs_lock = NULL;
570 		punch_hole = 0;
571 	} else {
572 		if (end + 1 >= inode->i_size) {	/* we may free a little more */
573 			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
574 							PAGE_CACHE_SHIFT;
575 			upper_limit = SHMEM_MAX_INDEX;
576 		} else {
577 			limit = (end + 1) >> PAGE_CACHE_SHIFT;
578 			upper_limit = limit;
579 		}
580 		needs_lock = &info->lock;
581 		punch_hole = 1;
582 	}
583 
584 	topdir = info->i_indirect;
585 	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
586 		info->i_indirect = NULL;
587 		nr_pages_to_free++;
588 		list_add(&topdir->lru, &pages_to_free);
589 	}
590 	spin_unlock(&info->lock);
591 
592 	if (info->swapped && idx < SHMEM_NR_DIRECT) {
593 		ptr = info->i_direct;
594 		size = limit;
595 		if (size > SHMEM_NR_DIRECT)
596 			size = SHMEM_NR_DIRECT;
597 		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
598 	}
599 
600 	/*
601 	 * If there are no indirect blocks or we are punching a hole
602 	 * below indirect blocks, nothing to be done.
603 	 */
604 	if (!topdir || limit <= SHMEM_NR_DIRECT)
605 		goto done2;
606 
607 	/*
608 	 * The truncation case has already dropped info->lock, and we're safe
609 	 * because i_size and next_index have already been lowered, preventing
610 	 * access beyond.  But in the punch_hole case, we still need to take
611 	 * the lock when updating the swap directory, because there might be
612 	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
613 	 * shmem_writepage.  However, whenever we find we can remove a whole
614 	 * directory page (not at the misaligned start or end of the range),
615 	 * we first NULLify its pointer in the level above, and then have no
616 	 * need to take the lock when updating its contents: needs_lock and
617 	 * punch_lock (either pointing to info->lock or NULL) manage this.
618 	 */
619 
620 	upper_limit -= SHMEM_NR_DIRECT;
621 	limit -= SHMEM_NR_DIRECT;
622 	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
623 	offset = idx % ENTRIES_PER_PAGE;
624 	idx -= offset;
625 
626 	dir = shmem_dir_map(topdir);
627 	stage = ENTRIES_PER_PAGEPAGE/2;
628 	if (idx < ENTRIES_PER_PAGEPAGE/2) {
629 		middir = topdir;
630 		diroff = idx/ENTRIES_PER_PAGE;
631 	} else {
632 		dir += ENTRIES_PER_PAGE/2;
633 		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
634 		while (stage <= idx)
635 			stage += ENTRIES_PER_PAGEPAGE;
636 		middir = *dir;
637 		if (*dir) {
638 			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
639 				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
640 			if (!diroff && !offset && upper_limit >= stage) {
641 				if (needs_lock) {
642 					spin_lock(needs_lock);
643 					*dir = NULL;
644 					spin_unlock(needs_lock);
645 					needs_lock = NULL;
646 				} else
647 					*dir = NULL;
648 				nr_pages_to_free++;
649 				list_add(&middir->lru, &pages_to_free);
650 			}
651 			shmem_dir_unmap(dir);
652 			dir = shmem_dir_map(middir);
653 		} else {
654 			diroff = 0;
655 			offset = 0;
656 			idx = stage;
657 		}
658 	}
659 
660 	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
661 		if (unlikely(idx == stage)) {
662 			shmem_dir_unmap(dir);
663 			dir = shmem_dir_map(topdir) +
664 			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
665 			while (!*dir) {
666 				dir++;
667 				idx += ENTRIES_PER_PAGEPAGE;
668 				if (idx >= limit)
669 					goto done1;
670 			}
671 			stage = idx + ENTRIES_PER_PAGEPAGE;
672 			middir = *dir;
673 			if (punch_hole)
674 				needs_lock = &info->lock;
675 			if (upper_limit >= stage) {
676 				if (needs_lock) {
677 					spin_lock(needs_lock);
678 					*dir = NULL;
679 					spin_unlock(needs_lock);
680 					needs_lock = NULL;
681 				} else
682 					*dir = NULL;
683 				nr_pages_to_free++;
684 				list_add(&middir->lru, &pages_to_free);
685 			}
686 			shmem_dir_unmap(dir);
687 			cond_resched();
688 			dir = shmem_dir_map(middir);
689 			diroff = 0;
690 		}
691 		punch_lock = needs_lock;
692 		subdir = dir[diroff];
693 		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
694 			if (needs_lock) {
695 				spin_lock(needs_lock);
696 				dir[diroff] = NULL;
697 				spin_unlock(needs_lock);
698 				punch_lock = NULL;
699 			} else
700 				dir[diroff] = NULL;
701 			nr_pages_to_free++;
702 			list_add(&subdir->lru, &pages_to_free);
703 		}
704 		if (subdir && page_private(subdir) /* has swap entries */) {
705 			size = limit - idx;
706 			if (size > ENTRIES_PER_PAGE)
707 				size = ENTRIES_PER_PAGE;
708 			freed = shmem_map_and_free_swp(subdir,
709 					offset, size, &dir, punch_lock);
710 			if (!dir)
711 				dir = shmem_dir_map(middir);
712 			nr_swaps_freed += freed;
713 			if (offset || punch_lock) {
714 				spin_lock(&info->lock);
715 				set_page_private(subdir,
716 					page_private(subdir) - freed);
717 				spin_unlock(&info->lock);
718 			} else
719 				BUG_ON(page_private(subdir) != freed);
720 		}
721 		offset = 0;
722 	}
723 done1:
724 	shmem_dir_unmap(dir);
725 done2:
726 	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
727 		/*
728 		 * Call truncate_inode_pages again: racing shmem_unuse_inode
729 		 * may have swizzled a page in from swap since
730 		 * truncate_pagecache or generic_delete_inode did it, before we
731 		 * lowered next_index.  Also, though shmem_getpage checks
732 		 * i_size before adding to cache, no recheck after: so fix the
733 		 * narrow window there too.
734 		 *
735 		 * Recalling truncate_inode_pages_range and unmap_mapping_range
736 		 * every time for punch_hole (which never got a chance to clear
737 		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
738 		 * yet hardly ever necessary: try to optimize them out later.
739 		 */
740 		truncate_inode_pages_range(inode->i_mapping, start, end);
741 		if (punch_hole)
742 			unmap_mapping_range(inode->i_mapping, start,
743 							end - start, 1);
744 	}
745 
746 	spin_lock(&info->lock);
747 	info->flags &= ~SHMEM_TRUNCATE;
748 	info->swapped -= nr_swaps_freed;
749 	if (nr_pages_to_free)
750 		shmem_free_blocks(inode, nr_pages_to_free);
751 	shmem_recalc_inode(inode);
752 	spin_unlock(&info->lock);
753 
754 	/*
755 	 * Empty swap vector directory pages to be freed?
756 	 */
757 	if (!list_empty(&pages_to_free)) {
758 		pages_to_free.prev->next = NULL;
759 		shmem_free_pages(pages_to_free.next);
760 	}
761 }
762 
763 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
764 {
765 	struct inode *inode = dentry->d_inode;
766 	loff_t newsize = attr->ia_size;
767 	int error;
768 
769 	error = inode_change_ok(inode, attr);
770 	if (error)
771 		return error;
772 
773 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
774 					&& newsize != inode->i_size) {
775 		struct page *page = NULL;
776 
777 		if (newsize < inode->i_size) {
778 			/*
779 			 * If truncating down to a partial page, then
780 			 * if that page is already allocated, hold it
781 			 * in memory until the truncation is over, so
782 			 * truncate_partial_page cannot miss it were
783 			 * it assigned to swap.
784 			 */
785 			if (newsize & (PAGE_CACHE_SIZE-1)) {
786 				(void) shmem_getpage(inode,
787 					newsize >> PAGE_CACHE_SHIFT,
788 						&page, SGP_READ, NULL);
789 				if (page)
790 					unlock_page(page);
791 			}
792 			/*
793 			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
794 			 * detect if any pages might have been added to cache
795 			 * after truncate_inode_pages.  But we needn't bother
796 			 * if it's being fully truncated to zero-length: the
797 			 * nrpages check is efficient enough in that case.
798 			 */
799 			if (newsize) {
800 				struct shmem_inode_info *info = SHMEM_I(inode);
801 				spin_lock(&info->lock);
802 				info->flags &= ~SHMEM_PAGEIN;
803 				spin_unlock(&info->lock);
804 			}
805 		}
806 
807 		/* XXX(truncate): truncate_setsize should be called last */
808 		truncate_setsize(inode, newsize);
809 		if (page)
810 			page_cache_release(page);
811 		shmem_truncate_range(inode, newsize, (loff_t)-1);
812 	}
813 
814 	setattr_copy(inode, attr);
815 #ifdef CONFIG_TMPFS_POSIX_ACL
816 	if (attr->ia_valid & ATTR_MODE)
817 		error = generic_acl_chmod(inode);
818 #endif
819 	return error;
820 }
821 
822 static void shmem_evict_inode(struct inode *inode)
823 {
824 	struct shmem_inode_info *info = SHMEM_I(inode);
825 
826 	if (inode->i_mapping->a_ops == &shmem_aops) {
827 		truncate_inode_pages(inode->i_mapping, 0);
828 		shmem_unacct_size(info->flags, inode->i_size);
829 		inode->i_size = 0;
830 		shmem_truncate_range(inode, 0, (loff_t)-1);
831 		if (!list_empty(&info->swaplist)) {
832 			mutex_lock(&shmem_swaplist_mutex);
833 			list_del_init(&info->swaplist);
834 			mutex_unlock(&shmem_swaplist_mutex);
835 		}
836 	}
837 	BUG_ON(inode->i_blocks);
838 	shmem_free_inode(inode->i_sb);
839 	end_writeback(inode);
840 }
841 
842 static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
843 {
844 	swp_entry_t *ptr;
845 
846 	for (ptr = dir; ptr < edir; ptr++) {
847 		if (ptr->val == entry.val)
848 			return ptr - dir;
849 	}
850 	return -1;
851 }
852 
853 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
854 {
855 	struct inode *inode;
856 	unsigned long idx;
857 	unsigned long size;
858 	unsigned long limit;
859 	unsigned long stage;
860 	struct page **dir;
861 	struct page *subdir;
862 	swp_entry_t *ptr;
863 	int offset;
864 	int error;
865 
866 	idx = 0;
867 	ptr = info->i_direct;
868 	spin_lock(&info->lock);
869 	if (!info->swapped) {
870 		list_del_init(&info->swaplist);
871 		goto lost2;
872 	}
873 	limit = info->next_index;
874 	size = limit;
875 	if (size > SHMEM_NR_DIRECT)
876 		size = SHMEM_NR_DIRECT;
877 	offset = shmem_find_swp(entry, ptr, ptr+size);
878 	if (offset >= 0)
879 		goto found;
880 	if (!info->i_indirect)
881 		goto lost2;
882 
883 	dir = shmem_dir_map(info->i_indirect);
884 	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
885 
886 	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
887 		if (unlikely(idx == stage)) {
888 			shmem_dir_unmap(dir-1);
889 			if (cond_resched_lock(&info->lock)) {
890 				/* check it has not been truncated */
891 				if (limit > info->next_index) {
892 					limit = info->next_index;
893 					if (idx >= limit)
894 						goto lost2;
895 				}
896 			}
897 			dir = shmem_dir_map(info->i_indirect) +
898 			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
899 			while (!*dir) {
900 				dir++;
901 				idx += ENTRIES_PER_PAGEPAGE;
902 				if (idx >= limit)
903 					goto lost1;
904 			}
905 			stage = idx + ENTRIES_PER_PAGEPAGE;
906 			subdir = *dir;
907 			shmem_dir_unmap(dir);
908 			dir = shmem_dir_map(subdir);
909 		}
910 		subdir = *dir;
911 		if (subdir && page_private(subdir)) {
912 			ptr = shmem_swp_map(subdir);
913 			size = limit - idx;
914 			if (size > ENTRIES_PER_PAGE)
915 				size = ENTRIES_PER_PAGE;
916 			offset = shmem_find_swp(entry, ptr, ptr+size);
917 			shmem_swp_unmap(ptr);
918 			if (offset >= 0) {
919 				shmem_dir_unmap(dir);
920 				goto found;
921 			}
922 		}
923 	}
924 lost1:
925 	shmem_dir_unmap(dir-1);
926 lost2:
927 	spin_unlock(&info->lock);
928 	return 0;
929 found:
930 	idx += offset;
931 	inode = igrab(&info->vfs_inode);
932 	spin_unlock(&info->lock);
933 
934 	/*
935 	 * Move _head_ to start search for next from here.
936 	 * But be careful: shmem_evict_inode checks list_empty without taking
937 	 * mutex, and there's an instant in list_move_tail when info->swaplist
938 	 * would appear empty, if it were the only one on shmem_swaplist.  We
939 	 * could avoid doing it if inode NULL; or use this minor optimization.
940 	 */
941 	if (shmem_swaplist.next != &info->swaplist)
942 		list_move_tail(&shmem_swaplist, &info->swaplist);
943 	mutex_unlock(&shmem_swaplist_mutex);
944 
945 	error = 1;
946 	if (!inode)
947 		goto out;
948 	/*
949 	 * Charge page using GFP_KERNEL while we can wait.
950 	 * Charged back to the user (not to the caller) when swap accounting is used.
951 	 * add_to_page_cache() will be called with GFP_NOWAIT.
952 	 */
953 	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
954 	if (error)
955 		goto out;
956 	error = radix_tree_preload(GFP_KERNEL);
957 	if (error) {
958 		mem_cgroup_uncharge_cache_page(page);
959 		goto out;
960 	}
961 	error = 1;
962 
963 	spin_lock(&info->lock);
964 	ptr = shmem_swp_entry(info, idx, NULL);
965 	if (ptr && ptr->val == entry.val) {
966 		error = add_to_page_cache_locked(page, inode->i_mapping,
967 						idx, GFP_NOWAIT);
968 		/* does mem_cgroup_uncharge_cache_page on error */
969 	} else	/* we must compensate for our precharge above */
970 		mem_cgroup_uncharge_cache_page(page);
971 
972 	if (error == -EEXIST) {
973 		struct page *filepage = find_get_page(inode->i_mapping, idx);
974 		error = 1;
975 		if (filepage) {
976 			/*
977 			 * There might be a more uptodate page coming down
978 			 * from a stacked writepage: forget our swappage if so.
979 			 */
980 			if (PageUptodate(filepage))
981 				error = 0;
982 			page_cache_release(filepage);
983 		}
984 	}
985 	if (!error) {
986 		delete_from_swap_cache(page);
987 		set_page_dirty(page);
988 		info->flags |= SHMEM_PAGEIN;
989 		shmem_swp_set(info, ptr, 0);
990 		swap_free(entry);
991 		error = 1;	/* not an error, but entry was found */
992 	}
993 	if (ptr)
994 		shmem_swp_unmap(ptr);
995 	spin_unlock(&info->lock);
996 	radix_tree_preload_end();
997 out:
998 	unlock_page(page);
999 	page_cache_release(page);
1000 	iput(inode);		/* allows for NULL */
1001 	return error;
1002 }
1003 
1004 /*
1005  * shmem_unuse() searches for a possibly swapped-out shmem page.
1006  */
1007 int shmem_unuse(swp_entry_t entry, struct page *page)
1008 {
1009 	struct list_head *p, *next;
1010 	struct shmem_inode_info *info;
1011 	int found = 0;
1012 
1013 	mutex_lock(&shmem_swaplist_mutex);
1014 	list_for_each_safe(p, next, &shmem_swaplist) {
1015 		info = list_entry(p, struct shmem_inode_info, swaplist);
1016 		found = shmem_unuse_inode(info, entry, page);
1017 		cond_resched();
1018 		if (found)
1019 			goto out;
1020 	}
1021 	mutex_unlock(&shmem_swaplist_mutex);
1022 	/*
1023 	 * Can some race bring us here?  We've been holding page lock,
1024 	 * so I think not; but would rather try again later than BUG()
1025 	 */
1026 	unlock_page(page);
1027 	page_cache_release(page);
1028 out:
1029 	return (found < 0) ? found : 0;
1030 }
1031 
1032 /*
1033  * Move the page from the page cache to the swap cache.
1034  */
1035 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1036 {
1037 	struct shmem_inode_info *info;
1038 	swp_entry_t *entry, swap;
1039 	struct address_space *mapping;
1040 	unsigned long index;
1041 	struct inode *inode;
1042 
1043 	BUG_ON(!PageLocked(page));
1044 	mapping = page->mapping;
1045 	index = page->index;
1046 	inode = mapping->host;
1047 	info = SHMEM_I(inode);
1048 	if (info->flags & VM_LOCKED)
1049 		goto redirty;
1050 	if (!total_swap_pages)
1051 		goto redirty;
1052 
1053 	/*
1054 	 * shmem_backing_dev_info's capabilities prevent regular writeback or
1055 	 * sync from ever calling shmem_writepage; but a stacking filesystem
1056 	 * may use the ->writepage of its underlying filesystem, in which case
1057 	 * tmpfs should write out to swap only in response to memory pressure,
1058 	 * and not for the writeback threads or sync.  However, in those cases,
1059 	 * we do still want to check if there's a redundant swappage to be
1060 	 * discarded.
1061 	 */
1062 	if (wbc->for_reclaim)
1063 		swap = get_swap_page();
1064 	else
1065 		swap.val = 0;
1066 
1067 	spin_lock(&info->lock);
1068 	if (index >= info->next_index) {
1069 		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
1070 		goto unlock;
1071 	}
1072 	entry = shmem_swp_entry(info, index, NULL);
1073 	if (entry->val) {
1074 		/*
1075 		 * The more uptodate page coming down from a stacked
1076 		 * writepage should replace our old swappage.
1077 		 */
1078 		free_swap_and_cache(*entry);
1079 		shmem_swp_set(info, entry, 0);
1080 	}
1081 	shmem_recalc_inode(inode);
1082 
1083 	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1084 		delete_from_page_cache(page);
1085 		shmem_swp_set(info, entry, swap.val);
1086 		shmem_swp_unmap(entry);
1087 		if (list_empty(&info->swaplist))
1088 			inode = igrab(inode);
1089 		else
1090 			inode = NULL;
1091 		spin_unlock(&info->lock);
1092 		swap_shmem_alloc(swap);
1093 		BUG_ON(page_mapped(page));
1094 		swap_writepage(page, wbc);
1095 		if (inode) {
1096 			mutex_lock(&shmem_swaplist_mutex);
1097 			/* move instead of add in case we're racing */
1098 			list_move_tail(&info->swaplist, &shmem_swaplist);
1099 			mutex_unlock(&shmem_swaplist_mutex);
1100 			iput(inode);
1101 		}
1102 		return 0;
1103 	}
1104 
1105 	shmem_swp_unmap(entry);
1106 unlock:
1107 	spin_unlock(&info->lock);
1108 	/*
1109 	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
1110 	 * clear SWAP_HAS_CACHE flag.
1111 	 */
1112 	swapcache_free(swap, NULL);
1113 redirty:
1114 	set_page_dirty(page);
1115 	if (wbc->for_reclaim)
1116 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1117 	unlock_page(page);
1118 	return 0;
1119 }
1120 
1121 #ifdef CONFIG_NUMA
1122 #ifdef CONFIG_TMPFS
1123 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1124 {
1125 	char buffer[64];
1126 
1127 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1128 		return;		/* show nothing */
1129 
1130 	mpol_to_str(buffer, sizeof(buffer), mpol, 1);
1131 
1132 	seq_printf(seq, ",mpol=%s", buffer);
1133 }
1134 
1135 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1136 {
1137 	struct mempolicy *mpol = NULL;
1138 	if (sbinfo->mpol) {
1139 		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
1140 		mpol = sbinfo->mpol;
1141 		mpol_get(mpol);
1142 		spin_unlock(&sbinfo->stat_lock);
1143 	}
1144 	return mpol;
1145 }
1146 #endif /* CONFIG_TMPFS */
1147 
1148 static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1149 			struct shmem_inode_info *info, unsigned long idx)
1150 {
1151 	struct mempolicy mpol, *spol;
1152 	struct vm_area_struct pvma;
1153 	struct page *page;
1154 
1155 	spol = mpol_cond_copy(&mpol,
1156 				mpol_shared_policy_lookup(&info->policy, idx));
1157 
1158 	/* Create a pseudo vma that just contains the policy */
1159 	pvma.vm_start = 0;
1160 	pvma.vm_pgoff = idx;
1161 	pvma.vm_ops = NULL;
1162 	pvma.vm_policy = spol;
1163 	page = swapin_readahead(entry, gfp, &pvma, 0);
1164 	return page;
1165 }
1166 
1167 static struct page *shmem_alloc_page(gfp_t gfp,
1168 			struct shmem_inode_info *info, unsigned long idx)
1169 {
1170 	struct vm_area_struct pvma;
1171 
1172 	/* Create a pseudo vma that just contains the policy */
1173 	pvma.vm_start = 0;
1174 	pvma.vm_pgoff = idx;
1175 	pvma.vm_ops = NULL;
1176 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
1177 
1178 	/*
1179 	 * alloc_page_vma() will drop the shared policy reference
1180 	 */
1181 	return alloc_page_vma(gfp, &pvma, 0);
1182 }
1183 #else /* !CONFIG_NUMA */
1184 #ifdef CONFIG_TMPFS
1185 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
1186 {
1187 }
1188 #endif /* CONFIG_TMPFS */
1189 
1190 static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1191 			struct shmem_inode_info *info, unsigned long idx)
1192 {
1193 	return swapin_readahead(entry, gfp, NULL, 0);
1194 }
1195 
1196 static inline struct page *shmem_alloc_page(gfp_t gfp,
1197 			struct shmem_inode_info *info, unsigned long idx)
1198 {
1199 	return alloc_page(gfp);
1200 }
1201 #endif /* CONFIG_NUMA */
1202 
1203 #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
1204 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1205 {
1206 	return NULL;
1207 }
1208 #endif
1209 
1210 /*
1211  * shmem_getpage - either get the page from swap or allocate a new one
1212  *
1213  * If we allocate a new one we do not mark it dirty. That's up to the
1214  * vm. If we swap it in we mark it dirty, since we also free the swap
1215  * entry: a page cannot live in both the swap cache and the page cache.
1216  */
1217 static int shmem_getpage(struct inode *inode, unsigned long idx,
1218 			struct page **pagep, enum sgp_type sgp, int *type)
1219 {
1220 	struct address_space *mapping = inode->i_mapping;
1221 	struct shmem_inode_info *info = SHMEM_I(inode);
1222 	struct shmem_sb_info *sbinfo;
1223 	struct page *filepage = *pagep;
1224 	struct page *swappage;
1225 	struct page *prealloc_page = NULL;
1226 	swp_entry_t *entry;
1227 	swp_entry_t swap;
1228 	gfp_t gfp;
1229 	int error;
1230 
1231 	if (idx >= SHMEM_MAX_INDEX)
1232 		return -EFBIG;
1233 
1234 	if (type)
1235 		*type = 0;
1236 
1237 	/*
1238 	 * Normally, filepage is NULL on entry, and either found
1239 	 * uptodate immediately, or allocated and zeroed, or read
1240 	 * in under swappage, which is then assigned to filepage.
1241 	 * But shmem_readpage (required for splice) passes in a locked
1242 	 * filepage, which may be found not uptodate by other callers
1243 	 * too, and may need to be copied from the swappage read in.
1244 	 */
1245 repeat:
1246 	if (!filepage)
1247 		filepage = find_lock_page(mapping, idx);
1248 	if (filepage && PageUptodate(filepage))
1249 		goto done;
1250 	gfp = mapping_gfp_mask(mapping);
1251 	if (!filepage) {
1252 		/*
1253 		 * Try to preload while we can wait, to not make a habit of
1254 		 * draining atomic reserves; but don't latch on to this cpu.
1255 		 */
1256 		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
1257 		if (error)
1258 			goto failed;
1259 		radix_tree_preload_end();
1260 		if (sgp != SGP_READ && !prealloc_page) {
1261 			/* We don't care if this fails */
1262 			prealloc_page = shmem_alloc_page(gfp, info, idx);
1263 			if (prealloc_page) {
1264 				if (mem_cgroup_cache_charge(prealloc_page,
1265 						current->mm, GFP_KERNEL)) {
1266 					page_cache_release(prealloc_page);
1267 					prealloc_page = NULL;
1268 				}
1269 			}
1270 		}
1271 	}
1272 	error = 0;
1273 
1274 	spin_lock(&info->lock);
1275 	shmem_recalc_inode(inode);
1276 	entry = shmem_swp_alloc(info, idx, sgp);
1277 	if (IS_ERR(entry)) {
1278 		spin_unlock(&info->lock);
1279 		error = PTR_ERR(entry);
1280 		goto failed;
1281 	}
1282 	swap = *entry;
1283 
1284 	if (swap.val) {
1285 		/* Look it up and read it in.. */
1286 		swappage = lookup_swap_cache(swap);
1287 		if (!swappage) {
1288 			shmem_swp_unmap(entry);
1289 			/* here we actually do the io */
1290 			if (type && !(*type & VM_FAULT_MAJOR)) {
1291 				__count_vm_event(PGMAJFAULT);
1292 				*type |= VM_FAULT_MAJOR;
1293 			}
1294 			spin_unlock(&info->lock);
1295 			swappage = shmem_swapin(swap, gfp, info, idx);
1296 			if (!swappage) {
1297 				spin_lock(&info->lock);
1298 				entry = shmem_swp_alloc(info, idx, sgp);
1299 				if (IS_ERR(entry))
1300 					error = PTR_ERR(entry);
1301 				else {
1302 					if (entry->val == swap.val)
1303 						error = -ENOMEM;
1304 					shmem_swp_unmap(entry);
1305 				}
1306 				spin_unlock(&info->lock);
1307 				if (error)
1308 					goto failed;
1309 				goto repeat;
1310 			}
1311 			wait_on_page_locked(swappage);
1312 			page_cache_release(swappage);
1313 			goto repeat;
1314 		}
1315 
1316 		/* We have to do this with page locked to prevent races */
1317 		if (!trylock_page(swappage)) {
1318 			shmem_swp_unmap(entry);
1319 			spin_unlock(&info->lock);
1320 			wait_on_page_locked(swappage);
1321 			page_cache_release(swappage);
1322 			goto repeat;
1323 		}
1324 		if (PageWriteback(swappage)) {
1325 			shmem_swp_unmap(entry);
1326 			spin_unlock(&info->lock);
1327 			wait_on_page_writeback(swappage);
1328 			unlock_page(swappage);
1329 			page_cache_release(swappage);
1330 			goto repeat;
1331 		}
1332 		if (!PageUptodate(swappage)) {
1333 			shmem_swp_unmap(entry);
1334 			spin_unlock(&info->lock);
1335 			unlock_page(swappage);
1336 			page_cache_release(swappage);
1337 			error = -EIO;
1338 			goto failed;
1339 		}
1340 
1341 		if (filepage) {
1342 			shmem_swp_set(info, entry, 0);
1343 			shmem_swp_unmap(entry);
1344 			delete_from_swap_cache(swappage);
1345 			spin_unlock(&info->lock);
1346 			copy_highpage(filepage, swappage);
1347 			unlock_page(swappage);
1348 			page_cache_release(swappage);
1349 			flush_dcache_page(filepage);
1350 			SetPageUptodate(filepage);
1351 			set_page_dirty(filepage);
1352 			swap_free(swap);
1353 		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
1354 					idx, GFP_NOWAIT))) {
1355 			info->flags |= SHMEM_PAGEIN;
1356 			shmem_swp_set(info, entry, 0);
1357 			shmem_swp_unmap(entry);
1358 			delete_from_swap_cache(swappage);
1359 			spin_unlock(&info->lock);
1360 			filepage = swappage;
1361 			set_page_dirty(filepage);
1362 			swap_free(swap);
1363 		} else {
1364 			shmem_swp_unmap(entry);
1365 			spin_unlock(&info->lock);
1366 			if (error == -ENOMEM) {
1367 				/*
1368 				 * reclaim from proper memory cgroup and
1369 				 * call memcg's OOM if needed.
1370 				 */
1371 				error = mem_cgroup_shmem_charge_fallback(
1372 								swappage,
1373 								current->mm,
1374 								gfp);
1375 				if (error) {
1376 					unlock_page(swappage);
1377 					page_cache_release(swappage);
1378 					goto failed;
1379 				}
1380 			}
1381 			unlock_page(swappage);
1382 			page_cache_release(swappage);
1383 			goto repeat;
1384 		}
1385 	} else if (sgp == SGP_READ && !filepage) {
1386 		shmem_swp_unmap(entry);
1387 		filepage = find_get_page(mapping, idx);
1388 		if (filepage &&
1389 		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
1390 			spin_unlock(&info->lock);
1391 			wait_on_page_locked(filepage);
1392 			page_cache_release(filepage);
1393 			filepage = NULL;
1394 			goto repeat;
1395 		}
1396 		spin_unlock(&info->lock);
1397 	} else {
1398 		shmem_swp_unmap(entry);
1399 		sbinfo = SHMEM_SB(inode->i_sb);
1400 		if (sbinfo->max_blocks) {
1401 			if (percpu_counter_compare(&sbinfo->used_blocks,
1402 						sbinfo->max_blocks) >= 0 ||
1403 			    shmem_acct_block(info->flags)) {
1404 				spin_unlock(&info->lock);
1405 				error = -ENOSPC;
1406 				goto failed;
1407 			}
1408 			percpu_counter_inc(&sbinfo->used_blocks);
1409 			spin_lock(&inode->i_lock);
1410 			inode->i_blocks += BLOCKS_PER_PAGE;
1411 			spin_unlock(&inode->i_lock);
1412 		} else if (shmem_acct_block(info->flags)) {
1413 			spin_unlock(&info->lock);
1414 			error = -ENOSPC;
1415 			goto failed;
1416 		}
1417 
1418 		if (!filepage) {
1419 			int ret;
1420 
1421 			if (!prealloc_page) {
1422 				spin_unlock(&info->lock);
1423 				filepage = shmem_alloc_page(gfp, info, idx);
1424 				if (!filepage) {
1425 					shmem_unacct_blocks(info->flags, 1);
1426 					shmem_free_blocks(inode, 1);
1427 					error = -ENOMEM;
1428 					goto failed;
1429 				}
1430 				SetPageSwapBacked(filepage);
1431 
1432 				/*
1433 				 * Precharge page while we can wait, compensate
1434 				 * after
1435 				 */
1436 				error = mem_cgroup_cache_charge(filepage,
1437 					current->mm, GFP_KERNEL);
1438 				if (error) {
1439 					page_cache_release(filepage);
1440 					shmem_unacct_blocks(info->flags, 1);
1441 					shmem_free_blocks(inode, 1);
1442 					filepage = NULL;
1443 					goto failed;
1444 				}
1445 
1446 				spin_lock(&info->lock);
1447 			} else {
1448 				filepage = prealloc_page;
1449 				prealloc_page = NULL;
1450 				SetPageSwapBacked(filepage);
1451 			}
1452 
1453 			entry = shmem_swp_alloc(info, idx, sgp);
1454 			if (IS_ERR(entry))
1455 				error = PTR_ERR(entry);
1456 			else {
1457 				swap = *entry;
1458 				shmem_swp_unmap(entry);
1459 			}
1460 			ret = error || swap.val;
1461 			if (ret)
1462 				mem_cgroup_uncharge_cache_page(filepage);
1463 			else
1464 				ret = add_to_page_cache_lru(filepage, mapping,
1465 						idx, GFP_NOWAIT);
1466 			/*
1467 			 * At add_to_page_cache_lru() failure, uncharge will
1468 			 * be done automatically.
1469 			 */
1470 			if (ret) {
1471 				spin_unlock(&info->lock);
1472 				page_cache_release(filepage);
1473 				shmem_unacct_blocks(info->flags, 1);
1474 				shmem_free_blocks(inode, 1);
1475 				filepage = NULL;
1476 				if (error)
1477 					goto failed;
1478 				goto repeat;
1479 			}
1480 			info->flags |= SHMEM_PAGEIN;
1481 		}
1482 
1483 		info->alloced++;
1484 		spin_unlock(&info->lock);
1485 		clear_highpage(filepage);
1486 		flush_dcache_page(filepage);
1487 		SetPageUptodate(filepage);
1488 		if (sgp == SGP_DIRTY)
1489 			set_page_dirty(filepage);
1490 	}
1491 done:
1492 	*pagep = filepage;
1493 	error = 0;
1494 	goto out;
1495 
1496 failed:
1497 	if (*pagep != filepage) {
1498 		unlock_page(filepage);
1499 		page_cache_release(filepage);
1500 	}
1501 out:
1502 	if (prealloc_page) {
1503 		mem_cgroup_uncharge_cache_page(prealloc_page);
1504 		page_cache_release(prealloc_page);
1505 	}
1506 	return error;
1507 }
1508 
1509 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1510 {
1511 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1512 	int error;
1513 	int ret;
1514 
1515 	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1516 		return VM_FAULT_SIGBUS;
1517 
1518 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1519 	if (error)
1520 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1521 
1522 	return ret | VM_FAULT_LOCKED;
1523 }
1524 
1525 #ifdef CONFIG_NUMA
1526 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1527 {
1528 	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1529 	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1530 }
1531 
1532 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1533 					  unsigned long addr)
1534 {
1535 	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1536 	unsigned long idx;
1537 
1538 	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1539 	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1540 }
1541 #endif
1542 
1543 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1544 {
1545 	struct inode *inode = file->f_path.dentry->d_inode;
1546 	struct shmem_inode_info *info = SHMEM_I(inode);
1547 	int retval = -ENOMEM;
1548 
1549 	spin_lock(&info->lock);
1550 	if (lock && !(info->flags & VM_LOCKED)) {
1551 		if (!user_shm_lock(inode->i_size, user))
1552 			goto out_nomem;
1553 		info->flags |= VM_LOCKED;
1554 		mapping_set_unevictable(file->f_mapping);
1555 	}
1556 	if (!lock && (info->flags & VM_LOCKED) && user) {
1557 		user_shm_unlock(inode->i_size, user);
1558 		info->flags &= ~VM_LOCKED;
1559 		mapping_clear_unevictable(file->f_mapping);
1560 		scan_mapping_unevictable_pages(file->f_mapping);
1561 	}
1562 	retval = 0;
1563 
1564 out_nomem:
1565 	spin_unlock(&info->lock);
1566 	return retval;
1567 }
1568 
1569 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1570 {
1571 	file_accessed(file);
1572 	vma->vm_ops = &shmem_vm_ops;
1573 	vma->vm_flags |= VM_CAN_NONLINEAR;
1574 	return 0;
1575 }
1576 
1577 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1578 				     int mode, dev_t dev, unsigned long flags)
1579 {
1580 	struct inode *inode;
1581 	struct shmem_inode_info *info;
1582 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1583 
1584 	if (shmem_reserve_inode(sb))
1585 		return NULL;
1586 
1587 	inode = new_inode(sb);
1588 	if (inode) {
1589 		inode->i_ino = get_next_ino();
1590 		inode_init_owner(inode, dir, mode);
1591 		inode->i_blocks = 0;
1592 		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1593 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1594 		inode->i_generation = get_seconds();
1595 		info = SHMEM_I(inode);
1596 		memset(info, 0, (char *)inode - (char *)info);
1597 		spin_lock_init(&info->lock);
1598 		info->flags = flags & VM_NORESERVE;
1599 		INIT_LIST_HEAD(&info->swaplist);
1600 		cache_no_acl(inode);
1601 
1602 		switch (mode & S_IFMT) {
1603 		default:
1604 			inode->i_op = &shmem_special_inode_operations;
1605 			init_special_inode(inode, mode, dev);
1606 			break;
1607 		case S_IFREG:
1608 			inode->i_mapping->a_ops = &shmem_aops;
1609 			inode->i_op = &shmem_inode_operations;
1610 			inode->i_fop = &shmem_file_operations;
1611 			mpol_shared_policy_init(&info->policy,
1612 						 shmem_get_sbmpol(sbinfo));
1613 			break;
1614 		case S_IFDIR:
1615 			inc_nlink(inode);
1616 			/* Some things misbehave if size == 0 on a directory */
1617 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
1618 			inode->i_op = &shmem_dir_inode_operations;
1619 			inode->i_fop = &simple_dir_operations;
1620 			break;
1621 		case S_IFLNK:
1622 			/*
1623 			 * Must not load anything in the rbtree,
1624 			 * mpol_free_shared_policy will not be called.
1625 			 */
1626 			mpol_shared_policy_init(&info->policy, NULL);
1627 			break;
1628 		}
1629 	} else
1630 		shmem_free_inode(sb);
1631 	return inode;
1632 }
1633 
1634 #ifdef CONFIG_TMPFS
1635 static const struct inode_operations shmem_symlink_inode_operations;
1636 static const struct inode_operations shmem_symlink_inline_operations;
1637 
1638 /*
1639  * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
1640  * but providing them allows a tmpfs file to be used for splice, sendfile, and
1641  * below the loop driver, in the generic fashion that many filesystems support.
1642  */
1643 static int shmem_readpage(struct file *file, struct page *page)
1644 {
1645 	struct inode *inode = page->mapping->host;
1646 	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1647 	unlock_page(page);
1648 	return error;
1649 }
1650 
1651 static int
1652 shmem_write_begin(struct file *file, struct address_space *mapping,
1653 			loff_t pos, unsigned len, unsigned flags,
1654 			struct page **pagep, void **fsdata)
1655 {
1656 	struct inode *inode = mapping->host;
1657 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1658 	*pagep = NULL;
1659 	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1660 }
1661 
1662 static int
1663 shmem_write_end(struct file *file, struct address_space *mapping,
1664 			loff_t pos, unsigned len, unsigned copied,
1665 			struct page *page, void *fsdata)
1666 {
1667 	struct inode *inode = mapping->host;
1668 
1669 	if (pos + copied > inode->i_size)
1670 		i_size_write(inode, pos + copied);
1671 
1672 	set_page_dirty(page);
1673 	unlock_page(page);
1674 	page_cache_release(page);
1675 
1676 	return copied;
1677 }
1678 
1679 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1680 {
1681 	struct inode *inode = filp->f_path.dentry->d_inode;
1682 	struct address_space *mapping = inode->i_mapping;
1683 	unsigned long index, offset;
1684 	enum sgp_type sgp = SGP_READ;
1685 
1686 	/*
1687 	 * Might this read be for a stacking filesystem?  Then when reading
1688 	 * holes of a sparse file, we actually need to allocate those pages,
1689 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
1690 	 */
1691 	if (segment_eq(get_fs(), KERNEL_DS))
1692 		sgp = SGP_DIRTY;
1693 
1694 	index = *ppos >> PAGE_CACHE_SHIFT;
1695 	offset = *ppos & ~PAGE_CACHE_MASK;
1696 
1697 	for (;;) {
1698 		struct page *page = NULL;
1699 		unsigned long end_index, nr, ret;
1700 		loff_t i_size = i_size_read(inode);
1701 
1702 		end_index = i_size >> PAGE_CACHE_SHIFT;
1703 		if (index > end_index)
1704 			break;
1705 		if (index == end_index) {
1706 			nr = i_size & ~PAGE_CACHE_MASK;
1707 			if (nr <= offset)
1708 				break;
1709 		}
1710 
1711 		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1712 		if (desc->error) {
1713 			if (desc->error == -EINVAL)
1714 				desc->error = 0;
1715 			break;
1716 		}
1717 		if (page)
1718 			unlock_page(page);
1719 
1720 		/*
1721 		 * We must evaluate after, since reads (unlike writes)
1722 		 * are called without i_mutex protection against truncate
1723 		 */
1724 		nr = PAGE_CACHE_SIZE;
1725 		i_size = i_size_read(inode);
1726 		end_index = i_size >> PAGE_CACHE_SHIFT;
1727 		if (index == end_index) {
1728 			nr = i_size & ~PAGE_CACHE_MASK;
1729 			if (nr <= offset) {
1730 				if (page)
1731 					page_cache_release(page);
1732 				break;
1733 			}
1734 		}
1735 		nr -= offset;
1736 
1737 		if (page) {
1738 			/*
1739 			 * If users can be writing to this page using arbitrary
1740 			 * virtual addresses, take care about potential aliasing
1741 			 * before reading the page on the kernel side.
1742 			 */
1743 			if (mapping_writably_mapped(mapping))
1744 				flush_dcache_page(page);
1745 			/*
1746 			 * Mark the page accessed if we read the beginning.
1747 			 */
1748 			if (!offset)
1749 				mark_page_accessed(page);
1750 		} else {
1751 			page = ZERO_PAGE(0);
1752 			page_cache_get(page);
1753 		}
1754 
1755 		/*
1756 		 * Ok, we have the page, and it's up-to-date, so
1757 		 * now we can copy it to user space...
1758 		 *
1759 		 * The actor routine returns how many bytes were actually used..
1760 		 * NOTE! This may not be the same as how much of a user buffer
1761 		 * we filled up (we may be padding etc), so we can only update
1762 		 * "pos" here (the actor routine has to update the user buffer
1763 		 * pointers and the remaining count).
1764 		 */
1765 		ret = actor(desc, page, offset, nr);
1766 		offset += ret;
1767 		index += offset >> PAGE_CACHE_SHIFT;
1768 		offset &= ~PAGE_CACHE_MASK;
1769 
1770 		page_cache_release(page);
1771 		if (ret != nr || !desc->count)
1772 			break;
1773 
1774 		cond_resched();
1775 	}
1776 
1777 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1778 	file_accessed(filp);
1779 }
1780 
1781 static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1782 		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1783 {
1784 	struct file *filp = iocb->ki_filp;
1785 	ssize_t retval;
1786 	unsigned long seg;
1787 	size_t count;
1788 	loff_t *ppos = &iocb->ki_pos;
1789 
1790 	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1791 	if (retval)
1792 		return retval;
1793 
1794 	for (seg = 0; seg < nr_segs; seg++) {
1795 		read_descriptor_t desc;
1796 
1797 		desc.written = 0;
1798 		desc.arg.buf = iov[seg].iov_base;
1799 		desc.count = iov[seg].iov_len;
1800 		if (desc.count == 0)
1801 			continue;
1802 		desc.error = 0;
1803 		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1804 		retval += desc.written;
1805 		if (desc.error) {
1806 			retval = retval ?: desc.error;
1807 			break;
1808 		}
1809 		if (desc.count > 0)
1810 			break;
1811 	}
1812 	return retval;
1813 }
1814 
1815 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1816 {
1817 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1818 
1819 	buf->f_type = TMPFS_MAGIC;
1820 	buf->f_bsize = PAGE_CACHE_SIZE;
1821 	buf->f_namelen = NAME_MAX;
1822 	if (sbinfo->max_blocks) {
1823 		buf->f_blocks = sbinfo->max_blocks;
1824 		buf->f_bavail = buf->f_bfree =
1825 				sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
1826 	}
1827 	if (sbinfo->max_inodes) {
1828 		buf->f_files = sbinfo->max_inodes;
1829 		buf->f_ffree = sbinfo->free_inodes;
1830 	}
1831 	/* else leave those fields 0 like simple_statfs */
1832 	return 0;
1833 }
1834 
1835 /*
1836  * File creation. Allocate an inode, and we're done..
1837  */
1838 static int
1839 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1840 {
1841 	struct inode *inode;
1842 	int error = -ENOSPC;
1843 
1844 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1845 	if (inode) {
1846 		error = security_inode_init_security(inode, dir,
1847 						     &dentry->d_name, NULL,
1848 						     NULL, NULL);
1849 		if (error) {
1850 			if (error != -EOPNOTSUPP) {
1851 				iput(inode);
1852 				return error;
1853 			}
1854 		}
1855 #ifdef CONFIG_TMPFS_POSIX_ACL
1856 		error = generic_acl_init(inode, dir);
1857 		if (error) {
1858 			iput(inode);
1859 			return error;
1860 		}
1861 #else
1862 		error = 0;
1863 #endif
1864 		dir->i_size += BOGO_DIRENT_SIZE;
1865 		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1866 		d_instantiate(dentry, inode);
1867 		dget(dentry); /* Extra count - pin the dentry in core */
1868 	}
1869 	return error;
1870 }
1871 
1872 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1873 {
1874 	int error;
1875 
1876 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1877 		return error;
1878 	inc_nlink(dir);
1879 	return 0;
1880 }
1881 
1882 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1883 		struct nameidata *nd)
1884 {
1885 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1886 }
1887 
1888 /*
1889  * Link a file..
1890  */
1891 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1892 {
1893 	struct inode *inode = old_dentry->d_inode;
1894 	int ret;
1895 
1896 	/*
1897 	 * No ordinary (disk based) filesystem counts links as inodes;
1898 	 * but each new link needs a new dentry, pinning lowmem, and
1899 	 * tmpfs dentries cannot be pruned until they are unlinked.
1900 	 */
1901 	ret = shmem_reserve_inode(inode->i_sb);
1902 	if (ret)
1903 		goto out;
1904 
1905 	dir->i_size += BOGO_DIRENT_SIZE;
1906 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1907 	inc_nlink(inode);
1908 	ihold(inode);	/* New dentry reference */
1909 	dget(dentry);		/* Extra pinning count for the created dentry */
1910 	d_instantiate(dentry, inode);
1911 out:
1912 	return ret;
1913 }
1914 
1915 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1916 {
1917 	struct inode *inode = dentry->d_inode;
1918 
1919 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1920 		shmem_free_inode(inode->i_sb);
1921 
1922 	dir->i_size -= BOGO_DIRENT_SIZE;
1923 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1924 	drop_nlink(inode);
1925 	dput(dentry);	/* Undo the count from "create" - this does all the work */
1926 	return 0;
1927 }
1928 
1929 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1930 {
1931 	if (!simple_empty(dentry))
1932 		return -ENOTEMPTY;
1933 
1934 	drop_nlink(dentry->d_inode);
1935 	drop_nlink(dir);
1936 	return shmem_unlink(dir, dentry);
1937 }
1938 
1939 /*
1940  * The VFS layer already does all the dentry stuff for rename,
1941  * we just have to decrement the usage count for the target if
1942  * it exists so that the VFS layer correctly frees it when it
1943  * gets overwritten.
1944  */
1945 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1946 {
1947 	struct inode *inode = old_dentry->d_inode;
1948 	int they_are_dirs = S_ISDIR(inode->i_mode);
1949 
1950 	if (!simple_empty(new_dentry))
1951 		return -ENOTEMPTY;
1952 
1953 	if (new_dentry->d_inode) {
1954 		(void) shmem_unlink(new_dir, new_dentry);
1955 		if (they_are_dirs)
1956 			drop_nlink(old_dir);
1957 	} else if (they_are_dirs) {
1958 		drop_nlink(old_dir);
1959 		inc_nlink(new_dir);
1960 	}
1961 
1962 	old_dir->i_size -= BOGO_DIRENT_SIZE;
1963 	new_dir->i_size += BOGO_DIRENT_SIZE;
1964 	old_dir->i_ctime = old_dir->i_mtime =
1965 	new_dir->i_ctime = new_dir->i_mtime =
1966 	inode->i_ctime = CURRENT_TIME;
1967 	return 0;
1968 }
1969 
1970 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1971 {
1972 	int error;
1973 	int len;
1974 	struct inode *inode;
1975 	struct page *page = NULL;
1976 	char *kaddr;
1977 	struct shmem_inode_info *info;
1978 
1979 	len = strlen(symname) + 1;
1980 	if (len > PAGE_CACHE_SIZE)
1981 		return -ENAMETOOLONG;
1982 
1983 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1984 	if (!inode)
1985 		return -ENOSPC;
1986 
1987 	error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
1988 					     NULL, NULL);
1989 	if (error) {
1990 		if (error != -EOPNOTSUPP) {
1991 			iput(inode);
1992 			return error;
1993 		}
1994 		error = 0;
1995 	}
1996 
1997 	info = SHMEM_I(inode);
1998 	inode->i_size = len-1;
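	/*
	 * Short targets are stored inline in the shmem_inode_info itself:
	 * the space between the start of the info structure and its
	 * embedded vfs_inode is reused as the symlink buffer.  Longer
	 * targets get a regular page via shmem_getpage() below.
	 */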
1999 	if (len <= (char *)inode - (char *)info) {
2000 		/* do it inline */
2001 		memcpy(info, symname, len);
2002 		inode->i_op = &shmem_symlink_inline_operations;
2003 	} else {
2004 		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
2005 		if (error) {
2006 			iput(inode);
2007 			return error;
2008 		}
2009 		inode->i_mapping->a_ops = &shmem_aops;
2010 		inode->i_op = &shmem_symlink_inode_operations;
2011 		kaddr = kmap_atomic(page, KM_USER0);
2012 		memcpy(kaddr, symname, len);
2013 		kunmap_atomic(kaddr, KM_USER0);
2014 		set_page_dirty(page);
2015 		unlock_page(page);
2016 		page_cache_release(page);
2017 	}
2018 	dir->i_size += BOGO_DIRENT_SIZE;
2019 	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2020 	d_instantiate(dentry, inode);
2021 	dget(dentry);
2022 	return 0;
2023 }
2024 
2025 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
2026 {
2027 	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
2028 	return NULL;
2029 }
2030 
2031 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
2032 {
2033 	struct page *page = NULL;
2034 	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
2035 	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
2036 	if (page)
2037 		unlock_page(page);
2038 	return page;
2039 }
2040 
2041 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
2042 {
2043 	if (!IS_ERR(nd_get_link(nd))) {
2044 		struct page *page = cookie;
2045 		kunmap(page);
2046 		mark_page_accessed(page);
2047 		page_cache_release(page);
2048 	}
2049 }
2050 
2051 static const struct inode_operations shmem_symlink_inline_operations = {
2052 	.readlink	= generic_readlink,
2053 	.follow_link	= shmem_follow_link_inline,
2054 };
2055 
2056 static const struct inode_operations shmem_symlink_inode_operations = {
2057 	.readlink	= generic_readlink,
2058 	.follow_link	= shmem_follow_link,
2059 	.put_link	= shmem_put_link,
2060 };
2061 
2062 #ifdef CONFIG_TMPFS_POSIX_ACL
2063 /*
2064  * Superblocks without xattr inode operations will get security.* xattr
2065  * support from the VFS "for free". As soon as we have any other xattrs
2066  * like ACLs, we also need to implement the security.* handlers at
2067  * filesystem level, though.
2068  */
2069 
2070 static size_t shmem_xattr_security_list(struct dentry *dentry, char *list,
2071 					size_t list_len, const char *name,
2072 					size_t name_len, int handler_flags)
2073 {
2074 	return security_inode_listsecurity(dentry->d_inode, list, list_len);
2075 }
2076 
2077 static int shmem_xattr_security_get(struct dentry *dentry, const char *name,
2078 		void *buffer, size_t size, int handler_flags)
2079 {
2080 	if (strcmp(name, "") == 0)
2081 		return -EINVAL;
2082 	return xattr_getsecurity(dentry->d_inode, name, buffer, size);
2083 }
2084 
2085 static int shmem_xattr_security_set(struct dentry *dentry, const char *name,
2086 		const void *value, size_t size, int flags, int handler_flags)
2087 {
2088 	if (strcmp(name, "") == 0)
2089 		return -EINVAL;
2090 	return security_inode_setsecurity(dentry->d_inode, name, value,
2091 					  size, flags);
2092 }
2093 
2094 static const struct xattr_handler shmem_xattr_security_handler = {
2095 	.prefix = XATTR_SECURITY_PREFIX,
2096 	.list   = shmem_xattr_security_list,
2097 	.get    = shmem_xattr_security_get,
2098 	.set    = shmem_xattr_security_set,
2099 };
2100 
2101 static const struct xattr_handler *shmem_xattr_handlers[] = {
2102 	&generic_acl_access_handler,
2103 	&generic_acl_default_handler,
2104 	&shmem_xattr_security_handler,
2105 	NULL
2106 };
2107 #endif
2108 
2109 static struct dentry *shmem_get_parent(struct dentry *child)
2110 {
2111 	return ERR_PTR(-ESTALE);
2112 }
2113 
2114 static int shmem_match(struct inode *ino, void *vfh)
2115 {
2116 	__u32 *fh = vfh;
2117 	__u64 inum = fh[2];
2118 	inum = (inum << 32) | fh[1];
2119 	return ino->i_ino == inum && fh[0] == ino->i_generation;
2120 }
2121 
2122 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2123 		struct fid *fid, int fh_len, int fh_type)
2124 {
2125 	struct inode *inode;
2126 	struct dentry *dentry = NULL;
2127 	u64 inum = fid->raw[2];
2128 	inum = (inum << 32) | fid->raw[1];
2129 
2130 	if (fh_len < 3)
2131 		return NULL;
2132 
2133 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2134 			shmem_match, fid->raw);
2135 	if (inode) {
2136 		dentry = d_find_alias(inode);
2137 		iput(inode);
2138 	}
2139 
2140 	return dentry;
2141 }
2142 
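/*
 * NFS file handle layout used by shmem_encode_fh()/shmem_fh_to_dentry():
 *
 *	fh[0] = i_generation
 *	fh[1] = low 32 bits of i_ino
 *	fh[2] = high 32 bits of i_ino
 *
 * Inodes are hashed lazily on i_ino + i_generation at encode time, so
 * that ilookup5() in shmem_fh_to_dentry() can find them again.
 */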
2143 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2144 				int connectable)
2145 {
2146 	struct inode *inode = dentry->d_inode;
2147 
2148 	if (*len < 3) {
2149 		*len = 3;
2150 		return 255;
2151 	}
2152 
2153 	if (inode_unhashed(inode)) {
2154 		/* Unfortunately insert_inode_hash is not idempotent,
2155 		 * so as we hash inodes here rather than at creation
2156 		 * time, we need a lock to ensure we only try
2157 		 * to do it once
2158 		 */
2159 		static DEFINE_SPINLOCK(lock);
2160 		spin_lock(&lock);
2161 		if (inode_unhashed(inode))
2162 			__insert_inode_hash(inode,
2163 					    inode->i_ino + inode->i_generation);
2164 		spin_unlock(&lock);
2165 	}
2166 
2167 	fh[0] = inode->i_generation;
2168 	fh[1] = inode->i_ino;
2169 	fh[2] = ((__u64)inode->i_ino) >> 32;
2170 
2171 	*len = 3;
2172 	return 1;
2173 }
2174 
2175 static const struct export_operations shmem_export_ops = {
2176 	.get_parent     = shmem_get_parent,
2177 	.encode_fh      = shmem_encode_fh,
2178 	.fh_to_dentry	= shmem_fh_to_dentry,
2179 };
2180 
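/*
 * A sketch of the option string this parser handles (illustrative, not
 * taken from this file); note that the nodelist in "mpol" may itself
 * contain commas, which is why the splitting loop below keeps a comma
 * when it is followed by a digit:
 *
 *	size=50%,nr_inodes=10240,mode=1777,uid=0,gid=0,mpol=bind:0,2
 *
 * "size" is fed through memparse(), so k/m/g suffixes work as well,
 * and a trailing '%' scales the value against totalram_pages.
 */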
2181 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2182 			       bool remount)
2183 {
2184 	char *this_char, *value, *rest;
2185 
2186 	while (options != NULL) {
2187 		this_char = options;
2188 		for (;;) {
2189 			/*
2190 			 * NUL-terminate this option: unfortunately,
2191 			 * mount options form a comma-separated list,
2192 			 * but mpol's nodelist may also contain commas.
2193 			 */
2194 			options = strchr(options, ',');
2195 			if (options == NULL)
2196 				break;
2197 			options++;
2198 			if (!isdigit(*options)) {
2199 				options[-1] = '\0';
2200 				break;
2201 			}
2202 		}
2203 		if (!*this_char)
2204 			continue;
2205 		if ((value = strchr(this_char,'=')) != NULL) {
2206 			*value++ = 0;
2207 		} else {
2208 			printk(KERN_ERR
2209 			    "tmpfs: No value for mount option '%s'\n",
2210 			    this_char);
2211 			return 1;
2212 		}
2213 
2214 		if (!strcmp(this_char,"size")) {
2215 			unsigned long long size;
2216 			size = memparse(value,&rest);
2217 			if (*rest == '%') {
2218 				size <<= PAGE_SHIFT;
2219 				size *= totalram_pages;
2220 				do_div(size, 100);
2221 				rest++;
2222 			}
2223 			if (*rest)
2224 				goto bad_val;
2225 			sbinfo->max_blocks =
2226 				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2227 		} else if (!strcmp(this_char,"nr_blocks")) {
2228 			sbinfo->max_blocks = memparse(value, &rest);
2229 			if (*rest)
2230 				goto bad_val;
2231 		} else if (!strcmp(this_char,"nr_inodes")) {
2232 			sbinfo->max_inodes = memparse(value, &rest);
2233 			if (*rest)
2234 				goto bad_val;
2235 		} else if (!strcmp(this_char,"mode")) {
2236 			if (remount)
2237 				continue;
2238 			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2239 			if (*rest)
2240 				goto bad_val;
2241 		} else if (!strcmp(this_char,"uid")) {
2242 			if (remount)
2243 				continue;
2244 			sbinfo->uid = simple_strtoul(value, &rest, 0);
2245 			if (*rest)
2246 				goto bad_val;
2247 		} else if (!strcmp(this_char,"gid")) {
2248 			if (remount)
2249 				continue;
2250 			sbinfo->gid = simple_strtoul(value, &rest, 0);
2251 			if (*rest)
2252 				goto bad_val;
2253 		} else if (!strcmp(this_char,"mpol")) {
2254 			if (mpol_parse_str(value, &sbinfo->mpol, 1))
2255 				goto bad_val;
2256 		} else {
2257 			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2258 			       this_char);
2259 			return 1;
2260 		}
2261 	}
2262 	return 0;
2263 
2264 bad_val:
2265 	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2266 	       value, this_char);
2267 	return 1;
2268 
2269 }
2270 
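/*
 * Illustrative remount behaviour (a sketch of what the checks below
 * enforce, not an exhaustive list): growing the limits, e.g.
 * "mount -o remount,size=2g /mnt", is accepted; shrinking below what
 * is already in use, or switching an unlimited mount to a limited one,
 * fails with -EINVAL.
 */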
2271 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2272 {
2273 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2274 	struct shmem_sb_info config = *sbinfo;
2275 	unsigned long inodes;
2276 	int error = -EINVAL;
2277 
2278 	if (shmem_parse_options(data, &config, true))
2279 		return error;
2280 
2281 	spin_lock(&sbinfo->stat_lock);
2282 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2283 	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2284 		goto out;
2285 	if (config.max_inodes < inodes)
2286 		goto out;
2287 	/*
2288 	 * Those tests also disallow limited->unlimited while any are in
2289 	 * use, so i_blocks will always be zero when max_blocks is zero;
2290 	 * but we must separately disallow unlimited->limited, because
2291 	 * in that case we have no record of how much is already in use.
2292 	 */
2293 	if (config.max_blocks && !sbinfo->max_blocks)
2294 		goto out;
2295 	if (config.max_inodes && !sbinfo->max_inodes)
2296 		goto out;
2297 
2298 	error = 0;
2299 	sbinfo->max_blocks  = config.max_blocks;
2300 	sbinfo->max_inodes  = config.max_inodes;
2301 	sbinfo->free_inodes = config.max_inodes - inodes;
2302 
2303 	mpol_put(sbinfo->mpol);
2304 	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
2305 out:
2306 	spin_unlock(&sbinfo->stat_lock);
2307 	return error;
2308 }
2309 
2310 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2311 {
2312 	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2313 
2314 	if (sbinfo->max_blocks != shmem_default_max_blocks())
2315 		seq_printf(seq, ",size=%luk",
2316 			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2317 	if (sbinfo->max_inodes != shmem_default_max_inodes())
2318 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2319 	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2320 		seq_printf(seq, ",mode=%03o", sbinfo->mode);
2321 	if (sbinfo->uid != 0)
2322 		seq_printf(seq, ",uid=%u", sbinfo->uid);
2323 	if (sbinfo->gid != 0)
2324 		seq_printf(seq, ",gid=%u", sbinfo->gid);
2325 	shmem_show_mpol(seq, sbinfo->mpol);
2326 	return 0;
2327 }
2328 #endif /* CONFIG_TMPFS */
2329 
2330 static void shmem_put_super(struct super_block *sb)
2331 {
2332 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2333 
2334 	percpu_counter_destroy(&sbinfo->used_blocks);
2335 	kfree(sbinfo);
2336 	sb->s_fs_info = NULL;
2337 }
2338 
2339 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2340 {
2341 	struct inode *inode;
2342 	struct dentry *root;
2343 	struct shmem_sb_info *sbinfo;
2344 	int err = -ENOMEM;
2345 
2346 	/* Round up to L1_CACHE_BYTES to resist false sharing */
2347 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2348 				L1_CACHE_BYTES), GFP_KERNEL);
2349 	if (!sbinfo)
2350 		return -ENOMEM;
2351 
2352 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
2353 	sbinfo->uid = current_fsuid();
2354 	sbinfo->gid = current_fsgid();
2355 	sb->s_fs_info = sbinfo;
2356 
2357 #ifdef CONFIG_TMPFS
2358 	/*
2359 	 * By default we only allow half of the physical RAM per
2360 	 * tmpfs instance, limiting inodes to one per page of lowmem;
2361 	 * but the internal instance is left unlimited.
2362 	 */
2363 	if (!(sb->s_flags & MS_NOUSER)) {
2364 		sbinfo->max_blocks = shmem_default_max_blocks();
2365 		sbinfo->max_inodes = shmem_default_max_inodes();
2366 		if (shmem_parse_options(data, sbinfo, false)) {
2367 			err = -EINVAL;
2368 			goto failed;
2369 		}
2370 	}
2371 	sb->s_export_op = &shmem_export_ops;
2372 #else
2373 	sb->s_flags |= MS_NOUSER;
2374 #endif
2375 
2376 	spin_lock_init(&sbinfo->stat_lock);
2377 	if (percpu_counter_init(&sbinfo->used_blocks, 0))
2378 		goto failed;
2379 	sbinfo->free_inodes = sbinfo->max_inodes;
2380 
2381 	sb->s_maxbytes = SHMEM_MAX_BYTES;
2382 	sb->s_blocksize = PAGE_CACHE_SIZE;
2383 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2384 	sb->s_magic = TMPFS_MAGIC;
2385 	sb->s_op = &shmem_ops;
2386 	sb->s_time_gran = 1;
2387 #ifdef CONFIG_TMPFS_POSIX_ACL
2388 	sb->s_xattr = shmem_xattr_handlers;
2389 	sb->s_flags |= MS_POSIXACL;
2390 #endif
2391 
2392 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2393 	if (!inode)
2394 		goto failed;
2395 	inode->i_uid = sbinfo->uid;
2396 	inode->i_gid = sbinfo->gid;
2397 	root = d_alloc_root(inode);
2398 	if (!root)
2399 		goto failed_iput;
2400 	sb->s_root = root;
2401 	return 0;
2402 
2403 failed_iput:
2404 	iput(inode);
2405 failed:
2406 	shmem_put_super(sb);
2407 	return err;
2408 }
2409 
2410 static struct kmem_cache *shmem_inode_cachep;
2411 
2412 static struct inode *shmem_alloc_inode(struct super_block *sb)
2413 {
2414 	struct shmem_inode_info *p;
2415 	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2416 	if (!p)
2417 		return NULL;
2418 	return &p->vfs_inode;
2419 }
2420 
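/*
 * Inode freeing is deferred through call_rcu() (see shmem_destroy_inode
 * below), so the actual kmem_cache_free() happens only after an RCU
 * grace period has elapsed.
 */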
2421 static void shmem_i_callback(struct rcu_head *head)
2422 {
2423 	struct inode *inode = container_of(head, struct inode, i_rcu);
2424 	INIT_LIST_HEAD(&inode->i_dentry);
2425 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2426 }
2427 
2428 static void shmem_destroy_inode(struct inode *inode)
2429 {
2430 	if ((inode->i_mode & S_IFMT) == S_IFREG) {
2431 		/* only struct inode is valid if it's an inline symlink */
2432 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2433 	}
2434 	call_rcu(&inode->i_rcu, shmem_i_callback);
2435 }
2436 
2437 static void init_once(void *foo)
2438 {
2439 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2440 
2441 	inode_init_once(&p->vfs_inode);
2442 }
2443 
2444 static int init_inodecache(void)
2445 {
2446 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2447 				sizeof(struct shmem_inode_info),
2448 				0, SLAB_PANIC, init_once);
2449 	return 0;
2450 }
2451 
2452 static void destroy_inodecache(void)
2453 {
2454 	kmem_cache_destroy(shmem_inode_cachep);
2455 }
2456 
2457 static const struct address_space_operations shmem_aops = {
2458 	.writepage	= shmem_writepage,
2459 	.set_page_dirty	= __set_page_dirty_no_writeback,
2460 #ifdef CONFIG_TMPFS
2461 	.readpage	= shmem_readpage,
2462 	.write_begin	= shmem_write_begin,
2463 	.write_end	= shmem_write_end,
2464 #endif
2465 	.migratepage	= migrate_page,
2466 	.error_remove_page = generic_error_remove_page,
2467 };
2468 
2469 static const struct file_operations shmem_file_operations = {
2470 	.mmap		= shmem_mmap,
2471 #ifdef CONFIG_TMPFS
2472 	.llseek		= generic_file_llseek,
2473 	.read		= do_sync_read,
2474 	.write		= do_sync_write,
2475 	.aio_read	= shmem_file_aio_read,
2476 	.aio_write	= generic_file_aio_write,
2477 	.fsync		= noop_fsync,
2478 	.splice_read	= generic_file_splice_read,
2479 	.splice_write	= generic_file_splice_write,
2480 #endif
2481 };
2482 
2483 static const struct inode_operations shmem_inode_operations = {
2484 	.setattr	= shmem_notify_change,
2485 	.truncate_range	= shmem_truncate_range,
2486 #ifdef CONFIG_TMPFS_POSIX_ACL
2487 	.setxattr	= generic_setxattr,
2488 	.getxattr	= generic_getxattr,
2489 	.listxattr	= generic_listxattr,
2490 	.removexattr	= generic_removexattr,
2491 	.check_acl	= generic_check_acl,
2492 #endif
2493 
2494 };
2495 
2496 static const struct inode_operations shmem_dir_inode_operations = {
2497 #ifdef CONFIG_TMPFS
2498 	.create		= shmem_create,
2499 	.lookup		= simple_lookup,
2500 	.link		= shmem_link,
2501 	.unlink		= shmem_unlink,
2502 	.symlink	= shmem_symlink,
2503 	.mkdir		= shmem_mkdir,
2504 	.rmdir		= shmem_rmdir,
2505 	.mknod		= shmem_mknod,
2506 	.rename		= shmem_rename,
2507 #endif
2508 #ifdef CONFIG_TMPFS_POSIX_ACL
2509 	.setattr	= shmem_notify_change,
2510 	.setxattr	= generic_setxattr,
2511 	.getxattr	= generic_getxattr,
2512 	.listxattr	= generic_listxattr,
2513 	.removexattr	= generic_removexattr,
2514 	.check_acl	= generic_check_acl,
2515 #endif
2516 };
2517 
2518 static const struct inode_operations shmem_special_inode_operations = {
2519 #ifdef CONFIG_TMPFS_POSIX_ACL
2520 	.setattr	= shmem_notify_change,
2521 	.setxattr	= generic_setxattr,
2522 	.getxattr	= generic_getxattr,
2523 	.listxattr	= generic_listxattr,
2524 	.removexattr	= generic_removexattr,
2525 	.check_acl	= generic_check_acl,
2526 #endif
2527 };
2528 
2529 static const struct super_operations shmem_ops = {
2530 	.alloc_inode	= shmem_alloc_inode,
2531 	.destroy_inode	= shmem_destroy_inode,
2532 #ifdef CONFIG_TMPFS
2533 	.statfs		= shmem_statfs,
2534 	.remount_fs	= shmem_remount_fs,
2535 	.show_options	= shmem_show_options,
2536 #endif
2537 	.evict_inode	= shmem_evict_inode,
2538 	.drop_inode	= generic_delete_inode,
2539 	.put_super	= shmem_put_super,
2540 };
2541 
2542 static const struct vm_operations_struct shmem_vm_ops = {
2543 	.fault		= shmem_fault,
2544 #ifdef CONFIG_NUMA
2545 	.set_policy     = shmem_set_policy,
2546 	.get_policy     = shmem_get_policy,
2547 #endif
2548 };
2549 
2550 
2551 static struct dentry *shmem_mount(struct file_system_type *fs_type,
2552 	int flags, const char *dev_name, void *data)
2553 {
2554 	return mount_nodev(fs_type, flags, data, shmem_fill_super);
2555 }
2556 
2557 static struct file_system_type tmpfs_fs_type = {
2558 	.owner		= THIS_MODULE,
2559 	.name		= "tmpfs",
2560 	.mount		= shmem_mount,
2561 	.kill_sb	= kill_litter_super,
2562 };
2563 
2564 int __init init_tmpfs(void)
2565 {
2566 	int error;
2567 
2568 	error = bdi_init(&shmem_backing_dev_info);
2569 	if (error)
2570 		goto out4;
2571 
2572 	error = init_inodecache();
2573 	if (error)
2574 		goto out3;
2575 
2576 	error = register_filesystem(&tmpfs_fs_type);
2577 	if (error) {
2578 		printk(KERN_ERR "Could not register tmpfs\n");
2579 		goto out2;
2580 	}
2581 
2582 	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2583 				tmpfs_fs_type.name, NULL);
2584 	if (IS_ERR(shm_mnt)) {
2585 		error = PTR_ERR(shm_mnt);
2586 		printk(KERN_ERR "Could not kern_mount tmpfs\n");
2587 		goto out1;
2588 	}
2589 	return 0;
2590 
2591 out1:
2592 	unregister_filesystem(&tmpfs_fs_type);
2593 out2:
2594 	destroy_inodecache();
2595 out3:
2596 	bdi_destroy(&shmem_backing_dev_info);
2597 out4:
2598 	shm_mnt = ERR_PTR(error);
2599 	return error;
2600 }
2601 
2602 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2603 /**
2604  * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2605  * @inode: the inode to be searched
2606  * @pgoff: the offset to be searched
2607  * @pagep: the pointer for the found page to be stored
2608  * @ent: the pointer for the found swap entry to be stored
2609  *
2610  * If a page is found, its refcount is incremented; the caller is
2611  * responsible for dropping that reference.
2612  */
2613 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2614 					struct page **pagep, swp_entry_t *ent)
2615 {
2616 	swp_entry_t entry = { .val = 0 }, *ptr;
2617 	struct page *page = NULL;
2618 	struct shmem_inode_info *info = SHMEM_I(inode);
2619 
2620 	if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2621 		goto out;
2622 
2623 	spin_lock(&info->lock);
2624 	ptr = shmem_swp_entry(info, pgoff, NULL);
2625 #ifdef CONFIG_SWAP
2626 	if (ptr && ptr->val) {
2627 		entry.val = ptr->val;
2628 		page = find_get_page(&swapper_space, entry.val);
2629 	} else
2630 #endif
2631 		page = find_get_page(inode->i_mapping, pgoff);
2632 	if (ptr)
2633 		shmem_swp_unmap(ptr);
2634 	spin_unlock(&info->lock);
2635 out:
2636 	*pagep = page;
2637 	*ent = entry;
2638 }
2639 #endif
2640 
2641 #else /* !CONFIG_SHMEM */
2642 
2643 /*
2644  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2645  *
2646  * This is intended for small systems where the benefits of the full
2647  * shmem code (swap-backed and resource-limited) are outweighed by
2648  * its complexity. On systems without swap this code should be
2649  * effectively equivalent, but much lighter weight.
2650  */
2651 
2652 #include <linux/ramfs.h>
2653 
2654 static struct file_system_type tmpfs_fs_type = {
2655 	.name		= "tmpfs",
2656 	.mount		= ramfs_mount,
2657 	.kill_sb	= kill_litter_super,
2658 };
2659 
2660 int __init init_tmpfs(void)
2661 {
2662 	BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
2663 
2664 	shm_mnt = kern_mount(&tmpfs_fs_type);
2665 	BUG_ON(IS_ERR(shm_mnt));
2666 
2667 	return 0;
2668 }
2669 
2670 int shmem_unuse(swp_entry_t entry, struct page *page)
2671 {
2672 	return 0;
2673 }
2674 
2675 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2676 {
2677 	return 0;
2678 }
2679 
2680 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2681 /**
2682  * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2683  * @inode: the inode to be searched
2684  * @pgoff: the offset to be searched
2685  * @pagep: the pointer for the found page to be stored
2686  * @ent: the pointer for the found swap entry to be stored
2687  *
2688  * If a page is found, its refcount is incremented; the caller is
2689  * responsible for dropping that reference.
2690  */
2691 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2692 					struct page **pagep, swp_entry_t *ent)
2693 {
2694 	struct page *page = NULL;
2695 
2696 	if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2697 		goto out;
2698 	page = find_get_page(inode->i_mapping, pgoff);
2699 out:
2700 	*pagep = page;
2701 	*ent = (swp_entry_t){ .val = 0 };
2702 }
2703 #endif
2704 
2705 #define shmem_vm_ops				generic_file_vm_ops
2706 #define shmem_file_operations			ramfs_file_operations
2707 #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
2708 #define shmem_acct_size(flags, size)		0
2709 #define shmem_unacct_size(flags, size)		do {} while (0)
2710 #define SHMEM_MAX_BYTES				MAX_LFS_FILESIZE
2711 
2712 #endif /* CONFIG_SHMEM */
2713 
2714 /* common code */
2715 
2716 /**
2717  * shmem_file_setup - get an unlinked file living in tmpfs
2718  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2719  * @size: size to be set for the file
2720  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2721  */
2722 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2723 {
2724 	int error;
2725 	struct file *file;
2726 	struct inode *inode;
2727 	struct path path;
2728 	struct dentry *root;
2729 	struct qstr this;
2730 
2731 	if (IS_ERR(shm_mnt))
2732 		return (void *)shm_mnt;
2733 
2734 	if (size < 0 || size > SHMEM_MAX_BYTES)
2735 		return ERR_PTR(-EINVAL);
2736 
2737 	if (shmem_acct_size(flags, size))
2738 		return ERR_PTR(-ENOMEM);
2739 
2740 	error = -ENOMEM;
2741 	this.name = name;
2742 	this.len = strlen(name);
2743 	this.hash = 0; /* will go */
2744 	root = shm_mnt->mnt_root;
2745 	path.dentry = d_alloc(root, &this);
2746 	if (!path.dentry)
2747 		goto put_memory;
2748 	path.mnt = mntget(shm_mnt);
2749 
2750 	error = -ENOSPC;
2751 	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2752 	if (!inode)
2753 		goto put_dentry;
2754 
2755 	d_instantiate(path.dentry, inode);
2756 	inode->i_size = size;
2757 	inode->i_nlink = 0;	/* It is unlinked */
2758 #ifndef CONFIG_MMU
2759 	error = ramfs_nommu_expand_for_mapping(inode, size);
2760 	if (error)
2761 		goto put_dentry;
2762 #endif
2763 
2764 	error = -ENFILE;
2765 	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2766 		  &shmem_file_operations);
2767 	if (!file)
2768 		goto put_dentry;
2769 
2770 	return file;
2771 
2772 put_dentry:
2773 	path_put(&path);
2774 put_memory:
2775 	shmem_unacct_size(flags, size);
2776 	return ERR_PTR(error);
2777 }
2778 EXPORT_SYMBOL_GPL(shmem_file_setup);
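
/*
 * Illustrative use of shmem_file_setup() (a sketch for a caller that
 * just needs an anonymous, unlinked tmpfs file; the name "my-buffer"
 * is made up):
 *
 *	struct file *file = shmem_file_setup("my-buffer", size, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... use file->f_mapping, mmap it, etc., then fput(file) ...
 *
 * shmem_zero_setup() below is the in-tree example of this pattern.
 */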
2779 
2780 /**
2781  * shmem_zero_setup - setup a shared anonymous mapping
2782  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
2783  */
2784 int shmem_zero_setup(struct vm_area_struct *vma)
2785 {
2786 	struct file *file;
2787 	loff_t size = vma->vm_end - vma->vm_start;
2788 
2789 	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2790 	if (IS_ERR(file))
2791 		return PTR_ERR(file);
2792 
2793 	if (vma->vm_file)
2794 		fput(vma->vm_file);
2795 	vma->vm_file = file;
2796 	vma->vm_ops = &shmem_vm_ops;
2797 	vma->vm_flags |= VM_CAN_NONLINEAR;
2798 	return 0;
2799 }
2800