filemap.c (a528910e12ec7ee203095eb1711468a66b9b60b0) vs. filemap.c (449dd6984d0e47643c04c807f609dd56d48d5bcc)
/*
 * linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by

--- 96 unchanged lines hidden ---

 *
 *  ->i_mmap_mutex
 *    ->tasklist_lock		(memory_failure, collect_procs_ao)
 */

--- page_cache_tree_delete(), old version (a528910e12ec) ---

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	if (shadow) {
		void **slot;

		slot = radix_tree_lookup_slot(&mapping->page_tree, page->index);
		radix_tree_replace_slot(slot, shadow);
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	} else
		radix_tree_delete(&mapping->page_tree, page->index);
	mapping->nrpages--;
}

--- page_cache_tree_delete(), new version (449dd6984d0e) ---

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked. The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}
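
The barrier comment in both versions only shows the writer side of the counter protocol. For illustration, here is a minimal sketch of the matching reader ordering on the final-truncate path; the helper name is hypothetical and the real truncate-side code lives outside this hunk:

/*
 * Hypothetical reader-side sketch; pairs with the smp_wmb() in
 * page_cache_tree_delete() above.  Not part of this diff.
 */
static bool mapping_has_cache_entries(struct address_space *mapping)
{
	unsigned long nrpages = mapping->nrpages;
	unsigned long nrshadows;

	/*
	 * The writer increments nrshadows before decrementing
	 * nrpages.  Reading nrpages first and nrshadows second
	 * therefore never observes both counters as zero while a
	 * shadow entry is in flight.
	 */
	smp_rmb();
	nrshadows = mapping->nrshadows;

	return nrpages || nrshadows;
}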

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe. The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)

--- 329 unchanged lines hidden ---

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

--- page_cache_tree_insert(), old version (a528910e12ec) ---

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	void **slot;
	int error;

	slot = radix_tree_lookup_slot(&mapping->page_tree, page->index);
	if (slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;
		radix_tree_replace_slot(slot, page);
		mapping->nrshadows--;
		mapping->nrpages++;
		if (shadowp)
			*shadowp = p;
		return 0;
	}
	error = radix_tree_insert(&mapping->page_tree, page->index, page);
	if (!error)
		mapping->nrpages++;
	return error;
}

--- page_cache_tree_insert(), new version (449dd6984d0e) ---

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;
		if (shadowp)
			*shadowp = p;
		mapping->nrshadows--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked. The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}
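
Taken together, the new delete and insert paths maintain one invariant: a radix tree node sits on the workingset_shadow_nodes list_lru exactly when it holds only shadow entries. The helper below is a hypothetical restatement of that invariant, not code from either commit; it assumes the workingset_node_*() accessors used by the new version and must run under mapping->tree_lock:

/*
 * Hypothetical sketch restating the tracking invariant maintained by
 * the new page_cache_tree_delete()/page_cache_tree_insert() pair:
 * a node is on workingset_shadow_nodes iff it holds only shadow
 * entries.  Caller must hold mapping->tree_lock.
 */
static void sync_shadow_node_tracking(struct address_space *mapping,
				      struct radix_tree_node *node)
{
	if (!node)
		return;	/* entry sits directly in the tree root */

	if (workingset_node_pages(node)) {
		/* Node holds real pages: it must not be tracked. */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	} else if (workingset_node_shadows(node)) {
		/* Shadow entries only: node must be tracked. */
		if (list_empty(&node->private_list)) {
			node->private_data = mapping;
			list_lru_add(&workingset_shadow_nodes,
				     &node->private_list);
		}
	}
}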

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int error;

--- 2372 unchanged lines hidden ---
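
For context, here is a condensed, hypothetical sketch of how a caller such as __add_to_page_cache_locked() typically drives page_cache_tree_insert(): the radix tree is preloaded outside the lock, and the insert itself runs under mapping->tree_lock with interrupts disabled. The real body is in the hidden lines above and may differ; treat this as an assumption-laden illustration, not the hidden code:

/*
 * Hypothetical caller sketch (the real function is in the hidden
 * lines above; memcg charging and statistics are omitted here).
 */
static int add_page_sketch(struct page *page, struct address_space *mapping,
			   pgoff_t offset, gfp_t gfp_mask, void **shadowp)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error)
		return error;

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	spin_unlock_irq(&mapping->tree_lock);

	if (error) {
		/* Undo the speculative setup on failure. */
		page->mapping = NULL;
		page_cache_release(page);
	}
	return error;
}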