/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static void put_compound_page(struct page *page)
{
	page = (struct page *)page_private(page);
	if (put_page_testzero(page)) {
		void (*dtor)(struct page *page);

		dtor = (void (*)(struct page *))page[1].lru.next;
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.  The page still has PageWriteback set, which will pin it.
 *
 * We don't expect many pages to come through here, so don't bother batching
 * things up.
 *
 * To avoid placing the page at the tail of the LRU while PG_writeback is still
 * set, this function will clear PG_writeback before performing the page
 * motion.  Do that inside the lru lock because once PG_writeback is cleared
 * we may not touch the page.
 *
 * Returns zero if it cleared PG_writeback.
 */
int rotate_reclaimable_page(struct page *page)
{
	struct zone *zone;
	unsigned long flags;

	if (PageLocked(page))
		return 1;
	if (PageDirty(page))
		return 1;
	if (PageActive(page))
		return 1;
	if (!PageLRU(page))
		return 1;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lru_lock, flags);
	if (PageLRU(page) && !PageActive(page)) {
		list_del(&page->lru);
		list_add_tail(&page->lru, &zone->inactive_list);
		inc_page_state(pgrotated);
	}
	if (!test_clear_page_writeback(page))
		BUG();
	spin_unlock_irqrestore(&zone->lru_lock, flags);
	return 0;
}

/*
 * FIXME: speed this up?
 */
void fastcall activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
		inc_page_state(pgactivate);
	}
	spin_unlock_irq(&zone->lru_lock);
}
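/*
 * Illustrative caller sketch (not part of this file): the writeback
 * completion path is expected to try rotate_reclaimable_page() first and,
 * when the page was not rotated (non-zero return), clear PG_writeback
 * itself.  The function and helper names below follow mm/filemap.c of
 * this era, but the body is reproduced from memory as a hedged example,
 * not as the authoritative implementation.
 *
 *	void end_page_writeback(struct page *page)
 *	{
 *		if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
 *			if (!test_clear_page_writeback(page))
 *				BUG();
 *		}
 *		smp_mb__after_clear_bit();
 *		wake_up_page(page, PG_writeback);
 *	}
 */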
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void fastcall mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);

/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };

void fastcall lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvecs);
}

void fastcall lru_cache_add_active(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add_active(pvec);
	put_cpu_var(lru_add_active_pvecs);
}

static void __lru_add_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);

	/* CPU is dead, so no locking needed. */
	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);
	pvec = &per_cpu(lru_add_active_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add_active(pvec);
}

void lru_add_drain(void)
{
	__lru_add_drain(get_cpu());
	put_cpu();
}

#ifdef CONFIG_NUMA
static void lru_add_drain_per_cpu(void *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
}

#else

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	lru_add_drain();
	return 0;
}
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
void fastcall __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_page(page);
}
EXPORT_SYMBOL(__page_cache_release);
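/*
 * Illustrative usage sketch (hypothetical read path, not part of this
 * file): a new pagecache page is typically inserted into the mapping and
 * then handed to the per-CPU deferred-addition queue via lru_cache_add(),
 * so zone->lru_lock is only taken once per PAGEVEC_SIZE pages.  The names
 * "mapping" and "index" are placeholders for this example; lru_cache_add()
 * takes its own page reference, so the caller's reference handling is
 * unchanged and its own reference is dropped whenever it is done with the
 * page.
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (page) {
 *		if (!add_to_page_cache(page, mapping, index, GFP_KERNEL))
 *			lru_cache_add(page);
 *		...
 *		page_cache_release(page);
 *	}
 */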
/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irq(&zone->lru_lock);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);
			if (pagezone != zone) {
				if (zone)
					spin_unlock_irq(&zone->lru_lock);
				zone = pagezone;
				spin_lock_irq(&zone->lru_lock);
			}
			BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irq(&zone->lru_lock);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);

	pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known to not be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
	int i;
	struct pagevec pages_to_free;

	pagevec_init(&pages_to_free, pvec->cold);
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		BUG_ON(PageLRU(page));
		if (put_page_testzero(page))
			pagevec_add(&pages_to_free, page);
	}
	pagevec_free(&pages_to_free);
	pagevec_reinit(pvec);
}
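/*
 * For reference: callers normally reach __pagevec_release() through the
 * pagevec_release() wrapper in include/linux/pagevec.h, which simply skips
 * empty pagevecs.  The body below is reproduced from memory as a sketch,
 * so treat the exact wording as an approximation of that header.
 *
 *	static inline void pagevec_release(struct pagevec *pvec)
 *	{
 *		if (pagevec_count(pvec))
 *			__pagevec_release(pvec);
 *	}
 */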
/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		add_page_to_inactive_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_lru_add);

void __pagevec_lru_add_active(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		BUG_ON(PageActive(page));
		SetPageActive(page);
		add_page_to_active_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (PagePrivate(page) && !TestSetPageLocked(page)) {
			if (PagePrivate(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}
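/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * caller that has gathered candidate pages in a pagevec can drop their
 * buffer_heads in one pass and then drop its references in a batch.  The
 * helper name is made up for this example; only functions defined in this
 * file are used.
 *
 *	static void example_strip_and_release(struct pagevec *pvec)
 *	{
 *		pagevec_strip(pvec);		try_to_release_page() on each
 *		__pagevec_release(pvec);	drop refs, reinit the pagevec
 *	}
 */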
/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs
 */
#define ACCT_THRESHOLD	max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space) = 0;

void vm_acct_memory(long pages)
{
	long *local;

	preempt_disable();
	local = &__get_cpu_var(committed_space);
	*local += pages;
	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
		atomic_add(*local, &vm_committed_space);
		*local = 0;
	}
	preempt_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	long *committed;

	committed = &per_cpu(committed_space, (long)hcpu);
	if (action == CPU_DEAD) {
		atomic_add(*committed, &vm_committed_space);
		*committed = 0;
		__lru_add_drain((long)hcpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
	long count;
	long *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
long percpu_counter_sum(struct percpu_counter *fbc)
{
	long ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_possible_cpu(cpu) {
		long *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
	return ret < 0 ? 0 : ret;
}
EXPORT_SYMBOL(percpu_counter_sum);
#endif

/*
 * Perform any setup for the swap system.
 */
void __init swap_setup(void)
{
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
	hotcpu_notifier(cpu_swap_callback, 0);
}
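/*
 * Illustrative usage sketch (hypothetical counter, not part of this file):
 * a percpu_counter user folds cheap per-CPU deltas in through
 * percpu_counter_mod() and only pays the cross-CPU cost when an exact
 * value is needed via percpu_counter_sum().  The one-argument
 * percpu_counter_init() call is assumed from <linux/percpu_counter.h> of
 * this era; check the header before relying on the exact signature.
 *
 *	static struct percpu_counter example_counter;
 *
 *	percpu_counter_init(&example_counter);
 *	percpu_counter_mod(&example_counter, 1);	batched, usually lock-free
 *	total = percpu_counter_sum(&example_counter);	exact, takes fbc->lock
 */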