vm_pageout.c: diff of c7aebda8a14a3bb94bb038df338549ccde5b56ea (old) against c325e866f4c58f7804884581b999849982b2abd6 (new). Changed lines are shown as "-" (old) / "+" (new) pairs; unchanged context appears once.
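The substantive change between these two revisions is a mechanical rename: every page-queue TAILQ macro switches its member argument from pageq to plinks.q, meaning the vm_page queue linkage now lives inside a containing member named plinks. As a sketch of what such a declaration change could look like in vm_page.h (the union shape and its second member below are illustrative assumptions, not taken from this diff):

#include <sys/queue.h>

/* Before: the queue linkage was a direct struct member named pageq. */
struct vm_page_old {
	TAILQ_ENTRY(vm_page_old) pageq;		/* page queue or free list */
	/* ... remaining fields unchanged ... */
};

/*
 * After: the linkage is reached through a containing member, so the
 * field token passed to the TAILQ macros becomes "plinks.q".  Wrapping
 * it in a union (second member hypothetical) would let the same storage
 * be reused while the page is off the queues.
 */
struct vm_page_new {
	union {
		TAILQ_ENTRY(vm_page_new) q;	/* page queue or free list */
		void *spare;			/* hypothetical alternate use */
	} plinks;
	/* ... remaining fields unchanged ... */
};

Since the queue macros expand field accesses as (elm)->field, a dotted path such as plinks.q is a legal member token, and every call site converts by textual substitution, which is all the hunks below do.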
1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 2005 Yahoo! Technologies Norway AS

--- 260 unchanged lines hidden ---

269 u_short queue;
270 vm_object_t object;
271
272 queue = m->queue;
273 vm_pageout_init_marker(&marker, queue);
274 pq = vm_page_pagequeue(m);
275 object = m->object;
276
277 - TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
277 + TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
278 vm_pagequeue_unlock(pq);
279 vm_page_unlock(m);
280 VM_OBJECT_WLOCK(object);
281 vm_page_lock(m);
282 vm_pagequeue_lock(pq);
283
284 /* Page queue might have changed. */
285 - *next = TAILQ_NEXT(&marker, pageq);
285 + *next = TAILQ_NEXT(&marker, plinks.q);
286 unchanged = (m->queue == queue &&
287 m->object == object &&
288 - &marker == TAILQ_NEXT(m, pageq));
289 - TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
288 + &marker == TAILQ_NEXT(m, plinks.q));
289 + TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
290 return (unchanged);
291}
292
293/*
294 * Lock the page while holding the page queue lock. Use marker page
295 * to detect page queue changes and maintain notion of next page on
296 * page queue. Return TRUE if no changes were detected, FALSE
297 * otherwise. The page is locked on return. The page queue lock might

--- 12 unchanged lines hidden ---

310 vm_page_lock_assert(m, MA_NOTOWNED);
311 if (vm_page_trylock(m))
312 return (TRUE);
313
314 queue = m->queue;
315 vm_pageout_init_marker(&marker, queue);
316 pq = vm_page_pagequeue(m);
317
318 - TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
318 + TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
319 vm_pagequeue_unlock(pq);
320 vm_page_lock(m);
321 vm_pagequeue_lock(pq);
322
323 /* Page queue might have changed. */
324 - *next = TAILQ_NEXT(&marker, pageq);
325 - unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
326 - TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
324 + *next = TAILQ_NEXT(&marker, plinks.q);
325 + unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, plinks.q));
326 + TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
327 return (unchanged);
328}
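Both helpers above follow the same recipe: with the page queue locked, insert a PG_MARKER dummy page immediately after m, drop the queue lock to take a lock that may sleep, retake the queue lock, and then treat the marker, not m, as the trustworthy scan position; if the marker is still TAILQ_NEXT(m), nobody touched the queue in between. A minimal standalone sketch of that recipe follows; the pthread mutex, struct names, and plinks_q member are simplifications for illustration (the kernel versions additionally re-check m->queue and m->object, which this sketch omits):

#include <sys/queue.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
	TAILQ_ENTRY(page) plinks_q;	/* placeholder for plinks.q */
	bool marker;			/* placeholder for PG_MARKER */
	int id;
};
TAILQ_HEAD(pagequeue, page);

static struct pagequeue pq = TAILQ_HEAD_INITIALIZER(pq);
static pthread_mutex_t pq_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Drop and retake the queue lock around a blocking acquisition, using
 * a stack marker to detect concurrent queue changes.  Returns true if
 * the queue around 'm' is unchanged; *next is the page to continue a
 * scan from either way.
 */
static bool
page_lock_with_marker(struct page *m, struct page **next)
{
	struct page marker = { .marker = true };
	bool unchanged;

	pthread_mutex_lock(&pq_lock);
	TAILQ_INSERT_AFTER(&pq, m, &marker, plinks_q);
	pthread_mutex_unlock(&pq_lock);
	/* ... a sleepable lock (e.g. the object lock) is taken here ... */
	pthread_mutex_lock(&pq_lock);

	/* The marker, not 'm', is the trustworthy scan position now. */
	*next = TAILQ_NEXT(&marker, plinks_q);
	unchanged = (TAILQ_NEXT(m, plinks_q) == &marker);
	TAILQ_REMOVE(&pq, &marker, plinks_q);
	pthread_mutex_unlock(&pq_lock);
	return (unchanged);
}

int
main(void)
{
	struct page a = { .id = 1 }, b = { .id = 2 }, *next;
	bool ok;

	TAILQ_INSERT_TAIL(&pq, &a, plinks_q);
	TAILQ_INSERT_TAIL(&pq, &b, plinks_q);
	ok = page_lock_with_marker(&a, &next);
	printf("unchanged=%d next=%d\n", ok, next ? next->id : -1);
	return (0);
}

Because the marker is a queue element like any other, concurrent scanners simply skip entries with the marker flag set, exactly as the PG_MARKER checks in the loops later in this file do.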
329
330/*
331 * vm_pageout_clean:
332 *
333 * Clean the page and remove it from the laundry.
334 *

--- 238 unchanged lines hidden ---

573{
574 struct mount *mp;
575 struct vnode *vp;
576 vm_object_t object;
577 vm_paddr_t pa;
578 vm_page_t m, m_tmp, next;
579
580 vm_pagequeue_lock(pq);
581 - TAILQ_FOREACH_SAFE(m, &pq->pq_pl, pageq, next) {
581 + TAILQ_FOREACH_SAFE(m, &pq->pq_pl, plinks.q, next) {
582 if ((m->flags & PG_MARKER) != 0)
583 continue;
584 pa = VM_PAGE_TO_PHYS(m);
585 if (pa < low || pa + PAGE_SIZE > high)
586 continue;
587 if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
588 vm_page_unlock(m);
589 continue;

--- 368 unchanged lines hidden ---
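The loop above relies on two conventions that recur through the rest of the file: TAILQ_FOREACH_SAFE keeps a private next pointer so the body may unlink the current page, and any entry with PG_MARKER set belongs to some other scanner and must be skipped. A small self-contained sketch of both (the _SAFE variants are BSD extensions provided by FreeBSD's <sys/queue.h>; the struct and member names are placeholders):

#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
	TAILQ_ENTRY(page) plinks_q;	/* placeholder for plinks.q */
	bool marker;			/* placeholder for PG_MARKER */
	int id;
};
TAILQ_HEAD(pagequeue, page);

int
main(void)
{
	struct pagequeue pq = TAILQ_HEAD_INITIALIZER(pq);
	struct page p1 = { .id = 1 }, mk = { .marker = true };
	struct page p2 = { .id = 2 }, *m, *next;

	TAILQ_INSERT_TAIL(&pq, &p1, plinks_q);
	TAILQ_INSERT_TAIL(&pq, &mk, plinks_q);	/* another scan's marker */
	TAILQ_INSERT_TAIL(&pq, &p2, plinks_q);

	/* Safe traversal: the body may unlink the current element. */
	TAILQ_FOREACH_SAFE(m, &pq, plinks_q, next) {
		if (m->marker)			/* skip foreign markers */
			continue;
		if (m->id == 1)			/* pretend it was reclaimed */
			TAILQ_REMOVE(&pq, m, plinks_q);
	}
	TAILQ_FOREACH(m, &pq, plinks_q)
		printf("still queued: id=%d marker=%d\n", m->id, m->marker);
	return (0);
}

Note that the third argument of both macros is the linkage member token, which is precisely the token this diff renames at every call site.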

958 for (m = TAILQ_FIRST(&pq->pq_pl);
959 m != NULL && maxscan-- > 0 && page_shortage > 0;
960 m = next) {
961 vm_pagequeue_assert_locked(pq);
962 KASSERT(queues_locked, ("unlocked queues"));
963 KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));
964
965 PCPU_INC(cnt.v_pdpages);
966 - next = TAILQ_NEXT(m, pageq);
966 + next = TAILQ_NEXT(m, plinks.q);
967
968 /*
969 * skip marker pages
970 */
971 if (m->flags & PG_MARKER)
972 continue;
973
974 KASSERT((m->flags & PG_FICTITIOUS) == 0,

--- 33 unchanged lines hidden ---

1008 continue;
1009 }
1010
1011 /*
1012 * We unlock the inactive page queue, invalidating the
1013 * 'next' pointer. Use our marker to remember our
1014 * place.
1015 */
1016 - TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, pageq);
1016 + TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
1017 vm_pagequeue_unlock(pq);
1018 queues_locked = FALSE;
1019
1020 /*
1021 * We bump the activation count if the page has been
1022 * referenced while in the inactive queue. This makes
1023 * it less likely that the page will be added back to the
1024 * inactive queue prematurely again. Here we check the

--- 177 unchanged lines hidden ---

1202 /*
1203 * The page might have been moved to another
1204 * queue during potential blocking in vget()
1205 * above. The page might have been freed and
1206 * reused for another vnode.
1207 */
1208 if (m->queue != PQ_INACTIVE ||
1209 m->object != object ||
1210 - TAILQ_NEXT(m, pageq) != &vmd->vmd_marker) {
1210 + TAILQ_NEXT(m, plinks.q) != &vmd->vmd_marker) {
1211 vm_page_unlock(m);
1212 if (object->flags & OBJ_MIGHTBEDIRTY)
1213 vnodes_skipped++;
1214 goto unlock_and_continue;
1215 }
1216
1217 /*
1218 * The page may have been busied during the

--- 53 unchanged lines hidden ---

1272 }
1273 vm_page_unlock(m);
1274 VM_OBJECT_WUNLOCK(object);
1275relock_queues:
1276 if (!queues_locked) {
1277 vm_pagequeue_lock(pq);
1278 queues_locked = TRUE;
1279 }
1280 - next = TAILQ_NEXT(&vmd->vmd_marker, pageq);
1281 - TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, pageq);
1280 + next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
1281 + TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
1282 }
1283 vm_pagequeue_unlock(pq);
1284
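The inactive-queue scan above combines the same two idioms within one loop: before any operation that can block (vget(), the pageout itself), it plants the per-domain vmd_marker after m (file line 1016 above), later revalidates that m still sits directly in front of the marker (line 1210), and at relock_queues resumes from TAILQ_NEXT of the marker before unhooking it (lines 1280-1281). A condensed sketch of that resume-from-marker loop, with locking elided and placeholder names:

#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
	TAILQ_ENTRY(page) plinks_q;	/* placeholder for plinks.q */
	bool marker;
	int id;
};
TAILQ_HEAD(pagequeue, page);

/* Visit every non-marker page, surviving a lock drop at each step. */
static void
scan(struct pagequeue *pq, struct page *marker)
{
	struct page *m, *next;

	for (m = TAILQ_FIRST(pq); m != NULL; m = next) {
		next = TAILQ_NEXT(m, plinks_q);
		if (m->marker)		/* someone else's place-holder */
			continue;
		/* Plant our marker before dropping the queue lock. */
		TAILQ_INSERT_AFTER(pq, m, marker, plinks_q);
		/* ... queue unlocked here; vget()/pageout may block ... */
		printf("visited page %d\n", m->id);
		/* Queue may have changed: recompute next from the marker. */
		next = TAILQ_NEXT(marker, plinks_q);
		TAILQ_REMOVE(pq, marker, plinks_q);
	}
}

int
main(void)
{
	struct pagequeue pq = TAILQ_HEAD_INITIALIZER(pq);
	struct page marker = { .marker = true };
	struct page p[3] = {{ .id = 10 }, { .id = 11 }, { .id = 12 }};

	for (int i = 0; i < 3; i++)
		TAILQ_INSERT_TAIL(&pq, &p[i], plinks_q);
	scan(&pq, &marker);
	return (0);
}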
1285 /*
1286 * Compute the number of pages we want to try to move from the
1287 * active queue to the inactive queue.
1288 */
1289 page_shortage = vm_paging_target() +

--- 9 unchanged lines hidden ---

1299 pcount = pq->pq_cnt;
1300 vm_pagequeue_lock(pq);
1301 m = TAILQ_FIRST(&pq->pq_pl);
1302 while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
1303
1304 KASSERT(m->queue == PQ_ACTIVE,
1305 ("vm_pageout_scan: page %p isn't active", m));
1306
1307 - next = TAILQ_NEXT(m, pageq);
1307 + next = TAILQ_NEXT(m, plinks.q);
1308 if ((m->flags & PG_MARKER) != 0) {
1309 m = next;
1310 continue;
1311 }
1312 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1313 ("Fictitious page %p cannot be in active queue", m));
1314 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1315 ("Unmanaged page %p cannot be in active queue", m));

--- 291 unchanged lines hidden ---

1607 }
1608
1609 vm_pagequeue_lock(pq);
1610 m = TAILQ_FIRST(&pq->pq_pl);
1611 while (m != NULL && pcount-- > 0) {
1612 KASSERT(m->queue == PQ_ACTIVE,
1613 ("vm_pageout_page_stats: page %p isn't active", m));
1614
1615 - next = TAILQ_NEXT(m, pageq);
1615 + next = TAILQ_NEXT(m, plinks.q);
1616 if ((m->flags & PG_MARKER) != 0) {
1617 m = next;
1618 continue;
1619 }
1620 vm_page_lock_assert(m, MA_NOTOWNED);
1621 if (!vm_pageout_page_lock(m, &next)) {
1622 vm_page_unlock(m);
1623 m = next;

--- 403 unchanged lines hidden ---