/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#if !defined(KLD_MODULE)
#include "opt_vmpage.h"
#endif

#include <vm/pmap.h>
#include <machine/atomic.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */

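/*
 * Illustrative sketch (not part of this interface): with PAGE_SIZE == 4096
 * and DEV_BSIZE == 512, 'valid' and 'dirty' each carry one bit per 512-byte
 * chunk of the page, so recording the first two chunks as valid and clean
 * after a partial read could look like:
 *
 *	vm_page_set_validclean(m, 0, 1024);
 *
 * leaving the remaining six chunks neither valid nor dirty.
 */
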
TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	struct vm_page	*hnext;		/* hash table link (O,P) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */

	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_offset_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	flags,			/* see below */
		pc;			/* page color */
	u_short	wire_count;		/* wired down maps refs (P) */
	short	hold_count;		/* page hold count */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
};

/*
 * Note: SWAPBLK_NONE is currently used as an absolute value rather than
 * a flag bit.
 */

#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */

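/*
 * Illustrative sketch: since SWAPBLK_NONE is an absolute value, callers
 * test it by direct comparison rather than by masking a flag bit, e.g.
 *
 *	if (blk == SWAPBLK_NONE)
 *		...no swap block is assigned...
 */
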
#if !defined(KLD_MODULE)

/*
 * Page coloring parameters.
 */
/* Each of PQ_FREE and PQ_CACHE has PQ_L2_SIZE entries. */

/* Backward compatibility for existing PQ_*CACHE config options. */
#if !defined(PQ_CACHESIZE)
#if defined(PQ_HUGECACHE)
#define PQ_CACHESIZE 1024
#elif defined(PQ_LARGECACHE)
#define PQ_CACHESIZE 512
#elif defined(PQ_MEDIUMCACHE)
#define PQ_CACHESIZE 256
#elif defined(PQ_NORMALCACHE)
#define PQ_CACHESIZE 64
#elif defined(PQ_NOOPT)
#define PQ_CACHESIZE 0
#else
#define PQ_CACHESIZE 128
#endif
#endif

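/*
 * Sketch of the mapping above for a kernel config that still uses one of
 * the old options:
 *
 *	options PQ_LARGECACHE  ->  PQ_CACHESIZE 512  ->  PQ_L2_SIZE 128
 *
 * i.e. 128 page colors, matching the number of page-sized slots in a
 * 512K cache (512K / 4K = 128).
 */
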
#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* Number of colors, optimized for a 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* Number of colors, optimized for a 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* Number of colors, optimized for a 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* Number of colors, optimized for a 128K cache */

#elif PQ_CACHESIZE >= 64
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */

#else
#define PQ_PRIME1 1	/* Disable page coloring. */
#define PQ_PRIME2 1
#define PQ_L2_SIZE 1

#endif

#define PQ_L2_MASK (PQ_L2_SIZE - 1)

#if 1
#define PQ_NONE 0
#define PQ_FREE	1
#define PQ_INACTIVE (1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE (2 + 1*PQ_L2_SIZE)
#define PQ_CACHE (3 + 1*PQ_L2_SIZE)
#define PQ_COUNT (3 + 2*PQ_L2_SIZE)
#else
#define PQ_NONE		PQ_COUNT
#define PQ_FREE		0
#define PQ_INACTIVE	PQ_L2_SIZE
#define PQ_ACTIVE	(1 +   PQ_L2_SIZE)
#define PQ_CACHE	(2 +   PQ_L2_SIZE)
#define PQ_COUNT	(2 + 2*PQ_L2_SIZE)
#endif

struct vpgqueues {
	struct pglist pl;	/* list of pages in this queue */
	int	*cnt;		/* pointer to the global count for this queue type */
	int	lcnt;		/* local count of pages on this queue */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
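
/*
 * Illustrative sketch (assuming a managed page 'm'): the free and cache
 * queues are subdivided by page color, so the free queue that would hold
 * 'm' is
 *
 *	&vm_page_queues[PQ_FREE + m->pc];
 *
 * while a single-index queue such as the active queue can be walked with
 *
 *	TAILQ_FOREACH(m, &vm_page_queues[PQ_ACTIVE].pl, pageq);
 */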

#endif

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define	PG_MAPPED	0x0020		/* page is mapped */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define	PG_REFERENCED	0x0080		/* page has been referenced */
#define	PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define	PG_SWAPINPROG	0x0200		/* swap I/O in progress on page */
#define	PG_NOSYNC	0x0400		/* do not collect for syncer */
#define	PG_UNMANAGED	0x0800		/* No PV management for page */
#define	PG_MARKER	0x1000		/* special queue marker page */
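
/*
 * Sketch: flag bits are updated with the atomic helpers defined below
 * rather than by direct assignment, e.g.
 *
 *	vm_page_flag_set(m, PG_REFERENCED);
 *	vm_page_flag_clear(m, PG_CLEANCHK);
 */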

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
#define PFCLUSTER_BEHIND	3
#define PFCLUSTER_AHEAD		3

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation. Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page])
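
/*
 * Sketch: these convert between a resident page and its physical address;
 * for any page inside the managed range the round trip
 *
 *	PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(m)) == m
 *
 * holds.
 */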

/*
 *	Inline helper functions (historically implemented as macros).
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned short bits)
{
	atomic_set_short(&m->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned short bits)
{
	atomic_clear_short(&m->flags, bits);
}

#if 0
static __inline void
vm_page_assert_wait(vm_page_t m, int interruptible)
{
	vm_page_flag_set(m, PG_WANTED);
	assert_wait((int) m, interruptible);
}
#endif

static __inline void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0,
	    ("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 *	vm_page_flash:
 *
 *	Wake up anyone waiting for the page.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 *	vm_page_wakeup:
 *
 *	Clear the PG_BUSY flag and wake up anyone waiting for the
 *	page.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}

/*
 *	vm_page_io_start/vm_page_io_finish:
 *
 *	Adjust the busy count on the page, tracking I/O in progress;
 *	finishing the last I/O wakes up anyone waiting on the page.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&m->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}
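
/*
 * Sketch of the usual pairing: a caller starting I/O on a page bumps the
 * busy count and drops it again on completion:
 *
 *	vm_page_io_start(m);
 *	...perform the I/O...
 *	vm_page_io_finish(m);
 */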

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_ZERO		3
#define	VM_ALLOC_RETRY		0x80

void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
vm_page_t vm_page_grab __P((vm_object_t, vm_pindex_t, int));
void vm_page_cache __P((vm_page_t));
int vm_page_try_to_cache __P((vm_page_t));
void vm_page_dontneed __P((vm_page_t));
static __inline void vm_page_copy __P((vm_page_t, vm_page_t));
static __inline void vm_page_free __P((vm_page_t));
static __inline void vm_page_free_zero __P((vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
vm_page_t vm_add_new_page __P((vm_offset_t pa));
void vm_page_unmanage __P((vm_page_t));
void vm_page_unwire __P((vm_page_t, int));
void vm_page_wire __P((vm_page_t));
void vm_page_unqueue __P((vm_page_t));
void vm_page_unqueue_nowakeup __P((vm_page_t));
void vm_page_set_validclean __P((vm_page_t, int, int));
void vm_page_set_dirty __P((vm_page_t, int, int));
void vm_page_clear_dirty __P((vm_page_t, int, int));
void vm_page_set_invalid __P((vm_page_t, int, int));
static __inline boolean_t vm_page_zero_fill __P((vm_page_t));
int vm_page_is_valid __P((vm_page_t, int, int));
void vm_page_test_dirty __P((vm_page_t));
int vm_page_bits __P((int, int));
vm_page_t _vm_page_list_find __P((int, int));
#if 0
int vm_page_sleep(vm_page_t m, char *msg, char *busy);
int vm_page_asleep(vm_page_t m, char *msg, char *busy);
#endif
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);

/*
 * Keep the page from being freed by the page daemon.  This has much the
 * same effect as wiring, but with much lower overhead, and should be used
 * only for *very* temporary holding ("wiring").
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}

static __inline void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
}
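
/*
 * Sketch: hold/unhold typically bracket a very short operation on the
 * page, e.g.
 *
 *	vm_page_hold(m);
 *	...briefly touch the page...
 *	vm_page_unhold(m);
 */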

/*
 * 	vm_page_protect:
 *
 *	Reduce the protection of a page.  This routine never raises the
 *	protection and therefore can be safely called if the page is already
 *	at VM_PROT_NONE (effectively a NOP).
 */
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}
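
/*
 * Sketch: e.g. all of a page's mappings can be removed with
 *
 *	vm_page_protect(m, VM_PROT_NONE);
 *
 * and the call may safely be repeated, since protection is never raised.
 */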

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another, marking the destination fully valid.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 *
 *	The clearing of PG_ZERO is a temporary safety until the code can be
 *	reviewed to determine that PG_ZERO is being properly cleared on
 *	write faults or maps.  PG_ZERO was previously cleared in
 *	vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 *	vm_page_sleep_busy:
 *
 *	Wait until the page is no longer PG_BUSY or (if also_m_busy is TRUE)
 *	m->busy is zero.  Returns TRUE if it had to sleep (including if
 *	it almost had to sleep and made temporary spl*() mods), FALSE
 *	otherwise.
 *
 *	This routine assumes that interrupts can only remove the busy
 *	status from a page, not set the busy status or change it from
 *	PG_BUSY to m->busy or vice versa (which would create a timing
 *	window).
 *
 *	Note that being an inline, this code will be well optimized.
 */
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		int s = splvm();
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy. Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, PVM, msg, 0);
		}
		splx(s);
		return (TRUE);
		/* not reached */
	}
	return (FALSE);
}
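
/*
 * Sketch of typical use (with a hypothetical wait-channel name): callers
 * redo their lookup after sleeping, e.g.
 *
 *	if (vm_page_sleep_busy(m, TRUE, "pgwait"))
 *		goto retry;
 *	vm_page_busy(m);
 */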

/*
 *	vm_page_dirty:
 *
 *	Mark the entire page dirty.
 */
static __inline void
vm_page_dirty(vm_page_t m)
{
#if !defined(KLD_MODULE)
	KASSERT(m->queue - m->pc != PQ_CACHE,
	    ("vm_page_dirty: page in cache!"));
#endif
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_undirty:
 *
 *	Set the page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#if !defined(KLD_MODULE)

static __inline vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

#if PQ_L2_SIZE > 1
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	}
	if (m == NULL)
		m = _vm_page_list_find(basequeue, index);
#else
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
	}
#endif
	return (m);
}

#endif

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */