sys/vm/vm_page.h: lines matching +full:a +full:-m

1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
55 * Pittsburgh PA 15213-3890
74 * A small structure is kept for each resident
78 * A radix tree used to quickly
88 * synchronized using one of, or a combination of, the locks below. If a
91 * The queue lock for a page depends on the value of its queue field and is
95 * (A) the field must be accessed using atomic(9) and may require
99 * (F) the per-domain lock for the free queues.
100 * (M) Machine dependent, defined by pmap layer.
104 * The busy lock is an embedded reader-writer lock that protects the
108 * the kernel's general-purpose synchronization primitives. As a result,
110 * detected, and an attempt to xbusy a busy page or sbusy an xbusy page
111 * will trigger a panic rather than causing the thread to block.
113 * state changes, after which the caller must re-lookup the page and
114 * re-evaluate its state. vm_page_busy_acquire() will block until
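
As a concrete illustration of the busy protocol described above, here is a hedged sketch that exclusively busies a page and then releases it, using only vm_page_busy_acquire() and vm_page_xunbusy() as declared later in this header. The caller is assumed to already hold a reference that keeps the page from being recycled, and the function name is invented for the example.

/*
 * Sketch only: with allocflags of 0, vm_page_busy_acquire() sleeps until
 * the exclusive busy lock is granted.
 */
static void
example_modify_page(vm_page_t m)
{

        (void)vm_page_busy_acquire(m, 0);
        /* The page's identity and valid/dirty state may be changed here. */
        vm_page_xunbusy(m);
}
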
120 * These must be protected with the busy lock to prevent page-in or
121 * creation races. Page invalidation generally happens as a result
124 * speculative read-only mappings that do not require busy. I/O
125 * routines may check for validity without a lock if they are prepared
127 * unconcerned with races so long as they hold a reference to prevent
128 * recycling. When a valid bit is set while holding a shared busy
129 * lock (A), atomic operations are used to protect against concurrent
133 * dirty field is a mix of machine dependent (M) and busy (B). In
134 * the machine-independent layer, the page busy must be held to
137 * underlying architecture does not support atomic read-modify-write
138 * operations on the field's type, then the machine-independent
139 * layer uses a 32-bit atomic on the aligned 32-bit word that
140 * contains the dirty field. In the machine-independent layer,
141 * the implementation of read-modify-write operations on the
144 * only way to ensure a page cannot become dirty. I/O generally
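
Per the comment above there is exactly one way to guarantee that a page stops collecting new dirty bits; my reading of the (partially elided) text is that it is an exclusive busy lock combined with revoking write access in the pmap. The hedged sketch below shows that ordering with vm_page_try_remove_write() and vm_page_undirty() from this header; it assumes the caller holds the page exclusively busy and its object locked, and it is not a copy of any in-tree cleaning path.

/*
 * Sketch (hypothetical helper): revoke write mappings so no new dirty
 * bits can appear, then clear the software dirty bits (typically after
 * the contents have been written back).  vm_page_try_remove_write()
 * fails if a wiring blocks the operation.
 */
static bool
example_freeze_clean(vm_page_t m)
{

        vm_page_assert_xbusied(m);
        if (!vm_page_try_remove_write(m))
                return (false);
        vm_page_undirty(m);
        return (true);
}
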
154 * pmap_extract_and_hold(). When a page belongs to an object, it may be
156 * pmap_extract_and_hold(). As a result, if the object is locked and the
161 * allocator. A page may be present in the page queues, or even actively
163 * The page daemon must therefore handle the possibility of a concurrent
166 * The queue state of a page consists of the queue and act_count fields of
169 * index, or PQ_NONE if it does not belong to a page queue. To modify the
182 * dequeue, requeue) are batched using fixed-size per-CPU queues. A
184 * PGA_QUEUE_OP_MASK and inserting an entry into a batch queue. When a
185 * queue is full, an attempt to insert a new entry will lock the page
187 * type-stability of vm_page structures is crucial to this scheme since the
188 * processing of entries in a given batch queue may be deferred
189 * indefinitely. In particular, a page may be freed with pending batch
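
Queue operations are requested by setting flags in PGA_QUEUE_OP_MASK on the page's atomic state and letting the batch machinery apply the change later. The sketch below mirrors the shape of that pattern for a deferred dequeue, using vm_page_astate_load() and vm_page_pqstate_commit() from this header and the PGA_DEQUEUE flag defined further down; it is an illustrative rewrite with an invented name, not the in-tree implementation, and it assumes vm_page_pqstate_commit() refreshes *old on failure in the usual fcmpset style.

static void
example_request_dequeue(vm_page_t m)
{
        vm_page_astate_t old, new;

        old = vm_page_astate_load(m);
        do {
                /* Nothing to do if not queued or a dequeue is pending. */
                if (old.queue == PQ_NONE ||
                    (old.flags & PGA_DEQUEUE) != 0)
                        return;
                new = old;
                new.flags |= PGA_DEQUEUE;
        } while (!vm_page_pqstate_commit(m, &old, new));
}
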
236 u_int ref_count; /* page references (A) */
237 u_int busy_lock; /* busy owners lock (A) */
238 union vm_page_astate a; /* state accessed atomically (A) */
245 /* NOTE that these must support one bit per DEV_BSIZE in a page */
248 vm_page_bits_t dirty; /* dirty DEV_BSIZE chunk map (M,B) */
263 * attempting to tear down all mappings of a given page. The page busy lock and
279 * otherwise should be treated as a normal page. Pages not
320 #define VPB_FREED (0xffffffff - VPB_BIT_SHARED)
353 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
358 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
359 * from a page queue, respectively. It determines whether the plinks.q field
361 * be a valid queue index, and the corresponding page queue lock must be held.
363 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
364 * queue, and cleared when the dequeue request is processed. A page may
365 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
372 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
376 * atomic RMW operation to ensure that the "queue" field is a valid queue index,
387 #define PGA_ENQUEUED 0x0008 /* page is enqueued in a page queue */
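
Because PGA_ENQUEUED gates whether plinks.q may be used, a queue scan can only trust the linkage after checking the flag under the relevant page queue lock. A small hedged sketch of that check, using vm_page_astate_load() from later in this header (helper name invented):

/* The caller is assumed to hold the page queue lock for "queue". */
static bool
example_on_queue(vm_page_t m, uint8_t queue)
{
        vm_page_astate_t as;

        as = vm_page_astate_load(m);
        return (as.queue == queue && (as.flags & PGA_ENQUEUED) != 0);
}
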
403 * allocated from a per-CPU cache. It is cleared the next time that the
406 #define PG_PCPU_CACHE 0x01 /* was allocated from per-CPU caches */
410 #define PG_NODUMP 0x10 /* don't include this page in a dump */
455 #define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
458 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
460 * object is returned for addresses that are not page-aligned.
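
As a quick illustration of the two conversions (a sketch with an invented helper name): VM_PAGE_TO_PHYS() yields the page's physical address, and PHYS_TO_VM_PAGE() maps any address within that page, aligned or not, back to the same vm_page.

static vm_page_t
example_roundtrip(vm_page_t m)
{
        vm_paddr_t pa;

        pa = VM_PAGE_TO_PHYS(m);
        /* An unaligned address within the page resolves to the same page. */
        return (PHYS_TO_VM_PAGE(pa + 1));
}
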
467 * vm_page_grab_pages(). Each function supports only a subset of the flags.
475 * Bits 0 - 1 define class.
476 * Bits 2 - 15 are dedicated to flags.
478 * (a) - vm_page_alloc() supports the flag.
479 * (c) - vm_page_alloc_contig() supports the flag.
480 * (g) - vm_page_grab() supports the flag.
481 * (n) - vm_page_alloc_noobj() supports the flag.
482 * (p) - vm_page_grab_pages() supports the flag.
492 #define VM_ALLOC_WIRED 0x0020 /* (acgnp) Allocate a wired page */
493 #define VM_ALLOC_ZERO 0x0040 /* (acgnp) Allocate a zeroed page */
497 #define VM_ALLOC_NOCREAT 0x0400 /* (gp) Do not allocate a page */
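
A short hedged example of composing these bits (the function name is invented and error handling is minimal): the low two bits select the allocation class, which defaults to the normal class when no class bit is set, and the remaining bits are OR'd in as modifiers.

static vm_page_t
example_alloc_wired_zeroed(void)
{
        vm_page_t m;

        /* Default (normal) class plus two modifier flags. */
        m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
        if (m == NULL)
                return (NULL);  /* the normal class may fail outright */
        return (m);
}
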
545 * machine-independent layer.
552 void vm_page_advise(vm_page_t m, int advice);
573 void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
575 bool vm_page_busy_acquire(vm_page_t m, int allocflags);
576 void vm_page_busy_downgrade(vm_page_t m);
577 int vm_page_busy_tryupgrade(vm_page_t m);
578 bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags);
579 void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
581 void vm_page_deactivate(vm_page_t m);
582 void vm_page_deactivate_noreuse(vm_page_t m);
583 void vm_page_dequeue(vm_page_t m);
584 void vm_page_dequeue_deferred(vm_page_t m);
585 void vm_page_free(vm_page_t m);
586 void vm_page_free_invalid(vm_page_t m);
588 void vm_page_free_zero(vm_page_t m);
606 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
608 void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool);
610 void vm_page_invalid(vm_page_t m);
611 void vm_page_iter_free(struct pctrie_iter *pages, vm_page_t m);
613 int vm_page_iter_insert(vm_page_t m, vm_object_t, vm_pindex_t,
616 bool vm_page_iter_remove(struct pctrie_iter *pages, vm_page_t m);
617 bool vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
619 void vm_page_launder(vm_page_t m);
623 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
624 bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
626 bool vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m);
627 void vm_page_putfake(vm_page_t m);
628 void vm_page_readahead_finish(vm_page_t m);
636 void vm_page_reference(vm_page_t m);
639 void vm_page_release(vm_page_t m, int flags);
640 void vm_page_release_locked(vm_page_t m, int flags);
646 int vm_page_sbusied(vm_page_t m);
647 vm_page_bits_t vm_page_set_dirty(vm_page_t m);
648 void vm_page_set_valid_range(vm_page_t m, int base, int size);
650 void vm_page_sunbusy(vm_page_t m);
651 bool vm_page_try_remove_all(vm_page_t m);
652 bool vm_page_try_remove_write(vm_page_t m);
653 int vm_page_trysbusy(vm_page_t m);
654 int vm_page_tryxbusy(vm_page_t m);
656 void vm_page_unswappable(vm_page_t m);
657 void vm_page_unwire(vm_page_t m, uint8_t queue);
658 bool vm_page_unwire_noq(vm_page_t m);
659 void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
661 bool vm_page_wire_mapped(vm_page_t m);
662 void vm_page_xunbusy_hard(vm_page_t m);
663 void vm_page_xunbusy_hard_unchecked(vm_page_t m);
667 void vm_page_valid(vm_page_t m);
671 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
673 void vm_page_dirty_KBI(vm_page_t m);
675 #define vm_page_busy_fetch(m) atomic_load_int(&(m)->busy_lock)
677 #define vm_page_assert_busied(m) \
678 KASSERT(vm_page_busied(m), \
680 (m), __FILE__, __LINE__))
682 #define vm_page_assert_sbusied(m) \
683 KASSERT(vm_page_sbusied(m), \
685 (m), __FILE__, __LINE__))
687 #define vm_page_assert_unbusied(m) \
688 KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) != \
692 (m), (m)->busy_lock, curthread, __FILE__, __LINE__)); \
694 #define vm_page_assert_xbusied_unchecked(m) do { \
695 KASSERT(vm_page_xbusied(m), \
697 (m), __FILE__, __LINE__)); \
699 #define vm_page_assert_xbusied(m) do { \
700 vm_page_assert_xbusied_unchecked(m); \
701 KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) == \
705 (m), (m)->busy_lock, curthread, __FILE__, __LINE__)); \
708 #define vm_page_busied(m) \
709 (vm_page_busy_fetch(m) != VPB_UNBUSIED)
711 #define vm_page_xbusied(m) \
712 ((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)
714 #define vm_page_busy_freed(m) \
715 (vm_page_busy_fetch(m) == VPB_FREED)
717 /* Note: page m's lock must not be owned by the caller. */
718 #define vm_page_xunbusy(m) do { \
719 if (!atomic_cmpset_rel_int(&(m)->busy_lock, \
721 vm_page_xunbusy_hard(m); \
723 #define vm_page_xunbusy_unchecked(m) do { \
724 if (!atomic_cmpset_rel_int(&(m)->busy_lock, \
726 vm_page_xunbusy_hard_unchecked(m); \
730 void vm_page_object_busy_assert(vm_page_t m);
731 #define VM_PAGE_OBJECT_BUSY_ASSERT(m) vm_page_object_busy_assert(m)
732 void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
733 #define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) \
734 vm_page_assert_pga_writeable(m, bits)
736 * Claim ownership of a page's xbusy state. In non-INVARIANTS kernels this
737 * operation is a no-op since ownership is not tracked. In particular
740 #define vm_page_xbusy_claim(m) do { \
743 vm_page_assert_xbusied_unchecked((m)); \
745 _busy_lock = vm_page_busy_fetch(m); \
746 } while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock, \
750 #define VM_PAGE_OBJECT_BUSY_ASSERT(m) (void)0
751 #define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) (void)0
752 #define vm_page_xbusy_claim(m)
762 * Load a snapshot of a page's 32-bit atomic state.
765 vm_page_astate_load(vm_page_t m)
767 vm_page_astate_t a;
769 a._bits = atomic_load_32(&m->a._bits);
770 return (a);
774 * Atomically compare and set a page's atomic state.
777 vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
781 ("%s: invalid head requeue request for page %p", __func__, m));
783 ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
784 KASSERT(new._bits != old->_bits,
787 return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
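
The usual consumer of these two routines is a load/modify/retry loop; since vm_page_astate_fcmpset() refreshes *old on failure, the snapshot only has to be taken once. A hedged sketch follows (the helper name and the choice of PGA_REFERENCED are illustrative; a flag-only update like this would normally go through vm_page_aflag_set() below instead).

static void
example_mark_referenced(vm_page_t m)
{
        vm_page_astate_t old, new;

        old = vm_page_astate_load(m);
        do {
                /* Avoid the no-change case rejected by the KASSERT above. */
                if ((old.flags & PGA_REFERENCED) != 0)
                        return;
                new = old;
                new.flags |= PGA_REFERENCED;
        } while (!vm_page_astate_fcmpset(m, &old, new));
}
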
794 vm_page_aflag_clear(vm_page_t m, uint16_t bits)
799 * Access the whole 32-bit word containing the aflags field with an
800 * atomic update. Parallel non-atomic updates to the other fields
803 addr = (void *)&m->a;
812 vm_page_aflag_set(vm_page_t m, uint16_t bits)
816 VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);
819 * Access the whole 32-bit word containing the aflags field with an
820 * atomic update. Parallel non-atomic updates to the other fields
823 addr = (void *)&m->a;
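
The comments above make the key point: the 16-bit aflags can only be updated safely through an atomic operation on the whole aligned 32-bit word they share with the queue and act_count subfields. Below is a standalone C11 analogue of that idea; the layout and names are illustrative only, and the kernel itself operates on &m->a with the atomic(9) word-sized primitives rather than <stdatomic.h>.

#include <stdatomic.h>
#include <stdint.h>

/* Analogue of union vm_page_astate: three subfields packed in one word. */
union astate {
        struct {
                uint16_t flags;         /* aflags analogue */
                uint8_t queue;
                uint8_t act_count;
        };
        _Atomic uint32_t bits;          /* the whole aligned word */
};

static void
aflag_set(union astate *a, uint16_t bits)
{
        /* Assumes the flags occupy the low half of the word. */
        atomic_fetch_or(&a->bits, (uint32_t)bits);
}

static void
aflag_clear(union astate *a, uint16_t bits)
{
        atomic_fetch_and(&a->bits, ~(uint32_t)bits);
}
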
834 * call is made from the machine-independent layer.
839 vm_page_dirty(vm_page_t m)
844 vm_page_dirty_KBI(m);
846 m->dirty = VM_PAGE_BITS_ALL;
856 vm_page_undirty(vm_page_t m)
859 VM_PAGE_OBJECT_BUSY_ASSERT(m);
860 m->dirty = 0;
875 * Return the index of the queue containing m.
878 vm_page_queue(vm_page_t m)
881 return (_vm_page_queue(vm_page_astate_load(m)));
885 vm_page_active(vm_page_t m)
888 return (vm_page_queue(m) == PQ_ACTIVE);
892 vm_page_inactive(vm_page_t m)
895 return (vm_page_queue(m) == PQ_INACTIVE);
899 vm_page_in_laundry(vm_page_t m)
903 queue = vm_page_queue(m);
908 vm_page_clearref(vm_page_t m)
912 r = m->ref_count;
913 while (atomic_fcmpset_int(&m->ref_count, &r, r & (VPRC_BLOCKED |
921 * Release a reference to a page and return the old reference count.
924 vm_page_drop(vm_page_t m, u_int val)
933 old = atomic_fetchadd_int(&m->ref_count, -val);
935 ("vm_page_drop: page %p has an invalid refcount value", m));
942 * Perform a racy check to determine whether a reference prevents the page
948 vm_page_wired(vm_page_t m)
951 return (VPRC_WIRE_COUNT(m->ref_count) > 0);
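
vm_page_wired() is documented above as a racy check, so it is only good as a cheap filter; the hedged sketch below (helper name invented) shows such a hint, with the understanding that any authoritative decision needs stronger synchronization, for example vm_page_try_remove_all(), which blocks new transient wirings but itself expects the page to be busied and its object locked.

static bool
example_skip_if_wired(vm_page_t m)
{

        /* Racy hint only: a wiring may appear immediately afterwards. */
        return (vm_page_wired(m));
}
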
955 vm_page_all_valid(vm_page_t m)
958 return (m->valid == VM_PAGE_BITS_ALL);
962 vm_page_any_valid(vm_page_t m)
965 return (m->valid != 0);
969 vm_page_none_valid(vm_page_t m)
972 return (m->valid == 0);
976 vm_page_domain(vm_page_t m __numa_used)
981 segind = m->segind;
982 KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
984 KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));