/*-
 * SPDX-License-Identifier: BSD-3-Clause AND BSD-4-Clause
 *
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Adapted for Freescale's e500 core CPUs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <machine/sr.h>
#include <machine/pte.h>
#include <machine/slb.h>
#include <machine/tlb.h>
#include <machine/vmparam.h>

struct pmap;
typedef struct pmap *pmap_t;

#if !defined(NPMAPS)
#define	NPMAPS		32768
#endif /* !defined(NPMAPS) */

struct	slbtnode;

struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
#ifndef __powerpc64__
	LIST_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
#endif
	union {
		RB_ENTRY(pvo_entry) pvo_plink;	/* Link to pmap entries */
		SLIST_ENTRY(pvo_entry) pvo_dlink; /* Link to delete entry */
	};
	struct {
#ifndef __powerpc64__
		/* 32-bit fields */
		pte_t	    pte;
#endif
		/* 64-bit fields */
		uintptr_t   slot;
		vm_paddr_t  pa;
		vm_prot_t   prot;
	} pvo_pte;
	pmap_t		pvo_pmap;		/* Owning pmap */
	vm_offset_t	pvo_vaddr;		/* VA of entry */
	uint64_t	pvo_vpn;		/* Virtual page number */
};
LIST_HEAD(pvo_head, pvo_entry);
SLIST_HEAD(pvo_dlist, pvo_entry);
RB_HEAD(pvo_tree, pvo_entry);
int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
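
/*
 * A pmap's PVOs are kept in a red-black tree ordered by virtual address
 * (pvo_vaddr_compare), so the entry for a given VA can be found without
 * scanning every mapping.
 */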

/* Used by 32-bit PMAP */
#define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
/* Used by 64-bit PMAP */
#define	PVO_HID			0x008UL		/* PVO entry in alternate hash*/
/* Used by both */
#define	PVO_WIRED		0x010UL		/* PVO entry is wired */
#define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
#define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
						   bootstrap */
#define PVO_DEAD		0x100UL		/* waiting to be deleted */
#define PVO_LARGE		0x200UL		/* large page */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
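
/*
 * Example (illustrative): once the 32-bit pmap has installed a PTE in
 * PTEG slot `i', the slot index is cached in the PVO so it can be
 * recovered later without searching the PTEG:
 *
 *	PVO_PTEGIDX_SET(pvo, i);
 *	if (PVO_PTEGIDX_ISSET(pvo))
 *		i = PVO_PTEGIDX_GET(pvo);
 */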

struct	pmap {
	struct		pmap_statistics	pm_stats;
	struct	mtx	pm_mtx;
	cpuset_t	pm_active;
	union {
		struct {

		    #ifdef __powerpc64__
			struct slbtnode	*pm_slb_tree_root;
			struct slb	**pm_slb;
			int		pm_slb_len;
		    #else
			register_t	pm_sr[16];
		    #endif

			struct pmap	*pmap_phys;
			struct pvo_tree pmap_pvo;
		};
		struct {
			/* TID to identify this pmap's entries in the TLB */
			tlbtid_t	pm_tid[MAXCPU];

#ifdef __powerpc64__
			/*
			 * Page table directory,
			 * array of pointers to page directories.
			 */
			pte_t ***pm_pp2d;
#else
			/*
			 * Page table directory,
			 * array of pointers to page tables.
			 */
			pte_t		**pm_pdir;

			/* List of allocated ptbl bufs (ptbl kva regions). */
			TAILQ_HEAD(, ptbl_buf)	pm_ptbl_list;
#endif
		};
	};
};
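
/*
 * The anonymous union above holds the machine-dependent part of the pmap:
 * the first arm (SLB/segment-register state and the PVO tree) is used by
 * the AIM pmaps, the second (per-CPU TIDs and the page table directory)
 * by the Book-E pmap.
 */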

struct pv_entry {
	pmap_t pv_pmap;
	vm_offset_t pv_va;
	TAILQ_ENTRY(pv_entry) pv_link;
};
typedef struct pv_entry *pv_entry_t;

struct	md_page {
	union {
		struct {
			volatile int32_t mdpg_attrs;
			vm_memattr_t	 mdpg_cache_attrs;
			struct	pvo_head mdpg_pvoh;
		};
		struct {
			TAILQ_HEAD(, pv_entry)	pv_list;
			int			pv_tracked;
		};
	};
};

#ifdef AIM
#define	pmap_page_get_memattr(m)	((m)->md.mdpg_cache_attrs)
#define	pmap_page_is_mapped(m)	(!LIST_EMPTY(&(m)->md.mdpg_pvoh))
#else
#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#endif
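
/*
 * AIM tracks a page's mappings through the PVO list in md_page, while
 * Book-E uses the pv_entry list; the macros above select the matching
 * representation.
 */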

/*
 * Return the VSID corresponding to a given virtual address.
 * If no VSID is currently defined for the address, one is allocated
 * and added to a free slot if available.
 *
 * NB: The PMAP MUST be locked already.
 */
uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
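
/*
 * Example (illustrative): the lookup is performed with the pmap lock
 * held, e.g.
 *
 *	PMAP_LOCK(pm);
 *	vsid = va_to_vsid(pm, va);
 *	PMAP_UNLOCK(pm);
 */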

/* Lock-free, non-allocating lookup routines */
uint64_t kernel_va_to_slbv(vm_offset_t va);
struct slb *user_va_to_slb_entry(pmap_t pm, vm_offset_t va);

uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large);
void	free_vsid(pmap_t pm, uint64_t esid, int large);
void	slb_insert_user(pmap_t pm, struct slb *slb);
void	slb_insert_kernel(uint64_t slbe, uint64_t slbv);

struct slbtnode *slb_alloc_tree(void);
void     slb_free_tree(pmap_t pm);
struct slb **slb_alloc_user_cache(void);
void	slb_free_user_cache(struct slb **);

extern	struct pmap kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#ifdef _KERNEL

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, \
				    (pmap == kernel_pmap) ? "kernelpmap" : \
				    "pmap", NULL, MTX_DEF)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
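
/*
 * Example (illustrative): the per-pmap mutex is held across any walk or
 * update of the pmap's mappings:
 *
 *	PMAP_LOCK(pm);
 *	... inspect or modify mappings; PMAP_LOCK_ASSERT(pm, MA_OWNED) ...
 *	PMAP_UNLOCK(pm);
 */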

#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)

void		pmap_bootstrap(vm_offset_t, vm_offset_t);
void		pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void		pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t);
void		pmap_kremove(vm_offset_t);
void		*pmap_mapdev(vm_paddr_t, vm_size_t);
void		*pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
void		pmap_unmapdev(vm_offset_t, vm_size_t);
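
/*
 * Example (illustrative): a device register window can be mapped into
 * kernel virtual address space and later released:
 *
 *	va = pmap_mapdev(pa, size);
 *	...
 *	pmap_unmapdev((vm_offset_t)va, size);
 */
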
void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
int		pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
int		pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr,
		    void **kaddr, size_t ulen, size_t *klen);
int		pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user,
		    vm_offset_t *decoded_addr);
void		pmap_deactivate(struct thread *);
vm_paddr_t	pmap_kextract(vm_offset_t);
int		pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
boolean_t	pmap_mmu_install(char *name, int prio);
const char	*pmap_mmu_name(void);

#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))

extern	vm_offset_t virtual_avail;
extern	vm_offset_t virtual_end;
extern	caddr_t crashdumpmap;

extern	vm_offset_t msgbuf_phys;

extern	int pmap_bootstrapped;

vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size);
void pmap_early_io_unmap(vm_offset_t va, vm_size_t size);
void pmap_track_page(pmap_t pmap, vm_offset_t va);

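/*
 * powerpc keeps no per-pmap state that must be duplicated when a vmspace
 * is copied, so this hook is a no-op that always reports success.
 */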
static inline int
pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
{

	return (0);
}

#endif

#endif /* !_MACHINE_PMAP_H_ */