xref: /titanic_50/usr/src/uts/sun4v/vm/mach_kpm.c (revision dab53f9907c56d61cc82bf0ba356b741b92aec63)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Kernel Physical Mapping (segkpm) hat interface routines for sun4v.
30  */
31 
32 #include <sys/types.h>
33 #include <vm/hat.h>
34 #include <vm/hat_sfmmu.h>
35 #include <vm/page.h>
36 #include <sys/cmn_err.h>
37 #include <sys/machsystm.h>
38 #include <vm/seg_kpm.h>
39 #include <vm/mach_kpm.h>
40 
41 /* kpm prototypes */
42 void	sfmmu_kpm_pageunload(page_t *);
43 
/*
 * Kernel Physical Mapping (kpm) facility
 */

/*
 * Load locked kpm translations for all installed physical memory.
 * Called once at startup; after this, every physical page is reachable
 * through the kpm virtual window at kpm_vbase + paddr.
 */
void
mach_kpm_init()
{
	uintptr_t start, end;
	struct memlist  *pmem;

	/*
	 * Map each of the memsegs into the kpm segment, coalescing
	 * adjacent memsegs to allow mapping with the largest
	 * possible pages.
	 */
	pmem = phys_install;
	start = pmem->address;
	end = start + pmem->size;
	for (;;) {
		/*
		 * [start, end) is the physical span coalesced so far.
		 * When the list is exhausted, or the next entry leaves a
		 * hole (its base lies beyond 'end'), flush the span as one
		 * locked, non-consistent devload and begin a new span.
		 * Entries that abut or overlap the span fall through and
		 * simply extend 'end' below.
		 */
		if (pmem == NULL || pmem->address > end) {
			hat_devload(kas.a_hat, kpm_vbase + start,
			    end - start, mmu_btop(start),
			    PROT_READ | PROT_WRITE,
			    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
			if (pmem == NULL)
				break;
			start = pmem->address;
		}
		/* Extend the current span over this entry and advance. */
		end = pmem->address + pmem->size;
		pmem = pmem->next;
	}
}
76 
77 /* -- hat_kpm interface section -- */
78 
79 /*
80  * Mapin a locked page and return the vaddr.
81  */
82 /*ARGSUSED*/
83 caddr_t
84 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
85 {
86 	caddr_t		vaddr;
87 
88 	if (kpm_enable == 0) {
89 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
90 		return ((caddr_t)NULL);
91 	}
92 
93 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
94 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
95 		return ((caddr_t)NULL);
96 	}
97 
98 	vaddr = hat_kpm_page2va(pp, 1);
99 
100 	return (vaddr);
101 }
102 
103 /*
104  * Mapout a locked page.
105  */
106 /*ARGSUSED*/
107 void
108 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
109 {
110 #ifdef DEBUG
111 	if (kpm_enable == 0) {
112 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
113 		return;
114 	}
115 
116 	if (IS_KPM_ADDR(vaddr) == 0) {
117 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
118 		return;
119 	}
120 
121 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
122 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
123 		return;
124 	}
125 #endif
126 }
127 
128 /*
129  * Return the kpm virtual address for the page at pp.
130  */
131 /*ARGSUSED*/
132 caddr_t
133 hat_kpm_page2va(struct page *pp, int checkswap)
134 {
135 	uintptr_t	paddr, vaddr;
136 
137 	ASSERT(kpm_enable);
138 
139 	paddr = ptob(pp->p_pagenum);
140 
141 	vaddr = (uintptr_t)kpm_vbase + paddr;
142 
143 	return ((caddr_t)vaddr);
144 }
145 
146 /*
147  * Return the page for the kpm virtual address vaddr.
148  * Caller is responsible for the kpm mapping and lock
149  * state of the page.
150  */
151 page_t *
152 hat_kpm_vaddr2page(caddr_t vaddr)
153 {
154 	uintptr_t	paddr;
155 	pfn_t		pfn;
156 
157 	ASSERT(IS_KPM_ADDR(vaddr));
158 
159 	SFMMU_KPM_VTOP(vaddr, paddr);
160 	pfn = (pfn_t)btop(paddr);
161 
162 	return (page_numtopp_nolock(pfn));
163 }
164 
165 /*
166  * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurred.
167  * This should never happen on sun4v.
168  */
169 int
170 hat_kpm_fault(struct hat *hat, caddr_t vaddr)
171 {
172 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p", hat, vaddr);
173 
174 	return (0);
175 }
176 
/*
 * Empty on sun4v — presumably no kpm memseg hash is maintained here
 * since page2va/vaddr2page use a direct va<->pa relationship; confirm
 * against the sun4u counterpart.
 */
/*ARGSUSED*/
void
hat_kpm_mseghash_clear(int nentries)
{}
181 
/* Empty on sun4v — no kpm memseg hash to update (see mseghash_clear). */
/*ARGSUSED*/
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}
186 
/*
 * Memory-add (DR) hook: empty on sun4v — no kpm-private memseg
 * bookkeeping is kept in this file.
 */
/*ARGSUSED*/
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{}
192 
/* Memory-add (DR) hook: empty on sun4v — nothing to insert. */
/*ARGSUSED*/
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{}
197 
/* Memory-add (DR) hook: empty on sun4v — no memsegs state to adjust. */
/*ARGSUSED*/
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{}
202 
203 /*ARGSUSED*/
204 caddr_t
205 hat_kpm_mseg_reuse(struct memseg *msp)
206 {
207 	return (0);
208 }
209 
/* Memory-delete (DR) hook: empty on sun4v — nothing to update. */
/*ARGSUSED*/
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{}
214 
/* Memseg-split (DR) hook: empty on sun4v — no kpm state to repartition. */
/*ARGSUSED*/
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{}
220 
221 /*
222  * Walk the memsegs chain, applying func to each memseg span and vcolor.
223  */
224 void
225 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
226 {
227 	pfn_t	pbase, pend;
228 	void	*base;
229 	size_t	size;
230 	struct memseg *msp;
231 
232 	for (msp = memsegs; msp; msp = msp->next) {
233 		pbase = msp->pages_base;
234 		pend = msp->pages_end;
235 		base = ptob(pbase) + kpm_vbase;
236 		size = ptob(pend - pbase);
237 		func(arg, base, size);
238 	}
239 }
240 
241 
242 /* -- sfmmu_kpm internal section -- */
243 
244 /*
245  * Return the page frame number if a valid segkpm mapping exists
246  * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
247  * Should only be used by other sfmmu routines.
248  */
249 pfn_t
250 sfmmu_kpm_vatopfn(caddr_t vaddr)
251 {
252 	uintptr_t	paddr;
253 	pfn_t		pfn;
254 	page_t	*pp;
255 
256 	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));
257 
258 	SFMMU_KPM_VTOP(vaddr, paddr);
259 	pfn = (pfn_t)btop(paddr);
260 	pp = page_numtopp_nolock(pfn);
261 	if (pp && pp->p_kpmref)
262 		return (pfn);
263 	else
264 		return ((pfn_t)PFN_INVALID);
265 }
266 
/*
 * Empty on sun4v: kpm mappings are loaded locked at boot, so there is
 * no per-page kpm unload work to do here.
 */
/*ARGSUSED*/
void
sfmmu_kpm_pageunload(page_t *pp)
{}
271