/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Kernel Physical Mapping (segkpm) hat interface routines for sun4v.
 */

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/cmn_err.h>
#include <sys/machsystm.h>
#include <vm/seg_kpm.h>
#include <vm/mach_kpm.h>
#include <vm/faultcode.h>

extern pfn_t memseg_get_start(struct memseg *);

/*
 * Kernel Physical Mapping (kpm) facility
 */


void
mach_kpm_init()
{
	uintptr_t start, end;
	struct memlist  *pmem;

	/*
	 * Map each segment of installed physical memory (phys_install)
	 * into the kpm segment, coalescing adjacent segments so they
	 * can be mapped with the largest possible pages.
	 */
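	/*
	 * The loop below accumulates a run of physically contiguous
	 * phys_install entries and defers the hat_devload() until it
	 * reaches a gap or the end of the list, so each contiguous run
	 * is mapped with a single call.
	 */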
	pmem = phys_install;
	start = pmem->ml_address;
	end = start + pmem->ml_size;
	for (;;) {
		if (pmem == NULL || pmem->ml_address > end) {
			hat_devload(kas.a_hat, kpm_vbase + start,
			    end - start, mmu_btop(start),
			    PROT_READ | PROT_WRITE,
			    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
			if (pmem == NULL)
				break;
			start = pmem->ml_address;
		}
		end = pmem->ml_address + pmem->ml_size;
		pmem = pmem->ml_next;
	}
}

/* -- hat_kpm interface section -- */

/*
 * Mapin a locked page and return the vaddr.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	caddr_t		vaddr;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
		return ((caddr_t)NULL);
	}

	vaddr = hat_kpm_page2va(pp, 1);

	return (vaddr);
}

/*
 * Mapout a locked page.
 */
/*ARGSUSED*/
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
		return;
	}
#endif
}
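
/*
 * Illustrative sketch of a typical mapin/mapout pair in a hypothetical
 * caller that already holds the page locked (buf is assumed):
 *
 *	caddr_t va = hat_kpm_mapin(pp, NULL);
 *	if (va != NULL)
 *		bcopy(va, buf, PAGESIZE);
 *	hat_kpm_mapout(pp, NULL, va);
 */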

/*
 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
 * memory addresses that are not described by a page_t.  It can
 * also be used for normal pages that are not locked, but beware:
 * this is dangerous because no locking is performed, so the identity
 * of the page could change.  hat_kpm_mapin_pfn is not supported when
 * vac_colors > 1, because the chosen va depends on the page identity,
 * which could change.
 * The caller must pass only pfns for valid physical addresses; violating
 * this rule will cause a panic.
 */
caddr_t
hat_kpm_mapin_pfn(pfn_t pfn)
{
	caddr_t paddr, vaddr;

	if (kpm_enable == 0)
		return ((caddr_t)NULL);

	paddr = (caddr_t)ptob(pfn);
	vaddr = (uintptr_t)kpm_vbase + paddr;

	return ((caddr_t)vaddr);
}

/*ARGSUSED*/
void
hat_kpm_mapout_pfn(pfn_t pfn)
{
	/* empty */
}
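
/*
 * Illustrative sketch (hypothetical caller) of the pfn variant, e.g. to
 * clear a physical page that has no page_t behind it:
 *
 *	caddr_t va = hat_kpm_mapin_pfn(pfn);
 *	if (va != NULL) {
 *		bzero(va, MMU_PAGESIZE);
 *		hat_kpm_mapout_pfn(pfn);
 *	}
 */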

/*
 * Return the kpm virtual address for the page at pp.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	uintptr_t	paddr, vaddr;

	ASSERT(kpm_enable);

	paddr = ptob(pp->p_pagenum);

	vaddr = (uintptr_t)kpm_vbase + paddr;

	return ((caddr_t)vaddr);
}

/*
 * Return the page for the kpm virtual address vaddr.
 * Caller is responsible for the kpm mapping and lock
 * state of the page.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);

	return (page_numtopp_nolock(pfn));
}

/*
 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurs.
 * This should never happen on sun4v.
 */
/*ARGSUSED*/
int
hat_kpm_fault(struct hat *hat, caddr_t vaddr)
{
	/*
	 * Return FC_NOMAP for sun4v to allow the t_lofault handler
	 * to handle this fault if one is installed.
	 */

	return (FC_NOMAP);
}
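
/*
 * Sketch of the recovery pattern this enables in a hypothetical caller:
 * a lofault handler installed with on_fault() regains control when the
 * FC_NOMAP above is returned for a bad kpm access (kpm_va, buf and len
 * are assumed):
 *
 *	label_t ljb;
 *
 *	if (on_fault(&ljb)) {
 *		no_fault();
 *		return (EFAULT);
 *	}
 *	bcopy(kpm_va, buf, len);
 *	no_fault();
 */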

/*ARGSUSED*/
void
hat_kpm_mseghash_clear(int nentries)
{}

/*ARGSUSED*/
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{
	pfn_t base, end;

	/*
	 * kphysm_add_memory_dynamic() does not set nkpmpgs
	 * when page_t memory is externally allocated.  That
	 * code must properly calculate nkpmpgs in all cases
	 * if nkpmpgs needs to be used at some point.
	 */

	/*
	 * The meta (page_t) pages for dynamically added memory are allocated
	 * either from the incoming memory itself or from existing memory.
	 * In the former case the base of the incoming pages will be different
	 * than the base of the dynamic segment so call memseg_get_start() to
	 * get the actual base of the incoming memory for each case.
	 */

	base = memseg_get_start(msp);
	end = msp->pages_end;

	hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
	    mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
	    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
}

/*
 * Return end of metadata for an already setup memseg.
 */
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
	return ((caddr_t)msp->epages);
}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
	pfn_t base, end;

	/*
	 * The meta (page_t) pages for dynamically added memory are allocated
	 * either from the incoming memory itself or from existing memory.
	 * In the former case the base of the incoming pages will be different
	 * than the base of the dynamic segment so call memseg_get_start() to
	 * get the actual base of the incoming memory for each case.
	 */

	base = memseg_get_start(msp);
	end = msp->pages_end;

	hat_unload(kas.a_hat, kpm_vbase + mmu_ptob(base), mmu_ptob(end - base),
	    HAT_UNLOAD | HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP);
}

/*ARGSUSED*/
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{}

/*
 * Walk the memsegs chain, applying func to each memseg span.
 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	pfn_t	pbase, pend;
	void	*base;
	size_t	size;
	struct memseg *msp;

	for (msp = memsegs; msp; msp = msp->next) {
		pbase = msp->pages_base;
		pend = msp->pages_end;
		base = ptob(pbase) + kpm_vbase;
		size = ptob(pend - pbase);
		func(arg, base, size);
	}
}
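
/*
 * Illustrative sketch of a walk callback (hypothetical helper), e.g. to
 * total the amount of physical memory covered by the kpm segment:
 *
 *	static void
 *	kpm_span_total(void *arg, void *base, size_t size)
 *	{
 *		*(size_t *)arg += size;
 *	}
 *
 *	size_t total = 0;
 *	hat_kpm_walk(kpm_span_total, &total);
 */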


/* -- sfmmu_kpm internal section -- */

/*
 * Return the page frame number if a valid segkpm mapping exists
 * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
 * Should only be used by other sfmmu routines.
 */
pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;
	page_t	*pp;

	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	pp = page_numtopp_nolock(pfn);
	if (pp)
		return (pfn);
	else
		return ((pfn_t)PFN_INVALID);
}