/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Kernel Physical Mapping (segkpm) hat interface routines for sun4v.
 */

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/cmn_err.h>
#include <sys/machsystm.h>
#include <vm/seg_kpm.h>
#include <vm/mach_kpm.h>

/*
 * Kernel Physical Mapping (kpm) facility
 */

void
mach_kpm_init()
{
	uintptr_t start, end;
	struct memlist  *pmem;

	/*
	 * Map each of the memsegs into the kpm segment, coalescing
	 * adjacent memsegs to allow mapping with the largest
	 * possible pages.
	 */
	pmem = phys_install;
	start = pmem->address;
	end = start + pmem->size;
	for (;;) {
		if (pmem == NULL || pmem->address > end) {
			hat_devload(kas.a_hat, kpm_vbase + start,
			    end - start, mmu_btop(start),
			    PROT_READ | PROT_WRITE,
			    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
			if (pmem == NULL)
				break;
			start = pmem->address;
		}
		end = pmem->address + pmem->size;
		pmem = pmem->next;
	}
}
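
/*
 * Illustration (hypothetical phys_install contents, not from this
 * system): given the entries { address = 0x1000000, size = 0x1000000 }
 * and { address = 0x2000000, size = 0x1000000 }, the second entry
 * starts exactly at 'end', so the loop above extends the span instead
 * of flushing it, and a single hat_devload() maps
 * [kpm_vbase + 0x1000000, kpm_vbase + 0x3000000), letting the hat
 * choose the largest page size that fits the combined range.
 */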

/* -- hat_kpm interface section -- */

/*
 * Mapin a locked page and return the vaddr.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	caddr_t		vaddr;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
		return ((caddr_t)NULL);
	}

	vaddr = hat_kpm_page2va(pp, 1);

	return (vaddr);
}

/*
 * Mapout a locked page.
 */
/*ARGSUSED*/
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
		return;
	}
#endif
}
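
/*
 * Minimal usage sketch (hypothetical caller, for illustration only):
 * the page must stay locked across the mapin/mapout pair; the kpme
 * argument is unused on sun4v, so NULL suffices.
 *
 *	caddr_t va;
 *
 *	ASSERT(PAGE_LOCKED(pp));
 *	va = hat_kpm_mapin(pp, NULL);
 *	if (va != NULL) {
 *		bzero(va, PAGESIZE);
 *		hat_kpm_mapout(pp, NULL, va);
 *	}
 */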

/*
 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
 * memory addresses that are not described by a page_t.  It can
 * also be used for normal pages that are not locked, but beware
 * this is dangerous - no locking is performed, so the identity of
 * the page could change.  hat_kpm_mapin_pfn is not supported when
 * vac_colors > 1, because the chosen va depends on the page identity,
 * which could change.
 * The caller must only pass pfns for valid physical addresses;
 * violation of this rule will cause a panic.
 */
caddr_t
hat_kpm_mapin_pfn(pfn_t pfn)
{
	uintptr_t	paddr;
	caddr_t		vaddr;

	if (kpm_enable == 0)
		return ((caddr_t)NULL);

	paddr = ptob(pfn);
	vaddr = kpm_vbase + paddr;

	return (vaddr);
}

/*ARGSUSED*/
void
hat_kpm_mapout_pfn(pfn_t pfn)
{
	/* empty */
}
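
/*
 * Usage sketch (hypothetical caller): mapping physical memory that has
 * no page_t, e.g. a firmware-owned range whose pfn was obtained
 * elsewhere.  The pfn must refer to a valid physical address; buf and
 * len are hypothetical caller-supplied names.
 *
 *	caddr_t va;
 *
 *	va = hat_kpm_mapin_pfn(pfn);
 *	if (va != NULL) {
 *		bcopy(va, buf, len);
 *		hat_kpm_mapout_pfn(pfn);
 *	}
 */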

/*
 * Return the kpm virtual address for the page at pp.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	uintptr_t	paddr, vaddr;

	ASSERT(kpm_enable);

	paddr = ptob(pp->p_pagenum);

	vaddr = (uintptr_t)kpm_vbase + paddr;

	return ((caddr_t)vaddr);
}

/*
 * Return the page for the kpm virtual address vaddr.
 * Caller is responsible for the kpm mapping and lock
 * state of the page.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);

	return (page_numtopp_nolock(pfn));
}
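
/*
 * Since kpm on sun4v is a direct va == kpm_vbase + pa mapping, the two
 * translations above are exact inverses; a round trip (illustrative
 * only) recovers the original page:
 *
 *	caddr_t va = hat_kpm_page2va(pp, 1);
 *	ASSERT(hat_kpm_vaddr2page(va) == pp);
 */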

/*
 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurs.
 * This should never happen on sun4v.
 */
int
hat_kpm_fault(struct hat *hat, caddr_t vaddr)
{
	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
	    (void *)hat, (void *)vaddr);

	return (0);
}

/*ARGSUSED*/
void
hat_kpm_mseghash_clear(int nentries)
{}

/*ARGSUSED*/
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{
	pfn_t base, end;

	/*
	 * kphysm_add_memory_dynamic() does not set nkpmpgs
	 * when page_t memory is externally allocated.  That
	 * code must properly calculate nkpmpgs in all cases
	 * if nkpmpgs needs to be used at some point.
	 */

	base = msp->pages_base;
	end = msp->pages_end;

	hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
	    mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
	    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
}

/*
 * Return end of metadata for an already setup memseg.
 */
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
	return ((caddr_t)msp->epages);
}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
	pfn_t base, end;

	base = msp->pages_base;
	end = msp->pages_end;

	hat_unload(kas.a_hat, kpm_vbase + mmu_ptob(base), mmu_ptob(end - base),
	    HAT_UNLOAD | HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP);
}

/*ARGSUSED*/
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{}

/*
 * Walk the memsegs chain, applying func to each memseg span.
 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	pfn_t	pbase, pend;
	void	*base;
	size_t	size;
	struct memseg *msp;

	for (msp = memsegs; msp; msp = msp->next) {
		pbase = msp->pages_base;
		pend = msp->pages_end;
		base = ptob(pbase) + kpm_vbase;
		size = ptob(pend - pbase);
		func(arg, base, size);
	}
}
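
/*
 * Example callback (hypothetical, for illustration only): tally the
 * total number of kpm-mapped bytes.  The walk passes each memseg's kpm
 * base address and span size to func.
 *
 *	static void
 *	kpm_count_span(void *arg, void *base, size_t size)
 *	{
 *		*(size_t *)arg += size;
 *	}
 *
 *	size_t total = 0;
 *	hat_kpm_walk(kpm_count_span, &total);
 */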


/* -- sfmmu_kpm internal section -- */

/*
 * Return the page frame number if a valid segkpm mapping exists
 * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
 * Should only be used by other sfmmu routines.
 */
pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;
	page_t	*pp;

	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	pp = page_numtopp_nolock(pfn);
	if (pp)
		return (pfn);
	else
		return ((pfn_t)PFN_INVALID);
}
320