/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Kernel Physical Mapping (segkpm) hat interface routines for sun4v.
 */

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/cmn_err.h>
#include <sys/machsystm.h>
#include <vm/seg_kpm.h>
#include <vm/mach_kpm.h>

/*
 * Kernel Physical Mapping (kpm) facility
 */
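
/*
 * On sun4v the kpm segment is a simple linear window onto physical
 * memory: a page's kpm virtual address is kpm_vbase plus its physical
 * address (see hat_kpm_page2va() and SFMMU_KPM_VTOP() below).  The
 * entire range is mapped and locked up front in mach_kpm_init(), so
 * kpm tsbmisses are not expected at runtime (hat_kpm_fault() panics).
 */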

void
mach_kpm_init()
{
	uintptr_t start, end;
	struct memlist  *pmem;

	/*
	 * Map all of installed physical memory (the phys_install
	 * memlist) into the kpm segment, coalescing adjacent chunks
	 * to allow mapping with the largest possible pages.
	 */
	pmem = phys_install;
	start = pmem->address;
	end = start + pmem->size;
	for (;;) {
		if (pmem == NULL || pmem->address > end) {
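			/*
			 * End of the memlist, or a hole in physical
			 * memory: load and lock the span coalesced so
			 * far before starting a new one.
			 */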
			hat_devload(kas.a_hat, kpm_vbase + start,
			    end - start, mmu_btop(start),
			    PROT_READ | PROT_WRITE,
			    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
			if (pmem == NULL)
				break;
			start = pmem->address;
		}
		end = pmem->address + pmem->size;
		pmem = pmem->next;
	}
}

/* -- hat_kpm interface section -- */

/*
 * Mapin a locked page and return the vaddr.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	caddr_t		vaddr;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
		return ((caddr_t)NULL);
	}

	vaddr = hat_kpm_page2va(pp, 1);

	return (vaddr);
}

/*
 * Mapout a locked page.
 */
/*ARGSUSED*/
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
		return;
	}
#endif
}
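
/*
 * Illustrative caller pattern (a sketch only; vp, off and the SE_SHARED
 * lock choice are assumptions, not taken from this file):
 *
 *	page_t *pp = page_lookup(vp, off, SE_SHARED);
 *	if (pp != NULL) {
 *		caddr_t va = hat_kpm_mapin(pp, NULL);
 *		... access the page contents through va ...
 *		hat_kpm_mapout(pp, NULL, va);
 *		page_unlock(pp);
 *	}
 *
 * On sun4v both calls reduce to address arithmetic (plus the DEBUG
 * checks above), since the whole kpm range was mapped and locked in
 * mach_kpm_init().
 */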

/*
 * Return the kpm virtual address for the page at pp.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	uintptr_t	paddr, vaddr;

	ASSERT(kpm_enable);

	paddr = ptob(pp->p_pagenum);

	vaddr = (uintptr_t)kpm_vbase + paddr;

	return ((caddr_t)vaddr);
}
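
/*
 * Worked example of the translation above (assuming the 8 KB base page
 * size, i.e. ptob(pfn) == pfn << 13): a page with p_pagenum 0x1000 has
 * paddr 0x2000000, so its kpm address is kpm_vbase + 0x2000000.
 */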

/*
 * Return the page for the kpm virtual address vaddr.
 * Caller is responsible for the kpm mapping and lock
 * state of the page.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);

	return (page_numtopp_nolock(pfn));
}

/*
 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurs.
 * This should never happen on sun4v.
 */
int
hat_kpm_fault(struct hat *hat, caddr_t vaddr)
{
	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
	    (void *)hat, (void *)vaddr);

	return (0);
}
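
/*
 * The hat_kpm_* routines below are part of the common segkpm/memseg
 * interface (memseg add/delete/split bookkeeping).  On sun4v the kpm
 * range is mapped linearly and locked at startup, so there is no
 * per-memseg kpm state to maintain and these routines are no-ops.
 */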

/*ARGSUSED*/
void
hat_kpm_mseghash_clear(int nentries)
{}

/*ARGSUSED*/
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{}

/*ARGSUSED*/
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{}

/*ARGSUSED*/
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
	return (0);
}

/*ARGSUSED*/
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{}

/*ARGSUSED*/
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{}

/*
 * Walk the memsegs chain, applying func to each memseg span.
 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	pfn_t	pbase, pend;
	void	*base;
	size_t	size;
	struct memseg *msp;

	for (msp = memsegs; msp; msp = msp->next) {
		pbase = msp->pages_base;
		pend = msp->pages_end;
		base = ptob(pbase) + kpm_vbase;
		size = ptob(pend - pbase);
		func(arg, base, size);
	}
}
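
/*
 * Illustrative hat_kpm_walk() callback (a sketch only; the function
 * and variable names below are hypothetical):
 *
 *	static void
 *	count_kpm_bytes(void *arg, void *base, size_t size)
 *	{
 *		*(size_t *)arg += size;
 *	}
 *
 *	size_t total = 0;
 *	hat_kpm_walk(count_kpm_bytes, &total);
 */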


/* -- sfmmu_kpm internal section -- */

/*
 * Return the page frame number if a valid segkpm mapping exists
 * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
 * Should only be used by other sfmmu routines.
 */
pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;
	page_t	*pp;

	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	pp = page_numtopp_nolock(pfn);
	if (pp)
		return (pfn);
	else
		return ((pfn_t)PFN_INVALID);
}
264