/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * HAT interfaces used by the kernel debugger to interact with the VM system.
 * These interfaces are invoked when the world is stopped.  As such, no blocking
 * operations may be performed.
 */

#include <sys/cpuvar.h>
#include <sys/kdi_impl.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/mman.h>
#include <sys/bootconf.h>
#include <sys/cmn_err.h>
#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>
#include <sys/machsystm.h>

/*
 * The debugger needs direct access to the PTE of one page table entry
 * in order to implement vtop and physical reads and writes.
 */
static uintptr_t hat_kdi_page = 0;	/* vaddr for physical page accesses */
static uint_t use_kbm = 1;
uint_t hat_kdi_use_pae;			/* if 0, use x86pte32_t for pte type */

#if !defined(__xpv)
static x86pte_t *hat_kdi_pte = NULL;	/* vaddr of pte for hat_kdi_page */
#endif

/*
 * Get the address for remapping physical pages during boot
 */
void
hat_boot_kdi_init(void)
{
	hat_kdi_page = (uintptr_t)kbm_push(0);	/* first call gets address... */
}

/*
 * Switch to using a page in the kernel's va range for physical memory access.
 * We need to allocate a virtual page, then permanently map in the page that
 * contains the PTE to it.
 */
void
hat_kdi_init(void)
{
	/*LINTED:set but not used in function*/
	htable_t *ht __unused;

	/*
	 * Get a kernel page VA to use for phys mem access. Then make sure
	 * the VA has a page table.
	 */
	hat_kdi_use_pae = mmu.pae_hat;
	hat_kdi_page = (uintptr_t)vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
	ht = htable_create(kas.a_hat, hat_kdi_page, 0, NULL);
	use_kbm = 0;
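	/*
	 * htable_create() above guarantees that a level 0 pagetable now
	 * covers hat_kdi_page; with use_kbm cleared, kdi_prw() maps
	 * physical pages through this VA rather than the boot-time kbm
	 * window.
	 */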

#ifndef __xpv
	/*
	 * Get an address at which to put the pagetable and devload it.
	 */
	hat_kdi_pte = vmem_xalloc(heap_arena, MMU_PAGESIZE, MMU_PAGESIZE, 0,
	    0, NULL, NULL, VM_SLEEP);
	hat_devload(kas.a_hat, (caddr_t)hat_kdi_pte, MMU_PAGESIZE, ht->ht_pfn,
	    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
	    HAT_LOAD | HAT_LOAD_NOCONSIST);
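	/*
	 * hat_kdi_pte currently points at the base of the mapped pagetable
	 * page; advance it to the entry that maps hat_kdi_page itself.
	 */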
	hat_kdi_pte =
	    PT_INDEX_PTR(hat_kdi_pte, htable_va2entry(hat_kdi_page, ht));

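	/*
	 * Bump the valid entry count so this pagetable is never treated as
	 * empty and reclaimed once we drop our reference below.
	 */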
	HTABLE_INC(ht->ht_valid_cnt);
	htable_release(ht);
#endif
}

#ifdef __xpv

/*
 * translate a physical address to a machine address
 */
static uint64_t
kdi_ptom(uint64_t pa)
{
	extern pfn_t *mfn_list;
	ulong_t mfn = mfn_list[mmu_btop(pa)];

	return (pfn_to_pa(mfn) | (pa & MMU_PAGEOFFSET));
}

/*
 * This is like mfn_to_pfn(), but we can't use ontrap() from kmdb.
 * Instead we let the fault happen and kmdb deals with it.
 */
static uint64_t
kdi_mtop(uint64_t ma)
{
	pfn_t pfn;
	mfn_t mfn = ma >> MMU_PAGESHIFT;

	if (HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL) < mfn)
		return (ma | PFN_IS_FOREIGN_MFN);

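	/*
	 * Consult the machine-to-physical table and verify the round trip;
	 * a mismatch means the MFN belongs to another domain.
	 */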
	pfn = mfn_to_pfn_mapping[mfn];
	if (pfn >= mfn_count || pfn_to_mfn(pfn) != mfn)
		return (ma | PFN_IS_FOREIGN_MFN);
	return (pfn_to_pa(pfn) | (ma & MMU_PAGEOFFSET));
}

#else
#define	kdi_mtop(m)	(m)
#define	kdi_ptom(p)	(p)
#endif

/*ARGSUSED*/
int
kdi_vtop(uintptr_t va, uint64_t *pap)
{
	uintptr_t vaddr = va;
	size_t	len;
	pfn_t	pfn;
	uint_t	prot;
	int	level;
	x86pte_t pte;
	int	index;

	/*
	 * If the kernel's hat isn't running yet, we have to probe
	 * the boot loader's pagetables instead.
	 */
	if (!khat_running) {
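		/*
		 * kbm_probe() finds the first boot mapping at or above vaddr.
		 * If that mapping starts beyond va, then va itself is not
		 * mapped; otherwise adjust pfn for va's offset into it.
		 */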
		if (kbm_probe(&vaddr, &len, &pfn, &prot) == 0)
			return (ENOENT);
		if (vaddr > va)
			return (ENOENT);
		if (vaddr < va)
			pfn += mmu_btop(va - vaddr);
		*pap = pfn_to_pa(pfn) + (va & MMU_PAGEOFFSET);
		return (0);
	}

	/*
	 * We can't go through normal hat routines, so we'll use
	 * kdi_pread() to walk the page tables.
	 */
#if defined(__xpv)
	*pap = pfn_to_pa(CPU->cpu_current_hat->hat_htable->ht_pfn);
#else
	*pap = getcr3() & MMU_PAGEMASK;
#endif
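	/*
	 * Walk down from the top pagetable (whose physical address we just
	 * took from %cr3, or from the hat's root htable when running on the
	 * hypervisor).  At each level, compute the physical address of the
	 * relevant entry, read the PTE with a physical read, and stop when
	 * we hit a large page or a level 0 entry.
	 */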
	for (level = mmu.max_level; ; --level) {
		index = (va >> LEVEL_SHIFT(level)) & (mmu.ptes_per_table - 1);
		*pap += index << mmu.pte_size_shift;
		pte = 0;
		if (kdi_pread((caddr_t)&pte, mmu.pte_size, *pap, &len) != 0)
			return (ENOENT);
		if (pte == 0)
			return (ENOENT);
		if (level > 0 && level <= mmu.max_page_level &&
		    (pte & PT_PAGESIZE)) {
			*pap = kdi_mtop(pte & PT_PADDR_LGPG);
			break;
		} else {
			*pap = kdi_mtop(pte & PT_PADDR);
			if (level == 0)
				break;
		}
	}
	*pap += va & LEVEL_OFFSET(level);
	return (0);
}

static int
kdi_prw(caddr_t buf, size_t nbytes, uint64_t pa, size_t *ncopiedp, int doread)
{
	size_t	ncopied = 0;
	off_t	pgoff;
	size_t	sz;
	caddr_t	va;
	caddr_t	from;
	caddr_t	to;
	x86pte_t pte;

	/*
	 * If this is called before any initialization, fail.
	 */
	if (hat_kdi_page == 0)
		return (EAGAIN);

	while (nbytes > 0) {
		/*
		 * figure out the addresses and construct a minimal PTE
		 */
		pgoff = pa & MMU_PAGEOFFSET;
		sz = MIN(nbytes, MMU_PAGESIZE - pgoff);
		va = (caddr_t)hat_kdi_page + pgoff;
		pte = kdi_ptom(mmu_ptob(mmu_btop(pa))) | PT_VALID;
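		/*
		 * Each pass copies at most to the end of the current physical
		 * page.  On the hypervisor the PTE must name the machine
		 * frame, hence kdi_ptom(); writes also need PT_WRITABLE.
		 */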
		if (doread) {
			from = va;
			to = buf;
		} else {
			PTE_SET(pte, PT_WRITABLE);
			from = buf;
			to = va;
		}

		/*
		 * map the physical page
		 */
		if (use_kbm)
			(void) kbm_push(pa);
#if defined(__xpv)
		else
			(void) HYPERVISOR_update_va_mapping(
			    (uintptr_t)va, pte, UVMF_INVLPG);
#else
		else if (hat_kdi_use_pae)
			*hat_kdi_pte = pte;
		else
			*(x86pte32_t *)hat_kdi_pte = pte;
		mmu_tlbflush_entry((caddr_t)hat_kdi_page);
#endif
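		/*
		 * With the temporary mapping in place (and any stale TLB
		 * entry for hat_kdi_page flushed, either by hand or via
		 * UVMF_INVLPG), the physical page can be copied through va.
		 */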

		bcopy(from, to, sz);

		/*
		 * erase the mapping
		 */
		if (use_kbm)
			kbm_pop();
#if defined(__xpv)
		else
			(void) HYPERVISOR_update_va_mapping(
			    (uintptr_t)va, 0, UVMF_INVLPG);
#else
		else if (hat_kdi_use_pae)
			*hat_kdi_pte = 0;
		else
			*(x86pte32_t *)hat_kdi_pte = 0;
		mmu_tlbflush_entry((caddr_t)hat_kdi_page);
#endif

		buf += sz;
		pa += sz;
		nbytes -= sz;
		ncopied += sz;
	}

	if (ncopied == 0)
		return (ENOENT);

	*ncopiedp = ncopied;
	return (0);
}

int
kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
	return (kdi_prw(buf, nbytes, addr, ncopiedp, 1));
}

int
kdi_pwrite(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
	return (kdi_prw(buf, nbytes, addr, ncopiedp, 0));
}


/*
 * Return the number of bytes, relative to the beginning of a given range, that
 * are non-toxic (can be read from and written to with relative impunity).
 */
/*ARGSUSED*/
size_t
kdi_range_is_nontoxic(uintptr_t va, size_t sz, int write)
{
#if defined(__amd64)
	extern uintptr_t toxic_addr;
	extern size_t	toxic_size;

	/*
	 * Check the 64-bit toxic range; if the range overlaps it, only the
	 * bytes before the toxic region (if any) are safe.
	 */
	if (toxic_addr != 0 &&
	    va + sz >= toxic_addr &&
	    va < toxic_addr + toxic_size)
		return (va < toxic_addr ? toxic_addr - va : 0);

	/*
	 * Avoid the virtual address hole.
	 */
	if (va + sz >= hole_start && va < hole_end)
		return (va < hole_start ? hole_start - va : 0);

	return (sz);

#elif defined(__i386)
	extern void *device_arena_contains(void *, size_t, size_t *);
	uintptr_t v;

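	/*
	 * On 32-bit kernels the dangerous VAs are those handed out by the
	 * device arena; touching device mappings can have side effects, so
	 * stop the range at the first such address.
	 */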
	v = (uintptr_t)device_arena_contains((void *)va, sz, NULL);
	if (v == 0)
		return (sz);
	else if (v <= va)
		return (0);
	else
		return (v - va);

#endif	/* __i386 */
}
342