xref: /titanic_41/usr/src/uts/i86pc/vm/hat_kdi.c (revision 036aa26189b72905886e39d76d63352185cfd9d2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * HAT interfaces used by the kernel debugger to interact with the VM system.
31  * These interfaces are invoked when the world is stopped.  As such, no blocking
32  * operations may be performed.
33  */
34 
35 #include <sys/cpuvar.h>
36 #include <sys/kdi_impl.h>
37 #include <sys/errno.h>
38 #include <sys/systm.h>
39 #include <sys/sysmacros.h>
40 #include <sys/mman.h>
41 #include <sys/bootconf.h>
42 #include <sys/cmn_err.h>
43 #include <vm/seg_kmem.h>
44 #include <vm/hat_i86.h>
45 #include <sys/bootinfo.h>
46 #include <vm/kboot_mmu.h>
47 #include <sys/machsystm.h>
48 
49 /*
50  * The debugger needs direct access to the PTE of one page table entry
51  * in order to implement vtop and physical read/writes
52  */
static uintptr_t hat_kdi_page = 0;	/* vaddr for physical page accesses */
54 static x86pte_t *hat_kdi_pte = NULL;	/* vaddr of pte for hat_kdi_page */
55 static uint_t use_kbm = 1;
56 uint_t hat_kdi_use_pae;			/* if 0, use x86pte32_t for pte type */
57 
58 /*
59  * Get the address for remapping physical pages during boot
60  */
void
hat_boot_kdi_init(void)
{
	/*
	 * During early boot we piggyback on the boot-time kbm window:
	 * the first kbm_push() call returns the (fixed) virtual address
	 * at which subsequent pushes will map physical pages.
	 */
	hat_kdi_page = (uintptr_t)kbm_push(0);	/* first call gets address... */
}
66 
67 /*
68  * Switch to using a page in the kernel's va range for physical memory access.
69  * We need to allocate a virtual page, then permanently map in the page that
70  * contains the PTE to it.
71  */
void
hat_kdi_init(void)
{
	/*LINTED:set but not used in function*/
	htable_t *ht;

	/*
	 * Get a kernel page VA to use for phys mem access. Then make sure
	 * the VA has a page table.
	 */
	hat_kdi_use_pae = mmu.pae_hat;
	hat_kdi_page = (uintptr_t)vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
	ht = htable_create(kas.a_hat, hat_kdi_page, 0, NULL);
	use_kbm = 0;		/* from here on, stop using the boot kbm window */

	/*
	 * Get an address at which to put the pagetable and devload it.
	 * We map the page table page itself so the debugger can later
	 * install/remove PTEs for hat_kdi_page directly, with no
	 * blocking hat calls (the world is stopped when kdi runs).
	 */
	hat_kdi_pte = vmem_xalloc(heap_arena, MMU_PAGESIZE, MMU_PAGESIZE, 0,
	    0, NULL, NULL, VM_SLEEP);
	hat_devload(kas.a_hat, (caddr_t)hat_kdi_pte, MMU_PAGESIZE, ht->ht_pfn,
	    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
	    HAT_LOAD | HAT_LOAD_NOCONSIST);
	/* point at the specific PTE slot within the table for hat_kdi_page */
	hat_kdi_pte =
	    PT_INDEX_PTR(hat_kdi_pte, htable_va2entry(hat_kdi_page, ht));

	/*
	 * Bump the valid count so the htable is never stolen/freed; we hold
	 * this mapping permanently even though no PTE is currently valid.
	 */
	HTABLE_INC(ht->ht_valid_cnt);
	htable_release(ht);
}
101 #define	kdi_mtop(m)	(m)
102 #define	kdi_ptom(p)	(p)
103 
/*
 * Translate a virtual address to a physical address for the debugger.
 * Returns 0 with *pap set on success, ENOENT if no valid mapping exists.
 * Must not block; before the kernel HAT is live it consults the boot
 * loader's pagetables, afterwards it manually walks the hardware
 * pagetables via kdi_pread().
 */
/*ARGSUSED*/
int
kdi_vtop(uintptr_t va, uint64_t *pap)
{
	uintptr_t vaddr = va;
	size_t	len;
	pfn_t	pfn;
	uint_t	prot;
	int	level;
	x86pte_t pte;
	int	index;

	/*
	 * if the mmu struct isn't relevant yet, we need to probe
	 * the boot loader's pagetables.
	 */
	if (!khat_running) {
		/* kbm_probe() finds the mapping at or above vaddr, if any */
		if (kbm_probe(&vaddr, &len, &pfn, &prot) == 0)
			return (ENOENT);
		if (vaddr > va)
			return (ENOENT);
		if (vaddr < va)
			pfn += mmu_btop(va - vaddr);
		/*
		 * NOTE(review): if kbm_probe() returns a page-aligned vaddr,
		 * (vaddr & MMU_PAGEOFFSET) is 0 and the sub-page offset of
		 * va is dropped here; (va & MMU_PAGEOFFSET) may be intended —
		 * confirm against kbm_probe()'s contract in kboot_mmu.c.
		 */
		*pap = pfn_to_pa(pfn) + (vaddr & MMU_PAGEOFFSET);
		return (0);
	}

	/*
	 * We can't go through normal hat routines, so we'll use
	 * kdi_pread() to walk the page tables
	 */
	*pap = getcr3() & MMU_PAGEMASK;		/* pa of top-level table */
	for (level = mmu.max_level; ; --level) {
		/* index of this va's entry within the current-level table */
		index = (va >> LEVEL_SHIFT(level)) & (mmu.ptes_per_table - 1);
		*pap += index << mmu.pte_size_shift;
		pte = 0;
		if (kdi_pread((caddr_t)&pte, mmu.pte_size, *pap, &len) != 0)
			return (ENOENT);
		if (pte == 0)
			return (ENOENT);
		/* large page mapping terminates the walk above level 0 */
		if (level > 0 && level <= mmu.max_page_level &&
		    (pte & PT_PAGESIZE)) {
			*pap = kdi_mtop(pte & PT_PADDR_LGPG);
			break;
		} else {
			/* pa of the next-level table, or the final page */
			*pap = kdi_mtop(pte & PT_PADDR);
			if (level == 0)
				break;
		}
	}
	/* add in the offset of va within the (possibly large) page */
	*pap += va & LEVEL_OFFSET(level);
	return (0);
}
157 
/*
 * Common worker for kdi_pread()/kdi_pwrite(): copy nbytes between buf and
 * physical address pa, a page at a time, by temporarily mapping each
 * physical page at hat_kdi_page.  doread non-zero means physical->buf.
 * On success returns 0 with *ncopiedp set; EAGAIN if called before init,
 * ENOENT if nothing was copied.  Runs with the world stopped: no locks,
 * no blocking.
 */
static int
kdi_prw(caddr_t buf, size_t nbytes, uint64_t pa, size_t *ncopiedp, int doread)
{
	size_t	ncopied = 0;
	off_t	pgoff;
	size_t	sz;
	caddr_t	va;
	caddr_t	from;
	caddr_t	to;
	x86pte_t pte;

	/*
	 * if this is called before any initialization - fail
	 */
	if (hat_kdi_page == 0)
		return (EAGAIN);

	while (nbytes > 0) {
		/*
		 * figure out the addresses and construct a minimal PTE
		 */
		pgoff = pa & MMU_PAGEOFFSET;
		/* never cross a page boundary in one copy */
		sz = MIN(nbytes, MMU_PAGESIZE - pgoff);
		va = (caddr_t)hat_kdi_page + pgoff;
		pte = mmu_ptob(mmu_btop(pa)) | PT_VALID;
		if (doread) {
			from = va;
			to = buf;
		} else {
			/* writes need a writable mapping */
			PTE_SET(pte, PT_WRITABLE);
			from = buf;
			to = va;
		}

		/*
		 * map the physical page
		 */
		if (use_kbm)
			(void) kbm_push(pa);
		else if (hat_kdi_use_pae)
			*hat_kdi_pte = pte;
		else
			/* non-PAE: PTEs are 32 bits wide */
			*(x86pte32_t *)hat_kdi_pte = pte;
		/* invalidate any stale TLB entry before touching va */
		mmu_tlbflush_entry((caddr_t)hat_kdi_page);

		bcopy(from, to, sz);

		/*
		 * erase the mapping
		 */
		if (use_kbm)
			kbm_pop();
		else if (hat_kdi_use_pae)
			*hat_kdi_pte = 0;
		else
			*(x86pte32_t *)hat_kdi_pte = 0;
		mmu_tlbflush_entry((caddr_t)hat_kdi_page);

		buf += sz;
		pa += sz;
		nbytes -= sz;
		ncopied += sz;
	}

	if (ncopied == 0)
		return (ENOENT);

	*ncopiedp = ncopied;
	return (0);
}
228 
/*
 * Read nbytes of physical memory at addr into buf for the debugger.
 * Returns 0 with *ncopiedp set, or an errno (see kdi_prw()).
 */
int
kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
	return (kdi_prw(buf, nbytes, addr, ncopiedp, 1));
}
234 
/*
 * Write nbytes from buf to physical memory at addr for the debugger.
 * Returns 0 with *ncopiedp set, or an errno (see kdi_prw()).
 */
int
kdi_pwrite(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
	return (kdi_prw(buf, nbytes, addr, ncopiedp, 0));
}
240 
241 
242 /*
243  * Return the number of bytes, relative to the beginning of a given range, that
244  * are non-toxic (can be read from and written to with relative impunity).
245  */
246 /*ARGSUSED*/
247 size_t
248 kdi_range_is_nontoxic(uintptr_t va, size_t sz, int write)
249 {
250 #if defined(__amd64)
251 	extern uintptr_t toxic_addr;
252 	extern size_t	toxic_size;
253 
254 	/*
255 	 * Check 64 bit toxic range.
256 	 */
257 	if (toxic_addr != 0 &&
258 	    va + sz >= toxic_addr &&
259 	    va < toxic_addr + toxic_size)
260 		return (va < toxic_addr ? toxic_addr - va : 0);
261 
262 	/*
263 	 * avoid any Virtual Address hole
264 	 */
265 	if (va + sz >= hole_start && va < hole_end)
266 		return (va < hole_start ? hole_start - va : 0);
267 
268 	return (sz);
269 
270 #elif defined(__i386)
271 	extern void *device_arena_contains(void *, size_t, size_t *);
272 	uintptr_t v;
273 
274 	v = (uintptr_t)device_arena_contains((void *)va, sz, NULL);
275 	if (v == 0)
276 		return (sz);
277 	else if (v <= va)
278 		return (0);
279 	else
280 		return (v - va);
281 
282 #endif	/* __i386 */
283 }
284