/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/pte.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/mmu.h>
#include <sys/cmn_err.h>
#include <sys/cpu.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <sys/bitmap.h>
#include <vm/rm.h>
#include <sys/t_lock.h>
#include <sys/vm_machparam.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#include <sys/prom_plat.h>
#include <sys/prom_debug.h>
#include <sys/privregs.h>
#include <sys/bootconf.h>
#include <sys/memlist.h>
#include <sys/memlist_plat.h>
#include <sys/cpu_module.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/fpu/fpusystm.h>

/*
 * External routines and data structures
 */
extern void	sfmmu_cache_flushcolor(int, pfn_t);

/*
 * Static routines
 */
static void	sfmmu_set_tlb(void);

/*
 * Global Data:
 */
caddr_t	textva, datava;
tte_t	ktext_tte, kdata_tte;		/* ttes for kernel text and data */

int	enable_bigktsb = 1;

tte_t bigktsb_ttes[MAX_BIGKTSB_TTES];
int bigktsb_nttes = 0;


/*
 * Controls the logic which enables the use of the
 * QUAD_LDD_PHYS ASI for TSB accesses.
 */
int	ktsb_phys = 0;
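/*
 * Note: when ktsb_phys is nonzero, TSB entries are read with the
 * quad-load-physical ASI, so the TSB pages need not be held in locked
 * virtual TLB entries; see the bigktsb handling in sfmmu_remap_kernel()
 * and sfmmu_set_tlb() below.
 */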


/*
 * This routine remaps the kernel using large ttes.  All entries except
 * locked ones will be removed from the tlb.  It assumes that the text
 * and data segments each reside in their own 4MB virtually and
 * physically contiguous chunk of memory.  This routine is only
 * executed by the first cpu.  The remaining cpus execute
 * sfmmu_mp_startup() instead.
 * XXX It assumes that the start of the text segment is KERNELBASE.
 * It should actually be based on start.
 */
void
sfmmu_remap_kernel(void)
{
	pfn_t	pfn;
	uint_t	attr;
	int	flags;

	extern char end[];
	extern struct as kas;

	textva = (caddr_t)(KERNELBASE & MMU_PAGEMASK4M);
	pfn = va_to_pfn(textva);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel text pfn");
	pfn &= TTE_PFNMASK(TTE4M);

	attr = PROC_TEXT | HAT_NOSYNC;
	flags = HAT_LOAD_LOCK | SFMMU_NO_TSBLOAD;
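	/*
	 * The intent of these flags: HAT_LOAD_LOCK asks for a locked
	 * mapping, and SFMMU_NO_TSBLOAD keeps these translations out of
	 * the kernel TSB, since they are wired directly into the TLB
	 * below instead.
	 */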
	sfmmu_memtte(&ktext_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb. Note we cannot lock Panther 32M/256M pages into the tlb.
	 * This note is here to make sure that no one tries to remap the
	 * kernel using 32M or 256M ttes on Panther cpus.
	 */
	TTE_SET_LOCKED(&ktext_tte);
	sfmmu_tteload(kas.a_hat, &ktext_tte, textva, NULL, flags);

	datava = (caddr_t)((uintptr_t)end & MMU_PAGEMASK4M);
	pfn = va_to_pfn(datava);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel data pfn");
	pfn &= TTE_PFNMASK(TTE4M);

	attr = PROC_DATA | HAT_NOSYNC;
	sfmmu_memtte(&kdata_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb.  We also set the mod bit to avoid taking dirty bit
	 * traps on kernel data.
	 */
	TTE_SET_LOCKED(&kdata_tte);
	TTE_SET_LOFLAGS(&kdata_tte, 0, TTE_HWWR_INT);
	sfmmu_tteload(kas.a_hat, &kdata_tte, datava,
	    (struct page *)NULL, flags);

	/*
	 * Create bigktsb ttes if necessary.
	 */
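	/*
	 * Illustrative example (numbers not from the code): with a 16MB
	 * kernel TSB, the loop below builds four 4M ttes covering
	 * ktsb_base through ktsb_base + 16MB and records them in
	 * bigktsb_ttes[0..3] so that sfmmu_set_tlb() can wire them into
	 * the dtlb.
	 */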
	if (enable_bigktsb) {
		int i = 0;
		caddr_t va = ktsb_base;
		size_t tsbsz = ktsb_sz;
		tte_t tte;

		ASSERT(va >= datava + MMU_PAGESIZE4M);
		ASSERT(tsbsz >= MMU_PAGESIZE4M);
		ASSERT(IS_P2ALIGNED(tsbsz, tsbsz));
		ASSERT(IS_P2ALIGNED(va, tsbsz));
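		/*
		 * IS_P2ALIGNED(tsbsz, tsbsz) holds only when tsbsz is a
		 * power of two, so the two checks above together ensure
		 * that the TSB is naturally aligned on its own size.
		 */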
		attr = PROC_DATA | HAT_NOSYNC;
		while (tsbsz != 0) {
			ASSERT(i < MAX_BIGKTSB_TTES);
			pfn = va_to_pfn(va);
			ASSERT(pfn != PFN_INVALID);
			ASSERT((pfn & ~TTE_PFNMASK(TTE4M)) == 0);
			sfmmu_memtte(&tte, pfn, attr, TTE4M);
			ASSERT(TTE_IS_MOD(&tte));
			/*
			 * No need to lock if we use physical addresses.
			 * Since we invalidate the kernel TSB using virtual
			 * addresses, it's an optimization to load them now
			 * so that we won't have to load them later.
			 */
			if (!ktsb_phys) {
				TTE_SET_LOCKED(&tte);
			}
			sfmmu_tteload(kas.a_hat, &tte, va, NULL, flags);
			bigktsb_ttes[i] = tte;
			va += MMU_PAGESIZE4M;
			tsbsz -= MMU_PAGESIZE4M;
			i++;
		}
		bigktsb_nttes = i;
	}

	sfmmu_set_tlb();
}

/*
 * Unmap all references to user TSBs from the TLB of the current processor.
 */
static void
sfmmu_clear_user_tsbs()
{
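	/*
	 * The user TSBs live at fixed kernel virtual ranges (utsb_vabase
	 * and utsb4m_vabase), so the demaps below are issued in the
	 * kernel context, one 8K page at a time across each
	 * tsb_slab_size region.
	 */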
	caddr_t va;
	caddr_t end_va;

	/* Demap all pages in the VA range for the first user TSB */
	va = utsb_vabase;
	end_va = va + tsb_slab_size;
	while (va < end_va) {
		vtag_flushpage(va, KCONTEXT);
		va += MMU_PAGESIZE;
	}

	/* Demap all pages in the VA range for the second user TSB */
	va = utsb4m_vabase;
	end_va = va + tsb_slab_size;
	while (va < end_va) {
		vtag_flushpage(va, KCONTEXT);
		va += MMU_PAGESIZE;
	}
}

/*
 * Set up the kernel's locked ttes.
 */
void
sfmmu_set_tlb(void)
{
	uint_t index;
	struct cpu_node *cpunode;

	cpunode = &cpunodes[getprocessorid()];
	index = cpunode->itlb_size;

	/*
	 * NOTE: the prom will do an explicit unmap of the VAs from the TLBs
	 * in the following functions before loading the new value into the
	 * TLB.  Thus if there was an entry already in the TLB at a different
	 * location, it will get unmapped before we load the entry at the
	 * specified location.
	 */
	(void) prom_itlb_load(index - 1, *(uint64_t *)&ktext_tte, textva);
	index = cpunode->dtlb_size;
	(void) prom_dtlb_load(index - 1, *(uint64_t *)&kdata_tte, datava);
	(void) prom_dtlb_load(index - 2, *(uint64_t *)&ktext_tte, textva);
	index -= 3;

	utsb_dtlb_ttenum = index--;
	utsb4m_dtlb_ttenum = index--;
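	/*
	 * Illustrative layout (assuming a 64-entry dtlb and no bigktsb
	 * entries): entry 63 holds kernel data, entry 62 kernel text,
	 * entry 61 the first user TSB and entry 60 the second, leaving
	 * dtlb_resv_ttenum at 60 as the low end of the reserved range.
	 */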
	sfmmu_clear_user_tsbs();

	if (!ktsb_phys && enable_bigktsb) {
		int i;
		caddr_t va = ktsb_base;
		uint64_t tte;

		ASSERT(bigktsb_nttes <= MAX_BIGKTSB_TTES);
		for (i = 0; i < bigktsb_nttes; i++) {
			tte = *(uint64_t *)&bigktsb_ttes[i];
			(void) prom_dtlb_load(index, tte, va);
			va += MMU_PAGESIZE4M;
			index--;
		}
	}

	dtlb_resv_ttenum = index + 1;
}

/*
 * This routine is executed by all other cpus except the first one
 * at initialization time.  It is responsible for taking over the
 * mmu from the prom.  We follow these steps:
 *	- Lock the kernel's ttes in the TLB
 *	- Initialize the tsb hardware registers
 *	- Take over the trap table
 *	- Flush the prom's locked entries from the TLB
 */
void
sfmmu_mp_startup(void)
{
	sfmmu_set_tlb();
	setwstate(WSTATE_KERN);
	prom_set_traptable(&trap_table);
	install_va_to_tte();
}

void
kdi_tlb_page_lock(caddr_t va, int do_dtlb)
{
	tte_t tte;
	pfn_t pfn = va_to_pfn(va);

	tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) | TTE_PFN_INTHI(pfn);
	tte.tte_intlo = TTE_PFN_INTLO(pfn) | TTE_LCK_INT | TTE_CP_INT |
	    TTE_PRIV_INT | TTE_HWWR_INT;
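	/*
	 * The hand-built tte above is a valid, locked, cacheable,
	 * privileged, hardware-writable 8K mapping of pfn; the lock bit
	 * keeps the entry from being displaced once it is loaded.
	 */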

	vtag_flushpage(va, KCONTEXT);

	sfmmu_itlb_ld(va, KCONTEXT, &tte);
	if (do_dtlb)
		sfmmu_dtlb_ld(va, KCONTEXT, &tte);
}

/*ARGSUSED*/
void
kdi_tlb_page_unlock(caddr_t va, int do_dtlb)
{
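	/*
	 * A demap removes a TLB entry even if it was loaded locked, so
	 * flushing the page should suffice to undo kdi_tlb_page_lock().
	 */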
	vtag_flushpage(va, KCONTEXT);
}

/* Clear user TSB information (applicable to hardware TSB walkers). */
void
sfmmu_clear_utsbinfo()
{
}

/*ARGSUSED*/
void
sfmmu_setup_tsbinfo(sfmmu_t *sfmmup)
{
}

/*
 * Invalidate a TSB.  If floating point is enabled, we use
 * a fast block-store routine; otherwise we use the old method
 * of walking the TSB setting each tag to TSBTAG_INVALID.
 */
void
sfmmu_inv_tsb(caddr_t tsb_base, uint_t tsb_bytes)
{
	extern void sfmmu_inv_tsb_fast(caddr_t, uint_t);
	struct tsbe *tsbaddr;

	/* CONSTCOND */
	if (fpu_exists) {
		sfmmu_inv_tsb_fast(tsb_base, tsb_bytes);
		return;
	}

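	/*
	 * Software path: invalidating just the high word of each tag is
	 * enough to make subsequent TSB probes miss; once the tag no
	 * longer matches, the stale tte data in the entry is never used.
	 */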
	for (tsbaddr = (struct tsbe *)tsb_base;
	    (uintptr_t)tsbaddr < (uintptr_t)(tsb_base + tsb_bytes);
	    tsbaddr++) {
		tsbaddr->tte_tag.tag_inthi = TSBTAG_INVALID;
	}

	if (ktsb_phys && tsb_base == ktsb_base)
		dcache_flushall();
}

/*
 * Completely flush the D-cache on all cpus.
 */
void
sfmmu_cache_flushall()
{
	int i;

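	/*
	 * Flush one virtual color at a time; together the
	 * CACHE_NUM_COLOR flushes displace the entire D-cache.
	 */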
	for (i = 0; i < CACHE_NUM_COLOR; i++)
		sfmmu_cache_flushcolor(i, 0);
}
358