/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Kernel Physical Mapping (kpm) segment driver (segkpm).
 *
 * Together with the hat_kpm* interfaces, this driver provides an
 * alternative mechanism for kernel mappings within the 64-bit Solaris
 * operating system, which allows all of physical memory to be mapped
 * into the kernel address space at once. This is feasible in 64-bit
 * kernels, e.g. on UltraSPARC II and later processors, since the
 * available VA range is much larger than the possible physical memory.
 * Currently, all physical memory that is represented by the list of
 * memory segments (memsegs) is supported.
 *
 * Segkpm mappings also have very low overhead, and large pages are
 * used (when possible) to minimize the TLB and TSB footprint. The
 * scheme is also extensible to architectures other than SPARC
 * (e.g. AMD64). Its main advantage is the avoidance of the
 * TLB-shootdown X-calls that are normally needed when a kernel
 * (global) mapping has to be removed.
 *
 * The first example of a kernel facility that uses the segkpm mapping
 * scheme is seg_map, where it is used as an alternative to
 * hat_memload(). See also the hat layer for more information about
 * the hat_kpm* routines. The kpm facility can be turned off at boot
 * time (e.g. via /etc/system).
 */
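
/*
 * As a small illustration of the scheme (a sketch, not code from this
 * driver): because all of physical memory is mapped at once, the
 * kernel virtual address of a page can be derived directly from the
 * page's identity via the hat_kpm* interfaces, e.g.
 *
 *	caddr_t va = hat_kpm_page2va(pp, 1);
 *
 * instead of establishing a temporary mapping with hat_memload() and
 * later unloading it with the associated TLB-shootdown X-call.
 */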

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/bitmap.h>
#include <sys/atomic.h>
#include <sys/lgrp.h>

#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>

/*
 * Global kpm controls.
 * See also platform and mmu specific controls.
 *
 * kpm_enable -- global on/off switch for segkpm.
 * . Set by default on 64-bit platforms that have kpm support.
 * . Will be disabled from the platform layer if not supported.
 * . Can be disabled via /etc/system.
 *
 * kpm_smallpages -- use only the regular/system pagesize for kpm mappings.
 * . Can be useful for critical debugging of kpm clients.
 * . Set to zero by default for platforms that support kpm large pages.
 *   The use of kpm large pages reduces the footprint of kpm metadata
 *   and has all the other advantages of using large pages (e.g. TLB
 *   miss reduction).
 * . Set by default for platforms that don't support kpm large pages or
 *   where large pages cannot be used for other reasons (e.g. there are
 *   only a few fully associative TLB entries available for large pages).
 *
 * segmap_kpm -- separate on/off switch for segmap using segkpm:
 * . Set by default.
 * . Will be disabled when kpm_enable is zero.
 * . Will be disabled when MAXBSIZE != PAGESIZE.
 * . Can be disabled via /etc/system.
 */
int kpm_enable = 1;
int kpm_smallpages = 0;
int segmap_kpm = 1;
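
/*
 * For example (an illustrative sketch, not part of the original file),
 * kpm could be disabled at boot time by placing the following line in
 * /etc/system:
 *
 *	set kpm_enable = 0
 *
 * segmap_kpm can be tuned the same way.
 */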

/*
 * Private seg op routines.
 */
faultcode_t segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr,
			size_t len, enum fault_type type, enum seg_rw rw);
static void	segkpm_dump(struct seg *);
static void	segkpm_badop(void);
static int	segkpm_notsup(void);
static int	segkpm_capable(struct seg *, segcapability_t);

#define	SEGKPM_BADOP(t)	(t(*)())segkpm_badop
#define	SEGKPM_NOTSUP	(int(*)())segkpm_notsup

static struct seg_ops segkpm_ops = {
	SEGKPM_BADOP(int),	/* dup */
	SEGKPM_BADOP(int),	/* unmap */
	SEGKPM_BADOP(void),	/* free */
	segkpm_fault,
	SEGKPM_BADOP(int),	/* faulta */
	SEGKPM_BADOP(int),	/* setprot */
	SEGKPM_BADOP(int),	/* checkprot */
	SEGKPM_BADOP(int),	/* kluster */
	SEGKPM_BADOP(size_t),	/* swapout */
	SEGKPM_BADOP(int),	/* sync */
	SEGKPM_BADOP(size_t),	/* incore */
	SEGKPM_BADOP(int),	/* lockop */
	SEGKPM_BADOP(int),	/* getprot */
	SEGKPM_BADOP(u_offset_t), /* getoffset */
	SEGKPM_BADOP(int),	/* gettype */
	SEGKPM_BADOP(int),	/* getvp */
	SEGKPM_BADOP(int),	/* advise */
	segkpm_dump,		/* dump */
	SEGKPM_NOTSUP,		/* pagelock */
	SEGKPM_BADOP(int),	/* setpgsz */
	SEGKPM_BADOP(int),	/* getmemid */
	SEGKPM_BADOP(lgrp_mem_policy_info_t *),	/* getpolicy */
	segkpm_capable,		/* capable */
	seg_inherit_notsup	/* inherit */
};

/*
 * kpm_pgsz and kpm_pgshft are set by platform layer.
 */
size_t		kpm_pgsz;	/* kpm page size */
uint_t		kpm_pgshft;	/* kpm page shift */
u_offset_t	kpm_pgoff;	/* kpm page offset mask */
uint_t		kpmp2pshft;	/* kpm page to page shift */
pgcnt_t		kpmpnpgs;	/* how many pages per kpm page */


#ifdef	SEGKPM_SUPPORT

int
segkpm_create(struct seg *seg, void *argsp)
{
	struct segkpm_data *skd;
	struct segkpm_crargs *b = (struct segkpm_crargs *)argsp;
	ushort_t *p;
	int i, j;

	ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
	ASSERT(btokpmp(seg->s_size) >= 1 &&
	    kpmpageoff((uintptr_t)seg->s_base) == 0 &&
	    kpmpageoff((uintptr_t)seg->s_base + seg->s_size) == 0);

	skd = kmem_zalloc(sizeof (struct segkpm_data), KM_SLEEP);

	seg->s_data = (void *)skd;
	seg->s_ops = &segkpm_ops;
	skd->skd_prot = b->prot;

	/*
	 * (1) Segkpm virtual addresses are based on physical addresses.
	 * Because of this, and in contrast to other segment drivers, it
	 * is often necessary to allocate a page first in order to be
	 * able to calculate the final segkpm virtual address.
	 * (2) Page allocation is done by calling page_create_va(); one
	 * important input argument is a virtual address (also expressed
	 * by the "va" in the function name). This function is highly
	 * optimized to select the right page for optimal processor and
	 * platform support (e.g. virtually addressed caches (VAC),
	 * physically addressed caches, NUMA).
	 *
	 * Because of (1), the approach is to generate a fake virtual
	 * address for calling page_create_va(). In order to exploit
	 * the abilities of (2), especially to utilize the cache
	 * hierarchy (3) and to avoid VAC alias conflicts (4), the
	 * selection has to be done carefully. For each virtual color
	 * a separate counter is provided (4). The count values are
	 * used for the utilization of all cache lines (3) and
	 * correspond to the cache bins.
	 */
	skd->skd_nvcolors = b->nvcolors;

	p = skd->skd_va_select =
	    kmem_zalloc(NCPU * b->nvcolors * sizeof (ushort_t), KM_SLEEP);

	for (i = 0; i < NCPU; i++)
		for (j = 0; j < b->nvcolors; j++, p++)
			*p = j;

	return (0);
}
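
/*
 * Worked example (illustrative numbers only): with nvcolors = 4, the
 * counters for each CPU are initialized to { 0, 1, 2, 3 } above.
 * segkpm_create_va() below always advances a counter by nvcolors, so
 * the counter for color j stays congruent to j modulo nvcolors while
 * its absolute value keeps growing, which rotates page_create_va()
 * round robin through the cache bins of that color.
 */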

/*
 * This routine is called via a machine specific fault handling
 * routine.
 */
/* ARGSUSED */
faultcode_t
segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
	enum fault_type type, enum seg_rw rw)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	switch (type) {
	case F_INVAL:
		return (hat_kpm_fault(hat, addr));
	case F_SOFTLOCK:
	case F_SOFTUNLOCK:
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}

#define	addr_to_vcolor(addr, vcolors) \
	((int)(((uintptr_t)(addr) & ((vcolors << PAGESHIFT) - 1)) >> PAGESHIFT))
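
/*
 * For example (illustrative numbers): with PAGESHIFT = 13 (8K pages)
 * and vcolors = 4, the mask is (4 << 13) - 1 = 0x7fff, so
 *
 *	addr_to_vcolor(0x6000, 4) == (0x6000 & 0x7fff) >> 13 == 3
 *
 * i.e. the macro extracts the virtual color bits that sit directly
 * above the page offset.
 */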

/*
 * Create a virtual address that can be used for invocations of
 * page_create_va. The goal is to utilize the cache hierarchy (round
 * robin bins) and to select the right color for virtually indexed
 * caches. It isn't exact, since we also increment the bin counter
 * when the caller uses VOP_GETPAGE and gets a hit in the page
 * cache, but we keep the bins turning for cache distribution
 * (see also the segkpm_create block comment).
 */
caddr_t
segkpm_create_va(u_offset_t off)
{
	int vcolor;
	ushort_t *p;
	struct segkpm_data *skd = (struct segkpm_data *)segkpm->s_data;
	int nvcolors = skd->skd_nvcolors;
	caddr_t	va;

	vcolor = (nvcolors > 1) ? addr_to_vcolor(off, nvcolors) : 0;
	p = &skd->skd_va_select[(CPU->cpu_id * nvcolors) + vcolor];
	va = (caddr_t)ptob(*p);

	atomic_add_16(p, nvcolors);

	return (va);
}
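
/*
 * Typical use by a kpm client (a sketch of the intended call sequence;
 * the flags and error handling are illustrative, not copied from
 * seg_map):
 *
 *	caddr_t va = segkpm_create_va(off);
 *	page_t *pp = page_create_va(vp, off, PAGESIZE,
 *	    PG_WAIT | PG_EXCL, segkpm, va);
 *	...
 *	va = hat_kpm_mapin(pp, kpme);
 *	(access the page contents through va)
 *	hat_kpm_mapout(pp, kpme, va);
 */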

/*
 * Unload the mapping if the instance has an active kpm mapping.
 */
void
segkpm_mapout_validkpme(struct kpme *kpme)
{
	caddr_t vaddr;
	page_t *pp;

retry:
	if ((pp = kpme->kpe_page) == NULL) {
		return;
	}

	if (page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_RECLAIM) == 0)
		goto retry;

	/*
	 * Check that the segkpm mapping was not unloaded in the meantime.
	 */
	if (kpme->kpe_page == NULL) {
		page_unlock(pp);
		return;
	}

	vaddr = hat_kpm_page2va(pp, 1);
	hat_kpm_mapout(pp, kpme, vaddr);
	page_unlock(pp);
}
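
/*
 * A kpme is typically embedded in a client structure, e.g. (a sketch,
 * assuming the segmap client; see struct smap in seg_map.h):
 *
 *	struct smap {
 *		...
 *		struct kpme	sm_kpme;
 *		...
 *	};
 *
 * The client passes &smp->sm_kpme to hat_kpm_mapin() and can later
 * call segkpm_mapout_validkpme(&smp->sm_kpme) to tear the mapping
 * down if it is still present.
 */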

static void
segkpm_badop()
{
	panic("segkpm_badop");
}

#else	/* SEGKPM_SUPPORT */

/* segkpm stubs */

/*ARGSUSED*/
int segkpm_create(struct seg *seg, void *argsp) { return (0); }

/* ARGSUSED */
faultcode_t
segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
	enum fault_type type, enum seg_rw rw)
{
	return ((faultcode_t)0);
}

/* ARGSUSED */
caddr_t segkpm_create_va(u_offset_t off) { return (NULL); }

/* ARGSUSED */
void segkpm_mapout_validkpme(struct kpme *kpme) {}

static void
segkpm_badop() {}

#endif	/* SEGKPM_SUPPORT */

static int
segkpm_notsup()
{
	return (ENOTSUP);
}

/*
 * segkpm pages are not dumped, so we just return.
 */
/*ARGSUSED*/
static void
segkpm_dump(struct seg *seg)
{}

/*
 * We claim to have no special capabilities.
 */
/*ARGSUSED*/
static int
segkpm_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}