/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - segment for non-faulting loads.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/page.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/vpage.h>

/*
 * Private seg op routines.
 */
static int	segnf_dup(struct seg *seg, struct seg *newseg);
static int	segnf_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segnf_free(struct seg *seg);
static faultcode_t segnf_nomap(void);
static int	segnf_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segnf_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static void	segnf_badop(void);
static int	segnf_nop(void);
static int	segnf_getprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t *protv);
static u_offset_t segnf_getoffset(struct seg *seg, caddr_t addr);
static int	segnf_gettype(struct seg *seg, caddr_t addr);
static int	segnf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static void	segnf_dump(struct seg *seg);
static int	segnf_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segnf_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segnf_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segnf_getpolicy(struct seg *seg,
    caddr_t addr);


struct seg_ops segnf_ops = {
	segnf_dup,
	segnf_unmap,
	segnf_free,
	(faultcode_t (*)(struct hat *, struct seg *, caddr_t, size_t,
	    enum fault_type, enum seg_rw))
		segnf_nomap,		/* fault */
	(faultcode_t (*)(struct seg *, caddr_t))
		segnf_nomap,		/* faulta */
	segnf_setprot,
	segnf_checkprot,
	(int (*)())segnf_badop,		/* kluster */
	(size_t (*)(struct seg *))NULL,	/* swapout */
	(int (*)(struct seg *, caddr_t, size_t, int, uint_t))
		segnf_nop,		/* sync */
	(size_t (*)(struct seg *, caddr_t, size_t, char *))
		segnf_nop,		/* incore */
	(int (*)(struct seg *, caddr_t, size_t, int, int, ulong_t *, size_t))
		segnf_nop,		/* lockop */
	segnf_getprot,
	segnf_getoffset,
	segnf_gettype,
	segnf_getvp,
	(int (*)(struct seg *, caddr_t, size_t, uint_t))
		segnf_nop,		/* advise */
	segnf_dump,
	segnf_pagelock,
	segnf_setpagesize,
	segnf_getmemid,
	segnf_getpolicy,
};
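
/*
 * A minimal sketch of how the common VM layer reaches these entry points,
 * assuming the usual SEGOP_* dispatch macros from <vm/seg.h>; the fault
 * slot above resolves to segnf_nomap(), so any fault taken on one of
 * these segments just reports FC_NOMAP:
 *
 *	faultcode_t res;
 *
 *	res = SEGOP_FAULT(as->a_hat, seg, addr, PAGESIZE, F_INVAL, S_READ);
 *	ASSERT(res == FC_NOMAP);
 */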

/*
 * Vnode and per-virtual-color zero pages used for the nf mappings;
 * zpp holds one zeroed page per VAC color (just a single page if there
 * is no VAC).
 */
static kmutex_t segnf_lock;
static struct vnode zvp;
static struct page **zpp;

#define	addr_to_vcolor(addr)						\
	((shm_alignment) ?						\
	((int)(((uintptr_t)(addr) & (shm_alignment - 1)) >> PAGESHIFT)) : 0)
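
/*
 * A worked example of the color computation, assuming for illustration a
 * VAC machine where shm_alignment == 4 * PAGESIZE and "base" is aligned
 * to shm_alignment:
 *
 *	addr_to_vcolor(base)			== 0
 *	addr_to_vcolor(base + 2 * PAGESIZE)	== 2
 *	addr_to_vcolor(base + 5 * PAGESIZE)	== 1
 *
 * segnf_create() below uses this to pick zpp[color], so the zero page is
 * mapped with the same virtual color as the address being backed.
 */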

/*
 * We try to limit the number of non-fault segments created.
 * Non-fault segments are created to optimize SPARC V9 code which uses
 * the SPARC non-faulting load ASI (ASI_PRIMARY_NOFAULT).
 *
 * There are several reasons why creating too many non-fault segments
 * could cause problems.
 *
 *	First, excessive allocation of kernel resources for the seg
 *	structures and the HAT data to map the zero pages.
 *
 *	Second, creating nofault segments actually uses up user virtual
 *	address space.  This makes it unavailable for subsequent mmap(0, ...)
 *	calls which use as_gap() to find empty va regions.  Creation of too
 *	many nofault segments could thus interfere with the ability of the
 *	runtime linker to load a shared object.
 */
#define	MAXSEGFORNF	(10000)
#define	MAXNFSEARCH	(5)


/*
 * Must be called from startup()
 */
void
segnf_init()
{
	mutex_init(&segnf_lock, NULL, MUTEX_DEFAULT, NULL);
}


/*
 * Create a no-fault segment.
 *
 * The no-fault segment is not technically necessary, as the code in
 * nfload() in trap.c will emulate the SPARC instruction and load
 * a value of zero in the destination register.
 *
 * However, this code tries to put a page of zeros at the nofault address
 * so that subsequent non-faulting loads to the same page will not
 * trap with a tlb miss.
 *
 * In order to help limit the number of segments we merge adjacent nofault
 * segments into a single segment.  If we get a large number of segments
 * we'll also try to delete another nearby nf segment.
 */
/* ARGSUSED */
int
segnf_create(struct seg *seg, void *argsp)
{
	uint_t prot;
	pgcnt_t	vacpgs;
	u_offset_t off = 0;
	caddr_t	vaddr = NULL;
	int i, color;
	struct seg *s1;
	struct seg *s2;
	size_t size;
	struct as *as = seg->s_as;

	ASSERT(as && AS_WRITE_HELD(as, &as->a_lock));

	/*
	 * Need a page per virtual color or just 1 if no vac.
	 */
	mutex_enter(&segnf_lock);
	if (zpp == NULL) {
		struct seg kseg;

		vacpgs = 1;
		if (shm_alignment > PAGESIZE) {
			vacpgs = shm_alignment >> PAGESHIFT;
		}

		zpp = kmem_alloc(sizeof (*zpp) * vacpgs, KM_SLEEP);

		kseg.s_as = &kas;
		for (i = 0; i < vacpgs; i++, off += PAGESIZE,
		    vaddr += PAGESIZE) {
			zpp[i] = page_create_va(&zvp, off, PAGESIZE,
			    PG_WAIT | PG_NORELOC, &kseg, vaddr);
			page_io_unlock(zpp[i]);
			page_downgrade(zpp[i]);
			pagezero(zpp[i], 0, PAGESIZE);
		}
	}
	mutex_exit(&segnf_lock);

	hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);

	/*
	 * s_data can't be NULL because of ASSERTS in the common vm code.
	 */
	seg->s_ops = &segnf_ops;
	seg->s_data = seg;
	seg->s_flags |= S_PURGE;

	mutex_enter(&as->a_contents);
	as->a_flags |= AS_NEEDSPURGE;
	mutex_exit(&as->a_contents);

	prot = PROT_READ;
	color = addr_to_vcolor(seg->s_base);
	if (as != &kas)
		prot |= PROT_USER;
	hat_memload(as->a_hat, seg->s_base, zpp[color],
	    prot | HAT_NOFAULT, HAT_LOAD);

	/*
	 * At this point see if we can concatenate a segment to
	 * a non-fault segment immediately before and/or after it.
	 */
	if ((s1 = AS_SEGPREV(as, seg)) != NULL &&
	    s1->s_ops == &segnf_ops &&
	    s1->s_base + s1->s_size == seg->s_base) {
		size = s1->s_size;
		seg_free(s1);
		seg->s_base -= size;
		seg->s_size += size;
	}

	if ((s2 = AS_SEGNEXT(as, seg)) != NULL &&
	    s2->s_ops == &segnf_ops &&
	    seg->s_base + seg->s_size == s2->s_base) {
		size = s2->s_size;
		seg_free(s2);
		seg->s_size += size;
	}

	/*
	 * If we already have a lot of segments, try to delete some other
	 * nofault segment to reduce the probability of uncontrolled segment
	 * creation.
	 *
	 * The code looks around quickly (no more than MAXNFSEARCH segments
	 * each way) for another NF segment and then deletes it.
	 */
	if (avl_numnodes(&as->a_segtree) > MAXSEGFORNF) {
		size = 0;
		s2 = NULL;
		s1 = AS_SEGPREV(as, seg);
		while (size++ < MAXNFSEARCH && s1 != NULL) {
			if (s1->s_ops == &segnf_ops)
				s2 = s1;
			s1 = AS_SEGPREV(s1->s_as, s1);
		}
		if (s2 == NULL) {
			s1 = AS_SEGNEXT(as, seg);
			while (size-- > 0 && s1 != NULL) {
				if (s1->s_ops == &segnf_ops)
					s2 = s1;
				s1 = AS_SEGNEXT(s1->s_as, s1);
			}
		}
		if (s2 != NULL)
			seg_unmap(s2);
	}

	return (0);
}
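
/*
 * A minimal sketch of how a no-fault segment gets established, assuming
 * the handler for non-faulting loads (nfload() in trap.c, mentioned above)
 * goes through the standard as_map() interface; the exact call site and
 * locking may differ from this illustration, and "nf_addr" is simply a
 * placeholder for the referenced address:
 *
 *	caddr_t base = (caddr_t)((uintptr_t)nf_addr & PAGEMASK);
 *
 *	as_rangelock(as);
 *	(void) as_map(as, base, PAGESIZE, segnf_create, NULL);
 *	as_rangeunlock(as);
 *
 * segnf_create() ignores its argsp argument, so NULL is sufficient.
 */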

/*
 * No-fault segments are never really needed in a new address space, so
 * they are never dup'd; reaching this entry point indicates a bug, hence
 * the panic.
 */
/* ARGSUSED */
static int
segnf_dup(struct seg *seg, struct seg *newseg)
{
	panic("segnf_dup");
	return (0);
}

/*
 * Unmap len bytes at addr; depending on the range this frees, shrinks,
 * or splits the segment.
 */
static int
segnf_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Check for bad sizes.
	 */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
		cmn_err(CE_PANIC, "segnf_unmap: bad unmap size");
	}

	/*
	 * Unload any hardware translations in the range to be taken out.
	 */
	hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);

	if (addr == seg->s_base && len == seg->s_size) {
		/*
		 * Freeing entire segment.
		 */
		seg_free(seg);
	} else if (addr == seg->s_base) {
		/*
		 * Freeing the beginning of the segment.
		 */
		seg->s_base += len;
		seg->s_size -= len;
	} else if (addr + len == seg->s_base + seg->s_size) {
		/*
		 * Freeing the end of the segment.
		 */
		seg->s_size -= len;
	} else {
		/*
		 * The section to go is in the middle of the segment, so we
		 * have to cut it into two segments.  We shrink the existing
		 * "seg" at the low end, and create "nseg" for the high end.
		 */
		caddr_t nbase = addr + len;
		size_t nsize = (seg->s_base + seg->s_size) - nbase;
		struct seg *nseg;

		/*
		 * Trim down "seg" before trying to stick "nseg" into the as.
		 */
		seg->s_size = addr - seg->s_base;
		nseg = seg_alloc(seg->s_as, nbase, nsize);
		if (nseg == NULL)
			cmn_err(CE_PANIC, "segnf_unmap: seg_alloc failed");

		/*
		 * s_data can't be NULL because of ASSERTs in common VM code.
		 */
		nseg->s_ops = seg->s_ops;
		nseg->s_data = nseg;
		nseg->s_flags |= S_PURGE;
		mutex_enter(&seg->s_as->a_contents);
		seg->s_as->a_flags |= AS_NEEDSPURGE;
		mutex_exit(&seg->s_as->a_contents);
	}

	return (0);
}
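
/*
 * A worked example of the middle-cut case above, with addresses chosen
 * purely for illustration: unmapping [0x12000, 0x13000) from a no-fault
 * segment covering [0x10000, 0x15000) leaves
 *
 *	seg:	s_base == 0x10000, s_size == 0x2000	[0x10000, 0x12000)
 *	nseg:	s_base == 0x13000, s_size == 0x2000	[0x13000, 0x15000)
 *
 * with the page at [0x12000, 0x13000) no longer mapped.
 */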

/*
 * Free a segment.
 */
static void
segnf_free(struct seg *seg)
{
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
}

/*
 * No faults allowed on segnf.
 */
static faultcode_t
segnf_nomap(void)
{
	return (FC_NOMAP);
}

/* ARGSUSED */
static int
segnf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	return (EACCES);
}

/* ARGSUSED */
static int
segnf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	uint_t sprot;
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	sprot = seg->s_as == &kas ?  PROT_READ : PROT_READ|PROT_USER;
	return ((prot & sprot) == prot ? 0 : EACCES);
}
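
/*
 * A worked example of the check above: for a user no-fault segment sprot
 * is PROT_READ | PROT_USER, so
 *
 *	segnf_checkprot(seg, addr, len, PROT_READ)		== 0
 *	segnf_checkprot(seg, addr, len, PROT_READ | PROT_WRITE)	== EACCES
 *
 * i.e. the zero page backing the segment can only ever be read.
 */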

static void
segnf_badop(void)
{
	panic("segnf_badop");
	/*NOTREACHED*/
}

static int
segnf_nop(void)
{
	return (0);
}

static int
segnf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
	size_t p;
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	for (p = 0; p < pgno; ++p)
		protv[p] = PROT_READ;
	return (0);
}

/* ARGSUSED */
static u_offset_t
segnf_getoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return ((u_offset_t)0);
}

/* ARGSUSED */
static int
segnf_gettype(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return (MAP_SHARED);
}

/* ARGSUSED */
static int
segnf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	*vpp = &zvp;
	return (0);
}

/*
 * Segnf pages are not dumped, so we just return.
 */
/* ARGSUSED */
static void
segnf_dump(struct seg *seg)
{}

/*ARGSUSED*/
static int
segnf_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segnf_setpagesize(struct seg *seg, caddr_t addr, size_t len,
    uint_t szc)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segnf_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segnf_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}