xref: /titanic_51/usr/src/uts/common/os/bp_map.c (revision 381a2a9a387f449fab7d0c7e97c4184c26963abf)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/buf.h>
#include <sys/vmem.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/machparam.h>
#include <vm/page.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>

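/*
 * On sparc, BP_FLUSH() flushes the instruction cache over the given range
 * via flush_instr_mem(); on other platforms it expands to nothing.  It is
 * used by bp_mapout() before the mapping is unloaded.
 */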
#ifdef __sparc
#include <sys/cpu_module.h>
#define	BP_FLUSH(addr, size)	flush_instr_mem((void *)addr, size);
#else
#define	BP_FLUSH(addr, size)
#endif

static vmem_t *bp_map_arena;
static size_t bp_align;
static uint_t bp_devload_flags = PROT_READ | PROT_WRITE | HAT_NOSYNC;
int	bp_max_cache = 1 << 17;		/* 128K default; tunable */
int	bp_mapin_kpm_enable = 1;	/* enable default; tunable */

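/*
 * Import function for bp_map_arena: allocate bp_align-aligned space on its
 * behalf from the backing arena (heap_arena).
 */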
static void *
bp_vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, bp_align, 0, 0, NULL, NULL, vmflag));
}

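/*
 * Record the required alignment and hat_devload() flags, and create the
 * bp_map vmem arena (backed by heap_arena) that bp_mapin_common() uses for
 * kernel virtual space when the alignment is small enough.
 */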
void
bp_init(size_t align, uint_t devload_flags)
{
	bp_align = MAX(align, PAGESIZE);
	bp_devload_flags |= devload_flags;

	if (bp_align <= bp_max_cache)
		bp_map_arena = vmem_create("bp_map", NULL, 0, bp_align,
		    bp_vmem_alloc, vmem_free, heap_arena,
		    MIN(8 * bp_align, bp_max_cache), VM_SLEEP);
}

/*
 * Common routine so that it can be called with or without VM_SLEEP.
 */
void *
bp_mapin_common(struct buf *bp, int flag)
{
	struct as	*as;
	pfn_t		pfnum;
	page_t		*pp;
	page_t		**pplist;
	caddr_t		kaddr;
	caddr_t		addr;
	uintptr_t	off;
	size_t		size;
	pgcnt_t		npages;
	int		color;

	/* return if already mapped in, no pageio/physio, or physio to kas */
	if ((bp->b_flags & B_REMAPPED) ||
	    !(bp->b_flags & (B_PAGEIO | B_PHYS)) ||
	    (((bp->b_flags & (B_PAGEIO | B_PHYS)) == B_PHYS) &&
	    ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas))))
		return (bp->b_un.b_addr);

	ASSERT((bp->b_flags & (B_PAGEIO | B_PHYS)) != (B_PAGEIO | B_PHYS));

	addr = (caddr_t)bp->b_un.b_addr;
	off = (uintptr_t)addr & PAGEOFFSET;
	size = P2ROUNDUP(bp->b_bcount + off, PAGESIZE);
	npages = btop(size);

	/* Fastpath single page IO to locked memory by using kpm. */
	if ((bp->b_flags & (B_SHADOW | B_PAGEIO)) && (npages == 1) &&
	    kpm_enable && bp_mapin_kpm_enable) {
		if (bp->b_flags & B_SHADOW)
			pp = *bp->b_shadow;
		else
			pp = bp->b_pages;
		kaddr = hat_kpm_mapin(pp, NULL);
		bp->b_un.b_addr = kaddr + off;
		bp->b_flags |= B_REMAPPED;
		return (bp->b_un.b_addr);
	}

	/*
	 * Allocate kernel virtual space for remapping.
	 */
	color = bp_color(bp);
	ASSERT(color < bp_align);

	if (bp_map_arena != NULL) {
		kaddr = (caddr_t)vmem_alloc(bp_map_arena,
		    P2ROUNDUP(color + size, bp_align), flag);
		if (kaddr == NULL)
			return (NULL);
		kaddr += color;
	} else {
		kaddr = vmem_xalloc(heap_arena, size, bp_align, color,
		    0, NULL, NULL, flag);
		if (kaddr == NULL)
			return (NULL);
	}

	ASSERT(P2PHASE((uintptr_t)kaddr, bp_align) == color);

	/*
	 * Map bp into the virtual space we just allocated.
	 */
	if (bp->b_flags & B_PAGEIO) {
		pp = bp->b_pages;
		pplist = NULL;
	} else if (bp->b_flags & B_SHADOW) {
		pp = NULL;
		pplist = bp->b_shadow;
	} else {
		pp = NULL;
		pplist = NULL;
		if (bp->b_proc == NULL || (as = bp->b_proc->p_as) == NULL)
			as = &kas;
	}

	bp->b_flags |= B_REMAPPED;
	bp->b_un.b_addr = kaddr + off;

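	/*
	 * Load a locked translation for each page.  The pfn comes from the
	 * buffer's page list (B_PAGEIO), from its shadow page list
	 * (B_SHADOW), or from a lookup in the owning address space (physio).
	 */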
	while (npages-- != 0) {
		if (pp) {
			pfnum = pp->p_pagenum;
			pp = pp->p_next;
		} else if (pplist == NULL) {
			pfnum = hat_getpfnum(as->a_hat,
			    (caddr_t)((uintptr_t)addr & MMU_PAGEMASK));
			if (pfnum == PFN_INVALID)
				panic("bp_mapin_common: hat_getpfnum for"
				    " addr %p failed\n", (void *)addr);
			addr += PAGESIZE;
		} else {
			pfnum = (*pplist)->p_pagenum;
			pplist++;
		}

		hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum,
		    bp_devload_flags, HAT_LOAD_LOCK);

		kaddr += PAGESIZE;
	}
	return (bp->b_un.b_addr);
}

/*
 * Convert bp for pageio/physio to a kernel addressable location.
 */
void
bp_mapin(struct buf *bp)
{
	(void) bp_mapin_common(bp, VM_SLEEP);
}

/*
 * Release all the resources associated with a previous bp_mapin() call.
 */
void
bp_mapout(struct buf *bp)
{
	caddr_t		addr;
	uintptr_t	off;
	uintptr_t	base;
	uintptr_t	color;
	size_t		size;
	pgcnt_t		npages;
	page_t		*pp;

	if ((bp->b_flags & B_REMAPPED) == 0)
		return;

	addr = bp->b_un.b_addr;
	off = (uintptr_t)addr & PAGEOFFSET;
	size = P2ROUNDUP(bp->b_bcount + off, PAGESIZE);
	npages = btop(size);

	bp->b_un.b_addr = (caddr_t)off;		/* debugging aid */

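	/* Fastpath: tear down a single page kpm mapping set up in bp_mapin_common(). */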
	if ((bp->b_flags & (B_SHADOW | B_PAGEIO)) && (npages == 1) &&
	    kpm_enable && bp_mapin_kpm_enable) {
		if (bp->b_flags & B_SHADOW)
			pp = *bp->b_shadow;
		else
			pp = bp->b_pages;
		addr = (caddr_t)((uintptr_t)addr & MMU_PAGEMASK);
		hat_kpm_mapout(pp, NULL, addr);
		bp->b_flags &= ~B_REMAPPED;
		return;
	}

	base = (uintptr_t)addr & MMU_PAGEMASK;
	BP_FLUSH(base, size);
	hat_unload(kas.a_hat, (void *)base, size,
	    HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
	if (bp_map_arena != NULL) {
		color = P2PHASE(base, bp_align);
		vmem_free(bp_map_arena, (void *)(base - color),
		    P2ROUNDUP(color + size, bp_align));
	} else
		vmem_free(heap_arena, (void *)base, size);
	bp->b_flags &= ~B_REMAPPED;
}