/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2018 Joyent, Inc.
 */

#include <sys/debug.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/signal.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/cred.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vnode.h>
#include <vm/page.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <sys/vmsystm.h>
#include <sys/ddi.h>
#include <sys/devops.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/fs/snode.h>
#include <sys/pci.h>
#include <sys/modctl.h>
#include <sys/visual_io.h>
#include <sys/fbio.h>
#include <sys/ddidmareq.h>
#include <sys/kstat.h>
#include <sys/callb.h>
#include <sys/promif.h>
#include <sys/atomic.h>
#include <sys/gfx_private.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#endif

/*
 * Create a kva mapping for a pa (start..start+size) with
 * the specified cache attributes (mode).
 */
gfxp_kva_t
gfxp_map_kernel_space(uint64_t start, size_t size, uint32_t mode)
{
	uint_t pgoffset;
	uint64_t base;
	pgcnt_t npages;
	caddr_t cvaddr;
	int hat_flags;
	uint_t hat_attr;
	pfn_t pfn;

	if (size == 0)
		return (0);

#ifdef __xpv
	/*
	 * The hypervisor doesn't allow r/w mappings to some pages, such as
	 * page tables, gdt, etc.  Detect an attempt to map the page that
	 * %cr3 points to and refuse it.
	 */
	if (start == mmu_ptob(mmu_btop(getcr3_pa())))
		return (0);
#endif

	if (mode == GFXP_MEMORY_CACHED)
		hat_attr = HAT_STORECACHING_OK;
	else if (mode == GFXP_MEMORY_WRITECOMBINED)
		hat_attr = HAT_MERGING_OK | HAT_PLAT_NOCACHE;
	else	/* GFXP_MEMORY_UNCACHED */
		hat_attr = HAT_STRICTORDER | HAT_PLAT_NOCACHE;
	hat_flags = HAT_LOAD_LOCK;
	pgoffset = start & PAGEOFFSET;
	base = start - pgoffset;
	npages = btopr(size + pgoffset);
	cvaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
	if (cvaddr == NULL)
		return (NULL);

#ifdef __xpv
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	pfn = xen_assign_pfn(mmu_btop(base));
#else
	pfn = btop(base);
#endif

	hat_devload(kas.a_hat, cvaddr, ptob(npages), pfn,
	    PROT_READ|PROT_WRITE|hat_attr, hat_flags);
	return (cvaddr + pgoffset);
}

/*
 * Destroy the mapping created by gfxp_map_kernel_space().
 * Physical memory is not reclaimed.
 */
void
gfxp_unmap_kernel_space(gfxp_kva_t address, size_t size)
{
	uint_t pgoffset;
	caddr_t base;
	pgcnt_t npages;

	if (size == 0 || address == NULL)
		return;

	pgoffset = (uintptr_t)address & PAGEOFFSET;
	base = (caddr_t)address - pgoffset;
	npages = btopr(size + pgoffset);
	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
	vmem_free(heap_arena, base, ptob(npages));
}
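
/*
 * Example usage (a sketch, not part of this file): map a device
 * aperture uncached, read a register, then tear the mapping down.
 * "xx_regs_pa", "xx_regs_len" and "XX_STATUS_OFF" are hypothetical
 * values a driver would derive from its PCI BAR.
 *
 *	gfxp_kva_t regs;
 *	uint32_t status;
 *
 *	regs = gfxp_map_kernel_space(xx_regs_pa, xx_regs_len,
 *	    GFXP_MEMORY_UNCACHED);
 *	if (regs == NULL)
 *		return (DDI_FAILURE);
 *
 *	status = *(volatile uint32_t *)(regs + XX_STATUS_OFF);
 *	...
 *	gfxp_unmap_kernel_space(regs, xx_regs_len);
 */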

/*
 * For a VA, return the corresponding PA (a machine address when
 * running under the hypervisor).
 */
int
gfxp_va2pa(struct as *as, caddr_t addr, uint64_t *pa)
{
#ifdef __xpv
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	*pa = pa_to_ma(pfn_to_pa(hat_getpfnum(as->a_hat, addr)));
#else
	*pa = pfn_to_pa(hat_getpfnum(as->a_hat, addr));
#endif
	return (0);
}
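
/*
 * Example usage (a sketch): translate a kernel virtual address in the
 * kernel's address space.  "buf" is a hypothetical kernel mapping; the
 * return value is ignored because gfxp_va2pa() always returns 0.
 *
 *	uint64_t pa;
 *
 *	(void) gfxp_va2pa(&kas, (caddr_t)buf, &pa);
 */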

/*
 * NOP now
 */
/* ARGSUSED */
void
gfxp_fix_mem_cache_attrs(caddr_t kva_start, size_t length, int cache_attr)
{
}

int
gfxp_ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
    ddi_device_acc_attr_t  *accattrp, uint_t flags, int (*waitfp) (caddr_t),
    caddr_t arg, caddr_t *kaddrp, size_t *real_length,
    ddi_acc_handle_t *handlep)
{
	uint_t l_flags = flags & ~IOMEM_DATA_MASK; /* clear cache attrs */
	int e;

	/*
	 * Set an appropriate attribute from devacc_attr_dataorder
	 * for compatibility; any cache attributes passed in via
	 * flags are ignored.
	 */
	if (accattrp != NULL) {
		if (accattrp->devacc_attr_dataorder == DDI_STRICTORDER_ACC) {
			l_flags |= IOMEM_DATA_UNCACHED;
		} else if (accattrp->devacc_attr_dataorder ==
		    DDI_MERGING_OK_ACC) {
			l_flags |= IOMEM_DATA_UC_WR_COMBINE;
		} else {
			l_flags |= IOMEM_DATA_CACHED;
		}
	}

	e = ddi_dma_mem_alloc(handle, length, accattrp, l_flags, waitfp,
	    arg, kaddrp, real_length, handlep);
	return (e);
}
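
/*
 * Example usage (a sketch): request write-combined DMA memory by asking
 * for DDI_MERGING_OK_ACC ordering, which the code above turns into
 * IOMEM_DATA_UC_WR_COMBINE (DDI_STRICTORDER_ACC becomes uncached,
 * anything else cached).  "dma_handle" and "len" are hypothetical.
 *
 *	ddi_device_acc_attr_t acc = {
 *		DDI_DEVICE_ATTR_V0,
 *		DDI_NEVERSWAP_ACC,
 *		DDI_MERGING_OK_ACC
 *	};
 *	caddr_t kva;
 *	size_t real_len;
 *	ddi_acc_handle_t acch;
 *
 *	if (gfxp_ddi_dma_mem_alloc(dma_handle, len, &acc,
 *	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
 *	    &kva, &real_len, &acch) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */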

int
gfxp_mlock_user_memory(caddr_t address, size_t length)
{
	struct as *as = ttoproc(curthread)->p_as;
	int error = 0;

	if (((uintptr_t)address & PAGEOFFSET) != 0 || length == 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(address, length, 0, as, as->a_userlimit) !=
	    RANGE_OKAY)
		return (set_errno(ENOMEM));

	error = as_ctl(as, address, length, MC_LOCK, 0, 0, NULL, 0);
	if (error)
		(void) set_errno(error);

	return (error);
}

int
gfxp_munlock_user_memory(caddr_t address, size_t length)
{
	struct as *as = ttoproc(curthread)->p_as;
	int error = 0;

	if (((uintptr_t)address & PAGEOFFSET) != 0 || length == 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(address, length, 0, as, as->a_userlimit) !=
	    RANGE_OKAY)
		return (set_errno(ENOMEM));

	error = as_ctl(as, address, length, MC_UNLOCK, 0, 0, NULL, 0);
	if (error)
		(void) set_errno(error);

	return (error);
}
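
/*
 * Example usage (a sketch): lock down a page-aligned region of the
 * calling process's address space while a device uses it, then unlock
 * it.  Both calls return 0 on success or an errno value (also set on
 * the calling thread) on failure.  "uaddr" and "ulen" are hypothetical.
 *
 *	if (gfxp_mlock_user_memory(uaddr, ulen) != 0)
 *		return (DDI_FAILURE);
 *	...
 *	(void) gfxp_munlock_user_memory(uaddr, ulen);
 */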

gfx_maddr_t
gfxp_convert_addr(paddr_t paddr)
{
#ifdef __xpv
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	return (pfn_to_pa(xen_assign_pfn(btop(paddr))));
#else
	return ((gfx_maddr_t)paddr);
#endif
}

/*
 * Support getting VA space separately from pages
 */

/*
 * A little like gfxp_map_kernel_space, but
 * just the vmem_alloc part.
 */
caddr_t
gfxp_alloc_kernel_space(size_t size)
{
	caddr_t cvaddr;
	pgcnt_t npages;

	npages = btopr(size);
	cvaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
	return (cvaddr);
}

/*
 * Like gfxp_unmap_kernel_space, but
 * just the vmem_free part.
 */
void
gfxp_free_kernel_space(caddr_t address, size_t size)
{
	uint_t pgoffset;
	caddr_t base;
	pgcnt_t npages;

	if (size == 0 || address == NULL)
		return;

	pgoffset = (uintptr_t)address & PAGEOFFSET;
	base = (caddr_t)address - pgoffset;
	npages = btopr(size + pgoffset);
	vmem_free(heap_arena, base, ptob(npages));
}

/*
 * Like gfxp_map_kernel_space, but
 * just the hat_devload part.
 */
void
gfxp_load_kernel_space(uint64_t start, size_t size,
    uint32_t mode, caddr_t cvaddr)
{
	uint_t pgoffset;
	uint64_t base;
	pgcnt_t npages;
	int hat_flags;
	uint_t hat_attr;
	pfn_t pfn;

	if (size == 0)
		return;

#ifdef __xpv
	/*
	 * The hypervisor doesn't allow r/w mappings to some pages, such as
	 * page tables, gdt, etc.  Detect an attempt to map the page that
	 * %cr3 points to and refuse it.
	 */
	if (start == mmu_ptob(mmu_btop(getcr3_pa())))
		return;
#endif

	if (mode == GFXP_MEMORY_CACHED)
		hat_attr = HAT_STORECACHING_OK;
	else if (mode == GFXP_MEMORY_WRITECOMBINED)
		hat_attr = HAT_MERGING_OK | HAT_PLAT_NOCACHE;
	else	/* GFXP_MEMORY_UNCACHED */
		hat_attr = HAT_STRICTORDER | HAT_PLAT_NOCACHE;
	hat_flags = HAT_LOAD_LOCK;

	pgoffset = start & PAGEOFFSET;
	base = start - pgoffset;
	npages = btopr(size + pgoffset);

#ifdef __xpv
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	pfn = xen_assign_pfn(mmu_btop(base));
#else
	pfn = btop(base);
#endif

	hat_devload(kas.a_hat, cvaddr, ptob(npages), pfn,
	    PROT_READ|PROT_WRITE|hat_attr, hat_flags);
}

/*
 * Like gfxp_unmap_kernel_space, but
 * just the hat_unload part.
 */
void
gfxp_unload_kernel_space(caddr_t address, size_t size)
{
	uint_t pgoffset;
	caddr_t base;
	pgcnt_t npages;

	if (size == 0 || address == NULL)
		return;

	pgoffset = (uintptr_t)address & PAGEOFFSET;
	base = (caddr_t)address - pgoffset;
	npages = btopr(size + pgoffset);
	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
}
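
/*
 * Example usage (a sketch): the alloc/load/unload/free entry points
 * split gfxp_map_kernel_space()/gfxp_unmap_kernel_space() into their
 * VA-reservation and translation-loading halves, so a caller can keep
 * a VA range around and point it at different physical ranges.  "pa"
 * and "len" are hypothetical and page-aligned here; unlike
 * gfxp_map_kernel_space(), the caller handles any sub-page offset
 * itself.
 *
 *	caddr_t va;
 *
 *	va = gfxp_alloc_kernel_space(len);
 *	if (va == NULL)
 *		return (DDI_FAILURE);
 *	gfxp_load_kernel_space(pa, len, GFXP_MEMORY_WRITECOMBINED, va);
 *	...
 *	gfxp_unload_kernel_space(va, len);
 *	gfxp_free_kernel_space(va, len);
 */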

/*
 * Note that "mempool" is optional and normally disabled in drm_gem.c
 * (see HAS_MEM_POOL).  These are stubbed out so that the DRM driver
 * code can stay closer to upstream.
 */

void
gfxp_mempool_init(void)
{
}

void
gfxp_mempool_destroy(void)
{
}

/* ARGSUSED */
int
gfxp_alloc_from_mempool(struct gfxp_pmem_cookie *cookie, caddr_t *kva,
    pfn_t *pgarray, pgcnt_t alen, int flags)
{
	return (-1);
}

/* ARGSUSED */
void
gfxp_free_mempool(struct gfxp_pmem_cookie *cookie, caddr_t kva, size_t len)
{
}