/*
 * drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 */
/*
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "drmP.h"
#include <gfx_private.h>
#include "drm_io32.h"


#define	PAGE_MASK	(PAGE_SIZE-1)
#define	round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)
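
/*
 * For illustration, assuming a 4KB PAGE_SIZE: PAGE_MASK here is 0xfff,
 * the offset-within-page bits (the complement of the Linux macro of the
 * same name), so round_page(4096) == 4096 and round_page(4097) == 8192.
 */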

/*
 * Compute order.  Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order = 0;
	unsigned long tmp = size;

	while (tmp >>= 1)
		order++;

	if (size & ~(1 << order))
		++order;

	return (order);
}
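
/*
 * Worked example (illustrative): drm_order() returns the smallest order
 * such that (1 << order) >= size, i.e. ceil(log2(size)), so
 * drm_order(4096) == 12, drm_order(4097) == 13, and drm_order(1) == 0.
 * Callers below use "1 << drm_order(request->size)" to derive the
 * actual per-buffer size.
 */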

static inline drm_local_map_t *
drm_find_map(drm_device_t *dev, u_offset_t offset, int type)
{
	drm_local_map_t		*map;

	TAILQ_FOREACH(map, &dev->maplist, link) {
		if ((map->type == type) && ((map->offset == offset) ||
		    ((map->flags == _DRM_CONTAINS_LOCK) &&
		    (map->type == _DRM_SHM))))
			return (map);
	}

	return (NULL);
}

int
drm_addmap(drm_device_t *dev, unsigned long offset,
    unsigned long size, drm_map_type_t type,
    drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	caddr_t		kva;
	int		retval;

	/*
	 * Only allow shared memory to be removable, since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM)
		return (EINVAL);
	if ((offset & PAGE_MASK) || (size & PAGE_MASK))
		return (EINVAL);
	if (offset + size < offset)
		return (EINVAL);

	/*
	 * Check if this is just another version of a kernel-allocated
	 * map, and just hand that back if so.
	 */
	map = drm_find_map(dev, offset, type);
	if (map != NULL) {
		goto done;
	}

	/*
	 * Allocate a new map structure, fill it in, and do any
	 * type-specific initialization necessary.
	 */
	map = drm_alloc(sizeof (*map), DRM_MEM_MAPS);
	if (!map)
		return (ENOMEM);

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		retval = drm_ioremap(dev, map);
		if (retval) {
			/* Free the map so this error path doesn't leak it. */
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (retval);
		}
		break;

	case _DRM_SHM:
		/*
		 * ddi_umem_alloc() returns page-aligned memory, so no
		 * extra alignment handling is needed here.
		 */
		map->handle = ddi_umem_alloc(map->size,
		    DDI_UMEM_NOSLEEP, &map->drm_umem_cookie);
		if (!map->handle) {
			DRM_ERROR("drm_addmap: ddi_umem_alloc failed");
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (ENOMEM);
		}
		/*
		 * Record only the low 32 bits of this handle, since a
		 * 32-bit user application cannot pass a 64-bit offset
		 * when doing mmap.
		 */
		map->offset = (uintptr_t)map->handle;
		map->offset &= 0xffffffffUL;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				ddi_umem_free(map->drm_umem_cookie);
				drm_free(map, sizeof (*map), DRM_MEM_MAPS);
				return (EBUSY);
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		map->dev_addr = map->handle;
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (EINVAL);
		}
		map->offset += (uintptr_t)dev->sg->virtual;
		map->handle = (void *)(uintptr_t)map->offset;
		map->dev_addr = dev->sg->virtual;
		map->dev_handle = dev->sg->dmah_sg->acc_hdl;
		break;

	case _DRM_CONSISTENT:
		DRM_ERROR("%d: _DRM_CONSISTENT maps are not supported",
		    __LINE__);
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (ENOTSUP);
	case _DRM_AGP:
		map->offset += dev->agp->base;
		kva = gfxp_map_kernel_space(map->offset, map->size,
		    GFXP_MEMORY_WRITECOMBINED);
		if (kva == 0) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			cmn_err(CE_WARN,
			    "drm_addmap: failed to map AGP aperture");
			return (ENOMEM);
		}
		map->handle = (void *)(uintptr_t)kva;
		map->dev_addr = kva;
		break;
	default:
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (EINVAL);
	}

	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with the lock held, when an existing kernel map is found. */
	*map_ptr = map;

	return (0);
}
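
/*
 * Usage sketch (illustrative; "regs_base" and "regs_size" are hypothetical
 * values a driver would take from its PCI BAR): a driver attach path might
 * register its MMIO space and rely on drm_addmap() handing back the
 * existing map on repeat calls:
 *
 *	drm_local_map_t *map;
 *	int err;
 *
 *	err = drm_addmap(dev, regs_base, regs_size, _DRM_REGISTERS,
 *	    0, &map);
 *	if (err != 0)
 *		return (err);
 */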

/*ARGSUSED*/
int
drm_addmap_ioctl(DRM_IOCTL_ARGS)
{
	drm_map_t request;
	drm_local_map_t *map;
	int err;
	DRM_DEVICE;

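	/*
	 * Under _MULTI_DATAMODEL, each ioctl here copies its arguments
	 * through a 32-bit shadow struct (drm_map_32_t and friends) when
	 * ddi_model_convert_from() reports an ILP32 caller, widens the
	 * fields into the native request before use, and narrows the
	 * reply the same way on the way out.
	 */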
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.mtrr = request32.mtrr;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	err = drm_addmap(dev, request.offset, request.size, request.type,
	    request.flags, &map);

	if (err != 0)
		return (err);

	request.offset = map->offset;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr = map->mtrr;
	request.handle = (uintptr_t)map->handle;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;
		request32.offset = request.offset;
		request32.size = (uint32_t)request.size;
		request32.type = request.type;
		request32.flags = request.flags;
		request32.handle = request.handle;
		request32.mtrr = request.mtrr;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else
#endif
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));

	return (0);
}

void
drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		drm_ioremapfree(map);
		break;
	case _DRM_SHM:
		ddi_umem_free(map->drm_umem_cookie);
		break;
	case _DRM_AGP:
		/*
		 * We mapped the AGP aperture into kernel space in
		 * drm_addmap(); here we unmap it and release the kernel
		 * virtual address space.
		 */
		gfxp_unmap_kernel_space(map->dev_addr, map->size);
		break;

	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		break;
	default:
		break;
	}

	drm_free(map, sizeof (*map), DRM_MEM_MAPS);
}

/*
 * Remove a map from the list and deallocate its resources if the
 * mapping isn't in use.
 */
/*ARGSUSED*/
int
drm_rmmap_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_local_map_t *map;
	drm_map_t request;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (drm_map_32_t));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.handle = request32.handle;
		request.mtrr = request32.mtrr;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (((uintptr_t)map->handle ==
		    (request.handle & 0xffffffff)) &&
		    (map->flags & _DRM_REMOVABLE))
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return (EINVAL);
	}

	drm_rmmap(dev, map);
	DRM_UNLOCK();

	return (0);
}

/*ARGSUSED*/
static void
drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				DRM_ERROR(
				    "drm_cleanup_buf_error: not implemented");
			}
		}
		drm_free(entry->seglist,
		    entry->seg_count *
		    sizeof (*entry->seglist), DRM_MEM_SEGS);
		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
				    entry->buflist[i].dev_priv_size,
				    DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
		    entry->buf_count *
		    sizeof (*entry->buflist), DRM_MEM_BUFS);
		entry->buflist = NULL;
		entry->buf_count = 0;
	}
}

/*ARGSUSED*/
int
drm_markbufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_markbufs");
	return (EINVAL);
}

/*ARGSUSED*/
int
drm_infobufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_infobufs");
	return (EINVAL);
}

static int
drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t **temp_buflist;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int byte_count;
	int i;

	if (!dma)
		return (EINVAL);

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	entry = &dma->bufs[order];

	/* No more than one allocation per order */
	if (entry->buf_count) {
		return (ENOMEM);
	}

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (!entry->buflist) {
		return (ENOMEM);
	}
	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address	= (void *)(agp_offset + offset);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_alloc(
	    (dma->buf_count + entry->buf_count) * sizeof (*dma->buflist),
	    DRM_MEM_BUFS);

	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		DRM_ERROR("drm_do_addbufs_agp: temp_buflist is NULL");
		return (ENOMEM);
	}

	/* Copy the old buffer list into the enlarged one (src, then dst). */
	bcopy(dma->buflist, temp_buflist,
	    dma->buf_count * sizeof (*dma->buflist));
	kmem_free(dma->buflist, dma->buf_count * sizeof (*dma->buflist));
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return (0);
}
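
/*
 * Worked example (illustrative values): for request->size == 65536 with
 * _DRM_PAGE_ALIGN set on a 4KB-page system (PAGE_SHIFT 12), drm_order()
 * gives order 16, so size == 65536, alignment == round_page(65536) ==
 * 65536, and page_order == 16 - 12 == 4; each buffer then advances
 * byte_count by PAGE_SIZE << 4 == 65536 bytes.
 */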

static int
drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;

	byte_count = 0;
	agp_offset = request->agp_start;
	entry = &dma->bufs[order];

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (entry->buflist == NULL)
		return (ENOMEM);

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof (*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof (*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		drm_cleanup_buf_error(dev, entry);
		return (ENOMEM);
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	request->count = entry->buf_count;
	request->size = size;
	dma->flags = _DRM_DMA_USE_SG;

	return (0);
}

int
drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EBUSY);
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (ENOMEM);
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return (ret);
}

int
drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EBUSY);
	}

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (ENOMEM);
	}

	ret = drm_do_addbufs_sg(dev, request);
	DRM_SPINUNLOCK(&dev->dma_lock);
	return (ret);
}

/*ARGSUSED*/
int
drm_addbufs_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_desc_t request;
	int err;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.size = request32.size;
		request.low_mark = request32.low_mark;
		request.high_mark = request32.high_mark;
		request.flags = request32.flags;
		request.agp_start = request32.agp_start;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	if (request.flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, &request);
	else if (request.flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, &request);
	else
		err = EINVAL;	/* Neither AGP nor SG buffers requested. */

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc_32_t request32;
		request32.count = request.count;
		request32.size = request.size;
		request32.low_mark = request.low_mark;
		request32.high_mark = request.high_mark;
		request32.flags = request.flags;
		request32.agp_start = (uint32_t)request.agp_start;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else
#endif
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));

	return (err);
}
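
/*
 * Illustrative user-space sketch (hypothetical fd and sizes): a client
 * asking for 32 AGP DMA buffers of 64KB each would issue something like:
 *
 *	drm_buf_desc_t req;
 *
 *	bzero(&req, sizeof (req));
 *	req.count = 32;
 *	req.size = 64 * 1024;
 *	req.flags = _DRM_AGP_BUFFER;
 *	req.agp_start = 0;
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) == 0) {
 *		... req.count and req.size now hold the granted values
 *	}
 */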

/*ARGSUSED*/
int
drm_freebufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_free_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.list = (int *)(uintptr_t)request32.list;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	for (i = 0; i < request.count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof (idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("drm_freebufs: index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->filp != fpriv) {
			DRM_ERROR(
			    "drm_freebufs: process %d does not own the buffer.\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}

	return (retcode);
}
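
/*
 * Illustrative user-space sketch (hypothetical fd and indices): freeing
 * two previously allocated DMA buffers by index:
 *
 *	int list[2] = { 0, 1 };
 *	drm_buf_free_t req;
 *
 *	req.count = 2;
 *	req.list = list;
 *	(void) ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */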

#ifdef _LP64
extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
#define	drm_smmap	smmap64
#else
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
extern caddr_t smmap32(caddr32_t, size32_t, int, int, int, off32_t);
#define	drm_smmap smmap32
#else
#error "No define for _LP64, _SYSCALL32_IMPL or _ILP32"
#endif
#endif
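
/*
 * smmap64()/smmap32() are the kernel-side entry points of the mmap(2)
 * system call, so drm_smmap() below maps the DMA buffer region into the
 * calling process much as if the process had called mmap(2) on the
 * passed-in file descriptor itself.
 */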

/*ARGSUSED*/
int
drm_mapbufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_map_t request;
	const int zero = 0;
	unsigned long	vaddr;
	unsigned long address;
	drm_device_dma_t *dma = dev->dma;
	uint_t	size;
	uint_t	foff;
	int	ret_tmp;
	int	i;

#ifdef	_MULTI_DATAMODEL
	drm_buf_map_32_t request32;
	drm_buf_pub_32_t	*list32;
	uint_t		address32;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.virtual = (void *)(uintptr_t)request32.virtual;
		request.list = (drm_buf_pub_t *)(uintptr_t)request32.list;
		request.fd = request32.fd;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	dev->buf_use++;

	if (request.count < dma->buf_count)
		goto done;

	if ((dev->driver->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (dev->driver->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;
		if (map == NULL)
			return (EINVAL);
		size = round_page(map->size);
		foff = (uintptr_t)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}
	request.virtual = drm_smmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_SHARED, request.fd, foff);
	if (request.virtual == NULL) {
		DRM_ERROR("drm_mapbufs: request.virtual is NULL");
		return (EINVAL);
	}

	vaddr = (unsigned long)request.virtual;
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		list32 = (drm_buf_pub_32_t *)(uintptr_t)request32.list;
		for (i = 0; i < dma->buf_count; i++) {
			if (DRM_COPY_TO_USER(&list32[i].idx,
			    &dma->buflist[i]->idx, sizeof (list32[0].idx))) {
				return (EFAULT);
			}
			if (DRM_COPY_TO_USER(&list32[i].total,
			    &dma->buflist[i]->total,
			    sizeof (list32[0].total))) {
				return (EFAULT);
			}
			if (DRM_COPY_TO_USER(&list32[i].used,
			    &zero, sizeof (zero))) {
				return (EFAULT);
			}
			address32 = vaddr + dma->buflist[i]->offset; /* *** */
			ret_tmp = DRM_COPY_TO_USER(&list32[i].address,
			    &address32, sizeof (list32[0].address));
			if (ret_tmp)
				return (EFAULT);
		}
		goto done;
	}
#endif

	ASSERT(ddi_model_convert_from(mode & FMODELS) != DDI_MODEL_ILP32);
	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request.list[i].idx,
		    &dma->buflist[i]->idx, sizeof (request.list[0].idx))) {
			return (EFAULT);
		}
		if (DRM_COPY_TO_USER(&request.list[i].total,
		    &dma->buflist[i]->total, sizeof (request.list[0].total))) {
			return (EFAULT);
		}
		if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
		    sizeof (zero))) {
			return (EFAULT);
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */

		ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
		    &address, sizeof (address));
		if (ret_tmp) {
			return (EFAULT);
		}
	}

done:
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		request32.count = dma->buf_count;
		request32.virtual = (caddr32_t)(uintptr_t)request.virtual;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else {
#endif
		request.count = dma->buf_count;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));
#ifdef	_MULTI_DATAMODEL
	}
#endif
	return (0);
}
898