/*
 * drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 */
/*
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "drmP.h"
#include <gfx_private.h>
#include "drm_io32.h"


#define	PAGE_MASK	(PAGE_SIZE-1)
#define	round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)
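
/*
 * Example (illustrative, assuming 4 KB pages): round_page(0x1234)
 * yields 0x2000, while round_page(0x2000) is already page-aligned and
 * stays 0x2000.  Note that PAGE_MASK here is the low-bit mask
 * (PAGE_SIZE - 1), the inverse of the usual Linux convention.
 */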

/*
 * Compute order.  Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order = 0;
	unsigned long tmp = size;

	while (tmp >>= 1)
		order++;

	if (size & ~(1 << order))
		++order;

	return (order);
}
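
/*
 * Worked example of the rounding above: drm_order(4096) == 12, since
 * 4096 is exactly 1 << 12, while drm_order(4097) == 13 because the
 * leftover low bits push the result up to the next power of two.
 */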

static inline drm_local_map_t *
drm_find_map(drm_device_t *dev, u_offset_t offset, int type)
{
	drm_local_map_t		*map;

	/*
	 * A lock-bearing SHM map is matched on type alone, since its
	 * offset is the (truncated) kernel address of the lock page
	 * rather than a caller-supplied offset.
	 */
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if ((map->type == type) && ((map->offset == offset) ||
		    ((map->flags == _DRM_CONTAINS_LOCK) &&
		    (map->type == _DRM_SHM))))
			return (map);
	}

	return (NULL);
}

int
drm_addmap(drm_device_t *dev, unsigned long offset,
    unsigned long size, drm_map_type_t type,
    drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	caddr_t		kva;
	int		retval;

	/*
	 * Only allow shared memory to be removable, since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM)
		return (EINVAL);
	if ((offset & PAGE_MASK) || (size & PAGE_MASK))
		return (EINVAL);
	if (offset + size < offset)
		return (EINVAL);

	/*
	 * Check if this is just another version of a kernel-allocated
	 * map, and just hand that back if so.
	 */
	map = drm_find_map(dev, offset, type);
	if (map != NULL) {
		goto done;
	}

	/*
	 * Allocate a new map structure, fill it in, and do any
	 * type-specific initialization necessary.
	 */
	map = drm_alloc(sizeof (*map), DRM_MEM_MAPS);
	if (!map)
		return (ENOMEM);

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		retval = drm_ioremap(dev, map);
		if (retval) {
			/* Don't leak the map if the remap fails. */
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (retval);
		}
		break;

	case _DRM_SHM:
		/*
		 * ddi_umem_alloc() grants page-aligned memory. We needn't
		 * handle alignment issues here.
		 */
		map->handle = ddi_umem_alloc(map->size,
		    DDI_UMEM_NOSLEEP, &map->drm_umem_cookie);
		if (!map->handle) {
			DRM_ERROR("drm_addmap: ddi_umem_alloc failed");
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (ENOMEM);
		}
		/*
		 * Record only the low 32 bits of this handle, since a
		 * 32-bit user app is incapable of passing in a 64-bit
		 * offset when doing mmap.
		 */
		map->offset = (uintptr_t)map->handle;
		map->offset &= 0xffffffffUL;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				ddi_umem_free(map->drm_umem_cookie);
				drm_free(map, sizeof (*map), DRM_MEM_MAPS);
				return (EBUSY);
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		map->dev_addr = map->handle;
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (EINVAL);
		}
		map->offset += (uintptr_t)dev->sg->virtual;
		map->handle = (void *)(uintptr_t)map->offset;
		map->dev_addr = dev->sg->virtual;
		map->dev_handle = dev->sg->dmah_sg->acc_hdl;
		break;

	case _DRM_CONSISTENT:
		cmn_err(CE_WARN, "%d: _DRM_CONSISTENT maps not supported",
		    __LINE__);
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (ENOTSUP);
	case _DRM_AGP:
		map->offset += dev->agp->base;
		kva = gfxp_map_kernel_space(map->offset, map->size,
		    GFXP_MEMORY_WRITECOMBINED);
		if (kva == NULL) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			cmn_err(CE_WARN,
			    "drm_addmap: failed to map AGP aperture");
			return (ENOMEM);
		}
		map->handle = (void *)(uintptr_t)kva;
		map->dev_addr = kva;
		break;
	default:
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (EINVAL);
	}

	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	*map_ptr = map;

	return (0);
}
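
/*
 * Usage sketch (illustrative only; the regs_base/regs_size variables
 * are hypothetical): a driver typically registers its MMIO range once
 * at attach time and keeps the returned map around until teardown:
 *
 *	drm_local_map_t *map;
 *
 *	if (drm_addmap(dev, regs_base, regs_size, _DRM_REGISTERS,
 *	    0, &map) != 0)
 *		return (DDI_FAILURE);
 *
 * Repeated calls with the same offset and type hand back the existing
 * map rather than creating a duplicate.
 */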

/*ARGSUSED*/
int
drm_addmap_ioctl(DRM_IOCTL_ARGS)
{
	drm_map_t request;
	drm_local_map_t *map;
	int err;
	DRM_DEVICE;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.mtrr = request32.mtrr;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	err = drm_addmap(dev, request.offset, request.size, request.type,
	    request.flags, &map);

	if (err != 0)
		return (err);

	request.offset = map->offset;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr = map->mtrr;
	request.handle = (uintptr_t)map->handle;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;
		request32.offset = request.offset;
		request32.size = (uint32_t)request.size;
		request32.type = request.type;
		request32.flags = request.flags;
		request32.handle = request.handle;
		request32.mtrr = request.mtrr;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else
#endif
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));

	return (0);
}
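
/*
 * Note on the _MULTI_DATAMODEL dance above (repeated throughout this
 * file): when a 32-bit process issues the ioctl against a 64-bit
 * kernel, the argument is first widened from the drm_map_32_t layout
 * into the native drm_map_t before use, then narrowed again on the
 * way back out, so the core code only ever sees the native structure.
 */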

void
drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		drm_ioremapfree(map);
		break;
	case _DRM_SHM:
		ddi_umem_free(map->drm_umem_cookie);
		break;
	case _DRM_AGP:
		/*
		 * We mapped the AGP aperture into kernel space in
		 * drm_addmap(); unmap it here and release the kernel
		 * virtual address space.
		 */
		gfxp_unmap_kernel_space(map->dev_addr, map->size);
		break;
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		break;
	default:
		break;
	}

	drm_free(map, sizeof (*map), DRM_MEM_MAPS);
}

/*
 * Remove a map from the list and deallocate its resources if the
 * mapping isn't in use.
 */
/*ARGSUSED*/
int
drm_rmmap_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_local_map_t *map;
	drm_map_t request;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (drm_map_32_t));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.handle = request32.handle;
		request.mtrr = request32.mtrr;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	DRM_LOCK();
	/*
	 * Match on the low 32 bits of the handle, since that is all a
	 * 32-bit client could have been given by drm_addmap_ioctl().
	 */
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (((uintptr_t)map->handle ==
		    (request.handle & 0xffffffff)) &&
		    (map->flags & _DRM_REMOVABLE))
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return (EINVAL);
	}

	drm_rmmap(dev, map);
	DRM_UNLOCK();

	return (0);
}

/*ARGSUSED*/
static void
drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				DRM_ERROR(
				    "drm_cleanup_buf_error: not implemented");
			}
		}
		drm_free(entry->seglist,
		    entry->seg_count *
		    sizeof (*entry->seglist), DRM_MEM_SEGS);
		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
				    entry->buflist[i].dev_priv_size,
				    DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
		    entry->buf_count *
		    sizeof (*entry->buflist), DRM_MEM_BUFS);
		entry->buflist = NULL;
		entry->buf_count = 0;
	}
}

/*
 * drm_markbufs() and drm_infobufs() are unsupported; the stubs exist
 * only so the corresponding ioctls have an entry point to fail from.
 */
/*ARGSUSED*/
int
drm_markbufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_markbufs");
	return (EINVAL);
}

/*ARGSUSED*/
int
drm_infobufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_infobufs");
	return (EINVAL);
}

static int
drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t **temp_buflist;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int byte_count;
	int i;

	if (!dma)
		return (EINVAL);

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	entry = &dma->bufs[order];

	/* No more than one allocation per order */
	if (entry->buf_count) {
		return (ENOMEM);
	}

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (!entry->buflist) {
		return (ENOMEM);
	}
	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address	= (void *)(agp_offset + offset);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_alloc(
	    (dma->buf_count + entry->buf_count) * sizeof (*dma->buflist),
	    DRM_MEM_BUFS);

	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		DRM_ERROR("drm_do_addbufs_agp: temp_buflist is NULL");
		return (ENOMEM);
	}

	/* Copy the old list into the enlarged one; the source comes first. */
	bcopy(dma->buflist, temp_buflist,
	    dma->buf_count * sizeof (*dma->buflist));
	kmem_free(dma->buflist, dma->buf_count * sizeof (*dma->buflist));
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return (0);
}
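
/*
 * Worked example of the sizing math above, assuming 4 KB pages
 * (PAGE_SHIFT == 12): a request->size of 20480 gives order == 15 and
 * size == 32768, so page_order == 3 and every buffer added advances
 * byte_count by PAGE_SIZE << 3, i.e. 32 KB, matching the 32 KB
 * alignment used when _DRM_PAGE_ALIGN is set.
 */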

static int
drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;

	byte_count = 0;
	agp_offset = request->agp_start;
	entry = &dma->bufs[order];

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (entry->buflist == NULL)
		return (ENOMEM);

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof (*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof (*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		drm_cleanup_buf_error(dev, entry);
		return (ENOMEM);
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	request->count = entry->buf_count;
	request->size = size;
	dma->flags = _DRM_DMA_USE_SG;

	return (0);
}

int
drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EBUSY);
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (ENOMEM);
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return (ret);
}

int
drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EBUSY);
	}

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (ENOMEM);
	}

	ret = drm_do_addbufs_sg(dev, request);
	DRM_SPINUNLOCK(&dev->dma_lock);
	return (ret);
}

/*ARGSUSED*/
int
drm_addbufs_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_desc_t request;
	int err;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.size = request32.size;
		request.low_mark = request32.low_mark;
		request.high_mark = request32.high_mark;
		request.flags = request32.flags;
		request.agp_start = request32.agp_start;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	if (request.flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, &request);
	else if (request.flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, &request);
	else
		err = EINVAL;	/* Don't return an uninitialized err. */

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc_32_t request32;
		request32.count = request.count;
		request32.size = request.size;
		request32.low_mark = request.low_mark;
		request32.high_mark = request.high_mark;
		request32.flags = request.flags;
		request32.agp_start = (uint32_t)request.agp_start;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else
#endif
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));

	return (err);
}

/*ARGSUSED*/
int
drm_freebufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_free_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.list = (int *)(uintptr_t)request32.list;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	for (i = 0; i < request.count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof (idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("drm_freebufs: index %d (max %d)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->filp != fpriv) {
			DRM_ERROR(
			    "drm_freebufs: process %d does not own buffer %d\n",
			    DRM_CURRENTPID, idx);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}

	return (retcode);
}

#ifdef _LP64
extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
#define	drm_smmap	smmap64
#else
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
extern caddr_t smmap32(caddr32_t, size32_t, int, int, int, off32_t);
#define	drm_smmap	smmap32
#else
#error "No define for _LP64, _SYSCALL32_IMPL or _ILP32"
#endif
#endif
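
/*
 * drm_smmap() resolves to the kernel's own mmap(2) entry point for
 * the current data model (smmap64 or smmap32), which lets
 * drm_mapbufs() below establish the user mapping on the caller's
 * behalf instead of requiring the client to mmap(2) the buffers in a
 * separate trip back to userland.
 */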

/*ARGSUSED*/
int
drm_mapbufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_map_t request;
	const int zero = 0;
	unsigned long	vaddr;
	unsigned long	address;
	drm_device_dma_t *dma = dev->dma;
	uint_t	size;
	uint_t	foff;
	int	ret_tmp;
	int	i;

#ifdef	_MULTI_DATAMODEL
	drm_buf_map_32_t request32;
	drm_buf_pub_32_t	*list32;
	uint_t		address32;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.virtual = (void *)(uintptr_t)request32.virtual;
		request.list = (drm_buf_pub_t *)(uintptr_t)request32.list;
		request.fd = request32.fd;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	dev->buf_use++;

	if (request.count < dma->buf_count)
		goto done;

	if ((dev->driver->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (dev->driver->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;
		if (map == NULL)
			return (EINVAL);
		size = round_page(map->size);
		foff = (uintptr_t)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}
	request.virtual = drm_smmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_SHARED, request.fd, foff);
	if (request.virtual == NULL) {
		DRM_ERROR("drm_mapbufs: request.virtual is NULL");
		return (EINVAL);
	}

	vaddr = (unsigned long)request.virtual;
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		list32 = (drm_buf_pub_32_t *)(uintptr_t)request32.list;
		for (i = 0; i < dma->buf_count; i++) {
			if (DRM_COPY_TO_USER(&list32[i].idx,
			    &dma->buflist[i]->idx, sizeof (list32[0].idx))) {
				return (EFAULT);
			}
			if (DRM_COPY_TO_USER(&list32[i].total,
			    &dma->buflist[i]->total,
			    sizeof (list32[0].total))) {
				return (EFAULT);
			}
			if (DRM_COPY_TO_USER(&list32[i].used,
			    &zero, sizeof (zero))) {
				return (EFAULT);
			}
			address32 = vaddr + dma->buflist[i]->offset; /* *** */
			ret_tmp = DRM_COPY_TO_USER(&list32[i].address,
			    &address32, sizeof (list32[0].address));
			if (ret_tmp)
				return (EFAULT);
		}
		goto done;
	}
#endif

	ASSERT(ddi_model_convert_from(mode & FMODELS) != DDI_MODEL_ILP32);
	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request.list[i].idx,
		    &dma->buflist[i]->idx, sizeof (request.list[0].idx))) {
			return (EFAULT);
		}
		if (DRM_COPY_TO_USER(&request.list[i].total,
		    &dma->buflist[i]->total, sizeof (request.list[0].total))) {
			return (EFAULT);
		}
		if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
		    sizeof (zero))) {
			return (EFAULT);
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */

		ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
		    &address, sizeof (address));
		if (ret_tmp) {
			return (EFAULT);
		}
	}

done:
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		request32.count = dma->buf_count;
		request32.virtual = (caddr32_t)(uintptr_t)request.virtual;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else {
#endif
		request.count = dma->buf_count;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));
#ifdef	_MULTI_DATAMODEL
	}
#endif
	return (0);
}