xref: /titanic_50/usr/src/uts/common/io/drm/drm_bufs.c (revision 275c9da86e89f8abf71135cf63d9fc23671b2e60)
/*
 * drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 */
/*
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "drmP.h"
#include <gfx_private.h>
#include "drm_io32.h"


#define	PAGE_MASK	(PAGE_SIZE-1)
#define	round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)
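
/*
 * For illustration: with a 4K PAGE_SIZE, round_page() rounds a length
 * up to the next page boundary, e.g. round_page(0x1001) == 0x2000
 * while round_page(0x1000) == 0x1000.
 */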

/*
 * Compute order.  Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order = 0;
	unsigned long tmp = size;

	while (tmp >>= 1)
		order++;

	if (size & ~(1UL << order))
		++order;

	return (order);
}
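
/*
 * For illustration: drm_order() is the base-2 logarithm of size,
 * rounded up, e.g. drm_order(4096) == 12 and drm_order(4097) == 13,
 * so (1UL << drm_order(size)) is always >= size.
 */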

static inline drm_local_map_t *
drm_find_map(drm_device_t *dev, u_offset_t offset, int type)
{
	drm_local_map_t		*map;

	TAILQ_FOREACH(map, &dev->maplist, link) {
		if ((map->type == type) && ((map->offset == offset) ||
		    ((map->flags == _DRM_CONTAINS_LOCK) &&
		    (map->type == _DRM_SHM))))
			return (map);
	}

	return (NULL);
}

int
drm_addmap(drm_device_t *dev, unsigned long offset,
    unsigned long size, drm_map_type_t type,
    drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	caddr_t		kva;
	int		retval;

	if (!(dev->flags & (FREAD|FWRITE)))
		return (EACCES); /* Require read/write */

	/*
	 * Only allow shared memory to be removable since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM)
		return (EINVAL);
	if ((offset & PAGE_MASK) || (size & PAGE_MASK))
		return (EINVAL);
	if (offset + size < offset)
		return (EINVAL);

	/*
	 * Check if this is just another version of a kernel-allocated
	 * map, and just hand that back if so.
	 */
	map = drm_find_map(dev, offset, type);
	if (map != NULL) {
		goto done;
	}

	/*
	 * Allocate a new map structure, fill it in, and do any
	 * type-specific initialization necessary.
	 */
	map = drm_alloc(sizeof (*map), DRM_MEM_MAPS);
	if (!map)
		return (ENOMEM);

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		retval = drm_ioremap(dev, map);
		if (retval) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (retval);
		}
		break;

	case _DRM_SHM:
		/*
		 * ddi_umem_alloc() returns page-aligned memory, so no
		 * alignment handling is needed here.
		 */
		map->handle = ddi_umem_alloc(map->size,
		    DDI_UMEM_NOSLEEP, &map->drm_umem_cookie);
		if (!map->handle) {
			DRM_ERROR("drm_addmap: ddi_umem_alloc failed");
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (ENOMEM);
		}
		/*
		 * Record only the low 32 bits of this handle, since a
		 * 32-bit user application cannot pass a 64-bit offset
		 * when doing mmap.
		 */
		map->offset = (uintptr_t)map->handle;
		map->offset &= 0xffffffffUL;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				ddi_umem_free(map->drm_umem_cookie);
				drm_free(map, sizeof (*map), DRM_MEM_MAPS);
				return (EBUSY);
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		map->dev_addr = map->handle;
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (EINVAL);
		}
		map->offset += (uintptr_t)dev->sg->virtual;
		map->handle = (void *)(uintptr_t)map->offset;
		map->dev_addr = dev->sg->virtual;
		map->dev_handle = dev->sg->dmah_sg->acc_hdl;
		break;

	case _DRM_CONSISTENT:
		cmn_err(CE_WARN, "%d: _DRM_CONSISTENT is not supported",
		    __LINE__);
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (ENOTSUP);
	case _DRM_AGP:
		map->offset += dev->agp->base;
		kva = gfxp_map_kernel_space(map->offset, map->size,
		    GFXP_MEMORY_WRITECOMBINED);
		if (kva == NULL) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			cmn_err(CE_WARN,
			    "drm_addmap: failed to map AGP aperture");
			return (ENOMEM);
		}
		map->handle = (void *)(uintptr_t)kva;
		map->dev_addr = kva;
		break;
	default:
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (EINVAL);
	}

	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with the lock held, when an existing kernel map is found. */
	*map_ptr = map;

	return (0);
}
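
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): a driver would typically publish its register BAR during
 * attach; regs_base and regs_size are assumed to come from PCI config
 * space:
 *
 *	drm_local_map_t *map;
 *	int err;
 *
 *	err = drm_addmap(dev, regs_base, regs_size,
 *	    _DRM_REGISTERS, 0, &map);
 *	if (err != 0)
 *		return (err);
 */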

/*ARGSUSED*/
int
drm_addmap_ioctl(DRM_IOCTL_ARGS)
{
	drm_map_t request;
	drm_local_map_t *map;
	int err;
	DRM_DEVICE;

	if (!(dev->flags & (FREAD|FWRITE)))
		return (EACCES); /* Require read/write */

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.mtrr = request32.mtrr;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	err = drm_addmap(dev, request.offset, request.size, request.type,
	    request.flags, &map);

	if (err != 0)
		return (err);

	request.offset = map->offset;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr = map->mtrr;
	request.handle = (uintptr_t)map->handle;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;
		request32.offset = request.offset;
		request32.size = (uint32_t)request.size;
		request32.type = request.type;
		request32.flags = request.flags;
		request32.handle = request.handle;
		request32.mtrr = request.mtrr;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else
#endif
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));

	return (0);
}

void
drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		drm_ioremapfree(map);
		break;
	case _DRM_SHM:
		ddi_umem_free(map->drm_umem_cookie);
		break;
	case _DRM_AGP:
		/*
		 * We mapped the AGP aperture into kernel space in
		 * drm_addmap; unmap it here and release the kernel
		 * virtual address space.
		 */
		gfxp_unmap_kernel_space(map->dev_addr, map->size);
		break;

	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		break;
	default:
		break;
	}

	drm_free(map, sizeof (*map), DRM_MEM_MAPS);
}

/*
 * Remove a map from the list and deallocate its resources if the
 * mapping isn't in use.
 */
/*ARGSUSED*/
int
drm_rmmap_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_local_map_t *map;
	drm_map_t request;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (drm_map_32_t));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.handle = request32.handle;
		request.mtrr = request32.mtrr;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (((uintptr_t)map->handle ==
		    (request.handle & 0xffffffff)) &&
		    (map->flags & _DRM_REMOVABLE))
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return (EINVAL);
	}

	drm_rmmap(dev, map);
	DRM_UNLOCK();

	return (0);
}

/*ARGSUSED*/
static void
drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				DRM_ERROR(
				    "drm_cleanup_buf_error: not implemented");
			}
		}
		drm_free(entry->seglist,
		    entry->seg_count *
		    sizeof (*entry->seglist), DRM_MEM_SEGS);
		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
				    entry->buflist[i].dev_priv_size,
				    DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
		    entry->buf_count *
		    sizeof (*entry->buflist), DRM_MEM_BUFS);
		entry->buflist = NULL;
		entry->buf_count = 0;
	}
}

/*ARGSUSED*/
int
drm_markbufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_markbufs");
	return (EINVAL);
}

/*ARGSUSED*/
int
drm_infobufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_infobufs");
	return (EINVAL);
}

static int
drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t **temp_buflist;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int byte_count;
	int i;

	if (!dma)
		return (EINVAL);

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	entry = &dma->bufs[order];

	/* No more than one allocation per order */
	if (entry->buf_count) {
		return (ENOMEM);
	}

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (!entry->buflist) {
		return (ENOMEM);
	}
	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address	= (void *)(agp_offset + offset);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_alloc(
	    (dma->buf_count + entry->buf_count) * sizeof (*dma->buflist),
	    DRM_MEM_BUFS);

	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		DRM_ERROR("drm_do_addbufs_agp: temp_buflist is NULL");
		return (ENOMEM);
	}

	/* Copy the existing buffer pointers into the enlarged list. */
	bcopy(dma->buflist, temp_buflist,
	    dma->buf_count * sizeof (*dma->buflist));
	kmem_free(dma->buflist, dma->buf_count * sizeof (*dma->buflist));
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return (0);
}
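
/*
 * Worked example (illustrative only, assuming 4K pages): for
 * request->size == 65536 with _DRM_PAGE_ALIGN set, drm_order() gives
 * order 16, so size == alignment == 65536 and page_order == 4; each
 * buffer then advances byte_count by PAGE_SIZE << 4 == 64KB.
 */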

static int
drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;

	byte_count = 0;
	agp_offset = request->agp_start;
	entry = &dma->bufs[order];

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (entry->buflist == NULL)
		return (ENOMEM);

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof (*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof (*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		drm_cleanup_buf_error(dev, entry);
		return (ENOMEM);
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	request->count = entry->buf_count;
	request->size = size;
	dma->flags = _DRM_DMA_USE_SG;

	return (0);
}

int
drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EBUSY);
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (ENOMEM);
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return (ret);
}

int
drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EBUSY);
	}

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (ENOMEM);
	}

	ret = drm_do_addbufs_sg(dev, request);
	DRM_SPINUNLOCK(&dev->dma_lock);
	return (ret);
}

/*ARGSUSED*/
int
drm_addbufs_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_desc_t request;
	int err;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.size = request32.size;
		request.low_mark = request32.low_mark;
		request.high_mark = request32.high_mark;
		request.flags = request32.flags;
		request.agp_start = request32.agp_start;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	if (request.flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, &request);
	else if (request.flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, &request);
	else
		err = EINVAL;	/* neither AGP nor SG was requested */

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc_32_t request32;
		request32.count = request.count;
		request32.size = request.size;
		request32.low_mark = request.low_mark;
		request32.high_mark = request.high_mark;
		request32.flags = request.flags;
		request32.agp_start = (uint32_t)request.agp_start;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else
#endif
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));

	return (err);
}

/*ARGSUSED*/
int
drm_freebufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_free_32_t request32;
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.list = (int *)(uintptr_t)request32.list;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	for (i = 0; i < request.count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof (idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("drm_freebufs: Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->filp != fpriv) {
			DRM_ERROR(
			    "drm_freebufs: process %d does not own buffer\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}

	return (retcode);
}

#ifdef _LP64
extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
#define	drm_smmap	smmap64
#else
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
extern caddr_t smmap32(caddr32_t, size32_t, int, int, int, off32_t);
#define	drm_smmap	smmap32
#else
#error "No define for _LP64, _SYSCALL32_IMPL or _ILP32"
#endif
#endif
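
/*
 * drm_smmap() calls the system mmap entry point directly from the
 * kernel, so drm_mapbufs() below can establish the user mapping of
 * the DMA buffer area on the caller's behalf; the effect is as if
 * the process had issued
 * mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, foff).
 */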

/*ARGSUSED*/
int
drm_mapbufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_map_t request;
	const int zero = 0;
	unsigned long	vaddr;
	unsigned long	address;
	drm_device_dma_t *dma = dev->dma;
	uint_t	size;
	uint_t	foff;
	int		ret_tmp;
	int		i;

#ifdef	_MULTI_DATAMODEL
	drm_buf_map_32_t request32;
	drm_buf_pub_32_t	*list32;
	uint_t		address32;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.virtual = (void *)(uintptr_t)request32.virtual;
		request.list = (drm_buf_pub_t *)(uintptr_t)request32.list;
		request.fd = request32.fd;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	dev->buf_use++;

	if (request.count < dma->buf_count)
		goto done;

	if ((dev->driver->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (dev->driver->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;
		if (map == NULL)
			return (EINVAL);
		size = round_page(map->size);
		foff = (uintptr_t)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}
	request.virtual = drm_smmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_SHARED, request.fd, foff);
	if (request.virtual == NULL) {
		DRM_ERROR("drm_mapbufs: request.virtual is NULL");
		return (EINVAL);
	}

	vaddr = (unsigned long)request.virtual;
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		list32 = (drm_buf_pub_32_t *)(uintptr_t)request32.list;
		for (i = 0; i < dma->buf_count; i++) {
			if (DRM_COPY_TO_USER(&list32[i].idx,
			    &dma->buflist[i]->idx, sizeof (list32[0].idx))) {
				return (EFAULT);
			}
			if (DRM_COPY_TO_USER(&list32[i].total,
			    &dma->buflist[i]->total,
			    sizeof (list32[0].total))) {
				return (EFAULT);
			}
			if (DRM_COPY_TO_USER(&list32[i].used,
			    &zero, sizeof (zero))) {
				return (EFAULT);
			}
			address32 = vaddr + dma->buflist[i]->offset; /* *** */
			ret_tmp = DRM_COPY_TO_USER(&list32[i].address,
			    &address32, sizeof (list32[0].address));
			if (ret_tmp)
				return (EFAULT);
		}
		goto done;
	}
#endif

	ASSERT(ddi_model_convert_from(mode & FMODELS) != DDI_MODEL_ILP32);
	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request.list[i].idx,
		    &dma->buflist[i]->idx, sizeof (request.list[0].idx))) {
			return (EFAULT);
		}
		if (DRM_COPY_TO_USER(&request.list[i].total,
		    &dma->buflist[i]->total, sizeof (request.list[0].total))) {
			return (EFAULT);
		}
		if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
		    sizeof (zero))) {
			return (EFAULT);
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */

		ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
		    &address, sizeof (address));
		if (ret_tmp) {
			return (EFAULT);
		}
	}

done:
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		request32.count = dma->buf_count;
		request32.virtual = (caddr32_t)(uintptr_t)request.virtual;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else {
#endif
		request.count = dma->buf_count;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));
#ifdef	_MULTI_DATAMODEL
	}
#endif
	return (0);
}
905