/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 */
/*
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "drmP.h"

#define	PAGE_MASK	(PAGE_SIZE-1)
#define	round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)
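
/*
 * Example of the rounding arithmetic above (assuming the usual 4KB
 * PAGE_SIZE of 0x1000): round_page(0x1001) == (0x1001 + 0xfff) & ~0xfff
 * == 0x2000, while an already page-aligned value such as 0x2000 is
 * returned unchanged.
 */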

/*
 * Compute order.  Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;
	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	if (size & ~(1UL << order))
		++order;

	return (order);
}
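
/*
 * A faster equivalent is possible with a hardware bit scan; a minimal
 * sketch, assuming highbit() from <sys/sysmacros.h> (returns the 1-based
 * position of the highest set bit, or 0 for a zero argument).  Shown for
 * illustration only, not compiled in.
 */
#if 0
static int
drm_order_fast(unsigned long size)
{
	/* floor(log2(size)); highbit() is 1-based, hence the -1 */
	int order = (size == 0) ? 0 : highbit(size) - 1;

	/* round up when size is not a power of two */
	if (size & (size - 1))
		++order;

	return (order);
}
#endif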


int
drm_addmap(drm_device_t *dev, unsigned long long offset, unsigned long size,
    drm_map_type_t type, drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;

	DRM_DEBUG("drm_addmap: offset = 0x%08llx, size = 0x%08lx, type = %d\n",
	    offset, size, type);

	if (!(dev->flags & (FREAD|FWRITE)))
		return (DRM_ERR(EACCES)); /* Require read/write */

	/*
	 * Only allow shared memory to be removable, since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM)
		return (DRM_ERR(EINVAL));
	if ((offset & PAGE_MASK) || (size & PAGE_MASK))
		return (DRM_ERR(EINVAL));
	if (offset + size < offset)
		return (DRM_ERR(EINVAL));

	/*
	 * Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		DRM_LOCK();
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type &&
			    map->offset.off == (u_offset_t)offset) {
				map->size = size;
				DRM_DEBUG("drm_addmap: Found kernel map %d\n",
				    type);
				goto done;
			}
		}
		DRM_UNLOCK();
	}

	/*
	 * Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = drm_alloc(sizeof (*map), DRM_MEM_MAPS);
	if (!map)
		return (DRM_ERR(ENOMEM));

	map->offset.off = (u_offset_t)offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	DRM_DEBUG("drm_addmap: map->type = %x", map->type);
	DRM_DEBUG("drm_addmap: map->size = %lx", map->size);
	DRM_DEBUG("drm_addmap: map->offset.off = %llx", map->offset.off);
	switch (map->type) {
	case _DRM_REGISTERS:
		DRM_DEBUG("drm_addmap: map the Registers");
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		(void) drm_ioremap(dev, map);
		break;
	case _DRM_SHM:
		map->handle = ddi_umem_alloc(map->size, DDI_UMEM_NOSLEEP,
		    &map->drm_umem_cookie);
		DRM_DEBUG("drm_addmap: size=0x%lx drm_order(size)=%d "
		    "handle=0x%p\n",
		    (unsigned long)map->size,
		    drm_order(map->size), map->handle);
		if (!map->handle) {
			/* nothing was allocated, so there is no cookie */
			DRM_ERROR("drm_addmap: ddi_umem_alloc failed");
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (DRM_ERR(ENOMEM));
		}
		/*
		 * Record only the low 32 bits of this handle, since a
		 * 32-bit user application cannot pass a 64-bit offset
		 * when doing mmap.
		 */
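		/*
		 * Illustration only (not a real address): a 64-bit kernel
		 * handle such as 0xffffff0012340000 would be published to
		 * a 32-bit client as map offset 0x12340000.
		 */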
		map->offset.ptr = map->handle;
		map->offset.off &= 0xffffffffUL;
		DRM_DEBUG("drm_addmap: offset=0x%llx", map->offset.off);
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				ddi_umem_free(map->drm_umem_cookie);
				drm_free(map, sizeof (*map), DRM_MEM_MAPS);
				return (DRM_ERR(EBUSY));
			}
			DRM_DEBUG("drm_addmap: map shm to hw_lock");
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (DRM_ERR(EINVAL));
		}
		map->offset.off += dev->sg->handle;
		map->drm_umem_cookie = dev->sg->sg_umem_cookie;
		break;
	case _DRM_CONSISTENT:
		break;
	case _DRM_AGP:
		break;
	case _DRM_AGP_UMEM:
		map->offset.off += dev->agp->base;
		break;
	default:
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (DRM_ERR(EINVAL));
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	DRM_UNLOCK();

	DRM_DEBUG("drm_addmap: Added map %d 0x%llx/0x%lx\n",
	    map->type, map->offset.off, map->size);

	*map_ptr = map;
	TAILQ_FOREACH(map, &dev->maplist, link) {
		DRM_DEBUG("type=%x, offset=%llx, size=%lx",
		    map->type, map->offset.off, map->size);
	}

	return (0);
}
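
/*
 * Example use of drm_addmap() from a driver attach path.  A sketch only:
 * the bus address (0xfd000000) and size (0x40000) are hypothetical
 * placeholders, not values from any real device.
 */
#if 0
static int
example_map_registers(drm_device_t *dev, drm_local_map_t **mapp)
{
	/* map a hypothetical 256KB register BAR, with no special flags */
	return (drm_addmap(dev, 0xfd000000ULL, 0x40000,
	    _DRM_REGISTERS, 0, mapp));
}
#endif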

/*ARGSUSED*/
int
drm_addmap_ioctl(DRM_IOCTL_ARGS)
{
	drm_map_t request;
	drm_local_map_t *map;
	int err;
	DRM_DEVICE;

	if (!(dev->flags & (FREAD|FWRITE)))
		return (DRM_ERR(EACCES)); /* Require read/write */

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32,
		    (drm_map32_t *)data,
		    sizeof (drm_map32_t));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.handle = request32.handle;
		request.mtrr = request32.mtrr;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_map_t *)data,
		    sizeof (drm_map_t));

	DRM_DEBUG("drm_addmap: request.offset=%llx, request.size=%lx, "
	    "request.type=%x", request.offset, request.size, request.type);
	err = drm_addmap(dev, request.offset, request.size, request.type,
	    request.flags, &map);

	if (err != 0)
		return (err);

	request.offset = map->offset.off;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr = map->mtrr;
	request.handle = (unsigned long long)(uintptr_t)map->handle;

	if (request.type != _DRM_SHM) {
		request.handle = request.offset;
	}

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map32_t request32;
		request32.offset = request.offset;
		request32.size = request.size;
		request32.type = request.type;
		request32.flags = request.flags;
		request32.handle = request.handle;
		request32.mtrr = request.mtrr;
		DRM_COPY_TO_USER_IOCTL((drm_map32_t *)data,
		    request32,
		    sizeof (drm_map32_t));
	} else
		DRM_COPY_TO_USER_IOCTL((drm_map_t *)data, request,
		    sizeof (drm_map_t));

	return (0);
}

void
drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		drm_ioremapfree(map);
		break;
	case _DRM_SHM:
		ddi_umem_free(map->drm_umem_cookie);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		break;
	default:
		break;
	}

	drm_free(map, sizeof (*map), DRM_MEM_MAPS);
}

/*
 * Remove a map from the list and deallocate its resources if the
 * mapping isn't in use.
 */
/*ARGSUSED*/
int
drm_rmmap_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_local_map_t *map;
	drm_map_t request;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32,
		    (drm_map32_t *)data,
		    sizeof (drm_map32_t));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.handle = request32.handle;
		request.mtrr = request32.mtrr;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_map_t *)data,
		    sizeof (request));

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (((unsigned long long)(uintptr_t)map->handle ==
		    request.handle) &&
		    (map->flags & _DRM_REMOVABLE))
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return (DRM_ERR(EINVAL));
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return (0);
}

/*ARGSUSED*/
static void
drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				DRM_ERROR(
				    "drm_cleanup_buf_error: not implemented");
			}
		}
		drm_free(entry->seglist,
		    entry->seg_count *
		    sizeof (*entry->seglist), DRM_MEM_SEGS);
		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
				    entry->buflist[i].dev_priv_size,
				    DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
		    entry->buf_count *
		    sizeof (*entry->buflist), DRM_MEM_BUFS);
		entry->buf_count = 0;
	}
}

/*ARGSUSED*/
int
drm_markbufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_markbufs");
	return (DRM_ERR(EINVAL));
}

/*ARGSUSED*/
int
drm_infobufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_infobufs");
	return (DRM_ERR(EINVAL));
}

static int
drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!dma)
		return (DRM_ERR(EINVAL));

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("drm_do_addbufs_agp: count:      %d\n",  count);
	DRM_DEBUG("drm_do_addbufs_agp: order:      %d\n",  order);
	DRM_DEBUG("drm_do_addbufs_agp: size:       %d\n",  size);
	DRM_DEBUG("drm_do_addbufs_agp: agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("drm_do_addbufs_agp: alignment:  %d\n",  alignment);
	DRM_DEBUG("drm_do_addbufs_agp: page_order: %d\n",  page_order);
	DRM_DEBUG("drm_do_addbufs_agp: total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (!entry->buflist) {
		return (DRM_ERR(ENOMEM));
	}
	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address	= (void *)(agp_offset + offset);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (DRM_ERR(ENOMEM));
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("drm_do_addbufs_agp: byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof (*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof (*dma->buflist), DRM_MEM_BUFS);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		DRM_ERROR("drm_do_addbufs_agp: out of memory");
		return (DRM_ERR(ENOMEM));
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("drm_do_addbufs_agp: dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("drm_do_addbufs_agp: entry->buf_count : %d\n",
	    entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	DRM_DEBUG("drm_do_addbufs_agp: add bufs successful.");
	return (0);
}

static int
drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (entry->buflist == NULL)
		return (DRM_ERR(ENOMEM));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (DRM_ERR(ENOMEM));
		}

		DRM_DEBUG("drm_do_addbufs_sg: buffer %d @ %p\n",
		    entry->buf_count, buf->address);
		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("drm_do_addbufs_sg: byte_count %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof (*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof (*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		drm_cleanup_buf_error(dev, entry);
		return (DRM_ERR(ENOMEM));
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("drm_do_addbufs_sg: dma->buf_count: %d\n", dma->buf_count);
	DRM_DEBUG("drm_do_addbufs_sg: entry->buf_count: %d\n",
	    entry->buf_count);
	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return (0);
}

int
drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EINVAL));
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EINVAL));
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EBUSY));
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(ENOMEM));
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return (ret);
}
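
/*
 * Example of requesting DMA buffers through drm_addbufs_agp().  A sketch
 * only: the count, size, and agp_start values here are hypothetical; a
 * real caller takes them from the userland drm_buf_desc_t request, as
 * drm_addbufs_ioctl() below does.
 */
#if 0
static int
example_add_agp_bufs(drm_device_t *dev, unsigned long agp_start)
{
	drm_buf_desc_t req;

	bzero(&req, sizeof (req));
	req.count = 32;			/* 32 buffers ... */
	req.size = 4096;		/* ... of one page each */
	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
	req.agp_start = agp_start;	/* offset into the AGP aperture */

	/* on return, req.count and req.size reflect what was allocated */
	return (drm_addbufs_agp(dev, &req));
}
#endif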

int
drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EINVAL));
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EINVAL));
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EBUSY));
	}

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(ENOMEM));
	}

	ret = drm_do_addbufs_sg(dev, request);
	DRM_SPINUNLOCK(&dev->dma_lock);
	return (ret);
}

/*ARGSUSED*/
int
drm_addbufs_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_desc_t request;
	int err;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32,
		    (drm_buf_desc32_t *)data,
		    sizeof (drm_buf_desc32_t));
		request.count = request32.count;
		request.size = request32.size;
		request.low_mark = request32.low_mark;
		request.high_mark = request32.high_mark;
		request.flags = request32.flags;
		request.agp_start = request32.agp_start;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_desc_t *)data,
		    sizeof (request));

	if (request.flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, &request);
	else if (request.flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, &request);
	else
		err = DRM_ERR(EINVAL);

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc32_t request32;
		request32.count = request.count;
		request32.size = request.size;
		request32.low_mark = request.low_mark;
		request32.high_mark = request.high_mark;
		request32.flags = request.flags;
		request32.agp_start = request.agp_start;
		DRM_COPY_TO_USER_IOCTL((drm_buf_desc32_t *)data,
		    request32,
		    sizeof (drm_buf_desc32_t));
	} else
		DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request,
		    sizeof (request));

	return (err);
}

/*ARGSUSED*/
int
drm_freebufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("drm_freebufs: ");
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_free32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32,
		    (drm_buf_free32_t *)data,
		    sizeof (drm_buf_free32_t));
		request.count = request32.count;
		request.list = (int __user *)(uintptr_t)request32.list;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_free_t *)data,
		    sizeof (request));

	for (i = 0; i < request.count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof (idx))) {
			retcode = DRM_ERR(EFAULT);
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("drm_freebufs: Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = DRM_ERR(EINVAL);
			break;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("drm_freebufs: process %d does not own "
			    "the buffer.\n", DRM_CURRENTPID);
			retcode = DRM_ERR(EINVAL);
			break;
		}
		drm_free_buffer(dev, buf);
	}

	return (retcode);
}

/*ARGSUSED*/
int
drm_mapbufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_map_t request;
	int i;
	int retcode = 0;
	const int zero = 0;
	unsigned long vaddr;
	unsigned long address;
	drm_device_dma_t *dma = dev->dma;
	int ret_tmp;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_map32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32,
		    (drm_buf_map32_t *)data,
		    sizeof (drm_buf_map32_t));
		request.count = request32.count;
		request.virtual = (void __user *)(uintptr_t)request32.virtual;
		request.list = (drm_buf_pub_t __user *)
		    (uintptr_t)request32.list;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_map_t *)data,
		    sizeof (request));

	dev->buf_use++;

	if (request.count < dma->buf_count)
		goto done;

	if (request.virtual == NULL) {
		DRM_ERROR("drm_mapbufs: request.virtual is NULL");
		return (DRM_ERR(EINVAL));
	}
	vaddr = (unsigned long)request.virtual;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request.list[i].idx,
		    &dma->buflist[i]->idx, sizeof (request.list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request.list[i].total,
		    &dma->buflist[i]->total, sizeof (request.list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
		    sizeof (zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* user VA */

		if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
			caddr32_t address32;
			address32 = (caddr32_t)address;
			ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
			    &address32,
			    sizeof (caddr32_t));
		} else
			ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
			    &address, sizeof (address));

		if (ret_tmp) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request.count = dma->buf_count;
	DRM_DEBUG("drm_mapbufs: %d buffers, retcode = %d\n",
	    request.count, retcode);
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_map32_t request32;
		request32.count = request.count;
		request32.virtual = (caddr32_t)(uintptr_t)request.virtual;
		request32.list = (caddr32_t)(uintptr_t)request.list;
		DRM_COPY_TO_USER_IOCTL((drm_buf_map32_t *)data,
		    request32, sizeof (drm_buf_map32_t));
	} else
		DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request,
		    sizeof (request));

	return (DRM_ERR(retcode));
}
884