/*
 * drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 */
/*
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "drmP.h"

#define	PAGE_MASK	(PAGE_SIZE-1)
#define	round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)
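
/*
 * Note that this local PAGE_MASK selects the offset bits, unlike the
 * usual kernel PAGEMASK.  For example, with a 4 KB page size
 * (PAGE_SIZE == 0x1000), PAGE_MASK is 0xfff, so round_page(0x1001)
 * == 0x2000 while round_page(0x1000) is returned unchanged.
 */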

/*
 * Compute the binary order (base-2 logarithm, rounded up) of a size.
 * Can be made faster.
 */
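/*
 * For example, drm_order(4096) returns 12 and drm_order(4097)
 * returns 13: the order of the smallest power of two no smaller
 * than the requested size.
 */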
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	if (size & ~(1 << order))
		++order;

	return (order);
}

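/*
 * Register a new mapping of device memory, registers, or shared
 * memory with the device.  For the kernel-managed types (registers,
 * frame buffer, SHM), an existing map of the same type and offset is
 * reused rather than duplicated.  On success, the map entry is
 * returned through map_ptr.
 */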
int
drm_addmap(drm_device_t *dev, unsigned long long offset, unsigned long size,
    drm_map_type_t type, drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;

	DRM_DEBUG("drm_addmap: offset = 0x%08llx, size = 0x%08lx, type = %d\n",
	    offset, size, type);

	if (!(dev->flags & (FREAD|FWRITE)))
		return (DRM_ERR(EACCES)); /* Require read/write */

	/*
	 * Only allow shared memory to be removable, since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM)
		return (DRM_ERR(EINVAL));
	if ((offset & PAGE_MASK) || (size & PAGE_MASK))
		return (DRM_ERR(EINVAL));
	if (offset + size < offset)
		return (DRM_ERR(EINVAL));

	/*
	 * If this is just another reference to a kernel-allocated map,
	 * hand back the existing entry instead of creating a new one.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		DRM_LOCK();
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type &&
			    map->offset.off == (u_offset_t)offset) {
				map->size = size;
				DRM_DEBUG("drm_addmap: found kernel map %d\n",
				    type);
				goto done;
			}
		}
		DRM_UNLOCK();
	}

	/*
	 * Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = drm_alloc(sizeof (*map), DRM_MEM_MAPS);
	if (!map)
		return (DRM_ERR(ENOMEM));

	map->offset.off = (u_offset_t)offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	DRM_DEBUG("drm_addmap: map->type = %x", map->type);
	DRM_DEBUG("drm_addmap: map->size = %lx", map->size);
	DRM_DEBUG("drm_addmap: map->offset.off = %llx", map->offset.off);
	switch (map->type) {
	case _DRM_REGISTERS:
		DRM_DEBUG("drm_addmap: map the registers");
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		/*
		 * Registers and the frame buffer are mapped the same
		 * way; _DRM_WRITE_COMBINING gets no special handling
		 * here, so the region is mapped exactly once.
		 */
		(void) drm_ioremap(dev, map);
		break;
	case _DRM_SHM:
		map->handle = ddi_umem_alloc(map->size, DDI_UMEM_NOSLEEP,
		    &map->drm_umem_cookie);
		DRM_DEBUG("drm_addmap: size=0x%lx drm_order(size)=%d "
		    "handle=0x%p\n",
		    (unsigned long)map->size,
		    drm_order(map->size), map->handle);
		if (!map->handle) {
			/* Allocation failed, so there is no cookie to free. */
			DRM_ERROR("drm_addmap: ddi_umem_alloc failed");
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (DRM_ERR(ENOMEM));
		}
		/*
		 * Record only the low 32 bits of this handle, since a
		 * 32-bit user application cannot pass a 64-bit offset
		 * when it mmaps the region.
		 */
		map->offset.ptr = map->handle;
		map->offset.off &= 0xffffffffUL;
		DRM_DEBUG("drm_addmap: offset=0x%llx", map->offset.off);
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				ddi_umem_free(map->drm_umem_cookie);
				drm_free(map, sizeof (*map), DRM_MEM_MAPS);
				return (DRM_ERR(EBUSY));
			}
			DRM_DEBUG("drm_addmap: map shm to hw_lock");
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (DRM_ERR(EINVAL));
		}
		map->offset.off += dev->sg->handle;
		map->drm_umem_cookie = dev->sg->sg_umem_cookie;
		break;
	case _DRM_CONSISTENT:
		break;
	case _DRM_AGP:
		break;
	case _DRM_AGP_UMEM:
		map->offset.off += dev->agp->base;
		break;
	default:
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (DRM_ERR(EINVAL));
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	DRM_UNLOCK();

	DRM_DEBUG("drm_addmap: added map %d 0x%llx/0x%lx\n",
	    map->type, map->offset.off, map->size);

	*map_ptr = map;
	TAILQ_FOREACH(map, &dev->maplist, link) {
		DRM_DEBUG("type=%x, offset=%llx, size=%lx",
		    map->type, map->offset.off, map->size);
	}

	return (0);
}

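/*
 * ioctl wrapper around drm_addmap(): copies the request in from
 * userland (handling both 32-bit and 64-bit callers), performs the
 * mapping, and copies the resulting map description back out.
 */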
/*ARGSUSED*/
int
drm_addmap_ioctl(DRM_IOCTL_ARGS)
{
	drm_map_t request;
	drm_local_map_t *map;
	int err;
	DRM_DEVICE;

	if (!(dev->flags & (FREAD|FWRITE)))
		return (DRM_ERR(EACCES)); /* Require read/write */

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32, (drm_map32_t *)data,
		    sizeof (drm_map32_t));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.handle = request32.handle;
		request.mtrr = request32.mtrr;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_map_t *)data,
		    sizeof (drm_map_t));

	DRM_DEBUG("drm_addmap: request.offset=%llx, request.size=%lx, "
	    "request.type=%x", request.offset, request.size, request.type);
	err = drm_addmap(dev, request.offset, request.size, request.type,
	    request.flags, &map);

	if (err != 0)
		return (err);

	request.offset = map->offset.off;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr = map->mtrr;
	request.handle = (unsigned long long)(uintptr_t)map->handle;

	if (request.type != _DRM_SHM) {
		request.handle = request.offset;
	}

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map32_t request32;
		request32.offset = request.offset;
		request32.size = request.size;
		request32.type = request.type;
		request32.flags = request.flags;
		request32.handle = request.handle;
		request32.mtrr = request.mtrr;
		DRM_COPY_TO_USER_IOCTL((drm_map32_t *)data, request32,
		    sizeof (drm_map32_t));
	} else
		DRM_COPY_TO_USER_IOCTL((drm_map_t *)data, request,
		    sizeof (drm_map_t));

	return (0);
}

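/*
 * Remove a map from the device's map list and release its resources.
 * The caller must hold dev_lock (see the DRM_SPINLOCK_ASSERT below).
 */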
void
drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		drm_ioremapfree(map);
		break;
	case _DRM_SHM:
		ddi_umem_free(map->drm_umem_cookie);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	drm_free(map, sizeof (*map), DRM_MEM_MAPS);
}

/*
 * Remove a map from the list and deallocate its resources if the
 * mapping isn't in use.
 */
/*ARGSUSED*/
int
drm_rmmap_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_local_map_t *map;
	drm_map_t request;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32, (drm_map32_t *)data,
		    sizeof (drm_map32_t));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.handle = request32.handle;
		request.mtrr = request32.mtrr;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_map_t *)data,
		    sizeof (request));

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (((unsigned long long)(uintptr_t)map->handle ==
		    request.handle) &&
		    (map->flags & _DRM_REMOVABLE))
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return (DRM_ERR(EINVAL));
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return (0);
}

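/*
 * Tear down a partially constructed buffer entry after an allocation
 * failure, releasing any per-buffer private data and the buffer and
 * segment lists themselves.
 */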
/*ARGSUSED*/
static void
drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				DRM_ERROR(
				    "drm_cleanup_buf_error: not implemented");
			}
		}
		drm_free(entry->seglist,
		    entry->seg_count *
		    sizeof (*entry->seglist), DRM_MEM_SEGS);
		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
				    entry->buflist[i].dev_priv_size,
				    DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
		    entry->buf_count *
		    sizeof (*entry->buflist), DRM_MEM_BUFS);
		entry->buf_count = 0;
	}
}

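/*
 * Buffer marking and buffer-info queries are not supported by this
 * driver; both ioctls simply fail with EINVAL.
 */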
/*ARGSUSED*/
int
drm_markbufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_markbufs");
	return (DRM_ERR(EINVAL));
}

/*ARGSUSED*/
int
drm_infobufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_infobufs");
	return (DRM_ERR(EINVAL));
}

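/*
 * Allocate a set of equally sized DMA buffers out of AGP space and
 * append them to the device's buffer list.  The caller (see
 * drm_addbufs_agp() below) is responsible for holding dma_lock and
 * validating the request.
 */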
static int
drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!dma)
		return (DRM_ERR(EINVAL));

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("drm_do_addbufs_agp: count:      %d\n",  count);
	DRM_DEBUG("drm_do_addbufs_agp: order:      %d\n",  order);
	DRM_DEBUG("drm_do_addbufs_agp: size:       %d\n",  size);
	DRM_DEBUG("drm_do_addbufs_agp: agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("drm_do_addbufs_agp: alignment:  %d\n",  alignment);
	DRM_DEBUG("drm_do_addbufs_agp: page_order: %d\n",  page_order);
	DRM_DEBUG("drm_do_addbufs_agp: total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (!entry->buflist) {
		return (DRM_ERR(ENOMEM));
	}
	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address	= (void *)(agp_offset + offset);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (DRM_ERR(ENOMEM));
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("drm_do_addbufs_agp: byte_count: %d\n", byte_count);

	temp_buflist = drm_alloc(
	    (dma->buf_count + entry->buf_count) * sizeof (*dma->buflist),
	    DRM_MEM_BUFS);

	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		DRM_ERROR("temp_buflist is NULL");
		return (DRM_ERR(ENOMEM));
	}

	bcopy(dma->buflist, temp_buflist,
	    dma->buf_count * sizeof (*dma->buflist));
	drm_free(dma->buflist, dma->buf_count * sizeof (*dma->buflist),
	    DRM_MEM_BUFS);
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("drm_do_addbufs_agp: dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("drm_do_addbufs_agp: entry->buf_count : %d\n",
	    entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	DRM_DEBUG("drm_do_addbufs_agp: add bufs successful.");
	return (0);
}

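/*
 * Scatter/gather analogue of drm_do_addbufs_agp(): carve buffers out
 * of the scatter/gather region starting at request->agp_start and
 * append them to the device's buffer list.
 */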
static int
drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (entry->buflist == NULL)
		return (DRM_ERR(ENOMEM));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf		= &entry->buflist[entry->buf_count];
		buf->idx	= dma->buf_count + entry->buf_count;
		buf->total	= alignment;
		buf->order	= order;
		buf->used	= 0;

		buf->offset	= (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next	= NULL;
		buf->pending	= 0;
		buf->filp	= NULL;

		buf->dev_priv_size = dev->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (DRM_ERR(ENOMEM));
		}

		DRM_DEBUG("drm_do_addbufs_sg: buffer %d @ %p\n",
		    entry->buf_count, buf->address);
		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("drm_do_addbufs_sg: byte_count %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof (*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof (*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		drm_cleanup_buf_error(dev, entry);
		return (DRM_ERR(ENOMEM));
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("drm_do_addbufs_sg: dma->buf_count: %d\n", dma->buf_count);
	DRM_DEBUG("drm_do_addbufs_sg: entry->buf_count: %d\n",
	    entry->buf_count);
	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return (0);
}

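/*
 * Validate an AGP buffer-allocation request and perform it while
 * holding dma_lock.  Allocation is refused once the first
 * buffer-using ioctl has run, and only one allocation is allowed
 * per buffer order.
 */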
int
drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EINVAL));
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EINVAL));
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EBUSY));
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(ENOMEM));
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return (ret);
}

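/* As drm_addbufs_agp() above, but for scatter/gather buffers. */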
int
drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EINVAL));
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EINVAL));
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(EBUSY));
	}

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (DRM_ERR(ENOMEM));
	}

	ret = drm_do_addbufs_sg(dev, request);
	DRM_SPINUNLOCK(&dev->dma_lock);
	return (ret);
}

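/*
 * ioctl entry point for buffer allocation: dispatches to the AGP or
 * scatter/gather path based on the request flags and copies the
 * updated request back to userland.
 */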
/*ARGSUSED*/
int
drm_addbufs_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_desc_t request;
	int err;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32,
		    (drm_buf_desc32_t *)data,
		    sizeof (drm_buf_desc32_t));
		request.count = request32.count;
		request.size = request32.size;
		request.low_mark = request32.low_mark;
		request.high_mark = request32.high_mark;
		request.flags = request32.flags;
		request.agp_start = request32.agp_start;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_desc_t *)data,
		    sizeof (request));

	if (request.flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, &request);
	else if (request.flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, &request);
	else
		err = DRM_ERR(EINVAL);	/* unsupported buffer type */

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc32_t request32;
		request32.count = request.count;
		request32.size = request.size;
		request32.low_mark = request.low_mark;
		request32.high_mark = request.high_mark;
		request32.flags = request.flags;
		request32.agp_start = request.agp_start;
		DRM_COPY_TO_USER_IOCTL((drm_buf_desc32_t *)data,
		    request32,
		    sizeof (drm_buf_desc32_t));
	} else
		DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request,
		    sizeof (request));

	return (err);
}

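/*
 * Release a list of buffers named by index on behalf of the caller.
 * Each buffer must be owned by the file handle making the request.
 */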
/*ARGSUSED*/
int
drm_freebufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("drm_freebufs: ");
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_free32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32,
		    (drm_buf_free32_t *)data,
		    sizeof (drm_buf_free32_t));
		request.count = request32.count;
		request.list = (int __user *)(uintptr_t)request32.list;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_free_t *)data,
		    sizeof (request));

	for (i = 0; i < request.count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof (idx))) {
			retcode = DRM_ERR(EFAULT);
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("drm_freebufs: index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = DRM_ERR(EINVAL);
			break;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR(
			    "drm_freebufs: buffer not owned by process %d\n",
			    DRM_CURRENTPID);
			retcode = DRM_ERR(EINVAL);
			break;
		}
		drm_free_buffer(dev, buf);
	}

	return (retcode);
}

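/*
 * Describe the device's DMA buffers to userland: for each buffer,
 * copy out its index, total size, and the address it occupies
 * relative to the caller-supplied virtual base (request.virtual).
 * Each buffer's "used" count is also reset to zero.
 */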
/*ARGSUSED*/
int
drm_mapbufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_map_t request;
	int i;
	int retcode = 0;
	const int zero = 0;
	unsigned long vaddr;
	unsigned long address;
	drm_device_dma_t *dma = dev->dma;
	int ret_tmp;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_map32_t request32;
		DRM_COPY_FROM_USER_IOCTL(request32,
		    (drm_buf_map32_t *)data,
		    sizeof (drm_buf_map32_t));
		request.count = request32.count;
		request.virtual = (void __user *)(uintptr_t)request32.virtual;
		request.list = (drm_buf_pub_t __user *)
		    (uintptr_t)request32.list;
	} else
		DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_map_t *)data,
		    sizeof (request));

	dev->buf_use++;

	if (request.count < dma->buf_count)
		goto done;

	if (request.virtual == NULL) {
		DRM_ERROR("drm_mapbufs: request.virtual is NULL");
		return (DRM_ERR(EINVAL));
	}
	vaddr = (unsigned long)request.virtual;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request.list[i].idx,
		    &dma->buflist[i]->idx, sizeof (request.list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request.list[i].total,
		    &dma->buflist[i]->total, sizeof (request.list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
		    sizeof (zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset;

		if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
			caddr32_t address32 = (caddr32_t)address;
			ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
			    &address32, sizeof (caddr32_t));
		} else
			ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
			    &address, sizeof (address));

		if (ret_tmp) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request.count = dma->buf_count;
	DRM_DEBUG("drm_mapbufs: %d buffers, retcode = %d\n",
	    request.count, retcode);
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_map32_t request32;
		request32.count = request.count;
		request32.virtual = (caddr32_t)(uintptr_t)request.virtual;
		request32.list = (caddr32_t)(uintptr_t)request.list;
		DRM_COPY_TO_USER_IOCTL((drm_buf_map32_t *)data,
		    request32, sizeof (drm_buf_map32_t));
	} else
		DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request,
		    sizeof (request));

	return (DRM_ERR(retcode));
}