/* BEGIN CSTYLED */

/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define	IS_I965G(dev)	(dev->pci_device == 0x2972 || \
			dev->pci_device == 0x2982 || \
			dev->pci_device == 0x2992 || \
			dev->pci_device == 0x29A2)


/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
/*ARGSUSED*/
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
		DRM_UDELAY(1);
	}

	return DRM_ERR(EBUSY);
}

void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
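/*
 * Note on the ring arithmetic used by i915_wait_ring() and
 * i915_kernel_lost_context() above: the ring is a circular buffer of
 * ring->Size bytes, so free space is head - (tail + 8), wrapped by
 * adding Size when the result goes negative.  The 8 bytes of slack
 * presumably keep the tail from ever catching up to the head exactly,
 * since head == tail denotes an empty ring.
 */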
static int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		(void) drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

#if defined(__SOLARIS__) || defined(sun)
		if (dev_priv->hw_status_page) {
			drm_pci_free(dev);
#else
		if (dev_priv->status_page_dmah) {
			drm_pci_free(dev, dev_priv->status_page_dmah);
#endif
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		    DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}

static int i915_initialize(drm_device_t * dev,
    drm_i915_private_t * dev_priv,
    drm_i915_init_t * init)
{
	(void) memset(dev_priv, 0, sizeof(drm_i915_private_t));

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset.off = (u_offset_t)init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
		    " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = (u8 *)dev_priv->ring.map.dev_addr;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
#if defined(__SOLARIS__) || defined(sun)
	dev_priv->hw_status_page =
	    drm_pci_alloc(dev, DRM_PAGE_SIZE, &dev_priv->dma_status_page);
#else
	dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
	    0xffffffff);
#endif

#if defined(__SOLARIS__) || defined(sun)
	if (!dev_priv->hw_status_page) {
#else
	if (!dev_priv->status_page_dmah) {
#endif
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}

#if !defined(__SOLARIS__) && !defined(sun)
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
#endif
	(void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	dev->dev_private = (void *)dev_priv;

#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}
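/*
 * Register 0x02080 written above (and again in i915_dma_resume() below)
 * is the hardware status page address register, HWS_PGA in the Linux
 * i915 register headers.  It tells the chip the bus address of the
 * page into which it should write status dwords such as breadcrumbs.
 */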
static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
		    " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

/*ARGSUSED*/
static int i915_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv;
	drm_i915_init_t init;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
	    sizeof(init));

	switch (init.func) {
	case I915_INIT_DMA:
		dev_priv = drm_alloc(sizeof(drm_i915_private_t),
		    DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
		retcode = i915_initialize(dev, dev_priv, &init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = DRM_ERR(EINVAL);
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
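/*
 * Worked example (an illustration, not taken from hardware docs): the
 * top three bits of a command dword select the client.  For a 2D
 * command such as cmd = 0x54000004, (cmd >> 29) & 0x7 is 0x2, so its
 * size is (cmd & 0xff) + 2 = 6 dwords and validation resumes 6 dwords
 * later.  A dword whose client field is 0x1 (reserved) sizes to zero,
 * which aborts the whole buffer.
 */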
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
#ifndef __SUNPRO_C
		break;
#endif
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0; /* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

#ifndef __SUNPRO_C
	return 0;
#endif
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

	/* printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

static int i915_emit_cmds(drm_device_t * dev, int __user * buffer,
    int dwords, int mode)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return DRM_ERR(EINVAL);

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
		    sizeof(cmd)))
			return DRM_ERR(EINVAL);

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
			    sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

static int i915_emit_box(drm_device_t * dev,
    drm_clip_rect_t __user * boxes,
    int i, int DR1, int DR4, int mode)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return DRM_ERR(EFAULT);
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 ||
	    box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
		    box.x1, box.y1, box.x2, box.y2);
		return DRM_ERR(EINVAL);
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
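/*
 * A note on the macros used by the emit routines above and below:
 * RING_LOCALS declares the locals the ring macros expect,
 * BEGIN_LP_RING(n) waits until n dwords are free on the low-priority
 * ring, OUT_RING() writes one dword at the tail, and ADVANCE_LP_RING()
 * publishes the new tail pointer to the hardware.
 */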
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */

static void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
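/*
 * The CMD_STORE_DWORD_IDX above asks the chip to store the counter at
 * byte offset 20 of the hardware status page, i.e. dword index 5.
 * That is the same slot the ioctl handlers below read back as
 * hw_status[5] (and READ_BREADCRUMB) to report last_dispatch.
 */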
int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = CMD_MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}

static int i915_dispatch_cmdbuffer(drm_device_t * dev,
    drm_i915_cmdbuffer_t * cmd, int mode)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
			    cmd->DR1, cmd->DR4, mode);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf,
		    cmd->sz / 4, mode);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(drm_device_t * dev,
    drm_i915_batchbuffer_t * batch, int mode)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
			    batch->DR1, batch->DR4, mode);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}

static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
	    __FUNCTION__,
	    dev_priv->current_page,
	    dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

/*ARGSUSED*/
static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_quiescent(dev);
}
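/*
 * The ioctl handlers below have to cope with a 64-bit kernel serving a
 * 32-bit userland.  ddi_model_convert_from(mode & FMODELS) reports an
 * ILP32 caller, in which case the payload is copied in through the
 * 32-bit layout (drm_i915_batchbuffer32_t and friends) and the user
 * pointers are widened through uintptr_t.
 */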
/*ARGSUSED*/
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return DRM_ERR(EINVAL);
	}

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_batchbuffer32_t batchbuffer32_t;

		DRM_COPY_FROM_USER_IOCTL(batchbuffer32_t,
		    (drm_i915_batchbuffer32_t __user *) data,
		    sizeof (drm_i915_batchbuffer32_t));

		batch.start = batchbuffer32_t.start;
		batch.used = batchbuffer32_t.used;
		batch.DR1 = batchbuffer32_t.DR1;
		batch.DR4 = batchbuffer32_t.DR4;
		batch.num_cliprects = batchbuffer32_t.num_cliprects;
		batch.cliprects = (drm_clip_rect_t __user *)
		    (uintptr_t)batchbuffer32_t.cliprects;
	} else
		DRM_COPY_FROM_USER_IOCTL(batch,
		    (drm_i915_batchbuffer_t __user *) data,
		    sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
	    batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);
/*
	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
	    batch.num_cliprects * sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);
*/

	ret = i915_dispatch_batchbuffer(dev, &batch, mode);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}

/*ARGSUSED*/
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_cmdbuffer32_t cmdbuffer32_t;

		DRM_COPY_FROM_USER_IOCTL(cmdbuffer32_t,
		    (drm_i915_cmdbuffer32_t __user *) data,
		    sizeof (drm_i915_cmdbuffer32_t));

		cmdbuf.buf = (char __user *)(uintptr_t)cmdbuffer32_t.buf;
		cmdbuf.sz = cmdbuffer32_t.sz;
		cmdbuf.DR1 = cmdbuffer32_t.DR1;
		cmdbuf.DR4 = cmdbuffer32_t.DR4;
		cmdbuf.num_cliprects = cmdbuffer32_t.num_cliprects;
		cmdbuf.cliprects = (drm_clip_rect_t __user *)
		    (uintptr_t)cmdbuffer32_t.cliprects;
	} else
		DRM_COPY_FROM_USER_IOCTL(cmdbuf,
		    (drm_i915_cmdbuffer_t __user *) data,
		    sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
	    cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);
/*
	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
	    cmdbuf.num_cliprects * sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return DRM_ERR(EFAULT);
	}
*/

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf, mode);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}

static int i915_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);
	if (dev_priv->current_page != 0)
		(void) i915_dispatch_flip(dev);

	return 0;
}

/*ARGSUSED*/
static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_dispatch_flip(dev);
}
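/*
 * i915_getparam/i915_setparam expose driver state to userland.  Of the
 * set parameters, I915_SETPARAM_USE_MI_BATCHBUFFER_START selects the
 * MI_BATCH_BUFFER_START dispatch path in i915_dispatch_batchbuffer()
 * over the bounded MI_BATCH_BUFFER form, and
 * I915_SETPARAM_ALLOW_BATCHBUFFER gates the batchbuffer ioctl entirely.
 */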
/*ARGSUSED*/
static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_getparam32_t getparam32_t;

		DRM_COPY_FROM_USER_IOCTL(getparam32_t,
		    (drm_i915_getparam32_t __user *) data,
		    sizeof (drm_i915_getparam32_t));

		param.param = getparam32_t.param;
		param.value = (int __user *)(uintptr_t)getparam32_t.value;
	} else
		DRM_COPY_FROM_USER_IOCTL(param,
		    (drm_i915_getparam_t __user *) data,
		    sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("i915_getparam failed\n");
		return DRM_ERR(EFAULT);
	}
	return 0;
}

/*ARGSUSED*/
static int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
	    sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}

/*ARGSUSED*/
int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}

void i915_driver_lastclose(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown(&(dev_priv->agp_heap));
	}
	(void) i915_dma_cleanup(dev);
}

void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
			(void) i915_do_cleanup_pageflip(dev);
		}
		i915_mem_release(dev, filp, dev_priv->agp_heap);
	}
}

extern drm_ioctl_desc_t i915_ioctls[];

void i915_set_ioctl_desc(int n, drm_ioctl_t * func,
    int auth_needed, int root_only, char *desc)
{
	i915_ioctls[n].func = func;
	i915_ioctls[n].auth_needed = auth_needed;
	i915_ioctls[n].root_only = root_only;
	i915_ioctls[n].desc = desc;
}
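/*
 * Each entry registered below is (ioctl number, handler, auth_needed,
 * root_only, name): privileged operations such as DMA init, heap
 * setup/teardown and setparam are marked 1, 1 (authenticated and
 * root-only), while ordinary rendering ioctls are 1, 0.
 */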
void
i915_init_ioctl_arrays(void)
{
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_INIT),
	    i915_dma_init, 1, 1, "i915_dma_init");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FLUSH),
	    i915_flush_ioctl, 1, 0, "i915_flush_ioctl");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FLIP),
	    i915_flip_bufs, 1, 0, "i915_flip_bufs");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_BATCHBUFFER),
	    i915_batchbuffer, 1, 0, "i915_batchbuffer");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_IRQ_EMIT),
	    i915_irq_emit, 1, 0, "i915_irq_emit");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_IRQ_WAIT),
	    i915_irq_wait, 1, 0, "i915_irq_wait");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_GETPARAM),
	    i915_getparam, 1, 0, "i915_getparam");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_SETPARAM),
	    i915_setparam, 1, 1, "i915_setparam");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_ALLOC),
	    i915_mem_alloc, 1, 0, "i915_mem_alloc");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FREE),
	    i915_mem_free, 1, 0, "i915_mem_free");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_INIT_HEAP),
	    i915_mem_init_heap, 1, 1, "i915_mem_init_heap");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_CMDBUFFER),
	    i915_cmdbuffer, 1, 0, "i915_cmdbuffer");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP),
	    i915_mem_destroy_heap, 1, 1, "i915_mem_destroy_heap");
}

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
/*ARGSUSED*/
int i915_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}