/* BEGIN CSTYLED */

/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"



/* Really want an OS-independent resettable timer. Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
/*
 * i915_wait_ring: poll the low-priority ring's HEAD register until at
 * least n bytes of free space are available.
 *
 * Returns 0 when the space becomes available, or EBUSY if the ring head
 * makes no progress for the full polling interval (10000 iterations of
 * DRM_UDELAY(1); the counter is reset whenever the head advances, so
 * EBUSY indicates a genuine stall rather than a slow drain).
 *
 * The `caller' argument is unused here (see /*ARGSUSED*/ below); it is
 * presumably intended for diagnostics -- TODO confirm against callers.
 */
/*ARGSUSED*/
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		/* Recompute free space from the live hardware head pointer. */
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;	/* ring wrapped */
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		/* Head moved: the ring is draining, so restart the timeout. */
		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
		DRM_UDELAY(1);
	}

	return (EBUSY);
}

/*
 * i915_kernel_lost_context: resynchronize the software ring-buffer
 * bookkeeping (head, tail, space) with the hardware registers.  Called
 * before emitting commands, since another context may have advanced the
 * ring since we last looked.
 */
void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;	/* ring wrapped */

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

/*
 * i915_dma_cleanup: tear down the DMA state built by i915_initialize --
 * unmap the ring buffer, free the PCI-allocated hardware status page
 * (pre-G33 path), and release the GFX-memory status page mapping
 * (G33+ path).  Safe to call on a partially initialized device; each
 * resource is released only if present.  Returns 0.
 */
static int i915_dma_cleanup(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv =
	    (drm_i915_private_t *) dev->dev_private;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		(void) drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
	}

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;

		/* Need to rewrite hardware status page */
		I915_WRITE(0x02080, 0x1ffff000);
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
		/* Point the status page register back at a safe default. */
		I915_WRITE(0x2080, 0x1ffff000);
	}

	return 0;
}

/*
 * i915_initialize: set up DMA for the device from the userspace-supplied
 * init block -- locate the SAREA and MMIO map, map the ring buffer, record
 * buffer offsets, and (on chips that do not need a GFX-memory status page)
 * allocate and program the PCI hardware status page.
 *
 * On any failure the partially built state is torn down via
 * i915_dma_cleanup() and a positive errno (EINVAL/ENOMEM, Solaris DRM
 * convention) is returned; returns 0 on success.
 */
static int i915_initialize(drm_device_t * dev,
    drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv =
	    (drm_i915_private_t *)dev->dev_private;

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		return (EINVAL);
	}

	/*
	 * mmio_map will be destoried after DMA clean up. We should not
	 * access mmio_map in suspend or resume process.
	 */
	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return (EINVAL);
	}

	/* The driver-private section lives at a fixed offset inside SAREA. */
	dev_priv->sarea_priv = (drm_i915_sarea_t *)(void *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	/* Size is assumed to be a power of two -- TODO confirm callers. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = (u_offset_t)init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
		    " ring buffer\n");
		return (ENOMEM);
	}

	dev_priv->ring.virtual_start = (u8 *)dev_priv->ring.map.dev_addr;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;


	if (!I915_NEED_GFX_HWS(dev)) {
		/* Program Hardware Status Page */
		dev_priv->status_page_dmah =
		    drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
		    0xffffffff, 1);

		if (!dev_priv->status_page_dmah) {
			dev->dev_private = (void *)dev_priv;
			(void) i915_dma_cleanup(dev);
			DRM_ERROR("Can not allocate hardware status page\n");
			return (ENOMEM);
		}

		dev_priv->hw_status_page =
		    (void *)dev_priv->status_page_dmah->vaddr;
		dev_priv->dma_status_page = dev_priv->status_page_dmah->paddr;
		(void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
		DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

		/* 0x02080 is the hardware status page address register. */
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	}
	DRM_DEBUG("Enabled hardware status page\n");


#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}

/*
 * i915_dma_resume: re-enable DMA after suspend.  All mappings are expected
 * to have survived (they are only destroyed by i915_dma_cleanup), so this
 * merely validates that the SAREA, MMIO map, ring mapping and status page
 * are present and reprograms the status page address register.  Returns 0
 * on success or a positive errno if any required piece is missing.
 */
static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return (EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return (EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
		    " ring buffer\n");
		return (ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return (EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Pre-G33 chips use the PCI page; newer ones use GFX memory. */
	if (!I915_NEED_GFX_HWS(dev))
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	else
		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

/*
 * i915_dma_init: DRM_I915_INIT ioctl entry point.  Dispatches to
 * initialize/cleanup/resume based on the `func' field of the copied-in
 * drm_i915_init_t.  Returns the callee's result, or EINVAL for an
 * unknown function code.
 */
/*ARGSUSED*/
static int i915_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_init_t init;
	int retcode = 0;

	DRM_COPYFROM_WITH_RETURN(&init, (drm_i915_init_t *)data, sizeof(init));

	switch (init.func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, &init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction. It's important to get the size right as
 * that tells us where the next instruction to check is. Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	/* Bits 31:29 select the instruction client/type. */
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
#ifndef __SUNPRO_C
		break;
#endif
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			/* Sub-opcode in bits 23:16 selects the length field. */
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

#ifndef __SUNPRO_C
	return 0;
#endif
}

/*
 * validate_cmd: thin wrapper around do_validate_cmd kept as a hook for
 * debug tracing (see the commented-out printk below).
 */
static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

/*
 * i915_emit_cmds: copy `dwords' 32-bit commands from the user buffer into
 * the low-priority ring, validating each instruction with validate_cmd()
 * and its full length before emission.  The total is padded to an even
 * number of dwords.  Returns 0 on success, EINVAL on an oversized buffer,
 * a copy fault, or an illegal instruction.
 *
 * NOTE(review): the EINVAL returns after BEGIN_LP_RING leave the ring
 * sequence unterminated (no ADVANCE_LP_RING) -- verify the macros
 * tolerate this before relying on the error paths.
 */
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return (EINVAL);

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return (EINVAL);


		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return (EINVAL);

		OUT_RING(cmd);

		/* Emit the remaining sz-1 operand dwords of this command. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
			    sizeof(cmd))) {
				return (EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);	/* pad to an even dword count */

	ADVANCE_LP_RING();

	return 0;
}

/*
 * i915_emit_box: emit a GFX_OP_DRAWRECT_INFO command for cliprect `i' of
 * the user-supplied array, after validating that the rectangle is
 * non-degenerate.  Uses the 4-dword 965 layout or the 6-dword pre-965
 * layout as appropriate.  Returns 0, EFAULT on copy failure, or EINVAL
 * for a bad box.
 */
static int i915_emit_box(drm_device_t * dev,
    drm_clip_rect_t __user * boxes,
    int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return (EFAULT);
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
		    box.x1, box.y1, box.x2, box.y2);
		return (EINVAL);
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

/*
 * i915_emit_breadcrumb: bump the software counter, publish it in the
 * SAREA (last_enqueue), and emit a MI_STORE_DWORD_INDEX so the GPU writes
 * the counter into the hardware status page at BREADCRUMB_OFFSET when it
 * reaches this point -- allowing completion to be observed later.
 */
static void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(BREADCRUMB_OFFSET << 2);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}


/*
 * i915_emit_mi_flush: emit a 4-dword MI_FLUSH command with the extra
 * `flush' bits OR'd in, resynchronizing the ring state first.
 * Always returns 0.
 */
int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = CMD_MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}

/*
 * i915_dispatch_cmdbuffer: validate alignment, then emit the user command
 * buffer once per cliprect (or once with no cliprects), preceding each
 * pass with the corresponding drawing rectangle, and finish with a
 * breadcrumb.  Returns 0 or the first error from box/cmd emission.
 */
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
    drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return (EINVAL);
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects the buffer is still dispatched exactly once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
			    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)(void *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb( dev );
	return 0;
}

/*
 * i915_dispatch_batchbuffer: start a hardware batch buffer once per
 * cliprect (or once with none).  Uses MI_BATCH_BUFFER_START when
 * use_mi_batchbuffer_start is set, otherwise the older MI_BATCH_BUFFER
 * form with explicit start/end addresses; both run non-secure.  Requires
 * 8-byte alignment of start and length.  Ends with a breadcrumb.
 */
static int i915_dispatch_batchbuffer(drm_device_t * dev,
    drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return (EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
			    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb( dev );

	return 0;
}

/*
 * i915_dispatch_flip: queue an asynchronous page flip -- flush the render
 * cache, emit CMD_OP_DISPLAYBUFFER_INFO pointing the display at the other
 * buffer (toggling current_page between front and back), wait for the
 * plane-A flip event, then store a breadcrumb and mirror the new page
 * into the SAREA.  Always returns 0.
 */
static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
	    __FUNCTION__,
	    dev_priv->current_page,
	    dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	/* Flip to whichever buffer is not currently displayed. */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* NOTE(review): post-increment here, vs. pre-increment in
	 * i915_emit_breadcrumb -- last_enqueue lags the stored counter
	 * by one on this path.  Preserved as-is.
	 */
	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(BREADCRUMB_OFFSET << 2);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

/*
 * i915_quiescent: wait until the ring is (almost) entirely empty --
 * i.e. until Size-8 bytes are free -- so all queued commands have been
 * consumed.  Returns 0 or EBUSY from i915_wait_ring.
 */
static int i915_quiescent(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

/*
 * i915_flush_ioctl: DRM_I915_FLUSH ioctl -- drain the ring.  Requires
 * the caller to hold the DRM lock.
 */
/*ARGSUSED*/
static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, fpriv);

	return i915_quiescent(dev);
}

/*
 * i915_batchbuffer: DRM_I915_BATCHBUFFER ioctl -- copy in the request
 * (with ILP32 thunking for 32-bit callers), dispatch the batch buffer,
 * and record the last dispatched breadcrumb from the hardware status
 * page into the SAREA.  Fails with EINVAL if batchbuffers are disabled.
 */
/*ARGSUSED*/
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return (EINVAL);
	}

	/* 32-bit userland on a 64-bit kernel needs a layout conversion. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_batchbuffer32_t batchbuffer32_t;

		DRM_COPYFROM_WITH_RETURN(&batchbuffer32_t,
		    (void *) data, sizeof (batchbuffer32_t));

		batch.start = batchbuffer32_t.start;
		batch.used = batchbuffer32_t.used;
		batch.DR1 = batchbuffer32_t.DR1;
		batch.DR4 = batchbuffer32_t.DR4;
		batch.num_cliprects = batchbuffer32_t.num_cliprects;
		batch.cliprects = (drm_clip_rect_t __user *)
		    (uintptr_t)batchbuffer32_t.cliprects;
	} else
		DRM_COPYFROM_WITH_RETURN(&batch, (void *) data,
		    sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
	    batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, fpriv);
/*

	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
	    batch.num_cliprects *
	    sizeof(drm_clip_rect_t)))
		return (EFAULT);
*/

	ret = i915_dispatch_batchbuffer(dev, &batch);

	/* hw_status[5] is the breadcrumb slot -- TODO confirm offset. */
	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}

/*
 * i915_cmdbuffer: DRM_I915_CMDBUFFER ioctl -- copy in the request (with
 * ILP32 thunking), dispatch the validated command buffer, and record the
 * last dispatched breadcrumb into the SAREA.
 */
/*ARGSUSED*/
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	/* 32-bit userland on a 64-bit kernel needs a layout conversion. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_cmdbuffer32_t cmdbuffer32_t;

		DRM_COPYFROM_WITH_RETURN(&cmdbuffer32_t,
		    (drm_i915_cmdbuffer32_t __user *) data,
		    sizeof (drm_i915_cmdbuffer32_t));

		cmdbuf.buf = (char __user *)(uintptr_t)cmdbuffer32_t.buf;
		cmdbuf.sz = cmdbuffer32_t.sz;
		cmdbuf.DR1 = cmdbuffer32_t.DR1;
		cmdbuf.DR4 = cmdbuffer32_t.DR4;
		cmdbuf.num_cliprects = cmdbuffer32_t.num_cliprects;
		cmdbuf.cliprects = (drm_clip_rect_t __user *)
		    (uintptr_t)cmdbuffer32_t.cliprects;
	} else
		DRM_COPYFROM_WITH_RETURN(&cmdbuf, (void *) data,
		    sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
	    cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, fpriv);
/*

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
	    cmdbuf.num_cliprects *
	    sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return (EFAULT);
	}
*/

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	/* hw_status[5] is the breadcrumb slot -- TODO confirm offset. */
	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}

/*
 * i915_do_cleanup_pageflip: if the back buffer is currently displayed,
 * flip back to the front buffer so the display is left in a sane state
 * (called when a client that was page-flipping goes away).  Returns 0.
 */
static int i915_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);
	if (dev_priv->current_page != 0)
		(void) i915_dispatch_flip(dev);

	return 0;
}

/*
 * i915_flip_bufs: DRM_I915_FLIP ioctl -- queue a page flip.  Requires
 * the DRM lock.
 */
/*ARGSUSED*/
static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, fpriv);

	return i915_dispatch_flip(dev);
}

/*
 * i915_getparam: DRM_I915_GETPARAM ioctl -- copy in the request (with
 * ILP32 thunking), look up the parameter (IRQ active, batchbuffers
 * allowed, last dispatched breadcrumb) and copy the integer result back
 * to the user-supplied pointer.  EINVAL for unknown parameters, EFAULT
 * on copy-out failure.
 */
/*ARGSUSED*/
static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_getparam32_t getparam32_t;

		DRM_COPYFROM_WITH_RETURN(&getparam32_t,
		    (drm_i915_getparam32_t __user *) data,
		    sizeof (drm_i915_getparam32_t));

		param.param = getparam32_t.param;
		param.value = (int __user *)(uintptr_t)getparam32_t.value;
	} else
		DRM_COPYFROM_WITH_RETURN(&param,
		    (drm_i915_getparam_t *) data, sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return (EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("i915_getparam failed\n");
		return (EFAULT);
	}
	return 0;
}

/*
 * i915_setparam: DRM_I915_SETPARAM ioctl -- set a driver tunable
 * (MI_BATCH_BUFFER_START usage, texture-LRU granularity, or whether
 * batchbuffers are allowed).  EINVAL for unknown parameters.
 */
/*ARGSUSED*/
static int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	DRM_COPYFROM_WITH_RETURN(&param, (drm_i915_setparam_t *) data,
	    sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return (EINVAL);
	}

	return 0;
}

/*
 * i915_set_status_page: DRM_I915_HWS_ADDR ioctl (G33-class chips that
 * keep the hardware status page in GFX memory).  Map the page-aligned
 * GFX address supplied by userspace into the kernel, zero it, and
 * program the status page address register.  EINVAL if this chip does
 * not use a GFX status page; ENOMEM if the mapping fails (in which case
 * DMA state is cleaned up).
 */
/*ARGSUSED*/
static int i915_set_status_page(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t hws;

	if (!I915_NEED_GFX_HWS(dev))
		return (EINVAL);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}
	DRM_COPYFROM_WITH_RETURN(&hws, (drm_i915_hws_addr_t __user *) data,
	    sizeof(hws));
	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws.addr);

	/* Keep only the page-aligned portion of the address. */
	dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);
	DRM_DEBUG("set gfx_addr 0x%08x\n", dev_priv->status_gfx_addr);

	dev_priv->hws_map.offset =
	    (u_offset_t)dev->agp->agp_info.agpi_aperbase + hws.addr;
	dev_priv->hws_map.size = PAGE_SIZE; /* 4K pages */
	dev_priv->hws_map.type = _DRM_REGISTERS;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	DRM_DEBUG("set status page: i915_set_status_page: mapoffset 0x%llx\n",
	    dev_priv->hws_map.offset);
	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
		    " G33 hw status page\n");
		return (ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->hws_map.dev_addr;

	(void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
	    dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}

/*
 * i915_driver_load: driver load hook -- register the four i915-specific
 * statistics counters and allocate the zeroed driver-private structure.
 * Returns 0 or ENOMEM.
 */
/*ARGSUSED*/
int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return ENOMEM;

	(void) memset(dev_priv, 0, sizeof(drm_i915_private_t));
	dev->dev_private = (void *)dev_priv;

	return 0;
}

/*
 * i915_driver_unload: driver unload hook -- free the driver-private
 * structure allocated in i915_driver_load.  Returns 0.
 */
int i915_driver_unload(struct drm_device *dev)
{
	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
	    DRM_MEM_DRIVER);

	return 0;
}


/*
 * i915_driver_lastclose: last-close hook -- tear down the AGP memory
 * heap and all DMA state when the final client closes the device.
 */
void i915_driver_lastclose(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown(&(dev_priv->agp_heap));
	}
	(void) i915_dma_cleanup(dev);
}

/*
 * i915_driver_preclose: per-client close hook -- undo any outstanding
 * page flip and release the client's AGP heap allocations.
 */
void i915_driver_preclose(drm_device_t * dev, drm_file_t *fpriv)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
		(void) i915_do_cleanup_pageflip(dev);
		}
		i915_mem_release(dev, fpriv, dev_priv->agp_heap);
	}
}

extern drm_ioctl_desc_t i915_ioctls[];

/*
 * i915_set_ioctl_desc: fill in one slot of the i915 ioctl dispatch
 * table (handler, auth/root requirements, and a descriptive name).
 */
void i915_set_ioctl_desc(int n, drm_ioctl_t * func,
    int auth_needed, int root_only, char *desc)
{
	i915_ioctls[n].func = func;
	i915_ioctls[n].auth_needed = auth_needed;
	i915_ioctls[n].root_only = root_only;
	i915_ioctls[n].desc = desc;
}

/*
 * i915_init_ioctl_arrays: populate the i915 ioctl dispatch table.
 * All entries require authentication; INIT, SETPARAM and the heap
 * init/destroy entries additionally require root.
 */
void
i915_init_ioctl_arrays(void)
{
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_INIT),
	    i915_dma_init, 1, 1, "i915_dma_init");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FLUSH),
	    i915_flush_ioctl, 1, 0, "i915_flush_ioctl");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FLIP),
	    i915_flip_bufs, 1, 0, "i915_flip_bufs");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_BATCHBUFFER),
	    i915_batchbuffer, 1, 0, "i915_batchbuffer");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_IRQ_EMIT),
	    i915_irq_emit, 1, 0, " i915_irq_emit");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_IRQ_WAIT),
	    i915_irq_wait, 1, 0, "i915_irq_wait");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_GETPARAM),
	    i915_getparam, 1, 0, "i915_getparam");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_SETPARAM),
	    i915_setparam, 1, 1, "i915_setparam");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_ALLOC),
	    i915_mem_alloc, 1, 0, "i915_mem_alloc");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FREE),
	    i915_mem_free, 1, 0, "i915_mem_free");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_INIT_HEAP),
	    i915_mem_init_heap, 1, 1, "i915_mem_init_heap");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_CMDBUFFER),
	    i915_cmdbuffer, 1, 0, "i915_cmdbuffer");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP),
	    i915_mem_destroy_heap, 1, 1, "i915_mem_destroy_heap");
	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_HWS_ADDR),
	    i915_set_status_page, 1, 0, "i915_set_status_page");
}
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always retured to indictate every i9x5 is AGP.
 */
/*ARGSUSED*/
int i915_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}