/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "radeon.h"
#include <drm/radeon_drm.h>
#include "radeon_asic.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>

/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 * Returns 0 on success.
 */
int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return 0;
	radeon_acpi_fini(rdev);
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* radeon_device_init should report only fatal errors,
	 * like memory allocation failure, iomapping failure or
	 * memory manager initialization failure; it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation.
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again, modeset_init should fail only on fatal errors;
	 * otherwise it should provide enough functionality
	 * for shadowfb to run.
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: these require modeset init,
	 * but failure is not fatal.
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);
	return r;
}

/**
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: drm file currently holding the rights
 * @applier: drm file requesting or revoking the rights
 * @value: 1 to request the rights, 0 to revoke them (set to the
 * resulting ownership status on return)
 *
 * Sets the filp rights for the device (all asics).
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	mutex_lock(&dev->struct_mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&dev->struct_mutex);
}
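
/*
 * Illustrative usage (mirrors the RADEON_INFO_WANT_HYPERZ and
 * RADEON_INFO_WANT_CMASK handlers below): passing 1 requests the right
 * for @applier, 0 drops it, and on return the value reports whether
 * @applier now owns it:
 *
 *	value = 1;
 *	radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
 *	(value is now 1 only if rdev->hyperz_filp == filp)
 */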

/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t value, *value_ptr;
	uint64_t value64, *value_ptr64;
	struct drm_crtc *crtc;
	int i, found;

	/* TIMESTAMP is a 64-bit value, needs special handling. */
	if (info->request == RADEON_INFO_TIMESTAMP) {
		if (rdev->family >= CHIP_R600) {
			value_ptr64 = (uint64_t*)((unsigned long)info->value);
			if (rdev->family >= CHIP_TAHITI) {
				value64 = si_get_gpu_clock(rdev);
			} else {
				value64 = r600_get_gpu_clock(rdev);
			}

			if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
				DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
				return -EFAULT;
			}
			return 0;
		} else {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
	}
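
	/* Every other request passes a pointer to a 32-bit value: it is
	 * read here as the (optional) input and overwritten with the
	 * result just before the function returns.
	 */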
	value_ptr = (uint32_t *)((unsigned long)info->value);
	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
		DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		value = dev->pci_device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			value = false;
		else
			value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		value = rdev->accel_working;
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.num_ses;
		else
			value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}
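
/*
 * Userspace usage sketch for the info ioctl above (illustrative only, not
 * compiled here; assumes libdrm's drmCommandWriteRead() and the
 * struct drm_radeon_info layout from radeon_drm.h):
 *
 *	struct drm_radeon_info info = {};
 *	uint32_t tiling_config = 0;
 *
 *	info.request = RADEON_INFO_TILING_CONFIG;
 *	info.value = (uintptr_t)&tiling_config;
 *	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0)
 *		printf("tile config: 0x%08x\n", tiling_config);
 */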

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_firstopen_kms - drm callback for first open
 *
 * @dev: drm dev pointer
 *
 * Nothing to be done for KMS (all asics).
 * Returns 0 on success.
 */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
	return 0;
}

/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga switcheroo state after last close (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}

/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		struct radeon_bo_va *bo_va;
		int r;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		radeon_vm_init(rdev, &fpriv->vm);

		/* map the ib pool buffer read only into
		 * virtual address space */
		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
					 rdev->ring_tmp_bo.bo);
		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
					  RADEON_VM_PAGE_READABLE |
					  RADEON_VM_PAGE_SNOOPED);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			kfree(fpriv);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}
	return 0;
}

/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_bo_va *bo_va;
		int r;

		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
		if (!r) {
			bo_va = radeon_vm_bo_find(&fpriv->vm,
						  rdev->ring_tmp_bo.bo);
			if (bo_va)
				radeon_vm_bo_rmv(rdev, bo_va);
			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
		}

		radeon_vm_fini(rdev, &fpriv->vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}

/**
 * radeon_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).
 */
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return radeon_get_vblank_counter(rdev, crtc);
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

/**
 * radeon_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: maximum allowable timestamp error
 * @vblank_time: returned vblank time
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position. (all asics).
 * Returns positive status flags on success, negative error on failure.
 */
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc);
}

/*
 * IOCTL.
 */
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	/* Not valid in KMS. */
	return -EINVAL;
}

#define KMS_INVALID_IOCTL(name)						\
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{									\
	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
	return -EINVAL;							\
}
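
/*
 * For reference, KMS_INVALID_IOCTL(radeon_cp_init_kms) below expands to:
 *
 *	int radeon_cp_init_kms(struct drm_device *dev, void *data,
 *			       struct drm_file *file_priv)
 *	{
 *		DRM_ERROR("invalid ioctl with kms %s\n", __func__);
 *		return -EINVAL;
 *	}
 */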

/*
 * All these ioctls are invalid in kms world.
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)
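
/*
 * Master ioctl table: the first block covers the legacy (UMS) command
 * stream and memory ioctls, all of which resolve to the -EINVAL stubs
 * generated above; the entries after the "KMS" marker are the ioctls
 * actually serviced by this driver.
 */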

struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
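
/*
 * Hookup sketch (illustrative; the actual wiring lives in radeon_drv.c):
 * the KMS drm_driver instance points its ioctl table at the array above
 * and picks up the count at init time, roughly:
 *
 *	kms_driver.ioctls = radeon_ioctls_kms;
 *	kms_driver.num_ioctls = radeon_max_kms_ioctl;
 */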