/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
/**
 * amdgpu_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * This is the hot plug event work handler (all asics).
 * The work gets scheduled from the irq handler if there
 * was a hot plug interrupt. It walks the connector table
 * and calls the hotplug handler for each one, then sends
 * a drm hotplug event to alert userspace.
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	mutex_lock(&mode_config->mutex);
	list_for_each_entry(connector, &mode_config->connector_list, head)
		amdgpu_connector_hotplug(connector);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
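
/*
 * For reference, a minimal sketch of how a display interrupt handler might
 * defer hotplug handling to the work item above.  The handler name and the
 * surrounding logic are illustrative placeholders, not the exact DCE code:
 *
 *	static int example_hpd_irq(struct amdgpu_device *adev,
 *				   struct amdgpu_irq_src *source,
 *				   struct amdgpu_iv_entry *entry)
 *	{
 *		// ack/toggle the HPD hardware state here, then defer the
 *		// heavy connector probing to process context
 *		schedule_work(&adev->hotplug_work);
 *		return 0;
 *	}
 */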

/**
 * amdgpu_irq_reset_work_func - execute gpu reset
 *
 * @work: work struct
 *
 * Execute scheduled gpu reset (cayman+).
 * This function is called when the irq handler
 * thinks we need a gpu reset.
 */
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

	if (!amdgpu_sriov_vf(adev))
		amdgpu_device_gpu_recover(adev, NULL, false);
}

/* Disable *all* interrupts */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - irq handler
 *
 * @irq: irq number
 * @arg: pointer to the DRM device
 *
 * This is the irq handler for the amdgpu driver (all asics).
 */
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = dev->dev_private;
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);
	return ret;
}

/**
 * amdgpu_msi_ok - asic specific msi checks
 *
 * @adev: amdgpu device pointer
 *
 * Handles asic specific MSI checks to determine if
 * MSIs should be enabled on a particular chip (all asics).
 * Returns true if MSIs should be enabled, false if MSIs
 * should not be enabled.
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	/* force MSI on */
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}

/**
 * amdgpu_irq_init - init driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;

	spin_lock_init(&adev->irq.lock);

	/* enable msi */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int ret = pci_enable_msi(adev->pdev);
		if (!ret) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "amdgpu: using MSI.\n");
		}
	}

	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
			/* Disable vblank irqs aggressively for power-saving */
			/* XXX: can this be enabled for DC? */
			adev->ddev->vblank_disable_immediate = true;

		r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
		if (r)
			return r;

		/* pre DCE11 */
		INIT_WORK(&adev->hotplug_work,
			  amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);

	adev->irq.installed = true;
	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
	if (r) {
		adev->irq.installed = false;
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
		return r;
	}
	adev->ddev->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}

/**
 * amdgpu_irq_fini - tear down driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
	}

	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
			if (src->data) {
				kfree(src->data);
				kfree(src);
				adev->irq.client[i].sources[j] = NULL;
			}
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}

/**
 * amdgpu_irq_add_id - register irq source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id for this source
 * @src_id: source id for this source
 * @source: irq source
 *
 * Registers the irq source for the given client and source id.
 * Returns 0 on success or a negative error code on failure.
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IH_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}
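
/*
 * Registration sketch (illustrative, not copied from a specific IP block):
 * an IP block typically provides an amdgpu_irq_src_funcs table and calls
 * amdgpu_irq_add_id() from its sw_init callback.  The names example_irq_funcs,
 * adev->example.irq and EXAMPLE_SRC_ID below are placeholders:
 *
 *	static const struct amdgpu_irq_src_funcs example_irq_funcs = {
 *		.set = example_set_irq_state,
 *		.process = example_process_irq,
 *	};
 *
 *	adev->example.irq.num_types = 1;
 *	adev->example.irq.funcs = &example_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, EXAMPLE_SRC_ID,
 *			      &adev->example.irq);
 *	if (r)
 *		return r;
 */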

/**
 * amdgpu_irq_dispatch - dispatch irq to IP blocks
 *
 * @adev: amdgpu device pointer
 * @entry: interrupt vector entry
 *
 * Dispatches the irq to the different IP blocks
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry)
{
	unsigned client_id = entry->client_id;
	unsigned src_id = entry->src_id;
	struct amdgpu_irq_src *src;
	int r;

	trace_amdgpu_iv(entry);

	if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
		return;
	}

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
		return;
	}

	if (adev->irq.virq[src_id]) {
		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
	} else {
		if (!adev->irq.client[client_id].sources) {
			DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
				  client_id, src_id);
			return;
		}

		src = adev->irq.client[client_id].sources[src_id];
		if (!src) {
			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
			return;
		}

		r = src->funcs->process(adev, src, entry);
		if (r)
			DRM_ERROR("error processing interrupt (%d)\n", r);
	}
}

/**
 * amdgpu_irq_update - update hw interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source whose state to update
 * @type: type of interrupt you want to update
 *
 * Updates the interrupt state for a specific src (all asics).
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the state after taking the lock;
	 * otherwise we might disable an interrupt that was just enabled.
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to enable
 *
 * Enables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
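
/*
 * Usage sketch (illustrative): callers bracket the period during which they
 * need a given interrupt type with amdgpu_irq_get()/amdgpu_irq_put().  Because
 * the enable count is per type, the hardware state is only touched on the
 * first get and the last put.  &adev->example.irq is a placeholder source:
 *
 *	r = amdgpu_irq_get(adev, &adev->example.irq, 0);
 *	if (r)
 *		return r;
 *	// ... interrupt type 0 is delivered while the reference is held ...
 *	amdgpu_irq_put(adev, &adev->example.irq, 0);
 */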

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to disable
 * @type: type of interrupt you want to disable
 *
 * Disables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_enabled - test if irq is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source to test
 * @type: type of interrupt to test
 *
 * Tests if the given interrupt source/type is enabled or not
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

/* gen irq */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};

static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};

/**
 * amdgpu_irq_add_domain - create a linear irq domain
 *
 * @adev: amdgpu device pointer
 *
 * Create an irq domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the irq domain
 *
 * @adev: amdgpu device pointer
 *
 * Remove the irq domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
}

/**
 * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
 * Linux irq
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 * Returns the Linux irq.
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
}
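
/*
 * Usage sketch (illustrative): a client driver such as ACP that owns a GPU
 * interrupt source can request a Linux irq number for its IH src_id and then
 * wire it up like any other interrupt.  EXAMPLE_SRC_ID, example_handler and
 * the dev/data arguments are placeholders:
 *
 *	unsigned virq = amdgpu_irq_create_mapping(adev, EXAMPLE_SRC_ID);
 *	if (!virq)
 *		return -EINVAL;
 *	r = devm_request_irq(dev, virq, example_handler, 0, "example", data);
 *	if (r)
 *		return r;
 */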