// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 **************************************************************************/

#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include "power.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_irq.h"
#include "psb_reg.h"

/*
 * inline functions
 */

/* Map a pipe index (0-2) onto its pipe status register; BUG() on any other index. */
static inline u32 gma_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	if (pipe == 2)
		return PIPECSTAT;
	BUG();
}

/* Map a pipe index (0-2) onto its pipe configuration register; BUG() on any other index. */
static inline u32 gma_pipeconf(int pipe)
{
	if (pipe == 0)
		return PIPEACONF;
	if (pipe == 1)
		return PIPEBCONF;
	if (pipe == 2)
		return PIPECCONF;
	BUG();
}

/*
 * Set the interrupt-enable bits in @mask for @pipe and mirror the change in
 * the cached dev_priv->pipestat[]. The register layout pairs each enable
 * bit in the high half with a status bit in the low half, so OR-ing in
 * (mask >> 16) also clears any already-pending status for those events.
 * The cached value is updated even when the display island cannot be
 * powered up for the actual register write.
 * NOTE(review): all in-file callers hold dev_priv->irqmask_lock around this
 * call — confirm external callers do the same.
 */
void gma_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = gma_pipestat(pipe);
		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		if (gma_power_begin(&dev_priv->dev, false)) {
			u32 writeVal = PSB_RVDC32(reg);
			writeVal |= (mask | (mask >> 16));
			PSB_WVDC32(writeVal, reg);
			/* Posting read so the write reaches the device */
			(void) PSB_RVDC32(reg);
			gma_power_end(&dev_priv->dev);
		}
	}
}

/*
 * Clear the interrupt-enable bits in @mask for @pipe and mirror the change
 * in the cached dev_priv->pipestat[]. Unlike the enable path, only the
 * enable bits are touched; pending status bits are left as-is. As above,
 * the cached value changes even if the register write is skipped because
 * the display island is off.
 */
void gma_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = gma_pipestat(pipe);
		dev_priv->pipestat[pipe] &= ~mask;
		if (gma_power_begin(&dev_priv->dev, false)) {
			u32 writeVal = PSB_RVDC32(reg);
			writeVal &= ~mask;
			PSB_WVDC32(writeVal, reg);
			/* Posting read so the write reaches the device */
			(void) PSB_RVDC32(reg);
			gma_power_end(&dev_priv->dev);
		}
	}
}

/*
 * Display controller interrupt handler for pipe event.
 */
static void gma_pipe_event_handler(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	uint32_t pipe_stat_val = 0;
	uint32_t pipe_stat_reg = gma_pipestat(pipe);
	uint32_t pipe_enable = dev_priv->pipestat[pipe];
	/* Status bits sit 16 below their matching enable bits */
	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
	uint32_t pipe_clear;
	uint32_t i = 0;

	spin_lock(&dev_priv->irqmask_lock);

	/*
	 * Snapshot the pipe status and keep only events that are both
	 * enabled (high half) and pending (low half).
	 */
	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
	pipe_stat_val &= pipe_enable | pipe_status;
	pipe_stat_val &= pipe_stat_val >> 16;

	spin_unlock(&dev_priv->irqmask_lock);

	/* Clear the 2nd level interrupt status bits
	 * Sometimes the bits are very sticky so we repeat until they unstick */
	for (i = 0; i < 0xffff; i++) {
		/* Write-back-to-clear, then re-read to see what survived */
		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;

		if (pipe_clear == 0)
			break;
	}

	if (pipe_clear)
		dev_err(dev->dev,
			"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
			__func__, pipe, PSB_RVDC32(pipe_stat_reg));

	if (pipe_stat_val & PIPE_VBLANK_STATUS) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
		struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
		unsigned long flags;

		drm_handle_vblank(dev, pipe);

		/* Complete a pending page flip on this vblank, if any */
		spin_lock_irqsave(&dev->event_lock, flags);
		if (gma_crtc->page_flip_event) {
			drm_crtc_send_vblank_event(crtc,
						   gma_crtc->page_flip_event);
			gma_crtc->page_flip_event = NULL;
			/* Drop the reference taken when the flip was queued */
			drm_crtc_vblank_put(crtc);
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}

/*
 * Display controller interrupt handler.
 */
static void gma_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
	/* Opregion ASLE (backlight/OS events) request */
	if (vdc_stat & _PSB_IRQ_ASLE)
		psb_intel_opregion_asle_intr(dev);

	/* Per-pipe vsync events; only pipes A and B are routed here */
	if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
		gma_pipe_event_handler(dev, 0);

	if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
		gma_pipe_event_handler(dev, 1);
}

/*
 * SGX interrupt handler
 */
static void gma_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 val, addr;

	/* Read acknowledges the 2D blit completion; the value itself is unused */
	if (stat_1 & _PSB_CE_TWOD_COMPLETE)
		val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);

	if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
		/* Decode and log which requestor faulted, and at what address */
		val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
		addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
		if (val) {
			if (val & _PSB_CBI_STAT_PF_N_RW)
				DRM_ERROR("SGX MMU page fault:");
			else
				DRM_ERROR("SGX MMU read / write protection fault:");

			if (val & _PSB_CBI_STAT_FAULT_CACHE)
				DRM_ERROR("\tCache requestor");
			if (val & _PSB_CBI_STAT_FAULT_TA)
				DRM_ERROR("\tTA requestor");
			if (val & _PSB_CBI_STAT_FAULT_VDM)
				DRM_ERROR("\tVDM requestor");
			if (val & _PSB_CBI_STAT_FAULT_2D)
				DRM_ERROR("\t2D requestor");
			if (val & _PSB_CBI_STAT_FAULT_PBE)
				DRM_ERROR("\tPBE requestor");
			if (val & _PSB_CBI_STAT_FAULT_TSP)
				DRM_ERROR("\tTSP requestor");
			if (val & _PSB_CBI_STAT_FAULT_ISP)
				DRM_ERROR("\tISP requestor");
			if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
				DRM_ERROR("\tUSSEPDS requestor");
			if (val & _PSB_CBI_STAT_FAULT_HOST)
				DRM_ERROR("\tHost requestor");

			DRM_ERROR("\tMMU failing address is 0x%08x.\n",
				  (unsigned int)addr);
		}
	}

	/* Clear bits */
	PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
	PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
	/* Posting read to flush the clears */
	PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
}

/*
 * Top-level shared interrupt handler: demultiplexes the VDC identity
 * register into display, SGX and hotplug work, then acks the handled bits.
 */
static irqreturn_t gma_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
	u32 sgx_stat_1, sgx_stat_2;
	int handled = 0;

	spin_lock(&dev_priv->irqmask_lock);

	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);

	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
		dsp_int = 1;

	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
		sgx_int = 1;
	if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
		hotplug_int = 1;

	/* Only ack/handle sources we actually have enabled */
	vdc_stat &= dev_priv->vdc_irq_mask;
	spin_unlock(&dev_priv->irqmask_lock);

	if (dsp_int) {
		gma_vdc_interrupt(dev, vdc_stat);
		handled = 1;
	}

	if (sgx_int) {
		sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
		sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
		gma_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
		handled = 1;
	}

	/* Note: this bit has other meanings on some devices, so we will
	   need to address that later if it ever matters */
	if (hotplug_int && dev_priv->ops->hotplug) {
		handled = dev_priv->ops->hotplug(dev);
		/* Write back the hotplug status to clear it */
		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
	}

	/* Ack the first-level interrupt bits we processed */
	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
	rmb();

	if (!handled)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

/*
 * Mask everything and build vdc_irq_mask before the IRQ line is hooked up:
 * vblank flags for CRTCs with vblank already enabled, hotplug when the
 * device supports it, plus ASLE and SGX. Interrupts stay disabled here;
 * gma_irq_postinstall() enables them.
 */
void gma_irq_preinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct drm_crtc *crtc;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
	PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
	PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
	/* Posting read */
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);

	drm_for_each_crtc(crtc, dev) {
		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

		if (vblank->enabled) {
			/* Only pipes A and B have VSYNC flags here */
			u32 mask = drm_crtc_index(crtc) ? _PSB_VSYNC_PIPEB_FLAG :
							  _PSB_VSYNC_PIPEA_FLAG;
			dev_priv->vdc_irq_mask |= mask;
		}
	}

	/* Revisit this area - want per device masks ? */
	if (dev_priv->ops->hotplug)
		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;

	/* This register is safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

/*
 * Enable the interrupt sources selected by gma_irq_preinstall(): SGX 2D
 * completion and MMU faults, the VDC mask, per-pipe vblank status and
 * (when supported) hotplug. Runs after the IRQ line is requested.
 */
void gma_irq_postinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct drm_crtc *crtc;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	/* Enable 2D and MMU fault interrupts */
	PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
	PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */

	/* This register is safe even if display island is off */
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	/* Sync the per-pipe status enables with the DRM vblank state */
	drm_for_each_crtc(crtc, dev) {
		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

		if (vblank->enabled)
			gma_enable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
		else
			gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, true);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

/*
 * Set up the device interrupt: optionally enable MSI, mask everything,
 * request the (shared) IRQ line, then enable the configured sources.
 * Returns 0 on success or a negative errno.
 */
int gma_irq_install(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int ret;

	/* MSI failure is non-fatal; fall back to legacy interrupts */
	if (dev_priv->use_msi && pci_enable_msi(pdev)) {
		dev_warn(dev->dev, "Enabling MSI failed!\n");
		dev_priv->use_msi = false;
	}

	if (pdev->irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	gma_irq_preinstall(dev);

	/* PCI devices require shared interrupts. */
	ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
	if (ret)
		return ret;

	gma_irq_postinstall(dev);

	dev_priv->irq_enabled = true;

	return 0;
}

/*
 * Tear down the device interrupt: disable hotplug and vblank sources,
 * shrink vdc_irq_mask to the non-display (SGX/MSVDX/TOPAZ) flags, ack any
 * leftover status, then release the IRQ line and MSI. No-op if
 * gma_irq_install() never completed.
 */
void gma_irq_uninstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct drm_crtc *crtc;
	unsigned long irqflags;

	if (!dev_priv->irq_enabled)
		return;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, false);

	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	drm_for_each_crtc(crtc, dev) {
		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

		if (vblank->enabled)
			gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	/* Keep only the non-display interrupt sources enabled */
	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
				  _PSB_IRQ_MSVDX_FLAG |
				  _LNC_IRQ_TOPAZ_FLAG;

	/* These two registers are safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);

	wmb();

	/* This register is safe even if display island is off */
	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	free_irq(pdev->irq, dev);
	if (dev_priv->use_msi)
		pci_disable_msi(pdev);
}

/*
 * DRM vblank enable hook. Fails with -EINVAL when the pipe is not running
 * (or its config register could not be read because the display island is
 * off). Only pipes 0 and 1 contribute VSYNC flags to vdc_irq_mask.
 */
int gma_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	uint32_t reg_val = 0;
	uint32_t pipeconf_reg = gma_pipeconf(pipe);

	if (gma_power_begin(dev, false)) {
		reg_val = REG_READ(pipeconf_reg);
		gma_power_end(dev);
	}

	if (!(reg_val & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (pipe == 0)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	gma_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	return 0;
}

/*
 * DRM vblank disable hook: drop the pipe's VSYNC flag from vdc_irq_mask
 * (pipes 0 and 1 only) and turn off its vblank status enable.
 */
void gma_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (pipe == 0)
		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;

	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	gma_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 gma_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	uint32_t high_frame = PIPEAFRAMEHIGH;
	uint32_t low_frame = PIPEAFRAMEPIXEL;
	uint32_t pipeconf_reg = PIPEACONF;
	uint32_t reg_val = 0;
	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;

	/* Select the frame-count registers for the requested pipe */
	switch (pipe) {
	case 0:
		break;
	case 1:
		high_frame = PIPEBFRAMEHIGH;
		low_frame = PIPEBFRAMEPIXEL;
		pipeconf_reg = PIPEBCONF;
		break;
	case 2:
		high_frame = PIPECFRAMEHIGH;
		low_frame = PIPECFRAMEPIXEL;
		pipeconf_reg = PIPECCONF;
		break;
	default:
		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
		return 0;
	}

	if (!gma_power_begin(dev, false))
		return 0;

	reg_val = REG_READ(pipeconf_reg);

	if (!(reg_val & PIPEACONF_ENABLE)) {
		dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
			pipe);
		goto err_gma_power_end;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	count = (high1 << 8) | low;

err_gma_power_end:
	gma_power_end(dev);

	/* 0 on error paths above, otherwise the combined frame count */
	return count;
}