// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
 * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
 * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
 */

/*
 * Core code for the Via multifunction framebuffer device.
 */
#include <linux/aperture.h>
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include <linux/via-gpio.h>
#include "global.h"

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/pm.h>

/*
 * The default port config: two pure I2C ports (SR 0x26 and 0x31) and
 * three GPIO-capable ports.  Note that port 2C is a GPIO-type port but
 * defaults to I2C *mode*; the OLPC table below differs only there.
 */
static struct via_port_cfg adap_configs[] = {
	[VIA_PORT_26]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x26 },
	[VIA_PORT_31]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x31 },
	[VIA_PORT_25]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
	[VIA_PORT_2C]	= { VIA_PORT_GPIO, VIA_MODE_I2C,  VIASR, 0x2c },
	[VIA_PORT_3D]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
	{ 0, 0, 0, 0 }	/* terminator */
};

/*
 * The OLPC XO-1.5 puts the camera power and reset lines onto
 * GPIO 2C, so that port must run in GPIO mode there.
 */
static struct via_port_cfg olpc_adap_configs[] = {
	[VIA_PORT_26]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x26 },
	[VIA_PORT_31]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x31 },
	[VIA_PORT_25]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
	[VIA_PORT_2C]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x2c },
	[VIA_PORT_3D]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
	{ 0, 0, 0, 0 }	/* terminator */
};

/*
 * We currently only support one viafb device (will there ever be
 * more than one?), so just declare it globally here.
 */
static struct viafb_dev global_dev;
57 */ 58 static inline void viafb_mmio_write(int reg, u32 v) 59 { 60 iowrite32(v, global_dev.engine_mmio + reg); 61 } 62 63 static inline int viafb_mmio_read(int reg) 64 { 65 return ioread32(global_dev.engine_mmio + reg); 66 } 67 68 /* ---------------------------------------------------------------------- */ 69 /* 70 * Interrupt management. We have a single IRQ line for a lot of 71 * different functions, so we need to share it. The design here 72 * is that we don't want to reimplement the shared IRQ code here; 73 * we also want to avoid having contention for a single handler thread. 74 * So each subdev driver which needs interrupts just requests 75 * them directly from the kernel. We just have what's needed for 76 * overall access to the interrupt control register. 77 */ 78 79 /* 80 * Which interrupts are enabled now? 81 */ 82 static u32 viafb_enabled_ints; 83 84 static void viafb_int_init(void) 85 { 86 viafb_enabled_ints = 0; 87 88 viafb_mmio_write(VDE_INTERRUPT, 0); 89 } 90 91 /* 92 * Allow subdevs to ask for specific interrupts to be enabled. These 93 * functions must be called with reg_lock held 94 */ 95 void viafb_irq_enable(u32 mask) 96 { 97 viafb_enabled_ints |= mask; 98 viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE); 99 } 100 EXPORT_SYMBOL_GPL(viafb_irq_enable); 101 102 void viafb_irq_disable(u32 mask) 103 { 104 viafb_enabled_ints &= ~mask; 105 if (viafb_enabled_ints == 0) 106 viafb_mmio_write(VDE_INTERRUPT, 0); /* Disable entirely */ 107 else 108 viafb_mmio_write(VDE_INTERRUPT, 109 viafb_enabled_ints | VDE_I_ENABLE); 110 } 111 EXPORT_SYMBOL_GPL(viafb_irq_disable); 112 113 /* ---------------------------------------------------------------------- */ 114 /* 115 * Currently, the camera driver is the only user of the DMA code, so we 116 * only compile it in if the camera driver is being built. Chances are, 117 * most viafb systems will not need to have this extra code for a while. 
/* ---------------------------------------------------------------------- */
/*
 * Currently, the camera driver is the only user of the DMA code, so we
 * only compile it in if the camera driver is being built.  Chances are,
 * most viafb systems will not need to have this extra code for a while.
 * As soon as another user comes along, the ifdef can be removed.
 */
#if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA)
/*
 * Access to the DMA engine.  This currently provides what the camera
 * driver needs (i.e. outgoing only) but is easily expandable if need
 * be.
 */

/*
 * There are four DMA channels in the vx855.  For now, we only
 * use one of them, though.  Most of the time, the DMA channel
 * will be idle, so we keep the IRQ handler unregistered except
 * when some subsystem has indicated an interest.
 */
static int viafb_dma_users;
static DECLARE_COMPLETION(viafb_dma_completion);
/*
 * This mutex protects viafb_dma_users and our global interrupt
 * registration state; it also serializes access to the DMA
 * engine.
 */
static DEFINE_MUTEX(viafb_dma_lock);

/*
 * The VX855 DMA descriptor (used for s/g transfers) looks
 * like this.
 */
struct viafb_vx855_dma_descr {
	u32	addr_low;	/* Low part of phys addr */
	u32	addr_high;	/* High 12 bits of addr */
	u32	fb_offset;	/* Offset into FB memory */
	u32	seg_size;	/* Size, 16-byte units */
	u32	tile_mode;	/* "tile mode" setting */
	u32	next_desc_low;	/* Next descriptor addr */
	u32	next_desc_high;
	u32	pad;		/* Fill out to 64 bytes */
				/* NOTE(review): the struct is actually 32
				   bytes (8 x u32); "64" looks stale —
				   presumably just pads to a 16-byte
				   multiple.  TODO confirm vs. HW docs. */
};

/*
 * Flags OR'ed into the "next descriptor low" pointers
 */
#define VIAFB_DMA_MAGIC		0x01	/* ??? Just has to be there */
#define VIAFB_DMA_FINAL_SEGMENT	0x02	/* Final segment */
165 */ 166 static irqreturn_t viafb_dma_irq(int irq, void *data) 167 { 168 int csr; 169 irqreturn_t ret = IRQ_NONE; 170 171 spin_lock(&global_dev.reg_lock); 172 csr = viafb_mmio_read(VDMA_CSR0); 173 if (csr & VDMA_C_DONE) { 174 viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE); 175 complete(&viafb_dma_completion); 176 ret = IRQ_HANDLED; 177 } 178 spin_unlock(&global_dev.reg_lock); 179 return ret; 180 } 181 182 /* 183 * Indicate a need for DMA functionality. 184 */ 185 int viafb_request_dma(void) 186 { 187 int ret = 0; 188 189 /* 190 * Only VX855 is supported currently. 191 */ 192 if (global_dev.chip_type != UNICHROME_VX855) 193 return -ENODEV; 194 /* 195 * Note the new user and set up our interrupt handler 196 * if need be. 197 */ 198 mutex_lock(&viafb_dma_lock); 199 viafb_dma_users++; 200 if (viafb_dma_users == 1) { 201 ret = request_irq(global_dev.pdev->irq, viafb_dma_irq, 202 IRQF_SHARED, "via-dma", &viafb_dma_users); 203 if (ret) 204 viafb_dma_users--; 205 else 206 viafb_irq_enable(VDE_I_DMA0TDEN); 207 } 208 mutex_unlock(&viafb_dma_lock); 209 return ret; 210 } 211 EXPORT_SYMBOL_GPL(viafb_request_dma); 212 213 void viafb_release_dma(void) 214 { 215 mutex_lock(&viafb_dma_lock); 216 viafb_dma_users--; 217 if (viafb_dma_users == 0) { 218 viafb_irq_disable(VDE_I_DMA0TDEN); 219 free_irq(global_dev.pdev->irq, &viafb_dma_users); 220 } 221 mutex_unlock(&viafb_dma_lock); 222 } 223 EXPORT_SYMBOL_GPL(viafb_release_dma); 224 225 /* 226 * Do a scatter/gather DMA copy from FB memory. You must have done 227 * a successful call to viafb_request_dma() first. 228 */ 229 int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg) 230 { 231 struct viafb_vx855_dma_descr *descr; 232 void *descrpages; 233 dma_addr_t descr_handle; 234 unsigned long flags; 235 int i; 236 struct scatterlist *sgentry; 237 dma_addr_t nextdesc; 238 239 /* 240 * Get a place to put the descriptors. 
241 */ 242 descrpages = dma_alloc_coherent(&global_dev.pdev->dev, 243 nsg*sizeof(struct viafb_vx855_dma_descr), 244 &descr_handle, GFP_KERNEL); 245 if (descrpages == NULL) { 246 dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n"); 247 return -ENOMEM; 248 } 249 mutex_lock(&viafb_dma_lock); 250 /* 251 * Fill them in. 252 */ 253 descr = descrpages; 254 nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr); 255 for_each_sg(sg, sgentry, nsg, i) { 256 dma_addr_t paddr = sg_dma_address(sgentry); 257 descr->addr_low = paddr & 0xfffffff0; 258 descr->addr_high = ((u64) paddr >> 32) & 0x0fff; 259 descr->fb_offset = offset; 260 descr->seg_size = sg_dma_len(sgentry) >> 4; 261 descr->tile_mode = 0; 262 descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC; 263 descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff; 264 descr->pad = 0xffffffff; /* VIA driver does this */ 265 offset += sg_dma_len(sgentry); 266 nextdesc += sizeof(struct viafb_vx855_dma_descr); 267 descr++; 268 } 269 descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC; 270 /* 271 * Program the engine. 272 */ 273 spin_lock_irqsave(&global_dev.reg_lock, flags); 274 init_completion(&viafb_dma_completion); 275 viafb_mmio_write(VDMA_DQWCR0, 0); 276 viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE); 277 viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN); 278 viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC); 279 viafb_mmio_write(VDMA_DPRH0, 280 (((u64)descr_handle >> 32) & 0x0fff) | 0xf0000); 281 (void) viafb_mmio_read(VDMA_CSR0); 282 viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START); 283 spin_unlock_irqrestore(&global_dev.reg_lock, flags); 284 /* 285 * Now we just wait until the interrupt handler says 286 * we're done. Except that, actually, we need to wait a little 287 * longer: the interrupts seem to jump the gun a little and we 288 * get corrupted frames sometimes. 
289 */ 290 wait_for_completion_timeout(&viafb_dma_completion, 1); 291 msleep(1); 292 if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0) 293 printk(KERN_ERR "VIA DMA timeout!\n"); 294 /* 295 * Clean up and we're done. 296 */ 297 viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE); 298 viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */ 299 mutex_unlock(&viafb_dma_lock); 300 dma_free_coherent(&global_dev.pdev->dev, 301 nsg*sizeof(struct viafb_vx855_dma_descr), descrpages, 302 descr_handle); 303 return 0; 304 } 305 EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg); 306 #endif /* CONFIG_VIDEO_VIA_CAMERA */ 307 308 /* ---------------------------------------------------------------------- */ 309 /* 310 * Figure out how big our framebuffer memory is. Kind of ugly, 311 * but evidently we can't trust the information found in the 312 * fbdev configuration area. 313 */ 314 static u16 via_function3[] = { 315 CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3, 316 CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3, 317 P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3, VX900_FUNCTION3, 318 }; 319 320 /* Get the BIOS-configured framebuffer size from PCI configuration space 321 * of function 3 in the respective chipset */ 322 static int viafb_get_fb_size_from_pci(int chip_type) 323 { 324 int i; 325 u8 offset = 0; 326 u32 FBSize; 327 u32 VideoMemSize; 328 329 /* search for the "FUNCTION3" device in this chipset */ 330 for (i = 0; i < ARRAY_SIZE(via_function3); i++) { 331 struct pci_dev *pdev; 332 333 pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i], 334 NULL); 335 if (!pdev) 336 continue; 337 338 DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device); 339 340 switch (pdev->device) { 341 case CLE266_FUNCTION3: 342 case KM400_FUNCTION3: 343 offset = 0xE0; 344 break; 345 case CN400_FUNCTION3: 346 case CN700_FUNCTION3: 347 case CX700_FUNCTION3: 348 case KM800_FUNCTION3: 349 case KM890_FUNCTION3: 350 case P4M890_FUNCTION3: 351 case P4M900_FUNCTION3: 352 case 
VX800_FUNCTION3: 353 case VX855_FUNCTION3: 354 case VX900_FUNCTION3: 355 /*case CN750_FUNCTION3: */ 356 offset = 0xA0; 357 break; 358 } 359 360 if (!offset) 361 break; 362 363 pci_read_config_dword(pdev, offset, &FBSize); 364 pci_dev_put(pdev); 365 } 366 367 if (!offset) { 368 printk(KERN_ERR "cannot determine framebuffer size\n"); 369 return -EIO; 370 } 371 372 FBSize = FBSize & 0x00007000; 373 DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize); 374 375 if (chip_type < UNICHROME_CX700) { 376 switch (FBSize) { 377 case 0x00004000: 378 VideoMemSize = (16 << 20); /*16M */ 379 break; 380 381 case 0x00005000: 382 VideoMemSize = (32 << 20); /*32M */ 383 break; 384 385 case 0x00006000: 386 VideoMemSize = (64 << 20); /*64M */ 387 break; 388 389 default: 390 VideoMemSize = (32 << 20); /*32M */ 391 break; 392 } 393 } else { 394 switch (FBSize) { 395 case 0x00001000: 396 VideoMemSize = (8 << 20); /*8M */ 397 break; 398 399 case 0x00002000: 400 VideoMemSize = (16 << 20); /*16M */ 401 break; 402 403 case 0x00003000: 404 VideoMemSize = (32 << 20); /*32M */ 405 break; 406 407 case 0x00004000: 408 VideoMemSize = (64 << 20); /*64M */ 409 break; 410 411 case 0x00005000: 412 VideoMemSize = (128 << 20); /*128M */ 413 break; 414 415 case 0x00006000: 416 VideoMemSize = (256 << 20); /*256M */ 417 break; 418 419 case 0x00007000: /* Only on VX855/875 */ 420 VideoMemSize = (512 << 20); /*512M */ 421 break; 422 423 default: 424 VideoMemSize = (32 << 20); /*32M */ 425 break; 426 } 427 } 428 429 return VideoMemSize; 430 } 431 432 433 /* 434 * Figure out and map our MMIO regions. 435 */ 436 static int via_pci_setup_mmio(struct viafb_dev *vdev) 437 { 438 int ret; 439 /* 440 * Hook up to the device registers. Note that we soldier 441 * on if it fails; the framebuffer can operate (without 442 * acceleration) without this region. 
443 */ 444 vdev->engine_start = pci_resource_start(vdev->pdev, 1); 445 vdev->engine_len = pci_resource_len(vdev->pdev, 1); 446 vdev->engine_mmio = ioremap(vdev->engine_start, 447 vdev->engine_len); 448 if (vdev->engine_mmio == NULL) 449 dev_err(&vdev->pdev->dev, 450 "Unable to map engine MMIO; operation will be " 451 "slow and crippled.\n"); 452 /* 453 * Map in framebuffer memory. For now, failure here is 454 * fatal. Unfortunately, in the absence of significant 455 * vmalloc space, failure here is also entirely plausible. 456 * Eventually we want to move away from mapping this 457 * entire region. 458 */ 459 if (vdev->chip_type == UNICHROME_VX900) 460 vdev->fbmem_start = pci_resource_start(vdev->pdev, 2); 461 else 462 vdev->fbmem_start = pci_resource_start(vdev->pdev, 0); 463 ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type); 464 if (ret < 0) 465 goto out_unmap; 466 467 /* try to map less memory on failure, 8 MB should be still enough */ 468 for (; vdev->fbmem_len >= 8 << 20; vdev->fbmem_len /= 2) { 469 vdev->fbmem = ioremap_wc(vdev->fbmem_start, vdev->fbmem_len); 470 if (vdev->fbmem) 471 break; 472 } 473 474 if (vdev->fbmem == NULL) { 475 ret = -ENOMEM; 476 goto out_unmap; 477 } 478 return 0; 479 out_unmap: 480 iounmap(vdev->engine_mmio); 481 return ret; 482 } 483 484 static void via_pci_teardown_mmio(struct viafb_dev *vdev) 485 { 486 iounmap(vdev->fbmem); 487 iounmap(vdev->engine_mmio); 488 } 489 490 /* 491 * Create our subsidiary devices. 
492 */ 493 static struct viafb_subdev_info { 494 char *name; 495 struct platform_device *platdev; 496 } viafb_subdevs[] = { 497 { 498 .name = "viafb-gpio", 499 }, 500 { 501 .name = "viafb-i2c", 502 }, 503 #if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA) 504 { 505 .name = "viafb-camera", 506 }, 507 #endif 508 }; 509 #define N_SUBDEVS ARRAY_SIZE(viafb_subdevs) 510 511 static int via_create_subdev(struct viafb_dev *vdev, 512 struct viafb_subdev_info *info) 513 { 514 int ret; 515 516 info->platdev = platform_device_alloc(info->name, -1); 517 if (!info->platdev) { 518 dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n", 519 info->name); 520 return -ENOMEM; 521 } 522 info->platdev->dev.parent = &vdev->pdev->dev; 523 info->platdev->dev.platform_data = vdev; 524 ret = platform_device_add(info->platdev); 525 if (ret) { 526 dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n", 527 info->name); 528 platform_device_put(info->platdev); 529 info->platdev = NULL; 530 } 531 return ret; 532 } 533 534 static int via_setup_subdevs(struct viafb_dev *vdev) 535 { 536 int i; 537 538 /* 539 * Ignore return values. Even if some of the devices 540 * fail to be created, we'll still be able to use some 541 * of the rest. 
542 */ 543 for (i = 0; i < N_SUBDEVS; i++) 544 via_create_subdev(vdev, viafb_subdevs + i); 545 return 0; 546 } 547 548 static void via_teardown_subdevs(void) 549 { 550 int i; 551 552 for (i = 0; i < N_SUBDEVS; i++) 553 if (viafb_subdevs[i].platdev) { 554 viafb_subdevs[i].platdev->dev.platform_data = NULL; 555 platform_device_unregister(viafb_subdevs[i].platdev); 556 } 557 } 558 559 /* 560 * Power management functions 561 */ 562 static __maybe_unused LIST_HEAD(viafb_pm_hooks); 563 static __maybe_unused DEFINE_MUTEX(viafb_pm_hooks_lock); 564 565 void viafb_pm_register(struct viafb_pm_hooks *hooks) 566 { 567 INIT_LIST_HEAD(&hooks->list); 568 569 mutex_lock(&viafb_pm_hooks_lock); 570 list_add_tail(&hooks->list, &viafb_pm_hooks); 571 mutex_unlock(&viafb_pm_hooks_lock); 572 } 573 EXPORT_SYMBOL_GPL(viafb_pm_register); 574 575 void viafb_pm_unregister(struct viafb_pm_hooks *hooks) 576 { 577 mutex_lock(&viafb_pm_hooks_lock); 578 list_del(&hooks->list); 579 mutex_unlock(&viafb_pm_hooks_lock); 580 } 581 EXPORT_SYMBOL_GPL(viafb_pm_unregister); 582 583 static int __maybe_unused via_suspend(struct device *dev) 584 { 585 struct viafb_pm_hooks *hooks; 586 587 /* 588 * "I've occasionally hit a few drivers that caused suspend 589 * failures, and each and every time it was a driver bug, and 590 * the right thing to do was to just ignore the error and suspend 591 * anyway - returning an error code and trying to undo the suspend 592 * is not what anybody ever really wants, even if our model 593 *_allows_ for it." 594 * -- Linus Torvalds, Dec. 
7, 2009 595 */ 596 mutex_lock(&viafb_pm_hooks_lock); 597 list_for_each_entry_reverse(hooks, &viafb_pm_hooks, list) 598 hooks->suspend(hooks->private); 599 mutex_unlock(&viafb_pm_hooks_lock); 600 601 return 0; 602 } 603 604 static int __maybe_unused via_resume(struct device *dev) 605 { 606 struct viafb_pm_hooks *hooks; 607 608 /* Now bring back any subdevs */ 609 mutex_lock(&viafb_pm_hooks_lock); 610 list_for_each_entry(hooks, &viafb_pm_hooks, list) 611 hooks->resume(hooks->private); 612 mutex_unlock(&viafb_pm_hooks_lock); 613 614 return 0; 615 } 616 617 static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 618 { 619 int ret; 620 621 ret = aperture_remove_conflicting_pci_devices(pdev, "viafb"); 622 if (ret) 623 return ret; 624 625 ret = pci_enable_device(pdev); 626 if (ret) 627 return ret; 628 629 /* 630 * Global device initialization. 631 */ 632 memset(&global_dev, 0, sizeof(global_dev)); 633 global_dev.pdev = pdev; 634 global_dev.chip_type = ent->driver_data; 635 global_dev.port_cfg = adap_configs; 636 if (machine_is_olpc()) 637 global_dev.port_cfg = olpc_adap_configs; 638 639 spin_lock_init(&global_dev.reg_lock); 640 ret = via_pci_setup_mmio(&global_dev); 641 if (ret) 642 goto out_disable; 643 /* 644 * Set up interrupts and create our subdevices. Continue even if 645 * some things fail. 
646 */ 647 viafb_int_init(); 648 via_setup_subdevs(&global_dev); 649 /* 650 * Set up the framebuffer device 651 */ 652 ret = via_fb_pci_probe(&global_dev); 653 if (ret) 654 goto out_subdevs; 655 return 0; 656 657 out_subdevs: 658 via_teardown_subdevs(); 659 via_pci_teardown_mmio(&global_dev); 660 out_disable: 661 pci_disable_device(pdev); 662 return ret; 663 } 664 665 static void via_pci_remove(struct pci_dev *pdev) 666 { 667 via_teardown_subdevs(); 668 via_fb_pci_remove(pdev); 669 via_pci_teardown_mmio(&global_dev); 670 pci_disable_device(pdev); 671 } 672 673 674 static const struct pci_device_id via_pci_table[] = { 675 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID), 676 .driver_data = UNICHROME_CLE266 }, 677 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID), 678 .driver_data = UNICHROME_K400 }, 679 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID), 680 .driver_data = UNICHROME_K800 }, 681 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID), 682 .driver_data = UNICHROME_PM800 }, 683 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN700_DID), 684 .driver_data = UNICHROME_CN700 }, 685 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID), 686 .driver_data = UNICHROME_CX700 }, 687 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID), 688 .driver_data = UNICHROME_CN750 }, 689 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID), 690 .driver_data = UNICHROME_K8M890 }, 691 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID), 692 .driver_data = UNICHROME_P4M890 }, 693 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID), 694 .driver_data = UNICHROME_P4M900 }, 695 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID), 696 .driver_data = UNICHROME_VX800 }, 697 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID), 698 .driver_data = UNICHROME_VX855 }, 699 { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX900_DID), 700 .driver_data = UNICHROME_VX900 }, 701 { } 702 }; 703 MODULE_DEVICE_TABLE(pci, via_pci_table); 704 705 static const struct dev_pm_ops via_pm_ops = { 706 
#ifdef CONFIG_PM_SLEEP 707 .suspend = via_suspend, 708 .resume = via_resume, 709 .freeze = NULL, 710 .thaw = via_resume, 711 .poweroff = NULL, 712 .restore = via_resume, 713 #endif 714 }; 715 716 static struct pci_driver via_driver = { 717 .name = "viafb", 718 .id_table = via_pci_table, 719 .probe = via_pci_probe, 720 .remove = via_pci_remove, 721 .driver.pm = &via_pm_ops, 722 }; 723 724 static int __init via_core_init(void) 725 { 726 int ret; 727 728 if (fb_modesetting_disabled("viafb")) 729 return -ENODEV; 730 731 ret = viafb_init(); 732 if (ret) 733 return ret; 734 viafb_i2c_init(); 735 viafb_gpio_init(); 736 ret = pci_register_driver(&via_driver); 737 if (ret) { 738 viafb_gpio_exit(); 739 viafb_i2c_exit(); 740 return ret; 741 } 742 743 return 0; 744 } 745 746 static void __exit via_core_exit(void) 747 { 748 pci_unregister_driver(&via_driver); 749 viafb_gpio_exit(); 750 viafb_i2c_exit(); 751 viafb_exit(); 752 } 753 754 module_init(via_core_init); 755 module_exit(via_core_exit); 756