/*
 * pata_serverworks.c	- Serverworks PATA for new ATA layer
 *			  (C) 2005 Red Hat Inc
 *			  Alan Cox <alan@redhat.com>
 *
 * based upon
 *
 * serverworks.c
 *
 * Copyright (C) 1998-2000 Michel Aubry
 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
 * Portions copyright (c) 2001 Sun Microsystems
 *
 *
 * RCC/ServerWorks IDE driver for Linux
 *
 *   OSB4: `Open South Bridge' IDE Interface (fn 1)
 *         supports UDMA mode 2 (33 MB/s)
 *
 *   CSB5: `Champion South Bridge' IDE Interface (fn 1)
 *         all revisions support UDMA mode 4 (66 MB/s)
 *         revision A2.0 and up support UDMA mode 5 (100 MB/s)
 *
 *         *** The CSB5 does not provide ANY register ***
 *         *** to detect 80-conductor cable presence. ***
 *
 *   CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
 *
 * Documentation:
 *	Available under NDA only. Errata info very hard to get.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_serverworks"
#define DRV_VERSION "0.4.0"

#define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */

/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
 * can overrun their FIFOs when used with the CSB5 */

static const char *csb_bad_ata100[] = {
	"ST320011A",
	"ST340016A",
	"ST360021A",
	"ST380021A",
	NULL
};

/**
 * dell_cable - Dell serverworks cable detection
 * @ap: ATA port to do cable detect
 *
 * Dell hides the 40/80 pin select for their interfaces in the top two
 * bits of the subsystem ID.
 */

static int dell_cable(struct ata_port *ap) {
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 * sun_cable - Sun Cobalt 'Alpine' cable detection
 * @ap: ATA port to do cable select
 *
 * Cobalt CSB5 IDE hides the 40/80 pin select in the top two bits of the
 * subsystem ID, the same as Dell. We could use one function but we may
 * need to extend the Dell one in future.
 */

static int sun_cable(struct ata_port *ap) {
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 * osb4_cable - OSB4 cable detect
 * @ap: ATA port to check
 *
 * The OSB4 isn't UDMA66 capable so this is easy.
 */

static int osb4_cable(struct ata_port *ap) {
	return ATA_CBL_PATA40;
}

/**
 * csb_cable - CSB5/6 cable detect
 * @ap: ATA port to check
 *
 * Serverworks' default arrangement is to use the drive side detection
 * only.
 */

static int csb_cable(struct ata_port *ap) {
	return ATA_CBL_PATA80;
}

struct sv_cable_table {
	int device;
	int subvendor;
	int (*cable_detect)(struct ata_port *ap);
};

/*
 * Note that we don't copy the old serverworks code because the old
 * code contains obvious mistakes
 */

static struct sv_cable_table cable_detect[] = {
	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, dell_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, dell_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, sun_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, osb4_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, csb_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, csb_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable },
	{ PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable },
	{ }
};

/**
 * serverworks_pre_reset - cable detection
 * @ap: ATA port
 *
 * Perform cable detection according to the device and subvendor
 * identifications.
 */

static int serverworks_pre_reset(struct ata_port *ap) {
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct sv_cable_table *cb = cable_detect;

	while (cb->device) {
		if (cb->device == pdev->device &&
		    (cb->subvendor == pdev->subsystem_vendor ||
		     cb->subvendor == PCI_ANY_ID)) {
			ap->cbl = cb->cable_detect(ap);
			return ata_std_prereset(ap);
		}
		cb++;
	}

	BUG();
	return -1;	/* kill compiler warning */
}

static void serverworks_error_handler(struct ata_port *ap)
{
	return ata_bmdma_drive_eh(ap, serverworks_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}

/**
 * serverworks_is_csb - Check for CSB or OSB
 * @pdev: PCI device to check
 *
 * Returns true if the device being checked is known to be a CSB
 * series device.
 */

static u8 serverworks_is_csb(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
	case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
	case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
	case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
		return 1;
	default:
		break;
	}
	return 0;
}

/**
 * serverworks_osb4_filter - mode selection filter
 * @ap: ATA interface
 * @adev: ATA device
 * @mask: Mask of proposed modes
 *
 * Filter the offered modes for the device to apply controller
 * specific rules. OSB4 requires no UDMA for disks due to a FIFO
 * bug we hit.
 */

static unsigned long serverworks_osb4_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
{
	if (adev->class == ATA_DEV_ATA)
		mask &= ~ATA_MASK_UDMA;
	return ata_pci_default_filter(ap, adev, mask);
}


/**
 * serverworks_csb_filter - mode selection filter
 * @ap: ATA interface
 * @adev: ATA device
 * @mask: Mask of proposed modes
 *
 * Check the blacklist and disable UDMA5 if matched.
 */

static unsigned long serverworks_csb_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
{
	const char *p;
	char model_num[ATA_ID_PROD_LEN + 1];
	int i;

	/* Disk, UDMA */
	if (adev->class != ATA_DEV_ATA)
		return ata_pci_default_filter(ap, adev, mask);

	/* Actually do need to check */
	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
		if (!strcmp(p, model_num))
			mask &= ~(0x1F << ATA_SHIFT_UDMA);
	}
	return ata_pci_default_filter(ap, adev, mask);
}


/**
 * serverworks_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the OSB4/CSB5 timing registers for PIO. The PIO register
 * load is done as a simple lookup.
 */
static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
	int offset = 1 + (2 * ap->port_no) - adev->devno;
	int devbits = (2 * ap->port_no + adev->devno) * 4;
	u16 csb5_pio;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int pio = adev->pio_mode - XFER_PIO_0;

	pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);

	/* The OSB4 just requires the timing but the CSB series want the
	   mode number as well. Register 0x4A is a 16-bit field holding one
	   mode nibble per drive, so it must be written as a word. */
	if (serverworks_is_csb(pdev)) {
		pci_read_config_word(pdev, 0x4A, &csb5_pio);
		csb5_pio &= ~(0x0F << devbits);
		pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
	}
}

/**
 * serverworks_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
 * chipset. The MWDMA mode values are pulled from a lookup table
 * while the chipset uses mode number for UDMA.
 */

static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
	int offset = 1 + 2 * ap->port_no - adev->devno;
	int devbits = (2 * ap->port_no + adev->devno);
	u8 ultra;
	u8 ultra_cfg;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	pci_read_config_byte(pdev, 0x54, &ultra_cfg);

	if (adev->dma_mode >= XFER_UDMA_0) {
		pci_write_config_byte(pdev, 0x44 + offset, 0x20);

		/* The UDMA timing register at 0x56/0x57 is per channel and
		   holds one nibble per device on that channel */
		pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
		ultra &= ~(0x0F << (adev->devno * 4));
		ultra |= (adev->dma_mode - XFER_UDMA_0)
			  << (adev->devno * 4);
		pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);

		ultra_cfg |= (1 << devbits);
	} else {
		pci_write_config_byte(pdev, 0x44 + offset,
			dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
		ultra_cfg &= ~(1 << devbits);
	}
	pci_write_config_byte(pdev, 0x54, ultra_cfg);
}

static struct scsi_host_template serverworks_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
#ifdef CONFIG_PM
	.resume = ata_scsi_device_resume,
	.suspend = ata_scsi_device_suspend,
#endif
};

static struct ata_port_operations serverworks_osb4_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = serverworks_set_piomode,
	.set_dmamode = serverworks_set_dmamode,
	.mode_filter = serverworks_osb4_filter,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = serverworks_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.port_start = ata_port_start,
};

static struct ata_port_operations serverworks_csb_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = serverworks_set_piomode,
	.set_dmamode = serverworks_set_dmamode,
	.mode_filter = serverworks_csb_filter,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = serverworks_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.port_start = ata_port_start,
};

/**
 * serverworks_fixup_osb4 - enable UDMA on the OSB4
 * @pdev: PCI device of the IDE function
 *
 * The OSB4 keeps its UDMA enable in the ISA bridge function, so locate
 * that device and set the bit there. Returns 0 on success or -ENODEV
 * if the bridge cannot be found.
 */

static int serverworks_fixup_osb4(struct pci_dev *pdev)
{
	u32 reg;
	struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
			PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
	if (isa_dev) {
		pci_read_config_dword(isa_dev, 0x64, &reg);
		reg &= ~0x00002000; /* disable 600ns interrupt mask */
		if (!(reg & 0x00004000))
			printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
		reg |= 0x00004000; /* enable UDMA/33 support */
		pci_write_config_dword(isa_dev, 0x64, reg);
		pci_dev_put(isa_dev);
		return 0;
	}
	printk(KERN_WARNING DRV_NAME ": Unable to find bridge.\n");
	return -ENODEV;
}

/**
 * serverworks_fixup_csb - enable DMA modes on the CSB5/CSB6
 * @pdev: PCI device of the IDE function
 *
 * Tidy up the third channel configuration and program the UDMA control
 * register. Returns the control value written so the caller can tell
 * whether UDMA5 was enabled.
 */

static int serverworks_fixup_csb(struct pci_dev *pdev)
{
	u8 rev;
	u8 btr;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);

	/* Third Channel Test */
	if (!(PCI_FUNC(pdev->devfn) & 1)) {
		struct pci_dev *findev = NULL;
		u32 reg4c = 0;
		findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
			PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
		if (findev) {
			pci_read_config_dword(findev, 0x4C, &reg4c);
			reg4c &= ~0x000007FF;
			reg4c |= 0x00000040;
			reg4c |= 0x00000020;
			pci_write_config_dword(findev, 0x4C, reg4c);
			pci_dev_put(findev);
		}
	} else {
		struct pci_dev *findev = NULL;
		u8 reg41 = 0;

		findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
			PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
		if (findev) {
			pci_read_config_byte(findev, 0x41, &reg41);
			reg41 &= ~0x40;
			pci_write_config_byte(findev, 0x41, reg41);
			pci_dev_put(findev);
		}
	}
	/* setup the UDMA Control register
	 *
	 * 1. clear bit 6 to enable DMA
	 * 2. enable DMA modes with bits 0-1
	 *	00 : legacy
	 *	01 : udma2
	 *	10 : udma2/udma4
	 *	11 : udma2/udma4/udma5
	 */
	pci_read_config_byte(pdev, 0x5A, &btr);
	btr &= ~0x40;
	if (!(PCI_FUNC(pdev->devfn) & 1))
		btr |= 0x2;
	else
		btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
	pci_write_config_byte(pdev, 0x5A, btr);

	return btr;
}

static void serverworks_fixup_ht1000(struct pci_dev *pdev)
{
	u8 btr;
	/* Setup HT1000 SouthBridge Controller - Single Channel Only */
	pci_read_config_byte(pdev, 0x5A, &btr);
	btr &= ~0x40;
	btr |= 0x3;
	pci_write_config_byte(pdev, 0x5A, btr);
}


static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ports = 2;
	static struct ata_port_info info[4] = {
		{ /* OSB4 */
			.sht = &serverworks_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x07,
			.port_ops = &serverworks_osb4_port_ops
		}, { /* OSB4 no UDMA */
			.sht = &serverworks_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x00,
			.port_ops = &serverworks_osb4_port_ops
		}, { /* CSB5 */
			.sht = &serverworks_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x1f,
			.port_ops = &serverworks_csb_port_ops
		}, { /* CSB5 - later revisions */
			.sht = &serverworks_sht,
			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = 0x3f,
			.port_ops = &serverworks_csb_port_ops
		}
	};
	static struct ata_port_info *port_info[2];
	struct ata_port_info *devinfo = &info[id->driver_data];

	/* Force master latency timer to 64 PCI clocks */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);

	/* OSB4 : South Bridge and IDE */
	if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
		/* Select non UDMA capable OSB4 if we can't do fixups */
		if (serverworks_fixup_osb4(pdev) < 0)
			devinfo = &info[1];
	}
	/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
	else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
		 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
		 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {

		/* If the returned btr is the newer revision then
		   select the right info block */
		if (serverworks_fixup_csb(pdev) == 3)
			devinfo = &info[3];

		/* Is this the 3rd channel CSB6 IDE ? */
		if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
			ports = 1;
	}
	/* setup HT1000E */
	else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
		serverworks_fixup_ht1000(pdev);

	if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
		ata_pci_clear_simplex(pdev);

	port_info[0] = port_info[1] = devinfo;
	return ata_pci_init_one(pdev, port_info, ports);
}

#ifdef CONFIG_PM
static int serverworks_reinit_one(struct pci_dev *pdev)
{
	/* Force master latency timer to 64 PCI clocks */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);

	switch (pdev->device) {
	case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
		serverworks_fixup_osb4(pdev);
		break;
	case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
		ata_pci_clear_simplex(pdev);
		/* fall through */
	case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
	case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
		serverworks_fixup_csb(pdev);
		break;
	case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
		serverworks_fixup_ht1000(pdev);
		break;
	}
	return ata_pci_device_resume(pdev);
}
#endif

static const struct pci_device_id serverworks[] = {
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0 },
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2 },
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2 },
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2 },
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2 },

	{ },
};

static struct pci_driver serverworks_pci_driver = {
	.name = DRV_NAME,
	.id_table = serverworks,
	.probe = serverworks_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend = ata_pci_device_suspend,
	.resume = serverworks_reinit_one,
#endif
};

static int __init serverworks_init(void)
{
	return pci_register_driver(&serverworks_pci_driver);
}

static void __exit serverworks_exit(void)
{
	pci_unregister_driver(&serverworks_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, serverworks);
MODULE_VERSION(DRV_VERSION);

module_init(serverworks_init);
module_exit(serverworks_exit);