/*
 * HighPoint RR3xxx controller driver for Linux
 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
static const char driver_ver[] = "v1.2 (070830)";

static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
{
	readl(&iop->outbound_intstatus);
}

static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &iop->outbound_queue);
		hptiop_pci_posting_flush(iop);
		return 0;
	}

	return -1;
}

static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		return hptiop_host_request_callback(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		return hptiop_iop_request_callback(hba, tag);
}

static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback(hba, req);
				else
					writel(1, &p->context);
			} else
				hptiop_request_callback(hba, req);
		}
	}
}

static int __iop_intr(struct hptiop_hba *hba)
{
	struct hpt_iopmu __iomem *iop = hba->iop;
	u32 status;
	int ret = 0;

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);
		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue(hba);
		ret = 1;
	}

	return ret;
}
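
/*
 * Synchronous request/message helpers.  A sketch of the handshake as
 * implemented below: the caller marks the request with
 * IOP_REQUEST_FLAG_SYNC_REQUEST, clears ->context, posts the request's
 * offset to the inbound queue, then polls __iop_intr() once per msleep(1)
 * for up to 'millisec' iterations; ->context doubles as the completion
 * flag (see hptiop_drain_outbound_queue() above).  The timeout is
 * therefore approximate, not exact.
 */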
static int iop_send_sync_request(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
		&req->flags);

	writel(0, &req->context);

	writel((unsigned long)req - (unsigned long)hba->iop,
			&hba->iop->inbound_queue);

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		__iop_intr(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;

	writel(msg, &hba->iop->inbound_msgaddr0);

	hptiop_pci_posting_flush(hba->iop);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		__iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done ? 0 : -1;
}

static int iop_get_config(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->iop->outbound_queue);
	return 0;
}

static int iop_set_config(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->iop->outbound_queue);
	return 0;
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	struct hpt_iopmu __iomem *iop = hba->iop;

	/* enable interrupts */
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
			&iop->outbound_intmask);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}
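
/*
 * BAR 0 holds the IOP message unit (struct hpt_iopmu).  The whole BAR is
 * ioremap()ed once and kept in hba->iop; both register accesses and the
 * byte offsets handed out through the inbound queue are interpreted
 * relative to this single mapping.
 */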
static int hptiop_map_pci_bar(struct hptiop_hba *hba)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;
	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return -1;
	}

	mem_base_phy = pci_resource_start(pcidev, 0);
	length = pci_resource_len(pcidev, 0);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return -1;
	}

	hba->iop = mem_base_virt;
	dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
	return 0;
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	} else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	struct scsi_cmnd *scp;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = IOP_RESULT_SUCCESS;
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	dprintk("hptiop_host_request_callback: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
		scp->result = SAM_STAT_CHECK_CONDITION;
		memset(&scp->sense_buffer, 0, sizeof(scp->sense_buffer));
		/* clamp to the sense buffer size so a long response
		   cannot overrun it */
		memcpy(&scp->sense_buffer, &req->sg_list,
				min_t(size_t, sizeof(scp->sense_buffer),
					le32_to_cpu(req->dataxfer_length)));
		break;

	default:
		scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
					(DID_ABORT<<16);
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}
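
/*
 * Requests that live in IOP memory are addressed by their byte offset into
 * the BAR 0 mapping, so every field access below goes through readl()/
 * memcpy_fromio().  The 64-bit context (context | context_hi32 << 32)
 * carries the host-side struct hpt_ioctl_k pointer back to this callback.
 */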
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->iop + tag);
	dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	} else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = __iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}
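
/*
 * Command queueing path.  A free struct hptiop_request is popped from
 * hba->req_list, the SCSI command is translated into a
 * hpt_iop_request_scsi_command in host DMA memory, and its physical
 * address (pre-shifted right by 5 bits) is posted to the inbound queue.
 * As a worked example, with sg_count == 4 the header.size computed below
 * is sizeof(struct hpt_iop_request_scsi_command)
 * + 3 * sizeof(struct hpt_iopsg), since the command struct already embeds
 * one hpt_iopsg.
 */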
static int hptiop_queuecommand(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			*((u32 *)&scp->cmnd),
			*((u32 *)&scp->cmnd + 1),
			*((u32 *)&scp->cmnd + 2),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
			scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
						(u32)_req->index);
	req->header.context_hi32 = 0;
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				 - sizeof(struct hpt_iopsg)
				 + sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));

	if (hba->iopintf_v2) {
		u32 size_bits;
		if (req->header.size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (req->header.size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
					IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
			&hba->iop->inbound_queue);

	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		writel(IOPMU_INBOUND_MSG0_RESET,
			&hba->iop->inbound_msgaddr0);
		hptiop_pci_posting_flush(hba->iop);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}
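
/*
 * Both eh_device_reset_handler and eh_bus_reset_handler (see the host
 * template below) point at hptiop_reset(), so any midlayer-requested
 * reset restarts the whole IOP rather than a single device or channel.
 */
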
static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
						int queue_depth)
{
	if (queue_depth > 256)
		queue_depth = 256;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *host = class_to_shost(class_dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct class_device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct class_device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct class_device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = driver_name,
	.queuecommand = hptiop_queuecommand,
	.eh_device_reset_handler = hptiop_reset,
	.eh_bus_reset_handler = hptiop_reset,
	.info = hptiop_info,
	.unchecked_isa_dma = 0,
	.emulated = 0,
	.use_clustering = ENABLE_CLUSTERING,
	.proc_name = driver_name,
	.shost_attrs = hptiop_attrs,
	.this_id = -1,
	.change_queue_depth = hptiop_adjust_disk_queue_depth,
};
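
/*
 * Probe sequence: enable the PCI device, pick a DMA mask (64-bit when the
 * platform allows it, else 32-bit), map BAR 0, wait for the firmware to
 * become ready, exchange get/set-config requests, hook the shared IRQ,
 * carve out the coherent request pool, then enable interrupts and register
 * with the SCSI midlayer.  Each failure label unwinds exactly the steps
 * taken so far.
 */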
static int __devinit hptiop_probe(struct pci_dev *pcidev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
		if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
			goto disable_pci_device;
		}
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;

	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 1;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hptiop_map_pci_bar(hba))
		goto free_scsi_host;

	if (iop_wait_ready(hba->iop, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (iop_get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (iop_set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	start_virt = dma_alloc_coherent(&pcidev->dev,
				hba->req_size*hba->max_requests + 0x20,
				&start_phy, GFP_KERNEL);

	if (!start_virt) {
		printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
					hba->host->host_no);
		goto free_request_irq;
	}

	hba->dma_coherent = start_virt;
	hba->dma_coherent_handle = start_phy;

	if ((start_phy & 0x1f) != 0) {
		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
		start_phy += offset;
		start_virt += offset;
	}

	hba->req_list = start_virt;
	for (i = 0; i < hba->max_requests; i++) {
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
		start_virt = (char *)start_virt + hba->req_size;
		start_phy = start_phy + hba->req_size;
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size*hba->max_requests + 0x20,
			hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	iounmap(hba->iop);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host->host_no);
	return -ENODEV;
}
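
/*
 * Note on the request pool built in hptiop_probe(): req_shifted_phy holds
 * a request's physical address >> 5, so both req_size and the pool base
 * must be 32-byte aligned; the extra 0x20 bytes in the allocation exist
 * solely to allow that alignment fixup.
 */
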
static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iopmu __iomem *iop = hba->iop;
	u32 int_mask;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	int_mask = readl(&iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&iop->outbound_intmask);
	hptiop_pci_posting_flush(iop);
}

static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	iounmap(hba->iop);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220) },
	{ PCI_VDEVICE(TTI, 0x3320) },
	{ PCI_VDEVICE(TTI, 0x3520) },
	{ PCI_VDEVICE(TTI, 0x4320) },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name = driver_name,
	.id_table = hptiop_id_table,
	.probe = hptiop_probe,
	.remove = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");