1 /* 2 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD 3 * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/types.h> 32 #include <sys/cons.h> 33 #if (__FreeBSD_version >= 500000) 34 #include <sys/time.h> 35 #include <sys/systm.h> 36 #else 37 #include <machine/clock.h> 38 #endif 39 40 #include <sys/stat.h> 41 #include <sys/malloc.h> 42 #include <sys/conf.h> 43 #include <sys/libkern.h> 44 #include <sys/kernel.h> 45 46 #if (__FreeBSD_version >= 500000) 47 #include <sys/kthread.h> 48 #include <sys/mutex.h> 49 #include <sys/module.h> 50 #endif 51 52 #include <sys/eventhandler.h> 53 #include <sys/bus.h> 54 #include <sys/taskqueue.h> 55 #include <sys/ioccom.h> 56 57 #include <machine/resource.h> 58 #include <machine/bus.h> 59 #include <machine/stdarg.h> 60 #include <sys/rman.h> 61 62 #include <vm/vm.h> 63 #include <vm/pmap.h> 64 65 #if (__FreeBSD_version >= 500000) 66 #include <dev/pci/pcireg.h> 67 #include <dev/pci/pcivar.h> 68 #else 69 #include <pci/pcivar.h> 70 #include <pci/pcireg.h> 71 #endif 72 73 #if (__FreeBSD_version <= 500043) 74 #include <sys/devicestat.h> 75 #endif 76 77 #include <cam/cam.h> 78 #include <cam/cam_ccb.h> 79 #include <cam/cam_sim.h> 80 #include <cam/cam_xpt_sim.h> 81 #include <cam/cam_debug.h> 82 #include <cam/cam_periph.h> 83 #include <cam/scsi/scsi_all.h> 84 #include <cam/scsi/scsi_message.h> 85 86 #if (__FreeBSD_version < 500043) 87 #include <sys/bus_private.h> 88 #endif 89 90 #include <dev/hptiop/hptiop.h> 91 92 static char driver_name[] = "hptiop"; 93 static char driver_version[] = "v1.3 (010208)"; 94 95 static devclass_t hptiop_devclass; 96 97 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, 98 u_int32_t msg, u_int32_t millisec); 99 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba, 100 u_int32_t req); 101 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req); 102 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg); 103 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, 104 struct hpt_iop_ioctl_param *pParams); 105 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, 106 struct hpt_iop_ioctl_param *pParams); 107 static int hptiop_rescan_bus(struct hpt_iop_hba *hba); 108 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba); 109 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba); 110 static int hptiop_get_config_itl(struct hpt_iop_hba *hba, 111 struct hpt_iop_request_get_config *config); 112 static int hptiop_get_config_mv(struct hpt_iop_hba *hba, 113 struct hpt_iop_request_get_config *config); 114 static int hptiop_set_config_itl(struct hpt_iop_hba *hba, 115 struct hpt_iop_request_set_config *config); 116 static int hptiop_set_config_mv(struct hpt_iop_hba *hba, 117 struct hpt_iop_request_set_config *config); 118 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba); 119 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba); 120 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, 121 u_int32_t req32, struct hpt_iop_ioctl_param *pParams); 122 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, 123 struct hpt_iop_request_ioctl_command *req, 124 struct hpt_iop_ioctl_param *pParams); 125 static void hptiop_post_req_itl(struct hpt_iop_hba *hba, 126 struct hpt_iop_srb *srb, 127 bus_dma_segment_t *segs, int nsegs); 128 static void hptiop_post_req_mv(struct hpt_iop_hba *hba, 129 struct hpt_iop_srb *srb, 130 bus_dma_segment_t *segs, int nsegs); 131 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg); 132 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg); 133 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba); 134 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba); 135 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba); 136 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba); 137 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb); 138 static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid); 139 static int hptiop_probe(device_t dev); 140 static int hptiop_attach(device_t dev); 141 static int hptiop_detach(device_t dev); 142 static int hptiop_shutdown(device_t dev); 143 static void hptiop_action(struct cam_sim *sim, union ccb *ccb); 144 static void hptiop_poll(struct cam_sim *sim); 145 static void hptiop_async(void *callback_arg, u_int32_t code, 146 struct cam_path *path, void *arg); 147 static void hptiop_pci_intr(void *arg); 148 static void hptiop_release_resource(struct hpt_iop_hba *hba); 149 static int hptiop_reset_adapter(struct hpt_iop_hba *hba); 150 151 static d_open_t hptiop_open; 152 static d_close_t hptiop_close; 153 static d_ioctl_t hptiop_ioctl; 154 155 static struct cdevsw hptiop_cdevsw = { 156 .d_open = hptiop_open, 157 .d_close = hptiop_close, 158 .d_ioctl = hptiop_ioctl, 159 .d_name = driver_name, 160 #if __FreeBSD_version>=503000 161 .d_version = D_VERSION, 162 #endif 163 #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) 164 .d_flags = D_NEEDGIANT, 165 #endif 166 #if __FreeBSD_version<600034 167 #if __FreeBSD_version>=501000 168 .d_maj = MAJOR_AUTO, 169 #else 170 .d_maj = HPT_DEV_MAJOR, 171 #endif 172 #endif 173 }; 174 175 #if __FreeBSD_version < 503000 176 #define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1) 177 #else 178 #define hba_from_dev(dev) \ 179 ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev))) 180 #endif 181 182 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\ 183 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value)) 184 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\ 185 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset)) 186 187 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\ 188 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value) 189 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\ 190 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset)) 191 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\ 192 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value) 193 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\ 194 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset)) 195 196 static int hptiop_open(ioctl_dev_t dev, int flags, 197 int devtype, ioctl_thread_t proc) 198 { 199 struct hpt_iop_hba *hba = hba_from_dev(dev); 200 201 if (hba==NULL) 202 return ENXIO; 203 if (hba->flag & HPT_IOCTL_FLAG_OPEN) 204 return EBUSY; 205 hba->flag |= HPT_IOCTL_FLAG_OPEN; 206 return 0; 207 } 208 209 static int hptiop_close(ioctl_dev_t dev, int flags, 210 int devtype, ioctl_thread_t proc) 211 { 212 struct hpt_iop_hba *hba = hba_from_dev(dev); 213 hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN; 214 return 0; 215 } 216 217 static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, 218 int flags, ioctl_thread_t proc) 219 { 220 int ret = EFAULT; 221 struct hpt_iop_hba *hba = hba_from_dev(dev); 222 223 #if (__FreeBSD_version >= 500000) 224 mtx_lock(&Giant); 225 #endif 226 227 switch (cmd) { 228 case HPT_DO_IOCONTROL: 229 ret = hba->ops->do_ioctl(hba, 230 (struct hpt_iop_ioctl_param *)data); 231 break; 232 case HPT_SCAN_BUS: 233 ret = hptiop_rescan_bus(hba); 234 break; 235 } 236 237 #if (__FreeBSD_version >= 500000) 238 mtx_unlock(&Giant); 239 #endif 240 241 return ret; 242 } 243 244 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba) 245 { 246 u_int64_t p; 247 u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail); 248 u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head); 249 250 if (outbound_tail != outbound_head) { 251 bus_space_read_region_4(hba->bar2t, hba->bar2h, 252 offsetof(struct hpt_iopmu_mv, 253 outbound_q[outbound_tail]), 254 (u_int32_t *)&p, 2); 255 256 outbound_tail++; 257 258 if (outbound_tail == MVIOP_QUEUE_LEN) 259 outbound_tail = 0; 260 261 BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail); 262 return p; 263 } else 264 return 0; 265 } 266 267 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba) 268 { 269 u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head); 270 u_int32_t head = inbound_head + 1; 271 272 if (head == MVIOP_QUEUE_LEN) 273 head = 0; 274 275 bus_space_write_region_4(hba->bar2t, hba->bar2h, 276 offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]), 277 (u_int32_t *)&p, 2); 278 BUS_SPACE_WRT4_MV2(inbound_head, head); 279 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE); 280 } 281 282 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg) 283 { 284 BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg); 285 BUS_SPACE_RD4_ITL(outbound_intstatus); 286 } 287 288 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg) 289 { 290 291 BUS_SPACE_WRT4_MV2(inbound_msg, msg); 292 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG); 293 294 BUS_SPACE_RD4_MV0(outbound_intmask); 295 } 296 297 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec) 298 { 299 u_int32_t req=0; 300 int i; 301 302 for (i = 0; i < millisec; i++) { 303 req = BUS_SPACE_RD4_ITL(inbound_queue); 304 if (req != IOPMU_QUEUE_EMPTY) 305 break; 306 DELAY(1000); 307 } 308 309 if (req!=IOPMU_QUEUE_EMPTY) { 310 BUS_SPACE_WRT4_ITL(outbound_queue, req); 311 BUS_SPACE_RD4_ITL(outbound_intstatus); 312 return 0; 313 } 314 315 return -1; 316 } 317 318 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec) 319 { 320 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) 321 return -1; 322 323 return 0; 324 } 325 326 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba, 327 u_int32_t index) 328 { 329 struct hpt_iop_srb *srb; 330 struct hpt_iop_request_scsi_command *req=0; 331 union ccb *ccb; 332 u_int8_t *cdb; 333 u_int32_t result, temp, dxfer; 334 u_int64_t temp64; 335 336 if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/ 337 if (hba->firmware_version > 0x01020000 || 338 hba->interface_version > 0x01020000) { 339 srb = hba->srb[index & ~(u_int32_t) 340 (IOPMU_QUEUE_ADDR_HOST_BIT 341 | IOPMU_QUEUE_REQUEST_RESULT_BIT)]; 342 req = (struct hpt_iop_request_scsi_command *)srb; 343 if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT) 344 result = IOP_RESULT_SUCCESS; 345 else 346 result = req->header.result; 347 } else { 348 srb = hba->srb[index & 349 ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT]; 350 req = (struct hpt_iop_request_scsi_command *)srb; 351 result = req->header.result; 352 } 353 dxfer = req->dataxfer_length; 354 goto srb_complete; 355 } 356 357 /*iop req*/ 358 temp = bus_space_read_4(hba->bar0t, hba->bar0h, index + 359 offsetof(struct hpt_iop_request_header, type)); 360 result = bus_space_read_4(hba->bar0t, hba->bar0h, index + 361 offsetof(struct hpt_iop_request_header, result)); 362 switch(temp) { 363 case IOP_REQUEST_TYPE_IOCTL_COMMAND: 364 { 365 temp64 = 0; 366 bus_space_write_region_4(hba->bar0t, hba->bar0h, index + 367 offsetof(struct hpt_iop_request_header, context), 368 (u_int32_t *)&temp64, 2); 369 wakeup((void *)((unsigned long)hba->u.itl.mu + index)); 370 break; 371 } 372 373 case IOP_REQUEST_TYPE_SCSI_COMMAND: 374 bus_space_read_region_4(hba->bar0t, hba->bar0h, index + 375 offsetof(struct hpt_iop_request_header, context), 376 (u_int32_t *)&temp64, 2); 377 srb = (struct hpt_iop_srb *)(unsigned long)temp64; 378 dxfer = bus_space_read_4(hba->bar0t, hba->bar0h, 379 index + offsetof(struct hpt_iop_request_scsi_command, 380 dataxfer_length)); 381 srb_complete: 382 ccb = (union ccb *)srb->ccb; 383 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 384 cdb = ccb->csio.cdb_io.cdb_ptr; 385 else 386 cdb = ccb->csio.cdb_io.cdb_bytes; 387 388 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ 389 ccb->ccb_h.status = CAM_REQ_CMP; 390 goto scsi_done; 391 } 392 393 switch (result) { 394 case IOP_RESULT_SUCCESS: 395 switch (ccb->ccb_h.flags & CAM_DIR_MASK) { 396 case CAM_DIR_IN: 397 bus_dmamap_sync(hba->io_dmat, 398 srb->dma_map, BUS_DMASYNC_POSTREAD); 399 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 400 break; 401 case CAM_DIR_OUT: 402 bus_dmamap_sync(hba->io_dmat, 403 srb->dma_map, BUS_DMASYNC_POSTWRITE); 404 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 405 break; 406 } 407 408 ccb->ccb_h.status = CAM_REQ_CMP; 409 break; 410 411 case IOP_RESULT_BAD_TARGET: 412 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 413 break; 414 case IOP_RESULT_BUSY: 415 ccb->ccb_h.status = CAM_BUSY; 416 break; 417 case IOP_RESULT_INVALID_REQUEST: 418 ccb->ccb_h.status = CAM_REQ_INVALID; 419 break; 420 case IOP_RESULT_FAIL: 421 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 422 break; 423 case IOP_RESULT_RESET: 424 ccb->ccb_h.status = CAM_BUSY; 425 break; 426 case IOP_RESULT_CHECK_CONDITION: 427 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/ 428 bus_space_read_region_1(hba->bar0t, hba->bar0h, 429 index + offsetof(struct hpt_iop_request_scsi_command, 430 sg_list), (u_int8_t *)&ccb->csio.sense_data, 431 MIN(dxfer, sizeof(ccb->csio.sense_data))); 432 } else { 433 memcpy(&ccb->csio.sense_data, &req->sg_list, 434 MIN(dxfer, sizeof(ccb->csio.sense_data))); 435 } 436 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 437 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 438 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 439 break; 440 default: 441 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 442 break; 443 } 444 scsi_done: 445 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) 446 BUS_SPACE_WRT4_ITL(outbound_queue, index); 447 448 ccb->csio.resid = ccb->csio.dxfer_len - dxfer; 449 450 hptiop_free_srb(hba, srb); 451 xpt_done(ccb); 452 break; 453 } 454 } 455 456 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba) 457 { 458 u_int32_t req, temp; 459 460 while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) { 461 if (req & IOPMU_QUEUE_MASK_HOST_BITS) 462 hptiop_request_callback_itl(hba, req); 463 else { 464 struct hpt_iop_request_header *p; 465 466 p = (struct hpt_iop_request_header *) 467 ((char *)hba->u.itl.mu + req); 468 temp = bus_space_read_4(hba->bar0t, 469 hba->bar0h,req + 470 offsetof(struct hpt_iop_request_header, 471 flags)); 472 if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) { 473 u_int64_t temp64; 474 bus_space_read_region_4(hba->bar0t, 475 hba->bar0h,req + 476 offsetof(struct hpt_iop_request_header, 477 context), 478 (u_int32_t *)&temp64, 2); 479 if (temp64) { 480 hptiop_request_callback_itl(hba, req); 481 } else { 482 temp64 = 1; 483 bus_space_write_region_4(hba->bar0t, 484 hba->bar0h,req + 485 offsetof(struct hpt_iop_request_header, 486 context), 487 (u_int32_t *)&temp64, 2); 488 } 489 } else 490 hptiop_request_callback_itl(hba, req); 491 } 492 } 493 } 494 495 static int hptiop_intr_itl(struct hpt_iop_hba * hba) 496 { 497 u_int32_t status; 498 int ret = 0; 499 500 status = BUS_SPACE_RD4_ITL(outbound_intstatus); 501 502 if (status & IOPMU_OUTBOUND_INT_MSG0) { 503 u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0); 504 KdPrint(("hptiop: received outbound msg %x\n", msg)); 505 BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0); 506 hptiop_os_message_callback(hba, msg); 507 ret = 1; 508 } 509 510 if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { 511 hptiop_drain_outbound_queue_itl(hba); 512 ret = 1; 513 } 514 515 return ret; 516 } 517 518 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba, 519 u_int64_t _tag) 520 { 521 u_int32_t context = (u_int32_t)_tag; 522 523 if (context & MVIOP_CMD_TYPE_SCSI) { 524 struct hpt_iop_srb *srb; 525 struct hpt_iop_request_scsi_command *req; 526 union ccb *ccb; 527 u_int8_t *cdb; 528 529 srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT]; 530 req = (struct hpt_iop_request_scsi_command *)srb; 531 ccb = (union ccb *)srb->ccb; 532 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 533 cdb = ccb->csio.cdb_io.cdb_ptr; 534 else 535 cdb = ccb->csio.cdb_io.cdb_bytes; 536 537 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ 538 ccb->ccb_h.status = CAM_REQ_CMP; 539 goto scsi_done; 540 } 541 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) 542 req->header.result = IOP_RESULT_SUCCESS; 543 544 switch (req->header.result) { 545 case IOP_RESULT_SUCCESS: 546 switch (ccb->ccb_h.flags & CAM_DIR_MASK) { 547 case CAM_DIR_IN: 548 bus_dmamap_sync(hba->io_dmat, 549 srb->dma_map, BUS_DMASYNC_POSTREAD); 550 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 551 break; 552 case CAM_DIR_OUT: 553 bus_dmamap_sync(hba->io_dmat, 554 srb->dma_map, BUS_DMASYNC_POSTWRITE); 555 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 556 break; 557 } 558 ccb->ccb_h.status = CAM_REQ_CMP; 559 break; 560 case IOP_RESULT_BAD_TARGET: 561 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 562 break; 563 case IOP_RESULT_BUSY: 564 ccb->ccb_h.status = CAM_BUSY; 565 break; 566 case IOP_RESULT_INVALID_REQUEST: 567 ccb->ccb_h.status = CAM_REQ_INVALID; 568 break; 569 case IOP_RESULT_FAIL: 570 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 571 break; 572 case IOP_RESULT_RESET: 573 ccb->ccb_h.status = CAM_BUSY; 574 break; 575 case IOP_RESULT_CHECK_CONDITION: 576 memcpy(&ccb->csio.sense_data, &req->sg_list, 577 MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); 578 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 579 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 580 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 581 break; 582 default: 583 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 584 break; 585 } 586 scsi_done: 587 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; 588 589 hptiop_free_srb(hba, srb); 590 xpt_done(ccb); 591 } else if (context & MVIOP_CMD_TYPE_IOCTL) { 592 struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr; 593 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) 594 hba->config_done = 1; 595 else 596 hba->config_done = -1; 597 wakeup(req); 598 } else if (context & 599 (MVIOP_CMD_TYPE_SET_CONFIG | 600 MVIOP_CMD_TYPE_GET_CONFIG)) 601 hba->config_done = 1; 602 else { 603 device_printf(hba->pcidev, "wrong callback type\n"); 604 } 605 } 606 607 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba) 608 { 609 u_int64_t req; 610 611 while ((req = hptiop_mv_outbound_read(hba))) { 612 if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) { 613 if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) { 614 hptiop_request_callback_mv(hba, req); 615 } 616 } 617 } 618 } 619 620 static int hptiop_intr_mv(struct hpt_iop_hba * hba) 621 { 622 u_int32_t status; 623 int ret = 0; 624 625 status = BUS_SPACE_RD4_MV0(outbound_doorbell); 626 627 if (status) 628 BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status); 629 630 if (status & MVIOP_MU_OUTBOUND_INT_MSG) { 631 u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg); 632 KdPrint(("hptiop: received outbound msg %x\n", msg)); 633 hptiop_os_message_callback(hba, msg); 634 ret = 1; 635 } 636 637 if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { 638 hptiop_drain_outbound_queue_mv(hba); 639 ret = 1; 640 } 641 642 return ret; 643 } 644 645 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba, 646 u_int32_t req32, u_int32_t millisec) 647 { 648 u_int32_t i; 649 u_int64_t temp64; 650 651 BUS_SPACE_WRT4_ITL(inbound_queue, req32); 652 BUS_SPACE_RD4_ITL(outbound_intstatus); 653 654 for (i = 0; i < millisec; i++) { 655 hptiop_intr_itl(hba); 656 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + 657 offsetof(struct hpt_iop_request_header, context), 658 (u_int32_t *)&temp64, 2); 659 if (temp64) 660 return 0; 661 DELAY(1000); 662 } 663 664 return -1; 665 } 666 667 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba, 668 void *req, u_int32_t millisec) 669 { 670 u_int32_t i; 671 u_int64_t phy_addr; 672 hba->config_done = 0; 673 674 phy_addr = hba->ctlcfgcmd_phy | 675 (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT; 676 ((struct hpt_iop_request_get_config *)req)->header.flags |= 677 IOP_REQUEST_FLAG_SYNC_REQUEST | 678 IOP_REQUEST_FLAG_OUTPUT_CONTEXT; 679 hptiop_mv_inbound_write(phy_addr, hba); 680 BUS_SPACE_RD4_MV0(outbound_intmask); 681 682 for (i = 0; i < millisec; i++) { 683 hptiop_intr_mv(hba); 684 if (hba->config_done) 685 return 0; 686 DELAY(1000); 687 } 688 return -1; 689 } 690 691 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, 692 u_int32_t msg, u_int32_t millisec) 693 { 694 u_int32_t i; 695 696 hba->msg_done = 0; 697 hba->ops->post_msg(hba, msg); 698 699 for (i=0; i<millisec; i++) { 700 hba->ops->iop_intr(hba); 701 if (hba->msg_done) 702 break; 703 DELAY(1000); 704 } 705 706 return hba->msg_done? 0 : -1; 707 } 708 709 static int hptiop_get_config_itl(struct hpt_iop_hba * hba, 710 struct hpt_iop_request_get_config * config) 711 { 712 u_int32_t req32; 713 714 config->header.size = sizeof(struct hpt_iop_request_get_config); 715 config->header.type = IOP_REQUEST_TYPE_GET_CONFIG; 716 config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; 717 config->header.result = IOP_RESULT_PENDING; 718 config->header.context = 0; 719 720 req32 = BUS_SPACE_RD4_ITL(inbound_queue); 721 if (req32 == IOPMU_QUEUE_EMPTY) 722 return -1; 723 724 bus_space_write_region_4(hba->bar0t, hba->bar0h, 725 req32, (u_int32_t *)config, 726 sizeof(struct hpt_iop_request_header) >> 2); 727 728 if (hptiop_send_sync_request_itl(hba, req32, 20000)) { 729 KdPrint(("hptiop: get config send cmd failed")); 730 return -1; 731 } 732 733 bus_space_read_region_4(hba->bar0t, hba->bar0h, 734 req32, (u_int32_t *)config, 735 sizeof(struct hpt_iop_request_get_config) >> 2); 736 737 BUS_SPACE_WRT4_ITL(outbound_queue, req32); 738 739 return 0; 740 } 741 742 static int hptiop_get_config_mv(struct hpt_iop_hba * hba, 743 struct hpt_iop_request_get_config * config) 744 { 745 struct hpt_iop_request_get_config *req; 746 747 if (!(req = hba->ctlcfg_ptr)) 748 return -1; 749 750 req->header.flags = 0; 751 req->header.type = IOP_REQUEST_TYPE_GET_CONFIG; 752 req->header.size = sizeof(struct hpt_iop_request_get_config); 753 req->header.result = IOP_RESULT_PENDING; 754 req->header.context = MVIOP_CMD_TYPE_GET_CONFIG; 755 756 if (hptiop_send_sync_request_mv(hba, req, 20000)) { 757 KdPrint(("hptiop: get config send cmd failed")); 758 return -1; 759 } 760 761 *config = *req; 762 return 0; 763 } 764 765 static int hptiop_set_config_itl(struct hpt_iop_hba *hba, 766 struct hpt_iop_request_set_config *config) 767 { 768 u_int32_t req32; 769 770 req32 = BUS_SPACE_RD4_ITL(inbound_queue); 771 772 if (req32 == IOPMU_QUEUE_EMPTY) 773 return -1; 774 775 config->header.size = sizeof(struct hpt_iop_request_set_config); 776 config->header.type = IOP_REQUEST_TYPE_SET_CONFIG; 777 config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; 778 config->header.result = IOP_RESULT_PENDING; 779 config->header.context = 0; 780 781 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, 782 (u_int32_t *)config, 783 sizeof(struct hpt_iop_request_set_config) >> 2); 784 785 if (hptiop_send_sync_request_itl(hba, req32, 20000)) { 786 KdPrint(("hptiop: set config send cmd failed")); 787 return -1; 788 } 789 790 BUS_SPACE_WRT4_ITL(outbound_queue, req32); 791 792 return 0; 793 } 794 795 static int hptiop_set_config_mv(struct hpt_iop_hba *hba, 796 struct hpt_iop_request_set_config *config) 797 { 798 struct hpt_iop_request_set_config *req; 799 800 if (!(req = hba->ctlcfg_ptr)) 801 return -1; 802 803 memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), 804 (u_int8_t *)config + sizeof(struct hpt_iop_request_header), 805 sizeof(struct hpt_iop_request_set_config) - 806 sizeof(struct hpt_iop_request_header)); 807 808 req->header.flags = 0; 809 req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; 810 req->header.size = sizeof(struct hpt_iop_request_set_config); 811 req->header.result = IOP_RESULT_PENDING; 812 req->header.context = MVIOP_CMD_TYPE_SET_CONFIG; 813 814 if (hptiop_send_sync_request_mv(hba, req, 20000)) { 815 KdPrint(("hptiop: set config send cmd failed")); 816 return -1; 817 } 818 819 return 0; 820 } 821 822 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, 823 u_int32_t req32, 824 struct hpt_iop_ioctl_param *pParams) 825 { 826 u_int64_t temp64; 827 struct hpt_iop_request_ioctl_command req; 828 829 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > 830 (hba->max_request_size - 831 offsetof(struct hpt_iop_request_ioctl_command, buf))) { 832 device_printf(hba->pcidev, "request size beyond max value"); 833 return -1; 834 } 835 836 req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) 837 + pParams->nInBufferSize; 838 req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; 839 req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; 840 req.header.result = IOP_RESULT_PENDING; 841 req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu; 842 req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); 843 req.inbuf_size = pParams->nInBufferSize; 844 req.outbuf_size = pParams->nOutBufferSize; 845 req.bytes_returned = 0; 846 847 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req, 848 offsetof(struct hpt_iop_request_ioctl_command, buf)>>2); 849 850 hptiop_lock_adapter(hba); 851 852 BUS_SPACE_WRT4_ITL(inbound_queue, req32); 853 BUS_SPACE_RD4_ITL(outbound_intstatus); 854 855 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + 856 offsetof(struct hpt_iop_request_ioctl_command, header.context), 857 (u_int32_t *)&temp64, 2); 858 while (temp64) { 859 if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32), 860 PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) 861 break; 862 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); 863 bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 + 864 offsetof(struct hpt_iop_request_ioctl_command, 865 header.context), 866 (u_int32_t *)&temp64, 2); 867 } 868 869 hptiop_unlock_adapter(hba); 870 return 0; 871 } 872 873 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) 874 { 875 unsigned char byte; 876 int i; 877 878 for (i=0; i<size; i++) { 879 if (copyin((u_int8_t *)user + i, &byte, 1)) 880 return -1; 881 bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte); 882 } 883 884 return 0; 885 } 886 887 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) 888 { 889 unsigned char byte; 890 int i; 891 892 for (i=0; i<size; i++) { 893 byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i); 894 if (copyout(&byte, (u_int8_t *)user + i, 1)) 895 return -1; 896 } 897 898 return 0; 899 } 900 901 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, 902 struct hpt_iop_ioctl_param * pParams) 903 { 904 u_int32_t req32; 905 u_int32_t result; 906 907 if ((pParams->Magic != HPT_IOCTL_MAGIC) && 908 (pParams->Magic != HPT_IOCTL_MAGIC32)) 909 return EFAULT; 910 911 req32 = BUS_SPACE_RD4_ITL(inbound_queue); 912 if (req32 == IOPMU_QUEUE_EMPTY) 913 return EFAULT; 914 915 if (pParams->nInBufferSize) 916 if (hptiop_bus_space_copyin(hba, req32 + 917 offsetof(struct hpt_iop_request_ioctl_command, buf), 918 (void *)pParams->lpInBuffer, pParams->nInBufferSize)) 919 goto invalid; 920 921 if (hptiop_post_ioctl_command_itl(hba, req32, pParams)) 922 goto invalid; 923 924 result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 + 925 offsetof(struct hpt_iop_request_ioctl_command, 926 header.result)); 927 928 if (result == IOP_RESULT_SUCCESS) { 929 if (pParams->nOutBufferSize) 930 if (hptiop_bus_space_copyout(hba, req32 + 931 offsetof(struct hpt_iop_request_ioctl_command, buf) + 932 ((pParams->nInBufferSize + 3) & ~3), 933 (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) 934 goto invalid; 935 936 if (pParams->lpBytesReturned) { 937 if (hptiop_bus_space_copyout(hba, req32 + 938 offsetof(struct hpt_iop_request_ioctl_command, bytes_returned), 939 (void *)pParams->lpBytesReturned, sizeof(unsigned long))) 940 goto invalid; 941 } 942 943 BUS_SPACE_WRT4_ITL(outbound_queue, req32); 944 945 return 0; 946 } else{ 947 invalid: 948 BUS_SPACE_WRT4_ITL(outbound_queue, req32); 949 950 return EFAULT; 951 } 952 } 953 954 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, 955 struct hpt_iop_request_ioctl_command *req, 956 struct hpt_iop_ioctl_param *pParams) 957 { 958 u_int64_t req_phy; 959 int size = 0; 960 961 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > 962 (hba->max_request_size - 963 offsetof(struct hpt_iop_request_ioctl_command, buf))) { 964 device_printf(hba->pcidev, "request size beyond max value"); 965 return -1; 966 } 967 968 req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); 969 req->inbuf_size = pParams->nInBufferSize; 970 req->outbuf_size = pParams->nOutBufferSize; 971 req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) 972 + pParams->nInBufferSize; 973 req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL; 974 req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; 975 req->header.result = IOP_RESULT_PENDING; 976 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; 977 size = req->header.size >> 8; 978 size = size > 3 ? 3 : size; 979 req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size; 980 hptiop_mv_inbound_write(req_phy, hba); 981 982 BUS_SPACE_RD4_MV0(outbound_intmask); 983 984 while (hba->config_done == 0) { 985 if (hptiop_sleep(hba, req, PPAUSE, 986 "hptctl", HPT_OSM_TIMEOUT)==0) 987 continue; 988 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); 989 } 990 return 0; 991 } 992 993 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, 994 struct hpt_iop_ioctl_param *pParams) 995 { 996 struct hpt_iop_request_ioctl_command *req; 997 998 if ((pParams->Magic != HPT_IOCTL_MAGIC) && 999 (pParams->Magic != HPT_IOCTL_MAGIC32)) 1000 return EFAULT; 1001 1002 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); 1003 hba->config_done = 0; 1004 hptiop_lock_adapter(hba); 1005 if (pParams->nInBufferSize) 1006 if (copyin((void *)pParams->lpInBuffer, 1007 req->buf, pParams->nInBufferSize)) 1008 goto invalid; 1009 if (hptiop_post_ioctl_command_mv(hba, req, pParams)) 1010 goto invalid; 1011 1012 if (hba->config_done == 1) { 1013 if (pParams->nOutBufferSize) 1014 if (copyout(req->buf + 1015 ((pParams->nInBufferSize + 3) & ~3), 1016 (void *)pParams->lpOutBuffer, 1017 pParams->nOutBufferSize)) 1018 goto invalid; 1019 1020 if (pParams->lpBytesReturned) 1021 if (copyout(&req->bytes_returned, 1022 (void*)pParams->lpBytesReturned, 1023 sizeof(u_int32_t))) 1024 goto invalid; 1025 hptiop_unlock_adapter(hba); 1026 return 0; 1027 } else{ 1028 invalid: 1029 hptiop_unlock_adapter(hba); 1030 return EFAULT; 1031 } 1032 } 1033 1034 static int hptiop_rescan_bus(struct hpt_iop_hba * hba) 1035 { 1036 union ccb *ccb; 1037 1038 if ((ccb = xpt_alloc_ccb()) == NULL) 1039 return(ENOMEM); 1040 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim), 1041 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1042 xpt_free_ccb(ccb); 1043 return(EIO); 1044 } 1045 xpt_rescan(ccb); 1046 return(0); 1047 } 1048 1049 static bus_dmamap_callback_t hptiop_map_srb; 1050 static bus_dmamap_callback_t hptiop_post_scsi_command; 1051 static bus_dmamap_callback_t hptiop_mv_map_ctlcfg; 1052 1053 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba) 1054 { 1055 hba->bar0_rid = 0x10; 1056 hba->bar0_res = bus_alloc_resource_any(hba->pcidev, 1057 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); 1058 1059 if (hba->bar0_res == NULL) { 1060 device_printf(hba->pcidev, 1061 "failed to get iop base adrress.\n"); 1062 return -1; 1063 } 1064 hba->bar0t = rman_get_bustag(hba->bar0_res); 1065 hba->bar0h = rman_get_bushandle(hba->bar0_res); 1066 hba->u.itl.mu = (struct hpt_iopmu_itl *) 1067 rman_get_virtual(hba->bar0_res); 1068 1069 if (!hba->u.itl.mu) { 1070 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 1071 hba->bar0_rid, hba->bar0_res); 1072 device_printf(hba->pcidev, "alloc mem res failed\n"); 1073 return -1; 1074 } 1075 1076 return 0; 1077 } 1078 1079 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba) 1080 { 1081 hba->bar0_rid = 0x10; 1082 hba->bar0_res = bus_alloc_resource_any(hba->pcidev, 1083 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); 1084 1085 if (hba->bar0_res == NULL) { 1086 device_printf(hba->pcidev, "failed to get iop bar0.\n"); 1087 return -1; 1088 } 1089 hba->bar0t = rman_get_bustag(hba->bar0_res); 1090 hba->bar0h = rman_get_bushandle(hba->bar0_res); 1091 hba->u.mv.regs = (struct hpt_iopmv_regs *) 1092 rman_get_virtual(hba->bar0_res); 1093 1094 if (!hba->u.mv.regs) { 1095 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 1096 hba->bar0_rid, hba->bar0_res); 1097 device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); 1098 return -1; 1099 } 1100 1101 hba->bar2_rid = 0x18; 1102 hba->bar2_res = bus_alloc_resource_any(hba->pcidev, 1103 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); 1104 1105 if (hba->bar2_res == NULL) { 1106 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 1107 hba->bar0_rid, hba->bar0_res); 1108 device_printf(hba->pcidev, "failed to get iop bar2.\n"); 1109 return -1; 1110 } 1111 1112 hba->bar2t = rman_get_bustag(hba->bar2_res); 1113 hba->bar2h = rman_get_bushandle(hba->bar2_res); 1114 hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res); 1115 1116 if (!hba->u.mv.mu) { 1117 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 1118 hba->bar0_rid, hba->bar0_res); 1119 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 1120 hba->bar2_rid, hba->bar2_res); 1121 device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); 1122 return -1; 1123 } 1124 1125 return 0; 1126 } 1127 1128 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba) 1129 { 1130 if (hba->bar0_res) 1131 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 1132 hba->bar0_rid, hba->bar0_res); 1133 } 1134 1135 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba) 1136 { 1137 if (hba->bar0_res) 1138 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 1139 hba->bar0_rid, hba->bar0_res); 1140 if (hba->bar2_res) 1141 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 1142 hba->bar2_rid, hba->bar2_res); 1143 } 1144 1145 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba) 1146 { 1147 if (bus_dma_tag_create(hba->parent_dmat, 1148 1, 1149 0, 1150 BUS_SPACE_MAXADDR_32BIT, 1151 BUS_SPACE_MAXADDR, 1152 NULL, NULL, 1153 0x800 - 0x8, 1154 1, 1155 BUS_SPACE_MAXSIZE_32BIT, 1156 BUS_DMA_ALLOCNOW, 1157 #if __FreeBSD_version > 502000 1158 NULL, 1159 NULL, 1160 #endif 1161 &hba->ctlcfg_dmat)) { 1162 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); 1163 return -1; 1164 } 1165 1166 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, 1167 #if __FreeBSD_version>501000 1168 BUS_DMA_WAITOK | BUS_DMA_COHERENT, 1169 #else 1170 BUS_DMA_WAITOK, 1171 #endif 1172 &hba->ctlcfg_dmamap) != 0) { 1173 device_printf(hba->pcidev, 1174 "bus_dmamem_alloc failed!\n"); 1175 bus_dma_tag_destroy(hba->ctlcfg_dmat); 1176 return -1; 1177 } 1178 1179 if (bus_dmamap_load(hba->ctlcfg_dmat, 1180 hba->ctlcfg_dmamap, hba->ctlcfg_ptr, 1181 MVIOP_IOCTLCFG_SIZE, 1182 hptiop_mv_map_ctlcfg, hba, 0)) { 1183 device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); 1184 if (hba->ctlcfg_dmat) 1185 bus_dmamem_free(hba->ctlcfg_dmat, 1186 hba->ctlcfg_ptr, hba->ctlcfg_dmamap); 1187 bus_dma_tag_destroy(hba->ctlcfg_dmat); 1188 return -1; 1189 } 1190 1191 return 0; 1192 } 1193 1194 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba) 1195 { 1196 if (hba->ctlcfg_dmat) { 1197 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); 1198 bus_dmamem_free(hba->ctlcfg_dmat, 1199 hba->ctlcfg_ptr, hba->ctlcfg_dmamap); 1200 bus_dma_tag_destroy(hba->ctlcfg_dmat); 1201 } 1202 1203 return 0; 1204 } 1205 1206 /* 1207 * CAM driver interface 1208 */ 1209 static device_method_t driver_methods[] = { 1210 /* Device interface */ 1211 DEVMETHOD(device_probe, hptiop_probe), 1212 DEVMETHOD(device_attach, hptiop_attach), 1213 DEVMETHOD(device_detach, hptiop_detach), 1214 DEVMETHOD(device_shutdown, hptiop_shutdown), 1215 { 0, 0 } 1216 }; 1217 1218 static struct hptiop_adapter_ops hptiop_itl_ops = { 1219 .iop_wait_ready = hptiop_wait_ready_itl, 1220 .internal_memalloc = 0, 1221 .internal_memfree = 0, 1222 .alloc_pci_res = hptiop_alloc_pci_res_itl, 1223 .release_pci_res = hptiop_release_pci_res_itl, 1224 .enable_intr = hptiop_enable_intr_itl, 1225 .disable_intr = hptiop_disable_intr_itl, 1226 .get_config = hptiop_get_config_itl, 1227 .set_config = hptiop_set_config_itl, 1228 .iop_intr = hptiop_intr_itl, 1229 .post_msg = hptiop_post_msg_itl, 1230 .post_req = hptiop_post_req_itl, 1231 .do_ioctl = hptiop_do_ioctl_itl, 1232 }; 1233 1234 static struct hptiop_adapter_ops hptiop_mv_ops = { 1235 .iop_wait_ready = hptiop_wait_ready_mv, 1236 .internal_memalloc = hptiop_internal_memalloc_mv, 1237 .internal_memfree = hptiop_internal_memfree_mv, 1238 .alloc_pci_res = hptiop_alloc_pci_res_mv, 1239 .release_pci_res = hptiop_release_pci_res_mv, 1240 .enable_intr = hptiop_enable_intr_mv, 1241 .disable_intr = hptiop_disable_intr_mv, 1242 .get_config = hptiop_get_config_mv, 1243 .set_config = hptiop_set_config_mv, 1244 .iop_intr = hptiop_intr_mv, 1245 .post_msg = hptiop_post_msg_mv, 1246 .post_req = hptiop_post_req_mv, 1247 .do_ioctl = hptiop_do_ioctl_mv, 1248 }; 1249 1250 static driver_t hptiop_pci_driver = { 1251 driver_name, 1252 driver_methods, 1253 sizeof(struct hpt_iop_hba) 1254 }; 1255 1256 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0); 1257 1258 static int hptiop_probe(device_t dev) 1259 { 1260 struct hpt_iop_hba *hba; 1261 u_int32_t id; 1262 static char buf[256]; 1263 int sas = 0; 1264 struct hptiop_adapter_ops *ops; 1265 1266 if (pci_get_vendor(dev) != 0x1103) 1267 return (ENXIO); 1268 1269 id = pci_get_device(dev); 1270 1271 switch (id) { 1272 case 0x4320: 1273 sas = 1; 1274 case 0x3220: 1275 case 0x3320: 1276 case 0x3410: 1277 case 0x3520: 1278 case 0x3510: 1279 case 0x3511: 1280 case 0x3521: 1281 case 0x3522: 1282 case 0x3540: 1283 ops = &hptiop_itl_ops; 1284 break; 1285 case 0x3120: 1286 case 0x3122: 1287 case 0x3020: 1288 ops = &hptiop_mv_ops; 1289 break; 1290 default: 1291 return (ENXIO); 1292 } 1293 1294 device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n", 1295 pci_get_bus(dev), pci_get_slot(dev), 1296 pci_get_function(dev), pci_get_irq(dev)); 1297 1298 sprintf(buf, "RocketRAID %x %s Controller\n", 1299 id, sas ? "SAS" : "SATA"); 1300 device_set_desc_copy(dev, buf); 1301 1302 hba = (struct hpt_iop_hba *)device_get_softc(dev); 1303 bzero(hba, sizeof(struct hpt_iop_hba)); 1304 hba->ops = ops; 1305 1306 KdPrint(("hba->ops=%p\n", hba->ops)); 1307 return 0; 1308 } 1309 1310 static int hptiop_attach(device_t dev) 1311 { 1312 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev); 1313 struct hpt_iop_request_get_config iop_config; 1314 struct hpt_iop_request_set_config set_config; 1315 int rid = 0; 1316 struct cam_devq *devq; 1317 struct ccb_setasync ccb; 1318 u_int32_t unit = device_get_unit(dev); 1319 1320 device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n", 1321 unit, driver_version); 1322 1323 KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit, 1324 pci_get_bus(dev), pci_get_slot(dev), 1325 pci_get_function(dev), hba->ops)); 1326 1327 #if __FreeBSD_version >=440000 1328 pci_enable_busmaster(dev); 1329 #endif 1330 hba->pcidev = dev; 1331 hba->pciunit = unit; 1332 1333 if (hba->ops->alloc_pci_res(hba)) 1334 return ENXIO; 1335 1336 if (hba->ops->iop_wait_ready(hba, 2000)) { 1337 device_printf(dev, "adapter is not ready\n"); 1338 goto release_pci_res; 1339 } 1340 1341 #if (__FreeBSD_version >= 500000) 1342 mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF); 1343 #endif 1344 1345 if (bus_dma_tag_create(NULL,/* parent */ 1346 1, /* alignment */ 1347 0, /* boundary */ 1348 BUS_SPACE_MAXADDR, /* lowaddr */ 1349 BUS_SPACE_MAXADDR, /* highaddr */ 1350 NULL, NULL, /* filter, filterarg */ 1351 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1352 BUS_SPACE_UNRESTRICTED, /* nsegments */ 1353 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1354 0, /* flags */ 1355 #if __FreeBSD_version>502000 1356 NULL, /* lockfunc */ 1357 NULL, /* lockfuncarg */ 1358 #endif 1359 &hba->parent_dmat /* tag */)) 1360 { 1361 device_printf(dev, "alloc parent_dmat failed\n"); 1362 goto release_pci_res; 1363 } 1364 1365 if (hba->ops->internal_memalloc) { 1366 if (hba->ops->internal_memalloc(hba)) { 1367 device_printf(dev, "alloc srb_dmat failed\n"); 1368 goto destroy_parent_tag; 1369 } 1370 } 1371 1372 if (hba->ops->get_config(hba, &iop_config)) { 1373 device_printf(dev, "get iop config failed.\n"); 1374 goto get_config_failed; 1375 } 1376 1377 hba->firmware_version = iop_config.firmware_version; 1378 hba->interface_version = iop_config.interface_version; 1379 hba->max_requests = iop_config.max_requests; 1380 hba->max_devices = iop_config.max_devices; 1381 hba->max_request_size = iop_config.request_size; 1382 hba->max_sg_count = iop_config.max_sg_count; 1383 1384 if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 1385 4, /* alignment */ 1386 BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ 1387 BUS_SPACE_MAXADDR, /* lowaddr */ 1388 BUS_SPACE_MAXADDR, /* highaddr */ 1389 NULL, NULL, /* filter, filterarg */ 1390 PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */ 1391 hba->max_sg_count, /* nsegments */ 1392 0x20000, /* maxsegsize */ 1393 BUS_DMA_ALLOCNOW, /* flags */ 1394 #if __FreeBSD_version>502000 1395 busdma_lock_mutex, /* lockfunc */ 1396 &hba->lock, /* lockfuncarg */ 1397 #endif 1398 &hba->io_dmat /* tag */)) 1399 { 1400 device_printf(dev, "alloc io_dmat failed\n"); 1401 goto get_config_failed; 1402 } 1403 1404 if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 1405 1, /* alignment */ 1406 0, /* boundary */ 1407 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1408 BUS_SPACE_MAXADDR, /* highaddr */ 1409 NULL, NULL, /* filter, filterarg */ 1410 HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20, 1411 1, /* nsegments */ 1412 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1413 0, /* flags */ 1414 #if __FreeBSD_version>502000 1415 NULL, /* lockfunc */ 1416 NULL, /* lockfuncarg */ 1417 #endif 1418 &hba->srb_dmat /* tag */)) 1419 { 1420 device_printf(dev, "alloc srb_dmat failed\n"); 1421 goto destroy_io_dmat; 1422 } 1423 1424 if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr, 1425 #if __FreeBSD_version>501000 1426 BUS_DMA_WAITOK | BUS_DMA_COHERENT, 1427 #else 1428 BUS_DMA_WAITOK, 1429 #endif 1430 &hba->srb_dmamap) != 0) 1431 { 1432 device_printf(dev, "srb bus_dmamem_alloc failed!\n"); 1433 goto destroy_srb_dmat; 1434 } 1435 1436 if (bus_dmamap_load(hba->srb_dmat, 1437 hba->srb_dmamap, hba->uncached_ptr, 1438 (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20, 1439 hptiop_map_srb, hba, 0)) 1440 { 1441 device_printf(dev, "bus_dmamap_load failed!\n"); 1442 goto srb_dmamem_free; 1443 } 1444 1445 if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) { 1446 device_printf(dev, "cam_simq_alloc failed\n"); 1447 goto srb_dmamap_unload; 1448 } 1449 1450 #if __FreeBSD_version <700000 1451 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name, 1452 hba, unit, hba->max_requests - 1, 1, devq); 1453 #else 1454 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name, 1455 hba, unit, &Giant, hba->max_requests - 1, 1, devq); 1456 #endif 1457 if (!hba->sim) { 1458 device_printf(dev, "cam_sim_alloc failed\n"); 1459 cam_simq_free(devq); 1460 goto srb_dmamap_unload; 1461 } 1462 #if __FreeBSD_version <700000 1463 if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS) 1464 #else 1465 if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS) 1466 #endif 1467 { 1468 device_printf(dev, "xpt_bus_register failed\n"); 1469 goto free_cam_sim; 1470 } 1471 1472 if (xpt_create_path(&hba->path, /*periph */ NULL, 1473 cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, 1474 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1475 device_printf(dev, "xpt_create_path failed\n"); 1476 goto deregister_xpt_bus; 1477 } 1478 1479 bzero(&set_config, sizeof(set_config)); 1480 set_config.iop_id = unit; 1481 set_config.vbus_id = cam_sim_path(hba->sim); 1482 set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE; 1483 1484 if (hba->ops->set_config(hba, &set_config)) { 1485 device_printf(dev, "set iop config failed.\n"); 1486 goto free_hba_path; 1487 } 1488 1489 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); 1490 ccb.ccb_h.func_code = XPT_SASYNC_CB; 1491 ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE); 1492 ccb.callback = hptiop_async; 1493 ccb.callback_arg = hba->sim; 1494 xpt_action((union ccb *)&ccb); 1495 1496 rid = 0; 1497 if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, 1498 &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { 1499 device_printf(dev, "allocate irq failed!\n"); 1500 goto free_hba_path; 1501 } 1502 1503 #if __FreeBSD_version <700000 1504 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, 1505 hptiop_pci_intr, hba, &hba->irq_handle)) 1506 #else 1507 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, 1508 NULL, hptiop_pci_intr, hba, &hba->irq_handle)) 1509 #endif 1510 { 1511 device_printf(dev, "allocate intr function failed!\n"); 1512 goto free_irq_resource; 1513 } 1514 1515 if (hptiop_send_sync_msg(hba, 1516 IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { 1517 device_printf(dev, "fail to start background task\n"); 1518 goto teartown_irq_resource; 1519 } 1520 1521 hba->ops->enable_intr(hba); 1522 1523 hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit, 1524 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/, 1525 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit); 1526 1527 #if __FreeBSD_version < 503000 1528 hba->ioctl_dev->si_drv1 = hba; 1529 #endif 1530 1531 return 0; 1532 1533 1534 teartown_irq_resource: 1535 bus_teardown_intr(dev, hba->irq_res, hba->irq_handle); 1536 1537 free_irq_resource: 1538 bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res); 1539 1540 free_hba_path: 1541 xpt_free_path(hba->path); 1542 1543 deregister_xpt_bus: 1544 xpt_bus_deregister(cam_sim_path(hba->sim)); 1545 1546 free_cam_sim: 1547 cam_sim_free(hba->sim, /*free devq*/ TRUE); 1548 1549 srb_dmamap_unload: 1550 if (hba->uncached_ptr) 1551 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); 1552 1553 srb_dmamem_free: 1554 if (hba->uncached_ptr) 1555 bus_dmamem_free(hba->srb_dmat, 1556 hba->uncached_ptr, hba->srb_dmamap); 1557 1558 destroy_srb_dmat: 1559 if (hba->srb_dmat) 1560 bus_dma_tag_destroy(hba->srb_dmat); 1561 1562 destroy_io_dmat: 1563 if (hba->io_dmat) 1564 bus_dma_tag_destroy(hba->io_dmat); 1565 1566 get_config_failed: 1567 if (hba->ops->internal_memfree) 1568 hba->ops->internal_memfree(hba); 1569 1570 destroy_parent_tag: 1571 if (hba->parent_dmat) 1572 bus_dma_tag_destroy(hba->parent_dmat); 1573 1574 release_pci_res: 1575 if (hba->ops->release_pci_res) 1576 hba->ops->release_pci_res(hba); 1577 1578 return ENXIO; 1579 } 1580 1581 static int hptiop_detach(device_t dev) 1582 { 1583 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); 1584 int i; 1585 int error = EBUSY; 1586 1587 hptiop_lock_adapter(hba); 1588 for (i = 0; i < hba->max_devices; i++) 1589 if (hptiop_os_query_remove_device(hba, i)) { 1590 device_printf(dev, "%d file system is busy. id=%d", 1591 hba->pciunit, i); 1592 goto out; 1593 } 1594 1595 if ((error = hptiop_shutdown(dev)) != 0) 1596 goto out; 1597 if (hptiop_send_sync_msg(hba, 1598 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000)) 1599 goto out; 1600 1601 hptiop_release_resource(hba); 1602 error = 0; 1603 out: 1604 hptiop_unlock_adapter(hba); 1605 return error; 1606 } 1607 1608 static int hptiop_shutdown(device_t dev) 1609 { 1610 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); 1611 1612 int error = 0; 1613 1614 if (hba->flag & HPT_IOCTL_FLAG_OPEN) { 1615 device_printf(dev, "%d device is busy", hba->pciunit); 1616 return EBUSY; 1617 } 1618 1619 hba->ops->disable_intr(hba); 1620 1621 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) 1622 error = EBUSY; 1623 1624 return error; 1625 } 1626 1627 static void hptiop_pci_intr(void *arg) 1628 { 1629 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; 1630 hptiop_lock_adapter(hba); 1631 hba->ops->iop_intr(hba); 1632 hptiop_unlock_adapter(hba); 1633 } 1634 1635 static void hptiop_poll(struct cam_sim *sim) 1636 { 1637 hptiop_pci_intr(cam_sim_softc(sim)); 1638 } 1639 1640 static void hptiop_async(void * callback_arg, u_int32_t code, 1641 struct cam_path * path, void * arg) 1642 { 1643 } 1644 1645 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba) 1646 { 1647 BUS_SPACE_WRT4_ITL(outbound_intmask, 1648 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0)); 1649 } 1650 1651 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba) 1652 { 1653 u_int32_t int_mask; 1654 1655 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); 1656 1657 int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE 1658 | MVIOP_MU_OUTBOUND_INT_MSG; 1659 BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); 1660 } 1661 1662 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba) 1663 { 1664 u_int32_t int_mask; 1665 1666 int_mask = BUS_SPACE_RD4_ITL(outbound_intmask); 1667 1668 int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0; 1669 BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask); 1670 BUS_SPACE_RD4_ITL(outbound_intstatus); 1671 } 1672 1673 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba) 1674 { 1675 u_int32_t int_mask; 1676 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); 1677 1678 int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG 1679 | MVIOP_MU_OUTBOUND_INT_POSTQUEUE); 1680 BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); 1681 BUS_SPACE_RD4_MV0(outbound_intmask); 1682 } 1683 1684 static int hptiop_reset_adapter(struct hpt_iop_hba * hba) 1685 { 1686 return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); 1687 } 1688 1689 static void *hptiop_get_srb(struct hpt_iop_hba * hba) 1690 { 1691 struct hpt_iop_srb * srb; 1692 1693 if (hba->srb_list) { 1694 srb = hba->srb_list; 1695 hba->srb_list = srb->next; 1696 return srb; 1697 } 1698 1699 return NULL; 1700 } 1701 1702 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb) 1703 { 1704 srb->next = hba->srb_list; 1705 hba->srb_list = srb; 1706 } 1707 1708 static void hptiop_action(struct cam_sim *sim, union ccb *ccb) 1709 { 1710 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim); 1711 struct hpt_iop_srb * srb; 1712 1713 switch (ccb->ccb_h.func_code) { 1714 1715 case XPT_SCSI_IO: 1716 hptiop_lock_adapter(hba); 1717 if (ccb->ccb_h.target_lun != 0 || 1718 ccb->ccb_h.target_id >= hba->max_devices || 1719 (ccb->ccb_h.flags & CAM_CDB_PHYS)) 1720 { 1721 ccb->ccb_h.status = CAM_TID_INVALID; 1722 xpt_done(ccb); 1723 goto scsi_done; 1724 } 1725 1726 if ((srb = hptiop_get_srb(hba)) == NULL) { 1727 device_printf(hba->pcidev, "srb allocated failed"); 1728 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1729 xpt_done(ccb); 1730 goto scsi_done; 1731 } 1732 1733 srb->ccb = ccb; 1734 1735 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) 1736 hptiop_post_scsi_command(srb, NULL, 0, 0); 1737 else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1738 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1739 int error; 1740 1741 error = bus_dmamap_load(hba->io_dmat, 1742 srb->dma_map, 1743 ccb->csio.data_ptr, 1744 ccb->csio.dxfer_len, 1745 hptiop_post_scsi_command, 1746 srb, 0); 1747 1748 if (error && error != EINPROGRESS) { 1749 device_printf(hba->pcidev, 1750 "%d bus_dmamap_load error %d", 1751 hba->pciunit, error); 1752 xpt_freeze_simq(hba->sim, 1); 1753 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1754 invalid: 1755 hptiop_free_srb(hba, srb); 1756 xpt_done(ccb); 1757 goto scsi_done; 1758 } 1759 } 1760 else { 1761 device_printf(hba->pcidev, 1762 "CAM_DATA_PHYS not supported"); 1763 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1764 goto invalid; 1765 } 1766 } 1767 else { 1768 struct bus_dma_segment *segs; 1769 1770 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 || 1771 (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1772 device_printf(hba->pcidev, "SCSI cmd failed"); 1773 ccb->ccb_h.status=CAM_PROVIDE_FAIL; 1774 goto invalid; 1775 } 1776 1777 segs = (struct bus_dma_segment *)ccb->csio.data_ptr; 1778 hptiop_post_scsi_command(srb, segs, 1779 ccb->csio.sglist_cnt, 0); 1780 } 1781 1782 scsi_done: 1783 hptiop_unlock_adapter(hba); 1784 return; 1785 1786 case XPT_RESET_BUS: 1787 device_printf(hba->pcidev, "reset adapter"); 1788 hptiop_lock_adapter(hba); 1789 hba->msg_done = 0; 1790 hptiop_reset_adapter(hba); 1791 hptiop_unlock_adapter(hba); 1792 break; 1793 1794 case XPT_GET_TRAN_SETTINGS: 1795 case XPT_SET_TRAN_SETTINGS: 1796 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1797 break; 1798 1799 case XPT_CALC_GEOMETRY: 1800 ccb->ccg.heads = 255; 1801 ccb->ccg.secs_per_track = 63; 1802 ccb->ccg.cylinders = ccb->ccg.volume_size / 1803 (ccb->ccg.heads * ccb->ccg.secs_per_track); 1804 ccb->ccb_h.status = CAM_REQ_CMP; 1805 break; 1806 1807 case XPT_PATH_INQ: 1808 { 1809 struct ccb_pathinq *cpi = &ccb->cpi; 1810 1811 cpi->version_num = 1; 1812 cpi->hba_inquiry = PI_SDTR_ABLE; 1813 cpi->target_sprt = 0; 1814 cpi->hba_misc = PIM_NOBUSRESET; 1815 cpi->hba_eng_cnt = 0; 1816 cpi->max_target = hba->max_devices; 1817 cpi->max_lun = 0; 1818 cpi->unit_number = cam_sim_unit(sim); 1819 cpi->bus_id = cam_sim_bus(sim); 1820 cpi->initiator_id = hba->max_devices; 1821 cpi->base_transfer_speed = 3300; 1822 1823 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 1824 strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); 1825 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 1826 cpi->transport = XPORT_SPI; 1827 cpi->transport_version = 2; 1828 cpi->protocol = PROTO_SCSI; 1829 cpi->protocol_version = SCSI_REV_2; 1830 cpi->ccb_h.status = CAM_REQ_CMP; 1831 break; 1832 } 1833 1834 default: 1835 ccb->ccb_h.status = CAM_REQ_INVALID; 1836 break; 1837 } 1838 1839 xpt_done(ccb); 1840 return; 1841 } 1842 1843 static void hptiop_post_req_itl(struct hpt_iop_hba *hba, 1844 struct hpt_iop_srb *srb, 1845 bus_dma_segment_t *segs, int nsegs) 1846 { 1847 int idx; 1848 union ccb *ccb = srb->ccb; 1849 u_int8_t *cdb; 1850 1851 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 1852 cdb = ccb->csio.cdb_io.cdb_ptr; 1853 else 1854 cdb = ccb->csio.cdb_io.cdb_bytes; 1855 1856 KdPrint(("ccb=%p %x-%x-%x\n", 1857 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2))); 1858 1859 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { 1860 u_int32_t iop_req32; 1861 struct hpt_iop_request_scsi_command req; 1862 1863 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue); 1864 1865 if (iop_req32 == IOPMU_QUEUE_EMPTY) { 1866 device_printf(hba->pcidev, "invaild req offset\n"); 1867 ccb->ccb_h.status = CAM_BUSY; 1868 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 1869 hptiop_free_srb(hba, srb); 1870 xpt_done(ccb); 1871 return; 1872 } 1873 1874 if (ccb->csio.dxfer_len && nsegs > 0) { 1875 struct hpt_iopsg *psg = req.sg_list; 1876 for (idx = 0; idx < nsegs; idx++, psg++) { 1877 psg->pci_address = (u_int64_t)segs[idx].ds_addr; 1878 psg->size = segs[idx].ds_len; 1879 psg->eot = 0; 1880 } 1881 psg[-1].eot = 1; 1882 } 1883 1884 bcopy(cdb, req.cdb, ccb->csio.cdb_len); 1885 1886 req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) 1887 + nsegs*sizeof(struct hpt_iopsg); 1888 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; 1889 req.header.flags = 0; 1890 req.header.result = IOP_RESULT_PENDING; 1891 req.header.context = (u_int64_t)(unsigned long)srb; 1892 req.dataxfer_length = ccb->csio.dxfer_len; 1893 req.channel = 0; 1894 req.target = ccb->ccb_h.target_id; 1895 req.lun = ccb->ccb_h.target_lun; 1896 1897 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32, 1898 (u_int8_t *)&req, req.header.size); 1899 1900 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1901 bus_dmamap_sync(hba->io_dmat, 1902 srb->dma_map, BUS_DMASYNC_PREREAD); 1903 } 1904 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) 1905 bus_dmamap_sync(hba->io_dmat, 1906 srb->dma_map, BUS_DMASYNC_PREWRITE); 1907 1908 BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32); 1909 } else { 1910 struct hpt_iop_request_scsi_command *req; 1911 1912 req = (struct hpt_iop_request_scsi_command *)srb; 1913 if (ccb->csio.dxfer_len && nsegs > 0) { 1914 struct hpt_iopsg *psg = req->sg_list; 1915 for (idx = 0; idx < nsegs; idx++, psg++) { 1916 psg->pci_address = 1917 (u_int64_t)segs[idx].ds_addr; 1918 psg->size = segs[idx].ds_len; 1919 psg->eot = 0; 1920 } 1921 psg[-1].eot = 1; 1922 } 1923 1924 bcopy(cdb, req->cdb, ccb->csio.cdb_len); 1925 1926 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; 1927 req->header.result = IOP_RESULT_PENDING; 1928 req->dataxfer_length = ccb->csio.dxfer_len; 1929 req->channel = 0; 1930 req->target = ccb->ccb_h.target_id; 1931 req->lun = ccb->ccb_h.target_lun; 1932 req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) 1933 + nsegs*sizeof(struct hpt_iopsg); 1934 req->header.context = (u_int64_t)srb->index | 1935 IOPMU_QUEUE_ADDR_HOST_BIT; 1936 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; 1937 1938 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1939 bus_dmamap_sync(hba->io_dmat, 1940 srb->dma_map, BUS_DMASYNC_PREREAD); 1941 }else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1942 bus_dmamap_sync(hba->io_dmat, 1943 srb->dma_map, BUS_DMASYNC_PREWRITE); 1944 } 1945 1946 if (hba->firmware_version > 0x01020000 1947 || hba->interface_version > 0x01020000) { 1948 u_int32_t size_bits; 1949 1950 if (req->header.size < 256) 1951 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT; 1952 else if (req->header.size < 512) 1953 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT; 1954 else 1955 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT 1956 | IOPMU_QUEUE_ADDR_HOST_BIT; 1957 1958 BUS_SPACE_WRT4_ITL(inbound_queue, 1959 (u_int32_t)srb->phy_addr | size_bits); 1960 } else 1961 BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr 1962 |IOPMU_QUEUE_ADDR_HOST_BIT); 1963 } 1964 } 1965 1966 static void hptiop_post_req_mv(struct hpt_iop_hba *hba, 1967 struct hpt_iop_srb *srb, 1968 bus_dma_segment_t *segs, int nsegs) 1969 { 1970 int idx, size; 1971 union ccb *ccb = srb->ccb; 1972 u_int8_t *cdb; 1973 struct hpt_iop_request_scsi_command *req; 1974 u_int64_t req_phy; 1975 1976 req = (struct hpt_iop_request_scsi_command *)srb; 1977 req_phy = srb->phy_addr; 1978 1979 if (ccb->csio.dxfer_len && nsegs > 0) { 1980 struct hpt_iopsg *psg = req->sg_list; 1981 for (idx = 0; idx < nsegs; idx++, psg++) { 1982 psg->pci_address = (u_int64_t)segs[idx].ds_addr; 1983 psg->size = segs[idx].ds_len; 1984 psg->eot = 0; 1985 } 1986 psg[-1].eot = 1; 1987 } 1988 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 1989 cdb = ccb->csio.cdb_io.cdb_ptr; 1990 else 1991 cdb = ccb->csio.cdb_io.cdb_bytes; 1992 1993 bcopy(cdb, req->cdb, ccb->csio.cdb_len); 1994 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; 1995 req->header.result = IOP_RESULT_PENDING; 1996 req->dataxfer_length = ccb->csio.dxfer_len; 1997 req->channel = 0; 1998 req->target = ccb->ccb_h.target_id; 1999 req->lun = ccb->ccb_h.target_lun; 2000 req->header.size = sizeof(struct hpt_iop_request_scsi_command) 2001 - sizeof(struct hpt_iopsg) 2002 + nsegs * sizeof(struct hpt_iopsg); 2003 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2004 bus_dmamap_sync(hba->io_dmat, 2005 srb->dma_map, BUS_DMASYNC_PREREAD); 2006 } 2007 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) 2008 bus_dmamap_sync(hba->io_dmat, 2009 srb->dma_map, BUS_DMASYNC_PREWRITE); 2010 req->header.context = (u_int64_t)srb->index 2011 << MVIOP_REQUEST_NUMBER_START_BIT 2012 | MVIOP_CMD_TYPE_SCSI; 2013 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; 2014 size = req->header.size >> 8; 2015 hptiop_mv_inbound_write(req_phy 2016 | MVIOP_MU_QUEUE_ADDR_HOST_BIT 2017 | (size > 3 ? 3 : size), hba); 2018 } 2019 2020 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs, 2021 int nsegs, int error) 2022 { 2023 struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg; 2024 union ccb *ccb = srb->ccb; 2025 struct hpt_iop_hba *hba = srb->hba; 2026 2027 if (error || nsegs > hba->max_sg_count) { 2028 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n", 2029 ccb->ccb_h.func_code, 2030 ccb->ccb_h.target_id, 2031 ccb->ccb_h.target_lun, nsegs)); 2032 ccb->ccb_h.status = CAM_BUSY; 2033 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 2034 hptiop_free_srb(hba, srb); 2035 xpt_done(ccb); 2036 return; 2037 } 2038 2039 hba->ops->post_req(hba, srb, segs, nsegs); 2040 } 2041 2042 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs, 2043 int nsegs, int error) 2044 { 2045 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; 2046 hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) 2047 & ~(u_int64_t)0x1F; 2048 hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) 2049 & ~0x1F); 2050 } 2051 2052 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs, 2053 int nsegs, int error) 2054 { 2055 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; 2056 bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F; 2057 struct hpt_iop_srb *srb, *tmp_srb; 2058 int i; 2059 2060 if (error || nsegs == 0) { 2061 device_printf(hba->pcidev, "hptiop_map_srb error"); 2062 return; 2063 } 2064 2065 /* map srb */ 2066 srb = (struct hpt_iop_srb *) 2067 (((unsigned long)hba->uncached_ptr + 0x1F) 2068 & ~(unsigned long)0x1F); 2069 2070 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { 2071 tmp_srb = (struct hpt_iop_srb *) 2072 ((char *)srb + i * HPT_SRB_MAX_SIZE); 2073 if (((unsigned long)tmp_srb & 0x1F) == 0) { 2074 if (bus_dmamap_create(hba->io_dmat, 2075 0, &tmp_srb->dma_map)) { 2076 device_printf(hba->pcidev, "dmamap create failed"); 2077 return; 2078 } 2079 2080 bzero(tmp_srb, sizeof(struct hpt_iop_srb)); 2081 tmp_srb->hba = hba; 2082 tmp_srb->index = i; 2083 if (hba->ctlcfg_ptr == 0) {/*itl iop*/ 2084 tmp_srb->phy_addr = (u_int64_t)(u_int32_t) 2085 (phy_addr >> 5); 2086 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G) 2087 tmp_srb->srb_flag = 2088 HPT_SRB_FLAG_HIGH_MEM_ACESS; 2089 } else { 2090 tmp_srb->phy_addr = phy_addr; 2091 } 2092 2093 hptiop_free_srb(hba, tmp_srb); 2094 hba->srb[i] = tmp_srb; 2095 phy_addr += HPT_SRB_MAX_SIZE; 2096 } 2097 else { 2098 device_printf(hba->pcidev, "invalid alignment"); 2099 return; 2100 } 2101 } 2102 } 2103 2104 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg) 2105 { 2106 hba->msg_done = 1; 2107 } 2108 2109 static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba, 2110 int target_id) 2111 { 2112 struct cam_periph *periph = NULL; 2113 struct cam_path *path; 2114 int status, retval = 0; 2115 2116 status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0); 2117 2118 if (status == CAM_REQ_CMP) { 2119 if ((periph = cam_periph_find(path, "da")) != NULL) { 2120 if (periph->refcount >= 1) { 2121 device_printf(hba->pcidev, "%d ," 2122 "target_id=0x%x," 2123 "refcount=%d", 2124 hba->pciunit, target_id, periph->refcount); 2125 retval = -1; 2126 } 2127 } 2128 xpt_free_path(path); 2129 } 2130 return retval; 2131 } 2132 2133 static void hptiop_release_resource(struct hpt_iop_hba *hba) 2134 { 2135 int i; 2136 if (hba->path) { 2137 struct ccb_setasync ccb; 2138 2139 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); 2140 ccb.ccb_h.func_code = XPT_SASYNC_CB; 2141 ccb.event_enable = 0; 2142 ccb.callback = hptiop_async; 2143 ccb.callback_arg = hba->sim; 2144 xpt_action((union ccb *)&ccb); 2145 xpt_free_path(hba->path); 2146 } 2147 2148 if (hba->sim) { 2149 xpt_bus_deregister(cam_sim_path(hba->sim)); 2150 cam_sim_free(hba->sim, TRUE); 2151 } 2152 2153 if (hba->ctlcfg_dmat) { 2154 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); 2155 bus_dmamem_free(hba->ctlcfg_dmat, 2156 hba->ctlcfg_ptr, hba->ctlcfg_dmamap); 2157 bus_dma_tag_destroy(hba->ctlcfg_dmat); 2158 } 2159 2160 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { 2161 struct hpt_iop_srb *srb = hba->srb[i]; 2162 if (srb->dma_map) 2163 bus_dmamap_destroy(hba->io_dmat, srb->dma_map); 2164 } 2165 2166 if (hba->srb_dmat) { 2167 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); 2168 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap); 2169 bus_dma_tag_destroy(hba->srb_dmat); 2170 } 2171 2172 if (hba->io_dmat) 2173 bus_dma_tag_destroy(hba->io_dmat); 2174 2175 if (hba->parent_dmat) 2176 bus_dma_tag_destroy(hba->parent_dmat); 2177 2178 if (hba->irq_handle) 2179 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); 2180 2181 if (hba->irq_res) 2182 bus_release_resource(hba->pcidev, SYS_RES_IRQ, 2183 0, hba->irq_res); 2184 2185 if (hba->bar0_res) 2186 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 2187 hba->bar0_rid, hba->bar0_res); 2188 if (hba->bar2_res) 2189 bus_release_resource(hba->pcidev, SYS_RES_MEMORY, 2190 hba->bar2_rid, hba->bar2_res); 2191 if (hba->ioctl_dev) 2192 destroy_dev(hba->ioctl_dev); 2193 } 2194