1 /* 2 ***************************************************************************************** 3 ** O.S : FreeBSD 4 ** FILE NAME : arcmsr.c 5 ** BY : Erich Chen 6 ** Description: SCSI RAID Device Driver for 7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX) SATA/SAS RAID HOST Adapter 8 ** ARCMSR RAID Host adapter 9 ** [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set] 10 ****************************************************************************************** 11 ************************************************************************ 12 ** 13 ** Copyright (c) 2004-2006 ARECA Co. Ltd. 14 ** Erich Chen, Taipei Taiwan All rights reserved. 15 ** 16 ** Redistribution and use in source and binary forms, with or without 17 ** modification, are permitted provided that the following conditions 18 ** are met: 19 ** 1. Redistributions of source code must retain the above copyright 20 ** notice, this list of conditions and the following disclaimer. 21 ** 2. Redistributions in binary form must reproduce the above copyright 22 ** notice, this list of conditions and the following disclaimer in the 23 ** documentation and/or other materials provided with the distribution. 24 ** 3. The name of the author may not be used to endorse or promote products 25 ** derived from this software without specific prior written permission. 26 ** 27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT 32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 ************************************************************************** 38 ** History 39 ** 40 ** REV# DATE NAME DESCRIPTION 41 ** 1.00.00.00 3/31/2004 Erich Chen First release 42 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error 43 ** 1.20.00.03 4/19/2005 Erich Chen add SATA 24 Ports adapter type support 44 ** clean unused function 45 ** 1.20.00.12 9/12/2005 Erich Chen bug fix with abort command handling, 46 ** firmware version check 47 ** and firmware update notify for hardware bug fix 48 ** handling if none zero high part physical address 49 ** of srb resource 50 ** 1.20.00.13 8/18/2006 Erich Chen remove pending srb and report busy 51 ** add iop message xfer 52 ** with scsi pass-through command 53 ** add new device id of sas raid adapters 54 ** code fit for SPARC64 & PPC 55 ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report 56 ** and cause g_vfs_done() read write error 57 58 ****************************************************************************************** 59 * $FreeBSD$ 60 */ 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/malloc.h> 64 #include <sys/kernel.h> 65 #include <sys/bus.h> 66 #include <sys/queue.h> 67 #include <sys/stat.h> 68 #include <sys/devicestat.h> 69 #include <sys/kthread.h> 70 #include <sys/module.h> 71 #include <sys/proc.h> 72 #include <sys/lock.h> 73 #include <sys/sysctl.h> 74 #include <sys/poll.h> 75 #include <sys/ioccom.h> 76 #include <vm/vm.h> 77 #include <vm/vm_param.h> 78 #include <vm/pmap.h> 79 80 #include <isa/rtc.h> 81 82 #include <machine/bus.h> 83 #include <machine/resource.h> 84 #include <machine/atomic.h> 85 #include <sys/conf.h> 86 #include <sys/rman.h> 87 88 #include <cam/cam.h> 89 #include <cam/cam_ccb.h> 90 #include <cam/cam_sim.h> 91 #include <cam/cam_xpt_sim.h> 92 #include <cam/cam_debug.h> 93 #include <cam/scsi/scsi_all.h> 94 #include <cam/scsi/scsi_message.h> 95 /* 96 ************************************************************************** 97 ************************************************************************** 98 */ 99 #if __FreeBSD_version >= 500005 100 #include <sys/selinfo.h> 101 #include <sys/mutex.h> 102 #include <sys/endian.h> 103 #include <dev/pci/pcivar.h> 104 #include <dev/pci/pcireg.h> 105 #define ARCMSR_LOCK_INIT(l, s) mtx_init(l, s, NULL, MTX_DEF|MTX_RECURSE) 106 #define ARCMSR_LOCK_ACQUIRE(l) mtx_lock(l) 107 #define ARCMSR_LOCK_RELEASE(l) mtx_unlock(l) 108 #define ARCMSR_LOCK_TRY(l) mtx_trylock(l) 109 #define arcmsr_htole32(x) htole32(x) 110 typedef struct mtx arcmsr_lock_t; 111 #else 112 #include <sys/select.h> 113 #include <pci/pcivar.h> 114 #include <pci/pcireg.h> 115 #define ARCMSR_LOCK_INIT(l, s) simple_lock_init(l) 116 #define ARCMSR_LOCK_ACQUIRE(l) simple_lock(l) 117 #define ARCMSR_LOCK_RELEASE(l) simple_unlock(l) 118 #define ARCMSR_LOCK_TRY(l) simple_lock_try(l) 119 #define arcmsr_htole32(x) (x) 120 typedef struct simplelock arcmsr_lock_t; 121 #endif 122 #include <dev/arcmsr/arcmsr.h> 123 #define ARCMSR_SRBS_POOL_SIZE ((sizeof(struct CommandControlBlock) * ARCMSR_MAX_FREESRB_NUM)+0x20) 124 /* 125 ************************************************************************** 126 ************************************************************************** 127 */ 128 #define CHIP_REG_READ32(r) bus_space_read_4(acb->btag, acb->bhandle, offsetof(struct MessageUnit,r)) 129 #define CHIP_REG_WRITE32(r,d) bus_space_write_4(acb->btag, acb->bhandle, offsetof(struct MessageUnit,r), d) 130 /* 131 ************************************************************************** 132 ************************************************************************** 133 */ 134 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb); 135 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb); 136 static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb); 137 static u_int32_t arcmsr_probe(device_t dev); 138 static u_int32_t arcmsr_attach(device_t dev); 139 static u_int32_t arcmsr_detach(device_t dev); 140 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg); 141 static void arcmsr_iop_parking(struct AdapterControlBlock *acb); 142 static void arcmsr_shutdown(device_t dev); 143 static void arcmsr_interrupt(void *arg); 144 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb); 145 static void arcmsr_free_resource(struct AdapterControlBlock *acb); 146 static void arcmsr_bus_reset(struct AdapterControlBlock *acb); 147 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); 148 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); 149 static void arcmsr_iop_init(struct AdapterControlBlock *acb); 150 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb); 151 static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb); 152 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb); 153 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag); 154 static void arcmsr_iop_reset(struct AdapterControlBlock *acb); 155 static void arcmsr_report_sense_info(struct CommandControlBlock *srb); 156 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg); 157 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb); 158 static int arcmsr_resume(device_t dev); 159 static int arcmsr_suspend(device_t dev); 160 /* 161 ************************************************************************** 162 ************************************************************************** 163 */ 164 static void UDELAY(u_int32_t us) { DELAY(us); } 165 /* 166 ************************************************************************** 167 ************************************************************************** 168 */ 169 static bus_dmamap_callback_t arcmsr_map_freesrb; 170 static bus_dmamap_callback_t arcmsr_executesrb; 171 /* 172 ************************************************************************** 173 ************************************************************************** 174 */ 175 static d_open_t arcmsr_open; 176 static d_close_t arcmsr_close; 177 static d_ioctl_t arcmsr_ioctl; 178 179 static device_method_t arcmsr_methods[]={ 180 DEVMETHOD(device_probe, arcmsr_probe), 181 DEVMETHOD(device_attach, arcmsr_attach), 182 DEVMETHOD(device_detach, arcmsr_detach), 183 DEVMETHOD(device_shutdown, arcmsr_shutdown), 184 DEVMETHOD(device_suspend, arcmsr_suspend), 185 DEVMETHOD(device_resume, arcmsr_resume), 186 187 DEVMETHOD(bus_print_child, bus_generic_print_child), 188 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 189 { 0, 0 } 190 }; 191 192 static driver_t arcmsr_driver={ 193 "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock) 194 }; 195 196 static devclass_t arcmsr_devclass; 197 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0); 198 MODULE_DEPEND(arcmsr, pci, 1, 1, 1); 199 MODULE_DEPEND(arcmsr, cam, 1, 1, 1); 200 #ifndef BUS_DMA_COHERENT 201 #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ 202 #endif 203 #if __FreeBSD_version >= 501000 204 #ifndef D_NEEDGIANT 205 #define D_NEEDGIANT 0x00400000 /* driver want Giant */ 206 #endif 207 #ifndef D_VERSION 208 #define D_VERSION 0x20011966 209 #endif 210 static struct cdevsw arcmsr_cdevsw={ 211 #if __FreeBSD_version > 502010 212 .d_version = D_VERSION, 213 #endif 214 .d_flags = D_NEEDGIANT, 215 .d_open = arcmsr_open, /* open */ 216 .d_close = arcmsr_close, /* close */ 217 .d_ioctl = arcmsr_ioctl, /* ioctl */ 218 .d_name = "arcmsr", /* name */ 219 }; 220 #else 221 #define ARCMSR_CDEV_MAJOR 180 222 223 static struct cdevsw arcmsr_cdevsw = { 224 arcmsr_open, /* open */ 225 arcmsr_close, /* close */ 226 noread, /* read */ 227 nowrite, /* write */ 228 arcmsr_ioctl, /* ioctl */ 229 nopoll, /* poll */ 230 nommap, /* mmap */ 231 nostrategy, /* strategy */ 232 "arcmsr", /* name */ 233 ARCMSR_CDEV_MAJOR, /* major */ 234 nodump, /* dump */ 235 nopsize, /* psize */ 236 0 /* flags */ 237 }; 238 #endif 239 240 #if __FreeBSD_version < 500005 241 static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc) 242 #else 243 #if __FreeBSD_version < 503000 244 static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc) 245 #else 246 static int arcmsr_open(struct cdev *dev, int flags, int fmt, d_thread_t *proc) 247 #endif 248 #endif 249 { 250 #if __FreeBSD_version < 503000 251 struct AdapterControlBlock *acb=dev->si_drv1; 252 #else 253 int unit = minor(dev); 254 struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); 255 #endif 256 if(acb==NULL) { 257 return ENXIO; 258 } 259 return 0; 260 } 261 /* 262 ************************************************************************** 263 ************************************************************************** 264 */ 265 #if __FreeBSD_version < 500005 266 static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc) 267 #else 268 #if __FreeBSD_version < 503000 269 static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc) 270 #else 271 static int arcmsr_close(struct cdev *dev, int flags, int fmt, d_thread_t *proc) 272 #endif 273 #endif 274 { 275 #if __FreeBSD_version < 503000 276 struct AdapterControlBlock *acb=dev->si_drv1; 277 #else 278 int unit = minor(dev); 279 struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); 280 #endif 281 if(acb==NULL) { 282 return ENXIO; 283 } 284 return 0; 285 } 286 /* 287 ************************************************************************** 288 ************************************************************************** 289 */ 290 #if __FreeBSD_version < 500005 291 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc) 292 #else 293 #if __FreeBSD_version < 503000 294 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) 295 #else 296 static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, d_thread_t *proc) 297 #endif 298 #endif 299 { 300 #if __FreeBSD_version < 503000 301 struct AdapterControlBlock *acb=dev->si_drv1; 302 #else 303 int unit = minor(dev); 304 struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); 305 #endif 306 307 if(acb==NULL) { 308 return ENXIO; 309 } 310 return(arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg)); 311 } 312 /* 313 ******************************************************************************* 314 ******************************************************************************* 315 */ 316 static int arcmsr_suspend(device_t dev) 317 { 318 struct AdapterControlBlock *acb = device_get_softc(dev); 319 u_int32_t intmask_org; 320 321 /* disable all outbound interrupt */ 322 intmask_org=CHIP_REG_READ32(outbound_intmask); 323 CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE)); 324 /* flush controller */ 325 arcmsr_iop_parking(acb); 326 return(0); 327 } 328 /* 329 ******************************************************************************* 330 ******************************************************************************* 331 */ 332 static int arcmsr_resume(device_t dev) 333 { 334 struct AdapterControlBlock *acb = device_get_softc(dev); 335 336 arcmsr_iop_init(acb); 337 return(0); 338 } 339 /* 340 ********************************************************************************* 341 ********************************************************************************* 342 */ 343 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) 344 { 345 struct AdapterControlBlock *acb; 346 u_int8_t target_id, target_lun; 347 struct cam_sim * sim; 348 349 sim=(struct cam_sim *) cb_arg; 350 acb =(struct AdapterControlBlock *) cam_sim_softc(sim); 351 switch (code) { 352 case AC_LOST_DEVICE: 353 target_id=xpt_path_target_id(path); 354 target_lun=xpt_path_lun_id(path); 355 if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { 356 break; 357 } 358 printf("%s:scsi id%d lun%d device lost \n" 359 , device_get_name(acb->pci_dev), target_id, target_lun); 360 break; 361 default: 362 break; 363 } 364 } 365 /* 366 ************************************************************************ 367 ************************************************************************ 368 */ 369 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 370 { 371 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); 372 if(arcmsr_wait_msgint_ready(acb)) { 373 printf("arcmsr%d: wait 'flush adapter cache' timeout \n" 374 , acb->pci_unit); 375 } 376 return; 377 } 378 /* 379 ********************************************************************** 380 ********************************************************************** 381 */ 382 static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb) 383 { 384 u_int32_t Index; 385 u_int8_t Retries=0x00; 386 387 do { 388 for(Index=0; Index < 100; Index++) { 389 if(CHIP_REG_READ32(outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 390 /*clear interrupt*/ 391 CHIP_REG_WRITE32(outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); 392 return 0x00; 393 } 394 /* one us delay */ 395 UDELAY(10000); 396 }/*max 1 seconds*/ 397 }while(Retries++ < 20);/*max 20 sec*/ 398 return 0xff; 399 } 400 /* 401 ********************************************************************** 402 ********************************************************************** 403 */ 404 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag) 405 { 406 struct AdapterControlBlock *acb=srb->acb; 407 union ccb * pccb=srb->pccb; 408 409 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 410 bus_dmasync_op_t op; 411 412 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 413 op = BUS_DMASYNC_POSTREAD; 414 } else { 415 op = BUS_DMASYNC_POSTWRITE; 416 } 417 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 418 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 419 } 420 ARCMSR_LOCK_ACQUIRE(&acb->workingQ_done_lock); 421 if(stand_flag==1) { 422 atomic_subtract_int(&acb->srboutstandingcount, 1); 423 if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && ( 424 acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) { 425 acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN; 426 pccb->ccb_h.status |= CAM_RELEASE_SIMQ; 427 } 428 } 429 srb->startdone=ARCMSR_SRB_DONE; 430 srb->srb_flags=0; 431 acb->srbworkingQ[acb->workingsrb_doneindex]=srb; 432 acb->workingsrb_doneindex++; 433 acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; 434 ARCMSR_LOCK_RELEASE(&acb->workingQ_done_lock); 435 xpt_done(pccb); 436 return; 437 } 438 /* 439 ********************************************************************** 440 ********************************************************************** 441 */ 442 static void arcmsr_report_sense_info(struct CommandControlBlock *srb) 443 { 444 union ccb * pccb=srb->pccb; 445 446 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 447 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 448 if(&pccb->csio.sense_data) { 449 memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); 450 memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, 451 get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); 452 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ 453 pccb->ccb_h.status |= CAM_AUTOSNS_VALID; 454 } 455 return; 456 } 457 /* 458 ********************************************************************* 459 ** 460 ********************************************************************* 461 */ 462 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 463 { 464 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); 465 if(arcmsr_wait_msgint_ready(acb)) { 466 printf("arcmsr%d: wait 'abort all outstanding command' timeout \n" 467 , acb->pci_unit); 468 } 469 return; 470 } 471 /* 472 **************************************************************************** 473 **************************************************************************** 474 */ 475 static void arcmsr_iop_reset(struct AdapterControlBlock *acb) 476 { 477 struct CommandControlBlock *srb; 478 u_int32_t intmask_org, mask; 479 u_int32_t i=0; 480 481 if(acb->srboutstandingcount!=0) 482 { 483 /* talk to iop 331 outstanding command aborted*/ 484 arcmsr_abort_allcmd(acb); 485 UDELAY(3000*1000);/*wait for 3 sec for all command aborted*/ 486 /* disable all outbound interrupt */ 487 intmask_org=CHIP_REG_READ32(outbound_intmask); 488 CHIP_REG_WRITE32(outbound_intmask 489 , intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 490 /*clear all outbound posted Q*/ 491 for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) { 492 CHIP_REG_READ32(outbound_queueport); 493 } 494 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 495 srb=acb->psrb_pool[i]; 496 if(srb->startdone==ARCMSR_SRB_START) { 497 srb->startdone=ARCMSR_SRB_ABORTED; 498 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 499 arcmsr_srb_complete(srb, 1); 500 } 501 } 502 /* enable all outbound interrupt */ 503 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 504 |ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 505 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 506 /* post abort all outstanding command message to RAID controller */ 507 } 508 atomic_set_int(&acb->srboutstandingcount, 0); 509 acb->workingsrb_doneindex=0; 510 acb->workingsrb_startindex=0; 511 return; 512 } 513 /* 514 ********************************************************************** 515 ********************************************************************** 516 */ 517 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg) 518 { 519 struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb; 520 u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u; 521 u_int32_t address_lo, address_hi; 522 union ccb * pccb=srb->pccb; 523 struct ccb_scsiio * pcsio= &pccb->csio; 524 u_int32_t arccdbsize=0x30; 525 526 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); 527 arcmsr_cdb->Bus=0; 528 arcmsr_cdb->TargetID=pccb->ccb_h.target_id; 529 arcmsr_cdb->LUN=pccb->ccb_h.target_lun; 530 arcmsr_cdb->Function=1; 531 arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len; 532 arcmsr_cdb->Context=(unsigned long)arcmsr_cdb; 533 bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len); 534 if(nseg != 0) { 535 struct AdapterControlBlock *acb=srb->acb; 536 bus_dmasync_op_t op; 537 u_int32_t length, i, cdb_sgcount=0; 538 539 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 540 op=BUS_DMASYNC_PREREAD; 541 } else { 542 op=BUS_DMASYNC_PREWRITE; 543 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE; 544 srb->srb_flags|=SRB_FLAG_WRITE; 545 } 546 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 547 for(i=0;i<nseg;i++) { 548 /* Get the physical address of the current data pointer */ 549 length=arcmsr_htole32(dm_segs[i].ds_len); 550 address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr)); 551 address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr)); 552 if(address_hi==0) { 553 struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge; 554 pdma_sg->address=address_lo; 555 pdma_sg->length=length; 556 psge += sizeof(struct SG32ENTRY); 557 arccdbsize += sizeof(struct SG32ENTRY); 558 } else { 559 u_int32_t sg64s_size=0, tmplength=length; 560 561 while(1) { 562 u_int64_t span4G, length0; 563 struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge; 564 565 span4G=(u_int64_t)address_lo + tmplength; 566 pdma_sg->addresshigh=address_hi; 567 pdma_sg->address=address_lo; 568 if(span4G > 0x100000000) { 569 /*see if cross 4G boundary*/ 570 length0=0x100000000-address_lo; 571 pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR; 572 address_hi=address_hi+1; 573 address_lo=0; 574 tmplength=tmplength-(u_int32_t)length0; 575 sg64s_size += sizeof(struct SG64ENTRY); 576 psge += sizeof(struct SG64ENTRY); 577 cdb_sgcount++; 578 } else { 579 pdma_sg->length=tmplength|IS_SG64_ADDR; 580 sg64s_size += sizeof(struct SG64ENTRY); 581 psge += sizeof(struct SG64ENTRY); 582 break; 583 } 584 } 585 arccdbsize += sg64s_size; 586 } 587 cdb_sgcount++; 588 } 589 arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount; 590 arcmsr_cdb->DataLength=pcsio->dxfer_len; 591 if( arccdbsize > 256) { 592 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE; 593 } 594 } 595 return; 596 } 597 /* 598 ************************************************************************** 599 ************************************************************************** 600 */ 601 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb) 602 { 603 u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr; 604 struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb; 605 606 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 607 (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD); 608 atomic_add_int(&acb->srboutstandingcount, 1); 609 srb->startdone=ARCMSR_SRB_START; 610 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { 611 CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE); 612 } else { 613 CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr); 614 } 615 return; 616 } 617 /* 618 ********************************************************************** 619 ********************************************************************** 620 */ 621 static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb) 622 { 623 u_int8_t * pQbuffer; 624 struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer; 625 u_int8_t * iop_data=(u_int8_t *)pwbuffer->data; 626 u_int32_t allxfer_len=0; 627 628 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 629 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 630 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { 631 pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex]; 632 memcpy(iop_data, pQbuffer, 1); 633 acb->wqbuf_firstindex++; 634 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 635 /*if last index number set it to 0 */ 636 iop_data++; 637 allxfer_len++; 638 } 639 pwbuffer->data_len=allxfer_len; 640 /* 641 ** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post 642 */ 643 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); 644 } 645 return; 646 } 647 /* 648 ************************************************************************ 649 ************************************************************************ 650 */ 651 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 652 { 653 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 654 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 655 if(arcmsr_wait_msgint_ready(acb)) { 656 printf("arcmsr%d: wait 'stop adapter rebulid' timeout \n" 657 , acb->pci_unit); 658 } 659 return; 660 } 661 /* 662 ************************************************************************ 663 ************************************************************************ 664 */ 665 static void arcmsr_poll(struct cam_sim * psim) 666 { 667 arcmsr_interrupt(cam_sim_softc(psim)); 668 return; 669 } 670 /* 671 ********************************************************************** 672 ********************************************************************** 673 */ 674 static void arcmsr_interrupt(void *arg) 675 { 676 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg; 677 struct CommandControlBlock *srb; 678 u_int32_t flag_srb, outbound_intstatus, outbound_doorbell; 679 680 /* 681 ********************************************* 682 ** check outbound intstatus 683 ********************************************* 684 */ 685 outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable; 686 CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/ 687 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { 688 /* 689 ********************************************* 690 ** DOORBELL 691 ********************************************* 692 */ 693 outbound_doorbell=CHIP_REG_READ32(outbound_doorbell); 694 CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */ 695 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { 696 struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer; 697 u_int8_t * iop_data=(u_int8_t *)prbuffer->data; 698 u_int8_t * pQbuffer; 699 u_int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; 700 701 /*check this iop data if overflow my rqbuffer*/ 702 rqbuf_lastindex=acb->rqbuf_lastindex; 703 rqbuf_firstindex=acb->rqbuf_firstindex; 704 iop_len=prbuffer->data_len; 705 my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); 706 if(my_empty_len>=iop_len) { 707 while(iop_len > 0) { 708 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 709 memcpy(pQbuffer, iop_data, 1); 710 acb->rqbuf_lastindex++; 711 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 712 /*if last index number set it to 0 */ 713 iop_data++; 714 iop_len--; 715 } 716 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 717 /*signature, let IOP331 know data has been readed */ 718 } else { 719 acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW; 720 } 721 } 722 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { 723 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; 724 /* 725 ********************************************* 726 ********************************************* 727 */ 728 if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) { 729 u_int8_t * pQbuffer; 730 struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer; 731 u_int8_t * iop_data=(u_int8_t *)pwbuffer->data; 732 u_int32_t allxfer_len=0; 733 734 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 735 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { 736 pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex]; 737 memcpy(iop_data, pQbuffer, 1); 738 acb->wqbuf_firstindex++; 739 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 740 /*if last index number set it to 0 */ 741 iop_data++; 742 allxfer_len++; 743 } 744 pwbuffer->data_len=allxfer_len; 745 /* 746 ** push inbound doorbell tell iop driver data write ok 747 ** and wait reply on next hwinterrupt for next Qbuffer post 748 */ 749 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); 750 } 751 if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) { 752 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; 753 } 754 } 755 } 756 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { 757 int target, lun; 758 /* 759 ***************************************************************************** 760 ** areca cdb command done 761 ***************************************************************************** 762 */ 763 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 764 while(1) { 765 if((flag_srb=CHIP_REG_READ32(outbound_queueport)) == 0xFFFFFFFF) { 766 break;/*chip FIFO no srb for completion already*/ 767 } 768 /* check if command done with no error*/ 769 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5)); 770 /*frame must be 32 bytes aligned*/ 771 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 772 if(srb->startdone==ARCMSR_SRB_ABORTED) { 773 printf("arcmsr%d: srb='%p' isr got aborted command \n" 774 , acb->pci_unit, srb); 775 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 776 arcmsr_srb_complete(srb, 1); 777 continue; 778 } 779 printf("arcmsr%d: isr get an illegal srb command done" 780 "acb='%p' srb='%p' srbacb='%p' startdone=0x%x" 781 "srboutstandingcount=%d \n", 782 acb->pci_unit, acb, srb, srb->acb, 783 srb->startdone, acb->srboutstandingcount); 784 continue; 785 } 786 target=srb->pccb->ccb_h.target_id; 787 lun=srb->pccb->ccb_h.target_lun; 788 if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) { 789 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 790 acb->devstate[target][lun]=ARECA_RAID_GOOD; 791 } 792 srb->pccb->ccb_h.status |= CAM_REQ_CMP; 793 arcmsr_srb_complete(srb, 1); 794 } else { 795 switch(srb->arcmsr_cdb.DeviceStatus) { 796 case ARCMSR_DEV_SELECT_TIMEOUT: { 797 acb->devstate[target][lun]=ARECA_RAID_GONE; 798 srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 799 arcmsr_srb_complete(srb, 1); 800 } 801 break; 802 case ARCMSR_DEV_ABORTED: 803 case ARCMSR_DEV_INIT_FAIL: { 804 acb->devstate[target][lun]=ARECA_RAID_GONE; 805 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 806 arcmsr_srb_complete(srb, 1); 807 } 808 break; 809 case SCSISTAT_CHECK_CONDITION: { 810 acb->devstate[target][lun]=ARECA_RAID_GOOD; 811 arcmsr_report_sense_info(srb); 812 arcmsr_srb_complete(srb, 1); 813 } 814 break; 815 default: 816 printf("arcmsr%d: scsi id=%d lun=%d" 817 "isr get command error done," 818 "but got unknow DeviceStatus=0x%x \n" 819 , acb->pci_unit, target, lun 820 ,srb->arcmsr_cdb.DeviceStatus); 821 acb->devstate[target][lun]=ARECA_RAID_GONE; 822 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; 823 /*unknow error or crc error just for retry*/ 824 arcmsr_srb_complete(srb, 1); 825 break; 826 } 827 } 828 } /*drain reply FIFO*/ 829 } 830 return; 831 } 832 /* 833 ******************************************************************************* 834 ** 835 ******************************************************************************* 836 */ 837 static void arcmsr_iop_parking(struct AdapterControlBlock *acb) 838 { 839 if(acb!=NULL) { 840 /* stop adapter background rebuild */ 841 if(acb->acb_flags & ACB_F_MSG_START_BGRB) { 842 arcmsr_stop_adapter_bgrb(acb); 843 arcmsr_flush_adapter_cache(acb); 844 } 845 } 846 } 847 /* 848 *********************************************************************** 849 ** 850 ************************************************************************ 851 */ 852 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) 853 { 854 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 855 u_int32_t retvalue=EINVAL; 856 857 pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg; 858 if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { 859 return retvalue; 860 } 861 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 862 switch(ioctl_cmd) { 863 case ARCMSR_MESSAGE_READ_RQBUFFER: { 864 u_int8_t * pQbuffer; 865 u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 866 u_int32_t allxfer_len=0; 867 868 while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex) && (allxfer_len<1031)) { 869 /*copy READ QBUFFER to srb*/ 870 pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex]; 871 memcpy(ptmpQbuffer, pQbuffer, 1); 872 acb->rqbuf_firstindex++; 873 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 874 /*if last index number set it to 0 */ 875 ptmpQbuffer++; 876 allxfer_len++; 877 } 878 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 879 struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer; 880 u_int8_t * iop_data=(u_int8_t *)prbuffer->data; 881 u_int32_t iop_len; 882 883 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 884 iop_len=(u_int32_t)prbuffer->data_len; 885 /*this iop data does no chance to make me overflow again here, so just do it*/ 886 while(iop_len>0) { 887 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 888 memcpy(pQbuffer, iop_data, 1); 889 acb->rqbuf_lastindex++; 890 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 891 /*if last index number set it to 0 */ 892 iop_data++; 893 iop_len--; 894 } 895 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 896 /*signature, let IOP331 know data has been readed */ 897 } 898 pcmdmessagefld->cmdmessage.Length=allxfer_len; 899 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 900 retvalue=ARCMSR_MESSAGE_SUCCESS; 901 } 902 break; 903 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 904 u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 905 u_int8_t * pQbuffer; 906 u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 907 908 user_len=pcmdmessagefld->cmdmessage.Length; 909 /*check if data xfer length of this request will overflow my array qbuffer */ 910 wqbuf_lastindex=acb->wqbuf_lastindex; 911 wqbuf_firstindex=acb->wqbuf_firstindex; 912 if(wqbuf_lastindex!=wqbuf_firstindex) { 913 arcmsr_post_Qbuffer(acb); 914 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 915 } else { 916 my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); 917 if(my_empty_len>=user_len) { 918 while(user_len>0) { 919 /*copy srb data to wqbuffer*/ 920 pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex]; 921 memcpy(pQbuffer, ptmpuserbuffer, 1); 922 acb->wqbuf_lastindex++; 923 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 924 /*if last index number set it to 0 */ 925 ptmpuserbuffer++; 926 user_len--; 927 } 928 /*post fist Qbuffer*/ 929 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 930 acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED; 931 arcmsr_post_Qbuffer(acb); 932 } 933 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 934 } else { 935 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 936 } 937 } 938 retvalue=ARCMSR_MESSAGE_SUCCESS; 939 } 940 break; 941 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 942 u_int8_t * pQbuffer=acb->rqbuffer; 943 944 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 945 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 946 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 947 /*signature, let IOP331 know data has been readed */ 948 } 949 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 950 acb->rqbuf_firstindex=0; 951 acb->rqbuf_lastindex=0; 952 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 953 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 954 retvalue=ARCMSR_MESSAGE_SUCCESS; 955 } 956 break; 957 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: 958 { 959 u_int8_t * pQbuffer=acb->wqbuffer; 960 961 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 962 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 963 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 964 /*signature, let IOP331 know data has been readed */ 965 } 966 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READED); 967 acb->wqbuf_firstindex=0; 968 acb->wqbuf_lastindex=0; 969 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 970 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 971 retvalue=ARCMSR_MESSAGE_SUCCESS; 972 } 973 break; 974 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 975 u_int8_t * pQbuffer; 976 977 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 978 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 979 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 980 /*signature, let IOP331 know data has been readed */ 981 } 982 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED 983 |ACB_F_MESSAGE_RQBUFFER_CLEARED 984 |ACB_F_MESSAGE_WQBUFFER_READED); 985 acb->rqbuf_firstindex=0; 986 acb->rqbuf_lastindex=0; 987 acb->wqbuf_firstindex=0; 988 acb->wqbuf_lastindex=0; 989 pQbuffer=acb->rqbuffer; 990 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 991 pQbuffer=acb->wqbuffer; 992 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 993 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 994 retvalue=ARCMSR_MESSAGE_SUCCESS; 995 } 996 break; 997 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 998 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F; 999 retvalue=ARCMSR_MESSAGE_SUCCESS; 1000 } 1001 break; 1002 case ARCMSR_MESSAGE_SAY_HELLO: { 1003 u_int8_t * hello_string="Hello! I am ARCMSR"; 1004 u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer; 1005 1006 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { 1007 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 1008 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1009 return ENOIOCTL; 1010 } 1011 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1012 retvalue=ARCMSR_MESSAGE_SUCCESS; 1013 } 1014 break; 1015 case ARCMSR_MESSAGE_SAY_GOODBYE: { 1016 arcmsr_iop_parking(acb); 1017 retvalue=ARCMSR_MESSAGE_SUCCESS; 1018 } 1019 break; 1020 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { 1021 arcmsr_flush_adapter_cache(acb); 1022 retvalue=ARCMSR_MESSAGE_SUCCESS; 1023 } 1024 break; 1025 } 1026 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1027 return retvalue; 1028 } 1029 /* 1030 ************************************************************************** 1031 ************************************************************************** 1032 */ 1033 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb) 1034 { 1035 struct CommandControlBlock *srb=NULL; 1036 u_int32_t workingsrb_startindex, workingsrb_doneindex; 1037 1038 ARCMSR_LOCK_ACQUIRE(&acb->workingQ_start_lock); 1039 workingsrb_doneindex=acb->workingsrb_doneindex; 1040 workingsrb_startindex=acb->workingsrb_startindex; 1041 srb=acb->srbworkingQ[workingsrb_startindex]; 1042 workingsrb_startindex++; 1043 workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; 1044 if(workingsrb_doneindex!=workingsrb_startindex) { 1045 acb->workingsrb_startindex=workingsrb_startindex; 1046 } else { 1047 srb=NULL; 1048 } 1049 ARCMSR_LOCK_RELEASE(&acb->workingQ_start_lock); 1050 return(srb); 1051 } 1052 /* 1053 ************************************************************************** 1054 ************************************************************************** 1055 */ 1056 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb) 1057 { 1058 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 1059 int retvalue = 0, transfer_len = 0; 1060 char *buffer; 1061 uint32_t controlcode = (uint32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | 1062 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | 1063 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | 1064 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[8]; 1065 /* 4 bytes: Areca io control code */ 1066 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1067 buffer = pccb->csio.data_ptr; 1068 transfer_len = pccb->csio.dxfer_len; 1069 } else { 1070 retvalue = ARCMSR_MESSAGE_FAIL; 1071 goto message_out; 1072 } 1073 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 1074 retvalue = ARCMSR_MESSAGE_FAIL; 1075 goto message_out; 1076 } 1077 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; 1078 switch(controlcode) { 1079 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1080 u_int8_t *pQbuffer; 1081 u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 1082 int32_t allxfer_len = 0; 1083 1084 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1085 && (allxfer_len < 1031)) { 1086 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1087 memcpy(ptmpQbuffer, pQbuffer, 1); 1088 acb->rqbuf_firstindex++; 1089 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 1090 ptmpQbuffer++; 1091 allxfer_len++; 1092 } 1093 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1094 struct QBUFFER *prbuffer = (struct QBUFFER *) &acb->pmu->message_rbuffer; 1095 u_int8_t *iop_data = (u_int8_t *)prbuffer->data; 1096 int32_t iop_len; 1097 1098 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1099 iop_len =(u_int32_t)prbuffer->data_len; 1100 while (iop_len > 0) { 1101 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 1102 memcpy(pQbuffer, iop_data, 1); 1103 acb->rqbuf_lastindex++; 1104 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1105 iop_data++; 1106 iop_len--; 1107 } 1108 CHIP_REG_WRITE32(inbound_doorbell, 1109 ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1110 } 1111 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1112 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1113 retvalue=ARCMSR_MESSAGE_SUCCESS; 1114 } 1115 break; 1116 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1117 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1118 u_int8_t *pQbuffer; 1119 u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 1120 1121 user_len = pcmdmessagefld->cmdmessage.Length; 1122 wqbuf_lastindex = acb->wqbuf_lastindex; 1123 wqbuf_firstindex = acb->wqbuf_firstindex; 1124 if (wqbuf_lastindex != wqbuf_firstindex) { 1125 arcmsr_post_Qbuffer(acb); 1126 /* has error report sensedata */ 1127 if(&pccb->csio.sense_data) { 1128 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 1129 /* Valid,ErrorCode */ 1130 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 1131 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 1132 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 1133 /* AdditionalSenseLength */ 1134 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 1135 /* AdditionalSenseCode */ 1136 } 1137 retvalue = ARCMSR_MESSAGE_FAIL; 1138 } else { 1139 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) 1140 &(ARCMSR_MAX_QBUFFER - 1); 1141 if (my_empty_len >= user_len) { 1142 while (user_len > 0) { 1143 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; 1144 memcpy(pQbuffer, ptmpuserbuffer, 1); 1145 acb->wqbuf_lastindex++; 1146 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1147 ptmpuserbuffer++; 1148 user_len--; 1149 } 1150 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 1151 acb->acb_flags &= 1152 ~ACB_F_MESSAGE_WQBUFFER_CLEARED; 1153 arcmsr_post_Qbuffer(acb); 1154 } 1155 } else { 1156 /* has error report sensedata */ 1157 if(&pccb->csio.sense_data) { 1158 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 1159 /* Valid,ErrorCode */ 1160 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 1161 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 1162 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 1163 /* AdditionalSenseLength */ 1164 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 1165 /* AdditionalSenseCode */ 1166 } 1167 retvalue = ARCMSR_MESSAGE_FAIL; 1168 } 1169 } 1170 } 1171 break; 1172 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 1173 u_int8_t *pQbuffer = acb->rqbuffer; 1174 1175 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1176 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1177 CHIP_REG_WRITE32(inbound_doorbell 1178 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1179 } 1180 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 1181 acb->rqbuf_firstindex = 0; 1182 acb->rqbuf_lastindex = 0; 1183 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1184 pcmdmessagefld->cmdmessage.ReturnCode = 1185 ARCMSR_MESSAGE_RETURNCODE_OK; 1186 } 1187 break; 1188 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 1189 u_int8_t *pQbuffer = acb->wqbuffer; 1190 1191 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1192 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1193 CHIP_REG_WRITE32(inbound_doorbell 1194 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1195 } 1196 acb->acb_flags |= 1197 (ACB_F_MESSAGE_WQBUFFER_CLEARED | 1198 ACB_F_MESSAGE_WQBUFFER_READED); 1199 acb->wqbuf_firstindex = 0; 1200 acb->wqbuf_lastindex = 0; 1201 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1202 pcmdmessagefld->cmdmessage.ReturnCode = 1203 ARCMSR_MESSAGE_RETURNCODE_OK; 1204 } 1205 break; 1206 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 1207 u_int8_t *pQbuffer; 1208 1209 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1210 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1211 CHIP_REG_WRITE32(inbound_doorbell 1212 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1213 } 1214 acb->acb_flags |= 1215 (ACB_F_MESSAGE_WQBUFFER_CLEARED 1216 | ACB_F_MESSAGE_RQBUFFER_CLEARED 1217 | ACB_F_MESSAGE_WQBUFFER_READED); 1218 acb->rqbuf_firstindex = 0; 1219 acb->rqbuf_lastindex = 0; 1220 acb->wqbuf_firstindex = 0; 1221 acb->wqbuf_lastindex = 0; 1222 pQbuffer = acb->rqbuffer; 1223 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 1224 pQbuffer = acb->wqbuffer; 1225 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 1226 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1227 } 1228 break; 1229 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 1230 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 1231 } 1232 break; 1233 case ARCMSR_MESSAGE_SAY_HELLO: { 1234 int8_t * hello_string = "Hello! I am ARCMSR"; 1235 1236 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 1237 , (int16_t)strlen(hello_string)); 1238 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1239 } 1240 break; 1241 case ARCMSR_MESSAGE_SAY_GOODBYE: 1242 arcmsr_iop_parking(acb); 1243 break; 1244 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 1245 arcmsr_flush_adapter_cache(acb); 1246 break; 1247 default: 1248 retvalue = ARCMSR_MESSAGE_FAIL; 1249 } 1250 message_out: 1251 return retvalue; 1252 } 1253 /* 1254 ********************************************************************* 1255 ********************************************************************* 1256 */ 1257 static void arcmsr_executesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1258 { 1259 struct CommandControlBlock *srb=(struct CommandControlBlock *)arg; 1260 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb; 1261 union ccb * pccb; 1262 int target, lun; 1263 1264 pccb=srb->pccb; 1265 target=pccb->ccb_h.target_id; 1266 lun=pccb->ccb_h.target_lun; 1267 if(error != 0) { 1268 if(error != EFBIG) { 1269 printf("arcmsr%d: unexpected error %x returned from 'bus_dmamap_load' \n" 1270 , acb->pci_unit, error); 1271 } 1272 if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1273 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; 1274 } 1275 arcmsr_srb_complete(srb, 0); 1276 return; 1277 } 1278 if(nseg > ARCMSR_MAX_SG_ENTRIES) { 1279 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; 1280 arcmsr_srb_complete(srb, 0); 1281 return; 1282 } 1283 if(acb->acb_flags & ACB_F_BUS_RESET) { 1284 printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); 1285 pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; 1286 arcmsr_srb_complete(srb, 0); 1287 return; 1288 } 1289 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 1290 u_int8_t block_cmd; 1291 1292 block_cmd=pccb->csio.cdb_io.cdb_bytes[0] & 0x0f; 1293 if(block_cmd==0x08 || block_cmd==0x0a) { 1294 printf("arcmsr%d:block 'read/write' command" 1295 "with gone raid volume Cmd=%2x, TargetId=%d, Lun=%d \n" 1296 , acb->pci_unit, block_cmd, target, lun); 1297 pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 1298 arcmsr_srb_complete(srb, 0); 1299 return; 1300 } 1301 } 1302 if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1303 if(nseg != 0) { 1304 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 1305 } 1306 arcmsr_srb_complete(srb, 0); 1307 return; 1308 } 1309 if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) { 1310 xpt_freeze_simq(acb->psim, 1); 1311 pccb->ccb_h.status = CAM_REQUEUE_REQ; 1312 acb->acb_flags |= ACB_F_CAM_DEV_QFRZN; 1313 arcmsr_srb_complete(srb, 0); 1314 return; 1315 } 1316 pccb->ccb_h.status |= CAM_SIM_QUEUED; 1317 arcmsr_build_srb(srb, dm_segs, nseg); 1318 arcmsr_post_srb(acb, srb); 1319 return; 1320 } 1321 /* 1322 ***************************************************************************************** 1323 ***************************************************************************************** 1324 */ 1325 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb) 1326 { 1327 struct CommandControlBlock *srb; 1328 struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; 1329 u_int32_t intmask_org, mask; 1330 int i=0; 1331 1332 acb->num_aborts++; 1333 /* 1334 *************************************************************************** 1335 ** It is the upper layer do abort command this lock just prior to calling us. 1336 ** First determine if we currently own this command. 1337 ** Start by searching the device queue. If not found 1338 ** at all, and the system wanted us to just abort the 1339 ** command return success. 1340 *************************************************************************** 1341 */ 1342 if(acb->srboutstandingcount!=0) { 1343 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 1344 srb=acb->psrb_pool[i]; 1345 if(srb->startdone==ARCMSR_SRB_START) { 1346 if(srb->pccb==abortccb) { 1347 srb->startdone=ARCMSR_SRB_ABORTED; 1348 printf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'" 1349 "outstanding command \n" 1350 , acb->pci_unit, abortccb->ccb_h.target_id 1351 , abortccb->ccb_h.target_lun, srb); 1352 goto abort_outstanding_cmd; 1353 } 1354 } 1355 } 1356 } 1357 return(FALSE); 1358 abort_outstanding_cmd: 1359 /* do not talk to iop 331 abort command */ 1360 UDELAY(3000*1000);/*wait for 3 sec for all command done*/ 1361 /* disable all outbound interrupt */ 1362 intmask_org=CHIP_REG_READ32(outbound_intmask); 1363 CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 1364 arcmsr_polling_srbdone(acb, srb); 1365 /* enable all outbound interrupt */ 1366 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 1367 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 1368 return (TRUE); 1369 } 1370 /* 1371 **************************************************************************** 1372 **************************************************************************** 1373 */ 1374 static void arcmsr_bus_reset(struct AdapterControlBlock *acb) 1375 { 1376 int retry=0; 1377 1378 acb->num_resets++; 1379 acb->acb_flags |=ACB_F_BUS_RESET; 1380 while(acb->srboutstandingcount!=0 && retry < 400) { 1381 arcmsr_interrupt((void *)acb); 1382 UDELAY(25000); 1383 retry++; 1384 } 1385 arcmsr_iop_reset(acb); 1386 acb->acb_flags &= ~ACB_F_BUS_RESET; 1387 return; 1388 } 1389 /* 1390 ************************************************************************** 1391 ************************************************************************** 1392 */ 1393 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, 1394 union ccb * pccb) 1395 { 1396 pccb->ccb_h.status |= CAM_REQ_CMP; 1397 switch (pccb->csio.cdb_io.cdb_bytes[0]) { 1398 case INQUIRY: { 1399 unsigned char inqdata[36]; 1400 char *buffer=pccb->csio.data_ptr;; 1401 1402 if (pccb->ccb_h.target_lun) { 1403 pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 1404 xpt_done(pccb); 1405 return; 1406 } 1407 inqdata[0] = T_PROCESSOR; 1408 /* Periph Qualifier & Periph Dev Type */ 1409 inqdata[1] = 0; 1410 /* rem media bit & Dev Type Modifier */ 1411 inqdata[2] = 0; 1412 /* ISO, ECMA, & ANSI versions */ 1413 inqdata[4] = 31; 1414 /* length of additional data */ 1415 strncpy(&inqdata[8], "Areca ", 8); 1416 /* Vendor Identification */ 1417 strncpy(&inqdata[16], "RAID controller ", 16); 1418 /* Product Identification */ 1419 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 1420 memcpy(buffer, inqdata, sizeof(inqdata)); 1421 xpt_done(pccb); 1422 } 1423 break; 1424 case WRITE_BUFFER: 1425 case READ_BUFFER: { 1426 if (arcmsr_iop_message_xfer(acb, pccb)) { 1427 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1428 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 1429 } 1430 xpt_done(pccb); 1431 } 1432 break; 1433 default: 1434 xpt_done(pccb); 1435 } 1436 } 1437 /* 1438 ********************************************************************* 1439 ********************************************************************* 1440 */ 1441 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb) 1442 { 1443 struct AdapterControlBlock * acb; 1444 1445 acb=(struct AdapterControlBlock *) cam_sim_softc(psim); 1446 if(acb==NULL) { 1447 pccb->ccb_h.status |= CAM_REQ_INVALID; 1448 xpt_done(pccb); 1449 return; 1450 } 1451 switch (pccb->ccb_h.func_code) { 1452 case XPT_SCSI_IO: { 1453 struct CommandControlBlock *srb; 1454 int target=pccb->ccb_h.target_id; 1455 1456 if(target == 16) { 1457 /* virtual device for iop message transfer */ 1458 arcmsr_handle_virtual_command(acb, pccb); 1459 return; 1460 } 1461 if((srb=arcmsr_get_freesrb(acb)) == NULL) { 1462 pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; 1463 xpt_done(pccb); 1464 return; 1465 } 1466 pccb->ccb_h.arcmsr_ccbsrb_ptr=srb; 1467 pccb->ccb_h.arcmsr_ccbacb_ptr=acb; 1468 srb->pccb=pccb; 1469 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1470 if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) { 1471 /* Single buffer */ 1472 if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) { 1473 /* Buffer is virtual */ 1474 u_int32_t error, s; 1475 1476 s=splsoftvm(); 1477 error = bus_dmamap_load(acb->dm_segs_dmat 1478 , srb->dm_segs_dmamap 1479 , pccb->csio.data_ptr 1480 , pccb->csio.dxfer_len 1481 , arcmsr_executesrb, srb, /*flags*/0); 1482 if(error == EINPROGRESS) { 1483 xpt_freeze_simq(acb->psim, 1); 1484 pccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1485 } 1486 splx(s); 1487 } else { 1488 /* Buffer is physical */ 1489 panic("arcmsr: CAM_DATA_PHYS not supported"); 1490 } 1491 } else { 1492 /* Scatter/gather list */ 1493 struct bus_dma_segment *segs; 1494 1495 if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 1496 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1497 pccb->ccb_h.status |= CAM_PROVIDE_FAIL; 1498 xpt_done(pccb); 1499 free(srb, M_DEVBUF); 1500 return; 1501 } 1502 segs=(struct bus_dma_segment *)pccb->csio.data_ptr; 1503 arcmsr_executesrb(srb, segs, pccb->csio.sglist_cnt, 0); 1504 } 1505 } else { 1506 arcmsr_executesrb(srb, NULL, 0, 0); 1507 } 1508 break; 1509 } 1510 case XPT_TARGET_IO: { 1511 /* target mode not yet support vendor specific commands. */ 1512 pccb->ccb_h.status |= CAM_REQ_CMP; 1513 xpt_done(pccb); 1514 break; 1515 } 1516 case XPT_PATH_INQ: { 1517 struct ccb_pathinq *cpi= &pccb->cpi; 1518 1519 cpi->version_num=1; 1520 cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE; 1521 cpi->target_sprt=0; 1522 cpi->hba_misc=0; 1523 cpi->hba_eng_cnt=0; 1524 cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */ 1525 cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */ 1526 cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */ 1527 cpi->bus_id=cam_sim_bus(psim); 1528 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 1529 strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); 1530 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); 1531 cpi->unit_number=cam_sim_unit(psim); 1532 cpi->transport = XPORT_SPI; 1533 cpi->transport_version = 2; 1534 cpi->protocol = PROTO_SCSI; 1535 cpi->protocol_version = SCSI_REV_2; 1536 cpi->ccb_h.status |= CAM_REQ_CMP; 1537 xpt_done(pccb); 1538 break; 1539 } 1540 case XPT_ABORT: { 1541 union ccb *pabort_ccb; 1542 1543 pabort_ccb=pccb->cab.abort_ccb; 1544 switch (pabort_ccb->ccb_h.func_code) { 1545 case XPT_ACCEPT_TARGET_IO: 1546 case XPT_IMMED_NOTIFY: 1547 case XPT_CONT_TARGET_IO: 1548 if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { 1549 pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; 1550 xpt_done(pabort_ccb); 1551 pccb->ccb_h.status |= CAM_REQ_CMP; 1552 } else { 1553 xpt_print_path(pabort_ccb->ccb_h.path); 1554 printf("Not found\n"); 1555 pccb->ccb_h.status |= CAM_PATH_INVALID; 1556 } 1557 break; 1558 case XPT_SCSI_IO: 1559 pccb->ccb_h.status |= CAM_UA_ABORT; 1560 break; 1561 default: 1562 pccb->ccb_h.status |= CAM_REQ_INVALID; 1563 break; 1564 } 1565 xpt_done(pccb); 1566 break; 1567 } 1568 case XPT_RESET_BUS: 1569 case XPT_RESET_DEV: { 1570 u_int32_t i; 1571 1572 arcmsr_bus_reset(acb); 1573 for (i=0; i < 500; i++) { 1574 DELAY(1000); 1575 } 1576 pccb->ccb_h.status |= CAM_REQ_CMP; 1577 xpt_done(pccb); 1578 break; 1579 } 1580 case XPT_TERM_IO: { 1581 pccb->ccb_h.status |= CAM_REQ_INVALID; 1582 xpt_done(pccb); 1583 break; 1584 } 1585 case XPT_GET_TRAN_SETTINGS: { 1586 struct ccb_trans_settings *cts; 1587 struct ccb_trans_settings_scsi *scsi; 1588 struct ccb_trans_settings_spi *spi; 1589 1590 if(pccb->ccb_h.target_id == 16) { 1591 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 1592 xpt_done(pccb); 1593 break; 1594 } 1595 1596 cts= &pccb->cts; 1597 scsi = &cts->proto_specific.scsi; 1598 spi = &cts->xport_specific.spi; 1599 1600 cts->protocol = PROTO_SCSI; 1601 cts->protocol_version = SCSI_REV_2; 1602 cts->transport = XPORT_SPI; 1603 cts->transport_version = 2; 1604 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 1605 spi->sync_period=3; 1606 spi->sync_offset=32; 1607 spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT; 1608 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 1609 spi->valid = CTS_SPI_VALID_SYNC_RATE 1610 | CTS_SPI_VALID_SYNC_OFFSET 1611 | CTS_SPI_VALID_BUS_WIDTH; 1612 scsi->valid = CTS_SCSI_VALID_TQ; 1613 1614 pccb->ccb_h.status |= CAM_REQ_CMP; 1615 xpt_done(pccb); 1616 break; 1617 } 1618 case XPT_SET_TRAN_SETTINGS: { 1619 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 1620 xpt_done(pccb); 1621 break; 1622 } 1623 case XPT_CALC_GEOMETRY: { 1624 struct ccb_calc_geometry *ccg; 1625 u_int32_t size_mb; 1626 u_int32_t secs_per_cylinder; 1627 1628 if(pccb->ccb_h.target_id == 16) { 1629 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 1630 xpt_done(pccb); 1631 break; 1632 } 1633 ccg= &pccb->ccg; 1634 if (ccg->block_size == 0) { 1635 pccb->ccb_h.status = CAM_REQ_INVALID; 1636 xpt_done(pccb); 1637 break; 1638 } 1639 if(((1024L * 1024L)/ccg->block_size) < 0) { 1640 pccb->ccb_h.status = CAM_REQ_INVALID; 1641 xpt_done(pccb); 1642 break; 1643 } 1644 size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size); 1645 if(size_mb > 1024 ) { 1646 ccg->heads=255; 1647 ccg->secs_per_track=63; 1648 } else { 1649 ccg->heads=64; 1650 ccg->secs_per_track=32; 1651 } 1652 secs_per_cylinder=ccg->heads * ccg->secs_per_track; 1653 ccg->cylinders=ccg->volume_size / secs_per_cylinder; 1654 pccb->ccb_h.status |= CAM_REQ_CMP; 1655 xpt_done(pccb); 1656 break; 1657 } 1658 default: 1659 pccb->ccb_h.status |= CAM_REQ_INVALID; 1660 xpt_done(pccb); 1661 break; 1662 } 1663 return; 1664 } 1665 /* 1666 ********************************************************************** 1667 ********************************************************************** 1668 */ 1669 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) 1670 { 1671 acb->acb_flags |= ACB_F_MSG_START_BGRB; 1672 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); 1673 if(arcmsr_wait_msgint_ready(acb)) { 1674 printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit); 1675 } 1676 return; 1677 } 1678 /* 1679 ********************************************************************** 1680 ********************************************************************** 1681 */ 1682 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 1683 { 1684 struct CommandControlBlock *srb; 1685 uint32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; 1686 int id, lun; 1687 1688 polling_srb_retry: 1689 poll_count++; 1690 outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable; 1691 CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/ 1692 while(1) { 1693 if((flag_srb=CHIP_REG_READ32(outbound_queueport))==0xFFFFFFFF) { 1694 if(poll_srb_done) { 1695 break;/*chip FIFO no ccb for completion already*/ 1696 } else { 1697 UDELAY(25000); 1698 if(poll_count > 100) { 1699 break; 1700 } 1701 goto polling_srb_retry; 1702 } 1703 } 1704 /* check ifcommand done with no error*/ 1705 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5)); 1706 /*frame must be 32 bytes aligned*/ 1707 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 1708 if((srb->startdone==ARCMSR_SRB_ABORTED) && (srb==poll_srb)) { 1709 printf("arcmsr%d: scsi id=%d lun=%d srb='%p'" 1710 "poll command abort successfully \n" 1711 , acb->pci_unit 1712 , srb->pccb->ccb_h.target_id 1713 , srb->pccb->ccb_h.target_lun, srb); 1714 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 1715 arcmsr_srb_complete(srb, 1); 1716 poll_srb_done=1; 1717 continue; 1718 } 1719 printf("arcmsr%d: polling get an illegal srb command done srb='%p'" 1720 "srboutstandingcount=%d \n" 1721 , acb->pci_unit 1722 , srb, acb->srboutstandingcount); 1723 continue; 1724 } 1725 id=srb->pccb->ccb_h.target_id; 1726 lun=srb->pccb->ccb_h.target_lun; 1727 if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) { 1728 if(acb->devstate[id][lun]==ARECA_RAID_GONE) { 1729 acb->devstate[id][lun]=ARECA_RAID_GOOD; 1730 } 1731 srb->pccb->ccb_h.status |= CAM_REQ_CMP; 1732 arcmsr_srb_complete(srb, 1); 1733 } else { 1734 switch(srb->arcmsr_cdb.DeviceStatus) { 1735 case ARCMSR_DEV_SELECT_TIMEOUT: { 1736 acb->devstate[id][lun]=ARECA_RAID_GONE; 1737 srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 1738 arcmsr_srb_complete(srb, 1); 1739 } 1740 break; 1741 case ARCMSR_DEV_ABORTED: 1742 case ARCMSR_DEV_INIT_FAIL: { 1743 acb->devstate[id][lun]=ARECA_RAID_GONE; 1744 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 1745 arcmsr_srb_complete(srb, 1); 1746 } 1747 break; 1748 case SCSISTAT_CHECK_CONDITION: { 1749 acb->devstate[id][lun]=ARECA_RAID_GOOD; 1750 arcmsr_report_sense_info(srb); 1751 arcmsr_srb_complete(srb, 1); 1752 } 1753 break; 1754 default: 1755 printf("arcmsr%d: scsi id=%d lun=%d" 1756 "polling and getting command error done" 1757 ", but got unknow DeviceStatus=0x%x \n" 1758 , acb->pci_unit, id, lun, srb->arcmsr_cdb.DeviceStatus); 1759 acb->devstate[id][lun]=ARECA_RAID_GONE; 1760 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; 1761 /*unknow error or crc error just for retry*/ 1762 arcmsr_srb_complete(srb, 1); 1763 break; 1764 } 1765 } 1766 } /*drain reply FIFO*/ 1767 return; 1768 } 1769 /* 1770 ********************************************************************** 1771 ** get firmware miscellaneous data 1772 ********************************************************************** 1773 */ 1774 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) 1775 { 1776 char *acb_firm_model=acb->firm_model; 1777 char *acb_firm_version=acb->firm_version; 1778 size_t iop_firm_model=offsetof(struct MessageUnit,message_rwbuffer[15]); /*firm_model,15,60-67*/ 1779 size_t iop_firm_version=offsetof(struct MessageUnit,message_rwbuffer[17]); /*firm_version,17,68-83*/ 1780 int i; 1781 1782 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 1783 if(arcmsr_wait_msgint_ready(acb)) { 1784 printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n" 1785 , acb->pci_unit); 1786 } 1787 i=0; 1788 while(i<8) { 1789 *acb_firm_model=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_model+i); 1790 /* 8 bytes firm_model, 15, 60-67*/ 1791 acb_firm_model++; 1792 i++; 1793 } 1794 i=0; 1795 while(i<16) { 1796 *acb_firm_version=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_version+i); 1797 /* 16 bytes firm_version, 17, 68-83*/ 1798 acb_firm_version++; 1799 i++; 1800 } 1801 printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); 1802 printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); 1803 acb->firm_request_len=CHIP_REG_READ32(message_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 1804 acb->firm_numbers_queue=CHIP_REG_READ32(message_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 1805 acb->firm_sdram_size=CHIP_REG_READ32(message_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 1806 acb->firm_ide_channels=CHIP_REG_READ32(message_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 1807 return; 1808 } 1809 /* 1810 ********************************************************************** 1811 ** start background rebulid 1812 ********************************************************************** 1813 */ 1814 static void arcmsr_iop_init(struct AdapterControlBlock *acb) 1815 { 1816 u_int32_t intmask_org, mask, outbound_doorbell, firmware_state=0; 1817 1818 do { 1819 firmware_state=CHIP_REG_READ32(outbound_msgaddr1); 1820 } while((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)==0); 1821 intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; 1822 CHIP_REG_WRITE32(outbound_intmask, intmask_org); 1823 intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; 1824 arcmsr_get_firmware_spec(acb); 1825 arcmsr_start_adapter_bgrb(acb); 1826 /* clear Qbuffer if door bell ringed */ 1827 outbound_doorbell=CHIP_REG_READ32(outbound_doorbell); 1828 CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */ 1829 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1830 /* enable outbound Post Queue, outbound message0, outbell doorbell Interrupt */ 1831 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 1832 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 1833 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; 1834 acb->acb_flags |=ACB_F_IOP_INITED; 1835 return; 1836 } 1837 /* 1838 ********************************************************************** 1839 ********************************************************************** 1840 */ 1841 static void arcmsr_map_freesrb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1842 { 1843 struct AdapterControlBlock *acb=arg; 1844 struct CommandControlBlock *srb_tmp; 1845 u_int8_t * dma_memptr; 1846 u_int32_t i, srb_phyaddr_hi32; 1847 unsigned long srb_phyaddr=(unsigned long)segs->ds_addr; 1848 1849 dma_memptr=acb->uncacheptr; 1850 srb_phyaddr=segs->ds_addr; /* We suppose bus_addr_t high part always 0 here*/ 1851 if(((unsigned long)dma_memptr & 0x1F)!=0) { 1852 dma_memptr=dma_memptr+(0x20-((unsigned long)dma_memptr & 0x1F)); 1853 srb_phyaddr=srb_phyaddr+(0x20-((unsigned long)srb_phyaddr & 0x1F)); 1854 } 1855 srb_tmp=(struct CommandControlBlock *)dma_memptr; 1856 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 1857 /*srb address must 32 (0x20) boundary*/ 1858 if(((unsigned long)srb_tmp & 0x1F)==0) { 1859 if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) { 1860 acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; 1861 printf("arcmsr%d: srb dmamap bus_dmamap_create error\n", acb->pci_unit); 1862 return; 1863 } 1864 srb_tmp->cdb_shifted_phyaddr=srb_phyaddr >> 5; 1865 srb_tmp->acb=acb; 1866 acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp; 1867 srb_phyaddr=srb_phyaddr+sizeof(struct CommandControlBlock); 1868 } else { 1869 acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; 1870 printf("arcmsr%d: dma_memptr=%p i=%d" 1871 "this srb cross 32 bytes boundary ignored srb_tmp=%p \n" 1872 , acb->pci_unit, dma_memptr, i, srb_tmp); 1873 return; 1874 } 1875 srb_tmp++; 1876 } 1877 acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr; 1878 /* 1879 ******************************************************************** 1880 ** here we need to tell iop 331 our freesrb.HighPart 1881 ** if freesrb.HighPart is not zero 1882 ******************************************************************** 1883 */ 1884 srb_phyaddr_hi32=(uint32_t) ((srb_phyaddr>>16)>>16); 1885 if(srb_phyaddr_hi32!=0) { 1886 CHIP_REG_WRITE32(message_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 1887 CHIP_REG_WRITE32(message_rwbuffer[1], srb_phyaddr_hi32); 1888 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 1889 if(arcmsr_wait_msgint_ready(acb)) { 1890 printf("arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 1891 } 1892 } 1893 return; 1894 } 1895 /* 1896 ************************************************************************ 1897 ** 1898 ** 1899 ************************************************************************ 1900 */ 1901 static void arcmsr_free_resource(struct AdapterControlBlock *acb) 1902 { 1903 /* remove the control device */ 1904 if(acb->ioctl_dev != NULL) { 1905 destroy_dev(acb->ioctl_dev); 1906 } 1907 bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); 1908 bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); 1909 bus_dma_tag_destroy(acb->srb_dmat); 1910 bus_dma_tag_destroy(acb->dm_segs_dmat); 1911 bus_dma_tag_destroy(acb->parent_dmat); 1912 return; 1913 } 1914 /* 1915 ************************************************************************ 1916 ************************************************************************ 1917 */ 1918 static u_int32_t arcmsr_initialize(device_t dev) 1919 { 1920 struct AdapterControlBlock *acb=device_get_softc(dev); 1921 u_int32_t intmask_org, rid=PCIR_BAR(0); 1922 vm_offset_t mem_base; 1923 u_int16_t pci_command; 1924 int i, j; 1925 1926 #if __FreeBSD_version >= 502010 1927 if(bus_dma_tag_create( /*parent*/ NULL, 1928 /*alignemnt*/ 1, 1929 /*boundary*/ 0, 1930 /*lowaddr*/ BUS_SPACE_MAXADDR, 1931 /*highaddr*/ BUS_SPACE_MAXADDR, 1932 /*filter*/ NULL, 1933 /*filterarg*/ NULL, 1934 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, 1935 /*nsegments*/ BUS_SPACE_UNRESTRICTED, 1936 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1937 /*flags*/ 0, 1938 /*lockfunc*/ NULL, 1939 /*lockarg*/ NULL, 1940 &acb->parent_dmat) != 0) 1941 #else 1942 if(bus_dma_tag_create( /*parent*/ NULL, 1943 /*alignemnt*/ 1, 1944 /*boundary*/ 0, 1945 /*lowaddr*/ BUS_SPACE_MAXADDR, 1946 /*highaddr*/ BUS_SPACE_MAXADDR, 1947 /*filter*/ NULL, 1948 /*filterarg*/ NULL, 1949 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, 1950 /*nsegments*/ BUS_SPACE_UNRESTRICTED, 1951 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1952 /*flags*/ 0, 1953 &acb->parent_dmat) != 0) 1954 #endif 1955 { 1956 printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", acb->pci_unit); 1957 return ENOMEM; 1958 } 1959 /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */ 1960 #if __FreeBSD_version >= 502010 1961 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 1962 /*alignment*/ 1, 1963 /*boundary*/ 0, 1964 /*lowaddr*/ BUS_SPACE_MAXADDR, 1965 /*highaddr*/ BUS_SPACE_MAXADDR, 1966 /*filter*/ NULL, 1967 /*filterarg*/ NULL, 1968 /*maxsize*/ MAXBSIZE, 1969 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, 1970 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1971 /*flags*/ 0, 1972 /*lockfunc*/ busdma_lock_mutex, 1973 /*lockarg*/ &Giant, 1974 &acb->dm_segs_dmat) != 0) 1975 #else 1976 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 1977 /*alignment*/ 1, 1978 /*boundary*/ 0, 1979 /*lowaddr*/ BUS_SPACE_MAXADDR, 1980 /*highaddr*/ BUS_SPACE_MAXADDR, 1981 /*filter*/ NULL, 1982 /*filterarg*/ NULL, 1983 /*maxsize*/ MAXBSIZE, 1984 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, 1985 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1986 /*flags*/ 0, 1987 &acb->dm_segs_dmat) != 0) 1988 #endif 1989 { 1990 bus_dma_tag_destroy(acb->parent_dmat); 1991 printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", acb->pci_unit); 1992 return ENOMEM; 1993 } 1994 /* DMA tag for our srb structures.... Allocate the freesrb memory */ 1995 #if __FreeBSD_version >= 502010 1996 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 1997 /*alignment*/ 1, 1998 /*boundary*/ 0, 1999 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, 2000 /*highaddr*/ BUS_SPACE_MAXADDR, 2001 /*filter*/ NULL, 2002 /*filterarg*/ NULL, 2003 /*maxsize*/ ARCMSR_SRBS_POOL_SIZE, 2004 /*nsegments*/ 1, 2005 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 2006 /*flags*/ 0, 2007 /*lockfunc*/ NULL, 2008 /*lockarg*/ NULL, 2009 &acb->srb_dmat) != 0) 2010 #else 2011 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 2012 /*alignment*/ 1, 2013 /*boundary*/ 0, 2014 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, 2015 /*highaddr*/ BUS_SPACE_MAXADDR, 2016 /*filter*/ NULL, 2017 /*filterarg*/ NULL, 2018 /*maxsize*/ ARCMSR_SRBS_POOL_SIZE, 2019 /*nsegments*/ 1, 2020 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 2021 /*flags*/ 0, 2022 &acb->srb_dmat) != 0) 2023 #endif 2024 { 2025 bus_dma_tag_destroy(acb->dm_segs_dmat); 2026 bus_dma_tag_destroy(acb->parent_dmat); 2027 printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", acb->pci_unit); 2028 return ENXIO; 2029 } 2030 /* Allocation for our srbs */ 2031 if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr 2032 , BUS_DMA_WAITOK | BUS_DMA_COHERENT, &acb->srb_dmamap) != 0) { 2033 bus_dma_tag_destroy(acb->srb_dmat); 2034 bus_dma_tag_destroy(acb->dm_segs_dmat); 2035 bus_dma_tag_destroy(acb->parent_dmat); 2036 printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", acb->pci_unit); 2037 return ENXIO; 2038 } 2039 /* And permanently map them */ 2040 if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr 2041 , ARCMSR_SRBS_POOL_SIZE, arcmsr_map_freesrb, acb, /*flags*/0)) { 2042 bus_dma_tag_destroy(acb->srb_dmat); 2043 bus_dma_tag_destroy(acb->dm_segs_dmat); 2044 bus_dma_tag_destroy(acb->parent_dmat); 2045 printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", acb->pci_unit); 2046 return ENXIO; 2047 } 2048 pci_command=pci_read_config(dev, PCIR_COMMAND, 2); 2049 pci_command |= PCIM_CMD_BUSMASTEREN; 2050 pci_command |= PCIM_CMD_PERRESPEN; 2051 pci_command |= PCIM_CMD_MWRICEN; 2052 /* Enable Busmaster/Mem */ 2053 pci_command |= PCIM_CMD_MEMEN; 2054 pci_write_config(dev, PCIR_COMMAND, pci_command, 2); 2055 acb->sys_res_arcmsr=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, 0x1000, RF_ACTIVE); 2056 if(acb->sys_res_arcmsr == NULL) { 2057 arcmsr_free_resource(acb); 2058 printf("arcmsr%d: bus_alloc_resource failure!\n", acb->pci_unit); 2059 return ENOMEM; 2060 } 2061 if(rman_get_start(acb->sys_res_arcmsr) <= 0) { 2062 arcmsr_free_resource(acb); 2063 printf("arcmsr%d: rman_get_start failure!\n", acb->pci_unit); 2064 return ENXIO; 2065 } 2066 mem_base=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr); 2067 if(mem_base==0) { 2068 arcmsr_free_resource(acb); 2069 printf("arcmsr%d: rman_get_virtual failure!\n", acb->pci_unit); 2070 return ENXIO; 2071 } 2072 if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { 2073 arcmsr_free_resource(acb); 2074 printf("arcmsr%d: map free srb failure!\n", acb->pci_unit); 2075 return ENXIO; 2076 } 2077 acb->btag=rman_get_bustag(acb->sys_res_arcmsr); 2078 acb->bhandle=rman_get_bushandle(acb->sys_res_arcmsr); 2079 acb->pmu=(struct MessageUnit *)mem_base; 2080 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED 2081 |ACB_F_MESSAGE_RQBUFFER_CLEARED 2082 |ACB_F_MESSAGE_WQBUFFER_READED); 2083 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 2084 /* 2085 ******************************************************************** 2086 ** init raid volume state 2087 ******************************************************************** 2088 */ 2089 for(i=0;i<ARCMSR_MAX_TARGETID;i++) { 2090 for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) { 2091 acb->devstate[i][j]=ARECA_RAID_GOOD; 2092 } 2093 } 2094 /* disable iop all outbound interrupt */ 2095 intmask_org=CHIP_REG_READ32(outbound_intmask); 2096 CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 2097 arcmsr_iop_init(acb); 2098 return(0); 2099 } 2100 /* 2101 ************************************************************************ 2102 ************************************************************************ 2103 */ 2104 static u_int32_t arcmsr_attach(device_t dev) 2105 { 2106 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 2107 u_int32_t unit=device_get_unit(dev); 2108 struct ccb_setasync csa; 2109 struct cam_devq *devq; /* Device Queue to use for this SIM */ 2110 struct resource *irqres; 2111 int rid; 2112 2113 if(acb == NULL) { 2114 printf("arcmsr%d: cannot allocate softc\n", unit); 2115 return (ENOMEM); 2116 } 2117 bzero(acb, sizeof(struct AdapterControlBlock)); 2118 if(arcmsr_initialize(dev)) { 2119 printf("arcmsr%d: initialize failure!\n", unit); 2120 return ENXIO; 2121 } 2122 /* After setting up the adapter, map our interrupt */ 2123 rid=0; 2124 irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE); 2125 if(irqres == NULL || 2126 bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE 2127 , NULL, arcmsr_interrupt, acb, &acb->ih)) { 2128 arcmsr_free_resource(acb); 2129 printf("arcmsr%d: unable to register interrupt handler!\n", unit); 2130 return ENXIO; 2131 } 2132 acb->irqres=irqres; 2133 acb->pci_dev=dev; 2134 acb->pci_unit=unit; 2135 /* 2136 * Now let the CAM generic SCSI layer find the SCSI devices on 2137 * the bus * start queue to reset to the idle loop. * 2138 * Create device queue of SIM(s) * (MAX_START_JOB - 1) : 2139 * max_sim_transactions 2140 */ 2141 devq=cam_simq_alloc(ARCMSR_MAX_START_JOB); 2142 if(devq == NULL) { 2143 arcmsr_free_resource(acb); 2144 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2145 printf("arcmsr%d: cam_simq_alloc failure!\n", unit); 2146 return ENXIO; 2147 } 2148 acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll 2149 , "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); 2150 if(acb->psim == NULL) { 2151 arcmsr_free_resource(acb); 2152 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2153 cam_simq_free(devq); 2154 printf("arcmsr%d: cam_sim_alloc failure!\n", unit); 2155 return ENXIO; 2156 } 2157 if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) { 2158 arcmsr_free_resource(acb); 2159 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2160 cam_sim_free(acb->psim, /*free_devq*/TRUE); 2161 printf("arcmsr%d: xpt_bus_register failure!\n", unit); 2162 return ENXIO; 2163 } 2164 if(xpt_create_path(&acb->ppath, /* periph */ NULL 2165 , cam_sim_path(acb->psim) 2166 , CAM_TARGET_WILDCARD 2167 , CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2168 arcmsr_free_resource(acb); 2169 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2170 xpt_bus_deregister(cam_sim_path(acb->psim)); 2171 cam_sim_free(acb->psim, /* free_simq */ TRUE); 2172 printf("arcmsr%d: xpt_create_path failure!\n", unit); 2173 return ENXIO; 2174 } 2175 ARCMSR_LOCK_INIT(&acb->workingQ_done_lock, "arcmsr done working Q lock"); 2176 ARCMSR_LOCK_INIT(&acb->workingQ_start_lock, "arcmsr start working Q lock"); 2177 ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock"); 2178 /* 2179 **************************************************** 2180 */ 2181 xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5); 2182 csa.ccb_h.func_code=XPT_SASYNC_CB; 2183 csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE; 2184 csa.callback=arcmsr_async; 2185 csa.callback_arg=acb->psim; 2186 xpt_action((union ccb *)&csa); 2187 /* Create the control device. */ 2188 acb->ioctl_dev=make_dev(&arcmsr_cdevsw 2189 , unit 2190 , UID_ROOT 2191 , GID_WHEEL /* GID_OPERATOR */ 2192 , S_IRUSR | S_IWUSR 2193 , "arcmsr%d", unit); 2194 #if __FreeBSD_version < 503000 2195 acb->ioctl_dev->si_drv1=acb; 2196 #endif 2197 #if __FreeBSD_version > 500005 2198 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit); 2199 #endif 2200 return 0; 2201 } 2202 /* 2203 ************************************************************************ 2204 ************************************************************************ 2205 */ 2206 static u_int32_t arcmsr_probe(device_t dev) 2207 { 2208 u_int32_t id; 2209 static char buf[256]; 2210 char *type; 2211 int raid6 = 1; 2212 2213 if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) { 2214 return (ENXIO); 2215 } 2216 switch(id=pci_get_devid(dev)) { 2217 case PCIDevVenIDARC1110: 2218 case PCIDevVenIDARC1210: 2219 raid6 = 0; 2220 /*FALLTHRU*/ 2221 case PCIDevVenIDARC1120: 2222 case PCIDevVenIDARC1130: 2223 case PCIDevVenIDARC1160: 2224 case PCIDevVenIDARC1170: 2225 case PCIDevVenIDARC1220: 2226 case PCIDevVenIDARC1230: 2227 case PCIDevVenIDARC1260: 2228 case PCIDevVenIDARC1270: 2229 case PCIDevVenIDARC1280: 2230 type = "SATA"; 2231 break; 2232 case PCIDevVenIDARC1380: 2233 case PCIDevVenIDARC1381: 2234 case PCIDevVenIDARC1680: 2235 case PCIDevVenIDARC1681: 2236 type = "SAS"; 2237 break; 2238 default: 2239 type = "X-TYPE"; 2240 break; 2241 } 2242 sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n", type, raid6 ? "(RAID6 capable)" : ""); 2243 device_set_desc_copy(dev, buf); 2244 return 0; 2245 } 2246 /* 2247 ************************************************************************ 2248 ************************************************************************ 2249 */ 2250 static void arcmsr_shutdown(device_t dev) 2251 { 2252 u_int32_t i, poll_count=0; 2253 u_int32_t intmask_org; 2254 struct CommandControlBlock *srb; 2255 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 2256 2257 /* stop adapter background rebuild */ 2258 arcmsr_stop_adapter_bgrb(acb); 2259 arcmsr_flush_adapter_cache(acb); 2260 /* disable all outbound interrupt */ 2261 intmask_org=CHIP_REG_READ32(outbound_intmask); 2262 CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE)); 2263 /* abort all outstanding command */ 2264 acb->acb_flags |= ACB_F_SCSISTOPADAPTER; 2265 acb->acb_flags &= ~ACB_F_IOP_INITED; 2266 if(acb->srboutstandingcount!=0) { 2267 while((acb->srboutstandingcount!=0) && (poll_count < 256)) { 2268 arcmsr_interrupt((void *)acb); 2269 UDELAY(25000); 2270 poll_count++; 2271 } 2272 if(acb->srboutstandingcount!=0) { 2273 arcmsr_abort_allcmd(acb); 2274 /*clear all outbound posted Q*/ 2275 for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) { 2276 CHIP_REG_READ32(outbound_queueport); 2277 } 2278 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 2279 srb=acb->psrb_pool[i]; 2280 if(srb->startdone==ARCMSR_SRB_START) { 2281 srb->startdone=ARCMSR_SRB_ABORTED; 2282 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 2283 arcmsr_srb_complete(srb, 1); 2284 } 2285 } 2286 } 2287 } 2288 atomic_set_int(&acb->srboutstandingcount, 0); 2289 acb->workingsrb_doneindex=0; 2290 acb->workingsrb_startindex=0; 2291 return; 2292 } 2293 /* 2294 ************************************************************************ 2295 ************************************************************************ 2296 */ 2297 static u_int32_t arcmsr_detach(device_t dev) 2298 { 2299 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 2300 2301 arcmsr_shutdown(dev); 2302 arcmsr_free_resource(acb); 2303 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), acb->sys_res_arcmsr); 2304 bus_teardown_intr(dev, acb->irqres, acb->ih); 2305 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2306 xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); 2307 xpt_free_path(acb->ppath); 2308 xpt_bus_deregister(cam_sim_path(acb->psim)); 2309 cam_sim_free(acb->psim, TRUE); 2310 return (0); 2311 } 2312 2313 2314