1 /* 2 ****************************************************************************************** 3 ** O.S : FreeBSD 4 ** FILE NAME : arcmsr.c 5 ** BY : Erich Chen 6 ** Description: SCSI RAID Device Driver for 7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX) SATA/SAS RAID HOST Adapter 8 ** ARCMSR RAID Host adapter 9 ** [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set] 10 ****************************************************************************************** 11 ************************************************************************ 12 ** 13 ** Copyright (c) 2004-2006 ARECA Co. Ltd. 14 ** Erich Chen, Taipei Taiwan All rights reserved. 15 ** 16 ** Redistribution and use in source and binary forms, with or without 17 ** modification, are permitted provided that the following conditions 18 ** are met: 19 ** 1. Redistributions of source code must retain the above copyright 20 ** notice, this list of conditions and the following disclaimer. 21 ** 2. Redistributions in binary form must reproduce the above copyright 22 ** notice, this list of conditions and the following disclaimer in the 23 ** documentation and/or other materials provided with the distribution. 24 ** 3. The name of the author may not be used to endorse or promote products 25 ** derived from this software without specific prior written permission. 26 ** 27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT 32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 ************************************************************************** 38 ** History 39 ** 40 ** REV# DATE NAME DESCRIPTION 41 ** 1.00.00.00 3/31/2004 Erich Chen First release 42 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error 43 ** 1.20.00.03 4/19/2005 Erich Chen add SATA 24 Ports adapter type support 44 ** clean unused function 45 ** 1.20.00.12 9/12/2005 Erich Chen bug fix with abort command handling, 46 ** firmware version check 47 ** and firmware update notify for hardware bug fix 48 ** handling if none zero high part physical address 49 ** of srb resource 50 ** 1.20.00.13 8/18/2006 Erich Chen remove pending srb and report busy 51 ** add iop message xfer 52 ** with scsi pass-through command 53 ** add new device id of sas raid adapters 54 ** code fit for SPARC64 & PPC 55 ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report 56 ** and cause g_vfs_done() read write error 57 58 ****************************************************************************************** 59 * $FreeBSD$ 60 */ 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/malloc.h> 64 #include <sys/kernel.h> 65 #include <sys/bus.h> 66 #include <sys/queue.h> 67 #include <sys/stat.h> 68 #include <sys/devicestat.h> 69 #include <sys/kthread.h> 70 #include <sys/module.h> 71 #include <sys/proc.h> 72 #include <sys/lock.h> 73 #include <sys/sysctl.h> 74 #include <sys/poll.h> 75 #include <sys/ioccom.h> 76 
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <isa/rtc.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <sys/conf.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
/*
**************************************************************************
** Version-dependent glue: on 5.x and later the driver uses real mutexes
** and <dev/pci/...>; on older kernels it falls back to simple_lock and
** <pci/...>.  arcmsr_htole32() is a byte-swap on 5.x+ and a no-op before
** (pre-5.x kernels had no <sys/endian.h> htole32).
**************************************************************************
*/
#if __FreeBSD_version >= 500005
	#include <sys/selinfo.h>
	#include <sys/mutex.h>
	#include <sys/endian.h>
	#include <dev/pci/pcivar.h>
	#include <dev/pci/pcireg.h>
	#define ARCMSR_LOCK_INIT(l, s)	mtx_init(l, s, NULL, MTX_DEF|MTX_RECURSE)
	#define ARCMSR_LOCK_ACQUIRE(l)	mtx_lock(l)
	#define ARCMSR_LOCK_RELEASE(l)	mtx_unlock(l)
	#define ARCMSR_LOCK_TRY(l)	mtx_trylock(l)
	#define arcmsr_htole32(x)	htole32(x)
	typedef struct mtx arcmsr_lock_t;
#else
	#include <sys/select.h>
	#include <pci/pcivar.h>
	#include <pci/pcireg.h>
	#define ARCMSR_LOCK_INIT(l, s)	simple_lock_init(l)
	#define ARCMSR_LOCK_ACQUIRE(l)	simple_lock(l)
	#define ARCMSR_LOCK_RELEASE(l)	simple_unlock(l)
	#define ARCMSR_LOCK_TRY(l)	simple_lock_try(l)
	#define arcmsr_htole32(x)	(x)
	typedef struct simplelock arcmsr_lock_t;
#endif
#include <dev/arcmsr/arcmsr.h>
/* size of the contiguous SRB pool; +0x20 slack allows 32-byte alignment of the base */
#define ARCMSR_SRBS_POOL_SIZE ((sizeof(struct CommandControlBlock) * ARCMSR_MAX_FREESRB_NUM)+0x20)
/*
**************************************************************************
** Register accessors: both expand in terms of a local 'acb' (the adapter
** softc) that must be in scope at the call site; 'r' is a field name in
** struct MessageUnit, resolved to a register offset via offsetof().
**************************************************************************
*/
#define CHIP_REG_READ32(r)	bus_space_read_4(acb->btag, acb->bhandle, offsetof(struct MessageUnit,r))
#define CHIP_REG_WRITE32(r,d)	bus_space_write_4(acb->btag, acb->bhandle, offsetof(struct MessageUnit,r), d)
/*
**************************************************************************
** Forward declarations for everything defined later in this file.
**************************************************************************
*/
static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
static u_int32_t arcmsr_probe(device_t dev);
static u_int32_t arcmsr_attach(device_t dev);
static u_int32_t arcmsr_detach(device_t dev);
static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
static void arcmsr_shutdown(device_t dev);
static void arcmsr_interrupt(void *arg);
static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
static void arcmsr_free_resource(struct AdapterControlBlock *acb);
static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb);
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
static int arcmsr_resume(device_t dev);
static int arcmsr_suspend(device_t dev);
/*
**************************************************************************
** Busy-wait wrapper around DELAY(); argument is in microseconds.
**************************************************************************
*/
static void UDELAY(u_int32_t us) { DELAY(us); }
/*
**************************************************************************
** busdma load callbacks, defined later in the file.
**************************************************************************
*/
static bus_dmamap_callback_t arcmsr_map_freesrb;
static bus_dmamap_callback_t arcmsr_executesrb;
/*
**************************************************************************
** Character-device entry points (control node used by the Areca CLI).
**************************************************************************
*/
static d_open_t	arcmsr_open;
static d_close_t arcmsr_close;
static d_ioctl_t arcmsr_ioctl;

/* newbus method table for the PCI device driver */
static device_method_t arcmsr_methods[]={
	DEVMETHOD(device_probe,	arcmsr_probe),
	DEVMETHOD(device_attach, arcmsr_attach),
	DEVMETHOD(device_detach, arcmsr_detach),
	DEVMETHOD(device_shutdown, arcmsr_shutdown),
	DEVMETHOD(device_suspend, arcmsr_suspend),
	DEVMETHOD(device_resume, arcmsr_resume),

	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{ 0, 0 }
};

static driver_t arcmsr_driver={
	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
};

static devclass_t arcmsr_devclass;
DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0);
#ifndef BUS_DMA_COHERENT
	#define BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
#endif
#if __FreeBSD_version >= 501000
	#ifndef D_NEEDGIANT
		#define D_NEEDGIANT	0x00400000	/* driver want Giant */
	#endif
	#ifndef D_VERSION
		#define D_VERSION	0x20011966
	#endif
	/* 5.1+ cdevsw: designated initializers; all I/O is done under Giant */
	static struct cdevsw arcmsr_cdevsw={
	#if __FreeBSD_version > 502010
		.d_version = D_VERSION,
	#endif
		.d_flags = D_NEEDGIANT,
		.d_open = arcmsr_open,		/* open */
		.d_close = arcmsr_close,	/* close */
		.d_ioctl = arcmsr_ioctl,	/* ioctl */
		.d_name = "arcmsr",		/* name */
	};
#else
	#define ARCMSR_CDEV_MAJOR	180

	/* pre-5.1 positional cdevsw layout */
	static struct cdevsw arcmsr_cdevsw = {
		arcmsr_open,		/* open */
		arcmsr_close,		/* close */
		noread,			/* read */
		nowrite,		/* write */
		arcmsr_ioctl,		/* ioctl */
		nopoll,			/* poll */
		nommap,			/* mmap */
		nostrategy,		/* strategy */
		"arcmsr",		/* name */
		ARCMSR_CDEV_MAJOR,	/* major */
		nodump,			/* dump */
		nopsize,		/* psize */
		0			/* flags */
	};
#endif

/*
**************************************************************************
** open(2) on the control node: succeeds iff a softc exists for the unit.
** Pre-5.3 kernels hang the softc off si_drv1; later ones look it up by
** minor number in the devclass.
**************************************************************************
*/
#if __FreeBSD_version < 500005
	static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc)
#else
	#if __FreeBSD_version < 503000
	static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc)
	#else
	static int arcmsr_open(struct cdev *dev, int flags, int fmt, d_thread_t *proc)
	#endif
#endif
{
	#if __FreeBSD_version < 503000
	struct AdapterControlBlock *acb=dev->si_drv1;
	#else
	int unit = minor(dev);
	struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
	#endif
	if(acb==NULL) {
		return ENXIO;
	}
	return 0;
}
/*
**************************************************************************
** close(2) on the control node: no per-open state, only validates unit.
**************************************************************************
*/
#if __FreeBSD_version < 500005
	static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc)
#else
	#if __FreeBSD_version < 503000
	static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc)
	#else
	static int arcmsr_close(struct cdev *dev, int flags, int fmt, d_thread_t *proc)
	#endif
#endif
{
	#if __FreeBSD_version < 503000
	struct AdapterControlBlock *acb=dev->si_drv1;
	#else
	int unit = minor(dev);
	struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
	#endif
	if(acb==NULL) {
		return ENXIO;
	}
	return 0;
}
/*
**************************************************************************
** ioctl(2) on the control node: resolve the softc, then delegate all
** command handling to arcmsr_iop_ioctlcmd().
**************************************************************************
*/
#if __FreeBSD_version < 500005
	static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc)
#else
	#if __FreeBSD_version < 503000
	static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc)
	#else
	static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, d_thread_t *proc)
	#endif
#endif
{
	#if __FreeBSD_version < 503000
	struct AdapterControlBlock *acb=dev->si_drv1;
	#else
	int unit = minor(dev);
	struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
	#endif

	if(acb==NULL) {
		return ENXIO;
	}
	return(arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
}
/*
*******************************************************************************
** device_suspend method: mask every outbound interrupt on the controller,
** then park the IOP (stop background rebuild and flush its cache).
*******************************************************************************
*/
static int arcmsr_suspend(device_t dev)
{
	struct AdapterControlBlock *acb = device_get_softc(dev);
	u_int32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org=CHIP_REG_READ32(outbound_intmask);
	CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE));
	/* flush controller */
	arcmsr_iop_parking(acb);
	return(0);
}
/*
*******************************************************************************
** device_resume method: re-run firmware/IOP initialization.
*******************************************************************************
*/
static int arcmsr_resume(device_t dev)
{
	struct AdapterControlBlock *acb = device_get_softc(dev);

	arcmsr_iop_init(acb);
	return(0);
}
/*
********************************************************************************* 339 ********************************************************************************* 340 */ 341 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) 342 { 343 struct AdapterControlBlock *acb; 344 u_int8_t target_id, target_lun; 345 struct cam_sim * sim; 346 347 sim=(struct cam_sim *) cb_arg; 348 acb =(struct AdapterControlBlock *) cam_sim_softc(sim); 349 switch (code) { 350 case AC_LOST_DEVICE: 351 target_id=xpt_path_target_id(path); 352 target_lun=xpt_path_lun_id(path); 353 if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { 354 break; 355 } 356 printf("%s:scsi id%d lun%d device lost \n" 357 , device_get_name(acb->pci_dev), target_id, target_lun); 358 break; 359 default: 360 break; 361 } 362 } 363 /* 364 ************************************************************************ 365 ************************************************************************ 366 */ 367 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 368 { 369 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); 370 if(arcmsr_wait_msgint_ready(acb)) { 371 printf("arcmsr%d: wait 'flush adapter cache' timeout \n" 372 , acb->pci_unit); 373 } 374 return; 375 } 376 /* 377 ********************************************************************** 378 ********************************************************************** 379 */ 380 static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb) 381 { 382 u_int32_t Index; 383 u_int8_t Retries=0x00; 384 385 do { 386 for(Index=0; Index < 100; Index++) { 387 if(CHIP_REG_READ32(outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 388 /*clear interrupt*/ 389 CHIP_REG_WRITE32(outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); 390 return 0x00; 391 } 392 /* one us delay */ 393 UDELAY(10000); 394 }/*max 1 seconds*/ 395 }while(Retries++ < 20);/*max 20 sec*/ 396 return 0xff; 
397 } 398 /* 399 ********************************************************************** 400 ********************************************************************** 401 */ 402 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag) 403 { 404 struct AdapterControlBlock *acb=srb->acb; 405 union ccb * pccb=srb->pccb; 406 407 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 408 bus_dmasync_op_t op; 409 410 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 411 op = BUS_DMASYNC_POSTREAD; 412 } else { 413 op = BUS_DMASYNC_POSTWRITE; 414 } 415 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 416 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 417 } 418 ARCMSR_LOCK_ACQUIRE(&acb->workingQ_done_lock); 419 if(stand_flag==1) { 420 atomic_subtract_int(&acb->srboutstandingcount, 1); 421 if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && ( 422 acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) { 423 acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN; 424 pccb->ccb_h.status |= CAM_RELEASE_SIMQ; 425 } 426 } 427 srb->startdone=ARCMSR_SRB_DONE; 428 srb->srb_flags=0; 429 acb->srbworkingQ[acb->workingsrb_doneindex]=srb; 430 acb->workingsrb_doneindex++; 431 acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; 432 ARCMSR_LOCK_RELEASE(&acb->workingQ_done_lock); 433 xpt_done(pccb); 434 return; 435 } 436 /* 437 ********************************************************************** 438 ********************************************************************** 439 */ 440 static void arcmsr_report_sense_info(struct CommandControlBlock *srb) 441 { 442 union ccb * pccb=srb->pccb; 443 444 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 445 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 446 if(&pccb->csio.sense_data) { 447 memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); 448 memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, 449 get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); 450 ((u_int8_t 
*)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ 451 pccb->ccb_h.status |= CAM_AUTOSNS_VALID; 452 } 453 return; 454 } 455 /* 456 ********************************************************************* 457 ** 458 ********************************************************************* 459 */ 460 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 461 { 462 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); 463 if(arcmsr_wait_msgint_ready(acb)) { 464 printf("arcmsr%d: wait 'abort all outstanding command' timeout \n" 465 , acb->pci_unit); 466 } 467 return; 468 } 469 /* 470 **************************************************************************** 471 **************************************************************************** 472 */ 473 static void arcmsr_iop_reset(struct AdapterControlBlock *acb) 474 { 475 struct CommandControlBlock *srb; 476 u_int32_t intmask_org, mask; 477 u_int32_t i=0; 478 479 if(acb->srboutstandingcount!=0) 480 { 481 /* talk to iop 331 outstanding command aborted*/ 482 arcmsr_abort_allcmd(acb); 483 UDELAY(3000*1000);/*wait for 3 sec for all command aborted*/ 484 /* disable all outbound interrupt */ 485 intmask_org=CHIP_REG_READ32(outbound_intmask); 486 CHIP_REG_WRITE32(outbound_intmask 487 , intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 488 /*clear all outbound posted Q*/ 489 for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) { 490 CHIP_REG_READ32(outbound_queueport); 491 } 492 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 493 srb=acb->psrb_pool[i]; 494 if(srb->startdone==ARCMSR_SRB_START) { 495 srb->startdone=ARCMSR_SRB_ABORTED; 496 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 497 arcmsr_srb_complete(srb, 1); 498 } 499 } 500 /* enable all outbound interrupt */ 501 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 502 |ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 503 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 504 /* post abort all outstanding command message to RAID controller */ 505 } 506 
atomic_set_int(&acb->srboutstandingcount, 0); 507 acb->workingsrb_doneindex=0; 508 acb->workingsrb_startindex=0; 509 return; 510 } 511 /* 512 ********************************************************************** 513 ********************************************************************** 514 */ 515 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg) 516 { 517 struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb; 518 u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u; 519 u_int32_t address_lo, address_hi; 520 union ccb * pccb=srb->pccb; 521 struct ccb_scsiio * pcsio= &pccb->csio; 522 u_int32_t arccdbsize=0x30; 523 524 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); 525 arcmsr_cdb->Bus=0; 526 arcmsr_cdb->TargetID=pccb->ccb_h.target_id; 527 arcmsr_cdb->LUN=pccb->ccb_h.target_lun; 528 arcmsr_cdb->Function=1; 529 arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len; 530 arcmsr_cdb->Context=(unsigned long)arcmsr_cdb; 531 bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len); 532 if(nseg != 0) { 533 struct AdapterControlBlock *acb=srb->acb; 534 bus_dmasync_op_t op; 535 u_int32_t length, i, cdb_sgcount=0; 536 537 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 538 op=BUS_DMASYNC_PREREAD; 539 } else { 540 op=BUS_DMASYNC_PREWRITE; 541 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE; 542 srb->srb_flags|=SRB_FLAG_WRITE; 543 } 544 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 545 for(i=0;i<nseg;i++) { 546 /* Get the physical address of the current data pointer */ 547 length=arcmsr_htole32(dm_segs[i].ds_len); 548 address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr)); 549 address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr)); 550 if(address_hi==0) { 551 struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge; 552 pdma_sg->address=address_lo; 553 pdma_sg->length=length; 554 psge += sizeof(struct SG32ENTRY); 555 arccdbsize += sizeof(struct SG32ENTRY); 556 } else { 557 u_int32_t sg64s_size=0, tmplength=length; 
558 559 while(1) { 560 u_int64_t span4G, length0; 561 struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge; 562 563 span4G=(u_int64_t)address_lo + tmplength; 564 pdma_sg->addresshigh=address_hi; 565 pdma_sg->address=address_lo; 566 if(span4G > 0x100000000) { 567 /*see if cross 4G boundary*/ 568 length0=0x100000000-address_lo; 569 pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR; 570 address_hi=address_hi+1; 571 address_lo=0; 572 tmplength=tmplength-(u_int32_t)length0; 573 sg64s_size += sizeof(struct SG64ENTRY); 574 psge += sizeof(struct SG64ENTRY); 575 cdb_sgcount++; 576 } else { 577 pdma_sg->length=tmplength|IS_SG64_ADDR; 578 sg64s_size += sizeof(struct SG64ENTRY); 579 psge += sizeof(struct SG64ENTRY); 580 break; 581 } 582 } 583 arccdbsize += sg64s_size; 584 } 585 cdb_sgcount++; 586 } 587 arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount; 588 arcmsr_cdb->DataLength=pcsio->dxfer_len; 589 if( arccdbsize > 256) { 590 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE; 591 } 592 } 593 return; 594 } 595 /* 596 ************************************************************************** 597 ************************************************************************** 598 */ 599 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb) 600 { 601 u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr; 602 struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb; 603 604 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 605 (srb->srb_flags & SRB_FLAG_WRITE) ? 
BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD); 606 atomic_add_int(&acb->srboutstandingcount, 1); 607 srb->startdone=ARCMSR_SRB_START; 608 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { 609 CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE); 610 } else { 611 CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr); 612 } 613 return; 614 } 615 /* 616 ********************************************************************** 617 ********************************************************************** 618 */ 619 static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb) 620 { 621 u_int8_t * pQbuffer; 622 struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer; 623 u_int8_t * iop_data=(u_int8_t *)pwbuffer->data; 624 u_int32_t allxfer_len=0; 625 626 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 627 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 628 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { 629 pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex]; 630 memcpy(iop_data, pQbuffer, 1); 631 acb->wqbuf_firstindex++; 632 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 633 /*if last index number set it to 0 */ 634 iop_data++; 635 allxfer_len++; 636 } 637 pwbuffer->data_len=allxfer_len; 638 /* 639 ** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post 640 */ 641 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); 642 } 643 return; 644 } 645 /* 646 ************************************************************************ 647 ************************************************************************ 648 */ 649 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 650 { 651 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 652 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 653 if(arcmsr_wait_msgint_ready(acb)) { 654 printf("arcmsr%d: wait 'stop adapter rebulid' timeout \n" 655 , acb->pci_unit); 656 } 657 return; 658 } 
/*
************************************************************************
** CAM poll entry point: service the adapter as if an interrupt fired.
************************************************************************
*/
static void arcmsr_poll(struct cam_sim * psim)
{
	arcmsr_interrupt(cam_sim_softc(psim));
	return;
}
/*
**********************************************************************
** Hardware interrupt handler.  Two sources:
**  - DOORBELL: message Qbuffer traffic with the IOP (read data from
**    message_rbuffer into rqbuffer; push wqbuffer data out).
**  - POSTQUEUE: drain completed command frames from the outbound
**    queue port and complete their SRBs back to CAM.
**********************************************************************
*/
static void arcmsr_interrupt(void *arg)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
	struct CommandControlBlock *srb;
	u_int32_t flag_srb, outbound_intstatus, outbound_doorbell;

	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable;
	CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/
	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
		/*
		*********************************************
		** DOORBELL
		*********************************************
		*/
		outbound_doorbell=CHIP_REG_READ32(outbound_doorbell);
		CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */
		if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
			/* IOP has written message data for us to consume */
			struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer;
			u_int8_t * iop_data=(u_int8_t *)prbuffer->data;
			u_int8_t * pQbuffer;
			u_int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;

			/* check this iop data if overflow my rqbuffer */
			rqbuf_lastindex=acb->rqbuf_lastindex;
			rqbuf_firstindex=acb->rqbuf_firstindex;
			iop_len=prbuffer->data_len;
			/* free space in the circular rqbuffer ring */
			my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
			if(my_empty_len>=iop_len) {
				while(iop_len > 0) {
					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
					memcpy(pQbuffer, iop_data, 1);
					acb->rqbuf_lastindex++;
					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					/*if last index number set it to 0 */
					iop_data++;
					iop_len--;
				}
				CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
				/*signature, let IOP331 know data has been readed */
			} else {
				/* no room: remember to pull the data later (ioctl path) */
				acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
			}
		}
		if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
			/* IOP consumed our last write buffer; push more if queued */
			acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
			/*
			*********************************************
			*********************************************
			*/
			if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
				u_int8_t * pQbuffer;
				struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer;
				u_int8_t * iop_data=(u_int8_t *)pwbuffer->data;
				u_int32_t allxfer_len=0;

				acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
				while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) {
					pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex];
					memcpy(iop_data, pQbuffer, 1);
					acb->wqbuf_firstindex++;
					acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
					/*if last index number set it to 0 */
					iop_data++;
					allxfer_len++;
				}
				pwbuffer->data_len=allxfer_len;
				/*
				** push inbound doorbell tell iop driver data write ok
				** and wait reply on next hwinterrupt for next Qbuffer post
				*/
				CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
			}
			if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
				acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
			}
		}
	}
	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		int target, lun;
		/*
		*****************************************************************************
		** areca cdb command done
		*****************************************************************************
		*/
		bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		while(1) {
			if((flag_srb=CHIP_REG_READ32(outbound_queueport)) == 0xFFFFFFFF) {
				break;/*chip FIFO no srb for completion already*/
			}
			/* check if command done with no error; the FIFO value is the
			** frame's physical address >> 5 (frame must be 32 bytes aligned),
			** converted back to a virtual pointer via vir2phy_offset */
			srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));
			if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
				if(srb->startdone==ARCMSR_SRB_ABORTED) {
					printf("arcmsr%d: srb='%p' isr got aborted command \n"
						, acb->pci_unit, srb);
					srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
					arcmsr_srb_complete(srb, 1);
					continue;
				}
				printf("arcmsr%d: isr get an illegal srb command done"
					"acb='%p' srb='%p' srbacb='%p' startdone=0x%x"
					"srboutstandingcount=%d \n",
					acb->pci_unit, acb, srb, srb->acb,
					srb->startdone, acb->srboutstandingcount);
				continue;
			}
			target=srb->pccb->ccb_h.target_id;
			lun=srb->pccb->ccb_h.target_lun;
			if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) {
				/* clean completion; a previously-gone device is back */
				if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
					acb->devstate[target][lun]=ARECA_RAID_GOOD;
				}
				srb->pccb->ccb_h.status |= CAM_REQ_CMP;
				arcmsr_srb_complete(srb, 1);
			} else {
				/* map firmware DeviceStatus to a CAM status */
				switch(srb->arcmsr_cdb.DeviceStatus) {
				case ARCMSR_DEV_SELECT_TIMEOUT: {
						acb->devstate[target][lun]=ARECA_RAID_GONE;
						srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
						arcmsr_srb_complete(srb, 1);
					}
					break;
				case ARCMSR_DEV_ABORTED:
				case ARCMSR_DEV_INIT_FAIL: {
						acb->devstate[target][lun]=ARECA_RAID_GONE;
						srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
						arcmsr_srb_complete(srb, 1);
					}
					break;
				case SCSISTAT_CHECK_CONDITION: {
						acb->devstate[target][lun]=ARECA_RAID_GOOD;
						arcmsr_report_sense_info(srb);
						arcmsr_srb_complete(srb, 1);
					}
					break;
				default:
					printf("arcmsr%d: scsi id=%d lun=%d"
						"isr get command error done,"
						"but got unknow DeviceStatus=0x%x \n"
						, acb->pci_unit, target, lun
						,srb->arcmsr_cdb.DeviceStatus);
					acb->devstate[target][lun]=ARECA_RAID_GONE;
					srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
					/*unknow error or crc error just for retry*/
					arcmsr_srb_complete(srb, 1);
					break;
				}
			}
		} /*drain reply FIFO*/
	}
	return;
}
/*
*******************************************************************************
** Quiesce the IOP before suspend/shutdown: stop background rebuild and
** flush the adapter cache, but only if BGRB was started.
*******************************************************************************
*/
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	if(acb!=NULL) {
		/* stop adapter background rebuild */
		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
		}
	}
}
/*
***********************************************************************
** Control-node ioctl dispatcher.  Validates the "ARCMSR" signature in
** the user message header, then serves message Qbuffer requests under
** qbuffer_lock.  Declared static in the prototypes above; the missing
** 'static' here keeps internal linkage per C rules, but is inconsistent.
** NOTE: this function continues beyond the end of this excerpt.
************************************************************************
*/
u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
{
	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
	u_int32_t retvalue=EINVAL;

	pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
		return retvalue;
	}
	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
	switch(ioctl_cmd) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
			u_int8_t * pQbuffer;
			u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
			u_int32_t allxfer_len=0;

			/* copy up to 1031 bytes from the driver's rqbuffer ring out to the user buffer */
			while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex) && (allxfer_len<1031)) {
				/*copy READ QBUFFER to srb*/
				pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
				memcpy(ptmpQbuffer, pQbuffer, 1);
				acb->rqbuf_firstindex++;
				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
				/*if last index number set it to 0 */
				ptmpQbuffer++;
				allxfer_len++;
			}
			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				/* interrupt handler deferred IOP data because the ring was
				** full; the drain above made room, so pull it in now */
				struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer;
				u_int8_t * iop_data=(u_int8_t *)prbuffer->data;
				u_int32_t iop_len;

				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				iop_len=(u_int32_t)prbuffer->data_len;
				/*this iop data does no chance to make me overflow again here, so just do it*/
				while(iop_len>0) {
					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
					memcpy(pQbuffer, iop_data, 1);
					acb->rqbuf_lastindex++;
					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					/*if last index number set it to 0 */
					iop_data++;
					iop_len--;
				}
				CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
				/*signature, let IOP331 know data has been readed */
			}
			pcmdmessagefld->cmdmessage.Length=allxfer_len;
			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
			retvalue=ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
			u_int8_t * pQbuffer;
			u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;

			user_len=pcmdmessagefld->cmdmessage.Length;
			/*check if data xfer length of this request will overflow my array qbuffer */
			wqbuf_lastindex=acb->wqbuf_lastindex;
			wqbuf_firstindex=acb->wqbuf_firstindex;
			if(wqbuf_lastindex!=wqbuf_firstindex) {
				/* previous data still queued: kick it out and report busy */
				arcmsr_post_Qbuffer(acb);
				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
			} else {
				my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
				if(my_empty_len>=user_len) {
					while(user_len>0) {
						/*copy srb data to wqbuffer*/
						pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
						memcpy(pQbuffer, ptmpuserbuffer, 1);
						acb->wqbuf_lastindex++;
						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
						/*if last index number set it to 0 */
						ptmpuserbuffer++;
						user_len--;
					}
					/*post fist Qbuffer*/
					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
						acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
						arcmsr_post_Qbuffer(acb);
930 } 931 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 932 } else { 933 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 934 } 935 } 936 retvalue=ARCMSR_MESSAGE_SUCCESS; 937 } 938 break; 939 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 940 u_int8_t * pQbuffer=acb->rqbuffer; 941 942 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 943 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 944 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 945 /*signature, let IOP331 know data has been readed */ 946 } 947 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 948 acb->rqbuf_firstindex=0; 949 acb->rqbuf_lastindex=0; 950 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 951 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 952 retvalue=ARCMSR_MESSAGE_SUCCESS; 953 } 954 break; 955 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: 956 { 957 u_int8_t * pQbuffer=acb->wqbuffer; 958 959 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 960 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 961 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 962 /*signature, let IOP331 know data has been readed */ 963 } 964 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READED); 965 acb->wqbuf_firstindex=0; 966 acb->wqbuf_lastindex=0; 967 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 968 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 969 retvalue=ARCMSR_MESSAGE_SUCCESS; 970 } 971 break; 972 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 973 u_int8_t * pQbuffer; 974 975 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 976 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 977 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 978 /*signature, let IOP331 know data has been readed */ 979 } 980 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED 981 |ACB_F_MESSAGE_RQBUFFER_CLEARED 982 |ACB_F_MESSAGE_WQBUFFER_READED); 983 acb->rqbuf_firstindex=0; 984 acb->rqbuf_lastindex=0; 985 acb->wqbuf_firstindex=0; 986 
acb->wqbuf_lastindex=0; 987 pQbuffer=acb->rqbuffer; 988 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 989 pQbuffer=acb->wqbuffer; 990 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 991 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 992 retvalue=ARCMSR_MESSAGE_SUCCESS; 993 } 994 break; 995 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 996 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F; 997 retvalue=ARCMSR_MESSAGE_SUCCESS; 998 } 999 break; 1000 case ARCMSR_MESSAGE_SAY_HELLO: { 1001 u_int8_t * hello_string="Hello! I am ARCMSR"; 1002 u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer; 1003 1004 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { 1005 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 1006 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1007 return ENOIOCTL; 1008 } 1009 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1010 retvalue=ARCMSR_MESSAGE_SUCCESS; 1011 } 1012 break; 1013 case ARCMSR_MESSAGE_SAY_GOODBYE: { 1014 arcmsr_iop_parking(acb); 1015 retvalue=ARCMSR_MESSAGE_SUCCESS; 1016 } 1017 break; 1018 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { 1019 arcmsr_flush_adapter_cache(acb); 1020 retvalue=ARCMSR_MESSAGE_SUCCESS; 1021 } 1022 break; 1023 } 1024 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1025 return retvalue; 1026 } 1027 /* 1028 ************************************************************************** 1029 ************************************************************************** 1030 */ 1031 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb) 1032 { 1033 struct CommandControlBlock *srb=NULL; 1034 u_int32_t workingsrb_startindex, workingsrb_doneindex; 1035 1036 ARCMSR_LOCK_ACQUIRE(&acb->workingQ_start_lock); 1037 workingsrb_doneindex=acb->workingsrb_doneindex; 1038 workingsrb_startindex=acb->workingsrb_startindex; 1039 srb=acb->srbworkingQ[workingsrb_startindex]; 1040 workingsrb_startindex++; 1041 
workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; 1042 if(workingsrb_doneindex!=workingsrb_startindex) { 1043 acb->workingsrb_startindex=workingsrb_startindex; 1044 } else { 1045 srb=NULL; 1046 } 1047 ARCMSR_LOCK_RELEASE(&acb->workingQ_start_lock); 1048 return(srb); 1049 } 1050 /* 1051 ************************************************************************** 1052 ************************************************************************** 1053 */ 1054 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb) 1055 { 1056 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 1057 int retvalue = 0, transfer_len = 0; 1058 char *buffer; 1059 uint32_t controlcode = (uint32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | 1060 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | 1061 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | 1062 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[8]; 1063 /* 4 bytes: Areca io control code */ 1064 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1065 buffer = pccb->csio.data_ptr; 1066 transfer_len = pccb->csio.dxfer_len; 1067 } else { 1068 retvalue = ARCMSR_MESSAGE_FAIL; 1069 goto message_out; 1070 } 1071 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 1072 retvalue = ARCMSR_MESSAGE_FAIL; 1073 goto message_out; 1074 } 1075 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; 1076 switch(controlcode) { 1077 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1078 u_int8_t *pQbuffer; 1079 u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 1080 int32_t allxfer_len = 0; 1081 1082 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1083 && (allxfer_len < 1031)) { 1084 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1085 memcpy(ptmpQbuffer, pQbuffer, 1); 1086 acb->rqbuf_firstindex++; 1087 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 1088 ptmpQbuffer++; 1089 allxfer_len++; 1090 } 1091 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1092 struct QBUFFER *prbuffer = (struct QBUFFER *) &acb->pmu->message_rbuffer; 1093 u_int8_t *iop_data = 
(u_int8_t *)prbuffer->data; 1094 int32_t iop_len; 1095 1096 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1097 iop_len =(u_int32_t)prbuffer->data_len; 1098 while (iop_len > 0) { 1099 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 1100 memcpy(pQbuffer, iop_data, 1); 1101 acb->rqbuf_lastindex++; 1102 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1103 iop_data++; 1104 iop_len--; 1105 } 1106 CHIP_REG_WRITE32(inbound_doorbell, 1107 ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1108 } 1109 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1110 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1111 retvalue=ARCMSR_MESSAGE_SUCCESS; 1112 } 1113 break; 1114 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1115 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1116 u_int8_t *pQbuffer; 1117 u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 1118 1119 user_len = pcmdmessagefld->cmdmessage.Length; 1120 wqbuf_lastindex = acb->wqbuf_lastindex; 1121 wqbuf_firstindex = acb->wqbuf_firstindex; 1122 if (wqbuf_lastindex != wqbuf_firstindex) { 1123 arcmsr_post_Qbuffer(acb); 1124 /* has error report sensedata */ 1125 if(&pccb->csio.sense_data) { 1126 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 1127 /* Valid,ErrorCode */ 1128 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 1129 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 1130 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 1131 /* AdditionalSenseLength */ 1132 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 1133 /* AdditionalSenseCode */ 1134 } 1135 retvalue = ARCMSR_MESSAGE_FAIL; 1136 } else { 1137 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) 1138 &(ARCMSR_MAX_QBUFFER - 1); 1139 if (my_empty_len >= user_len) { 1140 while (user_len > 0) { 1141 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; 1142 memcpy(pQbuffer, ptmpuserbuffer, 1); 1143 acb->wqbuf_lastindex++; 1144 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1145 ptmpuserbuffer++; 1146 user_len--; 1147 } 1148 if 
(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 1149 acb->acb_flags &= 1150 ~ACB_F_MESSAGE_WQBUFFER_CLEARED; 1151 arcmsr_post_Qbuffer(acb); 1152 } 1153 } else { 1154 /* has error report sensedata */ 1155 if(&pccb->csio.sense_data) { 1156 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 1157 /* Valid,ErrorCode */ 1158 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 1159 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 1160 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 1161 /* AdditionalSenseLength */ 1162 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 1163 /* AdditionalSenseCode */ 1164 } 1165 retvalue = ARCMSR_MESSAGE_FAIL; 1166 } 1167 } 1168 } 1169 break; 1170 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 1171 u_int8_t *pQbuffer = acb->rqbuffer; 1172 1173 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1174 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1175 CHIP_REG_WRITE32(inbound_doorbell 1176 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1177 } 1178 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 1179 acb->rqbuf_firstindex = 0; 1180 acb->rqbuf_lastindex = 0; 1181 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1182 pcmdmessagefld->cmdmessage.ReturnCode = 1183 ARCMSR_MESSAGE_RETURNCODE_OK; 1184 } 1185 break; 1186 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 1187 u_int8_t *pQbuffer = acb->wqbuffer; 1188 1189 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1190 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1191 CHIP_REG_WRITE32(inbound_doorbell 1192 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1193 } 1194 acb->acb_flags |= 1195 (ACB_F_MESSAGE_WQBUFFER_CLEARED | 1196 ACB_F_MESSAGE_WQBUFFER_READED); 1197 acb->wqbuf_firstindex = 0; 1198 acb->wqbuf_lastindex = 0; 1199 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1200 pcmdmessagefld->cmdmessage.ReturnCode = 1201 ARCMSR_MESSAGE_RETURNCODE_OK; 1202 } 1203 break; 1204 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 1205 u_int8_t *pQbuffer; 1206 1207 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1208 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 
1209 CHIP_REG_WRITE32(inbound_doorbell 1210 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1211 } 1212 acb->acb_flags |= 1213 (ACB_F_MESSAGE_WQBUFFER_CLEARED 1214 | ACB_F_MESSAGE_RQBUFFER_CLEARED 1215 | ACB_F_MESSAGE_WQBUFFER_READED); 1216 acb->rqbuf_firstindex = 0; 1217 acb->rqbuf_lastindex = 0; 1218 acb->wqbuf_firstindex = 0; 1219 acb->wqbuf_lastindex = 0; 1220 pQbuffer = acb->rqbuffer; 1221 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 1222 pQbuffer = acb->wqbuffer; 1223 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 1224 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1225 } 1226 break; 1227 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 1228 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 1229 } 1230 break; 1231 case ARCMSR_MESSAGE_SAY_HELLO: { 1232 int8_t * hello_string = "Hello! I am ARCMSR"; 1233 1234 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 1235 , (int16_t)strlen(hello_string)); 1236 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1237 } 1238 break; 1239 case ARCMSR_MESSAGE_SAY_GOODBYE: 1240 arcmsr_iop_parking(acb); 1241 break; 1242 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 1243 arcmsr_flush_adapter_cache(acb); 1244 break; 1245 default: 1246 retvalue = ARCMSR_MESSAGE_FAIL; 1247 } 1248 message_out: 1249 return retvalue; 1250 } 1251 /* 1252 ********************************************************************* 1253 ********************************************************************* 1254 */ 1255 static void arcmsr_executesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1256 { 1257 struct CommandControlBlock *srb=(struct CommandControlBlock *)arg; 1258 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb; 1259 union ccb * pccb; 1260 int target, lun; 1261 1262 pccb=srb->pccb; 1263 target=pccb->ccb_h.target_id; 1264 lun=pccb->ccb_h.target_lun; 1265 if(error != 0) { 1266 if(error != EFBIG) { 1267 printf("arcmsr%d: unexpected error %x returned from 
'bus_dmamap_load' \n" 1268 , acb->pci_unit, error); 1269 } 1270 if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1271 xpt_freeze_devq(pccb->ccb_h.path, /*count*/1); 1272 pccb->ccb_h.status |= (CAM_REQ_TOO_BIG|CAM_DEV_QFRZN); 1273 } 1274 arcmsr_srb_complete(srb, 0); 1275 return; 1276 } 1277 if(nseg > ARCMSR_MAX_SG_ENTRIES) { 1278 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; 1279 arcmsr_srb_complete(srb, 0); 1280 return; 1281 } 1282 if(acb->acb_flags & ACB_F_BUS_RESET) { 1283 printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); 1284 pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; 1285 arcmsr_srb_complete(srb, 0); 1286 return; 1287 } 1288 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 1289 u_int8_t block_cmd; 1290 1291 block_cmd=pccb->csio.cdb_io.cdb_bytes[0] & 0x0f; 1292 if(block_cmd==0x08 || block_cmd==0x0a) { 1293 printf("arcmsr%d:block 'read/write' command" 1294 "with gone raid volume Cmd=%2x, TargetId=%d, Lun=%d \n" 1295 , acb->pci_unit, block_cmd, target, lun); 1296 pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 1297 arcmsr_srb_complete(srb, 0); 1298 return; 1299 } 1300 } 1301 if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1302 if(nseg != 0) { 1303 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 1304 } 1305 arcmsr_srb_complete(srb, 0); 1306 return; 1307 } 1308 pccb->ccb_h.status |= CAM_SIM_QUEUED; 1309 if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) { 1310 pccb->ccb_h.status &= ~CAM_STATUS_MASK; 1311 pccb->ccb_h.status |= (CAM_REQUEUE_REQ|CAM_DEV_QFRZN); 1312 acb->acb_flags |= ACB_F_CAM_DEV_QFRZN; 1313 arcmsr_srb_complete(srb, 0); 1314 return; 1315 } 1316 arcmsr_build_srb(srb, dm_segs, nseg); 1317 arcmsr_post_srb(acb, srb); 1318 return; 1319 } 1320 /* 1321 ***************************************************************************************** 1322 ***************************************************************************************** 1323 */ 1324 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb) 1325 
{ 1326 struct CommandControlBlock *srb; 1327 struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; 1328 u_int32_t intmask_org, mask; 1329 int i=0; 1330 1331 acb->num_aborts++; 1332 /* 1333 *************************************************************************** 1334 ** It is the upper layer do abort command this lock just prior to calling us. 1335 ** First determine if we currently own this command. 1336 ** Start by searching the device queue. If not found 1337 ** at all, and the system wanted us to just abort the 1338 ** command return success. 1339 *************************************************************************** 1340 */ 1341 if(acb->srboutstandingcount!=0) { 1342 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 1343 srb=acb->psrb_pool[i]; 1344 if(srb->startdone==ARCMSR_SRB_START) { 1345 if(srb->pccb==abortccb) { 1346 srb->startdone=ARCMSR_SRB_ABORTED; 1347 printf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'" 1348 "outstanding command \n" 1349 , acb->pci_unit, abortccb->ccb_h.target_id 1350 , abortccb->ccb_h.target_lun, srb); 1351 goto abort_outstanding_cmd; 1352 } 1353 } 1354 } 1355 } 1356 return(FALSE); 1357 abort_outstanding_cmd: 1358 /* do not talk to iop 331 abort command */ 1359 UDELAY(3000*1000);/*wait for 3 sec for all command done*/ 1360 /* disable all outbound interrupt */ 1361 intmask_org=CHIP_REG_READ32(outbound_intmask); 1362 CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 1363 arcmsr_polling_srbdone(acb, srb); 1364 /* enable all outbound interrupt */ 1365 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 1366 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 1367 return (TRUE); 1368 } 1369 /* 1370 **************************************************************************** 1371 **************************************************************************** 1372 */ 1373 static void arcmsr_bus_reset(struct 
AdapterControlBlock *acb) 1374 { 1375 int retry=0; 1376 1377 acb->num_resets++; 1378 acb->acb_flags |=ACB_F_BUS_RESET; 1379 while(acb->srboutstandingcount!=0 && retry < 400) { 1380 arcmsr_interrupt((void *)acb); 1381 UDELAY(25000); 1382 retry++; 1383 } 1384 arcmsr_iop_reset(acb); 1385 acb->acb_flags &= ~ACB_F_BUS_RESET; 1386 return; 1387 } 1388 /* 1389 ************************************************************************** 1390 ************************************************************************** 1391 */ 1392 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, 1393 union ccb * pccb) 1394 { 1395 pccb->ccb_h.status |= CAM_REQ_CMP; 1396 switch (pccb->csio.cdb_io.cdb_bytes[0]) { 1397 case INQUIRY: { 1398 unsigned char inqdata[36]; 1399 char *buffer=pccb->csio.data_ptr;; 1400 1401 if (pccb->ccb_h.target_lun) { 1402 pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 1403 xpt_done(pccb); 1404 return; 1405 } 1406 inqdata[0] = T_PROCESSOR; 1407 /* Periph Qualifier & Periph Dev Type */ 1408 inqdata[1] = 0; 1409 /* rem media bit & Dev Type Modifier */ 1410 inqdata[2] = 0; 1411 /* ISO, ECMA, & ANSI versions */ 1412 inqdata[4] = 31; 1413 /* length of additional data */ 1414 strncpy(&inqdata[8], "Areca ", 8); 1415 /* Vendor Identification */ 1416 strncpy(&inqdata[16], "RAID controller ", 16); 1417 /* Product Identification */ 1418 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 1419 memcpy(buffer, inqdata, sizeof(inqdata)); 1420 xpt_done(pccb); 1421 } 1422 break; 1423 case WRITE_BUFFER: 1424 case READ_BUFFER: { 1425 if (arcmsr_iop_message_xfer(acb, pccb)) { 1426 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1427 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 1428 } 1429 xpt_done(pccb); 1430 } 1431 break; 1432 default: 1433 xpt_done(pccb); 1434 } 1435 } 1436 /* 1437 ********************************************************************* 1438 ********************************************************************* 1439 */ 1440 static void 
arcmsr_action(struct cam_sim * psim, union ccb * pccb) 1441 { 1442 struct AdapterControlBlock * acb; 1443 1444 acb=(struct AdapterControlBlock *) cam_sim_softc(psim); 1445 if(acb==NULL) { 1446 pccb->ccb_h.status |= CAM_REQ_INVALID; 1447 xpt_done(pccb); 1448 return; 1449 } 1450 switch (pccb->ccb_h.func_code) { 1451 case XPT_SCSI_IO: { 1452 struct CommandControlBlock *srb; 1453 int target=pccb->ccb_h.target_id; 1454 1455 if(target == 16) { 1456 /* virtual device for iop message transfer */ 1457 arcmsr_handle_virtual_command(acb, pccb); 1458 return; 1459 } 1460 if((srb=arcmsr_get_freesrb(acb)) == NULL) { 1461 pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; 1462 xpt_done(pccb); 1463 return; 1464 } 1465 pccb->ccb_h.arcmsr_ccbsrb_ptr=srb; 1466 pccb->ccb_h.arcmsr_ccbacb_ptr=acb; 1467 srb->pccb=pccb; 1468 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1469 if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) { 1470 /* Single buffer */ 1471 if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) { 1472 /* Buffer is virtual */ 1473 u_int32_t error, s; 1474 1475 s=splsoftvm(); 1476 error = bus_dmamap_load(acb->dm_segs_dmat 1477 , srb->dm_segs_dmamap 1478 , pccb->csio.data_ptr 1479 , pccb->csio.dxfer_len 1480 , arcmsr_executesrb, srb, /*flags*/0); 1481 if(error == EINPROGRESS) { 1482 xpt_freeze_simq(acb->psim, 1); 1483 pccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1484 } 1485 splx(s); 1486 } else { 1487 /* Buffer is physical */ 1488 panic("arcmsr: CAM_DATA_PHYS not supported"); 1489 } 1490 } else { 1491 /* Scatter/gather list */ 1492 struct bus_dma_segment *segs; 1493 1494 if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 1495 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1496 pccb->ccb_h.status |= CAM_PROVIDE_FAIL; 1497 xpt_done(pccb); 1498 free(srb, M_DEVBUF); 1499 return; 1500 } 1501 segs=(struct bus_dma_segment *)pccb->csio.data_ptr; 1502 arcmsr_executesrb(srb, segs, pccb->csio.sglist_cnt, 0); 1503 } 1504 } else { 1505 arcmsr_executesrb(srb, NULL, 0, 0); 1506 } 1507 break; 1508 } 1509 case 
XPT_TARGET_IO: { 1510 /* target mode not yet support vendor specific commands. */ 1511 pccb->ccb_h.status |= CAM_REQ_CMP; 1512 xpt_done(pccb); 1513 break; 1514 } 1515 case XPT_PATH_INQ: { 1516 struct ccb_pathinq *cpi= &pccb->cpi; 1517 1518 cpi->version_num=1; 1519 cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE; 1520 cpi->target_sprt=0; 1521 cpi->hba_misc=0; 1522 cpi->hba_eng_cnt=0; 1523 cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */ 1524 cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */ 1525 cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */ 1526 cpi->bus_id=cam_sim_bus(psim); 1527 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 1528 strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); 1529 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); 1530 cpi->unit_number=cam_sim_unit(psim); 1531 cpi->transport = XPORT_SPI; 1532 cpi->transport_version = 2; 1533 cpi->protocol = PROTO_SCSI; 1534 cpi->protocol_version = SCSI_REV_2; 1535 cpi->ccb_h.status |= CAM_REQ_CMP; 1536 xpt_done(pccb); 1537 break; 1538 } 1539 case XPT_ABORT: { 1540 union ccb *pabort_ccb; 1541 1542 pabort_ccb=pccb->cab.abort_ccb; 1543 switch (pabort_ccb->ccb_h.func_code) { 1544 case XPT_ACCEPT_TARGET_IO: 1545 case XPT_IMMED_NOTIFY: 1546 case XPT_CONT_TARGET_IO: 1547 if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { 1548 pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; 1549 xpt_done(pabort_ccb); 1550 pccb->ccb_h.status |= CAM_REQ_CMP; 1551 } else { 1552 xpt_print_path(pabort_ccb->ccb_h.path); 1553 printf("Not found\n"); 1554 pccb->ccb_h.status |= CAM_PATH_INVALID; 1555 } 1556 break; 1557 case XPT_SCSI_IO: 1558 pccb->ccb_h.status |= CAM_UA_ABORT; 1559 break; 1560 default: 1561 pccb->ccb_h.status |= CAM_REQ_INVALID; 1562 break; 1563 } 1564 xpt_done(pccb); 1565 break; 1566 } 1567 case XPT_RESET_BUS: 1568 case XPT_RESET_DEV: { 1569 u_int32_t i; 1570 1571 arcmsr_bus_reset(acb); 1572 for (i=0; i < 500; i++) { 1573 DELAY(1000); 1574 } 1575 pccb->ccb_h.status |= CAM_REQ_CMP; 1576 xpt_done(pccb); 1577 break; 1578 } 1579 case 
XPT_TERM_IO: { 1580 pccb->ccb_h.status |= CAM_REQ_INVALID; 1581 xpt_done(pccb); 1582 break; 1583 } 1584 case XPT_GET_TRAN_SETTINGS: { 1585 struct ccb_trans_settings *cts; 1586 struct ccb_trans_settings_scsi *scsi; 1587 struct ccb_trans_settings_spi *spi; 1588 1589 if(pccb->ccb_h.target_id == 16) { 1590 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 1591 xpt_done(pccb); 1592 break; 1593 } 1594 1595 cts= &pccb->cts; 1596 scsi = &cts->proto_specific.scsi; 1597 spi = &cts->xport_specific.spi; 1598 1599 cts->protocol = PROTO_SCSI; 1600 cts->protocol_version = SCSI_REV_2; 1601 cts->transport = XPORT_SPI; 1602 cts->transport_version = 2; 1603 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 1604 spi->sync_period=3; 1605 spi->sync_offset=32; 1606 spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT; 1607 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 1608 spi->valid = CTS_SPI_VALID_SYNC_RATE 1609 | CTS_SPI_VALID_SYNC_OFFSET 1610 | CTS_SPI_VALID_BUS_WIDTH; 1611 scsi->valid = CTS_SCSI_VALID_TQ; 1612 1613 pccb->ccb_h.status |= CAM_REQ_CMP; 1614 xpt_done(pccb); 1615 break; 1616 } 1617 case XPT_SET_TRAN_SETTINGS: { 1618 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 1619 xpt_done(pccb); 1620 break; 1621 } 1622 case XPT_CALC_GEOMETRY: { 1623 struct ccb_calc_geometry *ccg; 1624 u_int32_t size_mb; 1625 u_int32_t secs_per_cylinder; 1626 1627 if(pccb->ccb_h.target_id == 16) { 1628 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 1629 xpt_done(pccb); 1630 break; 1631 } 1632 ccg= &pccb->ccg; 1633 if (ccg->block_size == 0) { 1634 pccb->ccb_h.status = CAM_REQ_INVALID; 1635 xpt_done(pccb); 1636 break; 1637 } 1638 if(((1024L * 1024L)/ccg->block_size) < 0) { 1639 pccb->ccb_h.status = CAM_REQ_INVALID; 1640 xpt_done(pccb); 1641 break; 1642 } 1643 size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size); 1644 if(size_mb > 1024 ) { 1645 ccg->heads=255; 1646 ccg->secs_per_track=63; 1647 } else { 1648 ccg->heads=64; 1649 ccg->secs_per_track=32; 1650 } 1651 secs_per_cylinder=ccg->heads * ccg->secs_per_track; 1652 ccg->cylinders=ccg->volume_size / 
secs_per_cylinder; 1653 pccb->ccb_h.status |= CAM_REQ_CMP; 1654 xpt_done(pccb); 1655 break; 1656 } 1657 default: 1658 pccb->ccb_h.status |= CAM_REQ_INVALID; 1659 xpt_done(pccb); 1660 break; 1661 } 1662 return; 1663 } 1664 /* 1665 ********************************************************************** 1666 ********************************************************************** 1667 */ 1668 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) 1669 { 1670 acb->acb_flags |= ACB_F_MSG_START_BGRB; 1671 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); 1672 if(arcmsr_wait_msgint_ready(acb)) { 1673 printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit); 1674 } 1675 return; 1676 } 1677 /* 1678 ********************************************************************** 1679 ********************************************************************** 1680 */ 1681 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 1682 { 1683 struct CommandControlBlock *srb; 1684 uint32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; 1685 int id, lun; 1686 1687 polling_srb_retry: 1688 poll_count++; 1689 outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable; 1690 CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/ 1691 while(1) { 1692 if((flag_srb=CHIP_REG_READ32(outbound_queueport))==0xFFFFFFFF) { 1693 if(poll_srb_done) { 1694 break;/*chip FIFO no ccb for completion already*/ 1695 } else { 1696 UDELAY(25000); 1697 if(poll_count > 100) { 1698 break; 1699 } 1700 goto polling_srb_retry; 1701 } 1702 } 1703 /* check ifcommand done with no error*/ 1704 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5)); 1705 /*frame must be 32 bytes aligned*/ 1706 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 1707 if((srb->startdone==ARCMSR_SRB_ABORTED) && (srb==poll_srb)) { 1708 printf("arcmsr%d: scsi 
id=%d lun=%d srb='%p'" 1709 "poll command abort successfully \n" 1710 , acb->pci_unit 1711 , srb->pccb->ccb_h.target_id 1712 , srb->pccb->ccb_h.target_lun, srb); 1713 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 1714 arcmsr_srb_complete(srb, 1); 1715 poll_srb_done=1; 1716 continue; 1717 } 1718 printf("arcmsr%d: polling get an illegal srb command done srb='%p'" 1719 "srboutstandingcount=%d \n" 1720 , acb->pci_unit 1721 , srb, acb->srboutstandingcount); 1722 continue; 1723 } 1724 id=srb->pccb->ccb_h.target_id; 1725 lun=srb->pccb->ccb_h.target_lun; 1726 if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) { 1727 if(acb->devstate[id][lun]==ARECA_RAID_GONE) { 1728 acb->devstate[id][lun]=ARECA_RAID_GOOD; 1729 } 1730 srb->pccb->ccb_h.status |= CAM_REQ_CMP; 1731 arcmsr_srb_complete(srb, 1); 1732 } else { 1733 switch(srb->arcmsr_cdb.DeviceStatus) { 1734 case ARCMSR_DEV_SELECT_TIMEOUT: { 1735 acb->devstate[id][lun]=ARECA_RAID_GONE; 1736 srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 1737 arcmsr_srb_complete(srb, 1); 1738 } 1739 break; 1740 case ARCMSR_DEV_ABORTED: 1741 case ARCMSR_DEV_INIT_FAIL: { 1742 acb->devstate[id][lun]=ARECA_RAID_GONE; 1743 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 1744 arcmsr_srb_complete(srb, 1); 1745 } 1746 break; 1747 case SCSISTAT_CHECK_CONDITION: { 1748 acb->devstate[id][lun]=ARECA_RAID_GOOD; 1749 arcmsr_report_sense_info(srb); 1750 arcmsr_srb_complete(srb, 1); 1751 } 1752 break; 1753 default: 1754 printf("arcmsr%d: scsi id=%d lun=%d" 1755 "polling and getting command error done" 1756 ", but got unknow DeviceStatus=0x%x \n" 1757 , acb->pci_unit, id, lun, srb->arcmsr_cdb.DeviceStatus); 1758 acb->devstate[id][lun]=ARECA_RAID_GONE; 1759 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; 1760 /*unknow error or crc error just for retry*/ 1761 arcmsr_srb_complete(srb, 1); 1762 break; 1763 } 1764 } 1765 } /*drain reply FIFO*/ 1766 return; 1767 } 1768 /* 1769 ********************************************************************** 1770 ** get firmware miscellaneous 
data
**********************************************************************
*/
/*
** Read the adapter's firmware "miscellaneous data" out of the message
** unit after issuing a GET_CONFIG message, caching the model string,
** firmware version string, and the firmware's request length / queue
** depth / SDRAM size / channel count in the softc.
** Must only be called after acb->btag/acb->bhandle/acb->pmu are valid.
*/
static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	char *acb_firm_model=acb->firm_model;
	char *acb_firm_version=acb->firm_version;
	size_t iop_firm_model=offsetof(struct MessageUnit,message_rwbuffer[15]);	/*firm_model,15,60-67*/
	size_t iop_firm_version=offsetof(struct MessageUnit,message_rwbuffer[17]);	/*firm_version,17,68-83*/
	int i;

	/* ask the IOP to populate message_rwbuffer with its config data */
	CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
	if(arcmsr_wait_msgint_ready(acb)) {
		/* timeout is only logged; the reads below may then return stale data */
		printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n"
			, acb->pci_unit);
	}
	i=0;
	while(i<8) {
		*acb_firm_model=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_model+i);
		/* 8 bytes firm_model, 15, 60-67*/
		acb_firm_model++;
		i++;
	}
	i=0;
	while(i<16) {
		*acb_firm_version=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_version+i);
		/* 16 bytes firm_version, 17, 68-83*/
		acb_firm_version++;
		i++;
	}
	/* NOTE(review): assumes acb->firm_model/firm_version are at least 8/16
	** bytes and that the firmware NUL-terminates or pads the strings —
	** confirm against the softc declaration. */
	printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
	printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
	acb->firm_request_len=CHIP_REG_READ32(message_rwbuffer[1]);	/*firm_request_len, 1, 04-07*/
	acb->firm_numbers_queue=CHIP_REG_READ32(message_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
	acb->firm_sdram_size=CHIP_REG_READ32(message_rwbuffer[3]);	/*firm_sdram_size, 3, 12-15*/
	acb->firm_ide_channels=CHIP_REG_READ32(message_rwbuffer[4]);	/*firm_ide_channels, 4, 16-19*/
	return;
}
/*
**********************************************************************
** Bring the IOP online: wait for firmware-ready, fetch the firmware
** config, start background rebuild, clear any pending doorbell, then
** enable the outbound post-queue and doorbell interrupts.
**********************************************************************
*/
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	u_int32_t intmask_org, mask, outbound_doorbell, firmware_state=0;

	/* spin (unbounded) until the IOP firmware reports ready */
	do {
		firmware_state=CHIP_REG_READ32(outbound_msgaddr1);
	} while((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)==0);
	/* mask the message0 interrupt while we talk to the IOP via messages */
	intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
	CHIP_REG_WRITE32(outbound_intmask, intmask_org);
	/* NOTE(review): this second read re-captures the mask AFTER message0
	** was disabled above, so the "original" mask restored further down
	** keeps message0 masked. Looks intentional (message ints are polled
	** via arcmsr_wait_msgint_ready) but worth confirming. */
	intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
	arcmsr_get_firmware_spec(acb);
	arcmsr_start_adapter_bgrb(acb);
	/* clear Qbuffer if door bell ringed */
	outbound_doorbell=CHIP_REG_READ32(outbound_doorbell);
	CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */
	CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
	/* enable outbound Post Queue, outbound message0, outbell doorbell Interrupt */
	mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
	CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask);
	/* remember which outbound interrupts we left enabled (low byte only) */
	acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
	acb->acb_flags |=ACB_F_IOP_INITED;
	return;
}
/*
**********************************************************************
** bus_dmamap_load() callback: carve the coherent DMA pool into
** 32-byte-aligned CommandControlBlocks, create a per-SRB S/G dmamap,
** and record each SRB's physical address (shifted >>5 for the IOP).
**********************************************************************
*/
static void arcmsr_map_freesrb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct AdapterControlBlock *acb=arg;
	struct CommandControlBlock *srb_tmp;
	u_int8_t * dma_memptr;
	u_int32_t i, srb_phyaddr_hi32;
	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;

	dma_memptr=acb->uncacheptr;
	srb_phyaddr=segs->ds_addr;	/* We suppose bus_addr_t high part always 0 here*/
	/* round both the virtual and physical base up to a 32-byte boundary */
	if(((unsigned long)dma_memptr & 0x1F)!=0) {
		dma_memptr=dma_memptr+(0x20-((unsigned long)dma_memptr & 0x1F));
		srb_phyaddr=srb_phyaddr+(0x20-((unsigned long)srb_phyaddr & 0x1F));
	}
	srb_tmp=(struct CommandControlBlock *)dma_memptr;
	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
		/*srb address must 32 (0x20) boundary*/
		/* NOTE(review): only the pool base was aligned above, so this
		** per-SRB check implies sizeof(struct CommandControlBlock) is
		** assumed to be a multiple of 0x20 — confirm in the header. */
		if(((unsigned long)srb_tmp & 0x1F)==0) {
			if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
				acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
				printf("arcmsr%d: srb dmamap bus_dmamap_create error\n", acb->pci_unit);
				return;
			}
			srb_tmp->cdb_shifted_phyaddr=srb_phyaddr >> 5;
			srb_tmp->acb=acb;
			acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
			srb_phyaddr=srb_phyaddr+sizeof(struct CommandControlBlock);
		} else {
			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
			printf("arcmsr%d: dma_memptr=%p i=%d"
				"this srb cross 32 bytes boundary ignored srb_tmp=%p \n"
				, acb->pci_unit, dma_memptr, i, srb_tmp);
			return;
		}
		srb_tmp++;
	}
	/* both pointers advanced in lockstep, so this difference is the
	** constant virtual-to-physical offset for the whole pool */
	acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr;
	/*
	********************************************************************
	** here we need to tell iop 331 our freesrb.HighPart
	** if freesrb.HighPart is not zero
	********************************************************************
	*/
	srb_phyaddr_hi32=(uint32_t) ((srb_phyaddr>>16)>>16);
	if(srb_phyaddr_hi32!=0) {
		/* NOTE(review): this callback is invoked from bus_dmamap_load()
		** in arcmsr_initialize() BEFORE acb->pmu/btag/bhandle are set
		** up, so these register writes would use an uninitialized
		** mapping on systems where HighPart != 0 — verify. */
		CHIP_REG_WRITE32(message_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
		CHIP_REG_WRITE32(message_rwbuffer[1], srb_phyaddr_hi32);
		CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
		if(arcmsr_wait_msgint_ready(acb)) {
			printf("arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
		}
	}
	return;
}
/*
************************************************************************
** Release the control device node and the busdma tags/maps created by
** arcmsr_initialize().
************************************************************************
*/
static void arcmsr_free_resource(struct AdapterControlBlock *acb)
{
	/* remove the control device */
	if(acb->ioctl_dev != NULL) {
		destroy_dev(acb->ioctl_dev);
	}
	/* NOTE(review): the memory obtained via bus_dmamem_alloc() is never
	** released with bus_dmamem_free(acb->srb_dmat, acb->uncacheptr,
	** acb->srb_dmamap) — likely DMA memory leak; also, maps from
	** bus_dmamem_alloc() are conventionally freed by bus_dmamem_free(),
	** not bus_dmamap_destroy(). The per-SRB dm_segs_dmamap maps created
	** in arcmsr_map_freesrb() are not destroyed either. Confirm against
	** busdma(9). */
	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
	bus_dma_tag_destroy(acb->srb_dmat);
	bus_dma_tag_destroy(acb->dm_segs_dmat);
	bus_dma_tag_destroy(acb->parent_dmat);
	return;
}
/*
************************************************************************
** One-time adapter setup: create the busdma tag hierarchy, allocate
** and map the SRB pool, enable PCI bus mastering/memory decoding, map
** BAR(0) as the MessageUnit, then hand off to arcmsr_iop_init().
** Returns 0 on success or ENOMEM/ENXIO on failure (all partially
** acquired resources are released before returning an error).
************************************************************************
*/
static u_int32_t arcmsr_initialize(device_t dev)
{
	struct AdapterControlBlock *acb=device_get_softc(dev);
	u_int32_t intmask_org, rid=PCIR_BAR(0);
	vm_offset_t mem_base;
	u_int16_t pci_command;
	int i, j;

	/* top-level parent tag: unrestricted, everything else derives from it */
#if __FreeBSD_version >= 502010
	if(bus_dma_tag_create( /*parent*/ NULL,
		/*alignemnt*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
		/*nsegments*/ BUS_SPACE_UNRESTRICTED,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		/*lockfunc*/ NULL,
		/*lockarg*/ NULL,
		&acb->parent_dmat) != 0)
#else
	if(bus_dma_tag_create( /*parent*/ NULL,
		/*alignemnt*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
		/*nsegments*/ BUS_SPACE_UNRESTRICTED,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->parent_dmat) != 0)
#endif
	{
		printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", acb->pci_unit);
		return ENOMEM;
	}
	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
#if __FreeBSD_version >= 502010
	if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
		/*alignment*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ MAXBSIZE,
		/*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		/*lockfunc*/ busdma_lock_mutex,
		/*lockarg*/ &Giant,
		&acb->dm_segs_dmat) != 0)
#else
	if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
		/*alignment*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ MAXBSIZE,
		/*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->dm_segs_dmat) != 0)
#endif
	{
		bus_dma_tag_destroy(acb->parent_dmat);
		printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", acb->pci_unit);
		return ENOMEM;
	}
	/* DMA tag for our srb structures.... Allocate the freesrb memory */
	/* single contiguous segment, constrained below 4GB (32-bit lowaddr) */
#if __FreeBSD_version >= 502010
	if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
		/*alignment*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ ARCMSR_SRBS_POOL_SIZE,
		/*nsegments*/ 1,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		/*lockfunc*/ NULL,
		/*lockarg*/ NULL,
		&acb->srb_dmat) != 0)
#else
	if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
		/*alignment*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ ARCMSR_SRBS_POOL_SIZE,
		/*nsegments*/ 1,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->srb_dmat) != 0)
#endif
	{
		bus_dma_tag_destroy(acb->dm_segs_dmat);
		bus_dma_tag_destroy(acb->parent_dmat);
		printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", acb->pci_unit);
		return ENXIO;
	}
	/* Allocation for our srbs */
	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr
		, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &acb->srb_dmamap) != 0) {
		bus_dma_tag_destroy(acb->srb_dmat);
		bus_dma_tag_destroy(acb->dm_segs_dmat);
		bus_dma_tag_destroy(acb->parent_dmat);
		printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", acb->pci_unit);
		return ENXIO;
	}
	/* And permanently map them */
	/* arcmsr_map_freesrb() runs as the load callback and partitions the
	** pool; any failure in it is reported via ACB_F_MAPFREESRB_FAILD,
	** which is checked further below */
	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr
		, ARCMSR_SRBS_POOL_SIZE, arcmsr_map_freesrb, acb, /*flags*/0)) {
		bus_dma_tag_destroy(acb->srb_dmat);
		bus_dma_tag_destroy(acb->dm_segs_dmat);
		bus_dma_tag_destroy(acb->parent_dmat);
		printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", acb->pci_unit);
		return ENXIO;
	}
	/* turn on bus mastering, parity error response, memory-write-and-
	** invalidate, and memory-space decoding */
	pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
	pci_command |= PCIM_CMD_BUSMASTEREN;
	pci_command |= PCIM_CMD_PERRESPEN;
	pci_command |= PCIM_CMD_MWRICEN;
	/* Enable Busmaster/Mem */
	pci_command |= PCIM_CMD_MEMEN;
	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
	/* map BAR(0): the message unit register window */
	acb->sys_res_arcmsr=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, 0x1000, RF_ACTIVE);
	if(acb->sys_res_arcmsr == NULL) {
		arcmsr_free_resource(acb);
		printf("arcmsr%d: bus_alloc_resource failure!\n", acb->pci_unit);
		return ENOMEM;
	}
	if(rman_get_start(acb->sys_res_arcmsr) <= 0) {
		arcmsr_free_resource(acb);
		printf("arcmsr%d: rman_get_start failure!\n", acb->pci_unit);
		return ENXIO;
	}
	mem_base=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr);
	if(mem_base==0) {
		arcmsr_free_resource(acb);
		printf("arcmsr%d: rman_get_virtual failure!\n", acb->pci_unit);
		return ENXIO;
	}
	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
		arcmsr_free_resource(acb);
		printf("arcmsr%d: map free srb failure!\n", acb->pci_unit);
		return ENXIO;
	}
	acb->btag=rman_get_bustag(acb->sys_res_arcmsr);
	acb->bhandle=rman_get_bushandle(acb->sys_res_arcmsr);
	acb->pmu=(struct MessageUnit *)mem_base;
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
		|ACB_F_MESSAGE_RQBUFFER_CLEARED
		|ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	/*
	********************************************************************
	** init raid volume state
	********************************************************************
	*/
	for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
		for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
			acb->devstate[i][j]=ARECA_RAID_GOOD;
		}
	}
	/* disable iop all outbound interrupt */
	intmask_org=CHIP_REG_READ32(outbound_intmask);
	CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
	arcmsr_iop_init(acb);
	return(0);
}
/*
************************************************************************
** Device attach: initialize the adapter, hook the interrupt, register
** a CAM SIM/bus/path, and create the /dev/arcmsrN control device.
** Returns 0 on success or an errno value; on failure every resource
** acquired so far is released before returning.
************************************************************************
*/
static u_int32_t arcmsr_attach(device_t dev)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
	u_int32_t unit=device_get_unit(dev);
	struct ccb_setasync csa;
	struct cam_devq *devq;	/* Device Queue to use for this SIM */
	struct resource *irqres;
	int rid;

	if(acb == NULL) {
		printf("arcmsr%d: cannot allocate softc\n", unit);
		return (ENOMEM);
	}
	/* softc comes from the device framework; start from a clean slate */
	bzero(acb, sizeof(struct AdapterControlBlock));
	if(arcmsr_initialize(dev)) {
		printf("arcmsr%d: initialize failure!\n", unit);
		return ENXIO;
	}
	/* After setting up the adapter, map our interrupt */
	rid=0;
	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE);
	if(irqres == NULL ||
		bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE
		, NULL, arcmsr_interrupt, acb, &acb->ih)) {
		arcmsr_free_resource(acb);
		printf("arcmsr%d: unable to register interrupt handler!\n", unit);
		return ENXIO;
	}
	acb->irqres=irqres;
	acb->pci_dev=dev;
	acb->pci_unit=unit;
	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus * start queue to reset to the idle loop.
	 *
	 * Create device queue of SIM(s) * (MAX_START_JOB - 1) :
	 * max_sim_transactions
	*/
	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
	if(devq == NULL) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		printf("arcmsr%d: cam_simq_alloc failure!\n", unit);
		return ENXIO;
	}
	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll
		, "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
	if(acb->psim == NULL) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		cam_simq_free(devq);
		printf("arcmsr%d: cam_sim_alloc failure!\n", unit);
		return ENXIO;
	}
	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		/* cam_sim_free with free_devq=TRUE also releases devq */
		cam_sim_free(acb->psim, /*free_devq*/TRUE);
		printf("arcmsr%d: xpt_bus_register failure!\n", unit);
		return ENXIO;
	}
	if(xpt_create_path(&acb->ppath, /* periph */ NULL
		, cam_sim_path(acb->psim)
		, CAM_TARGET_WILDCARD
		, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		xpt_bus_deregister(cam_sim_path(acb->psim));
		cam_sim_free(acb->psim, /* free_simq */ TRUE);
		printf("arcmsr%d: xpt_create_path failure!\n", unit);
		return ENXIO;
	}
	ARCMSR_LOCK_INIT(&acb->workingQ_done_lock, "arcmsr done working Q lock");
	ARCMSR_LOCK_INIT(&acb->workingQ_start_lock, "arcmsr start working Q lock");
	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
	/*
	****************************************************
	** register for async device-arrival/departure events on our path
	****************************************************
	*/
	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
	csa.ccb_h.func_code=XPT_SASYNC_CB;
	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback=arcmsr_async;
	csa.callback_arg=acb->psim;
	xpt_action((union ccb *)&csa);
	/* Create the control device. */
	acb->ioctl_dev=make_dev(&arcmsr_cdevsw
		, unit
		, UID_ROOT
		, GID_WHEEL /* GID_OPERATOR */
		, S_IRUSR | S_IWUSR
		, "arcmsr%d", unit);
#if __FreeBSD_version < 503000
	acb->ioctl_dev->si_drv1=acb;
#endif
#if __FreeBSD_version > 500005
	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
#endif
	return 0;
}
/*
************************************************************************
** Device probe: match Areca vendor/device IDs and set a descriptive
** device string (noting RAID6 capability for all but the 1110/1210).
************************************************************************
*/
static u_int32_t arcmsr_probe(device_t dev)
{
	u_int32_t id;
	/* NOTE(review): static buffer makes probe non-reentrant; the format
	** below stays well under 256 bytes, but device_set_desc_copy()
	** copies anyway, so a stack buffer would do — confirm. */
	static char buf[256];
	char *type;
	int raid6 = 1;

	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
		return (ENXIO);
	}
	switch(id=pci_get_devid(dev)) {
	case PCIDevVenIDARC1110:
	case PCIDevVenIDARC1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCIDevVenIDARC1120:
	case PCIDevVenIDARC1130:
	case PCIDevVenIDARC1160:
	case PCIDevVenIDARC1170:
	case PCIDevVenIDARC1220:
	case PCIDevVenIDARC1230:
	case PCIDevVenIDARC1260:
	case PCIDevVenIDARC1270:
	case PCIDevVenIDARC1280:
		type = "SATA";
		break;
	case PCIDevVenIDARC1380:
	case PCIDevVenIDARC1381:
	case PCIDevVenIDARC1680:
	case PCIDevVenIDARC1681:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}
	sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n", type, raid6 ? "(RAID6 capable)" : "");
	device_set_desc_copy(dev, buf);
	return 0;
}
/*
************************************************************************
** Shutdown: stop background rebuild, flush the adapter cache, mask all
** outbound interrupts, then drain (and if necessary abort) every
** outstanding SRB before marking the queues empty.
************************************************************************
*/
static void arcmsr_shutdown(device_t dev)
{
	u_int32_t i, poll_count=0;
	u_int32_t intmask_org;
	struct CommandControlBlock *srb;
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);

	/* stop adapter background rebuild */
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	/* disable all outbound interrupt */
	intmask_org=CHIP_REG_READ32(outbound_intmask);
	CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE));
	/* abort all outstanding command */
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;
	if(acb->srboutstandingcount!=0) {
		/* poll the completion path by hand for up to 256*25ms = ~6.4s */
		while((acb->srboutstandingcount!=0) && (poll_count < 256)) {
			arcmsr_interrupt((void *)acb);
			UDELAY(25000);
			poll_count++;
		}
		if(acb->srboutstandingcount!=0) {
			arcmsr_abort_allcmd(acb);
			/*clear all outbound posted Q*/
			for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) {
				CHIP_REG_READ32(outbound_queueport);
			}
			/* complete anything still marked in-flight as aborted */
			for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
				srb=acb->psrb_pool[i];
				if(srb->startdone==ARCMSR_SRB_START) {
					srb->startdone=ARCMSR_SRB_ABORTED;
					srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
					arcmsr_srb_complete(srb, 1);
				}
			}
		}
	}
	atomic_set_int(&acb->srboutstandingcount, 0);
	acb->workingsrb_doneindex=0;
	acb->workingsrb_startindex=0;
	return;
}
/*
************************************************************************
** Device detach: quiesce the adapter, then release DMA, register-
** window, interrupt and CAM resources.
************************************************************************
*/
static u_int32_t arcmsr_detach(device_t dev)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);

	arcmsr_shutdown(dev);
	/* NOTE(review): the interrupt is torn down only after the DMA
	** resources and register window are freed; a late interrupt could
	** race with the teardown. Conventional order is bus_teardown_intr()
	** first — confirm before reordering. */
	arcmsr_free_resource(acb);
	bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), acb->sys_res_arcmsr);
	bus_teardown_intr(dev, acb->irqres, acb->ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
	xpt_free_path(acb->ppath);
	xpt_bus_deregister(cam_sim_path(acb->psim));
	cam_sim_free(acb->psim, TRUE);
	return (0);
}
