1 /* 2 ****************************************************************************************** 3 ** O.S : FreeBSD 4 ** FILE NAME : arcmsr.c 5 ** BY : Erich Chen 6 ** Description: SCSI RAID Device Driver for 7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX) SATA/SAS RAID HOST Adapter 8 ** ARCMSR RAID Host adapter 9 ** [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set] 10 ****************************************************************************************** 11 ************************************************************************ 12 ** 13 ** Copyright (c) 2004-2006 ARECA Co. Ltd. 14 ** Erich Chen, Taipei Taiwan All rights reserved. 15 ** 16 ** Redistribution and use in source and binary forms, with or without 17 ** modification, are permitted provided that the following conditions 18 ** are met: 19 ** 1. Redistributions of source code must retain the above copyright 20 ** notice, this list of conditions and the following disclaimer. 21 ** 2. Redistributions in binary form must reproduce the above copyright 22 ** notice, this list of conditions and the following disclaimer in the 23 ** documentation and/or other materials provided with the distribution. 24 ** 3. The name of the author may not be used to endorse or promote products 25 ** derived from this software without specific prior written permission. 26 ** 27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT 32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 ************************************************************************** 38 ** History 39 ** 40 ** REV# DATE NAME DESCRIPTION 41 ** 1.00.00.00 3/31/2004 Erich Chen First release 42 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error 43 ** 1.20.00.03 4/19/2005 Erich Chen add SATA 24 Ports adapter type support 44 ** clean unused function 45 ** 1.20.00.12 9/12/2005 Erich Chen bug fix with abort command handling, 46 ** firmware version check 47 ** and firmware update notify for hardware bug fix 48 ** handling if none zero high part physical address 49 ** of srb resource 50 ** 1.20.00.13 8/18/2006 Erich Chen remove pending srb and report busy 51 ** add iop message xfer 52 ** with scsi pass-through command 53 ** add new device id of sas raid adapters 54 ** code fit for SPARC64 & PPC 55 ****************************************************************************************** 56 * $FreeBSD$ 57 */ 58 #include <sys/param.h> 59 #include <sys/systm.h> 60 #include <sys/malloc.h> 61 #include <sys/kernel.h> 62 #include <sys/bus.h> 63 #include <sys/queue.h> 64 #include <sys/stat.h> 65 #include <sys/devicestat.h> 66 #include <sys/kthread.h> 67 #include <sys/module.h> 68 #include <sys/proc.h> 69 #include <sys/lock.h> 70 #include <sys/sysctl.h> 71 #include <sys/poll.h> 72 #include <sys/ioccom.h> 73 #include <vm/vm.h> 74 #include <vm/vm_param.h> 75 #include <vm/pmap.h> 76 77 #include <isa/rtc.h> 78 79 #include <machine/bus.h> 80 #include <machine/resource.h> 81 #include <machine/atomic.h> 82 #include <sys/conf.h> 83 #include <sys/rman.h> 84 85 #include <cam/cam.h> 86 #include <cam/cam_ccb.h> 87 #include <cam/cam_sim.h> 88 #include <cam/cam_xpt_sim.h> 89 #include <cam/cam_debug.h> 90 #include <cam/scsi/scsi_all.h> 91 #include <cam/scsi/scsi_message.h> 92 /* 93 ************************************************************************** 94 ************************************************************************** 95 */ 96 #if __FreeBSD_version >= 500005 97 #include <sys/selinfo.h> 98 #include <sys/mutex.h> 99 #include <sys/endian.h> 100 #include <dev/pci/pcivar.h> 101 #include <dev/pci/pcireg.h> 102 #define ARCMSR_LOCK_INIT(l, s) mtx_init(l, s, NULL, MTX_DEF|MTX_RECURSE) 103 #define ARCMSR_LOCK_ACQUIRE(l) mtx_lock(l) 104 #define ARCMSR_LOCK_RELEASE(l) mtx_unlock(l) 105 #define ARCMSR_LOCK_TRY(l) mtx_trylock(l) 106 #define arcmsr_htole32(x) htole32(x) 107 typedef struct mtx arcmsr_lock_t; 108 #else 109 #include <sys/select.h> 110 #include <pci/pcivar.h> 111 #include <pci/pcireg.h> 112 #define ARCMSR_LOCK_INIT(l, s) simple_lock_init(l) 113 #define ARCMSR_LOCK_ACQUIRE(l) simple_lock(l) 114 #define ARCMSR_LOCK_RELEASE(l) simple_unlock(l) 115 #define ARCMSR_LOCK_TRY(l) simple_lock_try(l) 116 #define arcmsr_htole32(x) (x) 117 typedef struct simplelock arcmsr_lock_t; 118 #endif 119 #include <dev/arcmsr/arcmsr.h> 120 #define ARCMSR_SRBS_POOL_SIZE ((sizeof(struct CommandControlBlock) * ARCMSR_MAX_FREESRB_NUM)+0x20) 121 /* 122 ************************************************************************** 123 ************************************************************************** 124 */ 125 #define CHIP_REG_READ32(r) bus_space_read_4(acb->btag, acb->bhandle, offsetof(struct MessageUnit,r)) 126 #define CHIP_REG_WRITE32(r,d) bus_space_write_4(acb->btag, acb->bhandle, offsetof(struct MessageUnit,r), d) 127 /* 128 ************************************************************************** 129 ************************************************************************** 130 */ 131 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb); 132 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb); 133 static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb); 134 static u_int32_t arcmsr_probe(device_t dev); 135 static u_int32_t arcmsr_attach(device_t dev); 136 static u_int32_t arcmsr_detach(device_t dev); 137 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg); 138 static void arcmsr_iop_parking(struct AdapterControlBlock *acb); 139 static void arcmsr_shutdown(device_t dev); 140 static void arcmsr_interrupt(void *arg); 141 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb); 142 static void arcmsr_free_resource(struct AdapterControlBlock *acb); 143 static void arcmsr_bus_reset(struct AdapterControlBlock *acb); 144 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); 145 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); 146 static void arcmsr_iop_init(struct AdapterControlBlock *acb); 147 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb); 148 static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb); 149 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb); 150 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag); 151 static void arcmsr_iop_reset(struct AdapterControlBlock *acb); 152 static void arcmsr_report_sense_info(struct CommandControlBlock *srb); 153 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg); 154 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb); 155 static int arcmsr_resume(device_t dev); 156 static int arcmsr_suspend(device_t dev); 157 /* 158 ************************************************************************** 159 ************************************************************************** 160 */ 161 static void UDELAY(u_int32_t us) { DELAY(us); } 162 /* 163 ************************************************************************** 164 ************************************************************************** 165 */ 166 static bus_dmamap_callback_t arcmsr_map_freesrb; 167 static bus_dmamap_callback_t arcmsr_executesrb; 168 /* 169 ************************************************************************** 170 ************************************************************************** 171 */ 172 static d_open_t arcmsr_open; 173 static d_close_t arcmsr_close; 174 static d_ioctl_t arcmsr_ioctl; 175 176 static device_method_t arcmsr_methods[]={ 177 DEVMETHOD(device_probe, arcmsr_probe), 178 DEVMETHOD(device_attach, arcmsr_attach), 179 DEVMETHOD(device_detach, arcmsr_detach), 180 DEVMETHOD(device_shutdown, arcmsr_shutdown), 181 DEVMETHOD(device_suspend, arcmsr_suspend), 182 DEVMETHOD(device_resume, arcmsr_resume), 183 184 DEVMETHOD(bus_print_child, bus_generic_print_child), 185 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 186 { 0, 0 } 187 }; 188 189 static driver_t arcmsr_driver={ 190 "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock) 191 }; 192 193 static devclass_t arcmsr_devclass; 194 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0); 195 #ifndef BUS_DMA_COHERENT 196 #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ 197 #endif 198 #if __FreeBSD_version >= 501000 199 #ifndef D_NEEDGIANT 200 #define D_NEEDGIANT 0x00400000 /* driver want Giant */ 201 #endif 202 #ifndef D_VERSION 203 #define D_VERSION 0x20011966 204 #endif 205 static struct cdevsw arcmsr_cdevsw={ 206 #if __FreeBSD_version > 502010 207 .d_version = D_VERSION, 208 #endif 209 .d_flags = D_NEEDGIANT, 210 .d_open = arcmsr_open, /* open */ 211 .d_close = arcmsr_close, /* close */ 212 .d_ioctl = arcmsr_ioctl, /* ioctl */ 213 .d_name = "arcmsr", /* name */ 214 }; 215 #else 216 #define ARCMSR_CDEV_MAJOR 180 217 218 static struct cdevsw arcmsr_cdevsw = { 219 arcmsr_open, /* open */ 220 arcmsr_close, /* close */ 221 noread, /* read */ 222 nowrite, /* write */ 223 arcmsr_ioctl, /* ioctl */ 224 nopoll, /* poll */ 225 nommap, /* mmap */ 226 nostrategy, /* strategy */ 227 "arcmsr", /* name */ 228 ARCMSR_CDEV_MAJOR, /* major */ 229 nodump, /* dump */ 230 nopsize, /* psize */ 231 0 /* flags */ 232 }; 233 #endif 234 235 #if __FreeBSD_version < 500005 236 static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc) 237 #else 238 #if __FreeBSD_version < 503000 239 static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc) 240 #else 241 static int arcmsr_open(struct cdev *dev, int flags, int fmt, d_thread_t *proc) 242 #endif 243 #endif 244 { 245 #if __FreeBSD_version < 503000 246 struct AdapterControlBlock *acb=dev->si_drv1; 247 #else 248 int unit = minor(dev); 249 struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); 250 #endif 251 if(acb==NULL) { 252 return ENXIO; 253 } 254 return 0; 255 } 256 /* 257 ************************************************************************** 258 ************************************************************************** 259 */ 260 #if __FreeBSD_version < 500005 261 static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc) 262 #else 263 #if __FreeBSD_version < 503000 264 static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc) 265 #else 266 static int arcmsr_close(struct cdev *dev, int flags, int fmt, d_thread_t *proc) 267 #endif 268 #endif 269 { 270 #if __FreeBSD_version < 503000 271 struct AdapterControlBlock *acb=dev->si_drv1; 272 #else 273 int unit = minor(dev); 274 struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); 275 #endif 276 if(acb==NULL) { 277 return ENXIO; 278 } 279 return 0; 280 } 281 /* 282 ************************************************************************** 283 ************************************************************************** 284 */ 285 #if __FreeBSD_version < 500005 286 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc) 287 #else 288 #if __FreeBSD_version < 503000 289 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) 290 #else 291 static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, d_thread_t *proc) 292 #endif 293 #endif 294 { 295 #if __FreeBSD_version < 503000 296 struct AdapterControlBlock *acb=dev->si_drv1; 297 #else 298 int unit = minor(dev); 299 struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); 300 #endif 301 302 if(acb==NULL) { 303 return ENXIO; 304 } 305 return(arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg)); 306 } 307 /* 308 ******************************************************************************* 309 ******************************************************************************* 310 */ 311 static int arcmsr_suspend(device_t dev) 312 { 313 struct AdapterControlBlock *acb = device_get_softc(dev); 314 u_int32_t intmask_org; 315 316 /* disable all outbound interrupt */ 317 intmask_org=CHIP_REG_READ32(outbound_intmask); 318 CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE)); 319 /* flush controller */ 320 arcmsr_iop_parking(acb); 321 return(0); 322 } 323 /* 324 ******************************************************************************* 325 ******************************************************************************* 326 */ 327 static int arcmsr_resume(device_t dev) 328 { 329 struct AdapterControlBlock *acb = device_get_softc(dev); 330 331 arcmsr_iop_init(acb); 332 return(0); 333 } 334 /* 335 ********************************************************************************* 336 ********************************************************************************* 337 */ 338 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) 339 { 340 struct AdapterControlBlock *acb; 341 u_int8_t target_id, target_lun; 342 struct cam_sim * sim; 343 344 sim=(struct cam_sim *) cb_arg; 345 acb =(struct AdapterControlBlock *) cam_sim_softc(sim); 346 switch (code) { 347 case AC_LOST_DEVICE: 348 target_id=xpt_path_target_id(path); 349 target_lun=xpt_path_lun_id(path); 350 if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { 351 break; 352 } 353 printf("%s:scsi id%d lun%d device lost \n" 354 , device_get_name(acb->pci_dev), target_id, target_lun); 355 break; 356 default: 357 break; 358 } 359 } 360 /* 361 ************************************************************************ 362 ************************************************************************ 363 */ 364 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 365 { 366 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); 367 if(arcmsr_wait_msgint_ready(acb)) { 368 printf("arcmsr%d: wait 'flush adapter cache' timeout \n" 369 , acb->pci_unit); 370 } 371 return; 372 } 373 /* 374 ********************************************************************** 375 ********************************************************************** 376 */ 377 static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb) 378 { 379 u_int32_t Index; 380 u_int8_t Retries=0x00; 381 382 do { 383 for(Index=0; Index < 100; Index++) { 384 if(CHIP_REG_READ32(outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 385 /*clear interrupt*/ 386 CHIP_REG_WRITE32(outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); 387 return 0x00; 388 } 389 /* one us delay */ 390 UDELAY(10000); 391 }/*max 1 seconds*/ 392 }while(Retries++ < 20);/*max 20 sec*/ 393 return 0xff; 394 } 395 /* 396 ********************************************************************** 397 ********************************************************************** 398 */ 399 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag) 400 { 401 struct AdapterControlBlock *acb=srb->acb; 402 union ccb * pccb=srb->pccb; 403 404 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 405 bus_dmasync_op_t op; 406 407 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 408 op = BUS_DMASYNC_POSTREAD; 409 } else { 410 op = BUS_DMASYNC_POSTWRITE; 411 } 412 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 413 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 414 } 415 ARCMSR_LOCK_ACQUIRE(&acb->workingQ_done_lock); 416 if(stand_flag==1) { 417 atomic_subtract_int(&acb->srboutstandingcount, 1); 418 } 419 srb->startdone=ARCMSR_SRB_DONE; 420 srb->srb_flags=0; 421 acb->srbworkingQ[acb->workingsrb_doneindex]=srb; 422 acb->workingsrb_doneindex++; 423 acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; 424 ARCMSR_LOCK_RELEASE(&acb->workingQ_done_lock); 425 xpt_done(pccb); 426 return; 427 } 428 /* 429 ********************************************************************** 430 ********************************************************************** 431 */ 432 static void arcmsr_report_sense_info(struct CommandControlBlock *srb) 433 { 434 union ccb * pccb=srb->pccb; 435 436 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 437 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 438 if(&pccb->csio.sense_data) { 439 memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); 440 memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, 441 get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); 442 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ 443 pccb->ccb_h.status |= CAM_AUTOSNS_VALID; 444 } 445 return; 446 } 447 /* 448 ********************************************************************* 449 ** 450 ********************************************************************* 451 */ 452 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 453 { 454 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); 455 if(arcmsr_wait_msgint_ready(acb)) { 456 printf("arcmsr%d: wait 'abort all outstanding command' timeout \n" 457 , acb->pci_unit); 458 } 459 return; 460 } 461 /* 462 **************************************************************************** 463 **************************************************************************** 464 */ 465 static void arcmsr_iop_reset(struct AdapterControlBlock *acb) 466 { 467 struct CommandControlBlock *srb; 468 u_int32_t intmask_org, mask; 469 u_int32_t i=0; 470 471 if(acb->srboutstandingcount!=0) 472 { 473 /* talk to iop 331 outstanding command aborted*/ 474 arcmsr_abort_allcmd(acb); 475 UDELAY(3000*1000);/*wait for 3 sec for all command aborted*/ 476 /* disable all outbound interrupt */ 477 intmask_org=CHIP_REG_READ32(outbound_intmask); 478 CHIP_REG_WRITE32(outbound_intmask 479 , intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 480 /*clear all outbound posted Q*/ 481 for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) { 482 CHIP_REG_READ32(outbound_queueport); 483 } 484 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 485 srb=acb->psrb_pool[i]; 486 if(srb->startdone==ARCMSR_SRB_START) { 487 srb->startdone=ARCMSR_SRB_ABORTED; 488 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 489 arcmsr_srb_complete(srb, 1); 490 } 491 } 492 /* enable all outbound interrupt */ 493 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 494 |ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 495 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 496 /* post abort all outstanding command message to RAID controller */ 497 } 498 atomic_set_int(&acb->srboutstandingcount, 0); 499 acb->workingsrb_doneindex=0; 500 acb->workingsrb_startindex=0; 501 return; 502 } 503 /* 504 ********************************************************************** 505 ********************************************************************** 506 */ 507 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg) 508 { 509 struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb; 510 u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u; 511 u_int32_t address_lo, address_hi; 512 union ccb * pccb=srb->pccb; 513 struct ccb_scsiio * pcsio= &pccb->csio; 514 u_int32_t arccdbsize=0x30; 515 516 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); 517 arcmsr_cdb->Bus=0; 518 arcmsr_cdb->TargetID=pccb->ccb_h.target_id; 519 arcmsr_cdb->LUN=pccb->ccb_h.target_lun; 520 arcmsr_cdb->Function=1; 521 arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len; 522 arcmsr_cdb->Context=(unsigned long)arcmsr_cdb; 523 bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len); 524 if(nseg != 0) { 525 struct AdapterControlBlock *acb=srb->acb; 526 bus_dmasync_op_t op; 527 u_int32_t length, i, cdb_sgcount=0; 528 529 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 530 op=BUS_DMASYNC_PREREAD; 531 } else { 532 op=BUS_DMASYNC_PREWRITE; 533 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE; 534 srb->srb_flags|=SRB_FLAG_WRITE; 535 } 536 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 537 for(i=0;i<nseg;i++) { 538 /* Get the physical address of the current data pointer */ 539 length=arcmsr_htole32(dm_segs[i].ds_len); 540 address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr)); 541 address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr)); 542 if(address_hi==0) { 543 struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge; 544 pdma_sg->address=address_lo; 545 pdma_sg->length=length; 546 psge += sizeof(struct SG32ENTRY); 547 arccdbsize += sizeof(struct SG32ENTRY); 548 } else { 549 u_int32_t sg64s_size=0, tmplength=length; 550 551 while(1) { 552 u_int64_t span4G, length0; 553 struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge; 554 555 span4G=(u_int64_t)address_lo + tmplength; 556 pdma_sg->addresshigh=address_hi; 557 pdma_sg->address=address_lo; 558 if(span4G > 0x100000000) { 559 /*see if cross 4G boundary*/ 560 length0=0x100000000-address_lo; 561 pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR; 562 address_hi=address_hi+1; 563 address_lo=0; 564 tmplength=tmplength-(u_int32_t)length0; 565 sg64s_size += sizeof(struct SG64ENTRY); 566 psge += sizeof(struct SG64ENTRY); 567 cdb_sgcount++; 568 } else { 569 pdma_sg->length=tmplength|IS_SG64_ADDR; 570 sg64s_size += sizeof(struct SG64ENTRY); 571 psge += sizeof(struct SG64ENTRY); 572 break; 573 } 574 } 575 arccdbsize += sg64s_size; 576 } 577 cdb_sgcount++; 578 } 579 arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount; 580 arcmsr_cdb->DataLength=pcsio->dxfer_len; 581 if( arccdbsize > 256) { 582 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE; 583 } 584 } 585 return; 586 } 587 /* 588 ************************************************************************** 589 ************************************************************************** 590 */ 591 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb) 592 { 593 u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr; 594 struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb; 595 596 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 597 (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD); 598 atomic_add_int(&acb->srboutstandingcount, 1); 599 srb->startdone=ARCMSR_SRB_START; 600 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { 601 CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE); 602 } else { 603 CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr); 604 } 605 return; 606 } 607 /* 608 ********************************************************************** 609 ********************************************************************** 610 */ 611 static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb) 612 { 613 u_int8_t * pQbuffer; 614 struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer; 615 u_int8_t * iop_data=(u_int8_t *)pwbuffer->data; 616 u_int32_t allxfer_len=0; 617 618 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 619 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 620 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { 621 pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex]; 622 memcpy(iop_data, pQbuffer, 1); 623 acb->wqbuf_firstindex++; 624 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 625 /*if last index number set it to 0 */ 626 iop_data++; 627 allxfer_len++; 628 } 629 pwbuffer->data_len=allxfer_len; 630 /* 631 ** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post 632 */ 633 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); 634 } 635 return; 636 } 637 /* 638 ************************************************************************ 639 ************************************************************************ 640 */ 641 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 642 { 643 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 644 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 645 if(arcmsr_wait_msgint_ready(acb)) { 646 printf("arcmsr%d: wait 'stop adapter rebulid' timeout \n" 647 , acb->pci_unit); 648 } 649 return; 650 } 651 /* 652 ************************************************************************ 653 ************************************************************************ 654 */ 655 static void arcmsr_poll(struct cam_sim * psim) 656 { 657 arcmsr_interrupt(cam_sim_softc(psim)); 658 return; 659 } 660 /* 661 ********************************************************************** 662 ********************************************************************** 663 */ 664 static void arcmsr_interrupt(void *arg) 665 { 666 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg; 667 struct CommandControlBlock *srb; 668 u_int32_t flag_srb, outbound_intstatus, outbound_doorbell; 669 670 /* 671 ********************************************* 672 ** check outbound intstatus 673 ********************************************* 674 */ 675 outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable; 676 CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/ 677 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { 678 /* 679 ********************************************* 680 ** DOORBELL 681 ********************************************* 682 */ 683 outbound_doorbell=CHIP_REG_READ32(outbound_doorbell); 684 CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */ 685 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { 686 struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer; 687 u_int8_t * iop_data=(u_int8_t *)prbuffer->data; 688 u_int8_t * pQbuffer; 689 u_int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; 690 691 /*check this iop data if overflow my rqbuffer*/ 692 rqbuf_lastindex=acb->rqbuf_lastindex; 693 rqbuf_firstindex=acb->rqbuf_firstindex; 694 iop_len=prbuffer->data_len; 695 my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); 696 if(my_empty_len>=iop_len) { 697 while(iop_len > 0) { 698 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 699 memcpy(pQbuffer, iop_data, 1); 700 acb->rqbuf_lastindex++; 701 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 702 /*if last index number set it to 0 */ 703 iop_data++; 704 iop_len--; 705 } 706 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 707 /*signature, let IOP331 know data has been readed */ 708 } else { 709 acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW; 710 } 711 } 712 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { 713 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; 714 /* 715 ********************************************* 716 ********************************************* 717 */ 718 if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) { 719 u_int8_t * pQbuffer; 720 struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer; 721 u_int8_t * iop_data=(u_int8_t *)pwbuffer->data; 722 u_int32_t allxfer_len=0; 723 724 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 725 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { 726 pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex]; 727 memcpy(iop_data, pQbuffer, 1); 728 acb->wqbuf_firstindex++; 729 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 730 /*if last index number set it to 0 */ 731 iop_data++; 732 allxfer_len++; 733 } 734 pwbuffer->data_len=allxfer_len; 735 /* 736 ** push inbound doorbell tell iop driver data write ok 737 ** and wait reply on next hwinterrupt for next Qbuffer post 738 */ 739 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); 740 } 741 if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) { 742 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; 743 } 744 } 745 } 746 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { 747 int target, lun; 748 /* 749 ***************************************************************************** 750 ** areca cdb command done 751 ***************************************************************************** 752 */ 753 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 754 while(1) { 755 if((flag_srb=CHIP_REG_READ32(outbound_queueport)) == 0xFFFFFFFF) { 756 break;/*chip FIFO no srb for completion already*/ 757 } 758 /* check if command done with no error*/ 759 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5)); 760 /*frame must be 32 bytes aligned*/ 761 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 762 if(srb->startdone==ARCMSR_SRB_ABORTED) { 763 printf("arcmsr%d: srb='%p' isr got aborted command \n" 764 , acb->pci_unit, srb); 765 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 766 arcmsr_srb_complete(srb, 1); 767 continue; 768 } 769 printf("arcmsr%d: isr get an illegal srb command done" 770 "acb='%p' srb='%p' srbacb='%p' startdone=0x%x" 771 "srboutstandingcount=%d \n", 772 acb->pci_unit, acb, srb, srb->acb, 773 srb->startdone, acb->srboutstandingcount); 774 continue; 775 } 776 target=srb->pccb->ccb_h.target_id; 777 lun=srb->pccb->ccb_h.target_lun; 778 if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) { 779 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 780 acb->devstate[target][lun]=ARECA_RAID_GOOD; 781 } 782 srb->pccb->ccb_h.status |= CAM_REQ_CMP; 783 arcmsr_srb_complete(srb, 1); 784 } else { 785 switch(srb->arcmsr_cdb.DeviceStatus) { 786 case ARCMSR_DEV_SELECT_TIMEOUT: { 787 acb->devstate[target][lun]=ARECA_RAID_GONE; 788 srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 789 arcmsr_srb_complete(srb, 1); 790 } 791 break; 792 case ARCMSR_DEV_ABORTED: 793 case ARCMSR_DEV_INIT_FAIL: { 794 acb->devstate[target][lun]=ARECA_RAID_GONE; 795 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 796 arcmsr_srb_complete(srb, 1); 797 } 798 break; 799 case SCSISTAT_CHECK_CONDITION: { 800 acb->devstate[target][lun]=ARECA_RAID_GOOD; 801 arcmsr_report_sense_info(srb); 802 arcmsr_srb_complete(srb, 1); 803 } 804 break; 805 default: 806 printf("arcmsr%d: scsi id=%d lun=%d" 807 "isr get command error done," 808 "but got unknow DeviceStatus=0x%x \n" 809 , acb->pci_unit, target, lun 810 ,srb->arcmsr_cdb.DeviceStatus); 811 acb->devstate[target][lun]=ARECA_RAID_GONE; 812 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; 813 /*unknow error or crc error just for retry*/ 814 arcmsr_srb_complete(srb, 1); 815 break; 816 } 817 } 818 } /*drain reply FIFO*/ 819 } 820 return; 821 } 822 /* 823 ******************************************************************************* 824 ** 825 ******************************************************************************* 826 */ 827 static void arcmsr_iop_parking(struct AdapterControlBlock *acb) 828 { 829 if(acb!=NULL) { 830 /* stop adapter background rebuild */ 831 if(acb->acb_flags & ACB_F_MSG_START_BGRB) { 832 arcmsr_stop_adapter_bgrb(acb); 833 arcmsr_flush_adapter_cache(acb); 834 } 835 } 836 } 837 /* 838 *********************************************************************** 839 ** 840 ************************************************************************ 841 */ 842 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) 843 { 844 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 845 u_int32_t retvalue=EINVAL; 846 847 pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg; 848 if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { 849 return retvalue; 850 } 851 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 852 switch(ioctl_cmd) { 853 case ARCMSR_MESSAGE_READ_RQBUFFER: { 854 u_int8_t * pQbuffer; 855 u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 856 u_int32_t allxfer_len=0; 857 858 while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex) && (allxfer_len<1031)) { 859 /*copy READ QBUFFER to srb*/ 860 pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex]; 861 memcpy(ptmpQbuffer, pQbuffer, 1); 862 acb->rqbuf_firstindex++; 863 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 864 /*if last index number set it to 0 */ 865 ptmpQbuffer++; 866 allxfer_len++; 867 } 868 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 869 struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer; 870 u_int8_t * iop_data=(u_int8_t *)prbuffer->data; 871 u_int32_t iop_len; 872 873 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 874 iop_len=(u_int32_t)prbuffer->data_len; 875 /*this iop data does no chance to make me overflow again here, so just do it*/ 876 while(iop_len>0) { 877 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 878 memcpy(pQbuffer, iop_data, 1); 879 acb->rqbuf_lastindex++; 880 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 881 /*if last index number set it to 0 */ 882 iop_data++; 883 iop_len--; 884 } 885 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 886 /*signature, let IOP331 know data has been readed */ 887 } 888 pcmdmessagefld->cmdmessage.Length=allxfer_len; 889 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 890 retvalue=ARCMSR_MESSAGE_SUCCESS; 891 } 892 break; 893 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 894 u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 895 u_int8_t * pQbuffer; 896 u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 897 898 user_len=pcmdmessagefld->cmdmessage.Length; 899 /*check if data xfer length of this request will overflow my array qbuffer */ 900 wqbuf_lastindex=acb->wqbuf_lastindex; 901 wqbuf_firstindex=acb->wqbuf_firstindex; 902 if(wqbuf_lastindex!=wqbuf_firstindex) { 903 arcmsr_post_Qbuffer(acb); 904 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 905 } else { 906 my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); 907 if(my_empty_len>=user_len) { 908 while(user_len>0) { 909 /*copy srb data to wqbuffer*/ 910 pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex]; 911 memcpy(pQbuffer, ptmpuserbuffer, 1); 912 acb->wqbuf_lastindex++; 913 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 914 /*if last index number set it to 0 */ 915 ptmpuserbuffer++; 916 user_len--; 917 } 918 /*post fist Qbuffer*/ 919 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 920 acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED; 921 arcmsr_post_Qbuffer(acb); 922 } 923 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 924 } else { 925 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 926 } 927 } 928 retvalue=ARCMSR_MESSAGE_SUCCESS; 929 } 930 break; 931 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 932 u_int8_t * pQbuffer=acb->rqbuffer; 933 934 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 935 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 936 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 937 /*signature, let IOP331 know data has been readed */ 938 } 939 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 940 acb->rqbuf_firstindex=0; 941 acb->rqbuf_lastindex=0; 942 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 943 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 944 retvalue=ARCMSR_MESSAGE_SUCCESS; 945 } 946 break; 947 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: 948 { 949 u_int8_t * pQbuffer=acb->wqbuffer; 950 951 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 952 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 953 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 954 /*signature, let IOP331 know data has been readed */ 955 } 956 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READED); 957 acb->wqbuf_firstindex=0; 958 acb->wqbuf_lastindex=0; 959 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 960 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 961 retvalue=ARCMSR_MESSAGE_SUCCESS; 962 } 963 break; 964 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 965 u_int8_t * pQbuffer; 966 967 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 968 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 969 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 970 /*signature, let IOP331 know data has been readed */ 971 } 972 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED 973 |ACB_F_MESSAGE_RQBUFFER_CLEARED 974 |ACB_F_MESSAGE_WQBUFFER_READED); 975 acb->rqbuf_firstindex=0; 976 acb->rqbuf_lastindex=0; 977 acb->wqbuf_firstindex=0; 978 acb->wqbuf_lastindex=0; 979 pQbuffer=acb->rqbuffer; 980 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 981 pQbuffer=acb->wqbuffer; 982 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 983 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 984 retvalue=ARCMSR_MESSAGE_SUCCESS; 985 } 986 break; 987 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 988 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F; 989 retvalue=ARCMSR_MESSAGE_SUCCESS; 990 } 991 break; 992 case ARCMSR_MESSAGE_SAY_HELLO: { 993 u_int8_t * hello_string="Hello! I am ARCMSR"; 994 u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer; 995 996 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { 997 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 998 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 999 return ENOIOCTL; 1000 } 1001 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1002 retvalue=ARCMSR_MESSAGE_SUCCESS; 1003 } 1004 break; 1005 case ARCMSR_MESSAGE_SAY_GOODBYE: { 1006 arcmsr_iop_parking(acb); 1007 retvalue=ARCMSR_MESSAGE_SUCCESS; 1008 } 1009 break; 1010 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { 1011 arcmsr_flush_adapter_cache(acb); 1012 retvalue=ARCMSR_MESSAGE_SUCCESS; 1013 } 1014 break; 1015 } 1016 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1017 return retvalue; 1018 } 1019 /* 1020 ************************************************************************** 1021 ************************************************************************** 1022 */ 1023 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb) 1024 { 1025 struct CommandControlBlock *srb=NULL; 1026 u_int32_t workingsrb_startindex, workingsrb_doneindex; 1027 1028 ARCMSR_LOCK_ACQUIRE(&acb->workingQ_start_lock); 1029 workingsrb_doneindex=acb->workingsrb_doneindex; 1030 workingsrb_startindex=acb->workingsrb_startindex; 1031 srb=acb->srbworkingQ[workingsrb_startindex]; 1032 workingsrb_startindex++; 1033 workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; 1034 if(workingsrb_doneindex!=workingsrb_startindex) { 1035 acb->workingsrb_startindex=workingsrb_startindex; 1036 } else { 1037 srb=NULL; 1038 } 1039 ARCMSR_LOCK_RELEASE(&acb->workingQ_start_lock); 1040 return(srb); 1041 } 1042 /* 1043 ************************************************************************** 1044 ************************************************************************** 1045 */ 1046 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb) 1047 { 1048 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 1049 int retvalue = 0, transfer_len = 0; 1050 char *buffer; 1051 uint32_t controlcode = (uint32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | 1052 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | 1053 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | 1054 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[8]; 1055 /* 4 bytes: Areca io control code */ 1056 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1057 buffer = pccb->csio.data_ptr; 1058 transfer_len = pccb->csio.dxfer_len; 1059 } else { 1060 retvalue = ARCMSR_MESSAGE_FAIL; 1061 goto message_out; 1062 } 1063 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 1064 retvalue = ARCMSR_MESSAGE_FAIL; 1065 goto message_out; 1066 } 1067 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; 1068 switch(controlcode) { 1069 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1070 u_int8_t *pQbuffer; 1071 u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 1072 int32_t allxfer_len = 0; 1073 1074 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1075 && (allxfer_len < 1031)) { 1076 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1077 memcpy(ptmpQbuffer, pQbuffer, 1); 1078 acb->rqbuf_firstindex++; 1079 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 1080 ptmpQbuffer++; 1081 allxfer_len++; 1082 } 1083 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1084 struct QBUFFER *prbuffer = (struct QBUFFER *) &acb->pmu->message_rbuffer; 1085 u_int8_t *iop_data = (u_int8_t *)prbuffer->data; 1086 int32_t iop_len; 1087 1088 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1089 iop_len =(u_int32_t)prbuffer->data_len; 1090 while (iop_len > 0) { 1091 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 1092 memcpy(pQbuffer, iop_data, 1); 1093 acb->rqbuf_lastindex++; 1094 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1095 iop_data++; 1096 iop_len--; 1097 } 1098 CHIP_REG_WRITE32(inbound_doorbell, 1099 ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1100 } 1101 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1102 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1103 retvalue=ARCMSR_MESSAGE_SUCCESS; 1104 } 1105 break; 1106 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1107 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1108 u_int8_t *pQbuffer; 1109 u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 1110 1111 user_len = pcmdmessagefld->cmdmessage.Length; 1112 wqbuf_lastindex = acb->wqbuf_lastindex; 1113 wqbuf_firstindex = acb->wqbuf_firstindex; 1114 if (wqbuf_lastindex != wqbuf_firstindex) { 1115 arcmsr_post_Qbuffer(acb); 1116 /* has error report sensedata */ 1117 if(&pccb->csio.sense_data) { 1118 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 1119 /* Valid,ErrorCode */ 1120 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 1121 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 1122 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 1123 /* AdditionalSenseLength */ 1124 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 1125 /* AdditionalSenseCode */ 1126 } 1127 retvalue = ARCMSR_MESSAGE_FAIL; 1128 } else { 1129 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) 1130 &(ARCMSR_MAX_QBUFFER - 1); 1131 if (my_empty_len >= user_len) { 1132 while (user_len > 0) { 1133 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; 1134 memcpy(pQbuffer, ptmpuserbuffer, 1); 1135 acb->wqbuf_lastindex++; 1136 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1137 ptmpuserbuffer++; 1138 user_len--; 1139 } 1140 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 1141 acb->acb_flags &= 1142 ~ACB_F_MESSAGE_WQBUFFER_CLEARED; 1143 arcmsr_post_Qbuffer(acb); 1144 } 1145 } else { 1146 /* has error report sensedata */ 1147 if(&pccb->csio.sense_data) { 1148 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 1149 /* Valid,ErrorCode */ 1150 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 1151 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 1152 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 1153 /* AdditionalSenseLength */ 1154 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 1155 /* AdditionalSenseCode */ 1156 } 1157 retvalue = ARCMSR_MESSAGE_FAIL; 1158 } 1159 } 1160 } 1161 break; 1162 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 1163 u_int8_t *pQbuffer = acb->rqbuffer; 1164 1165 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1166 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1167 CHIP_REG_WRITE32(inbound_doorbell 1168 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1169 } 1170 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 1171 acb->rqbuf_firstindex = 0; 1172 acb->rqbuf_lastindex = 0; 1173 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1174 pcmdmessagefld->cmdmessage.ReturnCode = 1175 ARCMSR_MESSAGE_RETURNCODE_OK; 1176 } 1177 break; 1178 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 1179 u_int8_t *pQbuffer = acb->wqbuffer; 1180 1181 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1182 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1183 CHIP_REG_WRITE32(inbound_doorbell 1184 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1185 } 1186 acb->acb_flags |= 1187 (ACB_F_MESSAGE_WQBUFFER_CLEARED | 1188 ACB_F_MESSAGE_WQBUFFER_READED); 1189 acb->wqbuf_firstindex = 0; 1190 acb->wqbuf_lastindex = 0; 1191 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1192 pcmdmessagefld->cmdmessage.ReturnCode = 1193 ARCMSR_MESSAGE_RETURNCODE_OK; 1194 } 1195 break; 1196 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 1197 u_int8_t *pQbuffer; 1198 1199 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1200 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1201 CHIP_REG_WRITE32(inbound_doorbell 1202 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1203 } 1204 acb->acb_flags |= 1205 (ACB_F_MESSAGE_WQBUFFER_CLEARED 1206 | ACB_F_MESSAGE_RQBUFFER_CLEARED 1207 | ACB_F_MESSAGE_WQBUFFER_READED); 1208 acb->rqbuf_firstindex = 0; 1209 acb->rqbuf_lastindex = 0; 1210 acb->wqbuf_firstindex = 0; 1211 acb->wqbuf_lastindex = 0; 1212 pQbuffer = acb->rqbuffer; 1213 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 1214 pQbuffer = acb->wqbuffer; 1215 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 1216 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1217 } 1218 break; 1219 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 1220 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 1221 } 1222 break; 1223 case ARCMSR_MESSAGE_SAY_HELLO: { 1224 int8_t * hello_string = "Hello! I am ARCMSR"; 1225 1226 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 1227 , (int16_t)strlen(hello_string)); 1228 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1229 } 1230 break; 1231 case ARCMSR_MESSAGE_SAY_GOODBYE: 1232 arcmsr_iop_parking(acb); 1233 break; 1234 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 1235 arcmsr_flush_adapter_cache(acb); 1236 break; 1237 default: 1238 retvalue = ARCMSR_MESSAGE_FAIL; 1239 } 1240 message_out: 1241 return retvalue; 1242 } 1243 /* 1244 ********************************************************************* 1245 ********************************************************************* 1246 */ 1247 static void arcmsr_executesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1248 { 1249 struct CommandControlBlock *srb=(struct CommandControlBlock *)arg; 1250 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb; 1251 union ccb * pccb; 1252 int target, lun; 1253 1254 pccb=srb->pccb; 1255 target=pccb->ccb_h.target_id; 1256 lun=pccb->ccb_h.target_lun; 1257 if(error != 0) { 1258 if(error != EFBIG) { 1259 printf("arcmsr%d: unexpected error %x returned from 'bus_dmamap_load' \n" 1260 , acb->pci_unit, error); 1261 } 1262 if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1263 xpt_freeze_devq(pccb->ccb_h.path, /*count*/1); 1264 pccb->ccb_h.status |= (CAM_REQ_TOO_BIG|CAM_DEV_QFRZN); 1265 } 1266 arcmsr_srb_complete(srb, 0); 1267 return; 1268 } 1269 if(nseg > ARCMSR_MAX_SG_ENTRIES) { 1270 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; 1271 arcmsr_srb_complete(srb, 0); 1272 return; 1273 } 1274 if(acb->acb_flags & ACB_F_BUS_RESET) { 1275 printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); 1276 pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; 1277 arcmsr_srb_complete(srb, 0); 1278 return; 1279 } 1280 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 1281 u_int8_t block_cmd; 1282 1283 block_cmd=pccb->csio.cdb_io.cdb_bytes[0] & 0x0f; 1284 if(block_cmd==0x08 || block_cmd==0x0a) { 1285 printf("arcmsr%d:block 'read/write' command" 1286 "with gone raid volume Cmd=%2x, TargetId=%d, Lun=%d \n" 1287 , acb->pci_unit, block_cmd, target, lun); 1288 pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 1289 arcmsr_srb_complete(srb, 0); 1290 return; 1291 } 1292 } 1293 if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1294 if(nseg != 0) { 1295 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 1296 } 1297 arcmsr_srb_complete(srb, 0); 1298 return; 1299 } 1300 pccb->ccb_h.status |= CAM_SIM_QUEUED; 1301 if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) { 1302 pccb->ccb_h.status |= CAM_SCSI_BUSY; 1303 arcmsr_srb_complete(srb, 0); 1304 return; 1305 } 1306 arcmsr_build_srb(srb, dm_segs, nseg); 1307 arcmsr_post_srb(acb, srb); 1308 return; 1309 } 1310 /* 1311 ***************************************************************************************** 1312 ***************************************************************************************** 1313 */ 1314 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb) 1315 { 1316 struct CommandControlBlock *srb; 1317 struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; 1318 u_int32_t intmask_org, mask; 1319 int i=0; 1320 1321 acb->num_aborts++; 1322 /* 1323 *************************************************************************** 1324 ** It is the upper layer do abort command this lock just prior to calling us. 1325 ** First determine if we currently own this command. 1326 ** Start by searching the device queue. If not found 1327 ** at all, and the system wanted us to just abort the 1328 ** command return success. 1329 *************************************************************************** 1330 */ 1331 if(acb->srboutstandingcount!=0) { 1332 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 1333 srb=acb->psrb_pool[i]; 1334 if(srb->startdone==ARCMSR_SRB_START) { 1335 if(srb->pccb==abortccb) { 1336 srb->startdone=ARCMSR_SRB_ABORTED; 1337 printf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'" 1338 "outstanding command \n" 1339 , acb->pci_unit, abortccb->ccb_h.target_id 1340 , abortccb->ccb_h.target_lun, srb); 1341 goto abort_outstanding_cmd; 1342 } 1343 } 1344 } 1345 } 1346 return(FALSE); 1347 abort_outstanding_cmd: 1348 /* do not talk to iop 331 abort command */ 1349 UDELAY(3000*1000);/*wait for 3 sec for all command done*/ 1350 /* disable all outbound interrupt */ 1351 intmask_org=CHIP_REG_READ32(outbound_intmask); 1352 CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 1353 arcmsr_polling_srbdone(acb, srb); 1354 /* enable all outbound interrupt */ 1355 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 1356 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 1357 return (TRUE); 1358 } 1359 /* 1360 **************************************************************************** 1361 **************************************************************************** 1362 */ 1363 static void arcmsr_bus_reset(struct AdapterControlBlock *acb) 1364 { 1365 int retry=0; 1366 1367 acb->num_resets++; 1368 acb->acb_flags |=ACB_F_BUS_RESET; 1369 while(acb->srboutstandingcount!=0 && retry < 400) { 1370 arcmsr_interrupt((void *)acb); 1371 UDELAY(25000); 1372 retry++; 1373 } 1374 arcmsr_iop_reset(acb); 1375 acb->acb_flags &= ~ACB_F_BUS_RESET; 1376 return; 1377 } 1378 /* 1379 ************************************************************************** 1380 ************************************************************************** 1381 */ 1382 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, 1383 union ccb * pccb) 1384 { 1385 pccb->ccb_h.status |= CAM_REQ_CMP; 1386 switch (pccb->csio.cdb_io.cdb_bytes[0]) { 1387 case INQUIRY: { 1388 unsigned char inqdata[36]; 1389 char *buffer=pccb->csio.data_ptr;; 1390 1391 if (pccb->ccb_h.target_lun) { 1392 pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 1393 xpt_done(pccb); 1394 return; 1395 } 1396 inqdata[0] = T_PROCESSOR; 1397 /* Periph Qualifier & Periph Dev Type */ 1398 inqdata[1] = 0; 1399 /* rem media bit & Dev Type Modifier */ 1400 inqdata[2] = 0; 1401 /* ISO, ECMA, & ANSI versions */ 1402 inqdata[4] = 31; 1403 /* length of additional data */ 1404 strncpy(&inqdata[8], "Areca ", 8); 1405 /* Vendor Identification */ 1406 strncpy(&inqdata[16], "RAID controller ", 16); 1407 /* Product Identification */ 1408 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 1409 memcpy(buffer, inqdata, sizeof(inqdata)); 1410 xpt_done(pccb); 1411 } 1412 break; 1413 case WRITE_BUFFER: 1414 case READ_BUFFER: { 1415 if (arcmsr_iop_message_xfer(acb, pccb)) { 1416 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1417 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 1418 } 1419 xpt_done(pccb); 1420 } 1421 break; 1422 default: 1423 xpt_done(pccb); 1424 } 1425 } 1426 /* 1427 ********************************************************************* 1428 ********************************************************************* 1429 */ 1430 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb) 1431 { 1432 struct AdapterControlBlock * acb; 1433 1434 acb=(struct AdapterControlBlock *) cam_sim_softc(psim); 1435 if(acb==NULL) { 1436 pccb->ccb_h.status |= CAM_REQ_INVALID; 1437 xpt_done(pccb); 1438 return; 1439 } 1440 switch (pccb->ccb_h.func_code) { 1441 case XPT_SCSI_IO: { 1442 struct CommandControlBlock *srb; 1443 int target=pccb->ccb_h.target_id; 1444 1445 if(target == 16) { 1446 /* virtual device for iop message transfer */ 1447 arcmsr_handle_virtual_command(acb, pccb); 1448 return; 1449 } 1450 if((srb=arcmsr_get_freesrb(acb)) == NULL) { 1451 pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; 1452 xpt_done(pccb); 1453 return; 1454 } 1455 pccb->ccb_h.arcmsr_ccbsrb_ptr=srb; 1456 pccb->ccb_h.arcmsr_ccbacb_ptr=acb; 1457 srb->pccb=pccb; 1458 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1459 if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) { 1460 /* Single buffer */ 1461 if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) { 1462 /* Buffer is virtual */ 1463 u_int32_t error, s; 1464 1465 s=splsoftvm(); 1466 error = bus_dmamap_load(acb->dm_segs_dmat 1467 , srb->dm_segs_dmamap 1468 , pccb->csio.data_ptr 1469 , pccb->csio.dxfer_len 1470 , arcmsr_executesrb, srb, /*flags*/0); 1471 if(error == EINPROGRESS) { 1472 xpt_freeze_simq(acb->psim, 1); 1473 pccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1474 } 1475 splx(s); 1476 } else { 1477 /* Buffer is physical */ 1478 panic("arcmsr: CAM_DATA_PHYS not supported"); 1479 } 1480 } else { 1481 /* Scatter/gather list */ 1482 struct bus_dma_segment *segs; 1483 1484 if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 1485 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1486 pccb->ccb_h.status |= CAM_PROVIDE_FAIL; 1487 xpt_done(pccb); 1488 free(srb, M_DEVBUF); 1489 return; 1490 } 1491 segs=(struct bus_dma_segment *)pccb->csio.data_ptr; 1492 arcmsr_executesrb(srb, segs, pccb->csio.sglist_cnt, 0); 1493 } 1494 } else { 1495 arcmsr_executesrb(srb, NULL, 0, 0); 1496 } 1497 break; 1498 } 1499 case XPT_TARGET_IO: { 1500 /* target mode not yet support vendor specific commands. */ 1501 pccb->ccb_h.status |= CAM_REQ_CMP; 1502 xpt_done(pccb); 1503 break; 1504 } 1505 case XPT_PATH_INQ: { 1506 struct ccb_pathinq *cpi= &pccb->cpi; 1507 1508 cpi->version_num=1; 1509 cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE; 1510 cpi->target_sprt=0; 1511 cpi->hba_misc=0; 1512 cpi->hba_eng_cnt=0; 1513 cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */ 1514 cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */ 1515 cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */ 1516 cpi->bus_id=cam_sim_bus(psim); 1517 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 1518 strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); 1519 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); 1520 cpi->unit_number=cam_sim_unit(psim); 1521 cpi->transport = XPORT_SPI; 1522 cpi->transport_version = 2; 1523 cpi->protocol = PROTO_SCSI; 1524 cpi->protocol_version = SCSI_REV_2; 1525 cpi->ccb_h.status |= CAM_REQ_CMP; 1526 xpt_done(pccb); 1527 break; 1528 } 1529 case XPT_ABORT: { 1530 union ccb *pabort_ccb; 1531 1532 pabort_ccb=pccb->cab.abort_ccb; 1533 switch (pabort_ccb->ccb_h.func_code) { 1534 case XPT_ACCEPT_TARGET_IO: 1535 case XPT_IMMED_NOTIFY: 1536 case XPT_CONT_TARGET_IO: 1537 if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { 1538 pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; 1539 xpt_done(pabort_ccb); 1540 pccb->ccb_h.status |= CAM_REQ_CMP; 1541 } else { 1542 xpt_print_path(pabort_ccb->ccb_h.path); 1543 printf("Not found\n"); 1544 pccb->ccb_h.status |= CAM_PATH_INVALID; 1545 } 1546 break; 1547 case XPT_SCSI_IO: 1548 pccb->ccb_h.status |= CAM_UA_ABORT; 1549 break; 1550 default: 1551 pccb->ccb_h.status |= CAM_REQ_INVALID; 1552 break; 1553 } 1554 xpt_done(pccb); 1555 break; 1556 } 1557 case XPT_RESET_BUS: 1558 case XPT_RESET_DEV: { 1559 u_int32_t i; 1560 1561 arcmsr_bus_reset(acb); 1562 for (i=0; i < 500; i++) { 1563 DELAY(1000); 1564 } 1565 pccb->ccb_h.status |= CAM_REQ_CMP; 1566 xpt_done(pccb); 1567 break; 1568 } 1569 case XPT_TERM_IO: { 1570 pccb->ccb_h.status |= CAM_REQ_INVALID; 1571 xpt_done(pccb); 1572 break; 1573 } 1574 case XPT_GET_TRAN_SETTINGS: { 1575 struct ccb_trans_settings *cts; 1576 struct ccb_trans_settings_scsi *scsi; 1577 struct ccb_trans_settings_spi *spi; 1578 1579 if(pccb->ccb_h.target_id == 16) { 1580 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 1581 xpt_done(pccb); 1582 break; 1583 } 1584 1585 cts= &pccb->cts; 1586 scsi = &cts->proto_specific.scsi; 1587 spi = &cts->xport_specific.spi; 1588 1589 cts->protocol = PROTO_SCSI; 1590 cts->protocol_version = SCSI_REV_2; 1591 cts->transport = XPORT_SPI; 1592 cts->transport_version = 2; 1593 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 1594 spi->sync_period=3; 1595 spi->sync_offset=32; 1596 spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT; 1597 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 1598 spi->valid = CTS_SPI_VALID_SYNC_RATE 1599 | CTS_SPI_VALID_SYNC_OFFSET 1600 | CTS_SPI_VALID_BUS_WIDTH; 1601 scsi->valid = CTS_SCSI_VALID_TQ; 1602 1603 pccb->ccb_h.status |= CAM_REQ_CMP; 1604 xpt_done(pccb); 1605 break; 1606 } 1607 case XPT_SET_TRAN_SETTINGS: { 1608 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 1609 xpt_done(pccb); 1610 break; 1611 } 1612 case XPT_CALC_GEOMETRY: { 1613 struct ccb_calc_geometry *ccg; 1614 u_int32_t size_mb; 1615 u_int32_t secs_per_cylinder; 1616 1617 if(pccb->ccb_h.target_id == 16) { 1618 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 1619 xpt_done(pccb); 1620 break; 1621 } 1622 ccg= &pccb->ccg; 1623 if (ccg->block_size == 0) { 1624 pccb->ccb_h.status = CAM_REQ_INVALID; 1625 xpt_done(pccb); 1626 break; 1627 } 1628 if(((1024L * 1024L)/ccg->block_size) < 0) { 1629 pccb->ccb_h.status = CAM_REQ_INVALID; 1630 xpt_done(pccb); 1631 break; 1632 } 1633 size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size); 1634 if(size_mb > 1024 ) { 1635 ccg->heads=255; 1636 ccg->secs_per_track=63; 1637 } else { 1638 ccg->heads=64; 1639 ccg->secs_per_track=32; 1640 } 1641 secs_per_cylinder=ccg->heads * ccg->secs_per_track; 1642 ccg->cylinders=ccg->volume_size / secs_per_cylinder; 1643 pccb->ccb_h.status |= CAM_REQ_CMP; 1644 xpt_done(pccb); 1645 break; 1646 } 1647 default: 1648 pccb->ccb_h.status |= CAM_REQ_INVALID; 1649 xpt_done(pccb); 1650 break; 1651 } 1652 return; 1653 } 1654 /* 1655 ********************************************************************** 1656 ********************************************************************** 1657 */ 1658 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) 1659 { 1660 acb->acb_flags |= ACB_F_MSG_START_BGRB; 1661 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); 1662 if(arcmsr_wait_msgint_ready(acb)) { 1663 printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit); 1664 } 1665 return; 1666 } 1667 /* 1668 ********************************************************************** 1669 ********************************************************************** 1670 */ 1671 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 1672 { 1673 struct CommandControlBlock *srb; 1674 uint32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; 1675 int id, lun; 1676 1677 polling_srb_retry: 1678 poll_count++; 1679 outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable; 1680 CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/ 1681 while(1) { 1682 if((flag_srb=CHIP_REG_READ32(outbound_queueport))==0xFFFFFFFF) { 1683 if(poll_srb_done) { 1684 break;/*chip FIFO no ccb for completion already*/ 1685 } else { 1686 UDELAY(25000); 1687 if(poll_count > 100) { 1688 break; 1689 } 1690 goto polling_srb_retry; 1691 } 1692 } 1693 /* check ifcommand done with no error*/ 1694 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5)); 1695 /*frame must be 32 bytes aligned*/ 1696 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 1697 if((srb->startdone==ARCMSR_SRB_ABORTED) && (srb==poll_srb)) { 1698 printf("arcmsr%d: scsi id=%d lun=%d srb='%p'" 1699 "poll command abort successfully \n" 1700 , acb->pci_unit 1701 , srb->pccb->ccb_h.target_id 1702 , srb->pccb->ccb_h.target_lun, srb); 1703 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 1704 arcmsr_srb_complete(srb, 1); 1705 poll_srb_done=1; 1706 continue; 1707 } 1708 printf("arcmsr%d: polling get an illegal srb command done srb='%p'" 1709 "srboutstandingcount=%d \n" 1710 , acb->pci_unit 1711 , srb, acb->srboutstandingcount); 1712 continue; 1713 } 1714 id=srb->pccb->ccb_h.target_id; 1715 lun=srb->pccb->ccb_h.target_lun; 1716 if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) { 1717 if(acb->devstate[id][lun]==ARECA_RAID_GONE) { 1718 acb->devstate[id][lun]=ARECA_RAID_GOOD; 1719 } 1720 srb->pccb->ccb_h.status |= CAM_REQ_CMP; 1721 arcmsr_srb_complete(srb, 1); 1722 } else { 1723 switch(srb->arcmsr_cdb.DeviceStatus) { 1724 case ARCMSR_DEV_SELECT_TIMEOUT: { 1725 acb->devstate[id][lun]=ARECA_RAID_GONE; 1726 srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 1727 arcmsr_srb_complete(srb, 1); 1728 } 1729 break; 1730 case ARCMSR_DEV_ABORTED: 1731 case ARCMSR_DEV_INIT_FAIL: { 1732 acb->devstate[id][lun]=ARECA_RAID_GONE; 1733 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 1734 arcmsr_srb_complete(srb, 1); 1735 } 1736 break; 1737 case SCSISTAT_CHECK_CONDITION: { 1738 acb->devstate[id][lun]=ARECA_RAID_GOOD; 1739 arcmsr_report_sense_info(srb); 1740 arcmsr_srb_complete(srb, 1); 1741 } 1742 break; 1743 default: 1744 printf("arcmsr%d: scsi id=%d lun=%d" 1745 "polling and getting command error done" 1746 ", but got unknow DeviceStatus=0x%x \n" 1747 , acb->pci_unit, id, lun, srb->arcmsr_cdb.DeviceStatus); 1748 acb->devstate[id][lun]=ARECA_RAID_GONE; 1749 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; 1750 /*unknow error or crc error just for retry*/ 1751 arcmsr_srb_complete(srb, 1); 1752 break; 1753 } 1754 } 1755 } /*drain reply FIFO*/ 1756 return; 1757 } 1758 /* 1759 ********************************************************************** 1760 ** get firmware miscellaneous data 1761 ********************************************************************** 1762 */ 1763 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) 1764 { 1765 char *acb_firm_model=acb->firm_model; 1766 char *acb_firm_version=acb->firm_version; 1767 size_t iop_firm_model=offsetof(struct MessageUnit,message_rwbuffer[15]); /*firm_model,15,60-67*/ 1768 size_t iop_firm_version=offsetof(struct MessageUnit,message_rwbuffer[17]); /*firm_version,17,68-83*/ 1769 int i; 1770 1771 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 1772 if(arcmsr_wait_msgint_ready(acb)) { 1773 printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n" 1774 , acb->pci_unit); 1775 } 1776 i=0; 1777 while(i<8) { 1778 *acb_firm_model=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_model+i); 1779 /* 8 bytes firm_model, 15, 60-67*/ 1780 acb_firm_model++; 1781 i++; 1782 } 1783 i=0; 1784 while(i<16) { 1785 *acb_firm_version=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_version+i); 1786 /* 16 bytes firm_version, 17, 68-83*/ 1787 acb_firm_version++; 1788 i++; 1789 } 1790 printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); 1791 printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); 1792 acb->firm_request_len=CHIP_REG_READ32(message_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 1793 acb->firm_numbers_queue=CHIP_REG_READ32(message_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 1794 acb->firm_sdram_size=CHIP_REG_READ32(message_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 1795 acb->firm_ide_channels=CHIP_REG_READ32(message_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 1796 return; 1797 } 1798 /* 1799 ********************************************************************** 1800 ** start background rebulid 1801 ********************************************************************** 1802 */ 1803 static void arcmsr_iop_init(struct AdapterControlBlock *acb) 1804 { 1805 u_int32_t intmask_org, mask, outbound_doorbell, firmware_state=0; 1806 1807 do { 1808 firmware_state=CHIP_REG_READ32(outbound_msgaddr1); 1809 } while((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)==0); 1810 intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; 1811 CHIP_REG_WRITE32(outbound_intmask, intmask_org); 1812 intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; 1813 arcmsr_get_firmware_spec(acb); 1814 arcmsr_start_adapter_bgrb(acb); 1815 /* clear Qbuffer if door bell ringed */ 1816 outbound_doorbell=CHIP_REG_READ32(outbound_doorbell); 1817 CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */ 1818 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1819 /* enable outbound Post Queue, outbound message0, outbell doorbell Interrupt */ 1820 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 1821 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 1822 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; 1823 acb->acb_flags |=ACB_F_IOP_INITED; 1824 return; 1825 } 1826 /* 1827 ********************************************************************** 1828 ********************************************************************** 1829 */ 1830 static void arcmsr_map_freesrb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1831 { 1832 struct AdapterControlBlock *acb=arg; 1833 struct CommandControlBlock *srb_tmp; 1834 u_int8_t * dma_memptr; 1835 u_int32_t i, srb_phyaddr_hi32; 1836 unsigned long srb_phyaddr=(unsigned long)segs->ds_addr; 1837 1838 dma_memptr=acb->uncacheptr; 1839 srb_phyaddr=segs->ds_addr; /* We suppose bus_addr_t high part always 0 here*/ 1840 if(((unsigned long)dma_memptr & 0x1F)!=0) { 1841 dma_memptr=dma_memptr+(0x20-((unsigned long)dma_memptr & 0x1F)); 1842 srb_phyaddr=srb_phyaddr+(0x20-((unsigned long)srb_phyaddr & 0x1F)); 1843 } 1844 srb_tmp=(struct CommandControlBlock *)dma_memptr; 1845 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 1846 /*srb address must 32 (0x20) boundary*/ 1847 if(((unsigned long)srb_tmp & 0x1F)==0) { 1848 if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) { 1849 acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; 1850 printf("arcmsr%d: srb dmamap bus_dmamap_create error\n", acb->pci_unit); 1851 return; 1852 } 1853 srb_tmp->cdb_shifted_phyaddr=srb_phyaddr >> 5; 1854 srb_tmp->acb=acb; 1855 acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp; 1856 srb_phyaddr=srb_phyaddr+sizeof(struct CommandControlBlock); 1857 } else { 1858 acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; 1859 printf("arcmsr%d: dma_memptr=%p i=%d" 1860 "this srb cross 32 bytes boundary ignored srb_tmp=%p \n" 1861 , acb->pci_unit, dma_memptr, i, srb_tmp); 1862 return; 1863 } 1864 srb_tmp++; 1865 } 1866 acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr; 1867 /* 1868 ******************************************************************** 1869 ** here we need to tell iop 331 our freesrb.HighPart 1870 ** if freesrb.HighPart is not zero 1871 ******************************************************************** 1872 */ 1873 srb_phyaddr_hi32=(uint32_t) ((srb_phyaddr>>16)>>16); 1874 if(srb_phyaddr_hi32!=0) { 1875 CHIP_REG_WRITE32(message_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 1876 CHIP_REG_WRITE32(message_rwbuffer[1], srb_phyaddr_hi32); 1877 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 1878 if(arcmsr_wait_msgint_ready(acb)) { 1879 printf("arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 1880 } 1881 } 1882 return; 1883 } 1884 /* 1885 ************************************************************************ 1886 ** 1887 ** 1888 ************************************************************************ 1889 */ 1890 static void arcmsr_free_resource(struct AdapterControlBlock *acb) 1891 { 1892 /* remove the control device */ 1893 if(acb->ioctl_dev != NULL) { 1894 destroy_dev(acb->ioctl_dev); 1895 } 1896 bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); 1897 bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); 1898 bus_dma_tag_destroy(acb->srb_dmat); 1899 bus_dma_tag_destroy(acb->dm_segs_dmat); 1900 bus_dma_tag_destroy(acb->parent_dmat); 1901 return; 1902 } 1903 /* 1904 ************************************************************************ 1905 ************************************************************************ 1906 */ 1907 static u_int32_t arcmsr_initialize(device_t dev) 1908 { 1909 struct AdapterControlBlock *acb=device_get_softc(dev); 1910 u_int32_t intmask_org, rid=PCIR_BAR(0); 1911 vm_offset_t mem_base; 1912 u_int16_t pci_command; 1913 int i, j; 1914 1915 #if __FreeBSD_version >= 502010 1916 if(bus_dma_tag_create( /*parent*/ NULL, 1917 /*alignemnt*/ 1, 1918 /*boundary*/ 0, 1919 /*lowaddr*/ BUS_SPACE_MAXADDR, 1920 /*highaddr*/ BUS_SPACE_MAXADDR, 1921 /*filter*/ NULL, 1922 /*filterarg*/ NULL, 1923 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, 1924 /*nsegments*/ BUS_SPACE_UNRESTRICTED, 1925 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1926 /*flags*/ 0, 1927 /*lockfunc*/ NULL, 1928 /*lockarg*/ NULL, 1929 &acb->parent_dmat) != 0) 1930 #else 1931 if(bus_dma_tag_create( /*parent*/ NULL, 1932 /*alignemnt*/ 1, 1933 /*boundary*/ 0, 1934 /*lowaddr*/ BUS_SPACE_MAXADDR, 1935 /*highaddr*/ BUS_SPACE_MAXADDR, 1936 /*filter*/ NULL, 1937 /*filterarg*/ NULL, 1938 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, 1939 /*nsegments*/ BUS_SPACE_UNRESTRICTED, 1940 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1941 /*flags*/ 0, 1942 &acb->parent_dmat) != 0) 1943 #endif 1944 { 1945 printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", acb->pci_unit); 1946 return ENOMEM; 1947 } 1948 /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */ 1949 #if __FreeBSD_version >= 502010 1950 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 1951 /*alignment*/ 1, 1952 /*boundary*/ 0, 1953 /*lowaddr*/ BUS_SPACE_MAXADDR, 1954 /*highaddr*/ BUS_SPACE_MAXADDR, 1955 /*filter*/ NULL, 1956 /*filterarg*/ NULL, 1957 /*maxsize*/ MAXBSIZE, 1958 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, 1959 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1960 /*flags*/ 0, 1961 /*lockfunc*/ busdma_lock_mutex, 1962 /*lockarg*/ &Giant, 1963 &acb->dm_segs_dmat) != 0) 1964 #else 1965 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 1966 /*alignment*/ 1, 1967 /*boundary*/ 0, 1968 /*lowaddr*/ BUS_SPACE_MAXADDR, 1969 /*highaddr*/ BUS_SPACE_MAXADDR, 1970 /*filter*/ NULL, 1971 /*filterarg*/ NULL, 1972 /*maxsize*/ MAXBSIZE, 1973 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, 1974 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1975 /*flags*/ 0, 1976 &acb->dm_segs_dmat) != 0) 1977 #endif 1978 { 1979 bus_dma_tag_destroy(acb->parent_dmat); 1980 printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", acb->pci_unit); 1981 return ENOMEM; 1982 } 1983 /* DMA tag for our srb structures.... Allocate the freesrb memory */ 1984 #if __FreeBSD_version >= 502010 1985 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 1986 /*alignment*/ 1, 1987 /*boundary*/ 0, 1988 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, 1989 /*highaddr*/ BUS_SPACE_MAXADDR, 1990 /*filter*/ NULL, 1991 /*filterarg*/ NULL, 1992 /*maxsize*/ ARCMSR_SRBS_POOL_SIZE, 1993 /*nsegments*/ 1, 1994 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1995 /*flags*/ 0, 1996 /*lockfunc*/ NULL, 1997 /*lockarg*/ NULL, 1998 &acb->srb_dmat) != 0) 1999 #else 2000 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 2001 /*alignment*/ 1, 2002 /*boundary*/ 0, 2003 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, 2004 /*highaddr*/ BUS_SPACE_MAXADDR, 2005 /*filter*/ NULL, 2006 /*filterarg*/ NULL, 2007 /*maxsize*/ ARCMSR_SRBS_POOL_SIZE, 2008 /*nsegments*/ 1, 2009 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 2010 /*flags*/ 0, 2011 &acb->srb_dmat) != 0) 2012 #endif 2013 { 2014 bus_dma_tag_destroy(acb->dm_segs_dmat); 2015 bus_dma_tag_destroy(acb->parent_dmat); 2016 printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", acb->pci_unit); 2017 return ENXIO; 2018 } 2019 /* Allocation for our srbs */ 2020 if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr 2021 , BUS_DMA_WAITOK | BUS_DMA_COHERENT, &acb->srb_dmamap) != 0) { 2022 bus_dma_tag_destroy(acb->srb_dmat); 2023 bus_dma_tag_destroy(acb->dm_segs_dmat); 2024 bus_dma_tag_destroy(acb->parent_dmat); 2025 printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", acb->pci_unit); 2026 return ENXIO; 2027 } 2028 /* And permanently map them */ 2029 if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr 2030 , ARCMSR_SRBS_POOL_SIZE, arcmsr_map_freesrb, acb, /*flags*/0)) { 2031 bus_dma_tag_destroy(acb->srb_dmat); 2032 bus_dma_tag_destroy(acb->dm_segs_dmat); 2033 bus_dma_tag_destroy(acb->parent_dmat); 2034 printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", acb->pci_unit); 2035 return ENXIO; 2036 } 2037 pci_command=pci_read_config(dev, PCIR_COMMAND, 2); 2038 pci_command |= PCIM_CMD_BUSMASTEREN; 2039 pci_command |= PCIM_CMD_PERRESPEN; 2040 pci_command |= PCIM_CMD_MWRICEN; 2041 /* Enable Busmaster/Mem */ 2042 pci_command |= PCIM_CMD_MEMEN; 2043 pci_write_config(dev, PCIR_COMMAND, pci_command, 2); 2044 acb->sys_res_arcmsr=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, 0x1000, RF_ACTIVE); 2045 if(acb->sys_res_arcmsr == NULL) { 2046 arcmsr_free_resource(acb); 2047 printf("arcmsr%d: bus_alloc_resource failure!\n", acb->pci_unit); 2048 return ENOMEM; 2049 } 2050 if(rman_get_start(acb->sys_res_arcmsr) <= 0) { 2051 arcmsr_free_resource(acb); 2052 printf("arcmsr%d: rman_get_start failure!\n", acb->pci_unit); 2053 return ENXIO; 2054 } 2055 mem_base=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr); 2056 if(mem_base==0) { 2057 arcmsr_free_resource(acb); 2058 printf("arcmsr%d: rman_get_virtual failure!\n", acb->pci_unit); 2059 return ENXIO; 2060 } 2061 if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { 2062 arcmsr_free_resource(acb); 2063 printf("arcmsr%d: map free srb failure!\n", acb->pci_unit); 2064 return ENXIO; 2065 } 2066 acb->btag=rman_get_bustag(acb->sys_res_arcmsr); 2067 acb->bhandle=rman_get_bushandle(acb->sys_res_arcmsr); 2068 acb->pmu=(struct MessageUnit *)mem_base; 2069 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED 2070 |ACB_F_MESSAGE_RQBUFFER_CLEARED 2071 |ACB_F_MESSAGE_WQBUFFER_READED); 2072 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 2073 /* 2074 ******************************************************************** 2075 ** init raid volume state 2076 ******************************************************************** 2077 */ 2078 for(i=0;i<ARCMSR_MAX_TARGETID;i++) { 2079 for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) { 2080 acb->devstate[i][j]=ARECA_RAID_GOOD; 2081 } 2082 } 2083 /* disable iop all outbound interrupt */ 2084 intmask_org=CHIP_REG_READ32(outbound_intmask); 2085 CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 2086 arcmsr_iop_init(acb); 2087 return(0); 2088 } 2089 /* 2090 ************************************************************************ 2091 ************************************************************************ 2092 */ 2093 static u_int32_t arcmsr_attach(device_t dev) 2094 { 2095 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 2096 u_int32_t unit=device_get_unit(dev); 2097 struct ccb_setasync csa; 2098 struct cam_devq *devq; /* Device Queue to use for this SIM */ 2099 struct resource *irqres; 2100 int rid; 2101 2102 if(acb == NULL) { 2103 printf("arcmsr%d: cannot allocate softc\n", unit); 2104 return (ENOMEM); 2105 } 2106 bzero(acb, sizeof(struct AdapterControlBlock)); 2107 if(arcmsr_initialize(dev)) { 2108 printf("arcmsr%d: initialize failure!\n", unit); 2109 return ENXIO; 2110 } 2111 /* After setting up the adapter, map our interrupt */ 2112 rid=0; 2113 irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE); 2114 if(irqres == NULL || 2115 bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE 2116 , arcmsr_interrupt, acb, &acb->ih)) { 2117 arcmsr_free_resource(acb); 2118 printf("arcmsr%d: unable to register interrupt handler!\n", unit); 2119 return ENXIO; 2120 } 2121 acb->irqres=irqres; 2122 acb->pci_dev=dev; 2123 acb->pci_unit=unit; 2124 /* 2125 * Now let the CAM generic SCSI layer find the SCSI devices on 2126 * the bus * start queue to reset to the idle loop. * 2127 * Create device queue of SIM(s) * (MAX_START_JOB - 1) : 2128 * max_sim_transactions 2129 */ 2130 devq=cam_simq_alloc(ARCMSR_MAX_START_JOB); 2131 if(devq == NULL) { 2132 arcmsr_free_resource(acb); 2133 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2134 printf("arcmsr%d: cam_simq_alloc failure!\n", unit); 2135 return ENXIO; 2136 } 2137 acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll 2138 , "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); 2139 if(acb->psim == NULL) { 2140 arcmsr_free_resource(acb); 2141 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2142 cam_simq_free(devq); 2143 printf("arcmsr%d: cam_sim_alloc failure!\n", unit); 2144 return ENXIO; 2145 } 2146 if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) { 2147 arcmsr_free_resource(acb); 2148 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2149 cam_sim_free(acb->psim, /*free_devq*/TRUE); 2150 printf("arcmsr%d: xpt_bus_register failure!\n", unit); 2151 return ENXIO; 2152 } 2153 if(xpt_create_path(&acb->ppath, /* periph */ NULL 2154 , cam_sim_path(acb->psim) 2155 , CAM_TARGET_WILDCARD 2156 , CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2157 arcmsr_free_resource(acb); 2158 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2159 xpt_bus_deregister(cam_sim_path(acb->psim)); 2160 cam_sim_free(acb->psim, /* free_simq */ TRUE); 2161 printf("arcmsr%d: xpt_create_path failure!\n", unit); 2162 return ENXIO; 2163 } 2164 ARCMSR_LOCK_INIT(&acb->workingQ_done_lock, "arcmsr done working Q lock"); 2165 ARCMSR_LOCK_INIT(&acb->workingQ_start_lock, "arcmsr start working Q lock"); 2166 ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock"); 2167 /* 2168 **************************************************** 2169 */ 2170 xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5); 2171 csa.ccb_h.func_code=XPT_SASYNC_CB; 2172 csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE; 2173 csa.callback=arcmsr_async; 2174 csa.callback_arg=acb->psim; 2175 xpt_action((union ccb *)&csa); 2176 /* Create the control device. */ 2177 acb->ioctl_dev=make_dev(&arcmsr_cdevsw 2178 , unit 2179 , UID_ROOT 2180 , GID_WHEEL /* GID_OPERATOR */ 2181 , S_IRUSR | S_IWUSR 2182 , "arcmsr%d", unit); 2183 #if __FreeBSD_version < 503000 2184 acb->ioctl_dev->si_drv1=acb; 2185 #endif 2186 #if __FreeBSD_version > 500005 2187 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit); 2188 #endif 2189 return 0; 2190 } 2191 /* 2192 ************************************************************************ 2193 ************************************************************************ 2194 */ 2195 static u_int32_t arcmsr_probe(device_t dev) 2196 { 2197 u_int32_t id; 2198 static char buf[256]; 2199 char *type; 2200 int raid6 = 1; 2201 2202 if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) { 2203 return (ENXIO); 2204 } 2205 switch(id=pci_get_devid(dev)) { 2206 case PCIDevVenIDARC1110: 2207 case PCIDevVenIDARC1210: 2208 raid6 = 0; 2209 /*FALLTHRU*/ 2210 case PCIDevVenIDARC1120: 2211 case PCIDevVenIDARC1130: 2212 case PCIDevVenIDARC1160: 2213 case PCIDevVenIDARC1170: 2214 case PCIDevVenIDARC1220: 2215 case PCIDevVenIDARC1230: 2216 case PCIDevVenIDARC1260: 2217 case PCIDevVenIDARC1270: 2218 case PCIDevVenIDARC1280: 2219 type = "SATA"; 2220 break; 2221 case PCIDevVenIDARC1380: 2222 case PCIDevVenIDARC1381: 2223 case PCIDevVenIDARC1680: 2224 case PCIDevVenIDARC1681: 2225 type = "SAS"; 2226 break; 2227 default: 2228 type = "X-TYPE"; 2229 break; 2230 } 2231 sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n", type, raid6 ? "(RAID6 capable)" : ""); 2232 device_set_desc_copy(dev, buf); 2233 return 0; 2234 } 2235 /* 2236 ************************************************************************ 2237 ************************************************************************ 2238 */ 2239 static void arcmsr_shutdown(device_t dev) 2240 { 2241 u_int32_t i, poll_count=0; 2242 u_int32_t intmask_org; 2243 struct CommandControlBlock *srb; 2244 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 2245 2246 /* stop adapter background rebuild */ 2247 arcmsr_stop_adapter_bgrb(acb); 2248 arcmsr_flush_adapter_cache(acb); 2249 /* disable all outbound interrupt */ 2250 intmask_org=CHIP_REG_READ32(outbound_intmask); 2251 CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE)); 2252 /* abort all outstanding command */ 2253 acb->acb_flags |= ACB_F_SCSISTOPADAPTER; 2254 acb->acb_flags &= ~ACB_F_IOP_INITED; 2255 if(acb->srboutstandingcount!=0) { 2256 while((acb->srboutstandingcount!=0) && (poll_count < 256)) { 2257 arcmsr_interrupt((void *)acb); 2258 UDELAY(25000); 2259 poll_count++; 2260 } 2261 if(acb->srboutstandingcount!=0) { 2262 arcmsr_abort_allcmd(acb); 2263 /*clear all outbound posted Q*/ 2264 for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) { 2265 CHIP_REG_READ32(outbound_queueport); 2266 } 2267 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 2268 srb=acb->psrb_pool[i]; 2269 if(srb->startdone==ARCMSR_SRB_START) { 2270 srb->startdone=ARCMSR_SRB_ABORTED; 2271 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 2272 arcmsr_srb_complete(srb, 1); 2273 } 2274 } 2275 } 2276 } 2277 atomic_set_int(&acb->srboutstandingcount, 0); 2278 acb->workingsrb_doneindex=0; 2279 acb->workingsrb_startindex=0; 2280 return; 2281 } 2282 /* 2283 ************************************************************************ 2284 ************************************************************************ 2285 */ 2286 static u_int32_t arcmsr_detach(device_t dev) 2287 { 2288 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 2289 2290 arcmsr_shutdown(dev); 2291 arcmsr_free_resource(acb); 2292 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), acb->sys_res_arcmsr); 2293 bus_teardown_intr(dev, acb->irqres, acb->ih); 2294 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 2295 xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); 2296 xpt_free_path(acb->ppath); 2297 xpt_bus_deregister(cam_sim_path(acb->psim)); 2298 cam_sim_free(acb->psim, TRUE); 2299 return (0); 2300 } 2301 2302 2303