1 /* 2 ****************************************************************************************** 3 ** O.S : FreeBSD 4 ** FILE NAME : arcmsr.c 5 ** BY : Erich Chen 6 ** Description: SCSI RAID Device Driver for 7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX) SATA/SAS RAID HOST Adapter 8 ** ARCMSR RAID Host adapter 9 ** [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set] 10 ****************************************************************************************** 11 ************************************************************************ 12 ** 13 ** Copyright (c) 2004-2006 ARECA Co. Ltd. 14 ** Erich Chen, Taipei Taiwan All rights reserved. 15 ** 16 ** Redistribution and use in source and binary forms, with or without 17 ** modification, are permitted provided that the following conditions 18 ** are met: 19 ** 1. Redistributions of source code must retain the above copyright 20 ** notice, this list of conditions and the following disclaimer. 21 ** 2. Redistributions in binary form must reproduce the above copyright 22 ** notice, this list of conditions and the following disclaimer in the 23 ** documentation and/or other materials provided with the distribution. 24 ** 3. The name of the author may not be used to endorse or promote products 25 ** derived from this software without specific prior written permission. 26 ** 27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT 32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 ************************************************************************** 38 ** History 39 ** 40 ** REV# DATE NAME DESCRIPTION 41 ** 1.00.00.00 3/31/2004 Erich Chen First release 42 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error 43 ** 1.20.00.03 4/19/2005 Erich Chen add SATA 24 Ports adapter type support 44 ** clean unused function 45 ** 1.20.00.12 9/12/2005 Erich Chen bug fix with abort command handling, 46 ** firmware version check 47 ** and firmware update notify for hardware bug fix 48 ** handling if none zero high part physical address 49 ** of srb resource 50 ** 1.20.00.13 8/18/2006 Erich Chen remove pending srb and report busy 51 ** add iop message xfer 52 ** with scsi pass-through command 53 ** add new device id of sas raid adapters 54 ** code fit for SPARC64 & PPC 55 ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report 56 ** and cause g_vfs_done() read write error 57 58 ****************************************************************************************** 59 * $FreeBSD$ 60 */ 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/malloc.h> 64 #include <sys/kernel.h> 65 #include <sys/bus.h> 66 #include <sys/queue.h> 67 #include <sys/stat.h> 68 #include <sys/devicestat.h> 69 #include <sys/kthread.h> 70 #include <sys/module.h> 71 #include <sys/proc.h> 72 #include <sys/lock.h> 73 #include <sys/sysctl.h> 74 #include <sys/poll.h> 75 #include <sys/ioccom.h> 76 
#include <vm/vm.h> 77 #include <vm/vm_param.h> 78 #include <vm/pmap.h> 79 80 #include <isa/rtc.h> 81 82 #include <machine/bus.h> 83 #include <machine/resource.h> 84 #include <machine/atomic.h> 85 #include <sys/conf.h> 86 #include <sys/rman.h> 87 88 #include <cam/cam.h> 89 #include <cam/cam_ccb.h> 90 #include <cam/cam_sim.h> 91 #include <cam/cam_xpt_sim.h> 92 #include <cam/cam_debug.h> 93 #include <cam/scsi/scsi_all.h> 94 #include <cam/scsi/scsi_message.h> 95 /* 96 ************************************************************************** 97 ************************************************************************** 98 */ 99 #if __FreeBSD_version >= 500005 100 #include <sys/selinfo.h> 101 #include <sys/mutex.h> 102 #include <sys/endian.h> 103 #include <dev/pci/pcivar.h> 104 #include <dev/pci/pcireg.h> 105 #define ARCMSR_LOCK_INIT(l, s) mtx_init(l, s, NULL, MTX_DEF|MTX_RECURSE) 106 #define ARCMSR_LOCK_ACQUIRE(l) mtx_lock(l) 107 #define ARCMSR_LOCK_RELEASE(l) mtx_unlock(l) 108 #define ARCMSR_LOCK_TRY(l) mtx_trylock(l) 109 #define arcmsr_htole32(x) htole32(x) 110 typedef struct mtx arcmsr_lock_t; 111 #else 112 #include <sys/select.h> 113 #include <pci/pcivar.h> 114 #include <pci/pcireg.h> 115 #define ARCMSR_LOCK_INIT(l, s) simple_lock_init(l) 116 #define ARCMSR_LOCK_ACQUIRE(l) simple_lock(l) 117 #define ARCMSR_LOCK_RELEASE(l) simple_unlock(l) 118 #define ARCMSR_LOCK_TRY(l) simple_lock_try(l) 119 #define arcmsr_htole32(x) (x) 120 typedef struct simplelock arcmsr_lock_t; 121 #endif 122 #include <dev/arcmsr/arcmsr.h> 123 #define ARCMSR_SRBS_POOL_SIZE ((sizeof(struct CommandControlBlock) * ARCMSR_MAX_FREESRB_NUM)+0x20) 124 /* 125 ************************************************************************** 126 ************************************************************************** 127 */ 128 #define CHIP_REG_READ32(r) bus_space_read_4(acb->btag, acb->bhandle, offsetof(struct MessageUnit,r)) 129 #define CHIP_REG_WRITE32(r,d) bus_space_write_4(acb->btag, acb->bhandle, 
offsetof(struct MessageUnit,r), d) 130 /* 131 ************************************************************************** 132 ************************************************************************** 133 */ 134 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb); 135 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb); 136 static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb); 137 static u_int32_t arcmsr_probe(device_t dev); 138 static u_int32_t arcmsr_attach(device_t dev); 139 static u_int32_t arcmsr_detach(device_t dev); 140 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg); 141 static void arcmsr_iop_parking(struct AdapterControlBlock *acb); 142 static void arcmsr_shutdown(device_t dev); 143 static void arcmsr_interrupt(void *arg); 144 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb); 145 static void arcmsr_free_resource(struct AdapterControlBlock *acb); 146 static void arcmsr_bus_reset(struct AdapterControlBlock *acb); 147 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); 148 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); 149 static void arcmsr_iop_init(struct AdapterControlBlock *acb); 150 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb); 151 static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb); 152 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb); 153 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag); 154 static void arcmsr_iop_reset(struct AdapterControlBlock *acb); 155 static void arcmsr_report_sense_info(struct CommandControlBlock *srb); 156 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg); 157 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb); 158 static int 
arcmsr_resume(device_t dev); 159 static int arcmsr_suspend(device_t dev); 160 /* 161 ************************************************************************** 162 ************************************************************************** 163 */ 164 static void UDELAY(u_int32_t us) { DELAY(us); } 165 /* 166 ************************************************************************** 167 ************************************************************************** 168 */ 169 static bus_dmamap_callback_t arcmsr_map_freesrb; 170 static bus_dmamap_callback_t arcmsr_executesrb; 171 /* 172 ************************************************************************** 173 ************************************************************************** 174 */ 175 static d_open_t arcmsr_open; 176 static d_close_t arcmsr_close; 177 static d_ioctl_t arcmsr_ioctl; 178 179 static device_method_t arcmsr_methods[]={ 180 DEVMETHOD(device_probe, arcmsr_probe), 181 DEVMETHOD(device_attach, arcmsr_attach), 182 DEVMETHOD(device_detach, arcmsr_detach), 183 DEVMETHOD(device_shutdown, arcmsr_shutdown), 184 DEVMETHOD(device_suspend, arcmsr_suspend), 185 DEVMETHOD(device_resume, arcmsr_resume), 186 187 DEVMETHOD(bus_print_child, bus_generic_print_child), 188 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 189 { 0, 0 } 190 }; 191 192 static driver_t arcmsr_driver={ 193 "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock) 194 }; 195 196 static devclass_t arcmsr_devclass; 197 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0); 198 #ifndef BUS_DMA_COHERENT 199 #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ 200 #endif 201 #if __FreeBSD_version >= 501000 202 #ifndef D_NEEDGIANT 203 #define D_NEEDGIANT 0x00400000 /* driver want Giant */ 204 #endif 205 #ifndef D_VERSION 206 #define D_VERSION 0x20011966 207 #endif 208 static struct cdevsw arcmsr_cdevsw={ 209 #if __FreeBSD_version > 502010 210 .d_version = D_VERSION, 211 #endif 212 .d_flags = D_NEEDGIANT, 213 
.d_open = arcmsr_open, /* open */ 214 .d_close = arcmsr_close, /* close */ 215 .d_ioctl = arcmsr_ioctl, /* ioctl */ 216 .d_name = "arcmsr", /* name */ 217 }; 218 #else 219 #define ARCMSR_CDEV_MAJOR 180 220 221 static struct cdevsw arcmsr_cdevsw = { 222 arcmsr_open, /* open */ 223 arcmsr_close, /* close */ 224 noread, /* read */ 225 nowrite, /* write */ 226 arcmsr_ioctl, /* ioctl */ 227 nopoll, /* poll */ 228 nommap, /* mmap */ 229 nostrategy, /* strategy */ 230 "arcmsr", /* name */ 231 ARCMSR_CDEV_MAJOR, /* major */ 232 nodump, /* dump */ 233 nopsize, /* psize */ 234 0 /* flags */ 235 }; 236 #endif 237 238 #if __FreeBSD_version < 500005 239 static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc) 240 #else 241 #if __FreeBSD_version < 503000 242 static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc) 243 #else 244 static int arcmsr_open(struct cdev *dev, int flags, int fmt, d_thread_t *proc) 245 #endif 246 #endif 247 { 248 #if __FreeBSD_version < 503000 249 struct AdapterControlBlock *acb=dev->si_drv1; 250 #else 251 int unit = minor(dev); 252 struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); 253 #endif 254 if(acb==NULL) { 255 return ENXIO; 256 } 257 return 0; 258 } 259 /* 260 ************************************************************************** 261 ************************************************************************** 262 */ 263 #if __FreeBSD_version < 500005 264 static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc) 265 #else 266 #if __FreeBSD_version < 503000 267 static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc) 268 #else 269 static int arcmsr_close(struct cdev *dev, int flags, int fmt, d_thread_t *proc) 270 #endif 271 #endif 272 { 273 #if __FreeBSD_version < 503000 274 struct AdapterControlBlock *acb=dev->si_drv1; 275 #else 276 int unit = minor(dev); 277 struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); 278 #endif 279 
if(acb==NULL) { 280 return ENXIO; 281 } 282 return 0; 283 } 284 /* 285 ************************************************************************** 286 ************************************************************************** 287 */ 288 #if __FreeBSD_version < 500005 289 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc) 290 #else 291 #if __FreeBSD_version < 503000 292 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) 293 #else 294 static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, d_thread_t *proc) 295 #endif 296 #endif 297 { 298 #if __FreeBSD_version < 503000 299 struct AdapterControlBlock *acb=dev->si_drv1; 300 #else 301 int unit = minor(dev); 302 struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); 303 #endif 304 305 if(acb==NULL) { 306 return ENXIO; 307 } 308 return(arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg)); 309 } 310 /* 311 ******************************************************************************* 312 ******************************************************************************* 313 */ 314 static int arcmsr_suspend(device_t dev) 315 { 316 struct AdapterControlBlock *acb = device_get_softc(dev); 317 u_int32_t intmask_org; 318 319 /* disable all outbound interrupt */ 320 intmask_org=CHIP_REG_READ32(outbound_intmask); 321 CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE)); 322 /* flush controller */ 323 arcmsr_iop_parking(acb); 324 return(0); 325 } 326 /* 327 ******************************************************************************* 328 ******************************************************************************* 329 */ 330 static int arcmsr_resume(device_t dev) 331 { 332 struct AdapterControlBlock *acb = device_get_softc(dev); 333 334 arcmsr_iop_init(acb); 335 return(0); 336 } 337 /* 338 
********************************************************************************* 339 ********************************************************************************* 340 */ 341 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) 342 { 343 struct AdapterControlBlock *acb; 344 u_int8_t target_id, target_lun; 345 struct cam_sim * sim; 346 347 sim=(struct cam_sim *) cb_arg; 348 acb =(struct AdapterControlBlock *) cam_sim_softc(sim); 349 switch (code) { 350 case AC_LOST_DEVICE: 351 target_id=xpt_path_target_id(path); 352 target_lun=xpt_path_lun_id(path); 353 if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { 354 break; 355 } 356 printf("%s:scsi id%d lun%d device lost \n" 357 , device_get_name(acb->pci_dev), target_id, target_lun); 358 break; 359 default: 360 break; 361 } 362 } 363 /* 364 ************************************************************************ 365 ************************************************************************ 366 */ 367 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 368 { 369 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); 370 if(arcmsr_wait_msgint_ready(acb)) { 371 printf("arcmsr%d: wait 'flush adapter cache' timeout \n" 372 , acb->pci_unit); 373 } 374 return; 375 } 376 /* 377 ********************************************************************** 378 ********************************************************************** 379 */ 380 static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb) 381 { 382 u_int32_t Index; 383 u_int8_t Retries=0x00; 384 385 do { 386 for(Index=0; Index < 100; Index++) { 387 if(CHIP_REG_READ32(outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 388 /*clear interrupt*/ 389 CHIP_REG_WRITE32(outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); 390 return 0x00; 391 } 392 /* one us delay */ 393 UDELAY(10000); 394 }/*max 1 seconds*/ 395 }while(Retries++ < 20);/*max 20 sec*/ 396 return 0xff; 
397 } 398 /* 399 ********************************************************************** 400 ********************************************************************** 401 */ 402 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag) 403 { 404 struct AdapterControlBlock *acb=srb->acb; 405 union ccb * pccb=srb->pccb; 406 407 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 408 bus_dmasync_op_t op; 409 410 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 411 op = BUS_DMASYNC_POSTREAD; 412 } else { 413 op = BUS_DMASYNC_POSTWRITE; 414 } 415 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 416 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 417 } 418 ARCMSR_LOCK_ACQUIRE(&acb->workingQ_done_lock); 419 if(stand_flag==1) { 420 atomic_subtract_int(&acb->srboutstandingcount, 1); 421 } 422 srb->startdone=ARCMSR_SRB_DONE; 423 srb->srb_flags=0; 424 acb->srbworkingQ[acb->workingsrb_doneindex]=srb; 425 acb->workingsrb_doneindex++; 426 acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; 427 ARCMSR_LOCK_RELEASE(&acb->workingQ_done_lock); 428 xpt_done(pccb); 429 return; 430 } 431 /* 432 ********************************************************************** 433 ********************************************************************** 434 */ 435 static void arcmsr_report_sense_info(struct CommandControlBlock *srb) 436 { 437 union ccb * pccb=srb->pccb; 438 439 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 440 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 441 if(&pccb->csio.sense_data) { 442 memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); 443 memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, 444 get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); 445 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ 446 pccb->ccb_h.status |= CAM_AUTOSNS_VALID; 447 } 448 return; 449 } 450 /* 451 ********************************************************************* 452 ** 
453 ********************************************************************* 454 */ 455 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 456 { 457 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); 458 if(arcmsr_wait_msgint_ready(acb)) { 459 printf("arcmsr%d: wait 'abort all outstanding command' timeout \n" 460 , acb->pci_unit); 461 } 462 return; 463 } 464 /* 465 **************************************************************************** 466 **************************************************************************** 467 */ 468 static void arcmsr_iop_reset(struct AdapterControlBlock *acb) 469 { 470 struct CommandControlBlock *srb; 471 u_int32_t intmask_org, mask; 472 u_int32_t i=0; 473 474 if(acb->srboutstandingcount!=0) 475 { 476 /* talk to iop 331 outstanding command aborted*/ 477 arcmsr_abort_allcmd(acb); 478 UDELAY(3000*1000);/*wait for 3 sec for all command aborted*/ 479 /* disable all outbound interrupt */ 480 intmask_org=CHIP_REG_READ32(outbound_intmask); 481 CHIP_REG_WRITE32(outbound_intmask 482 , intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 483 /*clear all outbound posted Q*/ 484 for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) { 485 CHIP_REG_READ32(outbound_queueport); 486 } 487 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 488 srb=acb->psrb_pool[i]; 489 if(srb->startdone==ARCMSR_SRB_START) { 490 srb->startdone=ARCMSR_SRB_ABORTED; 491 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 492 arcmsr_srb_complete(srb, 1); 493 } 494 } 495 /* enable all outbound interrupt */ 496 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 497 |ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 498 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 499 /* post abort all outstanding command message to RAID controller */ 500 } 501 atomic_set_int(&acb->srboutstandingcount, 0); 502 acb->workingsrb_doneindex=0; 503 acb->workingsrb_startindex=0; 504 return; 505 } 506 /* 507 ********************************************************************** 508 
********************************************************************** 509 */ 510 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg) 511 { 512 struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb; 513 u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u; 514 u_int32_t address_lo, address_hi; 515 union ccb * pccb=srb->pccb; 516 struct ccb_scsiio * pcsio= &pccb->csio; 517 u_int32_t arccdbsize=0x30; 518 519 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); 520 arcmsr_cdb->Bus=0; 521 arcmsr_cdb->TargetID=pccb->ccb_h.target_id; 522 arcmsr_cdb->LUN=pccb->ccb_h.target_lun; 523 arcmsr_cdb->Function=1; 524 arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len; 525 arcmsr_cdb->Context=(unsigned long)arcmsr_cdb; 526 bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len); 527 if(nseg != 0) { 528 struct AdapterControlBlock *acb=srb->acb; 529 bus_dmasync_op_t op; 530 u_int32_t length, i, cdb_sgcount=0; 531 532 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 533 op=BUS_DMASYNC_PREREAD; 534 } else { 535 op=BUS_DMASYNC_PREWRITE; 536 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE; 537 srb->srb_flags|=SRB_FLAG_WRITE; 538 } 539 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 540 for(i=0;i<nseg;i++) { 541 /* Get the physical address of the current data pointer */ 542 length=arcmsr_htole32(dm_segs[i].ds_len); 543 address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr)); 544 address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr)); 545 if(address_hi==0) { 546 struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge; 547 pdma_sg->address=address_lo; 548 pdma_sg->length=length; 549 psge += sizeof(struct SG32ENTRY); 550 arccdbsize += sizeof(struct SG32ENTRY); 551 } else { 552 u_int32_t sg64s_size=0, tmplength=length; 553 554 while(1) { 555 u_int64_t span4G, length0; 556 struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge; 557 558 span4G=(u_int64_t)address_lo + tmplength; 559 pdma_sg->addresshigh=address_hi; 560 
pdma_sg->address=address_lo; 561 if(span4G > 0x100000000) { 562 /*see if cross 4G boundary*/ 563 length0=0x100000000-address_lo; 564 pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR; 565 address_hi=address_hi+1; 566 address_lo=0; 567 tmplength=tmplength-(u_int32_t)length0; 568 sg64s_size += sizeof(struct SG64ENTRY); 569 psge += sizeof(struct SG64ENTRY); 570 cdb_sgcount++; 571 } else { 572 pdma_sg->length=tmplength|IS_SG64_ADDR; 573 sg64s_size += sizeof(struct SG64ENTRY); 574 psge += sizeof(struct SG64ENTRY); 575 break; 576 } 577 } 578 arccdbsize += sg64s_size; 579 } 580 cdb_sgcount++; 581 } 582 arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount; 583 arcmsr_cdb->DataLength=pcsio->dxfer_len; 584 if( arccdbsize > 256) { 585 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE; 586 } 587 } 588 return; 589 } 590 /* 591 ************************************************************************** 592 ************************************************************************** 593 */ 594 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb) 595 { 596 u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr; 597 struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb; 598 599 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 600 (srb->srb_flags & SRB_FLAG_WRITE) ? 
BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD); 601 atomic_add_int(&acb->srboutstandingcount, 1); 602 srb->startdone=ARCMSR_SRB_START; 603 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { 604 CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE); 605 } else { 606 CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr); 607 } 608 return; 609 } 610 /* 611 ********************************************************************** 612 ********************************************************************** 613 */ 614 static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb) 615 { 616 u_int8_t * pQbuffer; 617 struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer; 618 u_int8_t * iop_data=(u_int8_t *)pwbuffer->data; 619 u_int32_t allxfer_len=0; 620 621 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 622 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 623 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { 624 pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex]; 625 memcpy(iop_data, pQbuffer, 1); 626 acb->wqbuf_firstindex++; 627 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 628 /*if last index number set it to 0 */ 629 iop_data++; 630 allxfer_len++; 631 } 632 pwbuffer->data_len=allxfer_len; 633 /* 634 ** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post 635 */ 636 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); 637 } 638 return; 639 } 640 /* 641 ************************************************************************ 642 ************************************************************************ 643 */ 644 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 645 { 646 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 647 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 648 if(arcmsr_wait_msgint_ready(acb)) { 649 printf("arcmsr%d: wait 'stop adapter rebulid' timeout \n" 650 , acb->pci_unit); 651 } 652 return; 653 } 
654 /* 655 ************************************************************************ 656 ************************************************************************ 657 */ 658 static void arcmsr_poll(struct cam_sim * psim) 659 { 660 arcmsr_interrupt(cam_sim_softc(psim)); 661 return; 662 } 663 /* 664 ********************************************************************** 665 ********************************************************************** 666 */ 667 static void arcmsr_interrupt(void *arg) 668 { 669 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg; 670 struct CommandControlBlock *srb; 671 u_int32_t flag_srb, outbound_intstatus, outbound_doorbell; 672 673 /* 674 ********************************************* 675 ** check outbound intstatus 676 ********************************************* 677 */ 678 outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable; 679 CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/ 680 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { 681 /* 682 ********************************************* 683 ** DOORBELL 684 ********************************************* 685 */ 686 outbound_doorbell=CHIP_REG_READ32(outbound_doorbell); 687 CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */ 688 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { 689 struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer; 690 u_int8_t * iop_data=(u_int8_t *)prbuffer->data; 691 u_int8_t * pQbuffer; 692 u_int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; 693 694 /*check this iop data if overflow my rqbuffer*/ 695 rqbuf_lastindex=acb->rqbuf_lastindex; 696 rqbuf_firstindex=acb->rqbuf_firstindex; 697 iop_len=prbuffer->data_len; 698 my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); 699 if(my_empty_len>=iop_len) { 700 while(iop_len > 0) { 701 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 702 memcpy(pQbuffer, 
iop_data, 1); 703 acb->rqbuf_lastindex++; 704 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 705 /*if last index number set it to 0 */ 706 iop_data++; 707 iop_len--; 708 } 709 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 710 /*signature, let IOP331 know data has been readed */ 711 } else { 712 acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW; 713 } 714 } 715 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { 716 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; 717 /* 718 ********************************************* 719 ********************************************* 720 */ 721 if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) { 722 u_int8_t * pQbuffer; 723 struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer; 724 u_int8_t * iop_data=(u_int8_t *)pwbuffer->data; 725 u_int32_t allxfer_len=0; 726 727 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 728 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { 729 pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex]; 730 memcpy(iop_data, pQbuffer, 1); 731 acb->wqbuf_firstindex++; 732 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 733 /*if last index number set it to 0 */ 734 iop_data++; 735 allxfer_len++; 736 } 737 pwbuffer->data_len=allxfer_len; 738 /* 739 ** push inbound doorbell tell iop driver data write ok 740 ** and wait reply on next hwinterrupt for next Qbuffer post 741 */ 742 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); 743 } 744 if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) { 745 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; 746 } 747 } 748 } 749 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { 750 int target, lun; 751 /* 752 ***************************************************************************** 753 ** areca cdb command done 754 ***************************************************************************** 755 */ 756 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 
BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 757 while(1) { 758 if((flag_srb=CHIP_REG_READ32(outbound_queueport)) == 0xFFFFFFFF) { 759 break;/*chip FIFO no srb for completion already*/ 760 } 761 /* check if command done with no error*/ 762 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5)); 763 /*frame must be 32 bytes aligned*/ 764 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 765 if(srb->startdone==ARCMSR_SRB_ABORTED) { 766 printf("arcmsr%d: srb='%p' isr got aborted command \n" 767 , acb->pci_unit, srb); 768 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 769 arcmsr_srb_complete(srb, 1); 770 continue; 771 } 772 printf("arcmsr%d: isr get an illegal srb command done" 773 "acb='%p' srb='%p' srbacb='%p' startdone=0x%x" 774 "srboutstandingcount=%d \n", 775 acb->pci_unit, acb, srb, srb->acb, 776 srb->startdone, acb->srboutstandingcount); 777 continue; 778 } 779 target=srb->pccb->ccb_h.target_id; 780 lun=srb->pccb->ccb_h.target_lun; 781 if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) { 782 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 783 acb->devstate[target][lun]=ARECA_RAID_GOOD; 784 } 785 srb->pccb->ccb_h.status |= CAM_REQ_CMP; 786 arcmsr_srb_complete(srb, 1); 787 } else { 788 switch(srb->arcmsr_cdb.DeviceStatus) { 789 case ARCMSR_DEV_SELECT_TIMEOUT: { 790 acb->devstate[target][lun]=ARECA_RAID_GONE; 791 srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 792 arcmsr_srb_complete(srb, 1); 793 } 794 break; 795 case ARCMSR_DEV_ABORTED: 796 case ARCMSR_DEV_INIT_FAIL: { 797 acb->devstate[target][lun]=ARECA_RAID_GONE; 798 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 799 arcmsr_srb_complete(srb, 1); 800 } 801 break; 802 case SCSISTAT_CHECK_CONDITION: { 803 acb->devstate[target][lun]=ARECA_RAID_GOOD; 804 arcmsr_report_sense_info(srb); 805 arcmsr_srb_complete(srb, 1); 806 } 807 break; 808 default: 809 printf("arcmsr%d: scsi id=%d lun=%d" 810 "isr get command error done," 811 "but got unknow DeviceStatus=0x%x \n" 812 , acb->pci_unit, target, lun 813 
,srb->arcmsr_cdb.DeviceStatus); 814 acb->devstate[target][lun]=ARECA_RAID_GONE; 815 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; 816 /*unknow error or crc error just for retry*/ 817 arcmsr_srb_complete(srb, 1); 818 break; 819 } 820 } 821 } /*drain reply FIFO*/ 822 } 823 return; 824 } 825 /* 826 ******************************************************************************* 827 ** 828 ******************************************************************************* 829 */ 830 static void arcmsr_iop_parking(struct AdapterControlBlock *acb) 831 { 832 if(acb!=NULL) { 833 /* stop adapter background rebuild */ 834 if(acb->acb_flags & ACB_F_MSG_START_BGRB) { 835 arcmsr_stop_adapter_bgrb(acb); 836 arcmsr_flush_adapter_cache(acb); 837 } 838 } 839 } 840 /* 841 *********************************************************************** 842 ** 843 ************************************************************************ 844 */ 845 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) 846 { 847 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 848 u_int32_t retvalue=EINVAL; 849 850 pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg; 851 if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { 852 return retvalue; 853 } 854 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 855 switch(ioctl_cmd) { 856 case ARCMSR_MESSAGE_READ_RQBUFFER: { 857 u_int8_t * pQbuffer; 858 u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 859 u_int32_t allxfer_len=0; 860 861 while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex) && (allxfer_len<1031)) { 862 /*copy READ QBUFFER to srb*/ 863 pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex]; 864 memcpy(ptmpQbuffer, pQbuffer, 1); 865 acb->rqbuf_firstindex++; 866 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 867 /*if last index number set it to 0 */ 868 ptmpQbuffer++; 869 allxfer_len++; 870 } 871 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 872 struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer; 873 u_int8_t 
* iop_data=(u_int8_t *)prbuffer->data; 874 u_int32_t iop_len; 875 876 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 877 iop_len=(u_int32_t)prbuffer->data_len; 878 /*this iop data does no chance to make me overflow again here, so just do it*/ 879 while(iop_len>0) { 880 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 881 memcpy(pQbuffer, iop_data, 1); 882 acb->rqbuf_lastindex++; 883 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 884 /*if last index number set it to 0 */ 885 iop_data++; 886 iop_len--; 887 } 888 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 889 /*signature, let IOP331 know data has been readed */ 890 } 891 pcmdmessagefld->cmdmessage.Length=allxfer_len; 892 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 893 retvalue=ARCMSR_MESSAGE_SUCCESS; 894 } 895 break; 896 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 897 u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 898 u_int8_t * pQbuffer; 899 u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 900 901 user_len=pcmdmessagefld->cmdmessage.Length; 902 /*check if data xfer length of this request will overflow my array qbuffer */ 903 wqbuf_lastindex=acb->wqbuf_lastindex; 904 wqbuf_firstindex=acb->wqbuf_firstindex; 905 if(wqbuf_lastindex!=wqbuf_firstindex) { 906 arcmsr_post_Qbuffer(acb); 907 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 908 } else { 909 my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); 910 if(my_empty_len>=user_len) { 911 while(user_len>0) { 912 /*copy srb data to wqbuffer*/ 913 pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex]; 914 memcpy(pQbuffer, ptmpuserbuffer, 1); 915 acb->wqbuf_lastindex++; 916 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 917 /*if last index number set it to 0 */ 918 ptmpuserbuffer++; 919 user_len--; 920 } 921 /*post fist Qbuffer*/ 922 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 923 acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED; 924 arcmsr_post_Qbuffer(acb); 
925 } 926 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 927 } else { 928 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 929 } 930 } 931 retvalue=ARCMSR_MESSAGE_SUCCESS; 932 } 933 break; 934 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 935 u_int8_t * pQbuffer=acb->rqbuffer; 936 937 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 938 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 939 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 940 /*signature, let IOP331 know data has been readed */ 941 } 942 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 943 acb->rqbuf_firstindex=0; 944 acb->rqbuf_lastindex=0; 945 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 946 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 947 retvalue=ARCMSR_MESSAGE_SUCCESS; 948 } 949 break; 950 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: 951 { 952 u_int8_t * pQbuffer=acb->wqbuffer; 953 954 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 955 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 956 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 957 /*signature, let IOP331 know data has been readed */ 958 } 959 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READED); 960 acb->wqbuf_firstindex=0; 961 acb->wqbuf_lastindex=0; 962 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 963 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 964 retvalue=ARCMSR_MESSAGE_SUCCESS; 965 } 966 break; 967 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 968 u_int8_t * pQbuffer; 969 970 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 971 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 972 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 973 /*signature, let IOP331 know data has been readed */ 974 } 975 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED 976 |ACB_F_MESSAGE_RQBUFFER_CLEARED 977 |ACB_F_MESSAGE_WQBUFFER_READED); 978 acb->rqbuf_firstindex=0; 979 acb->rqbuf_lastindex=0; 980 acb->wqbuf_firstindex=0; 981 
acb->wqbuf_lastindex=0; 982 pQbuffer=acb->rqbuffer; 983 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 984 pQbuffer=acb->wqbuffer; 985 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 986 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 987 retvalue=ARCMSR_MESSAGE_SUCCESS; 988 } 989 break; 990 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 991 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F; 992 retvalue=ARCMSR_MESSAGE_SUCCESS; 993 } 994 break; 995 case ARCMSR_MESSAGE_SAY_HELLO: { 996 u_int8_t * hello_string="Hello! I am ARCMSR"; 997 u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer; 998 999 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { 1000 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 1001 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1002 return ENOIOCTL; 1003 } 1004 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1005 retvalue=ARCMSR_MESSAGE_SUCCESS; 1006 } 1007 break; 1008 case ARCMSR_MESSAGE_SAY_GOODBYE: { 1009 arcmsr_iop_parking(acb); 1010 retvalue=ARCMSR_MESSAGE_SUCCESS; 1011 } 1012 break; 1013 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { 1014 arcmsr_flush_adapter_cache(acb); 1015 retvalue=ARCMSR_MESSAGE_SUCCESS; 1016 } 1017 break; 1018 } 1019 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1020 return retvalue; 1021 } 1022 /* 1023 ************************************************************************** 1024 ************************************************************************** 1025 */ 1026 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb) 1027 { 1028 struct CommandControlBlock *srb=NULL; 1029 u_int32_t workingsrb_startindex, workingsrb_doneindex; 1030 1031 ARCMSR_LOCK_ACQUIRE(&acb->workingQ_start_lock); 1032 workingsrb_doneindex=acb->workingsrb_doneindex; 1033 workingsrb_startindex=acb->workingsrb_startindex; 1034 srb=acb->srbworkingQ[workingsrb_startindex]; 1035 workingsrb_startindex++; 1036 workingsrb_startindex %= 
ARCMSR_MAX_FREESRB_NUM; 1037 if(workingsrb_doneindex!=workingsrb_startindex) { 1038 acb->workingsrb_startindex=workingsrb_startindex; 1039 } else { 1040 srb=NULL; 1041 } 1042 ARCMSR_LOCK_RELEASE(&acb->workingQ_start_lock); 1043 return(srb); 1044 } 1045 /* 1046 ************************************************************************** 1047 ************************************************************************** 1048 */ 1049 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb) 1050 { 1051 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 1052 int retvalue = 0, transfer_len = 0; 1053 char *buffer; 1054 uint32_t controlcode = (uint32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | 1055 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | 1056 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | 1057 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[8]; 1058 /* 4 bytes: Areca io control code */ 1059 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1060 buffer = pccb->csio.data_ptr; 1061 transfer_len = pccb->csio.dxfer_len; 1062 } else { 1063 retvalue = ARCMSR_MESSAGE_FAIL; 1064 goto message_out; 1065 } 1066 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 1067 retvalue = ARCMSR_MESSAGE_FAIL; 1068 goto message_out; 1069 } 1070 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; 1071 switch(controlcode) { 1072 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1073 u_int8_t *pQbuffer; 1074 u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 1075 int32_t allxfer_len = 0; 1076 1077 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1078 && (allxfer_len < 1031)) { 1079 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1080 memcpy(ptmpQbuffer, pQbuffer, 1); 1081 acb->rqbuf_firstindex++; 1082 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 1083 ptmpQbuffer++; 1084 allxfer_len++; 1085 } 1086 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1087 struct QBUFFER *prbuffer = (struct QBUFFER *) &acb->pmu->message_rbuffer; 1088 u_int8_t *iop_data = (u_int8_t *)prbuffer->data; 
1089 int32_t iop_len; 1090 1091 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1092 iop_len =(u_int32_t)prbuffer->data_len; 1093 while (iop_len > 0) { 1094 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 1095 memcpy(pQbuffer, iop_data, 1); 1096 acb->rqbuf_lastindex++; 1097 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1098 iop_data++; 1099 iop_len--; 1100 } 1101 CHIP_REG_WRITE32(inbound_doorbell, 1102 ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1103 } 1104 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1105 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1106 retvalue=ARCMSR_MESSAGE_SUCCESS; 1107 } 1108 break; 1109 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1110 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1111 u_int8_t *pQbuffer; 1112 u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 1113 1114 user_len = pcmdmessagefld->cmdmessage.Length; 1115 wqbuf_lastindex = acb->wqbuf_lastindex; 1116 wqbuf_firstindex = acb->wqbuf_firstindex; 1117 if (wqbuf_lastindex != wqbuf_firstindex) { 1118 arcmsr_post_Qbuffer(acb); 1119 /* has error report sensedata */ 1120 if(&pccb->csio.sense_data) { 1121 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 1122 /* Valid,ErrorCode */ 1123 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 1124 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 1125 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 1126 /* AdditionalSenseLength */ 1127 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 1128 /* AdditionalSenseCode */ 1129 } 1130 retvalue = ARCMSR_MESSAGE_FAIL; 1131 } else { 1132 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) 1133 &(ARCMSR_MAX_QBUFFER - 1); 1134 if (my_empty_len >= user_len) { 1135 while (user_len > 0) { 1136 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; 1137 memcpy(pQbuffer, ptmpuserbuffer, 1); 1138 acb->wqbuf_lastindex++; 1139 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1140 ptmpuserbuffer++; 1141 user_len--; 1142 } 1143 if (acb->acb_flags & 
ACB_F_MESSAGE_WQBUFFER_CLEARED) { 1144 acb->acb_flags &= 1145 ~ACB_F_MESSAGE_WQBUFFER_CLEARED; 1146 arcmsr_post_Qbuffer(acb); 1147 } 1148 } else { 1149 /* has error report sensedata */ 1150 if(&pccb->csio.sense_data) { 1151 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 1152 /* Valid,ErrorCode */ 1153 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 1154 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 1155 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 1156 /* AdditionalSenseLength */ 1157 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 1158 /* AdditionalSenseCode */ 1159 } 1160 retvalue = ARCMSR_MESSAGE_FAIL; 1161 } 1162 } 1163 } 1164 break; 1165 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 1166 u_int8_t *pQbuffer = acb->rqbuffer; 1167 1168 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1169 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1170 CHIP_REG_WRITE32(inbound_doorbell 1171 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1172 } 1173 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 1174 acb->rqbuf_firstindex = 0; 1175 acb->rqbuf_lastindex = 0; 1176 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1177 pcmdmessagefld->cmdmessage.ReturnCode = 1178 ARCMSR_MESSAGE_RETURNCODE_OK; 1179 } 1180 break; 1181 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 1182 u_int8_t *pQbuffer = acb->wqbuffer; 1183 1184 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1185 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1186 CHIP_REG_WRITE32(inbound_doorbell 1187 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1188 } 1189 acb->acb_flags |= 1190 (ACB_F_MESSAGE_WQBUFFER_CLEARED | 1191 ACB_F_MESSAGE_WQBUFFER_READED); 1192 acb->wqbuf_firstindex = 0; 1193 acb->wqbuf_lastindex = 0; 1194 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1195 pcmdmessagefld->cmdmessage.ReturnCode = 1196 ARCMSR_MESSAGE_RETURNCODE_OK; 1197 } 1198 break; 1199 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 1200 u_int8_t *pQbuffer; 1201 1202 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1203 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1204 
CHIP_REG_WRITE32(inbound_doorbell 1205 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1206 } 1207 acb->acb_flags |= 1208 (ACB_F_MESSAGE_WQBUFFER_CLEARED 1209 | ACB_F_MESSAGE_RQBUFFER_CLEARED 1210 | ACB_F_MESSAGE_WQBUFFER_READED); 1211 acb->rqbuf_firstindex = 0; 1212 acb->rqbuf_lastindex = 0; 1213 acb->wqbuf_firstindex = 0; 1214 acb->wqbuf_lastindex = 0; 1215 pQbuffer = acb->rqbuffer; 1216 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 1217 pQbuffer = acb->wqbuffer; 1218 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 1219 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1220 } 1221 break; 1222 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 1223 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 1224 } 1225 break; 1226 case ARCMSR_MESSAGE_SAY_HELLO: { 1227 int8_t * hello_string = "Hello! I am ARCMSR"; 1228 1229 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 1230 , (int16_t)strlen(hello_string)); 1231 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1232 } 1233 break; 1234 case ARCMSR_MESSAGE_SAY_GOODBYE: 1235 arcmsr_iop_parking(acb); 1236 break; 1237 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 1238 arcmsr_flush_adapter_cache(acb); 1239 break; 1240 default: 1241 retvalue = ARCMSR_MESSAGE_FAIL; 1242 } 1243 message_out: 1244 return retvalue; 1245 } 1246 /* 1247 ********************************************************************* 1248 ********************************************************************* 1249 */ 1250 static void arcmsr_executesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1251 { 1252 struct CommandControlBlock *srb=(struct CommandControlBlock *)arg; 1253 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb; 1254 union ccb * pccb; 1255 int target, lun; 1256 1257 pccb=srb->pccb; 1258 target=pccb->ccb_h.target_id; 1259 lun=pccb->ccb_h.target_lun; 1260 if(error != 0) { 1261 if(error != EFBIG) { 1262 printf("arcmsr%d: unexpected error %x returned from 'bus_dmamap_load' 
\n" 1263 , acb->pci_unit, error); 1264 } 1265 if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1266 xpt_freeze_devq(pccb->ccb_h.path, /*count*/1); 1267 pccb->ccb_h.status |= (CAM_REQ_TOO_BIG|CAM_DEV_QFRZN); 1268 } 1269 arcmsr_srb_complete(srb, 0); 1270 return; 1271 } 1272 if(nseg > ARCMSR_MAX_SG_ENTRIES) { 1273 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; 1274 arcmsr_srb_complete(srb, 0); 1275 return; 1276 } 1277 if(acb->acb_flags & ACB_F_BUS_RESET) { 1278 printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); 1279 pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; 1280 arcmsr_srb_complete(srb, 0); 1281 return; 1282 } 1283 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 1284 u_int8_t block_cmd; 1285 1286 block_cmd=pccb->csio.cdb_io.cdb_bytes[0] & 0x0f; 1287 if(block_cmd==0x08 || block_cmd==0x0a) { 1288 printf("arcmsr%d:block 'read/write' command" 1289 "with gone raid volume Cmd=%2x, TargetId=%d, Lun=%d \n" 1290 , acb->pci_unit, block_cmd, target, lun); 1291 pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 1292 arcmsr_srb_complete(srb, 0); 1293 return; 1294 } 1295 } 1296 if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1297 if(nseg != 0) { 1298 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 1299 } 1300 arcmsr_srb_complete(srb, 0); 1301 return; 1302 } 1303 pccb->ccb_h.status |= CAM_SIM_QUEUED; 1304 if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) { 1305 pccb->ccb_h.status &= ~CAM_STATUS_MASK; 1306 pccb->ccb_h.status |= CAM_REQUEUE_REQ; 1307 arcmsr_srb_complete(srb, 0); 1308 return; 1309 } 1310 arcmsr_build_srb(srb, dm_segs, nseg); 1311 arcmsr_post_srb(acb, srb); 1312 return; 1313 } 1314 /* 1315 ***************************************************************************************** 1316 ***************************************************************************************** 1317 */ 1318 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb) 1319 { 1320 struct CommandControlBlock *srb; 1321 struct AdapterControlBlock 
*acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; 1322 u_int32_t intmask_org, mask; 1323 int i=0; 1324 1325 acb->num_aborts++; 1326 /* 1327 *************************************************************************** 1328 ** It is the upper layer do abort command this lock just prior to calling us. 1329 ** First determine if we currently own this command. 1330 ** Start by searching the device queue. If not found 1331 ** at all, and the system wanted us to just abort the 1332 ** command return success. 1333 *************************************************************************** 1334 */ 1335 if(acb->srboutstandingcount!=0) { 1336 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 1337 srb=acb->psrb_pool[i]; 1338 if(srb->startdone==ARCMSR_SRB_START) { 1339 if(srb->pccb==abortccb) { 1340 srb->startdone=ARCMSR_SRB_ABORTED; 1341 printf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'" 1342 "outstanding command \n" 1343 , acb->pci_unit, abortccb->ccb_h.target_id 1344 , abortccb->ccb_h.target_lun, srb); 1345 goto abort_outstanding_cmd; 1346 } 1347 } 1348 } 1349 } 1350 return(FALSE); 1351 abort_outstanding_cmd: 1352 /* do not talk to iop 331 abort command */ 1353 UDELAY(3000*1000);/*wait for 3 sec for all command done*/ 1354 /* disable all outbound interrupt */ 1355 intmask_org=CHIP_REG_READ32(outbound_intmask); 1356 CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 1357 arcmsr_polling_srbdone(acb, srb); 1358 /* enable all outbound interrupt */ 1359 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 1360 CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask); 1361 return (TRUE); 1362 } 1363 /* 1364 **************************************************************************** 1365 **************************************************************************** 1366 */ 1367 static void arcmsr_bus_reset(struct AdapterControlBlock *acb) 1368 { 1369 int retry=0; 1370 1371 acb->num_resets++; 1372 
acb->acb_flags |=ACB_F_BUS_RESET; 1373 while(acb->srboutstandingcount!=0 && retry < 400) { 1374 arcmsr_interrupt((void *)acb); 1375 UDELAY(25000); 1376 retry++; 1377 } 1378 arcmsr_iop_reset(acb); 1379 acb->acb_flags &= ~ACB_F_BUS_RESET; 1380 return; 1381 } 1382 /* 1383 ************************************************************************** 1384 ************************************************************************** 1385 */ 1386 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, 1387 union ccb * pccb) 1388 { 1389 pccb->ccb_h.status |= CAM_REQ_CMP; 1390 switch (pccb->csio.cdb_io.cdb_bytes[0]) { 1391 case INQUIRY: { 1392 unsigned char inqdata[36]; 1393 char *buffer=pccb->csio.data_ptr;; 1394 1395 if (pccb->ccb_h.target_lun) { 1396 pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 1397 xpt_done(pccb); 1398 return; 1399 } 1400 inqdata[0] = T_PROCESSOR; 1401 /* Periph Qualifier & Periph Dev Type */ 1402 inqdata[1] = 0; 1403 /* rem media bit & Dev Type Modifier */ 1404 inqdata[2] = 0; 1405 /* ISO, ECMA, & ANSI versions */ 1406 inqdata[4] = 31; 1407 /* length of additional data */ 1408 strncpy(&inqdata[8], "Areca ", 8); 1409 /* Vendor Identification */ 1410 strncpy(&inqdata[16], "RAID controller ", 16); 1411 /* Product Identification */ 1412 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 1413 memcpy(buffer, inqdata, sizeof(inqdata)); 1414 xpt_done(pccb); 1415 } 1416 break; 1417 case WRITE_BUFFER: 1418 case READ_BUFFER: { 1419 if (arcmsr_iop_message_xfer(acb, pccb)) { 1420 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1421 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 1422 } 1423 xpt_done(pccb); 1424 } 1425 break; 1426 default: 1427 xpt_done(pccb); 1428 } 1429 } 1430 /* 1431 ********************************************************************* 1432 ********************************************************************* 1433 */ 1434 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb) 1435 { 1436 struct AdapterControlBlock * acb; 

	acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
	if(acb==NULL) {
		pccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(pccb);
		return;
	}
	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO: {
			struct CommandControlBlock *srb;
			int target=pccb->ccb_h.target_id;

			if(target == 16) {
				/* virtual device for iop message transfer */
				arcmsr_handle_virtual_command(acb, pccb);
				return;
			}
			if((srb=arcmsr_get_freesrb(acb)) == NULL) {
				/* srb pool exhausted: ask CAM to retry later */
				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
				xpt_done(pccb);
				return;
			}
			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
			srb->pccb=pccb;
			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
					/* Single buffer */
					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
						/* Buffer is virtual */
						u_int32_t error, s;

						s=splsoftvm();
						/* arcmsr_executesrb is the load callback; it posts the srb */
						error = bus_dmamap_load(acb->dm_segs_dmat
							, srb->dm_segs_dmamap
							, pccb->csio.data_ptr
							, pccb->csio.dxfer_len
							, arcmsr_executesrb, srb, /*flags*/0);
						if(error == EINPROGRESS) {
							xpt_freeze_simq(acb->psim, 1);
							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
						}
						splx(s);
					} else {
						/* Buffer is physical */
						panic("arcmsr: CAM_DATA_PHYS not supported");
					}
				} else {
					/* Scatter/gather list */
					struct bus_dma_segment *segs;

					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
						|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
						xpt_done(pccb);
						/*
						** NOTE(review): srb came from the pre-allocated
						** srb pool via arcmsr_get_freesrb(), not from
						** malloc(9); free(..., M_DEVBUF) on it looks
						** wrong (pool corruption) — it should probably
						** be returned to the working queue instead.
						** Verify against arcmsr_srb_complete()/pool code.
						*/
						free(srb, M_DEVBUF);
						return;
					}
					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
					arcmsr_executesrb(srb, segs, pccb->csio.sglist_cnt, 0);
				}
			} else {
				/* no data phase */
				arcmsr_executesrb(srb, NULL, 0, 0);
			}
			break;
		}
	case XPT_TARGET_IO: {
			/* target mode not yet support vendor specific commands. */
			pccb->ccb_h.status |= CAM_REQ_CMP;
			xpt_done(pccb);
			break;
		}
	case XPT_PATH_INQ: {
			/* report HBA capabilities to CAM */
			struct ccb_pathinq *cpi= &pccb->cpi;

			cpi->version_num=1;
			cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
			cpi->target_sprt=0;
			cpi->hba_misc=0;
			cpi->hba_eng_cnt=0;
			cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */
			cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */
			cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
			cpi->bus_id=cam_sim_bus(psim);
			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
			cpi->unit_number=cam_sim_unit(psim);
			cpi->transport = XPORT_SPI;
			cpi->transport_version = 2;
			cpi->protocol = PROTO_SCSI;
			cpi->protocol_version = SCSI_REV_2;
			cpi->ccb_h.status |= CAM_REQ_CMP;
			xpt_done(pccb);
			break;
		}
	case XPT_ABORT: {
			union ccb *pabort_ccb;

			pabort_ccb=pccb->cab.abort_ccb;
			switch (pabort_ccb->ccb_h.func_code) {
			case XPT_ACCEPT_TARGET_IO:
			case XPT_IMMED_NOTIFY:
			case XPT_CONT_TARGET_IO:
				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
					xpt_done(pabort_ccb);
					pccb->ccb_h.status |= CAM_REQ_CMP;
				} else {
					xpt_print_path(pabort_ccb->ccb_h.path);
					printf("Not found\n");
					pccb->ccb_h.status |= CAM_PATH_INVALID;
				}
				break;
			case XPT_SCSI_IO:
				pccb->ccb_h.status |= CAM_UA_ABORT;
				break;
			default:
				pccb->ccb_h.status |= CAM_REQ_INVALID;
				break;
			}
			xpt_done(pccb);
			break;
		}
	case XPT_RESET_BUS:
	case XPT_RESET_DEV: {
			u_int32_t i;

			arcmsr_bus_reset(acb);
			/* settle delay: 500 * 1ms after the reset */
			for (i=0; i < 500; i++) {
				DELAY(1000);
			}
			pccb->ccb_h.status |= CAM_REQ_CMP;
			xpt_done(pccb);
			break;
		}
	case XPT_TERM_IO: {
			pccb->ccb_h.status |= CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}
	case XPT_GET_TRAN_SETTINGS: {
			struct ccb_trans_settings *cts;
			struct ccb_trans_settings_scsi *scsi;
			struct ccb_trans_settings_spi *spi;

			/* the virtual message device has no transport settings */
			if(pccb->ccb_h.target_id == 16) {
				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
				xpt_done(pccb);
				break;
			}

			cts= &pccb->cts;
			scsi = &cts->proto_specific.scsi;
			spi = &cts->xport_specific.spi;

			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;
			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			spi->sync_period=3;
			spi->sync_offset=32;
			spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				| CTS_SPI_VALID_SYNC_OFFSET
				| CTS_SPI_VALID_BUS_WIDTH;
			scsi->valid = CTS_SCSI_VALID_TQ;

			pccb->ccb_h.status |= CAM_REQ_CMP;
			xpt_done(pccb);
			break;
		}
	case XPT_SET_TRAN_SETTINGS: {
			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
			xpt_done(pccb);
			break;
		}
	case XPT_CALC_GEOMETRY: {
			struct ccb_calc_geometry *ccg;
			u_int32_t size_mb;
			u_int32_t secs_per_cylinder;

			if(pccb->ccb_h.target_id == 16) {
				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
				xpt_done(pccb);
				break;
			}
			ccg= &pccb->ccg;
			if (ccg->block_size == 0) {
				pccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(pccb);
				break;
			}
			if(((1024L * 1024L)/ccg->block_size) < 0) {
				pccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(pccb);
				break;
			}
			/* >1GB volumes get 255 heads / 63 sectors, else 64 / 32 */
			size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size);
			if(size_mb > 1024 ) {
				ccg->heads=255;
				ccg->secs_per_track=63;
			} else {
				ccg->heads=64;
				ccg->secs_per_track=32;
			}
			secs_per_cylinder=ccg->heads * ccg->secs_per_track;
			ccg->cylinders=ccg->volume_size / secs_per_cylinder;
			pccb->ccb_h.status |= CAM_REQ_CMP;
			xpt_done(pccb);
			break;
		}
	default:
		pccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
	return;
}
/*
**********************************************************************
** Tell the IOP to start background rebuild; logs on message timeout.
**********************************************************************
*/
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
	if(arcmsr_wait_msgint_ready(acb)) {
		printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
	}
	return;
}
/*
**********************************************************************
** Poll the outbound reply FIFO (interrupts masked) and complete
** finished srbs; used by the abort path with poll_srb identifying the
** command being aborted. Gives up after ~2.5s (100 * 25ms) of empty
** polls.
**********************************************************************
*/
static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
{
	struct CommandControlBlock *srb;
	uint32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
	int id, lun;

polling_srb_retry:
	poll_count++;
	outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable;
	CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/
	while(1) {
		if((flag_srb=CHIP_REG_READ32(outbound_queueport))==0xFFFFFFFF) {
			if(poll_srb_done) {
				break;/*chip FIFO no ccb for completion already*/
			} else {
				UDELAY(25000);
				if(poll_count > 100) {
					break;
				}
				goto polling_srb_retry;
			}
		}
		/* check ifcommand done with no error*/
		srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));
		/*frame must be 32 bytes aligned*/
		if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
			if((srb->startdone==ARCMSR_SRB_ABORTED) && (srb==poll_srb)) {
				printf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
					"poll command abort successfully \n"
					, acb->pci_unit
					, srb->pccb->ccb_h.target_id
					, srb->pccb->ccb_h.target_lun, srb);
				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
				arcmsr_srb_complete(srb, 1);
				poll_srb_done=1;
				continue;
			}
			printf("arcmsr%d: polling get an illegal srb command done srb='%p'"
				"srboutstandingcount=%d \n"
				, acb->pci_unit
				, srb, acb->srboutstandingcount);
			continue;
		}
		id=srb->pccb->ccb_h.target_id;
		lun=srb->pccb->ccb_h.target_lun;
		if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) {
			/* clean completion; a previously-gone device is good again */
			if(acb->devstate[id][lun]==ARECA_RAID_GONE) {
				acb->devstate[id][lun]=ARECA_RAID_GOOD;
			}
			srb->pccb->ccb_h.status |= CAM_REQ_CMP;
			arcmsr_srb_complete(srb, 1);
		} else {
			switch(srb->arcmsr_cdb.DeviceStatus) {
			case ARCMSR_DEV_SELECT_TIMEOUT: {
					acb->devstate[id][lun]=ARECA_RAID_GONE;
					srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
					arcmsr_srb_complete(srb, 1);
				}
				break;
			case ARCMSR_DEV_ABORTED:
			case ARCMSR_DEV_INIT_FAIL: {
					acb->devstate[id][lun]=ARECA_RAID_GONE;
					srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
					arcmsr_srb_complete(srb, 1);
				}
				break;
			case SCSISTAT_CHECK_CONDITION: {
					acb->devstate[id][lun]=ARECA_RAID_GOOD;
					arcmsr_report_sense_info(srb);
					arcmsr_srb_complete(srb, 1);
				}
				break;
			default:
				printf("arcmsr%d: scsi id=%d lun=%d"
					"polling and getting command error done"
					", but got unknow DeviceStatus=0x%x \n"
					, acb->pci_unit, id, lun, srb->arcmsr_cdb.DeviceStatus);
				acb->devstate[id][lun]=ARECA_RAID_GONE;
				srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
				/*unknow error or crc error just for retry*/
				arcmsr_srb_complete(srb, 1);
				break;
			}
		}
	} /*drain reply FIFO*/
	return;
}
/*
**********************************************************************
** get firmware miscellaneous data
**********************************************************************
*/
/*
** Issue GET_CONFIG to the IOP and read back the firmware model/version
** strings (byte-wise from the message r/w buffer) plus the request
** length, queue depth, SDRAM size and channel count words.
*/
static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	char *acb_firm_model=acb->firm_model;
	char *acb_firm_version=acb->firm_version;
	size_t iop_firm_model=offsetof(struct MessageUnit,message_rwbuffer[15]); /*firm_model,15,60-67*/
	size_t iop_firm_version=offsetof(struct MessageUnit,message_rwbuffer[17]); /*firm_version,17,68-83*/
	int i;

	CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
	if(arcmsr_wait_msgint_ready(acb)) {
		printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n"
			, acb->pci_unit);
	}
	i=0;
	while(i<8) {
		*acb_firm_model=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_model+i);
		/* 8 bytes firm_model, 15, 60-67*/
		acb_firm_model++;
		i++;
	}
	i=0;
	while(i<16) {
		*acb_firm_version=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_version+i);
		/* 16 bytes firm_version, 17, 68-83*/
		acb_firm_version++;
		i++;
	}
	printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
	printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
	acb->firm_request_len=CHIP_REG_READ32(message_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
	acb->firm_numbers_queue=CHIP_REG_READ32(message_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
	acb->firm_sdram_size=CHIP_REG_READ32(message_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
	acb->firm_ide_channels=CHIP_REG_READ32(message_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
	return;
}
/*
**********************************************************************
** start background rebulid
**********************************************************************
*/
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	u_int32_t intmask_org, mask, outbound_doorbell, firmware_state=0;

	/* busy-wait for the firmware-ready bit before any messaging */
	do {
		firmware_state=CHIP_REG_READ32(outbound_msgaddr1);
	} while((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)==0);
	intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
	CHIP_REG_WRITE32(outbound_intmask, intmask_org);
	/* NOTE(review): this second read|or looks redundant with the one two
	** lines up — presumably it re-samples the mask after the write; verify
	** against the IOP331 register semantics before simplifying. */
	intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
	arcmsr_get_firmware_spec(acb);
	arcmsr_start_adapter_bgrb(acb);
	/* clear Qbuffer if door bell ringed */
	outbound_doorbell=CHIP_REG_READ32(outbound_doorbell);
	CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */
	CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
	/* enable outbound Post Queue, outbound message0, outbell doorbell Interrupt */
	mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
	CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask);
	acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
	acb->acb_flags |=ACB_F_IOP_INITED;
	return;
}
/*
**********************************************************************
** bus_dmamap_load() callback for the srb pool: carve the uncached DMA
** region into 32-byte-aligned CommandControlBlocks, create a per-srb
** data dmamap, record each srb's shifted physical address, and tell
** the IOP the high 32 bits of the pool address if nonzero.
**********************************************************************
*/
static void arcmsr_map_freesrb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct AdapterControlBlock *acb=arg;
	struct CommandControlBlock *srb_tmp;
	u_int8_t * dma_memptr;
	u_int32_t i, srb_phyaddr_hi32;
	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;

	dma_memptr=acb->uncacheptr;
	srb_phyaddr=segs->ds_addr; /* We suppose bus_addr_t high part always 0 here*/
	/* round both virtual and physical start up to a 32-byte boundary */
	if(((unsigned long)dma_memptr & 0x1F)!=0) {
		dma_memptr=dma_memptr+(0x20-((unsigned long)dma_memptr & 0x1F));
		srb_phyaddr=srb_phyaddr+(0x20-((unsigned long)srb_phyaddr & 0x1F));
	}
	srb_tmp=(struct CommandControlBlock *)dma_memptr;
	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
		/*srb address must 32 (0x20) boundary*/
		if(((unsigned long)srb_tmp & 0x1F)==0) {
			if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
				acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
				printf("arcmsr%d: srb dmamap bus_dmamap_create error\n", acb->pci_unit);
				return;
			}
			/* IOP addresses srbs by physical address >> 5 */
			srb_tmp->cdb_shifted_phyaddr=srb_phyaddr >> 5;
			srb_tmp->acb=acb;
			acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
			srb_phyaddr=srb_phyaddr+sizeof(struct CommandControlBlock);
		} else {
			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
			printf("arcmsr%d: dma_memptr=%p i=%d"
				"this srb cross 32 bytes boundary ignored srb_tmp=%p \n"
				, acb->pci_unit, dma_memptr, i, srb_tmp);
			return;
		}
		srb_tmp++;
	}
	/* offset for translating reply-FIFO physical addresses back to virtual */
	acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr;
	/*
	********************************************************************
	** here we need to tell iop 331 our freesrb.HighPart
	** if freesrb.HighPart is not zero
	********************************************************************
	*/
	srb_phyaddr_hi32=(uint32_t) ((srb_phyaddr>>16)>>16);
	if(srb_phyaddr_hi32!=0) {
		CHIP_REG_WRITE32(message_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
		CHIP_REG_WRITE32(message_rwbuffer[1], srb_phyaddr_hi32);
		CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
		if(arcmsr_wait_msgint_ready(acb)) {
			printf("arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
		}
	}
	return;
}
/*
************************************************************************
** Tear down driver resources: control device node, srb DMA map and the
** three DMA tags created at attach time.
************************************************************************
*/
static void arcmsr_free_resource(struct AdapterControlBlock *acb)
{
	/* remove the control device */
	if(acb->ioctl_dev != NULL) {
		destroy_dev(acb->ioctl_dev);
	}
	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
	bus_dma_tag_destroy(acb->srb_dmat);
	bus_dma_tag_destroy(acb->dm_segs_dmat);
	bus_dma_tag_destroy(acb->parent_dmat);
	return;
}
/*
************************************************************************
** Attach-time hardware initialization (continues past this view):
** creates the parent DMA tag; the pre-502010 branch lacks the
** lockfunc/lockarg parameters added to bus_dma_tag_create().
************************************************************************
*/
static u_int32_t arcmsr_initialize(device_t dev)
{
	struct AdapterControlBlock *acb=device_get_softc(dev);
	u_int32_t intmask_org, rid=PCIR_BAR(0);
	vm_offset_t mem_base;
	u_int16_t pci_command;
	int i, j;

#if __FreeBSD_version >= 502010
	if(bus_dma_tag_create( /*parent*/ NULL,
		/*alignemnt*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
		/*nsegments*/ BUS_SPACE_UNRESTRICTED,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		/*lockfunc*/ NULL,
		/*lockarg*/ NULL,
		&acb->parent_dmat) != 0)
#else
	if(bus_dma_tag_create( /*parent*/ NULL,
		/*alignemnt*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
		/*nsegments*/ BUS_SPACE_UNRESTRICTED,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->parent_dmat) != 0)
#endif
	{
		printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", acb->pci_unit);
		return ENOMEM;
	}
	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need.
*/ 1953 #if __FreeBSD_version >= 502010 1954 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 1955 /*alignment*/ 1, 1956 /*boundary*/ 0, 1957 /*lowaddr*/ BUS_SPACE_MAXADDR, 1958 /*highaddr*/ BUS_SPACE_MAXADDR, 1959 /*filter*/ NULL, 1960 /*filterarg*/ NULL, 1961 /*maxsize*/ MAXBSIZE, 1962 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, 1963 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1964 /*flags*/ 0, 1965 /*lockfunc*/ busdma_lock_mutex, 1966 /*lockarg*/ &Giant, 1967 &acb->dm_segs_dmat) != 0) 1968 #else 1969 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 1970 /*alignment*/ 1, 1971 /*boundary*/ 0, 1972 /*lowaddr*/ BUS_SPACE_MAXADDR, 1973 /*highaddr*/ BUS_SPACE_MAXADDR, 1974 /*filter*/ NULL, 1975 /*filterarg*/ NULL, 1976 /*maxsize*/ MAXBSIZE, 1977 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, 1978 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1979 /*flags*/ 0, 1980 &acb->dm_segs_dmat) != 0) 1981 #endif 1982 { 1983 bus_dma_tag_destroy(acb->parent_dmat); 1984 printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", acb->pci_unit); 1985 return ENOMEM; 1986 } 1987 /* DMA tag for our srb structures.... 
Allocate the freesrb memory */ 1988 #if __FreeBSD_version >= 502010 1989 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 1990 /*alignment*/ 1, 1991 /*boundary*/ 0, 1992 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, 1993 /*highaddr*/ BUS_SPACE_MAXADDR, 1994 /*filter*/ NULL, 1995 /*filterarg*/ NULL, 1996 /*maxsize*/ ARCMSR_SRBS_POOL_SIZE, 1997 /*nsegments*/ 1, 1998 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 1999 /*flags*/ 0, 2000 /*lockfunc*/ NULL, 2001 /*lockarg*/ NULL, 2002 &acb->srb_dmat) != 0) 2003 #else 2004 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 2005 /*alignment*/ 1, 2006 /*boundary*/ 0, 2007 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, 2008 /*highaddr*/ BUS_SPACE_MAXADDR, 2009 /*filter*/ NULL, 2010 /*filterarg*/ NULL, 2011 /*maxsize*/ ARCMSR_SRBS_POOL_SIZE, 2012 /*nsegments*/ 1, 2013 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 2014 /*flags*/ 0, 2015 &acb->srb_dmat) != 0) 2016 #endif 2017 { 2018 bus_dma_tag_destroy(acb->dm_segs_dmat); 2019 bus_dma_tag_destroy(acb->parent_dmat); 2020 printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", acb->pci_unit); 2021 return ENXIO; 2022 } 2023 /* Allocation for our srbs */ 2024 if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr 2025 , BUS_DMA_WAITOK | BUS_DMA_COHERENT, &acb->srb_dmamap) != 0) { 2026 bus_dma_tag_destroy(acb->srb_dmat); 2027 bus_dma_tag_destroy(acb->dm_segs_dmat); 2028 bus_dma_tag_destroy(acb->parent_dmat); 2029 printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", acb->pci_unit); 2030 return ENXIO; 2031 } 2032 /* And permanently map them */ 2033 if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr 2034 , ARCMSR_SRBS_POOL_SIZE, arcmsr_map_freesrb, acb, /*flags*/0)) { 2035 bus_dma_tag_destroy(acb->srb_dmat); 2036 bus_dma_tag_destroy(acb->dm_segs_dmat); 2037 bus_dma_tag_destroy(acb->parent_dmat); 2038 printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", acb->pci_unit); 2039 return ENXIO; 2040 } 2041 pci_command=pci_read_config(dev, PCIR_COMMAND, 2); 2042 pci_command |= 
PCIM_CMD_BUSMASTEREN; 2043 pci_command |= PCIM_CMD_PERRESPEN; 2044 pci_command |= PCIM_CMD_MWRICEN; 2045 /* Enable Busmaster/Mem */ 2046 pci_command |= PCIM_CMD_MEMEN; 2047 pci_write_config(dev, PCIR_COMMAND, pci_command, 2); 2048 acb->sys_res_arcmsr=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, 0x1000, RF_ACTIVE); 2049 if(acb->sys_res_arcmsr == NULL) { 2050 arcmsr_free_resource(acb); 2051 printf("arcmsr%d: bus_alloc_resource failure!\n", acb->pci_unit); 2052 return ENOMEM; 2053 } 2054 if(rman_get_start(acb->sys_res_arcmsr) <= 0) { 2055 arcmsr_free_resource(acb); 2056 printf("arcmsr%d: rman_get_start failure!\n", acb->pci_unit); 2057 return ENXIO; 2058 } 2059 mem_base=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr); 2060 if(mem_base==0) { 2061 arcmsr_free_resource(acb); 2062 printf("arcmsr%d: rman_get_virtual failure!\n", acb->pci_unit); 2063 return ENXIO; 2064 } 2065 if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { 2066 arcmsr_free_resource(acb); 2067 printf("arcmsr%d: map free srb failure!\n", acb->pci_unit); 2068 return ENXIO; 2069 } 2070 acb->btag=rman_get_bustag(acb->sys_res_arcmsr); 2071 acb->bhandle=rman_get_bushandle(acb->sys_res_arcmsr); 2072 acb->pmu=(struct MessageUnit *)mem_base; 2073 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED 2074 |ACB_F_MESSAGE_RQBUFFER_CLEARED 2075 |ACB_F_MESSAGE_WQBUFFER_READED); 2076 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 2077 /* 2078 ******************************************************************** 2079 ** init raid volume state 2080 ******************************************************************** 2081 */ 2082 for(i=0;i<ARCMSR_MAX_TARGETID;i++) { 2083 for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) { 2084 acb->devstate[i][j]=ARECA_RAID_GOOD; 2085 } 2086 } 2087 /* disable iop all outbound interrupt */ 2088 intmask_org=CHIP_REG_READ32(outbound_intmask); 2089 CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 2090 arcmsr_iop_init(acb); 2091 return(0); 2092 } 2093 /* 2094 
************************************************************************
** newbus attach entry point: initialize the hardware, hook the
** interrupt, register a CAM SIM/bus/path, and create the /dev/arcmsrN
** control device.  Returns 0 on success or an errno value.
************************************************************************
*/
static u_int32_t arcmsr_attach(device_t dev)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
	u_int32_t unit=device_get_unit(dev);
	struct ccb_setasync csa;
	struct cam_devq *devq;	/* Device Queue to use for this SIM */
	struct resource *irqres;
	int rid;

	if(acb == NULL) {
		printf("arcmsr%d: cannot allocate softc\n", unit);
		return (ENOMEM);
	}
	/* softc is zeroed before arcmsr_initialize fills it in */
	bzero(acb, sizeof(struct AdapterControlBlock));
	if(arcmsr_initialize(dev)) {
		printf("arcmsr%d: initialize failure!\n", unit);
		return ENXIO;
	}
	/* After setting up the adapter, map our interrupt */
	rid=0;
	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE);
	if(irqres == NULL ||
		bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE
		, NULL, arcmsr_interrupt, acb, &acb->ih)) {
		arcmsr_free_resource(acb);
		printf("arcmsr%d: unable to register interrupt handler!\n", unit);
		return ENXIO;
	}
	acb->irqres=irqres;
	acb->pci_dev=dev;
	acb->pci_unit=unit;
	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus * start queue to reset to the idle loop.
	 *
	 * Create device queue of SIM(s) * (MAX_START_JOB - 1) :
	 * max_sim_transactions
	 */
	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
	if(devq == NULL) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		printf("arcmsr%d: cam_simq_alloc failure!\n", unit);
		return ENXIO;
	}
	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll
		, "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
	if(acb->psim == NULL) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		cam_simq_free(devq);
		printf("arcmsr%d: cam_sim_alloc failure!\n", unit);
		return ENXIO;
	}
	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		cam_sim_free(acb->psim, /*free_devq*/TRUE);
		printf("arcmsr%d: xpt_bus_register failure!\n", unit);
		return ENXIO;
	}
	if(xpt_create_path(&acb->ppath, /* periph */ NULL
		, cam_sim_path(acb->psim)
		, CAM_TARGET_WILDCARD
		, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		xpt_bus_deregister(cam_sim_path(acb->psim));
		cam_sim_free(acb->psim, /* free_simq */ TRUE);
		printf("arcmsr%d: xpt_create_path failure!\n", unit);
		return ENXIO;
	}
	ARCMSR_LOCK_INIT(&acb->workingQ_done_lock, "arcmsr done working Q lock");
	ARCMSR_LOCK_INIT(&acb->workingQ_start_lock, "arcmsr start working Q lock");
	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
	/*
	****************************************************
	** register for async device arrival/departure events
	****************************************************
	*/
	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
	csa.ccb_h.func_code=XPT_SASYNC_CB;
	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback=arcmsr_async;
	csa.callback_arg=acb->psim;
	xpt_action((union ccb *)&csa);
	/* Create the control device. */
	acb->ioctl_dev=make_dev(&arcmsr_cdevsw
		, unit
		, UID_ROOT
		, GID_WHEEL /* GID_OPERATOR */
		, S_IRUSR | S_IWUSR
		, "arcmsr%d", unit);
#if __FreeBSD_version < 503000
	acb->ioctl_dev->si_drv1=acb;
#endif
#if __FreeBSD_version > 500005
	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
#endif
	return 0;
}
/*
************************************************************************
** newbus probe entry point: match Areca vendor/device IDs and set the
** device description.  Returns 0 on a match, ENXIO otherwise.
************************************************************************
*/
static u_int32_t arcmsr_probe(device_t dev)
{
	u_int32_t id;
	static char buf[256];	/* NOTE(review): static scratch buffer; probe
				** is presumably single-threaded — confirm */
	char *type;
	int raid6 = 1;

	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
		return (ENXIO);
	}
	switch(id=pci_get_devid(dev)) {
	case PCIDevVenIDARC1110:
	case PCIDevVenIDARC1210:
		raid6 = 0;	/* entry-level models lack RAID6 */
		/*FALLTHRU*/
	case PCIDevVenIDARC1120:
	case PCIDevVenIDARC1130:
	case PCIDevVenIDARC1160:
	case PCIDevVenIDARC1170:
	case PCIDevVenIDARC1220:
	case PCIDevVenIDARC1230:
	case PCIDevVenIDARC1260:
	case PCIDevVenIDARC1270:
	case PCIDevVenIDARC1280:
		type = "SATA";
		break;
	case PCIDevVenIDARC1380:
	case PCIDevVenIDARC1381:
	case PCIDevVenIDARC1680:
	case PCIDevVenIDARC1681:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}
	/* NOTE(review): the trailing "\n" ends up inside the device
	** description string; device descriptions conventionally have no
	** newline — consider removing it in a behavior-changing pass. */
	sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n", type, raid6 ? "(RAID6 capable)" : "");
	device_set_desc_copy(dev, buf);
	return 0;
}
/*
************************************************************************
** newbus shutdown entry point: stop background rebuild, flush the
** adapter cache, mask interrupts and abort/complete every srb still
** outstanding so nothing references driver memory after shutdown.
************************************************************************
*/
static void arcmsr_shutdown(device_t dev)
{
	u_int32_t i, poll_count=0;
	u_int32_t intmask_org;
	struct CommandControlBlock *srb;
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);

	/* stop adapter background rebuild */
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	/* disable all outbound interrupt */
	intmask_org=CHIP_REG_READ32(outbound_intmask);
	CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE));
	/* abort all outstanding command */
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;
	if(acb->srboutstandingcount!=0) {
		/* poll the interrupt handler for up to 256*25ms to drain
		** in-flight commands gracefully */
		while((acb->srboutstandingcount!=0) && (poll_count < 256)) {
			arcmsr_interrupt((void *)acb);
			UDELAY(25000);
			poll_count++;
		}
		if(acb->srboutstandingcount!=0) {
			/* graceful drain failed: force-abort everything */
			arcmsr_abort_allcmd(acb);
			/*clear all outbound posted Q*/
			for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) {
				CHIP_REG_READ32(outbound_queueport);
			}
			for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
				srb=acb->psrb_pool[i];
				if(srb->startdone==ARCMSR_SRB_START) {
					srb->startdone=ARCMSR_SRB_ABORTED;
					srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
					arcmsr_srb_complete(srb, 1);
				}
			}
		}
	}
	atomic_set_int(&acb->srboutstandingcount, 0);
	acb->workingsrb_doneindex=0;
	acb->workingsrb_startindex=0;
	return;
}
/*
************************************************************************
** newbus detach entry point: shut the adapter down, then release all
** driver resources and unwind the CAM registration.
************************************************************************
*/
static u_int32_t arcmsr_detach(device_t dev)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);

	arcmsr_shutdown(dev);
	/* NOTE(review): arcmsr_free_resource() destroys the DMA tags/maps
	** before the interrupt is torn down below; if the interrupt handler
	** can still fire here it may touch freed resources — confirm the
	** teardown ordering (teardown_intr before free_resource is the
	** usual pattern). */
	arcmsr_free_resource(acb);
	bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), acb->sys_res_arcmsr);
	bus_teardown_intr(dev, acb->irqres, acb->ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
	xpt_free_path(acb->ppath);
	xpt_bus_deregister(cam_sim_path(acb->psim));
	cam_sim_free(acb->psim, TRUE);
	return (0);
}