1 /* $FreeBSD$ */ 2 /*- 3 * Generic defines for LSI '909 FC adapters. 4 * FreeBSD Version. 5 * 6 * Copyright (c) 2000, 2001 by Greg Ansley 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice immediately at the beginning of the file, without modification, 13 * this list of conditions, and the following disclaimer. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 /*- 30 * Copyright (c) 2002, 2006 by Matthew Jacob 31 * All rights reserved. 32 * 33 * Redistribution and use in source and binary forms, with or without 34 * modification, are permitted provided that the following conditions are 35 * met: 36 * 1. Redistributions of source code must retain the above copyright 37 * notice, this list of conditions and the following disclaimer. 38 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 39 * substantially similar to the "NO WARRANTY" disclaimer below 40 * ("Disclaimer") and any redistribution must be conditioned upon including 41 * a substantially similar Disclaimer requirement for further binary 42 * redistribution. 43 * 3. Neither the names of the above listed copyright holders nor the names 44 * of any contributors may be used to endorse or promote products derived 45 * from this software without specific prior written permission. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 51 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 52 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 53 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 54 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 55 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 56 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 57 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 58 * 59 * Support from Chris Ellsworth in order to make SAS adapters work 60 * is gratefully acknowledged. 61 * 62 * 63 * Support from LSI-Logic has also gone a great deal toward making this a 64 * workable subsystem and is gratefully acknowledged. 65 */ 66 /* 67 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 68 * Copyright (c) 2004, 2005 Justin T. Gibbs 69 * Copyright (c) 2005, WHEEL Sp. z o.o. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions are 74 * met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 78 * substantially similar to the "NO WARRANTY" disclaimer below 79 * ("Disclaimer") and any redistribution must be conditioned upon including 80 * a substantially similar Disclaimer requirement for further binary 81 * redistribution. 82 * 3. Neither the names of the above listed copyright holders nor the names 83 * of any contributors may be used to endorse or promote products derived 84 * from this software without specific prior written permission. 85 * 86 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 87 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 89 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 90 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 91 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 92 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 93 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 94 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 95 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 96 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 97 */ 98 99 #ifndef _MPT_H_ 100 #define _MPT_H_ 101 102 /********************************* OS Includes ********************************/ 103 #include <sys/types.h> 104 #include <sys/param.h> 105 #include <sys/systm.h> 106 #include <sys/endian.h> 107 #include <sys/eventhandler.h> 108 #if __FreeBSD_version < 500000 109 #include <sys/kernel.h> 110 #include <sys/queue.h> 111 #include <sys/malloc.h> 112 #else 113 #include <sys/lock.h> 114 #include <sys/kernel.h> 115 #include <sys/queue.h> 116 #include <sys/malloc.h> 117 #include <sys/mutex.h> 118 #include <sys/condvar.h> 119 #endif 120 #include <sys/proc.h> 121 #include <sys/bus.h> 122 #include <sys/module.h> 123 124 #include <machine/cpu.h> 125 #include <machine/resource.h> 126 127 #if __FreeBSD_version < 500000 128 #include <machine/bus.h> 129 #include <machine/clock.h> 130 #endif 131 132 #include <sys/rman.h> 133 134 #if __FreeBSD_version < 500000 135 #include <pci/pcireg.h> 136 #include <pci/pcivar.h> 137 #else 138 #include <dev/pci/pcireg.h> 139 #include <dev/pci/pcivar.h> 140 #endif 141 142 #include <machine/bus.h> 143 #include "opt_ddb.h" 144 145 /**************************** Register Definitions ****************************/ 146 #include <dev/mpt/mpt_reg.h> 147 148 /******************************* MPI Definitions ******************************/ 149 #include <dev/mpt/mpilib/mpi_type.h> 150 #include <dev/mpt/mpilib/mpi.h> 151 #include <dev/mpt/mpilib/mpi_cnfg.h> 152 #include <dev/mpt/mpilib/mpi_ioc.h> 153 #include <dev/mpt/mpilib/mpi_raid.h> 154 155 /* XXX For mpt_debug.c */ 156 #include <dev/mpt/mpilib/mpi_init.h> 157 158 /****************************** Misc Definitions ******************************/ 159 #define MPT_OK (0) 160 #define MPT_FAIL (0x10000) 161 162 #define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array)) 163 164 #define MPT_ROLE_NONE 0 165 #define MPT_ROLE_INITIATOR 1 166 #define MPT_ROLE_TARGET 2 167 #define MPT_ROLE_BOTH 3 168 #define MPT_ROLE_DEFAULT MPT_ROLE_INITIATOR 169 170 /**************************** Forward Declarations ****************************/ 171 struct mpt_softc; 172 struct mpt_personality; 173 typedef struct req_entry request_t; 174 175 /************************* Personality Module Support *************************/ 176 typedef int mpt_load_handler_t(struct mpt_personality *); 177 typedef int mpt_probe_handler_t(struct mpt_softc *); 178 typedef int mpt_attach_handler_t(struct mpt_softc *); 179 typedef int mpt_enable_handler_t(struct mpt_softc *); 180 typedef int mpt_event_handler_t(struct mpt_softc *, request_t *, 181 MSG_EVENT_NOTIFY_REPLY *); 182 typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/); 183 /* XXX Add return value and use for veto? */ 184 typedef void mpt_shutdown_handler_t(struct mpt_softc *); 185 typedef void mpt_detach_handler_t(struct mpt_softc *); 186 typedef int mpt_unload_handler_t(struct mpt_personality *); 187 188 struct mpt_personality 189 { 190 const char *name; 191 uint32_t id; /* Assigned identifier. */ 192 u_int use_count; /* Instances using personality*/ 193 mpt_load_handler_t *load; /* configure personailty */ 194 #define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load) 195 mpt_probe_handler_t *probe; /* configure personailty */ 196 mpt_attach_handler_t *attach; /* initialize device instance */ 197 mpt_enable_handler_t *enable; /* enable device */ 198 mpt_event_handler_t *event; /* Handle MPI event. */ 199 mpt_reset_handler_t *reset; /* Re-init after reset. */ 200 mpt_shutdown_handler_t *shutdown; /* Shutdown instance. */ 201 mpt_detach_handler_t *detach; /* release device instance */ 202 mpt_unload_handler_t *unload; /* Shutdown personality */ 203 #define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload) 204 }; 205 206 int mpt_modevent(module_t, int, void *); 207 208 /* Maximum supported number of personalities. */ 209 #define MPT_MAX_PERSONALITIES (15) 210 211 #define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \ 212 MODULE_DEPEND(name, dep, vmin, vpref, vmax) 213 214 #define DECLARE_MPT_PERSONALITY(name, order) \ 215 static moduledata_t name##_mod = { \ 216 #name, mpt_modevent, &name##_personality \ 217 }; \ 218 DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order); \ 219 MODULE_VERSION(name, 1); \ 220 MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1) 221 222 /******************************* Bus DMA Support ******************************/ 223 /* XXX Need to update bus_dmamap_sync to take a range argument. */ 224 #define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op) \ 225 bus_dmamap_sync(dma_tag, dmamap, op) 226 227 #if __FreeBSD_version >= 501102 228 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \ 229 lowaddr, highaddr, filter, filterarg, \ 230 maxsize, nsegments, maxsegsz, flags, \ 231 dma_tagp) \ 232 bus_dma_tag_create(parent_tag, alignment, boundary, \ 233 lowaddr, highaddr, filter, filterarg, \ 234 maxsize, nsegments, maxsegsz, flags, \ 235 busdma_lock_mutex, &Giant, \ 236 dma_tagp) 237 #else 238 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \ 239 lowaddr, highaddr, filter, filterarg, \ 240 maxsize, nsegments, maxsegsz, flags, \ 241 dma_tagp) \ 242 bus_dma_tag_create(parent_tag, alignment, boundary, \ 243 lowaddr, highaddr, filter, filterarg, \ 244 maxsize, nsegments, maxsegsz, flags, \ 245 dma_tagp) 246 #endif 247 248 struct mpt_map_info { 249 struct mpt_softc *mpt; 250 int error; 251 uint32_t phys; 252 }; 253 254 void mpt_map_rquest(void *, bus_dma_segment_t *, int, int); 255 256 /**************************** Kernel Thread Support ***************************/ 257 #if __FreeBSD_version > 500005 258 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ 259 kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) 260 #else 261 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ 262 kthread_create(func, farg, proc_ptr, fmtstr, arg) 263 #endif 264 265 /****************************** Timer Facilities ******************************/ 266 #if __FreeBSD_version > 500000 267 #define mpt_callout_init(c) callout_init(c, /*mpsafe*/0); 268 #else 269 #define mpt_callout_init(c) callout_init(c); 270 #endif 271 272 /********************************** Endianess *********************************/ 273 static __inline uint64_t 274 u64toh(U64 s) 275 { 276 uint64_t result; 277 278 result = le32toh(s.Low); 279 result |= ((uint64_t)le32toh(s.High)) << 32; 280 return (result); 281 } 282 283 /**************************** MPI Transaction State ***************************/ 284 typedef enum { 285 REQ_STATE_NIL = 0x00, 286 REQ_STATE_FREE = 0x01, 287 REQ_STATE_ALLOCATED = 0x02, 288 REQ_STATE_QUEUED = 0x04, 289 REQ_STATE_DONE = 0x08, 290 REQ_STATE_TIMEDOUT = 0x10, 291 REQ_STATE_NEED_WAKEUP = 0x20, 292 REQ_STATE_LOCKED = 0x80, /* can't be freed */ 293 REQ_STATE_MASK = 0xFF 294 } mpt_req_state_t; 295 296 struct req_entry { 297 TAILQ_ENTRY(req_entry) links; /* Pointer to next in list */ 298 mpt_req_state_t state; /* Request State Information */ 299 uint16_t index; /* Index of this entry */ 300 uint16_t IOCStatus; /* Completion status */ 301 uint16_t ResponseCode; /* TMF Reponse Code */ 302 uint16_t serno; /* serial number */ 303 union ccb *ccb; /* CAM request */ 304 void *req_vbuf; /* Virtual Address of Entry */ 305 void *sense_vbuf; /* Virtual Address of sense data */ 306 bus_addr_t req_pbuf; /* Physical Address of Entry */ 307 bus_addr_t sense_pbuf; /* Physical Address of sense data */ 308 bus_dmamap_t dmap; /* DMA map for data buffers */ 309 struct req_entry *chain; /* for SGE overallocations */ 310 }; 311 312 /**************************** MPI Target State Info ***************************/ 313 314 typedef struct { 315 uint32_t reply_desc; /* current reply descriptor */ 316 uint32_t resid; /* current data residual */ 317 uint32_t bytes_xfered; /* current relative offset */ 318 union ccb *ccb; /* pointer to currently active ccb */ 319 request_t *req; /* pointer to currently active assist request */ 320 int nxfers; 321 uint32_t tag_id; 322 enum { 323 TGT_STATE_NIL, 324 TGT_STATE_LOADING, 325 TGT_STATE_LOADED, 326 TGT_STATE_IN_CAM, 327 TGT_STATE_SETTING_UP_FOR_DATA, 328 TGT_STATE_MOVING_DATA, 329 TGT_STATE_MOVING_DATA_AND_STATUS, 330 TGT_STATE_SENDING_STATUS 331 } state; 332 } mpt_tgt_state_t; 333 334 /* 335 * When we get an incoming command it has its own tag which is called the 336 * IoIndex. This is the value we gave that particular command buffer when 337 * we originally assigned it. It's just a number, really. The FC card uses 338 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which 339 * contains pointers the request_t structures related to that IoIndex. 340 * 341 * What *we* do is construct a tag out of the index for the target command 342 * which owns the incoming ATIO plus a rolling sequence number. 343 */ 344 #define MPT_MAKE_TAGID(mpt, req, ioindex) \ 345 ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff)) 346 347 #ifdef INVARIANTS 348 #define MPT_TAG_2_REQ(a, b) mpt_tag_2_req(a, (uint32_t) b) 349 #else 350 #define MPT_TAG_2_REQ(mpt, tag) mpt->tgt_cmd_ptrs[tag >> 18] 351 #endif 352 353 #define MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \ 354 (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)])) 355 356 STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr); 357 #define MPT_MAX_LUNS 256 358 typedef struct { 359 struct mpt_hdr_stailq atios; 360 struct mpt_hdr_stailq inots; 361 int enabled; 362 } tgt_resource_t; 363 #define MPT_MAX_ELS 64 364 365 /**************************** Handler Registration ****************************/ 366 /* 367 * Global table of registered reply handlers. The 368 * handler is indicated by byte 3 of the request 369 * index submitted to the IOC. This allows the 370 * driver core to perform generic processing without 371 * any knowledge of per-personality behavior. 372 * 373 * MPT_NUM_REPLY_HANDLERS must be a power of 2 374 * to allow the easy generation of a mask. 375 * 376 * The handler offsets used by the core are hard coded 377 * allowing faster code generation when assigning a handler 378 * to a request. All "personalities" must use the 379 * the handler registration mechanism. 380 * 381 * The IOC handlers that are rarely executed are placed 382 * at the tail of the table to make it more likely that 383 * all commonly executed handlers fit in a single cache 384 * line. 385 */ 386 #define MPT_NUM_REPLY_HANDLERS (32) 387 #define MPT_REPLY_HANDLER_EVENTS MPT_CBI_TO_HID(0) 388 #define MPT_REPLY_HANDLER_CONFIG MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1) 389 #define MPT_REPLY_HANDLER_HANDSHAKE MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2) 390 typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request, 391 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame); 392 typedef union { 393 mpt_reply_handler_t *reply_handler; 394 } mpt_handler_t; 395 396 typedef enum { 397 MPT_HANDLER_REPLY, 398 MPT_HANDLER_EVENT, 399 MPT_HANDLER_RESET, 400 MPT_HANDLER_SHUTDOWN 401 } mpt_handler_type; 402 403 struct mpt_handler_record 404 { 405 LIST_ENTRY(mpt_handler_record) links; 406 mpt_handler_t handler; 407 }; 408 409 LIST_HEAD(mpt_handler_list, mpt_handler_record); 410 411 /* 412 * The handler_id is currently unused but would contain the 413 * handler ID used in the MsgContext field to allow direction 414 * of replies to the handler. Registrations that don't require 415 * a handler id can pass in NULL for the handler_id. 416 * 417 * Deregistrations for handlers without a handler id should 418 * pass in MPT_HANDLER_ID_NONE. 419 */ 420 #define MPT_HANDLER_ID_NONE (0xFFFFFFFF) 421 int mpt_register_handler(struct mpt_softc *, mpt_handler_type, 422 mpt_handler_t, uint32_t *); 423 int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type, 424 mpt_handler_t, uint32_t); 425 426 /******************* Per-Controller Instance Data Structures ******************/ 427 TAILQ_HEAD(req_queue, req_entry); 428 429 /* Structure for saving proper values for modifyable PCI config registers */ 430 struct mpt_pci_cfg { 431 uint16_t Command; 432 uint16_t LatencyTimer_LineSize; 433 uint32_t IO_BAR; 434 uint32_t Mem0_BAR[2]; 435 uint32_t Mem1_BAR[2]; 436 uint32_t ROM_BAR; 437 uint8_t IntLine; 438 uint32_t PMCSR; 439 }; 440 441 typedef enum { 442 MPT_RVF_NONE = 0x0, 443 MPT_RVF_ACTIVE = 0x1, 444 MPT_RVF_ANNOUNCED = 0x2, 445 MPT_RVF_UP2DATE = 0x4, 446 MPT_RVF_REFERENCED = 0x8, 447 MPT_RVF_WCE_CHANGED = 0x10 448 } mpt_raid_volume_flags; 449 450 struct mpt_raid_volume { 451 CONFIG_PAGE_RAID_VOL_0 *config_page; 452 MPI_RAID_VOL_INDICATOR sync_progress; 453 mpt_raid_volume_flags flags; 454 u_int quiesced_disks; 455 }; 456 457 typedef enum { 458 MPT_RDF_NONE = 0x00, 459 MPT_RDF_ACTIVE = 0x01, 460 MPT_RDF_ANNOUNCED = 0x02, 461 MPT_RDF_UP2DATE = 0x04, 462 MPT_RDF_REFERENCED = 0x08, 463 MPT_RDF_QUIESCING = 0x10, 464 MPT_RDF_QUIESCED = 0x20 465 } mpt_raid_disk_flags; 466 467 struct mpt_raid_disk { 468 CONFIG_PAGE_RAID_PHYS_DISK_0 config_page; 469 struct mpt_raid_volume *volume; 470 u_int member_number; 471 u_int pass_thru_active; 472 mpt_raid_disk_flags flags; 473 }; 474 475 struct mpt_evtf_record { 476 MSG_EVENT_NOTIFY_REPLY reply; 477 uint32_t context; 478 LIST_ENTRY(mpt_evtf_record) links; 479 }; 480 481 LIST_HEAD(mpt_evtf_list, mpt_evtf_record); 482 483 struct mpt_softc { 484 device_t dev; 485 #if __FreeBSD_version < 500000 486 uint32_t mpt_islocked; 487 int mpt_splsaved; 488 #else 489 struct mtx mpt_lock; 490 int mpt_locksetup; 491 #endif 492 uint32_t mpt_pers_mask; 493 uint32_t : 8, 494 unit : 8, 495 : 1, 496 twildcard : 1, 497 tenabled : 1, 498 role : 2, /* none, ini, target, both */ 499 : 1, 500 raid_enabled : 1, 501 raid_mwce_set : 1, 502 getreqwaiter : 1, 503 shutdwn_raid : 1, 504 shutdwn_recovery: 1, 505 outofbeer : 1, 506 disabled : 1, 507 is_spi : 1, 508 is_sas : 1, 509 is_fc : 1; 510 511 u_int verbose; 512 513 /* 514 * IOC Facts 515 */ 516 uint16_t mpt_global_credits; 517 uint16_t request_frame_size; 518 uint8_t mpt_max_devices; 519 uint8_t mpt_max_buses; 520 uint8_t ioc_facts_flags; 521 uint8_t padding0; 522 523 /* 524 * Port Facts 525 * XXX - Add multi-port support!. 526 */ 527 uint16_t mpt_ini_id; 528 uint16_t mpt_port_type; 529 uint16_t mpt_proto_flags; 530 uint16_t mpt_max_tgtcmds; 531 532 /* 533 * Device Configuration Information 534 */ 535 union { 536 struct mpt_spi_cfg { 537 CONFIG_PAGE_SCSI_PORT_0 _port_page0; 538 CONFIG_PAGE_SCSI_PORT_1 _port_page1; 539 CONFIG_PAGE_SCSI_PORT_2 _port_page2; 540 CONFIG_PAGE_SCSI_DEVICE_0 _dev_page0[16]; 541 CONFIG_PAGE_SCSI_DEVICE_1 _dev_page1[16]; 542 uint16_t _tag_enable; 543 uint16_t _disc_enable; 544 } spi; 545 #define mpt_port_page0 cfg.spi._port_page0 546 #define mpt_port_page1 cfg.spi._port_page1 547 #define mpt_port_page2 cfg.spi._port_page2 548 #define mpt_dev_page0 cfg.spi._dev_page0 549 #define mpt_dev_page1 cfg.spi._dev_page1 550 #define mpt_tag_enable cfg.spi._tag_enable 551 #define mpt_disc_enable cfg.spi._disc_enable 552 struct mpi_fc_cfg { 553 CONFIG_PAGE_FC_PORT_0 _port_page0; 554 #define mpt_fcport_page0 cfg.fc._port_page0 555 } fc; 556 } cfg; 557 558 /* Controller Info for RAID information */ 559 CONFIG_PAGE_IOC_2 * ioc_page2; 560 CONFIG_PAGE_IOC_3 * ioc_page3; 561 562 /* Raid Data */ 563 struct mpt_raid_volume* raid_volumes; 564 struct mpt_raid_disk* raid_disks; 565 u_int raid_max_volumes; 566 u_int raid_max_disks; 567 u_int raid_page0_len; 568 u_int raid_wakeup; 569 u_int raid_rescan; 570 u_int raid_resync_rate; 571 u_int raid_mwce_setting; 572 u_int raid_queue_depth; 573 u_int raid_nonopt_volumes; 574 struct proc *raid_thread; 575 struct callout raid_timer; 576 577 /* 578 * PCI Hardware info 579 */ 580 struct resource * pci_irq; /* Interrupt map for chip */ 581 void * ih; /* Interupt handle */ 582 struct mpt_pci_cfg pci_cfg; /* saved PCI conf registers */ 583 584 /* 585 * DMA Mapping Stuff 586 */ 587 struct resource * pci_reg; /* Register map for chip */ 588 int pci_mem_rid; /* Resource ID */ 589 bus_space_tag_t pci_st; /* Bus tag for registers */ 590 bus_space_handle_t pci_sh; /* Bus handle for registers */ 591 /* PIO versions of above. */ 592 int pci_pio_rid; 593 struct resource * pci_pio_reg; 594 bus_space_tag_t pci_pio_st; 595 bus_space_handle_t pci_pio_sh; 596 597 bus_dma_tag_t parent_dmat; /* DMA tag for parent PCI bus */ 598 bus_dma_tag_t reply_dmat; /* DMA tag for reply memory */ 599 bus_dmamap_t reply_dmap; /* DMA map for reply memory */ 600 uint8_t *reply; /* KVA of reply memory */ 601 bus_addr_t reply_phys; /* BusAddr of reply memory */ 602 603 bus_dma_tag_t buffer_dmat; /* DMA tag for buffers */ 604 bus_dma_tag_t request_dmat; /* DMA tag for request memroy */ 605 bus_dmamap_t request_dmap; /* DMA map for request memroy */ 606 uint8_t *request; /* KVA of Request memory */ 607 bus_addr_t request_phys; /* BusAddr of request memory */ 608 609 uint32_t max_seg_cnt; /* calculated after IOC facts */ 610 611 /* 612 * Hardware management 613 */ 614 u_int reset_cnt; 615 616 /* 617 * CAM && Software Management 618 */ 619 request_t *request_pool; 620 struct req_queue request_free_list; 621 struct req_queue request_pending_list; 622 struct req_queue request_timeout_list; 623 624 625 struct cam_sim *sim; 626 struct cam_path *path; 627 628 struct cam_sim *phydisk_sim; 629 struct cam_path *phydisk_path; 630 631 struct proc *recovery_thread; 632 request_t *tmf_req; 633 634 /* 635 * Deferred frame acks due to resource shortage. 636 */ 637 struct mpt_evtf_list ack_frames; 638 639 /* 640 * Target Mode Support 641 */ 642 uint32_t scsi_tgt_handler_id; 643 request_t ** tgt_cmd_ptrs; 644 request_t ** els_cmd_ptrs; /* FC only */ 645 646 /* 647 * *snork*- this is chosen to be here *just in case* somebody 648 * forgets to point to it exactly and we index off of trt with 649 * CAM_LUN_WILDCARD. 650 */ 651 tgt_resource_t trt_wildcard; /* wildcard luns */ 652 tgt_resource_t trt[MPT_MAX_LUNS]; 653 uint16_t tgt_cmds_allocated; 654 uint16_t els_cmds_allocated; /* FC only */ 655 656 uint16_t timeouts; /* timeout count */ 657 uint16_t success; /* successes afer timeout */ 658 uint16_t sequence; /* Sequence Number */ 659 uint16_t pad3; 660 661 662 /* Paired port in some dual adapters configurations */ 663 struct mpt_softc * mpt2; 664 665 /* FW Image management */ 666 uint32_t fw_image_size; 667 uint8_t *fw_image; 668 bus_dma_tag_t fw_dmat; /* DMA tag for firmware image */ 669 bus_dmamap_t fw_dmap; /* DMA map for firmware image */ 670 bus_addr_t fw_phys; /* BusAddr of firmware image */ 671 672 /* Shutdown Event Handler. */ 673 eventhandler_tag eh; 674 675 TAILQ_ENTRY(mpt_softc) links; 676 }; 677 678 static __inline void mpt_assign_serno(struct mpt_softc *, request_t *); 679 680 static __inline void 681 mpt_assign_serno(struct mpt_softc *mpt, request_t *req) 682 { 683 if ((req->serno = mpt->sequence++) == 0) { 684 req->serno = mpt->sequence++; 685 } 686 } 687 688 /***************************** Locking Primitives *****************************/ 689 #if __FreeBSD_version < 500000 690 #define MPT_IFLAGS INTR_TYPE_CAM 691 #define MPT_LOCK(mpt) mpt_lockspl(mpt) 692 #define MPT_UNLOCK(mpt) mpt_unlockspl(mpt) 693 #define MPT_OWNED(mpt) mpt->mpt_islocked 694 #define MPTLOCK_2_CAMLOCK MPT_UNLOCK 695 #define CAMLOCK_2_MPTLOCK MPT_LOCK 696 #define MPT_LOCK_SETUP(mpt) 697 #define MPT_LOCK_DESTROY(mpt) 698 699 static __inline void mpt_lockspl(struct mpt_softc *mpt); 700 static __inline void mpt_unlockspl(struct mpt_softc *mpt); 701 702 static __inline void 703 mpt_lockspl(struct mpt_softc *mpt) 704 { 705 int s; 706 707 s = splcam(); 708 if (mpt->mpt_islocked++ == 0) { 709 mpt->mpt_splsaved = s; 710 } else { 711 splx(s); 712 panic("Recursed lock with mask: 0x%x\n", s); 713 } 714 } 715 716 static __inline void 717 mpt_unlockspl(struct mpt_softc *mpt) 718 { 719 if (mpt->mpt_islocked) { 720 if (--mpt->mpt_islocked == 0) { 721 splx(mpt->mpt_splsaved); 722 } 723 } else 724 panic("Negative lock count\n"); 725 } 726 727 static __inline int 728 mpt_sleep(struct mpt_softc *mpt, void *ident, int priority, 729 const char *wmesg, int timo) 730 { 731 int saved_cnt; 732 int saved_spl; 733 int error; 734 735 KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep")); 736 saved_cnt = mpt->mpt_islocked; 737 saved_spl = mpt->mpt_splsaved; 738 mpt->mpt_islocked = 0; 739 error = tsleep(ident, priority, wmesg, timo); 740 KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup")); 741 mpt->mpt_islocked = saved_cnt; 742 mpt->mpt_splsaved = saved_spl; 743 return (error); 744 } 745 746 #else 747 #ifdef LOCKING_WORKED_AS_IT_SHOULD 748 #error "Shouldn't Be Here!" 749 #define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE 750 #define MPT_LOCK_SETUP(mpt) \ 751 mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF); \ 752 mpt->mpt_locksetup = 1 753 #define MPT_LOCK_DESTROY(mpt) \ 754 if (mpt->mpt_locksetup) { \ 755 mtx_destroy(&mpt->mpt_lock); \ 756 mpt->mpt_locksetup = 0; \ 757 } 758 759 #define MPT_LOCK(mpt) mtx_lock(&(mpt)->mpt_lock) 760 #define MPT_UNLOCK(mpt) mtx_unlock(&(mpt)->mpt_lock) 761 #define MPT_OWNED(mpt) mtx_owned(&(mpt)->mpt_lock) 762 #define MPTLOCK_2_CAMLOCK(mpt) \ 763 mtx_unlock(&(mpt)->mpt_lock); mtx_lock(&Giant) 764 #define CAMLOCK_2_MPTLOCK(mpt) \ 765 mtx_unlock(&Giant); mtx_lock(&(mpt)->mpt_lock) 766 #define mpt_sleep(mpt, ident, priority, wmesg, timo) \ 767 msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo) 768 769 #else 770 771 #define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY 772 #define MPT_LOCK_SETUP(mpt) do { } while (0) 773 #define MPT_LOCK_DESTROY(mpt) do { } while (0) 774 #if 0 775 #define MPT_LOCK(mpt) \ 776 device_printf(mpt->dev, "LOCK %s:%d\n", __FILE__, __LINE__); \ 777 KASSERT(mpt->mpt_locksetup == 0, \ 778 ("recursive lock acquire at %s:%d", __FILE__, __LINE__)); \ 779 mpt->mpt_locksetup = 1 780 #define MPT_UNLOCK(mpt) \ 781 device_printf(mpt->dev, "UNLK %s:%d\n", __FILE__, __LINE__); \ 782 KASSERT(mpt->mpt_locksetup == 1, \ 783 ("release unowned lock at %s:%d", __FILE__, __LINE__)); \ 784 mpt->mpt_locksetup = 0 785 #else 786 #define MPT_LOCK(mpt) \ 787 KASSERT(mpt->mpt_locksetup == 0, \ 788 ("recursive lock acquire at %s:%d", __FILE__, __LINE__)); \ 789 mpt->mpt_locksetup = 1 790 #define MPT_UNLOCK(mpt) \ 791 KASSERT(mpt->mpt_locksetup == 1, \ 792 ("release unowned lock at %s:%d", __FILE__, __LINE__)); \ 793 mpt->mpt_locksetup = 0 794 #endif 795 #define MPT_OWNED(mpt) mpt->mpt_locksetup 796 #define MPTLOCK_2_CAMLOCK(mpt) MPT_UNLOCK(mpt) 797 #define CAMLOCK_2_MPTLOCK(mpt) MPT_LOCK(mpt) 798 799 static __inline int 800 mpt_sleep(struct mpt_softc *, void *, int, const char *, int); 801 802 static __inline int 803 mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t) 804 { 805 int r; 806 MPT_UNLOCK(mpt); 807 r = tsleep(i, p, w, t); 808 MPT_LOCK(mpt); 809 return (r); 810 } 811 #endif 812 #endif 813 814 /******************************* Register Access ******************************/ 815 static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t); 816 static __inline uint32_t mpt_read(struct mpt_softc *, int); 817 static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t); 818 static __inline uint32_t mpt_pio_read(struct mpt_softc *, int); 819 820 static __inline void 821 mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val) 822 { 823 bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val); 824 } 825 826 static __inline uint32_t 827 mpt_read(struct mpt_softc *mpt, int offset) 828 { 829 return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset)); 830 } 831 832 /* 833 * Some operations (e.g. diagnostic register writes while the ARM proccessor 834 * is disabled), must be performed using "PCI pio" operations. On non-PCI 835 * busses, these operations likely map to normal register accesses. 836 */ 837 static __inline void 838 mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val) 839 { 840 bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val); 841 } 842 843 static __inline uint32_t 844 mpt_pio_read(struct mpt_softc *mpt, int offset) 845 { 846 return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset)); 847 } 848 /*********************** Reply Frame/Request Management ***********************/ 849 /* Max MPT Reply we are willing to accept (must be power of 2) */ 850 #define MPT_REPLY_SIZE 256 851 852 /* 853 * Must be less than 16384 in order for target mode to work 854 */ 855 #define MPT_MAX_REQUESTS(mpt) 512 856 #define MPT_REQUEST_AREA 512 857 #define MPT_SENSE_SIZE 32 /* included in MPT_REQUEST_AREA */ 858 #define MPT_REQ_MEM_SIZE(mpt) (MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA) 859 860 #define MPT_CONTEXT_CB_SHIFT (16) 861 #define MPT_CBI(handle) (handle >> MPT_CONTEXT_CB_SHIFT) 862 #define MPT_CBI_TO_HID(cbi) ((cbi) << MPT_CONTEXT_CB_SHIFT) 863 #define MPT_CONTEXT_TO_CBI(x) \ 864 (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1)) 865 #define MPT_CONTEXT_REQI_MASK 0xFFFF 866 #define MPT_CONTEXT_TO_REQI(x) ((x) & MPT_CONTEXT_REQI_MASK) 867 868 /* 869 * Convert a 32bit physical address returned from IOC to an 870 * offset into our reply frame memory or the kvm address needed 871 * to access the data. The returned address is only the low 872 * 32 bits, so mask our base physical address accordingly. 873 */ 874 #define MPT_REPLY_BADDR(x) \ 875 (x << 1) 876 #define MPT_REPLY_OTOV(m, i) \ 877 ((void *)(&m->reply[i])) 878 879 #define MPT_DUMP_REPLY_FRAME(mpt, reply_frame) \ 880 do { \ 881 if (mpt->verbose > MPT_PRT_DEBUG) \ 882 mpt_dump_reply_frame(mpt, reply_frame); \ 883 } while(0) 884 885 static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt); 886 static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr); 887 888 /* 889 * Give the reply buffer back to the IOC after we have 890 * finished processing it. 891 */ 892 static __inline void 893 mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr) 894 { 895 mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr); 896 } 897 898 /* Get a reply from the IOC */ 899 static __inline uint32_t 900 mpt_pop_reply_queue(struct mpt_softc *mpt) 901 { 902 return mpt_read(mpt, MPT_OFFSET_REPLY_Q); 903 } 904 905 void 906 mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int); 907 908 /************************** Scatter Gather Managment **************************/ 909 /* MPT_RQSL- size of request frame, in bytes */ 910 #define MPT_RQSL(mpt) (mpt->request_frame_size << 2) 911 912 /* MPT_NSGL- how many SG entries can fit in a request frame size */ 913 #define MPT_NSGL(mpt) (MPT_RQSL(mpt) / sizeof (SGE_IO_UNION)) 914 915 /* MPT_NRFM- how many request frames can fit in each request alloc we make */ 916 #define MPT_NRFM(mpt) (MPT_REQUEST_AREA / MPT_RQSL(mpt)) 917 918 /* 919 * MPT_NSGL_FIRST- # of SG elements that can fit after 920 * an I/O request but still within the request frame. 921 * Do this safely based upon SGE_IO_UNION. 922 * 923 * Note that the first element is *within* the SCSI request. 924 */ 925 #define MPT_NSGL_FIRST(mpt) \ 926 ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \ 927 sizeof (SGE_IO_UNION)) 928 929 /***************************** IOC Initialization *****************************/ 930 int mpt_reset(struct mpt_softc *, int /*reinit*/); 931 932 /****************************** Debugging ************************************/ 933 typedef struct mpt_decode_entry { 934 char *name; 935 u_int value; 936 u_int mask; 937 } mpt_decode_entry_t; 938 939 int mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries, 940 const char *name, u_int value, u_int *cur_column, 941 u_int wrap_point); 942 943 void mpt_dump_request(struct mpt_softc *, request_t *); 944 945 enum { 946 MPT_PRT_ALWAYS, 947 MPT_PRT_FATAL, 948 MPT_PRT_ERROR, 949 MPT_PRT_WARN, 950 MPT_PRT_INFO, 951 MPT_PRT_NEGOTIATION, 952 MPT_PRT_DEBUG, 953 MPT_PRT_DEBUG1, 954 MPT_PRT_DEBUG2, 955 MPT_PRT_DEBUG3, 956 MPT_PRT_TRACE, 957 MPT_PRT_NONE=100 958 }; 959 960 #if __FreeBSD_version > 500000 961 #define mpt_lprt(mpt, level, ...) \ 962 do { \ 963 if (level <= (mpt)->verbose) \ 964 mpt_prt(mpt, __VA_ARGS__); \ 965 } while (0) 966 967 #define mpt_lprtc(mpt, level, ...) \ 968 do { \ 969 if (level <= (mpt)->debug_level) \ 970 mpt_prtc(mpt, __VA_ARGS__); \ 971 } while (0) 972 #else 973 void mpt_lprt(struct mpt_softc *, int, const char *, ...) 974 __printflike(3, 4); 975 void mpt_lprtc(struct mpt_softc *, int, const char *, ...) 976 __printflike(3, 4); 977 #endif 978 void mpt_prt(struct mpt_softc *, const char *, ...) 979 __printflike(2, 3); 980 void mpt_prtc(struct mpt_softc *, const char *, ...) 981 __printflike(2, 3); 982 983 /**************************** Target Mode Related ***************************/ 984 static __inline int mpt_cdblen(uint8_t, int); 985 static __inline int 986 mpt_cdblen(uint8_t cdb0, int maxlen) 987 { 988 int group = cdb0 >> 5; 989 switch (group) { 990 case 0: 991 return (6); 992 case 1: 993 return (10); 994 case 4: 995 case 5: 996 return (12); 997 default: 998 return (16); 999 } 1000 } 1001 #ifdef INVARIANTS 1002 static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t); 1003 static __inline request_t * 1004 mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag) 1005 { 1006 uint16_t rtg = (tag >> 18); 1007 KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d\n", tag)); 1008 KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array")); 1009 KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer")); 1010 return (mpt->tgt_cmd_ptrs[rtg]); 1011 } 1012 1013 1014 static __inline int 1015 mpt_req_on_free_list(struct mpt_softc *, request_t *); 1016 static __inline int 1017 mpt_req_on_pending_list(struct mpt_softc *, request_t *); 1018 1019 static __inline void 1020 mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int); 1021 static __inline void 1022 mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int); 1023 1024 1025 /* 1026 * Is request on freelist? 1027 */ 1028 static __inline int 1029 mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req) 1030 { 1031 request_t *lrq; 1032 1033 TAILQ_FOREACH(lrq, &mpt->request_free_list, links) { 1034 if (lrq == req) { 1035 return (1); 1036 } 1037 } 1038 return (0); 1039 } 1040 1041 /* 1042 * Is request on pending list? 1043 */ 1044 static __inline int 1045 mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req) 1046 { 1047 request_t *lrq; 1048 1049 TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) { 1050 if (lrq == req) { 1051 return (1); 1052 } 1053 } 1054 return (0); 1055 } 1056 1057 /* 1058 * Make sure that req *is* part of one of the special lists 1059 */ 1060 static __inline void 1061 mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line) 1062 { 1063 int i; 1064 for (i = 0; i < mpt->els_cmds_allocated; i++) { 1065 if (req == mpt->els_cmd_ptrs[i]) { 1066 return; 1067 } 1068 } 1069 for (i = 0; i < mpt->tgt_cmds_allocated; i++) { 1070 if (req == mpt->tgt_cmd_ptrs[i]) { 1071 return; 1072 } 1073 } 1074 panic("%s(%d): req %p:%u function %x not in els or tgt ptrs\n", 1075 s, line, req, req->serno, 1076 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function); 1077 } 1078 1079 /* 1080 * Make sure that req is *not* part of one of the special lists. 1081 */ 1082 static __inline void 1083 mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line) 1084 { 1085 int i; 1086 for (i = 0; i < mpt->els_cmds_allocated; i++) { 1087 KASSERT(req != mpt->els_cmd_ptrs[i], 1088 ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d\n", 1089 s, line, req, req->serno, 1090 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i)); 1091 } 1092 for (i = 0; i < mpt->tgt_cmds_allocated; i++) { 1093 KASSERT(req != mpt->tgt_cmd_ptrs[i], 1094 ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d\n", 1095 s, line, req, req->serno, 1096 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i)); 1097 } 1098 } 1099 #endif 1100 1101 typedef enum { 1102 MPT_ABORT_TASK_SET=1234, 1103 MPT_CLEAR_TASK_SET, 1104 MPT_TARGET_RESET, 1105 MPT_CLEAR_ACA, 1106 MPT_TERMINATE_TASK, 1107 MPT_NIL_TMT_VALUE=5678 1108 } mpt_task_mgmt_t; 1109 1110 /**************************** Unclassified Routines ***************************/ 1111 void mpt_send_cmd(struct mpt_softc *mpt, request_t *req); 1112 int mpt_recv_handshake_reply(struct mpt_softc *mpt, 1113 size_t reply_len, void *reply); 1114 int mpt_wait_req(struct mpt_softc *mpt, request_t *req, 1115 mpt_req_state_t state, mpt_req_state_t mask, 1116 int sleep_ok, int time_ms); 1117 void mpt_enable_ints(struct mpt_softc *mpt); 1118 void mpt_disable_ints(struct mpt_softc *mpt); 1119 int mpt_attach(struct mpt_softc *mpt); 1120 int mpt_shutdown(struct mpt_softc *mpt); 1121 int mpt_detach(struct mpt_softc *mpt); 1122 int mpt_send_handshake_cmd(struct mpt_softc *mpt, 1123 size_t len, void *cmd); 1124 request_t * mpt_get_request(struct mpt_softc *mpt, int sleep_ok); 1125 void mpt_free_request(struct mpt_softc *mpt, request_t *req); 1126 void mpt_intr(void *arg); 1127 void mpt_check_doorbell(struct mpt_softc *mpt); 1128 void mpt_dump_reply_frame(struct mpt_softc *mpt, 1129 MSG_DEFAULT_REPLY *reply_frame); 1130 1131 void mpt_set_config_regs(struct mpt_softc *); 1132 int mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/, 1133 u_int /*Action*/, u_int /*PageVersion*/, 1134 u_int /*PageLength*/, u_int /*PageNumber*/, 1135 u_int /*PageType*/, uint32_t /*PageAddress*/, 1136 bus_addr_t /*addr*/, bus_size_t/*len*/, 1137 int /*sleep_ok*/, int /*timeout_ms*/); 1138 int mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/, 1139 int /*PageNumber*/, 1140 uint32_t /*PageAddress*/, 1141 CONFIG_PAGE_HEADER *, 1142 int /*sleep_ok*/, int /*timeout_ms*/); 1143 int mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/, 1144 uint32_t /*PageAddress*/, 1145 CONFIG_PAGE_HEADER *, size_t /*len*/, 1146 int /*sleep_ok*/, int /*timeout_ms*/); 1147 int mpt_write_cfg_page(struct mpt_softc *, int /*Action*/, 1148 uint32_t /*PageAddress*/, 1149 CONFIG_PAGE_HEADER *, size_t /*len*/, 1150 int /*sleep_ok*/, int /*timeout_ms*/); 1151 static __inline int 1152 mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress, 1153 CONFIG_PAGE_HEADER *hdr, size_t len, 1154 int sleep_ok, int timeout_ms) 1155 { 1156 return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 1157 PageAddress, hdr, len, sleep_ok, timeout_ms)); 1158 } 1159 1160 static __inline int 1161 mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress, 1162 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1163 int timeout_ms) 1164 { 1165 return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, 1166 PageAddress, hdr, len, sleep_ok, 1167 timeout_ms)); 1168 } 1169 1170 /* mpt_debug.c functions */ 1171 void mpt_print_reply(void *vmsg); 1172 void mpt_print_db(uint32_t mb); 1173 void mpt_print_config_reply(void *vmsg); 1174 char *mpt_ioc_diag(uint32_t diag); 1175 void mpt_req_state(mpt_req_state_t state); 1176 void mpt_print_config_request(void *vmsg); 1177 void mpt_print_request(void *vmsg); 1178 void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg); 1179 void mpt_dump_sgl(SGE_IO_UNION *se, int offset); 1180 #endif /* _MPT_H_ */ 1181