/* $FreeBSD$ */
/*-
 * Generic defines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MPT_H_
#define _MPT_H_

/********************************* OS Includes ********************************/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#if __FreeBSD_version < 500000
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#else
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#endif
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/module.h>

#include <machine/cpu.h>
#include <machine/resource.h>

#if __FreeBSD_version < 500000
#include <machine/bus.h>
#include <machine/clock.h>
#endif

#include <sys/rman.h>

#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#else
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#endif

#include <machine/bus.h>
#include "opt_ddb.h"

/**************************** Register Definitions ****************************/
#include <dev/mpt/mpt_reg.h>

/******************************* MPI Definitions ******************************/
#include <dev/mpt/mpilib/mpi_type.h>
#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_cnfg.h>
#include <dev/mpt/mpilib/mpi_ioc.h>
#include <dev/mpt/mpilib/mpi_raid.h>

/* XXX For mpt_debug.c */
#include <dev/mpt/mpilib/mpi_init.h>

#define	MPT_S64_2_SCALAR(y)	((((int64_t)y.High) << 32) | (y.Low))
#define	MPT_U64_2_SCALAR(y)	((((uint64_t)y.High) << 32) | (y.Low))

/****************************** Misc Definitions ******************************/
/* #define MPT_TEST_MULTIPATH	1 */
#define	MPT_OK		(0)
#define	MPT_FAIL	(0x10000)

#define	NUM_ELEMENTS(array)	(sizeof(array) / sizeof(*array))

#define	MPT_ROLE_NONE		0
#define	MPT_ROLE_INITIATOR	1
#define	MPT_ROLE_TARGET		2
#define	MPT_ROLE_BOTH		3
#define	MPT_ROLE_DEFAULT	MPT_ROLE_INITIATOR

/**************************** Forward Declarations ****************************/
struct mpt_softc;
struct mpt_personality;
typedef struct req_entry request_t;

/************************* Personality Module Support *************************/
typedef int mpt_load_handler_t(struct mpt_personality *);
typedef int mpt_probe_handler_t(struct mpt_softc *);
typedef int mpt_attach_handler_t(struct mpt_softc *);
typedef int mpt_enable_handler_t(struct mpt_softc *);
typedef void mpt_ready_handler_t(struct mpt_softc *);
typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
    MSG_EVENT_NOTIFY_REPLY *);
typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
/* XXX Add return value and use for veto? */
typedef void mpt_shutdown_handler_t(struct mpt_softc *);
typedef void mpt_detach_handler_t(struct mpt_softc *);
typedef int mpt_unload_handler_t(struct mpt_personality *);

struct mpt_personality
{
	const char		*name;
	uint32_t		 id;		/* Assigned identifier. */
	u_int			 use_count;	/* Instances using personality */
	mpt_load_handler_t	*load;		/* configure personality */
#define	MPT_PERS_FIRST_HANDLER(pers)	(&(pers)->load)
	mpt_probe_handler_t	*probe;		/* configure personality */
	mpt_attach_handler_t	*attach;	/* initialize device instance */
	mpt_enable_handler_t	*enable;	/* enable device */
	mpt_ready_handler_t	*ready;		/* final open for business */
	mpt_event_handler_t	*event;		/* Handle MPI event. */
	mpt_reset_handler_t	*reset;		/* Re-init after reset. */
	mpt_shutdown_handler_t	*shutdown;	/* Shutdown instance. */
	mpt_detach_handler_t	*detach;	/* release device instance */
	mpt_unload_handler_t	*unload;	/* Shutdown personality */
#define	MPT_PERS_LAST_HANDLER(pers)	(&(pers)->unload)
};

int mpt_modevent(module_t, int, void *);

/* Maximum supported number of personalities. */
#define	MPT_MAX_PERSONALITIES	(15)

#define	MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
	MODULE_DEPEND(name, dep, vmin, vpref, vmax)

#define	DECLARE_MPT_PERSONALITY(name, order)				\
	static moduledata_t name##_mod = {				\
		#name, mpt_modevent, &name##_personality		\
	};								\
	DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order);	\
	MODULE_VERSION(name, 1);					\
	MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)
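
/*
 * Illustrative sketch only (not an interface defined by this header): a
 * hypothetical personality module "mpt_foo" would fill in a struct
 * mpt_personality with its handlers and hook itself into the core with
 * DECLARE_MPT_PERSONALITY().  The names mpt_foo_load, mpt_foo_attach, etc.
 * are made up for this example.
 *
 *	static struct mpt_personality mpt_foo_personality = {
 *		.name	= "mpt_foo",
 *		.load	= mpt_foo_load,
 *		.attach	= mpt_foo_attach,
 *		.event	= mpt_foo_event,
 *		.reset	= mpt_foo_reset,
 *		.detach	= mpt_foo_detach,
 *		.unload	= mpt_foo_unload,
 *	};
 *	DECLARE_MPT_PERSONALITY(mpt_foo, SI_ORDER_SECOND);
 */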

/******************************* Bus DMA Support ******************************/
/* XXX Need to update bus_dmamap_sync to take a range argument. */
#define	bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op)	\
	bus_dmamap_sync(dma_tag, dmamap, op)

#if __FreeBSD_version >= 501102
#define	mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)					\
	bus_dma_tag_create(parent_tag, alignment, boundary,		\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   busdma_lock_mutex, &Giant,			\
			   dma_tagp)
#else
#define	mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)					\
	bus_dma_tag_create(parent_tag, alignment, boundary,		\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)
#endif

struct mpt_map_info {
	struct mpt_softc	*mpt;
	int			 error;
	uint32_t		 phys;
};

void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);

/**************************** Kernel Thread Support ***************************/
#if __FreeBSD_version > 500005
#define	mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
	kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
#else
#define	mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
	kthread_create(func, farg, proc_ptr, fmtstr, arg)
#endif

/****************************** Timer Facilities ******************************/
#if __FreeBSD_version > 500000
#define	mpt_callout_init(c)	callout_init(c, /*mpsafe*/0);
#else
#define	mpt_callout_init(c)	callout_init(c);
#endif

/********************************* Endianness *********************************/
#define	MPT_2_HOST64(ptr, tag)	ptr->tag = le64toh(ptr->tag)
#define	MPT_2_HOST32(ptr, tag)	ptr->tag = le32toh(ptr->tag)
#define	MPT_2_HOST16(ptr, tag)	ptr->tag = le16toh(ptr->tag)

#define	HOST_2_MPT64(ptr, tag)	ptr->tag = htole64(ptr->tag)
#define	HOST_2_MPT32(ptr, tag)	ptr->tag = htole32(ptr->tag)
#define	HOST_2_MPT16(ptr, tag)	ptr->tag = htole16(ptr->tag)

#if _BYTE_ORDER == _BIG_ENDIAN
void mpt2host_sge_simple_union(SGE_SIMPLE_UNION *);
void mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *);
void mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *);
void mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *);
void mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *);
void mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *);
#else
#define	mpt2host_sge_simple_union(x)		do { ; } while (0)
#define	mpt2host_iocfacts_reply(x)		do { ; } while (0)
#define	mpt2host_portfacts_reply(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc2(x)		do { ; } while (0)
#define	mpt2host_config_page_raid_vol_0(x)	do { ; } while (0)
#define	mpt2host_mpi_raid_vol_indicator(x)	do { ; } while (0)
#endif
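
/*
 * Illustrative sketch only: the conversion macros above swap a message field
 * in place, taking the structure pointer and the field name.  For example, a
 * reply arriving in MPI (little-endian) byte order could be converted before
 * use; "reply" here is a hypothetical MSG_DEFAULT_REPLY pointer, not something
 * declared in this header.
 *
 *	MPT_2_HOST16(reply, IOCStatus);		// reply->IOCStatus = le16toh(..)
 *	MPT_2_HOST32(reply, IOCLogInfo);	// reply->IOCLogInfo = le32toh(..)
 */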

/**************************** MPI Transaction State ***************************/
typedef enum {
	REQ_STATE_NIL		= 0x00,
	REQ_STATE_FREE		= 0x01,
	REQ_STATE_ALLOCATED	= 0x02,
	REQ_STATE_QUEUED	= 0x04,
	REQ_STATE_DONE		= 0x08,
	REQ_STATE_TIMEDOUT	= 0x10,
	REQ_STATE_NEED_WAKEUP	= 0x20,
	REQ_STATE_LOCKED	= 0x80,	/* can't be freed */
	REQ_STATE_MASK		= 0xFF
} mpt_req_state_t;

struct req_entry {
	TAILQ_ENTRY(req_entry)	 links;		/* Pointer to next in list */
	mpt_req_state_t		 state;		/* Request State Information */
	uint16_t		 index;		/* Index of this entry */
	uint16_t		 IOCStatus;	/* Completion status */
	uint16_t		 ResponseCode;	/* TMF Response Code */
	uint16_t		 serno;		/* serial number */
	union ccb		*ccb;		/* CAM request */
	void			*req_vbuf;	/* Virtual Address of Entry */
	void			*sense_vbuf;	/* Virtual Address of sense data */
	bus_addr_t		 req_pbuf;	/* Physical Address of Entry */
	bus_addr_t		 sense_pbuf;	/* Physical Address of sense data */
	bus_dmamap_t		 dmap;		/* DMA map for data buffers */
	struct req_entry	*chain;		/* for SGE overallocations */
};

/**************************** MPI Target State Info ***************************/

typedef struct {
	uint32_t reply_desc;	/* current reply descriptor */
	uint32_t resid;		/* current data residual */
	uint32_t bytes_xfered;	/* current relative offset */
	union ccb *ccb;		/* pointer to currently active ccb */
	request_t *req;		/* pointer to currently active assist request */
	uint32_t
		is_local : 1,
		nxfers	 : 31;
	uint32_t tag_id;
	enum {
		TGT_STATE_NIL,
		TGT_STATE_LOADING,
		TGT_STATE_LOADED,
		TGT_STATE_IN_CAM,
		TGT_STATE_SETTING_UP_FOR_DATA,
		TGT_STATE_MOVING_DATA,
		TGT_STATE_MOVING_DATA_AND_STATUS,
		TGT_STATE_SENDING_STATUS
	} state;
} mpt_tgt_state_t;

/*
 * When we get an incoming command it has its own tag which is called the
 * IoIndex. This is the value we gave that particular command buffer when
 * we originally assigned it. It's just a number, really. The FC card uses
 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
 * contains pointers to the request_t structures related to that IoIndex.
 *
 * What *we* do is construct a tag out of the index for the target command
 * which owns the incoming ATIO plus a rolling sequence number.
 */
#define	MPT_MAKE_TAGID(mpt, req, ioindex)	\
 ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff))

#ifdef	INVARIANTS
#define	MPT_TAG_2_REQ(a, b)		mpt_tag_2_req(a, (uint32_t) b)
#else
#define	MPT_TAG_2_REQ(mpt, tag)		mpt->tgt_cmd_ptrs[tag >> 18]
#endif
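
/*
 * Illustrative layout of the tag produced by MPT_MAKE_TAGID() above, derived
 * directly from the shifts and masks in the macro:
 *
 *	 31              18 17       12 11            0
 *	+------------------+-----------+---------------+
 *	|      IoIndex     | sequence  |  req->index   |
 *	+------------------+-----------+---------------+
 *
 * so MPT_TAG_2_REQ() only needs "tag >> 18" to recover the IoIndex and look
 * up the owning command buffer in mpt->tgt_cmd_ptrs[].
 */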

#define	MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
    (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))

STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
#define	MPT_MAX_LUNS	256
typedef struct {
	struct mpt_hdr_stailq	atios;
	struct mpt_hdr_stailq	inots;
	int			enabled;
} tgt_resource_t;
#define	MPT_MAX_ELS	64

/**************************** Handler Registration ****************************/
/*
 * Global table of registered reply handlers.  The
 * handler is indicated by byte 3 of the request
 * index submitted to the IOC.  This allows the
 * driver core to perform generic processing without
 * any knowledge of per-personality behavior.
 *
 * MPT_NUM_REPLY_HANDLERS must be a power of 2
 * to allow the easy generation of a mask.
 *
 * The handler offsets used by the core are hard coded,
 * allowing faster code generation when assigning a handler
 * to a request.  All "personalities" must use the
 * handler registration mechanism.
 *
 * The IOC handlers that are rarely executed are placed
 * at the tail of the table to make it more likely that
 * all commonly executed handlers fit in a single cache
 * line.
 */
#define	MPT_NUM_REPLY_HANDLERS		(32)
#define	MPT_REPLY_HANDLER_EVENTS	MPT_CBI_TO_HID(0)
#define	MPT_REPLY_HANDLER_CONFIG	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
#define	MPT_REPLY_HANDLER_HANDSHAKE	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
typedef union {
	mpt_reply_handler_t	*reply_handler;
} mpt_handler_t;

typedef enum {
	MPT_HANDLER_REPLY,
	MPT_HANDLER_EVENT,
	MPT_HANDLER_RESET,
	MPT_HANDLER_SHUTDOWN
} mpt_handler_type;

struct mpt_handler_record
{
	LIST_ENTRY(mpt_handler_record)	links;
	mpt_handler_t			handler;
};

LIST_HEAD(mpt_handler_list, mpt_handler_record);

/*
 * The handler_id is currently unused but would contain the
 * handler ID used in the MsgContext field to allow direction
 * of replies to the handler.  Registrations that don't require
 * a handler id can pass in NULL for the handler_id.
 *
 * Deregistrations for handlers without a handler id should
 * pass in MPT_HANDLER_ID_NONE.
 */
#define	MPT_HANDLER_ID_NONE		(0xFFFFFFFF)
int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
    mpt_handler_t, uint32_t *);
int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
    mpt_handler_t, uint32_t);
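
/*
 * Illustrative sketch only: registering a reply handler and obtaining the
 * handler id to place in outgoing MsgContext values.  "mpt_foo_reply_handler"
 * is a hypothetical mpt_reply_handler_t, not something declared here.
 *
 *	mpt_handler_t handler;
 *	uint32_t handler_id;
 *
 *	handler.reply_handler = mpt_foo_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &handler_id);
 *	...
 *	(void) mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    handler_id);
 */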

/******************* Per-Controller Instance Data Structures ******************/
TAILQ_HEAD(req_queue, req_entry);

/* Structure for saving proper values for modifiable PCI config registers */
struct mpt_pci_cfg {
	uint16_t Command;
	uint16_t LatencyTimer_LineSize;
	uint32_t IO_BAR;
	uint32_t Mem0_BAR[2];
	uint32_t Mem1_BAR[2];
	uint32_t ROM_BAR;
	uint8_t  IntLine;
	uint32_t PMCSR;
};

typedef enum {
	MPT_RVF_NONE		= 0x0,
	MPT_RVF_ACTIVE		= 0x1,
	MPT_RVF_ANNOUNCED	= 0x2,
	MPT_RVF_UP2DATE		= 0x4,
	MPT_RVF_REFERENCED	= 0x8,
	MPT_RVF_WCE_CHANGED	= 0x10
} mpt_raid_volume_flags;

struct mpt_raid_volume {
	CONFIG_PAGE_RAID_VOL_0	*config_page;
	MPI_RAID_VOL_INDICATOR	 sync_progress;
	mpt_raid_volume_flags	 flags;
	u_int			 quiesced_disks;
};

typedef enum {
	MPT_RDF_NONE		= 0x00,
	MPT_RDF_ACTIVE		= 0x01,
	MPT_RDF_ANNOUNCED	= 0x02,
	MPT_RDF_UP2DATE		= 0x04,
	MPT_RDF_REFERENCED	= 0x08,
	MPT_RDF_QUIESCING	= 0x10,
	MPT_RDF_QUIESCED	= 0x20
} mpt_raid_disk_flags;

struct mpt_raid_disk {
	CONFIG_PAGE_RAID_PHYS_DISK_0	 config_page;
	struct mpt_raid_volume		*volume;
	u_int				 member_number;
	u_int				 pass_thru_active;
	mpt_raid_disk_flags		 flags;
};

struct mpt_evtf_record {
	MSG_EVENT_NOTIFY_REPLY		reply;
	uint32_t			context;
	LIST_ENTRY(mpt_evtf_record)	links;
};

LIST_HEAD(mpt_evtf_list, mpt_evtf_record);

struct mpt_softc {
	device_t		dev;
#if __FreeBSD_version < 500000
	uint32_t		mpt_islocked;
	int			mpt_splsaved;
#else
	struct mtx		mpt_lock;
	int			mpt_locksetup;
#endif
	uint32_t		mpt_pers_mask;
	uint32_t
				: 8,
		unit		: 8,
		ready		: 1,
		fw_uploaded	: 1,
		msi_enable	: 1,
		twildcard	: 1,
		tenabled	: 1,
		do_cfg_role	: 1,
		raid_enabled	: 1,
		raid_mwce_set	: 1,
		getreqwaiter	: 1,
		shutdwn_raid	: 1,
		shutdwn_recovery: 1,
		outofbeer	: 1,
		disabled	: 1,
		is_spi		: 1,
		is_sas		: 1,
		is_fc		: 1;

	u_int			cfg_role;
	u_int			role;	/* role: none, ini, target, both */

	u_int			verbose;
#ifdef	MPT_TEST_MULTIPATH
	int			failure_id;
#endif

	/*
	 * IOC Facts
	 */
	MSG_IOC_FACTS_REPLY	ioc_facts;

	/*
	 * Port Facts
	 */
	MSG_PORT_FACTS_REPLY *	port_facts;
#define	mpt_ini_id	port_facts[0].PortSCSIID
#define	mpt_max_tgtcmds	port_facts[0].MaxPostedCmdBuffers

	/*
	 * Device Configuration Information
	 */
	union {
		struct mpt_spi_cfg {
			CONFIG_PAGE_SCSI_PORT_0		_port_page0;
			CONFIG_PAGE_SCSI_PORT_1		_port_page1;
			CONFIG_PAGE_SCSI_PORT_2		_port_page2;
			CONFIG_PAGE_SCSI_DEVICE_0	_dev_page0[16];
			CONFIG_PAGE_SCSI_DEVICE_1	_dev_page1[16];
			uint16_t			_tag_enable;
			uint16_t			_disc_enable;
		} spi;
#define	mpt_port_page0		cfg.spi._port_page0
#define	mpt_port_page1		cfg.spi._port_page1
#define	mpt_port_page2		cfg.spi._port_page2
#define	mpt_dev_page0		cfg.spi._dev_page0
#define	mpt_dev_page1		cfg.spi._dev_page1
#define	mpt_tag_enable		cfg.spi._tag_enable
#define	mpt_disc_enable		cfg.spi._disc_enable
		struct mpi_fc_cfg {
			CONFIG_PAGE_FC_PORT_0	_port_page0;
			uint32_t		_port_speed;
#define	mpt_fcport_page0	cfg.fc._port_page0
#define	mpt_fcport_speed	cfg.fc._port_speed
		} fc;
	} cfg;
#if __FreeBSD_version >= 500000
	/*
	 * Device config information stored up for sysctl to access
	 */
	union {
		struct {
			unsigned int initiator_id;
		} spi;
		struct {
			char wwnn[19];
			char wwpn[19];
		} fc;
	} scinfo;
#endif

	/* Controller Info for RAID information */
	CONFIG_PAGE_IOC_2 *	ioc_page2;
	CONFIG_PAGE_IOC_3 *	ioc_page3;

	/* Raid Data */
	struct mpt_raid_volume	*raid_volumes;
	struct mpt_raid_disk	*raid_disks;
	u_int			raid_max_volumes;
	u_int			raid_max_disks;
	u_int			raid_page0_len;
	u_int			raid_wakeup;
	u_int			raid_rescan;
	u_int			raid_resync_rate;
	u_int			raid_mwce_setting;
	u_int			raid_queue_depth;
	u_int			raid_nonopt_volumes;
	struct proc		*raid_thread;
	struct callout		raid_timer;

	/*
	 * PCI Hardware info
	 */
	int			pci_msi_count;
	struct resource		*pci_irq;	/* Interrupt map for chip */
	void			*ih;		/* Interrupt handle */
	struct mpt_pci_cfg	pci_cfg;	/* saved PCI conf registers */

	/*
	 * DMA Mapping Stuff
	 */
	struct resource		*pci_reg;	/* Register map for chip */
	int			pci_mem_rid;	/* Resource ID */
	bus_space_tag_t		pci_st;		/* Bus tag for registers */
	bus_space_handle_t	pci_sh;		/* Bus handle for registers */
	/* PIO versions of above. */
	int			pci_pio_rid;
	struct resource		*pci_pio_reg;
	bus_space_tag_t		pci_pio_st;
	bus_space_handle_t	pci_pio_sh;

	bus_dma_tag_t		parent_dmat;	/* DMA tag for parent PCI bus */
	bus_dma_tag_t		reply_dmat;	/* DMA tag for reply memory */
	bus_dmamap_t		reply_dmap;	/* DMA map for reply memory */
	uint8_t			*reply;		/* KVA of reply memory */
	bus_addr_t		reply_phys;	/* BusAddr of reply memory */

	bus_dma_tag_t		buffer_dmat;	/* DMA tag for buffers */
	bus_dma_tag_t		request_dmat;	/* DMA tag for request memory */
	bus_dmamap_t		request_dmap;	/* DMA map for request memory */
	uint8_t			*request;	/* KVA of Request memory */
	bus_addr_t		request_phys;	/* BusAddr of request memory */

	uint32_t		max_seg_cnt;	/* calculated after IOC facts */

	/*
	 * Hardware management
	 */
	u_int			reset_cnt;

	/*
	 * CAM && Software Management
	 */
	request_t		*request_pool;
	struct req_queue	request_free_list;
	struct req_queue	request_pending_list;
	struct req_queue	request_timeout_list;

	struct cam_sim		*sim;
	struct cam_path		*path;

	struct cam_sim		*phydisk_sim;
	struct cam_path		*phydisk_path;

	struct proc		*recovery_thread;
	request_t		*tmf_req;

	/*
	 * Deferred frame acks due to resource shortage.
	 */
	struct mpt_evtf_list	ack_frames;

	/*
	 * Target Mode Support
	 */
	uint32_t		scsi_tgt_handler_id;
	request_t		**tgt_cmd_ptrs;
	request_t		**els_cmd_ptrs;	/* FC only */

	/*
	 * *snork*- this is chosen to be here *just in case* somebody
	 * forgets to point to it exactly and we index off of trt with
	 * CAM_LUN_WILDCARD.
	 */
	tgt_resource_t		trt_wildcard;		/* wildcard luns */
	tgt_resource_t		trt[MPT_MAX_LUNS];
	uint16_t		tgt_cmds_allocated;
	uint16_t		els_cmds_allocated;	/* FC only */

	uint16_t		timeouts;	/* timeout count */
	uint16_t		success;	/* successes after timeout */
	uint16_t		sequence;	/* Sequence Number */
	uint16_t		pad3;

	/* Paired port in some dual-adapter configurations */
	struct mpt_softc	*mpt2;

	/* FW Image management */
	uint32_t		fw_image_size;
	uint8_t			*fw_image;
	bus_dma_tag_t		fw_dmat;	/* DMA tag for firmware image */
	bus_dmamap_t		fw_dmap;	/* DMA map for firmware image */
	bus_addr_t		fw_phys;	/* BusAddr of firmware image */

	/* Shutdown Event Handler. */
	eventhandler_tag	eh;

	TAILQ_ENTRY(mpt_softc)	links;
};

static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);

static __inline void
mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
{
	if ((req->serno = mpt->sequence++) == 0) {
		req->serno = mpt->sequence++;
	}
}
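
/*
 * Illustrative sketch only: the typical life cycle of a request_t, using
 * routines declared later in this header.  Error handling and the actual
 * construction of the MPI request frame in req->req_vbuf are omitted; the
 * timeout and sleep arguments shown are placeholders, not recommendations.
 *
 *	request_t *req;
 *
 *	req = mpt_get_request(mpt, TRUE);
 *	... build the MPI request frame in req->req_vbuf ...
 *	mpt_send_cmd(mpt, req);
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    TRUE, 1000);
 *	mpt_free_request(mpt, req);
 */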

/***************************** Locking Primitives *****************************/
#if __FreeBSD_version < 500000
#define	MPT_IFLAGS		INTR_TYPE_CAM
#define	MPT_LOCK(mpt)		mpt_lockspl(mpt)
#define	MPT_UNLOCK(mpt)		mpt_unlockspl(mpt)
#define	MPT_OWNED(mpt)		mpt->mpt_islocked
#define	MPTLOCK_2_CAMLOCK	MPT_UNLOCK
#define	CAMLOCK_2_MPTLOCK	MPT_LOCK
#define	MPT_LOCK_SETUP(mpt)
#define	MPT_LOCK_DESTROY(mpt)

static __inline void mpt_lockspl(struct mpt_softc *mpt);
static __inline void mpt_unlockspl(struct mpt_softc *mpt);

static __inline void
mpt_lockspl(struct mpt_softc *mpt)
{
	int s;

	s = splcam();
	if (mpt->mpt_islocked++ == 0) {
		mpt->mpt_splsaved = s;
	} else {
		splx(s);
		panic("Recursed lock with mask: 0x%x\n", s);
	}
}

static __inline void
mpt_unlockspl(struct mpt_softc *mpt)
{
	if (mpt->mpt_islocked) {
		if (--mpt->mpt_islocked == 0) {
			splx(mpt->mpt_splsaved);
		}
	} else
		panic("Negative lock count\n");
}

static __inline int
mpt_sleep(struct mpt_softc *mpt, void *ident, int priority,
    const char *wmesg, int timo)
{
	int saved_cnt;
	int saved_spl;
	int error;

	KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep"));
	saved_cnt = mpt->mpt_islocked;
	saved_spl = mpt->mpt_splsaved;
	mpt->mpt_islocked = 0;
	error = tsleep(ident, priority, wmesg, timo);
	KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup"));
	mpt->mpt_islocked = saved_cnt;
	mpt->mpt_splsaved = saved_spl;
	return (error);
}

#else
#ifdef	LOCKING_WORKED_AS_IT_SHOULD
#error "Shouldn't Be Here!"
#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
#define	MPT_LOCK_SETUP(mpt)						\
		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
		mpt->mpt_locksetup = 1
#define	MPT_LOCK_DESTROY(mpt)						\
	if (mpt->mpt_locksetup) {					\
		mtx_destroy(&mpt->mpt_lock);				\
		mpt->mpt_locksetup = 0;					\
	}

#define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
#define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
#define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
#define	MPTLOCK_2_CAMLOCK(mpt)	\
	mtx_unlock(&(mpt)->mpt_lock); mtx_lock(&Giant)
#define	CAMLOCK_2_MPTLOCK(mpt)	\
	mtx_unlock(&Giant); mtx_lock(&(mpt)->mpt_lock)
#define	mpt_sleep(mpt, ident, priority, wmesg, timo)		\
	msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)

#else

#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY
#define	MPT_LOCK_SETUP(mpt)	do { } while (0)
#define	MPT_LOCK_DESTROY(mpt)	do { } while (0)
#if 0
#define	MPT_LOCK(mpt)		\
	device_printf(mpt->dev, "LOCK %s:%d\n", __FILE__, __LINE__);	\
	KASSERT(mpt->mpt_locksetup == 0,				\
	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
	mpt->mpt_locksetup = 1
#define	MPT_UNLOCK(mpt)		\
	device_printf(mpt->dev, "UNLK %s:%d\n", __FILE__, __LINE__);	\
	KASSERT(mpt->mpt_locksetup == 1,				\
	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
	mpt->mpt_locksetup = 0
#else
#define	MPT_LOCK(mpt)		\
	KASSERT(mpt->mpt_locksetup == 0,				\
	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
	mpt->mpt_locksetup = 1
#define	MPT_UNLOCK(mpt)		\
	KASSERT(mpt->mpt_locksetup == 1,				\
	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
	mpt->mpt_locksetup = 0
#endif
#define	MPT_OWNED(mpt)		mpt->mpt_locksetup
#define	MPTLOCK_2_CAMLOCK(mpt)	MPT_UNLOCK(mpt)
#define	CAMLOCK_2_MPTLOCK(mpt)	MPT_LOCK(mpt)

static __inline int
mpt_sleep(struct mpt_softc *, void *, int, const char *, int);

static __inline int
mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t)
{
	int r;
	MPT_UNLOCK(mpt);
	r = tsleep(i, p, w, t);
	MPT_LOCK(mpt);
	return (r);
}
#endif
#endif
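
/*
 * Illustrative sketch only: regardless of which implementation is selected
 * above, the macros are used in the usual bracketed style, with mpt_sleep()
 * dropping and re-acquiring the driver's lock (or saved spl state) around
 * tsleep()/msleep().  "condition_not_met", "wait_channel" and the priority
 * value are placeholders for this example.
 *
 *	MPT_LOCK(mpt);
 *	while (condition_not_met)
 *		mpt_sleep(mpt, &wait_channel, PUSER, "mptwait", hz);
 *	MPT_UNLOCK(mpt);
 */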

/******************************* Register Access ******************************/
static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_read(struct mpt_softc *, int);
static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);

static __inline void
mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
}

static __inline uint32_t
mpt_read(struct mpt_softc *mpt, int offset)
{
	return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
}

/*
 * Some operations (e.g. diagnostic register writes while the ARM processor
 * is disabled) must be performed using "PCI pio" operations.  On non-PCI
 * busses, these operations likely map to normal register accesses.
 */
static __inline void
mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
}

static __inline uint32_t
mpt_pio_read(struct mpt_softc *mpt, int offset)
{
	return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
}

/*********************** Reply Frame/Request Management ***********************/
/* Max MPT Reply we are willing to accept (must be power of 2) */
#define	MPT_REPLY_SIZE		256

/*
 * Must be less than 16384 in order for target mode to work
 */
#define	MPT_MAX_REQUESTS(mpt)	512
#define	MPT_REQUEST_AREA	512
#define	MPT_SENSE_SIZE		32	/* included in MPT_REQUEST_AREA */
#define	MPT_REQ_MEM_SIZE(mpt)	(MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)

#define	MPT_CONTEXT_CB_SHIFT	(16)
#define	MPT_CBI(handle)		(handle >> MPT_CONTEXT_CB_SHIFT)
#define	MPT_CBI_TO_HID(cbi)	((cbi) << MPT_CONTEXT_CB_SHIFT)
#define	MPT_CONTEXT_TO_CBI(x)	\
    (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
#define	MPT_CONTEXT_REQI_MASK	0xFFFF
#define	MPT_CONTEXT_TO_REQI(x)	((x) & MPT_CONTEXT_REQI_MASK)
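
/*
 * Illustrative example with a made-up MsgContext value: 0x001F0004
 * decomposes into a callback index of 0x1F and a request index of 0x0004,
 *
 *	MPT_CONTEXT_TO_CBI(0x001F0004)  == 0x1F
 *	MPT_CONTEXT_TO_REQI(0x001F0004) == 0x0004
 *
 * which is how a reply is routed back to the registered handler and to the
 * request_t that originated it.
 */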

/*
 * Convert a 32bit physical address returned from IOC to an
 * offset into our reply frame memory or the kvm address needed
 * to access the data.  The returned address is only the low
 * 32 bits, so mask our base physical address accordingly.
 */
#define	MPT_REPLY_BADDR(x)		\
	(x << 1)
#define	MPT_REPLY_OTOV(m, i)		\
	((void *)(&m->reply[i]))

#define	MPT_DUMP_REPLY_FRAME(mpt, reply_frame)		\
do {							\
	if (mpt->verbose > MPT_PRT_DEBUG)		\
		mpt_dump_reply_frame(mpt, reply_frame);	\
} while(0)

static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);

/*
 * Give the reply buffer back to the IOC after we have
 * finished processing it.
 */
static __inline void
mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
{
	mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
}

/* Get a reply from the IOC */
static __inline uint32_t
mpt_pop_reply_queue(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
}

void
mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);

/************************* Scatter Gather Management **************************/
/* MPT_RQSL- size of request frame, in bytes */
#define	MPT_RQSL(mpt)		(mpt->ioc_facts.RequestFrameSize << 2)

/* MPT_NSGL- how many SG entries can fit in a request frame size */
#define	MPT_NSGL(mpt)		(MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))

/* MPT_NRFM- how many request frames can fit in each request alloc we make */
#define	MPT_NRFM(mpt)		(MPT_REQUEST_AREA / MPT_RQSL(mpt))

/*
 * MPT_NSGL_FIRST- # of SG elements that can fit after
 * an I/O request but still within the request frame.
 * Do this safely based upon SGE_IO_UNION.
 *
 * Note that the first element is *within* the SCSI request.
 */
#define	MPT_NSGL_FIRST(mpt)	\
    ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
    sizeof (SGE_IO_UNION))
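
/*
 * Worked example with made-up numbers (the real values come from the MPI
 * headers and from IOC Facts at run time): if MPT_RQSL(mpt) were 128 bytes,
 * sizeof (SGE_IO_UNION) were 16 and sizeof (MSG_SCSI_IO_REQUEST) were 48,
 * then MPT_NSGL(mpt) would be 8, MPT_NRFM(mpt) would be 4 (512 / 128), and
 * MPT_NSGL_FIRST(mpt) would be (128 - 48 + 16) / 16 == 6.
 */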

/***************************** IOC Initialization *****************************/
int mpt_reset(struct mpt_softc *, int /*reinit*/);

/****************************** Debugging *************************************/
typedef struct mpt_decode_entry {
	char	*name;
	u_int	 value;
	u_int	 mask;
} mpt_decode_entry_t;

int mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries,
    const char *name, u_int value, u_int *cur_column, u_int wrap_point);

void mpt_dump_data(struct mpt_softc *, const char *, void *, int);
void mpt_dump_request(struct mpt_softc *, request_t *);

enum {
	MPT_PRT_ALWAYS,
	MPT_PRT_FATAL,
	MPT_PRT_ERROR,
	MPT_PRT_WARN,
	MPT_PRT_INFO,
	MPT_PRT_NEGOTIATION,
	MPT_PRT_DEBUG,
	MPT_PRT_DEBUG1,
	MPT_PRT_DEBUG2,
	MPT_PRT_DEBUG3,
	MPT_PRT_TRACE,
	MPT_PRT_NONE=100
};

#if __FreeBSD_version > 500000
#define	mpt_lprt(mpt, level, ...)		\
do {						\
	if (level <= (mpt)->verbose)		\
		mpt_prt(mpt, __VA_ARGS__);	\
} while (0)

#define	mpt_lprtc(mpt, level, ...)		\
do {						\
	if (level <= (mpt)->debug_level)	\
		mpt_prtc(mpt, __VA_ARGS__);	\
} while (0)
#else
void mpt_lprt(struct mpt_softc *, int, const char *, ...)
	__printflike(3, 4);
void mpt_lprtc(struct mpt_softc *, int, const char *, ...)
	__printflike(3, 4);
#endif
void mpt_prt(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);
void mpt_prtc(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);

/**************************** Target Mode Related *****************************/
static __inline int mpt_cdblen(uint8_t, int);
static __inline int
mpt_cdblen(uint8_t cdb0, int maxlen)
{
	int group = cdb0 >> 5;
	switch (group) {
	case 0:
		return (6);
	case 1:
		return (10);
	case 4:
	case 5:
		return (12);
	default:
		return (16);
	}
}
#ifdef	INVARIANTS
static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
static __inline request_t *
mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
{
	uint16_t rtg = (tag >> 18);
	KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d\n", tag));
	KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
	KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
	return (mpt->tgt_cmd_ptrs[rtg]);
}

static __inline int
mpt_req_on_free_list(struct mpt_softc *, request_t *);
static __inline int
mpt_req_on_pending_list(struct mpt_softc *, request_t *);

static __inline void
mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
static __inline void
mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);

/*
 * Is request on freelist?
 */
static __inline int
mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}

/*
 * Is request on pending list?
 */
static __inline int
mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}

/*
 * Make sure that req *is* part of one of the special lists
 */
static __inline void
mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		if (req == mpt->els_cmd_ptrs[i]) {
			return;
		}
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		if (req == mpt->tgt_cmd_ptrs[i]) {
			return;
		}
	}
	panic("%s(%d): req %p:%u function %x not in els or tgt ptrs\n",
	    s, line, req, req->serno,
	    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
}

/*
 * Make sure that req is *not* part of one of the special lists.
 */
static __inline void
mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		KASSERT(req != mpt->els_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d\n",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		KASSERT(req != mpt->tgt_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d\n",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
}
#endif

/*
 * Task Management Types, purely for internal consumption
 */
typedef enum {
	MPT_ABORT_TASK_SET=1234,
	MPT_CLEAR_TASK_SET,
	MPT_TARGET_RESET,
	MPT_CLEAR_ACA,
	MPT_TERMINATE_TASK,
	MPT_NIL_TMT_VALUE=5678
} mpt_task_mgmt_t;

/**************************** Unclassified Routines ***************************/
void		mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
int		mpt_recv_handshake_reply(struct mpt_softc *mpt,
		    size_t reply_len, void *reply);
int		mpt_wait_req(struct mpt_softc *mpt, request_t *req,
		    mpt_req_state_t state, mpt_req_state_t mask,
		    int sleep_ok, int time_ms);
void		mpt_enable_ints(struct mpt_softc *mpt);
void		mpt_disable_ints(struct mpt_softc *mpt);
int		mpt_attach(struct mpt_softc *mpt);
int		mpt_shutdown(struct mpt_softc *mpt);
int		mpt_detach(struct mpt_softc *mpt);
int		mpt_send_handshake_cmd(struct mpt_softc *mpt,
		    size_t len, void *cmd);
request_t *	mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
void		mpt_free_request(struct mpt_softc *mpt, request_t *req);
void		mpt_intr(void *arg);
void		mpt_check_doorbell(struct mpt_softc *mpt);
void		mpt_dump_reply_frame(struct mpt_softc *mpt,
		    MSG_DEFAULT_REPLY *reply_frame);

void		mpt_set_config_regs(struct mpt_softc *);
int		mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
		    u_int /*Action*/, u_int /*PageVersion*/,
		    u_int /*PageLength*/, u_int /*PageNumber*/,
		    u_int /*PageType*/, uint32_t /*PageAddress*/,
		    bus_addr_t /*addr*/, bus_size_t /*len*/,
		    int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
		    int /*PageNumber*/, uint32_t /*PageAddress*/,
		    CONFIG_PAGE_HEADER *,
		    int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
		    uint32_t /*PageAddress*/,
		    CONFIG_PAGE_HEADER *, size_t /*len*/,
		    int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
		    uint32_t /*PageAddress*/,
		    CONFIG_PAGE_HEADER *, size_t /*len*/,
		    int /*sleep_ok*/, int /*timeout_ms*/);
static __inline int
mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
    CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, int timeout_ms)
{
	return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    PageAddress, hdr, len, sleep_ok, timeout_ms));
}

static __inline int
mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
    CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, int timeout_ms)
{
	return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
	    PageAddress, hdr, len, sleep_ok, timeout_ms));
}
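
/*
 * Illustrative sketch only: reading the current version of a configuration
 * page with the wrapper above.  "page" stands for a caller-supplied MPI
 * config page structure whose leading CONFIG_PAGE_HEADER was previously
 * filled in (e.g. via mpt_read_cfg_header()); it is not defined here, and
 * the sleep/timeout arguments are placeholders.
 *
 *	error = mpt_read_cur_cfg_page(mpt, PageAddress,
 *	    &page.Header, sizeof(page), FALSE, 5000);
 */
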
/* mpt_debug.c functions */
void mpt_print_reply(void *vmsg);
void mpt_print_db(uint32_t mb);
void mpt_print_config_reply(void *vmsg);
char *mpt_ioc_diag(uint32_t diag);
void mpt_req_state(mpt_req_state_t state);
void mpt_print_config_request(void *vmsg);
void mpt_print_request(void *vmsg);
void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg);
void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
#endif /* _MPT_H_ */