/* $FreeBSD$ */
/*-
 * Generic defines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MPT_H_
#define	_MPT_H_

/********************************* OS Includes ********************************/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/types.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/resource.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "opt_ddb.h"

/**************************** Register Definitions ****************************/
#include <dev/mpt/mpt_reg.h>

/******************************* MPI Definitions ******************************/
#include <dev/mpt/mpilib/mpi_type.h>
#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_cnfg.h>
#include <dev/mpt/mpilib/mpi_ioc.h>
#include <dev/mpt/mpilib/mpi_raid.h>

/* XXX For mpt_debug.c */
#include <dev/mpt/mpilib/mpi_init.h>

#define	MPT_S64_2_SCALAR(y)	((((int64_t)y.High) << 32) | (y.Low))
#define	MPT_U64_2_SCALAR(y)	((((uint64_t)y.High) << 32) | (y.Low))

/****************************** Misc Definitions ******************************/
/* #define MPT_TEST_MULTIPATH	1 */
#define	MPT_OK		(0)
#define	MPT_FAIL	(0x10000)

#define	NUM_ELEMENTS(array)	(sizeof(array) / sizeof(*array))

#define	MPT_ROLE_NONE		0
#define	MPT_ROLE_INITIATOR	1
#define	MPT_ROLE_TARGET		2
#define	MPT_ROLE_BOTH		3
#define	MPT_ROLE_DEFAULT	MPT_ROLE_INITIATOR

#define	MPT_INI_ID_NONE		-1

/**************************** Forward Declarations ****************************/
struct mpt_softc;
struct mpt_personality;
typedef struct req_entry request_t;

/************************* Personality Module Support *************************/
typedef int mpt_load_handler_t(struct mpt_personality *);
typedef int mpt_probe_handler_t(struct mpt_softc *);
typedef int mpt_attach_handler_t(struct mpt_softc *);
typedef int mpt_enable_handler_t(struct mpt_softc *);
typedef void mpt_ready_handler_t(struct mpt_softc *);
typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
				MSG_EVENT_NOTIFY_REPLY *);
typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
/* XXX Add return value and use for veto? */
typedef void mpt_shutdown_handler_t(struct mpt_softc *);
typedef void mpt_detach_handler_t(struct mpt_softc *);
typedef int mpt_unload_handler_t(struct mpt_personality *);

struct mpt_personality
{
	const char		*name;
	uint32_t		 id;		/* Assigned identifier. */
	u_int			 use_count;	/* Instances using personality */
	mpt_load_handler_t	*load;		/* configure personality */
#define	MPT_PERS_FIRST_HANDLER(pers)	(&(pers)->load)
	mpt_probe_handler_t	*probe;		/* probe for personality match */
	mpt_attach_handler_t	*attach;	/* initialize device instance */
	mpt_enable_handler_t	*enable;	/* enable device */
	mpt_ready_handler_t	*ready;		/* final open for business */
	mpt_event_handler_t	*event;		/* Handle MPI event. */
	mpt_reset_handler_t	*reset;		/* Re-init after reset. */
	mpt_shutdown_handler_t	*shutdown;	/* Shutdown instance. */
	mpt_detach_handler_t	*detach;	/* release device instance */
	mpt_unload_handler_t	*unload;	/* Shutdown personality */
#define	MPT_PERS_LAST_HANDLER(pers)	(&(pers)->unload)
};

int mpt_modevent(module_t, int, void *);

/* Maximum supported number of personalities. */
#define	MPT_MAX_PERSONALITIES	(15)

#define	MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
	MODULE_DEPEND(name, dep, vmin, vpref, vmax)

#define	DECLARE_MPT_PERSONALITY(name, order)				  \
	static moduledata_t name##_mod = {				  \
		#name, mpt_modevent, &name##_personality		  \
	};								  \
	DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order);	  \
	MODULE_VERSION(name, 1);					  \
	MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)
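/*
 * Illustrative sketch (not part of this header): how a hypothetical
 * personality module would hook into the declarations above.  The "foo"
 * names are placeholders only; the real personalities live in files such
 * as mpt_cam.c, mpt_raid.c and mpt_user.c.
 *
 *	static mpt_probe_handler_t foo_probe;
 *	static mpt_attach_handler_t foo_attach;
 *	...
 *	static struct mpt_personality foo_personality = {
 *		.name	= "foo",
 *		.probe	= foo_probe,
 *		.attach	= foo_attach,
 *		...
 *	};
 *	DECLARE_MPT_PERSONALITY(foo, SI_ORDER_ANY);
 */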
/******************************* Bus DMA Support ******************************/
/* XXX Need to update bus_dmamap_sync to take a range argument. */
#define	bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op)	\
	bus_dmamap_sync(dma_tag, dmamap, op)

#if __FreeBSD_version < 600000
#define	bus_get_dma_tag(x)	NULL
#endif
#define	mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)					\
	bus_dma_tag_create(parent_tag, alignment, boundary,		\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   busdma_lock_mutex, &(mpt)->mpt_lock,		\
			   dma_tagp)
struct mpt_map_info {
	struct mpt_softc	*mpt;
	int			 error;
	uint32_t		 phys;
};

void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);

/* **************************** NewBUS interrupt Crock ************************/
#if __FreeBSD_version < 700031
#define	mpt_setup_intr(d, i, f, U, if, ifa, hp)	\
	bus_setup_intr(d, i, f, if, ifa, hp)
#else
#define	mpt_setup_intr	bus_setup_intr
#endif

/* **************************** NewBUS CAM Support ****************************/
#if __FreeBSD_version < 700049
#define	mpt_xpt_bus_register(sim, parent, bus)	\
	xpt_bus_register(sim, bus)
#else
#define	mpt_xpt_bus_register	xpt_bus_register
#endif

/**************************** Kernel Thread Support ***************************/
#if __FreeBSD_version > 800001
#define	mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)\
	kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
#define	mpt_kthread_exit(status)	\
	kproc_exit(status)
#else
#define	mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)\
	kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
#define	mpt_kthread_exit(status)	\
	kthread_exit(status)
#endif

/********************************* Endianness *********************************/
#define	MPT_2_HOST64(ptr, tag)	ptr->tag = le64toh(ptr->tag)
#define	MPT_2_HOST32(ptr, tag)	ptr->tag = le32toh(ptr->tag)
#define	MPT_2_HOST16(ptr, tag)	ptr->tag = le16toh(ptr->tag)

#define	HOST_2_MPT64(ptr, tag)	ptr->tag = htole64(ptr->tag)
#define	HOST_2_MPT32(ptr, tag)	ptr->tag = htole32(ptr->tag)
#define	HOST_2_MPT16(ptr, tag)	ptr->tag = htole16(ptr->tag)

#if _BYTE_ORDER == _BIG_ENDIAN
void mpt2host_sge_simple_union(SGE_SIMPLE_UNION *);
void mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *);
void mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *);
void mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *);
void mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *);
void mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *);
void mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
void host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
void mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *);
void mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *);
void mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
void host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
void mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *);
void mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
void host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
void mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *);
void mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *);
void mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *);
#else
#define	mpt2host_sge_simple_union(x)		do { ; } while (0)
#define	mpt2host_iocfacts_reply(x)		do { ; } while (0)
#define	mpt2host_portfacts_reply(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc2(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc3(x)		do { ; } while (0)
#define	mpt2host_config_page_scsi_port_0(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_port_1(x)	do { ; } while (0)
#define	host2mpt_config_page_scsi_port_1(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_port_2(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_device_0(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_device_1(x)	do { ; } while (0)
#define	host2mpt_config_page_scsi_device_1(x)	do { ; } while (0)
#define	mpt2host_config_page_fc_port_0(x)	do { ; } while (0)
#define	mpt2host_config_page_fc_port_1(x)	do { ; } while (0)
#define	host2mpt_config_page_fc_port_1(x)	do { ; } while (0)
#define	mpt2host_config_page_raid_vol_0(x)	do { ; } while (0)
#define	mpt2host_config_page_raid_phys_disk_0(x) \
	do { ; } while (0)
#define	mpt2host_mpi_raid_vol_indicator(x)	do { ; } while (0)
#endif
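/*
 * Illustrative sketch (not part of this header): the conversion helpers
 * above operate in place on structures shared with the IOC, which are
 * always little-endian.  A reply field would typically be swapped before
 * being examined, e.g.:
 *
 *	MSG_DEFAULT_REPLY *reply_frame = ...;
 *	MPT_2_HOST16(reply_frame, IOCStatus);
 *	if ((reply_frame->IOCStatus & MPI_IOCSTATUS_MASK) !=
 *	    MPI_IOCSTATUS_SUCCESS)
 *		...
 *
 * On little-endian hosts the swaps are identity assignments and the
 * mpt2host_*()/host2mpt_*() calls above compile away to nothing.
 */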
/**************************** MPI Transaction State ***************************/
typedef enum {
	REQ_STATE_NIL		= 0x00,
	REQ_STATE_FREE		= 0x01,
	REQ_STATE_ALLOCATED	= 0x02,
	REQ_STATE_QUEUED	= 0x04,
	REQ_STATE_DONE		= 0x08,
	REQ_STATE_TIMEDOUT	= 0x10,
	REQ_STATE_NEED_WAKEUP	= 0x20,
	REQ_STATE_LOCKED	= 0x80,	/* can't be freed */
	REQ_STATE_MASK		= 0xFF
} mpt_req_state_t;

struct req_entry {
	TAILQ_ENTRY(req_entry) links;	/* Pointer to next in list */
	mpt_req_state_t	state;		/* Request State Information */
	uint16_t	index;		/* Index of this entry */
	uint16_t	IOCStatus;	/* Completion status */
	uint16_t	ResponseCode;	/* TMF Response Code */
	uint16_t	serno;		/* serial number */
	union ccb      *ccb;		/* CAM request */
	void	       *req_vbuf;	/* Virtual Address of Entry */
	void	       *sense_vbuf;	/* Virtual Address of sense data */
	bus_addr_t	req_pbuf;	/* Physical Address of Entry */
	bus_addr_t	sense_pbuf;	/* Physical Address of sense data */
	bus_dmamap_t	dmap;		/* DMA map for data buffers */
	struct req_entry *chain;	/* for SGE overallocations */
	struct callout	callout;	/* Timeout for the request */
};

typedef struct mpt_config_params {
	u_int		Action;
	u_int		PageVersion;
	u_int		PageLength;
	u_int		PageNumber;
	u_int		PageType;
	u_int		PageAddress;
	u_int		ExtPageLength;
	u_int		ExtPageType;
} cfgparms_t;
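/*
 * Illustrative sketch (not part of this header): cfgparms_t mirrors the
 * fields of an MPI CONFIG action request.  A caller reading a page would
 * roughly fill it from a previously fetched CONFIG_PAGE_HEADER before
 * handing it to mpt_issue_cfg_req() (declared further below), e.g.:
 *
 *	cfgparms_t params;
 *
 *	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 *	params.PageVersion = hdr->PageVersion;
 *	params.PageLength = hdr->PageLength;
 *	params.PageNumber = hdr->PageNumber;
 *	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
 *	params.PageAddress = PageAddress;
 *
 * The ExtPageLength/ExtPageType fields are only meaningful for extended
 * (e.g. SAS) configuration pages.
 */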
/**************************** MPI Target State Info ***************************/

typedef struct {
	uint32_t reply_desc;	/* current reply descriptor */
	uint32_t resid;		/* current data residual */
	uint32_t bytes_xfered;	/* current relative offset */
	union ccb *ccb;		/* pointer to currently active ccb */
	request_t *req;		/* pointer to currently active assist request */
	uint32_t
		is_local : 1,
		nxfers	 : 31;
	uint32_t tag_id;
	enum {
		TGT_STATE_NIL,
		TGT_STATE_LOADING,
		TGT_STATE_LOADED,
		TGT_STATE_IN_CAM,
		TGT_STATE_SETTING_UP_FOR_DATA,
		TGT_STATE_MOVING_DATA,
		TGT_STATE_MOVING_DATA_AND_STATUS,
		TGT_STATE_SENDING_STATUS
	} state;
} mpt_tgt_state_t;

/*
 * When we get an incoming command it has its own tag which is called the
 * IoIndex. This is the value we gave that particular command buffer when
 * we originally assigned it. It's just a number, really. The FC card uses
 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
 * contains pointers to the request_t structures related to that IoIndex.
 *
 * What *we* do is construct a tag out of the index for the target command
 * which owns the incoming ATIO plus a rolling sequence number.
 */
#define	MPT_MAKE_TAGID(mpt, req, ioindex)	\
 ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff))

#ifdef	INVARIANTS
#define	MPT_TAG_2_REQ(a, b)		mpt_tag_2_req(a, (uint32_t) b)
#else
#define	MPT_TAG_2_REQ(mpt, tag)		mpt->tgt_cmd_ptrs[tag >> 18]
#endif

#define	MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
    (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))
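/*
 * Illustrative sketch (not part of this header): the tag produced by
 * MPT_MAKE_TAGID() packs three fields into 32 bits, per the shifts above:
 *
 *	bits 18..31	IoIndex of the target command buffer
 *	bits 12..17	rolling sequence number (mpt->sequence & 0x3f)
 *	bits  0..11	req->index of the owning request
 *
 * MPT_TAG_2_REQ() recovers the command buffer by shifting the IoIndex
 * back out, e.g.:
 *
 *	uint32_t tag = MPT_MAKE_TAGID(mpt, req, ioindex);
 *	request_t *cmd_req = MPT_TAG_2_REQ(mpt, tag);
 */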
STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
#define	MPT_MAX_LUNS	256
typedef struct {
	struct mpt_hdr_stailq	atios;
	struct mpt_hdr_stailq	inots;
	int enabled;
} tgt_resource_t;
#define	MPT_MAX_ELS	64

/**************************** Handler Registration ****************************/
/*
 * Global table of registered reply handlers.  The
 * handler is indicated by byte 3 of the request
 * index submitted to the IOC.  This allows the
 * driver core to perform generic processing without
 * any knowledge of per-personality behavior.
 *
 * MPT_NUM_REPLY_HANDLERS must be a power of 2
 * to allow the easy generation of a mask.
 *
 * The handler offsets used by the core are hard coded
 * allowing faster code generation when assigning a handler
 * to a request.  All "personalities" must use the
 * handler registration mechanism.
 *
 * The IOC handlers that are rarely executed are placed
 * at the tail of the table to make it more likely that
 * all commonly executed handlers fit in a single cache
 * line.
 */
#define	MPT_NUM_REPLY_HANDLERS		(32)
#define	MPT_REPLY_HANDLER_EVENTS	MPT_CBI_TO_HID(0)
#define	MPT_REPLY_HANDLER_CONFIG	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
#define	MPT_REPLY_HANDLER_HANDSHAKE	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
typedef union {
	mpt_reply_handler_t	*reply_handler;
} mpt_handler_t;

typedef enum {
	MPT_HANDLER_REPLY,
	MPT_HANDLER_EVENT,
	MPT_HANDLER_RESET,
	MPT_HANDLER_SHUTDOWN
} mpt_handler_type;

struct mpt_handler_record
{
	LIST_ENTRY(mpt_handler_record)	links;
	mpt_handler_t			handler;
};

LIST_HEAD(mpt_handler_list, mpt_handler_record);

/*
 * The handler_id is currently unused but would contain the
 * handler ID used in the MsgContext field to allow direction
 * of replies to the handler.  Registrations that don't require
 * a handler id can pass in NULL for the handler_id.
 *
 * Deregistrations for handlers without a handler id should
 * pass in MPT_HANDLER_ID_NONE.
 */
#define	MPT_HANDLER_ID_NONE		(0xFFFFFFFF)
int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
			  mpt_handler_t, uint32_t *);
int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
			    mpt_handler_t, uint32_t);

/******************* Per-Controller Instance Data Structures ******************/
TAILQ_HEAD(req_queue, req_entry);

/* Structure for saving proper values for modifiable PCI config registers */
struct mpt_pci_cfg {
	uint16_t Command;
	uint16_t LatencyTimer_LineSize;
	uint32_t IO_BAR;
	uint32_t Mem0_BAR[2];
	uint32_t Mem1_BAR[2];
	uint32_t ROM_BAR;
	uint8_t  IntLine;
	uint32_t PMCSR;
};

typedef enum {
	MPT_RVF_NONE		= 0x0,
	MPT_RVF_ACTIVE		= 0x1,
	MPT_RVF_ANNOUNCED	= 0x2,
	MPT_RVF_UP2DATE		= 0x4,
	MPT_RVF_REFERENCED	= 0x8,
	MPT_RVF_WCE_CHANGED	= 0x10
} mpt_raid_volume_flags;

struct mpt_raid_volume {
	CONFIG_PAGE_RAID_VOL_0	*config_page;
	MPI_RAID_VOL_INDICATOR	 sync_progress;
	mpt_raid_volume_flags	 flags;
	u_int			 quiesced_disks;
};

typedef enum {
	MPT_RDF_NONE		= 0x00,
	MPT_RDF_ACTIVE		= 0x01,
	MPT_RDF_ANNOUNCED	= 0x02,
	MPT_RDF_UP2DATE		= 0x04,
	MPT_RDF_REFERENCED	= 0x08,
	MPT_RDF_QUIESCING	= 0x10,
	MPT_RDF_QUIESCED	= 0x20
} mpt_raid_disk_flags;

struct mpt_raid_disk {
	CONFIG_PAGE_RAID_PHYS_DISK_0	config_page;
	struct mpt_raid_volume		*volume;
	u_int				 member_number;
	u_int				 pass_thru_active;
	mpt_raid_disk_flags		 flags;
};

struct mpt_evtf_record {
	MSG_EVENT_NOTIFY_REPLY		reply;
	uint32_t			context;
	LIST_ENTRY(mpt_evtf_record)	links;
};

LIST_HEAD(mpt_evtf_list, mpt_evtf_record);

struct mptsas_devinfo {
	uint16_t	dev_handle;
	uint16_t	parent_dev_handle;
	uint16_t	enclosure_handle;
	uint16_t	slot;
	uint8_t		phy_num;
	uint8_t		physical_port;
	uint8_t		target_id;
	uint8_t		bus;
	uint64_t	sas_address;
	uint32_t	device_info;
};

struct mptsas_phyinfo {
	uint16_t	handle;
	uint8_t		phy_num;
	uint8_t		port_id;
	uint8_t		negotiated_link_rate;
	uint8_t		hw_link_rate;
	uint8_t		programmed_link_rate;
	uint8_t		sas_port_add_phy;
	struct mptsas_devinfo identify;
	struct mptsas_devinfo attached;
};

struct mptsas_portinfo {
	uint16_t		num_phys;
	struct mptsas_phyinfo	*phy_info;
};
struct mpt_softc {
	device_t		dev;
	struct mtx		mpt_lock;
	int			mpt_locksetup;
	uint32_t		mpt_pers_mask;
	uint32_t
				: 7,
		unit		: 8,
		ready		: 1,
		fw_uploaded	: 1,
		msi_enable	: 1,
		twildcard	: 1,
		tenabled	: 1,
		do_cfg_role	: 1,
		raid_enabled	: 1,
		raid_mwce_set	: 1,
		getreqwaiter	: 1,
		shutdwn_raid	: 1,
		shutdwn_recovery: 1,
		outofbeer	: 1,
		disabled	: 1,
		is_spi		: 1,
		is_sas		: 1,
		is_fc		: 1,
		is_1078		: 1;

	u_int			cfg_role;
	u_int			role;	/* role: none, ini, target, both */

	u_int			verbose;
#ifdef	MPT_TEST_MULTIPATH
	int			failure_id;
#endif

	/*
	 * IOC Facts
	 */
	MSG_IOC_FACTS_REPLY	ioc_facts;

	/*
	 * Port Facts
	 */
	MSG_PORT_FACTS_REPLY *	port_facts;
#define	mpt_max_tgtcmds	port_facts[0].MaxPostedCmdBuffers

	/*
	 * Device Configuration Information
	 */
	union {
		struct mpt_spi_cfg {
			CONFIG_PAGE_SCSI_PORT_0		_port_page0;
			CONFIG_PAGE_SCSI_PORT_1		_port_page1;
			CONFIG_PAGE_SCSI_PORT_2		_port_page2;
			CONFIG_PAGE_SCSI_DEVICE_0	_dev_page0[16];
			CONFIG_PAGE_SCSI_DEVICE_1	_dev_page1[16];
			int				_ini_id;
			uint16_t			_tag_enable;
			uint16_t			_disc_enable;
		} spi;
#define	mpt_port_page0		cfg.spi._port_page0
#define	mpt_port_page1		cfg.spi._port_page1
#define	mpt_port_page2		cfg.spi._port_page2
#define	mpt_dev_page0		cfg.spi._dev_page0
#define	mpt_dev_page1		cfg.spi._dev_page1
#define	mpt_ini_id		cfg.spi._ini_id
#define	mpt_tag_enable		cfg.spi._tag_enable
#define	mpt_disc_enable		cfg.spi._disc_enable
		struct mpi_fc_cfg {
			CONFIG_PAGE_FC_PORT_0	_port_page0;
			uint32_t		_port_speed;
#define	mpt_fcport_page0	cfg.fc._port_page0
#define	mpt_fcport_speed	cfg.fc._port_speed
		} fc;
	} cfg;
	/*
	 * Device config information stored up for sysctl to access
	 */
	union {
		struct {
			unsigned int initiator_id;
		} spi;
		struct {
			char wwnn[19];
			char wwpn[19];
		} fc;
	} scinfo;

	/* Controller Info for RAID information */
	CONFIG_PAGE_IOC_2 *	ioc_page2;
	CONFIG_PAGE_IOC_3 *	ioc_page3;

	/* Raid Data */
	struct mpt_raid_volume*	raid_volumes;
	struct mpt_raid_disk*	raid_disks;
	u_int			raid_max_volumes;
	u_int			raid_max_disks;
	u_int			raid_page0_len;
	u_int			raid_wakeup;
	u_int			raid_rescan;
	u_int			raid_resync_rate;
	u_int			raid_mwce_setting;
	u_int			raid_queue_depth;
	u_int			raid_nonopt_volumes;
	struct proc	       *raid_thread;
	struct callout		raid_timer;

	/*
	 * PCI Hardware info
	 */
	int			pci_msi_count;
	struct resource *	pci_irq;	/* Interrupt map for chip */
	void *			ih;		/* Interrupt handle */
#if 0
	struct mpt_pci_cfg	pci_cfg;	/* saved PCI conf registers */
#endif

	/*
	 * DMA Mapping Stuff
	 */
	struct resource *	pci_reg;	/* Register map for chip */
	bus_space_tag_t		pci_st;		/* Bus tag for registers */
	bus_space_handle_t	pci_sh;		/* Bus handle for registers */
	/* PIO versions of above. */
	struct resource *	pci_pio_reg;
	bus_space_tag_t		pci_pio_st;
	bus_space_handle_t	pci_pio_sh;

	bus_dma_tag_t		parent_dmat;	/* DMA tag for parent PCI bus */
	bus_dma_tag_t		reply_dmat;	/* DMA tag for reply memory */
	bus_dmamap_t		reply_dmap;	/* DMA map for reply memory */
	uint8_t		       *reply;		/* KVA of reply memory */
	bus_addr_t		reply_phys;	/* BusAddr of reply memory */

	bus_dma_tag_t		buffer_dmat;	/* DMA tag for buffers */
	bus_dma_tag_t		request_dmat;	/* DMA tag for request memory */
	bus_dmamap_t		request_dmap;	/* DMA map for request memory */
	uint8_t		       *request;	/* KVA of Request memory */
	bus_addr_t		request_phys;	/* BusAddr of request memory */

	uint32_t		max_seg_cnt;	/* calculated after IOC facts */
	uint32_t		max_cam_seg_cnt;/* calculated from MAXPHYS */

	/*
	 * Hardware management
	 */
	u_int			reset_cnt;

	/*
	 * CAM && Software Management
	 */
	request_t	       *request_pool;
	struct req_queue	request_free_list;
	struct req_queue	request_pending_list;
	struct req_queue	request_timeout_list;

	struct cam_sim	       *sim;
	struct cam_path	       *path;

	struct cam_sim	       *phydisk_sim;
	struct cam_path	       *phydisk_path;

	struct proc	       *recovery_thread;
	request_t	       *tmf_req;

	/*
	 * Deferred frame acks due to resource shortage.
	 */
	struct mpt_evtf_list	ack_frames;

	/*
	 * Target Mode Support
	 */
	uint32_t		scsi_tgt_handler_id;
	request_t **		tgt_cmd_ptrs;
	request_t **		els_cmd_ptrs;	/* FC only */

	/*
	 * *snork*- this is chosen to be here *just in case* somebody
	 * forgets to point to it exactly and we index off of trt with
	 * CAM_LUN_WILDCARD.
	 */
	tgt_resource_t		trt_wildcard;		/* wildcard luns */
	tgt_resource_t		trt[MPT_MAX_LUNS];
	uint16_t		tgt_cmds_allocated;
	uint16_t		els_cmds_allocated;	/* FC only */

	uint16_t		timeouts;	/* timeout count */
	uint16_t		success;	/* successes after timeout */
	uint16_t		sequence;	/* Sequence Number */
	uint16_t		pad3;

	/* Paired port in some dual adapter configurations */
	struct mpt_softc *	mpt2;

	/* FW Image management */
	uint32_t		fw_image_size;
	uint8_t		       *fw_image;
	bus_dma_tag_t		fw_dmat;	/* DMA tag for firmware image */
	bus_dmamap_t		fw_dmap;	/* DMA map for firmware image */
	bus_addr_t		fw_phys;	/* BusAddr of firmware image */

	/* SAS Topology */
	struct mptsas_portinfo	*sas_portinfo;

	/* Shutdown Event Handler. */
	eventhandler_tag	eh;
	/* Userland management interface. */
	struct cdev	       *cdev;

	TAILQ_ENTRY(mpt_softc)	links;
};

static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);

static __inline void
mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
{
	if ((req->serno = mpt->sequence++) == 0) {
		req->serno = mpt->sequence++;
	}
}

/***************************** Locking Primitives *****************************/
#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
#define	MPT_LOCK_SETUP(mpt)						\
		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
		mpt->mpt_locksetup = 1
#define	MPT_LOCK_DESTROY(mpt)						\
	if (mpt->mpt_locksetup) {					\
		mtx_destroy(&mpt->mpt_lock);				\
		mpt->mpt_locksetup = 0;					\
	}

#define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
#define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
#define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
#define	MPT_LOCK_ASSERT(mpt)	mtx_assert(&(mpt)->mpt_lock, MA_OWNED)
#define	mpt_sleep(mpt, ident, priority, wmesg, timo)			\
	msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)
#define	mpt_req_timeout(req, ticks, func, arg)				\
	callout_reset(&(req)->callout, (ticks), (func), (arg))
#define	mpt_req_untimeout(req, func, arg)				\
	callout_stop(&(req)->callout)
#define	mpt_callout_init(mpt, c)					\
	callout_init_mtx(c, &(mpt)->mpt_lock, 0)
#define	mpt_callout_drain(mpt, c)					\
	callout_drain(c)

/******************************* Register Access ******************************/
static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_read(struct mpt_softc *, int);
static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);

static __inline void
mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
}

static __inline uint32_t
mpt_read(struct mpt_softc *mpt, int offset)
{
	return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
}
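/*
 * Illustrative sketch (not part of this header): mpt_write()/mpt_read()
 * are thin wrappers around bus_space accesses to the memory-mapped
 * register window, using register offsets from <dev/mpt/mpt_reg.h>, e.g.:
 *
 *	uint32_t db = mpt_read(mpt, MPT_OFFSET_DOORBELL);
 *	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 *
 * MPT_OFFSET_DOORBELL and MPT_OFFSET_INTR_STATUS are assumed to come from
 * mpt_reg.h; the PIO variants below use the separate I/O-port BAR instead.
 */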
/*
 * Some operations (e.g. diagnostic register writes while the ARM processor
 * is disabled) must be performed using "PCI pio" operations.  On non-PCI
 * busses, these operations likely map to normal register accesses.
 */
static __inline void
mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource"));
	bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
}

static __inline uint32_t
mpt_pio_read(struct mpt_softc *mpt, int offset)
{
	KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource"));
	return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
}

/*********************** Reply Frame/Request Management ***********************/
/* Max MPT Reply we are willing to accept (must be power of 2) */
#define	MPT_REPLY_SIZE		256

/*
 * Must be less than 16384 in order for target mode to work
 */
#define	MPT_MAX_REQUESTS(mpt)	512
#define	MPT_REQUEST_AREA	512
#define	MPT_SENSE_SIZE		32	/* included in MPT_REQUEST_AREA */
#define	MPT_REQ_MEM_SIZE(mpt)	(MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)

#define	MPT_CONTEXT_CB_SHIFT	(16)
#define	MPT_CBI(handle)		(handle >> MPT_CONTEXT_CB_SHIFT)
#define	MPT_CBI_TO_HID(cbi)	((cbi) << MPT_CONTEXT_CB_SHIFT)
#define	MPT_CONTEXT_TO_CBI(x)	\
    (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
#define	MPT_CONTEXT_REQI_MASK	0xFFFF
#define	MPT_CONTEXT_TO_REQI(x)	((x) & MPT_CONTEXT_REQI_MASK)

/*
 * Convert a 32bit physical address returned from IOC to an
 * offset into our reply frame memory or the kvm address needed
 * to access the data.  The returned address is only the low
 * 32 bits, so mask our base physical address accordingly.
 */
#define	MPT_REPLY_BADDR(x)		\
	(x << 1)
#define	MPT_REPLY_OTOV(m, i)		\
	((void *)(&m->reply[i]))

#define	MPT_DUMP_REPLY_FRAME(mpt, reply_frame)		\
do {							\
	if (mpt->verbose > MPT_PRT_DEBUG)		\
		mpt_dump_reply_frame(mpt, reply_frame);	\
} while(0)

static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);

/*
 * Give the reply buffer back to the IOC after we have
 * finished processing it.
 */
static __inline void
mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
{
	mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
}

/* Get a reply from the IOC */
static __inline uint32_t
mpt_pop_reply_queue(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
}

void
mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);

/************************** Scatter Gather Management **************************/
/* MPT_RQSL- size of request frame, in bytes */
#define	MPT_RQSL(mpt)		(mpt->ioc_facts.RequestFrameSize << 2)

/* MPT_NSGL- how many SG entries can fit in a request frame size */
#define	MPT_NSGL(mpt)		(MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))

/* MPT_NRFM- how many request frames can fit in each request alloc we make */
#define	MPT_NRFM(mpt)		(MPT_REQUEST_AREA / MPT_RQSL(mpt))

/*
 * MPT_NSGL_FIRST- # of SG elements that can fit after
 * an I/O request but still within the request frame.
 * Do this safely based upon SGE_IO_UNION.
 *
 * Note that the first element is *within* the SCSI request.
 */
#define	MPT_NSGL_FIRST(mpt)				\
    ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
    sizeof (SGE_IO_UNION))
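/*
 * Worked example (assumed sizes, for illustration only): if the IOC
 * reports a RequestFrameSize of 32 words, MPT_RQSL() is 128 bytes.  With
 * a 12-byte SGE_IO_UNION that gives MPT_NSGL() == 10 SG slots per frame
 * and MPT_NRFM() == 4 request frames per MPT_REQUEST_AREA chunk.  The
 * real values depend on the IOC facts and on sizeof(SGE_IO_UNION) for the
 * compiled configuration.
 */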
/***************************** IOC Initialization *****************************/
int mpt_reset(struct mpt_softc *, int /*reinit*/);

/****************************** Debugging ************************************/
void mpt_dump_data(struct mpt_softc *, const char *, void *, int);
void mpt_dump_request(struct mpt_softc *, request_t *);

enum {
	MPT_PRT_ALWAYS,
	MPT_PRT_FATAL,
	MPT_PRT_ERROR,
	MPT_PRT_WARN,
	MPT_PRT_INFO,
	MPT_PRT_NEGOTIATION,
	MPT_PRT_DEBUG,
	MPT_PRT_DEBUG1,
	MPT_PRT_DEBUG2,
	MPT_PRT_DEBUG3,
	MPT_PRT_TRACE,
	MPT_PRT_NONE=100
};

#define	mpt_lprt(mpt, level, ...)		\
do {						\
	if (level <= (mpt)->verbose)		\
		mpt_prt(mpt, __VA_ARGS__);	\
} while (0)

#if 0
#define	mpt_lprtc(mpt, level, ...)		\
do {						\
	if (level <= (mpt)->verbose)		\
		mpt_prtc(mpt, __VA_ARGS__);	\
} while (0)
#endif

void mpt_prt(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);
void mpt_prtc(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);

/**************************** Target Mode Related ***************************/
static __inline int mpt_cdblen(uint8_t, int);
static __inline int
mpt_cdblen(uint8_t cdb0, int maxlen)
{
	int group = cdb0 >> 5;
	switch (group) {
	case 0:
		return (6);
	case 1:
		return (10);
	case 4:
	case 5:
		return (12);
	default:
		return (16);
	}
}

#ifdef	INVARIANTS
static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
static __inline request_t *
mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
{
	uint16_t rtg = (tag >> 18);
	KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d", tag));
	KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
	KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
	return (mpt->tgt_cmd_ptrs[rtg]);
}
#endif

static __inline int
mpt_req_on_free_list(struct mpt_softc *, request_t *);
static __inline int
mpt_req_on_pending_list(struct mpt_softc *, request_t *);

/*
 * Is request on freelist?
 */
static __inline int
mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}
/*
 * Is request on pending list?
 */
static __inline int
mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}

#ifdef	INVARIANTS
static __inline void
mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
static __inline void
mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);

/*
 * Make sure that req *is* part of one of the special lists
 */
static __inline void
mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		if (req == mpt->els_cmd_ptrs[i]) {
			return;
		}
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		if (req == mpt->tgt_cmd_ptrs[i]) {
			return;
		}
	}
	panic("%s(%d): req %p:%u function %x not in els or tgt ptrs",
	    s, line, req, req->serno,
	    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
}

/*
 * Make sure that req is *not* part of one of the special lists.
 */
static __inline void
mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		KASSERT(req != mpt->els_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		KASSERT(req != mpt->tgt_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
}
#endif

/*
 * Task Management Types, purely for internal consumption
 */
typedef enum {
	MPT_ABORT_TASK_SET=1234,
	MPT_CLEAR_TASK_SET,
	MPT_TARGET_RESET,
	MPT_CLEAR_ACA,
	MPT_TERMINATE_TASK,
	MPT_NIL_TMT_VALUE=5678
} mpt_task_mgmt_t;

/**************************** Unclassified Routines ***************************/
void		mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
int		mpt_recv_handshake_reply(struct mpt_softc *mpt,
					 size_t reply_len, void *reply);
int		mpt_wait_req(struct mpt_softc *mpt, request_t *req,
			     mpt_req_state_t state, mpt_req_state_t mask,
			     int sleep_ok, int time_ms);
void		mpt_enable_ints(struct mpt_softc *mpt);
void		mpt_disable_ints(struct mpt_softc *mpt);
int		mpt_attach(struct mpt_softc *mpt);
int		mpt_shutdown(struct mpt_softc *mpt);
int		mpt_detach(struct mpt_softc *mpt);
int		mpt_send_handshake_cmd(struct mpt_softc *mpt,
				       size_t len, void *cmd);
request_t *	mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
void		mpt_free_request(struct mpt_softc *mpt, request_t *req);
void		mpt_intr(void *arg);
void		mpt_check_doorbell(struct mpt_softc *mpt);
void		mpt_dump_reply_frame(struct mpt_softc *mpt,
				     MSG_DEFAULT_REPLY *reply_frame);

int		mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
				  cfgparms_t *params,
				  bus_addr_t /*addr*/, bus_size_t/*len*/,
				  int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion,
				       int PageNumber, uint32_t PageAddress,
				       int ExtPageType,
				       CONFIG_EXTENDED_PAGE_HEADER *rslt,
				       int sleep_ok, int timeout_ms);
int		mpt_read_extcfg_page(struct mpt_softc *mpt, int Action,
				     uint32_t PageAddress,
				     CONFIG_EXTENDED_PAGE_HEADER *hdr,
				     void *buf, size_t len, int sleep_ok,
				     int timeout_ms);
int		mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
				    int /*PageNumber*/,
				    uint32_t /*PageAddress*/,
				    CONFIG_PAGE_HEADER *,
				    int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
				  uint32_t /*PageAddress*/,
				  CONFIG_PAGE_HEADER *, size_t /*len*/,
				  int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
				   uint32_t /*PageAddress*/,
				   CONFIG_PAGE_HEADER *, size_t /*len*/,
				   int /*sleep_ok*/, int /*timeout_ms*/);
static __inline int
mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		      CONFIG_PAGE_HEADER *hdr, size_t len,
		      int sleep_ok, int timeout_ms)
{
	return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				  PageAddress, hdr, len, sleep_ok,
				  timeout_ms));
}

static __inline int
mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		       CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		       int timeout_ms)
{
	return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
				   PageAddress, hdr, len, sleep_ok,
				   timeout_ms));
}

/* mpt_debug.c functions */
void		mpt_print_reply(void *vmsg);
void		mpt_print_db(uint32_t mb);
void		mpt_print_config_reply(void *vmsg);
char		*mpt_ioc_diag(uint32_t diag);
void		mpt_req_state(mpt_req_state_t state);
void		mpt_print_config_request(void *vmsg);
void		mpt_print_request(void *vmsg);
void		mpt_dump_sgl(SGE_IO_UNION *se, int offset);

#endif /* _MPT_H_ */