/* $FreeBSD$ */
/*-
 * Generic defines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MPT_H_
#define _MPT_H_

/********************************* OS Includes ********************************/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#if __FreeBSD_version < 500000
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/devicestat.h>
#else
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#endif
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/module.h>

#include <machine/cpu.h>
#include <machine/resource.h>

#if __FreeBSD_version < 500000
#include <machine/bus.h>
#include <machine/clock.h>
#endif

#include <sys/rman.h>

#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#else
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#endif

#include <machine/bus.h>
#include "opt_ddb.h"

/**************************** Register Definitions ****************************/
#include <dev/mpt/mpt_reg.h>

/******************************* MPI Definitions ******************************/
#include <dev/mpt/mpilib/mpi_type.h>
#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_cnfg.h>
#include <dev/mpt/mpilib/mpi_ioc.h>
#include <dev/mpt/mpilib/mpi_raid.h>

/* XXX For mpt_debug.c */
#include <dev/mpt/mpilib/mpi_init.h>

#define	MPT_S64_2_SCALAR(y)	((((int64_t)y.High) << 32) | (y.Low))
#define	MPT_U64_2_SCALAR(y)	((((uint64_t)y.High) << 32) | (y.Low))

/****************************** Misc Definitions ******************************/
/* #define MPT_TEST_MULTIPATH	1 */
#define	MPT_OK		(0)
#define	MPT_FAIL	(0x10000)

#define	NUM_ELEMENTS(array)	(sizeof(array) / sizeof(*array))

#define	MPT_ROLE_NONE		0
#define	MPT_ROLE_INITIATOR	1
#define	MPT_ROLE_TARGET		2
#define	MPT_ROLE_BOTH		3
#define	MPT_ROLE_DEFAULT	MPT_ROLE_INITIATOR

/**************************** Forward Declarations ****************************/
struct mpt_softc;
struct mpt_personality;
typedef struct req_entry request_t;

/************************* Personality Module Support *************************/
typedef int mpt_load_handler_t(struct mpt_personality *);
typedef int mpt_probe_handler_t(struct mpt_softc *);
typedef int mpt_attach_handler_t(struct mpt_softc *);
typedef int mpt_enable_handler_t(struct mpt_softc *);
typedef void mpt_ready_handler_t(struct mpt_softc *);
typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
				MSG_EVENT_NOTIFY_REPLY *);
typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
/* XXX Add return value and use for veto? */
typedef void mpt_shutdown_handler_t(struct mpt_softc *);
typedef void mpt_detach_handler_t(struct mpt_softc *);
typedef int mpt_unload_handler_t(struct mpt_personality *);

struct mpt_personality
{
	const char		*name;
	uint32_t		id;		/* Assigned identifier. */
	u_int			use_count;	/* Instances using personality */
	mpt_load_handler_t	*load;		/* configure personality */
#define	MPT_PERS_FIRST_HANDLER(pers)	(&(pers)->load)
	mpt_probe_handler_t	*probe;		/* probe for personality use */
	mpt_attach_handler_t	*attach;	/* initialize device instance */
	mpt_enable_handler_t	*enable;	/* enable device */
	mpt_ready_handler_t	*ready;		/* final open for business */
	mpt_event_handler_t	*event;		/* Handle MPI event. */
	mpt_reset_handler_t	*reset;		/* Re-init after reset. */
	mpt_shutdown_handler_t	*shutdown;	/* Shutdown instance. */
	mpt_detach_handler_t	*detach;	/* release device instance */
	mpt_unload_handler_t	*unload;	/* Shutdown personality */
#define	MPT_PERS_LAST_HANDLER(pers)	(&(pers)->unload)
};

int mpt_modevent(module_t, int, void *);

/* Maximum supported number of personalities. */
#define	MPT_MAX_PERSONALITIES	(15)

#define	MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
	MODULE_DEPEND(name, dep, vmin, vpref, vmax)

#define	DECLARE_MPT_PERSONALITY(name, order)				  \
	static moduledata_t name##_mod = {				  \
		#name, mpt_modevent, &name##_personality		  \
	};								  \
	DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order);	  \
	MODULE_VERSION(name, 1);					  \
	MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)

/******************************* Bus DMA Support ******************************/
/* XXX Need to update bus_dmamap_sync to take a range argument. */
#define	bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op)	\
	bus_dmamap_sync(dma_tag, dmamap, op)

#if __FreeBSD_version < 600000
#define	bus_get_dma_tag(x)	NULL
#endif
#if __FreeBSD_version >= 501102
#define	mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)					\
	bus_dma_tag_create(parent_tag, alignment, boundary,		\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   busdma_lock_mutex, &(mpt)->mpt_lock,		\
			   dma_tagp)
#else
#define	mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)					\
	bus_dma_tag_create(parent_tag, alignment, boundary,		\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)
#endif

struct mpt_map_info {
	struct mpt_softc *mpt;
	int		  error;
	uint32_t	  phys;
};

void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);

/* **************************** NewBUS interrupt Crock ************************/
#if __FreeBSD_version < 700031
#define	mpt_setup_intr(d, i, f, U, if, ifa, hp)	\
	bus_setup_intr(d, i, f, if, ifa, hp)
#else
#define	mpt_setup_intr	bus_setup_intr
#endif

/* **************************** NewBUS CAM Support ****************************/
#if __FreeBSD_version < 700049
#define	mpt_xpt_bus_register(sim, parent, bus)	\
	xpt_bus_register(sim, bus)
#else
#define	mpt_xpt_bus_register	xpt_bus_register
#endif

/**************************** Kernel Thread Support ***************************/
#if __FreeBSD_version > 800001
#define	mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
	kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
#define	mpt_kthread_exit(status)	\
	kproc_exit(status)
#elif __FreeBSD_version > 500005
#define	mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
	kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
#define	mpt_kthread_exit(status)	\
	kthread_exit(status)
#else
#define	mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
	kthread_create(func, farg, proc_ptr, fmtstr, arg)
#define	mpt_kthread_exit(status)	\
	kthread_exit(status)
#endif

/********************************* Endianness *********************************/
#define	MPT_2_HOST64(ptr, tag)	ptr->tag = le64toh(ptr->tag)
#define	MPT_2_HOST32(ptr, tag)	ptr->tag = le32toh(ptr->tag)
#define	MPT_2_HOST16(ptr, tag)	ptr->tag = le16toh(ptr->tag)

#define	HOST_2_MPT64(ptr, tag)	ptr->tag = htole64(ptr->tag)
#define	HOST_2_MPT32(ptr, tag)	ptr->tag = htole32(ptr->tag)
#define	HOST_2_MPT16(ptr, tag)	ptr->tag = htole16(ptr->tag)

#if _BYTE_ORDER == _BIG_ENDIAN
void mpt2host_sge_simple_union(SGE_SIMPLE_UNION *);
void mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *);
void mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *);
void mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *);
void mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *);
void mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *);
void mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
void host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
void mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *);
void mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *);
void mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
void host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
void mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *);
void mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
void host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
void mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *);
void mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *);
void mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *);
#else
#define	mpt2host_sge_simple_union(x)		do { ; } while (0)
#define	mpt2host_iocfacts_reply(x)		do { ; } while (0)
#define	mpt2host_portfacts_reply(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc2(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc3(x)		do { ; } while (0)
#define	mpt2host_config_page_scsi_port_0(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_port_1(x)	do { ; } while (0)
#define	host2mpt_config_page_scsi_port_1(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_port_2(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_device_0(x)	do { ; } while (0)
#define	mpt2host_config_page_scsi_device_1(x)	do { ; } while (0)
#define	host2mpt_config_page_scsi_device_1(x)	do { ; } while (0)
#define	mpt2host_config_page_fc_port_0(x)	do { ; } while (0)
#define	mpt2host_config_page_fc_port_1(x)	do { ; } while (0)
#define	host2mpt_config_page_fc_port_1(x)	do { ; } while (0)
#define	mpt2host_config_page_raid_vol_0(x)	do { ; } while (0)
#define	mpt2host_config_page_raid_phys_disk_0(x) \
	do { ; } while (0)
#define	mpt2host_mpi_raid_vol_indicator(x)	do { ; } while (0)
#endif

/**************************** MPI Transaction State ***************************/
typedef enum {
	REQ_STATE_NIL		= 0x00,
	REQ_STATE_FREE		= 0x01,
	REQ_STATE_ALLOCATED	= 0x02,
	REQ_STATE_QUEUED	= 0x04,
	REQ_STATE_DONE		= 0x08,
	REQ_STATE_TIMEDOUT	= 0x10,
	REQ_STATE_NEED_WAKEUP	= 0x20,
	REQ_STATE_LOCKED	= 0x80,	/* can't be freed */
	REQ_STATE_MASK		= 0xFF
} mpt_req_state_t;

struct req_entry {
	TAILQ_ENTRY(req_entry)	links;		/* Pointer to next in list */
	mpt_req_state_t		state;		/* Request State Information */
	uint16_t		index;		/* Index of this entry */
	uint16_t		IOCStatus;	/* Completion status */
	uint16_t		ResponseCode;	/* TMF Response Code */
	uint16_t		serno;		/* serial number */
	union ccb	       *ccb;		/* CAM request */
	void		       *req_vbuf;	/* Virtual Address of Entry */
	void		       *sense_vbuf;	/* Virtual Address of sense data */
	bus_addr_t		req_pbuf;	/* Physical Address of Entry */
	bus_addr_t		sense_pbuf;	/* Physical Address of sense data */
	bus_dmamap_t		dmap;		/* DMA map for data buffers */
	struct req_entry       *chain;		/* for SGE overallocations */
	struct callout		callout;	/* Timeout for the request */
};

typedef struct mpt_config_params {
	u_int	Action;
	u_int	PageVersion;
	u_int	PageLength;
	u_int	PageNumber;
	u_int	PageType;
	u_int	PageAddress;
	u_int	ExtPageLength;
	u_int	ExtPageType;
} cfgparms_t;

/**************************** MPI Target State Info ***************************/

typedef struct {
	uint32_t reply_desc;	/* current reply descriptor */
	uint32_t resid;		/* current data residual */
	uint32_t bytes_xfered;	/* current relative offset */
	union ccb *ccb;		/* pointer to currently active ccb */
	request_t *req;		/* pointer to currently active assist request */
	uint32_t
		is_local : 1,
		nxfers	 : 31;
	uint32_t tag_id;
	enum {
		TGT_STATE_NIL,
		TGT_STATE_LOADING,
		TGT_STATE_LOADED,
		TGT_STATE_IN_CAM,
		TGT_STATE_SETTING_UP_FOR_DATA,
		TGT_STATE_MOVING_DATA,
		TGT_STATE_MOVING_DATA_AND_STATUS,
		TGT_STATE_SENDING_STATUS
	} state;
} mpt_tgt_state_t;

/*
 * When we get an incoming command it has its own tag which is called the
 * IoIndex. This is the value we gave that particular command buffer when
 * we originally assigned it. It's just a number, really. The FC card uses
 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
 * contains pointers to the request_t structures related to that IoIndex.
 *
 * What *we* do is construct a tag out of the index for the target command
 * which owns the incoming ATIO plus a rolling sequence number.
 */
#define	MPT_MAKE_TAGID(mpt, req, ioindex)	\
 ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff))

#ifdef	INVARIANTS
#define	MPT_TAG_2_REQ(a, b)		mpt_tag_2_req(a, (uint32_t) b)
#else
#define	MPT_TAG_2_REQ(mpt, tag)		mpt->tgt_cmd_ptrs[tag >> 18]
#endif

#define	MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
    (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))

STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
#define	MPT_MAX_LUNS	256
typedef struct {
	struct mpt_hdr_stailq	atios;
	struct mpt_hdr_stailq	inots;
	int			enabled;
} tgt_resource_t;
#define	MPT_MAX_ELS	64

/**************************** Handler Registration ****************************/
/*
 * Global table of registered reply handlers.  The
 * handler is indicated by byte 3 of the request
 * index submitted to the IOC.  This allows the
 * driver core to perform generic processing without
 * any knowledge of per-personality behavior.
 *
 * MPT_NUM_REPLY_HANDLERS must be a power of 2
 * to allow the easy generation of a mask.
 *
 * The handler offsets used by the core are hard coded
 * allowing faster code generation when assigning a handler
 * to a request.  All "personalities" must use the
 * handler registration mechanism.
 *
 * The IOC handlers that are rarely executed are placed
 * at the tail of the table to make it more likely that
 * all commonly executed handlers fit in a single cache
 * line.
 */
#define	MPT_NUM_REPLY_HANDLERS		(32)
#define	MPT_REPLY_HANDLER_EVENTS	MPT_CBI_TO_HID(0)
#define	MPT_REPLY_HANDLER_CONFIG	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
#define	MPT_REPLY_HANDLER_HANDSHAKE	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
typedef union {
	mpt_reply_handler_t	*reply_handler;
} mpt_handler_t;

typedef enum {
	MPT_HANDLER_REPLY,
	MPT_HANDLER_EVENT,
	MPT_HANDLER_RESET,
	MPT_HANDLER_SHUTDOWN
} mpt_handler_type;

struct mpt_handler_record
{
	LIST_ENTRY(mpt_handler_record)	links;
	mpt_handler_t			handler;
};

LIST_HEAD(mpt_handler_list, mpt_handler_record);

/*
 * The handler_id is currently unused but would contain the
 * handler ID used in the MsgContext field to allow direction
 * of replies to the handler.  Registrations that don't require
 * a handler id can pass in NULL for the handler_id.
 *
 * Deregistrations for handlers without a handler id should
 * pass in MPT_HANDLER_ID_NONE.
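 *
 * A minimal registration sketch (illustrative only; my_reply_handler is a
 * hypothetical mpt_reply_handler_t supplied by a personality, not something
 * defined in this header):
 *
 *	mpt_handler_t h;
 *	uint32_t handler_id;
 *
 *	h.reply_handler = my_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, h, &handler_id);
 *	(handler_id is what would be carried in MsgContext so that replies
 *	come back to my_reply_handler)
 *	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, h, handler_id);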
 */
#define	MPT_HANDLER_ID_NONE		(0xFFFFFFFF)
int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
			 mpt_handler_t, uint32_t *);
int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
			   mpt_handler_t, uint32_t);

/******************* Per-Controller Instance Data Structures ******************/
TAILQ_HEAD(req_queue, req_entry);

/* Structure for saving proper values for modifiable PCI config registers */
struct mpt_pci_cfg {
	uint16_t Command;
	uint16_t LatencyTimer_LineSize;
	uint32_t IO_BAR;
	uint32_t Mem0_BAR[2];
	uint32_t Mem1_BAR[2];
	uint32_t ROM_BAR;
	uint8_t  IntLine;
	uint32_t PMCSR;
};

typedef enum {
	MPT_RVF_NONE		= 0x0,
	MPT_RVF_ACTIVE		= 0x1,
	MPT_RVF_ANNOUNCED	= 0x2,
	MPT_RVF_UP2DATE		= 0x4,
	MPT_RVF_REFERENCED	= 0x8,
	MPT_RVF_WCE_CHANGED	= 0x10
} mpt_raid_volume_flags;

struct mpt_raid_volume {
	CONFIG_PAGE_RAID_VOL_0	       *config_page;
	MPI_RAID_VOL_INDICATOR		sync_progress;
	mpt_raid_volume_flags		flags;
	u_int				quiesced_disks;
};

typedef enum {
	MPT_RDF_NONE		= 0x00,
	MPT_RDF_ACTIVE		= 0x01,
	MPT_RDF_ANNOUNCED	= 0x02,
	MPT_RDF_UP2DATE		= 0x04,
	MPT_RDF_REFERENCED	= 0x08,
	MPT_RDF_QUIESCING	= 0x10,
	MPT_RDF_QUIESCED	= 0x20
} mpt_raid_disk_flags;

struct mpt_raid_disk {
	CONFIG_PAGE_RAID_PHYS_DISK_0	config_page;
	struct mpt_raid_volume	       *volume;
	u_int				member_number;
	u_int				pass_thru_active;
	mpt_raid_disk_flags		flags;
};

struct mpt_evtf_record {
	MSG_EVENT_NOTIFY_REPLY		reply;
	uint32_t			context;
	LIST_ENTRY(mpt_evtf_record)	links;
};

LIST_HEAD(mpt_evtf_list, mpt_evtf_record);

struct mptsas_devinfo {
	uint16_t	dev_handle;
	uint16_t	parent_dev_handle;
	uint16_t	enclosure_handle;
	uint16_t	slot;
	uint8_t		phy_num;
	uint8_t		physical_port;
	uint8_t		target_id;
	uint8_t		bus;
	uint64_t	sas_address;
	uint32_t	device_info;
};

struct mptsas_phyinfo {
	uint16_t	handle;
	uint8_t		phy_num;
	uint8_t		port_id;
	uint8_t		negotiated_link_rate;
	uint8_t		hw_link_rate;
	uint8_t		programmed_link_rate;
	uint8_t		sas_port_add_phy;
	struct mptsas_devinfo identify;
	struct mptsas_devinfo attached;
};

struct mptsas_portinfo {
	uint16_t		num_phys;
	struct mptsas_phyinfo  *phy_info;
};

struct mpt_softc {
	device_t		dev;
#if __FreeBSD_version < 500000
	uint32_t		mpt_islocked;
	int			mpt_splsaved;
#else
	struct mtx		mpt_lock;
	int			mpt_locksetup;
#endif
	uint32_t		mpt_pers_mask;
	uint32_t
				: 8,
		unit		: 8,
		ready		: 1,
		fw_uploaded	: 1,
		msi_enable	: 1,
		twildcard	: 1,
		tenabled	: 1,
		do_cfg_role	: 1,
		raid_enabled	: 1,
		raid_mwce_set	: 1,
		getreqwaiter	: 1,
		shutdwn_raid	: 1,
		shutdwn_recovery: 1,
		outofbeer	: 1,
		disabled	: 1,
		is_spi		: 1,
		is_sas		: 1,
		is_fc		: 1;

	u_int			cfg_role;
	u_int			role;	/* role: none, ini, target, both */

	u_int			verbose;
#ifdef	MPT_TEST_MULTIPATH
	int			failure_id;
#endif

	/*
	 * IOC Facts
	 */
	MSG_IOC_FACTS_REPLY	ioc_facts;

	/*
	 * Port Facts
	 */
	MSG_PORT_FACTS_REPLY *	port_facts;
#define	mpt_ini_id	port_facts[0].PortSCSIID
#define	mpt_max_tgtcmds	port_facts[0].MaxPostedCmdBuffers

	/*
	 * Device Configuration Information
	 */
	union {
		struct mpt_spi_cfg {
			CONFIG_PAGE_SCSI_PORT_0		_port_page0;
			CONFIG_PAGE_SCSI_PORT_1		_port_page1;
			CONFIG_PAGE_SCSI_PORT_2		_port_page2;
			CONFIG_PAGE_SCSI_DEVICE_0	_dev_page0[16];
			CONFIG_PAGE_SCSI_DEVICE_1	_dev_page1[16];
			uint16_t			_tag_enable;
			uint16_t			_disc_enable;
		} spi;
#define	mpt_port_page0		cfg.spi._port_page0
#define	mpt_port_page1		cfg.spi._port_page1
#define	mpt_port_page2		cfg.spi._port_page2
#define	mpt_dev_page0		cfg.spi._dev_page0
#define	mpt_dev_page1		cfg.spi._dev_page1
#define	mpt_tag_enable		cfg.spi._tag_enable
#define	mpt_disc_enable		cfg.spi._disc_enable
		struct mpi_fc_cfg {
			CONFIG_PAGE_FC_PORT_0		_port_page0;
			uint32_t			_port_speed;
#define	mpt_fcport_page0	cfg.fc._port_page0
#define	mpt_fcport_speed	cfg.fc._port_speed
		} fc;
	} cfg;
#if __FreeBSD_version >= 500000
	/*
	 * Device config information stored up for sysctl to access
	 */
	union {
		struct {
			unsigned int initiator_id;
		} spi;
		struct {
			char wwnn[19];
			char wwpn[19];
		} fc;
	} scinfo;
#endif

	/* Controller Info for RAID information */
	CONFIG_PAGE_IOC_2 *	ioc_page2;
	CONFIG_PAGE_IOC_3 *	ioc_page3;

	/* Raid Data */
	struct mpt_raid_volume *raid_volumes;
	struct mpt_raid_disk   *raid_disks;
	u_int			raid_max_volumes;
	u_int			raid_max_disks;
	u_int			raid_page0_len;
	u_int			raid_wakeup;
	u_int			raid_rescan;
	u_int			raid_resync_rate;
	u_int			raid_mwce_setting;
	u_int			raid_queue_depth;
	u_int			raid_nonopt_volumes;
	struct proc	       *raid_thread;
	struct callout		raid_timer;

	/*
	 * PCI Hardware info
	 */
	int			pci_msi_count;
	struct resource *	pci_irq;	/* Interrupt map for chip */
	void *			ih;		/* Interrupt handle */
	struct mpt_pci_cfg	pci_cfg;	/* saved PCI conf registers */

	/*
	 * DMA Mapping Stuff
	 */
	struct resource *	pci_reg;	/* Register map for chip */
	int			pci_mem_rid;	/* Resource ID */
	bus_space_tag_t		pci_st;		/* Bus tag for registers */
	bus_space_handle_t	pci_sh;		/* Bus handle for registers */
	/* PIO versions of above. */
	int			pci_pio_rid;
	struct resource *	pci_pio_reg;
	bus_space_tag_t		pci_pio_st;
	bus_space_handle_t	pci_pio_sh;

	bus_dma_tag_t		parent_dmat;	/* DMA tag for parent PCI bus */
	bus_dma_tag_t		reply_dmat;	/* DMA tag for reply memory */
	bus_dmamap_t		reply_dmap;	/* DMA map for reply memory */
	uint8_t		       *reply;		/* KVA of reply memory */
	bus_addr_t		reply_phys;	/* BusAddr of reply memory */

	bus_dma_tag_t		buffer_dmat;	/* DMA tag for buffers */
	bus_dma_tag_t		request_dmat;	/* DMA tag for request memory */
	bus_dmamap_t		request_dmap;	/* DMA map for request memory */
	uint8_t		       *request;	/* KVA of Request memory */
	bus_addr_t		request_phys;	/* BusAddr of request memory */

	uint32_t		max_seg_cnt;	/* calculated after IOC facts */

	/*
	 * Hardware management
	 */
	u_int			reset_cnt;

	/*
	 * CAM && Software Management
	 */
	request_t	       *request_pool;
	struct req_queue	request_free_list;
	struct req_queue	request_pending_list;
	struct req_queue	request_timeout_list;

	struct cam_sim	       *sim;
	struct cam_path	       *path;

	struct cam_sim	       *phydisk_sim;
	struct cam_path	       *phydisk_path;

	struct proc	       *recovery_thread;
	request_t	       *tmf_req;

	/*
	 * Deferred frame acks due to resource shortage.
	 */
	struct mpt_evtf_list	ack_frames;

	/*
	 * Target Mode Support
	 */
	uint32_t		scsi_tgt_handler_id;
	request_t **		tgt_cmd_ptrs;
	request_t **		els_cmd_ptrs;	/* FC only */

	/*
	 * *snork*- this is chosen to be here *just in case* somebody
	 * forgets to point to it exactly and we index off of trt with
	 * CAM_LUN_WILDCARD.
	 */
	tgt_resource_t		trt_wildcard;		/* wildcard luns */
	tgt_resource_t		trt[MPT_MAX_LUNS];
	uint16_t		tgt_cmds_allocated;
	uint16_t		els_cmds_allocated;	/* FC only */

	uint16_t		timeouts;	/* timeout count */
	uint16_t		success;	/* successes after timeout */
	uint16_t		sequence;	/* Sequence Number */
	uint16_t		pad3;

	/* Paired port in some dual adapter configurations */
	struct mpt_softc *	mpt2;

	/* FW Image management */
	uint32_t		fw_image_size;
	uint8_t		       *fw_image;
	bus_dma_tag_t		fw_dmat;	/* DMA tag for firmware image */
	bus_dmamap_t		fw_dmap;	/* DMA map for firmware image */
	bus_addr_t		fw_phys;	/* BusAddr of firmware image */

	/* SAS Topology */
	struct mptsas_portinfo *sas_portinfo;

	/* Shutdown Event Handler. */
	eventhandler_tag	eh;

	/* Userland management interface. */
	struct cdev	       *cdev;

	TAILQ_ENTRY(mpt_softc)	links;
};

static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);

static __inline void
mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
{
	if ((req->serno = mpt->sequence++) == 0) {
		req->serno = mpt->sequence++;
	}
}

/***************************** Locking Primitives *****************************/
#if __FreeBSD_version < 500000
#define	MPT_IFLAGS		INTR_TYPE_CAM
#define	MPT_LOCK(mpt)		mpt_lockspl(mpt)
#define	MPT_UNLOCK(mpt)		mpt_unlockspl(mpt)
#define	MPT_OWNED(mpt)		mpt->mpt_islocked
#define	MPT_LOCK_ASSERT(mpt)
#define	MPTLOCK_2_CAMLOCK	MPT_UNLOCK
#define	CAMLOCK_2_MPTLOCK	MPT_LOCK
#define	MPT_LOCK_SETUP(mpt)
#define	MPT_LOCK_DESTROY(mpt)

static __inline void mpt_lockspl(struct mpt_softc *mpt);
static __inline void mpt_unlockspl(struct mpt_softc *mpt);

static __inline void
mpt_lockspl(struct mpt_softc *mpt)
{
	int s;

	s = splcam();
	if (mpt->mpt_islocked++ == 0) {
		mpt->mpt_splsaved = s;
	} else {
		splx(s);
		panic("Recursed lock with mask: 0x%x\n", s);
	}
}

static __inline void
mpt_unlockspl(struct mpt_softc *mpt)
{
	if (mpt->mpt_islocked) {
		if (--mpt->mpt_islocked == 0) {
			splx(mpt->mpt_splsaved);
		}
	} else
		panic("Negative lock count\n");
}

static __inline int
mpt_sleep(struct mpt_softc *mpt, void *ident, int priority,
    const char *wmesg, int timo)
{
	int saved_cnt;
	int saved_spl;
	int error;

	KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep"));
	saved_cnt = mpt->mpt_islocked;
	saved_spl = mpt->mpt_splsaved;
	mpt->mpt_islocked = 0;
	error = tsleep(ident, priority, wmesg, timo);
	KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup"));
	mpt->mpt_islocked = saved_cnt;
	mpt->mpt_splsaved = saved_spl;
	return (error);
}

#define	mpt_req_timeout(req, ticks, func, arg) \
	callout_reset(&(req)->callout, (ticks), (func), (arg));
#define	mpt_req_untimeout(req, func, arg) \
	callout_stop(&(req)->callout)
#define	mpt_callout_init(mpt, c)	\
	callout_init(c)
#define	mpt_callout_drain(mpt, c)	\
	callout_stop(c)

#else
#if 1
#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
#define	MPT_LOCK_SETUP(mpt)						\
		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
		mpt->mpt_locksetup = 1
#define	MPT_LOCK_DESTROY(mpt)						\
	if (mpt->mpt_locksetup) {					\
		mtx_destroy(&mpt->mpt_lock);				\
		mpt->mpt_locksetup = 0;					\
	}

#define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
#define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
#define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
#define	MPT_LOCK_ASSERT(mpt)	mtx_assert(&(mpt)->mpt_lock, MA_OWNED)
#define	MPTLOCK_2_CAMLOCK(mpt)
#define	CAMLOCK_2_MPTLOCK(mpt)
#define	mpt_sleep(mpt, ident, priority, wmesg, timo)			\
	msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)
#define	mpt_req_timeout(req, ticks, func, arg) \
	callout_reset(&(req)->callout, (ticks), (func), (arg))
#define	mpt_req_untimeout(req, func, arg) \
	callout_stop(&(req)->callout)
#define	mpt_callout_init(mpt, c)	\
	callout_init_mtx(c, &(mpt)->mpt_lock, 0)
#define	mpt_callout_drain(mpt, c)	\
	callout_drain(c)

#else

#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY
#define	MPT_LOCK_SETUP(mpt)	do { } while (0)
#define	MPT_LOCK_DESTROY(mpt)	do { } while (0)
#define	MPT_LOCK_ASSERT(mpt)	mtx_assert(&Giant, MA_OWNED)
#define	MPT_LOCK(mpt)		mtx_lock(&Giant)
#define	MPT_UNLOCK(mpt)		mtx_unlock(&Giant)
#define	MPTLOCK_2_CAMLOCK(mpt)
#define	CAMLOCK_2_MPTLOCK(mpt)

#define	mpt_req_timeout(req, ticks, func, arg) \
	callout_reset(&(req)->callout, (ticks), (func), (arg))
#define	mpt_req_untimeout(req, func, arg) \
	callout_stop(&(req)->callout)
#define	mpt_callout_init(mpt, c)	\
	callout_init(c, 0)
#define	mpt_callout_drain(mpt, c)	\
	callout_drain(c)

static __inline int
mpt_sleep(struct mpt_softc *, void *, int, const char *, int);

static __inline int
mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t)
{
	int r;
	r = tsleep(i, p, w, t);
	return (r);
}
#endif
#endif

/******************************* Register Access ******************************/
static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_read(struct mpt_softc *, int);
static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);

static __inline void
mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
}

static __inline uint32_t
mpt_read(struct mpt_softc *mpt, int offset)
{
	return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
}

/*
 * Some operations (e.g., diagnostic register writes while the ARM processor
 * is disabled) must be performed using "PCI pio" operations.  On non-PCI
 * busses, these operations likely map to normal register accesses.
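 *
 * Illustrative use only (the accessors mirror mpt_write/mpt_read above;
 * MPT_OFFSET_DIAGNOSTIC is assumed here to be the diagnostic register
 * offset exported by <dev/mpt/mpt_reg.h>, which this header includes):
 *
 *	mpt_pio_write(mpt, MPT_OFFSET_DIAGNOSTIC, val);
 *	val = mpt_pio_read(mpt, MPT_OFFSET_DIAGNOSTIC);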
 */
static __inline void
mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
}

static __inline uint32_t
mpt_pio_read(struct mpt_softc *mpt, int offset)
{
	return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
}

/*********************** Reply Frame/Request Management ***********************/
/* Max MPT Reply we are willing to accept (must be power of 2) */
#define	MPT_REPLY_SIZE		256

/* Max i/o size, based on legacy MAXPHYS.  Can be increased. */
#define	MPT_MAXPHYS		(128 * 1024)

/*
 * Must be less than 16384 in order for target mode to work
 */
#define	MPT_MAX_REQUESTS(mpt)	512
#define	MPT_REQUEST_AREA	512
#define	MPT_SENSE_SIZE		32	/* included in MPT_REQUEST_AREA */
#define	MPT_REQ_MEM_SIZE(mpt)	(MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)

#define	MPT_CONTEXT_CB_SHIFT	(16)
#define	MPT_CBI(handle)		(handle >> MPT_CONTEXT_CB_SHIFT)
#define	MPT_CBI_TO_HID(cbi)	((cbi) << MPT_CONTEXT_CB_SHIFT)
#define	MPT_CONTEXT_TO_CBI(x)	\
    (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
#define	MPT_CONTEXT_REQI_MASK	0xFFFF
#define	MPT_CONTEXT_TO_REQI(x)	((x) & MPT_CONTEXT_REQI_MASK)

/*
 * Convert a 32bit physical address returned from IOC to an
 * offset into our reply frame memory or the kvm address needed
 * to access the data.  The returned address is only the low
 * 32 bits, so mask our base physical address accordingly.
 */
#define	MPT_REPLY_BADDR(x)		\
	(x << 1)
#define	MPT_REPLY_OTOV(m, i)		\
	((void *)(&m->reply[i]))

#define	MPT_DUMP_REPLY_FRAME(mpt, reply_frame)		\
do {							\
	if (mpt->verbose > MPT_PRT_DEBUG)		\
		mpt_dump_reply_frame(mpt, reply_frame);	\
} while(0)

static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);

/*
 * Give the reply buffer back to the IOC after we have
 * finished processing it.
 */
static __inline void
mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
{
	mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
}

/* Get a reply from the IOC */
static __inline uint32_t
mpt_pop_reply_queue(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
}

void
mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);

/************************** Scatter Gather Management *************************/
/* MPT_RQSL- size of request frame, in bytes */
#define	MPT_RQSL(mpt)		(mpt->ioc_facts.RequestFrameSize << 2)

/* MPT_NSGL- how many SG entries can fit in a request frame size */
#define	MPT_NSGL(mpt)		(MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))

/* MPT_NRFM- how many request frames can fit in each request alloc we make */
#define	MPT_NRFM(mpt)		(MPT_REQUEST_AREA / MPT_RQSL(mpt))

/*
 * MPT_NSGL_FIRST- # of SG elements that can fit after
 * an I/O request but still within the request frame.
 * Do this safely based upon SGE_IO_UNION.
 *
 * Note that the first element is *within* the SCSI request.
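 *
 * Worked example with purely illustrative sizes (the real numbers come
 * from the MPI headers and the IOC facts, not from this comment): assuming
 * a 128-byte request frame, a 60-byte MSG_SCSI_IO_REQUEST (whose sizeof
 * already counts its one embedded SGE_IO_UNION), and a 12-byte
 * SGE_IO_UNION, the macro below yields (128 - 60 + 12) / 12 = 6 SG
 * elements usable in the first frame.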
 */
#define	MPT_NSGL_FIRST(mpt)				\
    ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
    sizeof (SGE_IO_UNION))

/***************************** IOC Initialization *****************************/
int mpt_reset(struct mpt_softc *, int /*reinit*/);

/****************************** Debugging ************************************/
typedef struct mpt_decode_entry {
	char    *name;
	u_int	 value;
	u_int	 mask;
} mpt_decode_entry_t;

int mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries,
		     const char *name, u_int value, u_int *cur_column,
		     u_int wrap_point);

void mpt_dump_data(struct mpt_softc *, const char *, void *, int);
void mpt_dump_request(struct mpt_softc *, request_t *);

enum {
	MPT_PRT_ALWAYS,
	MPT_PRT_FATAL,
	MPT_PRT_ERROR,
	MPT_PRT_WARN,
	MPT_PRT_INFO,
	MPT_PRT_NEGOTIATION,
	MPT_PRT_DEBUG,
	MPT_PRT_DEBUG1,
	MPT_PRT_DEBUG2,
	MPT_PRT_DEBUG3,
	MPT_PRT_TRACE,
	MPT_PRT_NONE=100
};

#if __FreeBSD_version > 500000
#define	mpt_lprt(mpt, level, ...)		\
do {						\
	if (level <= (mpt)->verbose)		\
		mpt_prt(mpt, __VA_ARGS__);	\
} while (0)

#define	mpt_lprtc(mpt, level, ...)		\
do {						\
	if (level <= (mpt)->verbose)		\
		mpt_prtc(mpt, __VA_ARGS__);	\
} while (0)
#else
void mpt_lprt(struct mpt_softc *, int, const char *, ...)
	__printflike(3, 4);
void mpt_lprtc(struct mpt_softc *, int, const char *, ...)
	__printflike(3, 4);
#endif
void mpt_prt(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);
void mpt_prtc(struct mpt_softc *, const char *, ...)
	__printflike(2, 3);

/**************************** Target Mode Related ***************************/
static __inline int mpt_cdblen(uint8_t, int);
static __inline int
mpt_cdblen(uint8_t cdb0, int maxlen)
{
	int group = cdb0 >> 5;
	switch (group) {
	case 0:
		return (6);
	case 1:
		return (10);
	case 4:
	case 5:
		return (12);
	default:
		return (16);
	}
}
#ifdef	INVARIANTS
static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
static __inline request_t *
mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
{
	uint16_t rtg = (tag >> 18);
	KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d\n", tag));
	KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
	KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
	return (mpt->tgt_cmd_ptrs[rtg]);
}

static __inline int
mpt_req_on_free_list(struct mpt_softc *, request_t *);
static __inline int
mpt_req_on_pending_list(struct mpt_softc *, request_t *);

static __inline void
mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
static __inline void
mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);

/*
 * Is request on freelist?
 */
static __inline int
mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}

/*
 * Is request on pending list?
 */
static __inline int
mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
{
	request_t *lrq;

	TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
		if (lrq == req) {
			return (1);
		}
	}
	return (0);
}

/*
 * Make sure that req *is* part of one of the special lists
 */
static __inline void
mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		if (req == mpt->els_cmd_ptrs[i]) {
			return;
		}
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		if (req == mpt->tgt_cmd_ptrs[i]) {
			return;
		}
	}
	panic("%s(%d): req %p:%u function %x not in els or tgt ptrs\n",
	    s, line, req, req->serno,
	    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
}

/*
 * Make sure that req is *not* part of one of the special lists.
 */
static __inline void
mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		KASSERT(req != mpt->els_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d\n",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		KASSERT(req != mpt->tgt_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d\n",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
}
#endif

/*
 * Task Management Types, purely for internal consumption
 */
typedef enum {
	MPT_ABORT_TASK_SET=1234,
	MPT_CLEAR_TASK_SET,
	MPT_TARGET_RESET,
	MPT_CLEAR_ACA,
	MPT_TERMINATE_TASK,
	MPT_NIL_TMT_VALUE=5678
} mpt_task_mgmt_t;

/**************************** Unclassified Routines ***************************/
void		mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
int		mpt_recv_handshake_reply(struct mpt_softc *mpt,
					 size_t reply_len, void *reply);
int		mpt_wait_req(struct mpt_softc *mpt, request_t *req,
			     mpt_req_state_t state, mpt_req_state_t mask,
			     int sleep_ok, int time_ms);
void		mpt_enable_ints(struct mpt_softc *mpt);
void		mpt_disable_ints(struct mpt_softc *mpt);
int		mpt_attach(struct mpt_softc *mpt);
int		mpt_shutdown(struct mpt_softc *mpt);
int		mpt_detach(struct mpt_softc *mpt);
int		mpt_send_handshake_cmd(struct mpt_softc *mpt,
				       size_t len, void *cmd);
request_t *	mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
void		mpt_free_request(struct mpt_softc *mpt, request_t *req);
void		mpt_intr(void *arg);
void		mpt_check_doorbell(struct mpt_softc *mpt);
void		mpt_dump_reply_frame(struct mpt_softc *mpt,
				     MSG_DEFAULT_REPLY *reply_frame);

void		mpt_set_config_regs(struct mpt_softc *);
int		mpt_issue_cfg_req(struct mpt_softc * /*mpt*/, request_t * /*req*/,
				  cfgparms_t *params,
				  bus_addr_t /*addr*/, bus_size_t /*len*/,
				  int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion,
				       int PageNumber, uint32_t PageAddress,
				       int ExtPageType,
				       CONFIG_EXTENDED_PAGE_HEADER *rslt,
				       int sleep_ok, int timeout_ms);
int		mpt_read_extcfg_page(struct mpt_softc *mpt, int Action,
				     uint32_t PageAddress,
				     CONFIG_EXTENDED_PAGE_HEADER *hdr,
				     void *buf, size_t len, int sleep_ok,
				     int timeout_ms);
int		mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
				    int /*PageNumber*/,
				    uint32_t /*PageAddress*/,
				    CONFIG_PAGE_HEADER *,
				    int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
				  uint32_t /*PageAddress*/,
				  CONFIG_PAGE_HEADER *, size_t /*len*/,
				  int /*sleep_ok*/, int /*timeout_ms*/);
int		mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
				   uint32_t /*PageAddress*/,
				   CONFIG_PAGE_HEADER *, size_t /*len*/,
				   int /*sleep_ok*/, int /*timeout_ms*/);
static __inline int
mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		      CONFIG_PAGE_HEADER *hdr, size_t len,
		      int sleep_ok, int timeout_ms)
{
	return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    PageAddress, hdr, len, sleep_ok, timeout_ms));
}

static __inline int
mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		       CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		       int timeout_ms)
{
	return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
	    PageAddress, hdr, len, sleep_ok, timeout_ms));
}

/* mpt_debug.c functions */
void mpt_print_reply(void *vmsg);
void mpt_print_db(uint32_t mb);
void mpt_print_config_reply(void *vmsg);
char *mpt_ioc_diag(uint32_t diag);
void mpt_req_state(mpt_req_state_t state);
void mpt_print_config_request(void *vmsg);
void mpt_print_request(void *vmsg);
void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg);
void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
#endif /* _MPT_H_ */