1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. 24 */ 25 26 /* 27 * Copyright (c) 2000 to 2010, LSI Corporation. 28 * All rights reserved. 29 * 30 * Redistribution and use in source and binary forms of all code within 31 * this file that is exclusively owned by LSI, with or without 32 * modification, is permitted provided that, in addition to the CDDL 1.0 33 * License requirements, the following conditions are met: 34 * 35 * Neither the name of the author nor the names of its contributors may be 36 * used to endorse or promote products derived from this software without 37 * specific prior written permission. 38 * 39 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 40 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 41 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 42 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE 43 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 44 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 45 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 46 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 47 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 48 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 49 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 50 * DAMAGE. 51 */ 52 53 /* 54 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface. 55 * 56 */ 57 58 #if defined(lint) || defined(DEBUG) 59 #define MPTSAS_DEBUG 60 #endif 61 62 /* 63 * standard header files. 64 */ 65 #include <sys/note.h> 66 #include <sys/scsi/scsi.h> 67 #include <sys/pci.h> 68 #include <sys/file.h> 69 #include <sys/cpuvar.h> 70 #include <sys/policy.h> 71 #include <sys/sysevent.h> 72 #include <sys/sysevent/eventdefs.h> 73 #include <sys/sysevent/dr.h> 74 #include <sys/sata/sata_defs.h> 75 #include <sys/scsi/generic/sas.h> 76 #include <sys/scsi/impl/scsi_sas.h> 77 78 #pragma pack(1) 79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h> 80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h> 81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h> 82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h> 83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h> 84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h> 85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h> 86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h> 87 #pragma pack() 88 89 /* 90 * private header files. 
 *
 */
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
#include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
#include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>

#include <sys/raidioctl.h>

#include <sys/fs/dv_node.h>	/* devfs_clean */

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

/*
 * Note to anyone who would modify the code in the mptsas driver: since
 * snv_145, where CR6910752 (mpt_sas driver performance can be improved)
 * was integrated, the per-instance mutex m_mutex is not held in the key
 * I/O code path, including mptsas_scsi_start(), mptsas_intr() and all of
 * the functions they call recursively, so do not take it for granted
 * that all operations are correctly synchronized/excluded. Before doing
 * any modification in the key code path, and even in other code paths
 * such as DR, watchsubr, ioctl and passthrough, make sure the elements
 * modified have no relationship to the elements used in the fast path
 * (function mptsas_handle_io_fastpath()) in the ISR and its recursive
 * functions. Otherwise, you have to use the newly introduced mutex to
 * protect them. As to how to do this correctly, refer to the comments
 * in mptsas_intr().
 */

/*
 * autoconfiguration data and routines.
 */
static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int mptsas_power(dev_info_t *dip, int component, int level);

/*
 * cb_ops function
 */
static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
	cred_t *credp, int *rval);
#ifdef __sparc
static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
#else /* __sparc */
static int mptsas_quiesce(dev_info_t *devi);
#endif	/* __sparc */

/*
 * Resource initialization for hardware
 */
static void mptsas_setup_cmd_reg(mptsas_t *mpt);
static void mptsas_disable_bus_master(mptsas_t *mpt);
static void mptsas_hba_fini(mptsas_t *mpt);
static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
static int mptsas_hba_setup(mptsas_t *mpt);
static void mptsas_hba_teardown(mptsas_t *mpt);
static int mptsas_config_space_init(mptsas_t *mpt);
static void mptsas_config_space_fini(mptsas_t *mpt);
static void mptsas_iport_register(mptsas_t *mpt);
static int mptsas_smp_setup(mptsas_t *mpt);
static void mptsas_smp_teardown(mptsas_t *mpt);
static int mptsas_cache_create(mptsas_t *mpt);
static void mptsas_cache_destroy(mptsas_t *mpt);
static int mptsas_alloc_request_frames(mptsas_t *mpt);
static int mptsas_alloc_reply_frames(mptsas_t *mpt);
static int mptsas_alloc_free_queue(mptsas_t *mpt);
static int mptsas_alloc_post_queue(mptsas_t *mpt);
static void mptsas_alloc_reply_args(mptsas_t *mpt);
static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
static int mptsas_init_chip(mptsas_t *mpt, int first_time);

/*
 * SCSA function prototypes
 */
static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int mptsas_scsi_reset(struct scsi_address *ap, int level);
173 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt); 174 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly); 175 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, 176 int tgtonly); 177 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt); 178 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap, 179 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen, 180 int tgtlen, int flags, int (*callback)(), caddr_t arg); 181 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt); 182 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap, 183 struct scsi_pkt *pkt); 184 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 185 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 186 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 187 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 188 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag, 189 void (*callback)(caddr_t), caddr_t arg); 190 static int mptsas_get_name(struct scsi_device *sd, char *name, int len); 191 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len); 192 static int mptsas_scsi_quiesce(dev_info_t *dip); 193 static int mptsas_scsi_unquiesce(dev_info_t *dip); 194 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags, 195 ddi_bus_config_op_t op, void *arg, dev_info_t **childp); 196 197 /* 198 * SMP functions 199 */ 200 static int mptsas_smp_start(struct smp_pkt *smp_pkt); 201 202 /* 203 * internal function prototypes. 
204 */ 205 static void mptsas_list_add(mptsas_t *mpt); 206 static void mptsas_list_del(mptsas_t *mpt); 207 208 static int mptsas_quiesce_bus(mptsas_t *mpt); 209 static int mptsas_unquiesce_bus(mptsas_t *mpt); 210 211 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size); 212 static void mptsas_free_handshake_msg(mptsas_t *mpt); 213 214 static void mptsas_ncmds_checkdrain(void *arg); 215 216 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd); 217 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp); 218 219 static int mptsas_do_detach(dev_info_t *dev); 220 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl); 221 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, 222 struct scsi_pkt *pkt); 223 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp); 224 225 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd); 226 static void mptsas_handle_event(void *args); 227 static int mptsas_handle_event_sync(void *args); 228 static void mptsas_handle_dr(void *args); 229 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node, 230 dev_info_t *pdip); 231 232 static void mptsas_restart_cmd(void *); 233 234 static void mptsas_flush_hba(mptsas_t *mpt); 235 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, 236 uint8_t tasktype); 237 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, 238 uchar_t reason, uint_t stat); 239 240 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2); 241 static void mptsas_process_intr(mptsas_t *mpt, 242 pMpi2ReplyDescriptorsUnion_t reply_desc_union); 243 static int mptsas_handle_io_fastpath(mptsas_t *mpt, uint16_t SMID); 244 static void mptsas_handle_scsi_io_success(mptsas_t *mpt, 245 pMpi2ReplyDescriptorsUnion_t reply_desc); 246 static void mptsas_handle_address_reply(mptsas_t *mpt, 247 pMpi2ReplyDescriptorsUnion_t reply_desc); 248 static int mptsas_wait_intr(mptsas_t *mpt, int polltime); 249 static void 
mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, 250 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl); 251 252 static void mptsas_watch(void *arg); 253 static void mptsas_watchsubr(mptsas_t *mpt); 254 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl); 255 256 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd); 257 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply, 258 uint8_t *data, uint32_t request_size, uint32_t reply_size, 259 uint32_t data_size, uint32_t direction, uint8_t *dataout, 260 uint32_t dataout_size, short timeout, int mode); 261 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl); 262 263 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, 264 uint32_t unique_id); 265 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd); 266 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt, 267 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code); 268 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt, 269 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code, 270 uint32_t diag_type); 271 static int mptsas_diag_register(mptsas_t *mpt, 272 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code); 273 static int mptsas_diag_unregister(mptsas_t *mpt, 274 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code); 275 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query, 276 uint32_t *return_code); 277 static int mptsas_diag_read_buffer(mptsas_t *mpt, 278 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf, 279 uint32_t *return_code, int ioctl_mode); 280 static int mptsas_diag_release(mptsas_t *mpt, 281 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code); 282 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, 283 uint8_t *diag_action, uint32_t length, uint32_t *return_code, 284 int ioctl_mode); 285 static int mptsas_diag_action(mptsas_t *mpt, 
mptsas_diag_action_t *data, 286 int mode); 287 288 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd, 289 int cmdlen, int tgtlen, int statuslen, int kf); 290 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd); 291 292 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags); 293 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg); 294 295 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg, 296 int kmflags); 297 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg); 298 299 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply, 300 mptsas_cmd_t *cmd); 301 static void mptsas_check_task_mgt(mptsas_t *mpt, 302 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd); 303 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap, 304 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp, 305 int *resid); 306 307 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag); 308 static void mptsas_free_active_slots(mptsas_t *mpt); 309 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd); 310 static int mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd); 311 312 static void mptsas_restart_hba(mptsas_t *mpt); 313 314 static void mptsas_deliver_doneq_thread(mptsas_t *mpt); 315 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd); 316 static inline void mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd); 317 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t); 318 319 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t); 320 static void mptsas_doneq_empty(mptsas_t *mpt); 321 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg); 322 323 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt); 324 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd); 325 326 static void mptsas_start_watch_reset_delay(); 327 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt); 
328 static void mptsas_watch_reset_delay(void *arg); 329 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt); 330 331 static int mptsas_outstanding_cmds_n(mptsas_t *mpt); 332 /* 333 * helper functions 334 */ 335 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd); 336 337 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name); 338 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy); 339 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, 340 int lun); 341 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr, 342 int lun); 343 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy); 344 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn); 345 346 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, 347 int *lun); 348 static int mptsas_parse_smp_name(char *name, uint64_t *wwn); 349 350 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, 351 uint8_t phy); 352 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, 353 uint64_t wwid); 354 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, 355 uint64_t wwid); 356 357 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, 358 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd); 359 360 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address, 361 uint16_t *handle, mptsas_target_t **pptgt); 362 static void mptsas_update_phymask(mptsas_t *mpt); 363 static inline void mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd); 364 365 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt, 366 uint32_t *status, uint8_t cmd); 367 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev, 368 mptsas_phymask_t *phymask); 369 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, 370 mptsas_phymask_t phymask); 371 static int 
mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt, 372 uint32_t slotstatus); 373 374 375 /* 376 * Enumeration / DR functions 377 */ 378 static void mptsas_config_all(dev_info_t *pdip); 379 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun, 380 dev_info_t **lundip); 381 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun, 382 dev_info_t **lundip); 383 384 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt); 385 static int mptsas_offline_target(dev_info_t *pdip, char *name); 386 387 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target, 388 dev_info_t **dip); 389 390 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt); 391 static int mptsas_probe_lun(dev_info_t *pdip, int lun, 392 dev_info_t **dip, mptsas_target_t *ptgt); 393 394 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq, 395 dev_info_t **dip, mptsas_target_t *ptgt, int lun); 396 397 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd, 398 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun); 399 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd, 400 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, 401 int lun); 402 403 static void mptsas_offline_missed_luns(dev_info_t *pdip, 404 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt); 405 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip, 406 mdi_pathinfo_t *rpip, uint_t flags); 407 408 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, 409 dev_info_t **smp_dip); 410 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, 411 uint_t flags); 412 413 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, 414 int mode, int *rval); 415 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, 416 int mode, int *rval); 417 static int mptsas_event_report(mptsas_t *mpt, 
mptsas_event_report_t *data, 418 int mode, int *rval); 419 static void mptsas_record_event(void *args); 420 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, 421 int mode); 422 423 static void mptsas_hash_init(mptsas_hash_table_t *hashtab); 424 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen); 425 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data); 426 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1, 427 mptsas_phymask_t key2); 428 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1, 429 mptsas_phymask_t key2); 430 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos); 431 432 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t, 433 uint32_t, mptsas_phymask_t, uint8_t, mptsas_t *); 434 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab, 435 mptsas_smp_t *data); 436 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid, 437 mptsas_phymask_t phymask); 438 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t); 439 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t); 440 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, 441 dev_info_t **smp_dip); 442 443 /* 444 * Power management functions 445 */ 446 static int mptsas_get_pci_cap(mptsas_t *mpt); 447 static int mptsas_init_pm(mptsas_t *mpt); 448 449 /* 450 * MPT MSI tunable: 451 * 452 * By default MSI is enabled on all supported platforms. 
453 */ 454 boolean_t mptsas_enable_msi = B_TRUE; 455 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE; 456 457 static int mptsas_register_intrs(mptsas_t *); 458 static void mptsas_unregister_intrs(mptsas_t *); 459 static int mptsas_add_intrs(mptsas_t *, int); 460 static void mptsas_rem_intrs(mptsas_t *); 461 462 /* 463 * FMA Prototypes 464 */ 465 static void mptsas_fm_init(mptsas_t *mpt); 466 static void mptsas_fm_fini(mptsas_t *mpt); 467 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *); 468 469 extern pri_t minclsyspri, maxclsyspri; 470 471 /* 472 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is 473 * under this device that the paths to a physical device are created when 474 * MPxIO is used. 475 */ 476 extern dev_info_t *scsi_vhci_dip; 477 478 /* 479 * Tunable timeout value for Inquiry VPD page 0x83 480 * By default the value is 30 seconds. 481 */ 482 int mptsas_inq83_retry_timeout = 30; 483 484 /* 485 * This is used to allocate memory for message frame storage, not for 486 * data I/O DMA. All message frames must be stored in the first 4G of 487 * physical memory. 488 */ 489 ddi_dma_attr_t mptsas_dma_attrs = { 490 DMA_ATTR_V0, /* attribute layout version */ 491 0x0ull, /* address low - should be 0 (longlong) */ 492 0xffffffffull, /* address high - 32-bit max range */ 493 0x00ffffffull, /* count max - max DMA object size */ 494 4, /* allocation alignment requirements */ 495 0x78, /* burstsizes - binary encoded values */ 496 1, /* minxfer - gran. of DMA engine */ 497 0x00ffffffull, /* maxxfer - gran. of DMA engine */ 498 0xffffffffull, /* max segment size (DMA boundary) */ 499 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */ 500 512, /* granularity - device transfer size */ 501 0 /* flags, set to 0 */ 502 }; 503 504 /* 505 * This is used for data I/O DMA memory allocation. (full 64-bit DMA 506 * physical addresses are supported.) 
507 */ 508 ddi_dma_attr_t mptsas_dma_attrs64 = { 509 DMA_ATTR_V0, /* attribute layout version */ 510 0x0ull, /* address low - should be 0 (longlong) */ 511 0xffffffffffffffffull, /* address high - 64-bit max */ 512 0x00ffffffull, /* count max - max DMA object size */ 513 4, /* allocation alignment requirements */ 514 0x78, /* burstsizes - binary encoded values */ 515 1, /* minxfer - gran. of DMA engine */ 516 0x00ffffffull, /* maxxfer - gran. of DMA engine */ 517 0xffffffffull, /* max segment size (DMA boundary) */ 518 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */ 519 512, /* granularity - device transfer size */ 520 DDI_DMA_RELAXED_ORDERING /* flags, enable relaxed ordering */ 521 }; 522 523 ddi_device_acc_attr_t mptsas_dev_attr = { 524 DDI_DEVICE_ATTR_V1, 525 DDI_STRUCTURE_LE_ACC, 526 DDI_STRICTORDER_ACC, 527 DDI_DEFAULT_ACC 528 }; 529 530 static struct cb_ops mptsas_cb_ops = { 531 scsi_hba_open, /* open */ 532 scsi_hba_close, /* close */ 533 nodev, /* strategy */ 534 nodev, /* print */ 535 nodev, /* dump */ 536 nodev, /* read */ 537 nodev, /* write */ 538 mptsas_ioctl, /* ioctl */ 539 nodev, /* devmap */ 540 nodev, /* mmap */ 541 nodev, /* segmap */ 542 nochpoll, /* chpoll */ 543 ddi_prop_op, /* cb_prop_op */ 544 NULL, /* streamtab */ 545 D_MP, /* cb_flag */ 546 CB_REV, /* rev */ 547 nodev, /* aread */ 548 nodev /* awrite */ 549 }; 550 551 static struct dev_ops mptsas_ops = { 552 DEVO_REV, /* devo_rev, */ 553 0, /* refcnt */ 554 ddi_no_info, /* info */ 555 nulldev, /* identify */ 556 nulldev, /* probe */ 557 mptsas_attach, /* attach */ 558 mptsas_detach, /* detach */ 559 #ifdef __sparc 560 mptsas_reset, 561 #else 562 nodev, /* reset */ 563 #endif /* __sparc */ 564 &mptsas_cb_ops, /* driver operations */ 565 NULL, /* bus operations */ 566 mptsas_power, /* power management */ 567 #ifdef __sparc 568 ddi_quiesce_not_needed 569 #else 570 mptsas_quiesce /* quiesce */ 571 #endif /* __sparc */ 572 }; 573 574 575 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 
00.00.00.24" 576 577 static struct modldrv modldrv = { 578 &mod_driverops, /* Type of module. This one is a driver */ 579 MPTSAS_MOD_STRING, /* Name of the module. */ 580 &mptsas_ops, /* driver ops */ 581 }; 582 583 static struct modlinkage modlinkage = { 584 MODREV_1, &modldrv, NULL 585 }; 586 #define TARGET_PROP "target" 587 #define LUN_PROP "lun" 588 #define LUN64_PROP "lun64" 589 #define SAS_PROP "sas-mpt" 590 #define MDI_GUID "wwn" 591 #define NDI_GUID "guid" 592 #define MPTSAS_DEV_GONE "mptsas_dev_gone" 593 594 /* 595 * Local static data 596 */ 597 #if defined(MPTSAS_DEBUG) 598 uint32_t mptsas_debug_flags = 0; 599 #endif /* defined(MPTSAS_DEBUG) */ 600 uint32_t mptsas_debug_resets = 0; 601 602 static kmutex_t mptsas_global_mutex; 603 static void *mptsas_state; /* soft state ptr */ 604 static krwlock_t mptsas_global_rwlock; 605 606 static kmutex_t mptsas_log_mutex; 607 static char mptsas_log_buf[256]; 608 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf)) 609 610 static mptsas_t *mptsas_head, *mptsas_tail; 611 static clock_t mptsas_scsi_watchdog_tick; 612 static clock_t mptsas_tick; 613 static timeout_id_t mptsas_reset_watch; 614 static timeout_id_t mptsas_timeout_id; 615 static int mptsas_timeouts_enabled = 0; 616 /* 617 * warlock directives 618 */ 619 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \ 620 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status)) 621 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt)) 622 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address)) 623 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private)) 624 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private)) 625 626 /* 627 * SM - HBA statics 628 */ 629 char *mptsas_driver_rev = MPTSAS_MOD_STRING; 630 631 #ifdef MPTSAS_DEBUG 632 void debug_enter(char *); 633 #endif 634 635 /* 636 * Notes: 637 * - scsi_hba_init(9F) initializes SCSI HBA modules 638 * - must call scsi_hba_fini(9F) if modload() fails 639 */ 640 int 
641 _init(void) 642 { 643 int status; 644 /* CONSTCOND */ 645 ASSERT(NO_COMPETING_THREADS); 646 647 NDBG0(("_init")); 648 649 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE, 650 MPTSAS_INITIAL_SOFT_SPACE); 651 if (status != 0) { 652 return (status); 653 } 654 655 if ((status = scsi_hba_init(&modlinkage)) != 0) { 656 ddi_soft_state_fini(&mptsas_state); 657 return (status); 658 } 659 660 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL); 661 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL); 662 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL); 663 664 if ((status = mod_install(&modlinkage)) != 0) { 665 mutex_destroy(&mptsas_log_mutex); 666 rw_destroy(&mptsas_global_rwlock); 667 mutex_destroy(&mptsas_global_mutex); 668 ddi_soft_state_fini(&mptsas_state); 669 scsi_hba_fini(&modlinkage); 670 } 671 672 return (status); 673 } 674 675 /* 676 * Notes: 677 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules 678 */ 679 int 680 _fini(void) 681 { 682 int status; 683 /* CONSTCOND */ 684 ASSERT(NO_COMPETING_THREADS); 685 686 NDBG0(("_fini")); 687 688 if ((status = mod_remove(&modlinkage)) == 0) { 689 ddi_soft_state_fini(&mptsas_state); 690 scsi_hba_fini(&modlinkage); 691 mutex_destroy(&mptsas_global_mutex); 692 rw_destroy(&mptsas_global_rwlock); 693 mutex_destroy(&mptsas_log_mutex); 694 } 695 return (status); 696 } 697 698 /* 699 * The loadable-module _info(9E) entry point 700 */ 701 int 702 _info(struct modinfo *modinfop) 703 { 704 /* CONSTCOND */ 705 ASSERT(NO_COMPETING_THREADS); 706 NDBG0(("mptsas _info")); 707 708 return (mod_info(&modlinkage, modinfop)); 709 } 710 711 712 static int 713 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 714 { 715 dev_info_t *pdip; 716 mptsas_t *mpt; 717 scsi_hba_tran_t *hba_tran; 718 char *iport = NULL; 719 char phymask[MPTSAS_MAX_PHYS]; 720 mptsas_phymask_t phy_mask = 0; 721 int dynamic_port = 0; 722 uint32_t page_address; 723 char initiator_wwnstr[MPTSAS_WWN_STRLEN]; 724 int rval = DDI_FAILURE; 725 
int i = 0; 726 uint8_t numphys = 0; 727 uint8_t phy_id; 728 uint8_t phy_port = 0; 729 uint16_t attached_devhdl = 0; 730 uint32_t dev_info; 731 uint64_t attached_sas_wwn; 732 uint16_t dev_hdl; 733 uint16_t pdev_hdl; 734 uint16_t bay_num, enclosure; 735 char attached_wwnstr[MPTSAS_WWN_STRLEN]; 736 737 /* CONSTCOND */ 738 ASSERT(NO_COMPETING_THREADS); 739 740 switch (cmd) { 741 case DDI_ATTACH: 742 break; 743 744 case DDI_RESUME: 745 /* 746 * If this a scsi-iport node, nothing to do here. 747 */ 748 return (DDI_SUCCESS); 749 750 default: 751 return (DDI_FAILURE); 752 } 753 754 pdip = ddi_get_parent(dip); 755 756 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) == 757 NULL) { 758 cmn_err(CE_WARN, "Failed attach iport because fail to " 759 "get tran vector for the HBA node"); 760 return (DDI_FAILURE); 761 } 762 763 mpt = TRAN2MPT(hba_tran); 764 ASSERT(mpt != NULL); 765 if (mpt == NULL) 766 return (DDI_FAILURE); 767 768 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == 769 NULL) { 770 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to " 771 "get tran vector for the iport node"); 772 return (DDI_FAILURE); 773 } 774 775 /* 776 * Overwrite parent's tran_hba_private to iport's tran vector 777 */ 778 hba_tran->tran_hba_private = mpt; 779 780 ddi_report_dev(dip); 781 782 /* 783 * Get SAS address for initiator port according dev_handle 784 */ 785 iport = ddi_get_name_addr(dip); 786 if (iport && strncmp(iport, "v0", 2) == 0) { 787 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, 788 MPTSAS_VIRTUAL_PORT, 1) != 789 DDI_PROP_SUCCESS) { 790 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, 791 MPTSAS_VIRTUAL_PORT); 792 mptsas_log(mpt, CE_WARN, "mptsas virtual port " 793 "prop update failed"); 794 return (DDI_FAILURE); 795 } 796 return (DDI_SUCCESS); 797 } 798 799 mutex_enter(&mpt->m_mutex); 800 for (i = 0; i < MPTSAS_MAX_PHYS; i++) { 801 bzero(phymask, sizeof (phymask)); 802 (void) sprintf(phymask, 803 "%x", mpt->m_phy_info[i].phy_mask); 804 if 
(strcmp(phymask, iport) == 0) { 805 break; 806 } 807 } 808 809 if (i == MPTSAS_MAX_PHYS) { 810 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port" 811 "seems not exist", iport); 812 mutex_exit(&mpt->m_mutex); 813 return (DDI_FAILURE); 814 } 815 816 phy_mask = mpt->m_phy_info[i].phy_mask; 817 818 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION) 819 dynamic_port = 1; 820 else 821 dynamic_port = 0; 822 823 /* 824 * Update PHY info for smhba 825 */ 826 if (mptsas_smhba_phy_init(mpt)) { 827 mutex_exit(&mpt->m_mutex); 828 mptsas_log(mpt, CE_WARN, "mptsas phy update " 829 "failed"); 830 return (DDI_FAILURE); 831 } 832 833 mutex_exit(&mpt->m_mutex); 834 835 numphys = 0; 836 for (i = 0; i < MPTSAS_MAX_PHYS; i++) { 837 if ((phy_mask >> i) & 0x01) { 838 numphys++; 839 } 840 } 841 842 bzero(initiator_wwnstr, sizeof (initiator_wwnstr)); 843 (void) sprintf(initiator_wwnstr, "w%016"PRIx64, 844 mpt->un.m_base_wwid); 845 846 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, 847 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) != 848 DDI_PROP_SUCCESS) { 849 (void) ddi_prop_remove(DDI_DEV_T_NONE, 850 dip, SCSI_ADDR_PROP_INITIATOR_PORT); 851 mptsas_log(mpt, CE_WARN, "mptsas Initiator port " 852 "prop update failed"); 853 return (DDI_FAILURE); 854 } 855 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, 856 MPTSAS_NUM_PHYS, numphys) != 857 DDI_PROP_SUCCESS) { 858 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS); 859 return (DDI_FAILURE); 860 } 861 862 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, 863 "phymask", phy_mask) != 864 DDI_PROP_SUCCESS) { 865 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask"); 866 mptsas_log(mpt, CE_WARN, "mptsas phy mask " 867 "prop update failed"); 868 return (DDI_FAILURE); 869 } 870 871 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, 872 "dynamic-port", dynamic_port) != 873 DDI_PROP_SUCCESS) { 874 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port"); 875 mptsas_log(mpt, CE_WARN, "mptsas dynamic port " 876 "prop update 
failed"); 877 return (DDI_FAILURE); 878 } 879 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, 880 MPTSAS_VIRTUAL_PORT, 0) != 881 DDI_PROP_SUCCESS) { 882 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, 883 MPTSAS_VIRTUAL_PORT); 884 mptsas_log(mpt, CE_WARN, "mptsas virtual port " 885 "prop update failed"); 886 return (DDI_FAILURE); 887 } 888 mptsas_smhba_set_phy_props(mpt, 889 iport, dip, numphys, &attached_devhdl); 890 891 mutex_enter(&mpt->m_mutex); 892 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE & 893 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl; 894 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl, 895 &attached_sas_wwn, &dev_info, &phy_port, &phy_id, 896 &pdev_hdl, &bay_num, &enclosure); 897 if (rval != DDI_SUCCESS) { 898 mptsas_log(mpt, CE_WARN, 899 "Failed to get device page0 for handle:%d", 900 attached_devhdl); 901 mutex_exit(&mpt->m_mutex); 902 return (DDI_FAILURE); 903 } 904 905 for (i = 0; i < MPTSAS_MAX_PHYS; i++) { 906 bzero(phymask, sizeof (phymask)); 907 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask); 908 if (strcmp(phymask, iport) == 0) { 909 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0], 910 "%x", 911 mpt->m_phy_info[i].phy_mask); 912 } 913 } 914 mutex_exit(&mpt->m_mutex); 915 916 bzero(attached_wwnstr, sizeof (attached_wwnstr)); 917 (void) sprintf(attached_wwnstr, "w%016"PRIx64, 918 attached_sas_wwn); 919 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, 920 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) != 921 DDI_PROP_SUCCESS) { 922 (void) ddi_prop_remove(DDI_DEV_T_NONE, 923 dip, SCSI_ADDR_PROP_ATTACHED_PORT); 924 return (DDI_FAILURE); 925 } 926 927 /* Create kstats for each phy on this iport */ 928 929 mptsas_create_phy_stats(mpt, iport, dip); 930 931 /* 932 * register sas hba iport with mdi (MPxIO/vhci) 933 */ 934 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, 935 dip, 0) == MDI_SUCCESS) { 936 mpt->m_mpxio_enable = TRUE; 937 } 938 return (DDI_SUCCESS); 939 } 940 941 /* 942 * Notes: 943 * Set up all device 
state and allocate data structures, 944 * mutexes, condition variables, etc. for device operation. 945 * Add interrupts needed. 946 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE. 947 */ 948 static int 949 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 950 { 951 mptsas_t *mpt = NULL; 952 int instance, i, j; 953 int doneq_thread_num; 954 char intr_added = 0; 955 char map_setup = 0; 956 char config_setup = 0; 957 char hba_attach_setup = 0; 958 char smp_attach_setup = 0; 959 char mutex_init_done = 0; 960 char event_taskq_create = 0; 961 char dr_taskq_create = 0; 962 char doneq_thread_create = 0; 963 scsi_hba_tran_t *hba_tran; 964 uint_t mem_bar = MEM_SPACE; 965 int rval = DDI_FAILURE; 966 967 /* CONSTCOND */ 968 ASSERT(NO_COMPETING_THREADS); 969 970 if (scsi_hba_iport_unit_address(dip)) { 971 return (mptsas_iport_attach(dip, cmd)); 972 } 973 974 switch (cmd) { 975 case DDI_ATTACH: 976 break; 977 978 case DDI_RESUME: 979 if ((hba_tran = ddi_get_driver_private(dip)) == NULL) 980 return (DDI_FAILURE); 981 982 mpt = TRAN2MPT(hba_tran); 983 984 if (!mpt) { 985 return (DDI_FAILURE); 986 } 987 988 /* 989 * Reset hardware and softc to "no outstanding commands" 990 * Note that a check condition can result on first command 991 * to a target. 992 */ 993 mutex_enter(&mpt->m_mutex); 994 995 /* 996 * raise power. 997 */ 998 if (mpt->m_options & MPTSAS_OPT_PM) { 999 mutex_exit(&mpt->m_mutex); 1000 (void) pm_busy_component(dip, 0); 1001 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0); 1002 if (rval == DDI_SUCCESS) { 1003 mutex_enter(&mpt->m_mutex); 1004 } else { 1005 /* 1006 * The pm_raise_power() call above failed, 1007 * and that can only occur if we were unable 1008 * to reset the hardware. 
This is probably 1009 * due to unhealty hardware, and because 1010 * important filesystems(such as the root 1011 * filesystem) could be on the attached disks, 1012 * it would not be a good idea to continue, 1013 * as we won't be entirely certain we are 1014 * writing correct data. So we panic() here 1015 * to not only prevent possible data corruption, 1016 * but to give developers or end users a hope 1017 * of identifying and correcting any problems. 1018 */ 1019 fm_panic("mptsas could not reset hardware " 1020 "during resume"); 1021 } 1022 } 1023 1024 mpt->m_suspended = 0; 1025 1026 /* 1027 * Reinitialize ioc 1028 */ 1029 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET; 1030 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) { 1031 mutex_exit(&mpt->m_mutex); 1032 if (mpt->m_options & MPTSAS_OPT_PM) { 1033 (void) pm_idle_component(dip, 0); 1034 } 1035 fm_panic("mptsas init chip fail during resume"); 1036 } 1037 /* 1038 * mptsas_update_driver_data needs interrupts so enable them 1039 * first. 1040 */ 1041 MPTSAS_ENABLE_INTR(mpt); 1042 mptsas_update_driver_data(mpt); 1043 1044 /* start requests, if possible */ 1045 mptsas_restart_hba(mpt); 1046 1047 mutex_exit(&mpt->m_mutex); 1048 1049 /* 1050 * Restart watch thread 1051 */ 1052 mutex_enter(&mptsas_global_mutex); 1053 if (mptsas_timeout_id == 0) { 1054 mptsas_timeout_id = timeout(mptsas_watch, NULL, 1055 mptsas_tick); 1056 mptsas_timeouts_enabled = 1; 1057 } 1058 mutex_exit(&mptsas_global_mutex); 1059 1060 /* report idle status to pm framework */ 1061 if (mpt->m_options & MPTSAS_OPT_PM) { 1062 (void) pm_idle_component(dip, 0); 1063 } 1064 1065 return (DDI_SUCCESS); 1066 1067 default: 1068 return (DDI_FAILURE); 1069 1070 } 1071 1072 instance = ddi_get_instance(dip); 1073 1074 /* 1075 * Allocate softc information. 
1076 */ 1077 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) { 1078 mptsas_log(NULL, CE_WARN, 1079 "mptsas%d: cannot allocate soft state", instance); 1080 goto fail; 1081 } 1082 1083 mpt = ddi_get_soft_state(mptsas_state, instance); 1084 1085 if (mpt == NULL) { 1086 mptsas_log(NULL, CE_WARN, 1087 "mptsas%d: cannot get soft state", instance); 1088 goto fail; 1089 } 1090 1091 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */ 1092 scsi_size_clean(dip); 1093 1094 mpt->m_dip = dip; 1095 mpt->m_instance = instance; 1096 1097 /* Make a per-instance copy of the structures */ 1098 mpt->m_io_dma_attr = mptsas_dma_attrs64; 1099 mpt->m_msg_dma_attr = mptsas_dma_attrs; 1100 mpt->m_reg_acc_attr = mptsas_dev_attr; 1101 mpt->m_dev_acc_attr = mptsas_dev_attr; 1102 1103 /* 1104 * Initialize FMA 1105 */ 1106 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip, 1107 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 1108 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 1109 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 1110 1111 mptsas_fm_init(mpt); 1112 1113 if (mptsas_alloc_handshake_msg(mpt, 1114 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) { 1115 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg."); 1116 goto fail; 1117 } 1118 1119 /* 1120 * Setup configuration space 1121 */ 1122 if (mptsas_config_space_init(mpt) == FALSE) { 1123 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed"); 1124 goto fail; 1125 } 1126 config_setup++; 1127 1128 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg, 1129 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) { 1130 mptsas_log(mpt, CE_WARN, "map setup failed"); 1131 goto fail; 1132 } 1133 map_setup++; 1134 1135 /* 1136 * A taskq is created for dealing with the event handler 1137 */ 1138 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq", 1139 1, TASKQ_DEFAULTPRI, 0)) == NULL) { 1140 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed"); 1141 goto 
fail; 1142 } 1143 event_taskq_create++; 1144 1145 /* 1146 * A taskq is created for dealing with dr events 1147 */ 1148 if ((mpt->m_dr_taskq = ddi_taskq_create(dip, 1149 "mptsas_dr_taskq", 1150 1, TASKQ_DEFAULTPRI, 0)) == NULL) { 1151 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery " 1152 "failed"); 1153 goto fail; 1154 } 1155 dr_taskq_create++; 1156 1157 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1158 0, "mptsas_doneq_thread_threshold_prop", 10); 1159 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1160 0, "mptsas_doneq_length_threshold_prop", 8); 1161 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1162 0, "mptsas_doneq_thread_n_prop", 8); 1163 1164 if (mpt->m_doneq_thread_n) { 1165 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL); 1166 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL); 1167 1168 mutex_enter(&mpt->m_doneq_mutex); 1169 mpt->m_doneq_thread_id = 1170 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t) 1171 * mpt->m_doneq_thread_n, KM_SLEEP); 1172 1173 for (j = 0; j < mpt->m_doneq_thread_n; j++) { 1174 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL, 1175 CV_DRIVER, NULL); 1176 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL, 1177 MUTEX_DRIVER, NULL); 1178 mutex_enter(&mpt->m_doneq_thread_id[j].mutex); 1179 mpt->m_doneq_thread_id[j].flag |= 1180 MPTSAS_DONEQ_THREAD_ACTIVE; 1181 mpt->m_doneq_thread_id[j].arg.mpt = mpt; 1182 mpt->m_doneq_thread_id[j].arg.t = j; 1183 mpt->m_doneq_thread_id[j].threadp = 1184 thread_create(NULL, 0, mptsas_doneq_thread, 1185 &mpt->m_doneq_thread_id[j].arg, 1186 0, &p0, TS_RUN, minclsyspri); 1187 mpt->m_doneq_thread_id[j].donetail = 1188 &mpt->m_doneq_thread_id[j].doneq; 1189 mutex_exit(&mpt->m_doneq_thread_id[j].mutex); 1190 } 1191 mutex_exit(&mpt->m_doneq_mutex); 1192 doneq_thread_create++; 1193 } 1194 1195 /* Initialize mutex used in interrupt handler */ 1196 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER, 1197 DDI_INTR_PRI(mpt->m_intr_pri)); 1198 
mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL); 1199 mutex_init(&mpt->m_intr_mutex, NULL, MUTEX_DRIVER, 1200 DDI_INTR_PRI(mpt->m_intr_pri)); 1201 for (i = 0; i < MPTSAS_MAX_PHYS; i++) { 1202 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex, 1203 NULL, MUTEX_DRIVER, 1204 DDI_INTR_PRI(mpt->m_intr_pri)); 1205 } 1206 1207 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL); 1208 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL); 1209 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL); 1210 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL); 1211 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL); 1212 mutex_init_done++; 1213 1214 /* 1215 * Disable hardware interrupt since we're not ready to 1216 * handle it yet. 1217 */ 1218 MPTSAS_DISABLE_INTR(mpt); 1219 if (mptsas_register_intrs(mpt) == FALSE) 1220 goto fail; 1221 intr_added++; 1222 1223 mutex_enter(&mpt->m_mutex); 1224 /* 1225 * Initialize power management component 1226 */ 1227 if (mpt->m_options & MPTSAS_OPT_PM) { 1228 if (mptsas_init_pm(mpt)) { 1229 mutex_exit(&mpt->m_mutex); 1230 mptsas_log(mpt, CE_WARN, "mptsas pm initialization " 1231 "failed"); 1232 goto fail; 1233 } 1234 } 1235 1236 /* 1237 * Initialize chip using Message Unit Reset, if allowed 1238 */ 1239 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET; 1240 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) { 1241 mutex_exit(&mpt->m_mutex); 1242 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed"); 1243 goto fail; 1244 } 1245 1246 /* 1247 * Fill in the phy_info structure and get the base WWID 1248 */ 1249 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) { 1250 mptsas_log(mpt, CE_WARN, 1251 "mptsas_get_manufacture_page5 failed!"); 1252 goto fail; 1253 } 1254 1255 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) { 1256 mptsas_log(mpt, CE_WARN, 1257 "mptsas_get_sas_io_unit_page_hndshk failed!"); 1258 goto fail; 1259 } 1260 1261 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) { 1262 mptsas_log(mpt, CE_WARN, 1263 "mptsas_get_manufacture_page0 
failed!"); 1264 goto fail; 1265 } 1266 1267 mutex_exit(&mpt->m_mutex); 1268 1269 /* 1270 * Register the iport for multiple port HBA 1271 */ 1272 mptsas_iport_register(mpt); 1273 1274 /* 1275 * initialize SCSI HBA transport structure 1276 */ 1277 if (mptsas_hba_setup(mpt) == FALSE) 1278 goto fail; 1279 hba_attach_setup++; 1280 1281 if (mptsas_smp_setup(mpt) == FALSE) 1282 goto fail; 1283 smp_attach_setup++; 1284 1285 if (mptsas_cache_create(mpt) == FALSE) 1286 goto fail; 1287 1288 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY, 1289 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY); 1290 if (mpt->m_scsi_reset_delay == 0) { 1291 mptsas_log(mpt, CE_NOTE, 1292 "scsi_reset_delay of 0 is not recommended," 1293 " resetting to SCSI_DEFAULT_RESET_DELAY\n"); 1294 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY; 1295 } 1296 1297 /* 1298 * Initialize the wait and done FIFO queue 1299 */ 1300 mpt->m_donetail = &mpt->m_doneq; 1301 mpt->m_waitqtail = &mpt->m_waitq; 1302 1303 /* 1304 * ioc cmd queue initialize 1305 */ 1306 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq; 1307 mpt->m_dev_handle = 0xFFFF; 1308 1309 MPTSAS_ENABLE_INTR(mpt); 1310 1311 /* 1312 * enable event notification 1313 */ 1314 mutex_enter(&mpt->m_mutex); 1315 if (mptsas_ioc_enable_event_notification(mpt)) { 1316 mutex_exit(&mpt->m_mutex); 1317 goto fail; 1318 } 1319 mutex_exit(&mpt->m_mutex); 1320 1321 /* 1322 * Initialize PHY info for smhba 1323 */ 1324 if (mptsas_smhba_setup(mpt)) { 1325 mptsas_log(mpt, CE_WARN, "mptsas phy initialization " 1326 "failed"); 1327 goto fail; 1328 } 1329 1330 /* Check all dma handles allocated in attach */ 1331 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) 1332 != DDI_SUCCESS) || 1333 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) 1334 != DDI_SUCCESS) || 1335 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) 1336 != DDI_SUCCESS) || 1337 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) 1338 != DDI_SUCCESS) || 1339 
(mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) 1340 != DDI_SUCCESS)) { 1341 goto fail; 1342 } 1343 1344 /* Check all acc handles allocated in attach */ 1345 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) || 1346 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) 1347 != DDI_SUCCESS) || 1348 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) 1349 != DDI_SUCCESS) || 1350 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) 1351 != DDI_SUCCESS) || 1352 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) 1353 != DDI_SUCCESS) || 1354 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) 1355 != DDI_SUCCESS) || 1356 (mptsas_check_acc_handle(mpt->m_config_handle) 1357 != DDI_SUCCESS)) { 1358 goto fail; 1359 } 1360 1361 /* 1362 * After this point, we are not going to fail the attach. 1363 */ 1364 /* 1365 * used for mptsas_watch 1366 */ 1367 mptsas_list_add(mpt); 1368 1369 mutex_enter(&mptsas_global_mutex); 1370 if (mptsas_timeouts_enabled == 0) { 1371 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY, 1372 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK); 1373 1374 mptsas_tick = mptsas_scsi_watchdog_tick * 1375 drv_usectohz((clock_t)1000000); 1376 1377 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick); 1378 mptsas_timeouts_enabled = 1; 1379 } 1380 mutex_exit(&mptsas_global_mutex); 1381 1382 /* Print message of HBA present */ 1383 ddi_report_dev(dip); 1384 1385 /* report idle status to pm framework */ 1386 if (mpt->m_options & MPTSAS_OPT_PM) { 1387 (void) pm_idle_component(dip, 0); 1388 } 1389 1390 return (DDI_SUCCESS); 1391 1392 fail: 1393 mptsas_log(mpt, CE_WARN, "attach failed"); 1394 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE); 1395 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST); 1396 if (mpt) { 1397 mutex_enter(&mptsas_global_mutex); 1398 1399 if (mptsas_timeout_id && (mptsas_head == NULL)) { 1400 timeout_id_t tid = mptsas_timeout_id; 1401 mptsas_timeouts_enabled = 0; 1402 mptsas_timeout_id = 0; 1403 mutex_exit(&mptsas_global_mutex); 
1404 (void) untimeout(tid); 1405 mutex_enter(&mptsas_global_mutex); 1406 } 1407 mutex_exit(&mptsas_global_mutex); 1408 /* deallocate in reverse order */ 1409 mptsas_cache_destroy(mpt); 1410 1411 if (smp_attach_setup) { 1412 mptsas_smp_teardown(mpt); 1413 } 1414 if (hba_attach_setup) { 1415 mptsas_hba_teardown(mpt); 1416 } 1417 1418 if (mpt->m_active) { 1419 mptsas_hash_uninit(&mpt->m_active->m_smptbl, 1420 sizeof (mptsas_smp_t)); 1421 mptsas_hash_uninit(&mpt->m_active->m_tgttbl, 1422 sizeof (mptsas_target_t)); 1423 mptsas_free_active_slots(mpt); 1424 } 1425 if (intr_added) { 1426 mptsas_unregister_intrs(mpt); 1427 } 1428 1429 if (doneq_thread_create) { 1430 mutex_enter(&mpt->m_doneq_mutex); 1431 doneq_thread_num = mpt->m_doneq_thread_n; 1432 for (j = 0; j < mpt->m_doneq_thread_n; j++) { 1433 mutex_enter(&mpt->m_doneq_thread_id[j].mutex); 1434 mpt->m_doneq_thread_id[j].flag &= 1435 (~MPTSAS_DONEQ_THREAD_ACTIVE); 1436 cv_signal(&mpt->m_doneq_thread_id[j].cv); 1437 mutex_exit(&mpt->m_doneq_thread_id[j].mutex); 1438 } 1439 while (mpt->m_doneq_thread_n) { 1440 cv_wait(&mpt->m_doneq_thread_cv, 1441 &mpt->m_doneq_mutex); 1442 } 1443 for (j = 0; j < doneq_thread_num; j++) { 1444 cv_destroy(&mpt->m_doneq_thread_id[j].cv); 1445 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex); 1446 } 1447 kmem_free(mpt->m_doneq_thread_id, 1448 sizeof (mptsas_doneq_thread_list_t) 1449 * doneq_thread_num); 1450 mutex_exit(&mpt->m_doneq_mutex); 1451 cv_destroy(&mpt->m_doneq_thread_cv); 1452 mutex_destroy(&mpt->m_doneq_mutex); 1453 } 1454 if (event_taskq_create) { 1455 ddi_taskq_destroy(mpt->m_event_taskq); 1456 } 1457 if (dr_taskq_create) { 1458 ddi_taskq_destroy(mpt->m_dr_taskq); 1459 } 1460 if (mutex_init_done) { 1461 mutex_destroy(&mpt->m_intr_mutex); 1462 mutex_destroy(&mpt->m_passthru_mutex); 1463 mutex_destroy(&mpt->m_mutex); 1464 for (i = 0; i < MPTSAS_MAX_PHYS; i++) { 1465 mutex_destroy( 1466 &mpt->m_phy_info[i].smhba_info.phy_mutex); 1467 } 1468 cv_destroy(&mpt->m_cv); 1469 
cv_destroy(&mpt->m_passthru_cv); 1470 cv_destroy(&mpt->m_fw_cv); 1471 cv_destroy(&mpt->m_config_cv); 1472 cv_destroy(&mpt->m_fw_diag_cv); 1473 } 1474 1475 if (map_setup) { 1476 mptsas_cfg_fini(mpt); 1477 } 1478 if (config_setup) { 1479 mptsas_config_space_fini(mpt); 1480 } 1481 mptsas_free_handshake_msg(mpt); 1482 mptsas_hba_fini(mpt); 1483 1484 mptsas_fm_fini(mpt); 1485 ddi_soft_state_free(mptsas_state, instance); 1486 ddi_prop_remove_all(dip); 1487 } 1488 return (DDI_FAILURE); 1489 } 1490 1491 static int 1492 mptsas_suspend(dev_info_t *devi) 1493 { 1494 mptsas_t *mpt, *g; 1495 scsi_hba_tran_t *tran; 1496 1497 if (scsi_hba_iport_unit_address(devi)) { 1498 return (DDI_SUCCESS); 1499 } 1500 1501 if ((tran = ddi_get_driver_private(devi)) == NULL) 1502 return (DDI_SUCCESS); 1503 1504 mpt = TRAN2MPT(tran); 1505 if (!mpt) { 1506 return (DDI_SUCCESS); 1507 } 1508 1509 mutex_enter(&mpt->m_mutex); 1510 1511 if (mpt->m_suspended++) { 1512 mutex_exit(&mpt->m_mutex); 1513 return (DDI_SUCCESS); 1514 } 1515 1516 /* 1517 * Cancel timeout threads for this mpt 1518 */ 1519 if (mpt->m_quiesce_timeid) { 1520 timeout_id_t tid = mpt->m_quiesce_timeid; 1521 mpt->m_quiesce_timeid = 0; 1522 mutex_exit(&mpt->m_mutex); 1523 (void) untimeout(tid); 1524 mutex_enter(&mpt->m_mutex); 1525 } 1526 1527 if (mpt->m_restart_cmd_timeid) { 1528 timeout_id_t tid = mpt->m_restart_cmd_timeid; 1529 mpt->m_restart_cmd_timeid = 0; 1530 mutex_exit(&mpt->m_mutex); 1531 (void) untimeout(tid); 1532 mutex_enter(&mpt->m_mutex); 1533 } 1534 1535 mutex_exit(&mpt->m_mutex); 1536 1537 (void) pm_idle_component(mpt->m_dip, 0); 1538 1539 /* 1540 * Cancel watch threads if all mpts suspended 1541 */ 1542 rw_enter(&mptsas_global_rwlock, RW_WRITER); 1543 for (g = mptsas_head; g != NULL; g = g->m_next) { 1544 if (!g->m_suspended) 1545 break; 1546 } 1547 rw_exit(&mptsas_global_rwlock); 1548 1549 mutex_enter(&mptsas_global_mutex); 1550 if (g == NULL) { 1551 timeout_id_t tid; 1552 1553 mptsas_timeouts_enabled = 0; 1554 if 
(mptsas_timeout_id) { 1555 tid = mptsas_timeout_id; 1556 mptsas_timeout_id = 0; 1557 mutex_exit(&mptsas_global_mutex); 1558 (void) untimeout(tid); 1559 mutex_enter(&mptsas_global_mutex); 1560 } 1561 if (mptsas_reset_watch) { 1562 tid = mptsas_reset_watch; 1563 mptsas_reset_watch = 0; 1564 mutex_exit(&mptsas_global_mutex); 1565 (void) untimeout(tid); 1566 mutex_enter(&mptsas_global_mutex); 1567 } 1568 } 1569 mutex_exit(&mptsas_global_mutex); 1570 1571 mutex_enter(&mpt->m_mutex); 1572 1573 /* 1574 * If this mpt is not in full power(PM_LEVEL_D0), just return. 1575 */ 1576 if ((mpt->m_options & MPTSAS_OPT_PM) && 1577 (mpt->m_power_level != PM_LEVEL_D0)) { 1578 mutex_exit(&mpt->m_mutex); 1579 return (DDI_SUCCESS); 1580 } 1581 1582 /* Disable HBA interrupts in hardware */ 1583 MPTSAS_DISABLE_INTR(mpt); 1584 /* 1585 * Send RAID action system shutdown to sync IR 1586 */ 1587 mptsas_raid_action_system_shutdown(mpt); 1588 1589 mutex_exit(&mpt->m_mutex); 1590 1591 /* drain the taskq */ 1592 ddi_taskq_wait(mpt->m_event_taskq); 1593 ddi_taskq_wait(mpt->m_dr_taskq); 1594 1595 return (DDI_SUCCESS); 1596 } 1597 1598 #ifdef __sparc 1599 /*ARGSUSED*/ 1600 static int 1601 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd) 1602 { 1603 mptsas_t *mpt; 1604 scsi_hba_tran_t *tran; 1605 1606 /* 1607 * If this call is for iport, just return. 1608 */ 1609 if (scsi_hba_iport_unit_address(devi)) 1610 return (DDI_SUCCESS); 1611 1612 if ((tran = ddi_get_driver_private(devi)) == NULL) 1613 return (DDI_SUCCESS); 1614 1615 if ((mpt = TRAN2MPT(tran)) == NULL) 1616 return (DDI_SUCCESS); 1617 1618 /* 1619 * Send RAID action system shutdown to sync IR. Disable HBA 1620 * interrupts in hardware first. 1621 */ 1622 MPTSAS_DISABLE_INTR(mpt); 1623 mptsas_raid_action_system_shutdown(mpt); 1624 1625 return (DDI_SUCCESS); 1626 } 1627 #else /* __sparc */ 1628 /* 1629 * quiesce(9E) entry point. 1630 * 1631 * This function is called when the system is single-threaded at high 1632 * PIL with preemption disabled. 
 Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
mptsas_quiesce(dev_info_t *devi)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t *tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/* Send RAID action system shutdown to sync IR */
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
#endif	/* __sparc */

/*
 * detach(9E).	Remove all device allocations and system resources;
 * disable device interrupts.
 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
 */
static int
mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);
	NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));

	switch (cmd) {
	case DDI_DETACH:
		return (mptsas_do_detach(devi));

	case DDI_SUSPEND:
		return (mptsas_suspend(devi));

	default:
		return (DDI_FAILURE);
	}
	/* NOTREACHED */
}

/*
 * Worker for DDI_DETACH: for an iport node, release all MPxIO path info and
 * unregister the pHCI; for the HBA node, quiesce the IOC, stop the
 * completion threads and timeouts, and free every resource acquired in
 * mptsas_attach() (in roughly reverse order of acquisition).
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR.  After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Ask the completion-queue worker threads to exit and wait until
	 * all of them have done so before freeing their state.
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
	mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_intr_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}

/*
 * Append an instance to the tail of the global instance list
 * (walked by mptsas_watch); protected by mptsas_global_rwlock.
 */
static void
mptsas_list_add(mptsas_t *mpt)
{
	rw_enter(&mptsas_global_rwlock, RW_WRITER);

	if (mptsas_head == NULL) {
		mptsas_head = mpt;
	} else {
		mptsas_tail->m_next = mpt;
	}
	mptsas_tail = mpt;
	rw_exit(&mptsas_global_rwlock);
}

/*
 * Unlink an instance from the global instance list; panics if the
 * instance is not found (it must have been added by mptsas_list_add()).
 */
static void
mptsas_list_del(mptsas_t *mpt)
{
	mptsas_t *m;
	/*
	 * Remove device instance from the global linked list
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	if (mptsas_head == mpt) {
		m =
mptsas_head = mpt->m_next; 1930 } else { 1931 for (m = mptsas_head; m != NULL; m = m->m_next) { 1932 if (m->m_next == mpt) { 1933 m->m_next = mpt->m_next; 1934 break; 1935 } 1936 } 1937 if (m == NULL) { 1938 mptsas_log(mpt, CE_PANIC, "Not in softc list!"); 1939 } 1940 } 1941 1942 if (mptsas_tail == mpt) { 1943 mptsas_tail = m; 1944 } 1945 rw_exit(&mptsas_global_rwlock); 1946 } 1947 1948 static int 1949 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size) 1950 { 1951 ddi_dma_attr_t task_dma_attrs; 1952 1953 task_dma_attrs = mpt->m_msg_dma_attr; 1954 task_dma_attrs.dma_attr_sgllen = 1; 1955 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size); 1956 1957 /* allocate Task Management ddi_dma resources */ 1958 if (mptsas_dma_addr_create(mpt, task_dma_attrs, 1959 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp, 1960 alloc_size, NULL) == FALSE) { 1961 return (DDI_FAILURE); 1962 } 1963 mpt->m_hshk_dma_size = alloc_size; 1964 1965 return (DDI_SUCCESS); 1966 } 1967 1968 static void 1969 mptsas_free_handshake_msg(mptsas_t *mpt) 1970 { 1971 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl); 1972 mpt->m_hshk_dma_size = 0; 1973 } 1974 1975 static int 1976 mptsas_hba_setup(mptsas_t *mpt) 1977 { 1978 scsi_hba_tran_t *hba_tran; 1979 int tran_flags; 1980 1981 /* Allocate a transport structure */ 1982 hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip, 1983 SCSI_HBA_CANSLEEP); 1984 ASSERT(mpt->m_tran != NULL); 1985 1986 hba_tran->tran_hba_private = mpt; 1987 hba_tran->tran_tgt_private = NULL; 1988 1989 hba_tran->tran_tgt_init = mptsas_scsi_tgt_init; 1990 hba_tran->tran_tgt_free = mptsas_scsi_tgt_free; 1991 1992 hba_tran->tran_start = mptsas_scsi_start; 1993 hba_tran->tran_reset = mptsas_scsi_reset; 1994 hba_tran->tran_abort = mptsas_scsi_abort; 1995 hba_tran->tran_getcap = mptsas_scsi_getcap; 1996 hba_tran->tran_setcap = mptsas_scsi_setcap; 1997 hba_tran->tran_init_pkt = mptsas_scsi_init_pkt; 1998 hba_tran->tran_destroy_pkt = 
mptsas_scsi_destroy_pkt; 1999 2000 hba_tran->tran_dmafree = mptsas_scsi_dmafree; 2001 hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt; 2002 hba_tran->tran_reset_notify = mptsas_scsi_reset_notify; 2003 2004 hba_tran->tran_get_bus_addr = mptsas_get_bus_addr; 2005 hba_tran->tran_get_name = mptsas_get_name; 2006 2007 hba_tran->tran_quiesce = mptsas_scsi_quiesce; 2008 hba_tran->tran_unquiesce = mptsas_scsi_unquiesce; 2009 hba_tran->tran_bus_reset = NULL; 2010 2011 hba_tran->tran_add_eventcall = NULL; 2012 hba_tran->tran_get_eventcookie = NULL; 2013 hba_tran->tran_post_event = NULL; 2014 hba_tran->tran_remove_eventcall = NULL; 2015 2016 hba_tran->tran_bus_config = mptsas_bus_config; 2017 2018 hba_tran->tran_interconnect_type = INTERCONNECT_SAS; 2019 2020 /* 2021 * All children of the HBA are iports. We need tran was cloned. 2022 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be 2023 * inherited to iport's tran vector. 2024 */ 2025 tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE); 2026 2027 if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr, 2028 hba_tran, tran_flags) != DDI_SUCCESS) { 2029 mptsas_log(mpt, CE_WARN, "hba attach setup failed"); 2030 scsi_hba_tran_free(hba_tran); 2031 mpt->m_tran = NULL; 2032 return (FALSE); 2033 } 2034 return (TRUE); 2035 } 2036 2037 static void 2038 mptsas_hba_teardown(mptsas_t *mpt) 2039 { 2040 (void) scsi_hba_detach(mpt->m_dip); 2041 if (mpt->m_tran != NULL) { 2042 scsi_hba_tran_free(mpt->m_tran); 2043 mpt->m_tran = NULL; 2044 } 2045 } 2046 2047 static void 2048 mptsas_iport_register(mptsas_t *mpt) 2049 { 2050 int i, j; 2051 mptsas_phymask_t mask = 0x0; 2052 /* 2053 * initial value of mask is 0 2054 */ 2055 mutex_enter(&mpt->m_mutex); 2056 for (i = 0; i < mpt->m_num_phys; i++) { 2057 mptsas_phymask_t phy_mask = 0x0; 2058 char phy_mask_name[MPTSAS_MAX_PHYS]; 2059 uint8_t current_port; 2060 2061 if (mpt->m_phy_info[i].attached_devhdl == 0) 2062 continue; 2063 2064 bzero(phy_mask_name, sizeof (phy_mask_name)); 2065 2066 
current_port = mpt->m_phy_info[i].port_num; 2067 2068 if ((mask & (1 << i)) != 0) 2069 continue; 2070 2071 for (j = 0; j < mpt->m_num_phys; j++) { 2072 if (mpt->m_phy_info[j].attached_devhdl && 2073 (mpt->m_phy_info[j].port_num == current_port)) { 2074 phy_mask |= (1 << j); 2075 } 2076 } 2077 mask = mask | phy_mask; 2078 2079 for (j = 0; j < mpt->m_num_phys; j++) { 2080 if ((phy_mask >> j) & 0x01) { 2081 mpt->m_phy_info[j].phy_mask = phy_mask; 2082 } 2083 } 2084 2085 (void) sprintf(phy_mask_name, "%x", phy_mask); 2086 2087 mutex_exit(&mpt->m_mutex); 2088 /* 2089 * register a iport 2090 */ 2091 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name); 2092 mutex_enter(&mpt->m_mutex); 2093 } 2094 mutex_exit(&mpt->m_mutex); 2095 /* 2096 * register a virtual port for RAID volume always 2097 */ 2098 (void) scsi_hba_iport_register(mpt->m_dip, "v0"); 2099 2100 } 2101 2102 static int 2103 mptsas_smp_setup(mptsas_t *mpt) 2104 { 2105 mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip); 2106 ASSERT(mpt->m_smptran != NULL); 2107 mpt->m_smptran->smp_tran_hba_private = mpt; 2108 mpt->m_smptran->smp_tran_start = mptsas_smp_start; 2109 if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) { 2110 mptsas_log(mpt, CE_WARN, "smp attach setup failed"); 2111 smp_hba_tran_free(mpt->m_smptran); 2112 mpt->m_smptran = NULL; 2113 return (FALSE); 2114 } 2115 /* 2116 * Initialize smp hash table 2117 */ 2118 mptsas_hash_init(&mpt->m_active->m_smptbl); 2119 mpt->m_smp_devhdl = 0xFFFF; 2120 2121 return (TRUE); 2122 } 2123 2124 static void 2125 mptsas_smp_teardown(mptsas_t *mpt) 2126 { 2127 (void) smp_hba_detach(mpt->m_dip); 2128 if (mpt->m_smptran != NULL) { 2129 smp_hba_tran_free(mpt->m_smptran); 2130 mpt->m_smptran = NULL; 2131 } 2132 mpt->m_smp_devhdl = 0; 2133 } 2134 2135 static int 2136 mptsas_cache_create(mptsas_t *mpt) 2137 { 2138 int instance = mpt->m_instance; 2139 char buf[64]; 2140 2141 /* 2142 * create kmem cache for packets 2143 */ 2144 (void) sprintf(buf, 
	    "mptsas%d_cache", instance);
	mpt->m_kmem_cache = kmem_cache_create(buf,
	    sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
	    mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
	    NULL, (void *)mpt, NULL, 0);

	if (mpt->m_kmem_cache == NULL) {
		mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
		return (FALSE);
	}

	/*
	 * create kmem cache for extra SGL frames if SGL cannot
	 * be accommodated into main request frame.
	 */
	(void) sprintf(buf, "mptsas%d_cache_frames", instance);
	mpt->m_cache_frames = kmem_cache_create(buf,
	    sizeof (mptsas_cache_frames_t), 8,
	    mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
	    NULL, (void *)mpt, NULL, 0);

	if (mpt->m_cache_frames == NULL) {
		mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
		return (FALSE);
	}

	return (TRUE);
}

/*
 * Destroy the kmem caches created by mptsas_cache_create().
 */
static void
mptsas_cache_destroy(mptsas_t *mpt)
{
	/* deallocate in reverse order */
	if (mpt->m_cache_frames) {
		kmem_cache_destroy(mpt->m_cache_frames);
		mpt->m_cache_frames = NULL;
	}
	if (mpt->m_kmem_cache) {
		kmem_cache_destroy(mpt->m_kmem_cache);
		mpt->m_kmem_cache = NULL;
	}
}

/*
 * power(9E) entry point.  Raises the IOC to D0 (waiting up to 30
 * seconds for it to leave reset, then hard-resetting if it is still
 * not operational) or lowers it to D3.  iport children are ignored.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset.
		 * (3000 polls of 10ms each)
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mutex_enter(&mpt->m_intr_mutex);
		mpt->m_power_level = PM_LEVEL_D0;
		mutex_exit(&mpt->m_intr_mutex);
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}

/*
 * Initialize configuration space and figure out which
 * chip and revision of the chip the mpt driver is using.
 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* already mapped; nothing to do */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team.  This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}

/*
 * Disable bus mastering and unmap PCI configuration space.
 */
static void
mptsas_config_space_fini(mptsas_t *mpt)
{
	if (mpt->m_config_handle != NULL) {
		mptsas_disable_bus_master(mpt);
		pci_config_teardown(&mpt->m_config_handle);
		mpt->m_config_handle = NULL;
	}
}

/*
 * Enable memory access, bus mastering, SERR and parity detection in
 * the PCI command register; I/O space access is disabled.
 */
static void
mptsas_setup_cmd_reg(mptsas_t *mpt)
{
	ushort_t	cmdreg;

	/*
	 * Set the command register to the needed values.
	 */
	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
	cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
	    PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
	cmdreg &= ~PCI_COMM_IO;
	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
}

static void
mptsas_disable_bus_master(mptsas_t *mpt)
{
	ushort_t	cmdreg;

	/*
	 * Clear the master enable bit in the PCI command register.
	 * This prevents any bus mastering activity like DMA.
	 */
	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
	cmdreg &= ~PCI_COMM_ME;
	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
}

/*
 * Allocate a single-cookie DMA region described by dma_statep (size
 * must already be filled in).  Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
{
	ddi_dma_attr_t	attrs;

	attrs = mpt->m_io_dma_attr;
	attrs.dma_attr_sgllen = 1;

	ASSERT(dma_statep != NULL);

	if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
	    &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
	    &dma_statep->cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Release a DMA region allocated by mptsas_dma_alloc().
 */
void
mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
{
	ASSERT(dma_statep != NULL);
	mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
	dma_statep->size = 0;
}

/*
 * Allocate a temporary single-cookie DMA buffer of the given size,
 * run the supplied callback against it, then free the buffer.  FMA
 * handle checks after the callback downgrade the result to failure.
 * Caller must hold m_mutex.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t		attrs;
	ddi_dma_handle_t	dma_handle;
	caddr_t			memp;
	ddi_acc_handle_t	accessp;
	int			rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}

/*
 * Allocate (or re-allocate) the DMA pool holding System Request
 * Message Frames and record its DMA and kernel virtual addresses.
 */
static int
mptsas_alloc_request_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	/*
	 * The size of the request frame pool is:
	 *   Number of Request Frames * Request Frame Size
	 */
	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;

	/*
	 * set the DMA attributes.  System Request Message Frames must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request frame pool.
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to fill in the frame.
	 */
	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_req_frame = memp;

	/*
	 * Clear the request frame pool.
	 */
	bzero(mpt->m_req_frame, mem_size);

	return (DDI_SUCCESS);
}

/*
 * Allocate (or re-allocate) the DMA pool holding System Reply
 * Message Frames and record its DMA and kernel virtual addresses.
 */
static int
mptsas_alloc_reply_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	/*
	 * The size of the reply frame pool is:
	 *   Number of Reply Frames * Reply Frame Size
	 */
	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;

	/*
	 * set the DMA attributes.  System Reply Message Frames must be
	 * aligned on a 4-byte boundary.  This is the default.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply frame pool
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to process the frame.
	 */
	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_reply_frame = memp;

	/*
	 * Clear the reply frame pool.
	 */
	bzero(mpt->m_reply_frame, mem_size);

	return (DDI_SUCCESS);
}

/*
 * Allocate (or re-allocate) the Reply Free Queue DMA region
 * (one 32-bit address entry per queue slot).
 */
static int
mptsas_alloc_free_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	/*
	 * The reply free queue size is:
	 *   Reply Free Queue Depth * 4
	 * The "4" is the size of one 32 bit address (low part of 64-bit
	 * address)
	 */
	mem_size = mpt->m_free_queue_depth * 4;

	/*
	 * set the DMA attributes  The Reply Free Queue must be aligned on a
	 * 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply free queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply free queue memory address.  This chip uses this
	 * address to read from the reply free queue.  The second address
	 * is the address mpt uses to manage the queue.
	 */
	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_free_queue = memp;

	/*
	 * Clear the reply free queue memory.
	 */
	bzero(mpt->m_free_queue, mem_size);

	return (DDI_SUCCESS);
}

/*
 * Allocate (or re-allocate) the Reply Descriptor Post Queue DMA
 * region (one 8-byte descriptor per queue slot).
 */
static int
mptsas_alloc_post_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	/*
	 * The reply descriptor post queue size is:
	 *   Reply Descriptor Post Queue Depth * 8
	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
	 */
	mem_size = mpt->m_post_queue_depth * 8;

	/*
	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply post queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply descriptor post queue memory address.  This chip
	 * uses this address to write to the reply descriptor post queue.  The
	 * second address is the address mpt uses to manage the queue.
	 */
	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_post_queue = memp;

	/*
	 * Clear the reply post queue memory.
	 */
	bzero(mpt->m_post_queue, mem_size);

	return (DDI_SUCCESS);
}

/*
 * (Re)allocate the zeroed array of reply handler arguments, one
 * entry per possible reply.
 */
static void
mptsas_alloc_reply_args(mptsas_t *mpt)
{
	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
		mpt->m_replyh_args = NULL;
	}
	mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
	    mpt->m_max_replies, KM_SLEEP);
}

/*
 * Attach an extra SGL frame to the command if it does not already
 * have one; allocation is non-sleeping and may fail.
 */
static int
mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_cache_frames_t	*frames = NULL;
	if (cmd->cmd_extra_frames == NULL) {
		frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
		if (frames == NULL) {
			return (DDI_FAILURE);
		}
		cmd->cmd_extra_frames = frames;
	}
	return (DDI_SUCCESS);
}

/*
 * Return the command's extra SGL frame (if any) to its cache.
 */
static void
mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	if (cmd->cmd_extra_frames) {
		kmem_cache_free(mpt->m_cache_frames,
		    (void *)cmd->cmd_extra_frames);
		cmd->cmd_extra_frames = NULL;
	}
}

/*
 * Unmap the device register space.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}

/*
 * Free all DMA pools and the reply handler argument array allocated
 * for this instance.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}

/*
 * Build the unit-address name ("pPHY,LUN" or "wWWN,LUN") for a child
 * node into the supplied buffer.
 */
static int
mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
{
	int	lun = 0;
	char	*sas_wwn = NULL;
	int	phynum = -1;
	int	reallen = 0;

	/* Get the target num */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
	    LUN_PROP, 0);

	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
		/*
		 * Stick in the address of form "pPHY,LUN"
		 */
		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
	    == DDI_PROP_SUCCESS) {
		/*
		 * Stick in the address of the form "wWWN,LUN"
		 */
		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
		ddi_prop_free(sas_wwn);
	} else {
		return (DDI_FAILURE);
	}

	/* snprintf truncates; warn if caller's buffer was too small */
	ASSERT(reallen < len);
	if (reallen >= len) {
		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
		    "length too small, it needs to be %d bytes", reallen + 1);
	}
	return (DDI_SUCCESS);
}

/*
 * tran_tgt_init(9E) - target device instance initialization
 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA.  Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t		*mpt;
	int			lun = sd->sd_address.a_lun;
	mdi_pathinfo_t		*pip = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	mptsas_target_t		*ptgt = NULL;
	char			*psas_wwn = NULL;
	int			phymask = 0;
	uint64_t		sas_wwn = 0;
	mpt = SDEV2MPT(sd);

	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs.  Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* non-mpxio child: read LUN and target port from the node */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}
	ASSERT((sas_wwn != 0) || (phymask != 0));
	mutex_enter(&mpt->m_mutex);
	ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	if (hba_tran->tran_tgt_private == NULL) {
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		mutex_exit(&mpt->m_mutex);
		/*
		 * According to SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		sid = (void *)(&inq89[60]);

		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into vid/pid
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA     ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
/*
 * tran_tgt_free(9E) - target device instance deallocation
 */
static void
mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
#endif

	mptsas_tgt_private_t	*tgt_private =
	    hba_tran->tran_tgt_private;

	if (tgt_private != NULL) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
		hba_tran->tran_tgt_private = NULL;
	}
}

/*
 * scsi_pkt handling
 *
 * Visible to the external world via the transport structure.
 */

/*
 * Notes:
 *	- transport the command to the addressed SCSI target/lun device
 *	- normal operation is to schedule the command to be transported,
 *	  and return TRAN_ACCEPT if this is successful.
 *	- if NO_INTR, tran_start must poll device for command completion
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 */

	/*
	 * The dynamic-reconfiguration flag is sampled under the per-target
	 * interrupt mutex; the mutex is dropped on every exit path below.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
			/*
			 * commands should be allowed to retry by
			 * returning TRAN_BUSY to stall the I/O's
			 * which come from scsi_vhci since the device/
			 * path is in unstable state now.
			 */
			mutex_exit(&ptgt->m_tgt_intr_mutex);
			return (TRAN_BUSY);
		} else {
			/*
			 * The device is offline, just fail the
			 * command by returning TRAN_FATAL_ERROR.
			 */
			mutex_exit(&ptgt->m_tgt_intr_mutex);
			return (TRAN_FATAL_ERROR);
		}
	}
	mutex_exit(&ptgt->m_tgt_intr_mutex);
	rval = mptsas_accept_pkt(mpt, cmd);

	return (rval);
}

/*
 * Accept a prepared command: reset a draining throttle, reject commands
 * aimed at an invalidated DevHandle, and either start the command
 * directly or queue it on the HBA wait queue.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If the device handle has already been invalidated, just
	 * fail the command.  In theory a command from a scsi_vhci
	 * client cannot be sent down with an invalid devhdl, since
	 * the devhdl is invalidated after path offline and the target
	 * driver is not supposed to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		/*
		 * Lock hand-off: drop the per-target interrupt mutex
		 * before taking the instance mutex needed by the done
		 * queue routines.
		 */
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, the DevHandles are being
		 * re-initialized, which means that they could be invalid
		 * even if the target is still attached. Check if being reset
		 * and if DevHandle is being re-initialized. If this is the
		 * case, return BUSY so the I/O can be retried later.
		 */
		if (mpt->m_in_reset) {
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if (cmd->cmd_flags & CFLAG_TXQ) {
				mptsas_doneq_add(mpt, cmd);
				mptsas_doneq_empty(mpt);
				mutex_exit(&mpt->m_mutex);
				return (rval);
			} else {
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			}
		}
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			mutex_exit(&mpt->m_mutex);
			return (rval);
		} else {
			mutex_exit(&mpt->m_mutex);
			return (TRAN_FATAL_ERROR);
		}
	}
	mutex_exit(&ptgt->m_tgt_intr_mutex);
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if ((ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd0(mpt, cmd);
		} else {
			/* No free slot available: fall back to the waitq. */
			mutex_enter(&mpt->m_mutex);
			mptsas_waitq_add(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		mutex_enter(&mpt->m_mutex);
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
		mutex_exit(&mpt->m_mutex);
	}
	return (rval);
}

/*
 * Allocate a free request slot (SMID) for the command from the per-CPU
 * slot free-queue pairs.  Returns TRUE on success with cmd->cmd_slot set,
 * FALSE if every queue pair is empty.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t		*slots;
	int			slot;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	mptsas_slot_free_e_t	*pe;
	int			qn, qn_first;

	slots = mpt->m_active;

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));

	/*
	 * Start from the queue pair indexed by this CPU's sequence id so
	 * allocations spread across the pairs (m_slot_freeq_pair_n is
	 * evidently a power of two, given the mask arithmetic below).
	 */
	qn = qn_first = CPU->cpu_seqid & (mpt->m_slot_freeq_pair_n - 1);

qpair_retry:
	ASSERT(qn < mpt->m_slot_freeq_pair_n);
	mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
	pe = list_head(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.
	    s.m_fq_list);
	if (!pe) { /* switch the allocq and releq */
		/*
		 * The alloc queue is empty; splice the release queue's
		 * entire list onto the alloc queue by raw list_head pointer
		 * surgery, holding both queue mutexes (allocq taken first).
		 */
		mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_releq.
		    s.m_fq_mutex);
		if (mpt->m_slot_freeq_pairp[qn].m_slot_releq.s.m_fq_n) {
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_n =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_n;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head.list_next =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_next;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head.list_prev =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_prev;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_prev->
			    list_next =
			    &mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_next->
			    list_prev =
			    &mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head;

			/* Reset the release queue to empty. */
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_next =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_prev =
			    &mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_n = 0;
		} else {
			/*
			 * Both queues of this pair are empty; drop both
			 * mutexes and try the next pair, giving up only
			 * after a full cycle back to qn_first.
			 */
			mutex_exit(&mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_mutex);
			mutex_exit(&mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_mutex);
			qn = (qn + 1) & (mpt->m_slot_freeq_pair_n - 1);
			if (qn == qn_first)
				return (FALSE);
			else
				goto qpair_retry;
		}
		mutex_exit(&mpt->m_slot_freeq_pairp[qn].
		    m_slot_releq.s.m_fq_mutex);
		pe = list_head(&mpt->m_slot_freeq_pairp[qn].
		    m_slot_allocq.s.m_fq_list);
		ASSERT(pe);
	}
	list_remove(&mpt->m_slot_freeq_pairp[qn].
	    m_slot_allocq.s.m_fq_list, pe);
	slot = pe->slot;
	/*
	 * Make sure SMID is not using reserved value of 0
	 * and the TM request slot.
	 */
	ASSERT((slot > 0) && (slot <= slots->m_n_slots) &&
	    mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n > 0);
	cmd->cmd_slot = slot;
	mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n--;
	ASSERT(mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n >= 0);

	mutex_exit(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. a
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		ptgt->m_t_ncmds++;
		mutex_exit(&ptgt->m_tgt_intr_mutex);
	}
	cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

	/*
	 * If initial timeout is less than or equal to one tick, bump
	 * the timeout by a tick so that command doesn't timeout before
	 * its allotted time.
	 */
	if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
		cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
	}
	return (TRUE);
}

/*
 * prepare the pkt:
 * the pkt may have been resubmitted or just reused so
 * initialize some fields and do some checks.
 */
static int
mptsas_prepare_pkt(mptsas_cmd_t *cmd)
{
	struct scsi_pkt	*pkt = CMD2PKT(cmd);

	NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;
	cmd->cmd_age = 0;
	cmd->cmd_pkt_flags = pkt->pkt_flags;

	/*
	 * zero status byte.
	 */
	*(pkt->pkt_scbp) = 0;

	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_resid = cmd->cmd_dmacount;

		/*
		 * consistent packets need to be sync'ed first
		 * (only for data going out)
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
		    (cmd->cmd_flags & CFLAG_DMASEND)) {
			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	cmd->cmd_flags =
	    (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
	    CFLAG_PREPARED | CFLAG_IN_TRANSPORT;

	return (TRAN_ACCEPT);
}

/*
 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
 *
 * One of three possibilities:
 *	- allocate scsi_pkt
 *	- allocate scsi_pkt and DMA resources
 *	- allocate DMA resources to an already-allocated pkt
 */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			failure = 1;
#ifndef	__sparc
	uint_t			oldcookiec;
#endif	/* __sparc */
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t	save_dma_handle;
		ddi_dma_handle_t	save_arq_dma_handle;
		struct buf		*save_arq_bp;
		ddi_dma_cookie_t	save_arqcookie;
#ifdef	__sparc
		mptti_t			*save_sg;
#endif	/* __sparc */

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);

		if (cmd) {
			/*
			 * The cache constructor pre-allocated the DMA
			 * handles and ARQ buffer; save them across the
			 * bzero of the cmd + pkt area and restore after.
			 */
			save_dma_handle = cmd->cmd_dmahandle;
			save_arq_dma_handle = cmd->cmd_arqhandle;
			save_arq_bp = cmd->cmd_arq_buf;
			save_arqcookie = cmd->cmd_arqcookie;
#ifdef	__sparc
			save_sg = cmd->cmd_sg;
#endif	/* __sparc */
			bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
			cmd->cmd_dmahandle = save_dma_handle;
			cmd->cmd_arqhandle = save_arq_dma_handle;
			cmd->cmd_arq_buf = save_arq_bp;
			cmd->cmd_arqcookie = save_arqcookie;
#ifdef	__sparc
			cmd->cmd_sg = save_sg;
#endif	/* __sparc */
			/* The scsi_pkt lives immediately after the cmd. */
			pkt = (void *)((uchar_t *)cmd +
			    sizeof (struct mptsas_cmd));
			pkt->pkt_ha_private = (opaque_t)cmd;
			pkt->pkt_address = *ap;
			pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
			pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
			cmd->cmd_pkt = (struct scsi_pkt *)pkt;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen = statuslen;
			cmd->cmd_rqslen = SENSE_LENGTH;
			cmd->cmd_tgt_addr = ptgt;
			failure = 0;
		}

		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (failure == 0) {
				/*
				 * if extern alloc fails, all will be
				 * deallocated, including cmd
				 */
				failure = mptsas_pkt_alloc_extern(mpt, cmd,
				    cmdlen, tgtlen, statuslen, kf);
			}
			if (failure) {
				/*
				 * if extern allocation fails, it will
				 * deallocate the new pkt as well
				 */
				return (NULL);
			}
		}
		new_cmd = cmd;

	} else {
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


#ifndef	__sparc
	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;
#endif	/* __sparc */

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we haven't gone past the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		/* Window is set; skip the bind and go fill the S/G list. */
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation.  This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* Partial mapping: set up the first DMA window. */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* Map bind failure codes onto errno for bioerror. */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */
#ifndef	__sparc
		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
				/*
				 * if we have an ENOMEM condition we need to
				 * behave the same way as the rest of this
				 * routine
				 */
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}
#endif	/* __sparc */
		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used).  totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
	}
	return (pkt);
}

/*
 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
 *
 * Notes:
 *	- also frees DMA resources if allocated
 *	- implicit DMA synchronization
 */
static void
mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
	    ap->a_target, (void *)pkt));

	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags &= ~CFLAG_DMAVALID;
	}
#ifndef	__sparc
	if (cmd->cmd_sg) {
		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
		cmd->cmd_sg = NULL;
	}
#endif	/* __sparc */
	mptsas_free_extra_sgl_frame(mpt, cmd);

	/*
	 * Packets with externally allocated areas must be torn down by
	 * mptsas_pkt_destroy_extern(); plain ones go straight back to
	 * the kmem cache.
	 */
	if ((cmd->cmd_flags &
	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
	    CFLAG_SCBEXTERN)) == 0) {
		cmd->cmd_flags = CFLAG_FREE;
		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
	} else
	{
		mptsas_pkt_destroy_extern(mpt, cmd);
	}
}

/*
 * kmem cache constructor and destructor:
 * When constructing, we bzero the cmd and allocate the dma handle
 * When destructing, just free the dma handle
 */
static int
mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cmd_t		*cmd = buf;
	mptsas_t		*mpt = cdrarg;
	struct scsi_address	ap;
	uint_t			cookiec;
	ddi_dma_attr_t		arq_dma_attr;
	int			(*callback)(caddr_t);

	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	NDBG4(("mptsas_kmem_cache_constructor"));

	ap.a_hba_tran = mpt->m_tran;
	ap.a_target = 0;
	ap.a_lun = 0;

	/*
	 * allocate a dma handle
	 */
	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
	    SENSE_LENGTH, B_READ, callback, NULL);
	if (cmd->cmd_arq_buf == NULL) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	/*
	 * allocate a arq handle
	 */
	arq_dma_attr = mpt->m_msg_dma_attr;
	arq_dma_attr.dma_attr_sgllen = 1;
	if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
	    NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		return (-1);
	}

	/*
	 * Bind the ARQ (auto request sense) buffer; on failure unwind
	 * everything allocated so far and NULL the fields so the
	 * destructor never double-frees.
	 */
	if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
	    cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
	    callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		ddi_dma_free_handle(&cmd->cmd_arqhandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		cmd->cmd_arq_buf = NULL;
		return (-1);
	}
	/*
	 * In sparc, the sgl length in most of the cases would be 1, so we
	 * pre-allocate it in cache.  On x86, the max number would be 256,
	 * pre-allocate a maximum would waste a lot of memory especially
	 * when many cmds are put onto waitq.
	 */
#ifdef	__sparc
	cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
	    MPTSAS_MAX_CMD_SEGS), KM_SLEEP);
#endif	/* __sparc */

	return (0);
}

static void
mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(cdrarg))
#endif
	mptsas_cmd_t	*cmd = buf;

	NDBG4(("mptsas_kmem_cache_destructor"));

	if (cmd->cmd_arqhandle) {
		(void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
		ddi_dma_free_handle(&cmd->cmd_arqhandle);
		cmd->cmd_arqhandle = NULL;
	}
	if (cmd->cmd_arq_buf) {
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_arq_buf = NULL;
	}
	if (cmd->cmd_dmahandle) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		cmd->cmd_dmahandle = NULL;
	}
#ifdef	__sparc
	if (cmd->cmd_sg) {
		kmem_free(cmd->cmd_sg, sizeof (mptti_t)* MPTSAS_MAX_CMD_SEGS);
		cmd->cmd_sg = NULL;
	}
#endif	/* __sparc */
}

/*
 * kmem cache constructor for the extra-SGL frame buffers: allocate and
 * bind a single contiguous DMA area to hold the chained request frames.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}

static void
mptsas_cache_frames_destructor(void *buf, void *cdrarg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(cdrarg))
#endif
	mptsas_cache_frames_t	*p = buf;
	if (p->m_dma_hdl != NULL) {
		(void) ddi_dma_unbind_handle(p->m_dma_hdl);
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		/*
		 * NOTE(review): m_phys_addr is assigned from the integer
		 * cookie.dmac_address in the constructor, yet is cleared
		 * with NULL here — presumably it is an integral field and
		 * this should be 0; confirm against the struct definition.
		 */
		p->m_phys_addr = NULL;
		p->m_frames_addr = NULL;
		p->m_dma_hdl = NULL;
		p->m_acc_hdl = NULL;
	}

}

/*
 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
 * for non-standard length cdb, pkt_private, status areas
 * if allocation fails, then deallocate all external space and the pkt
 */
/* ARGSUSED */
static int
mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
    int cmdlen, int tgtlen, int statuslen, int kf)
{
	caddr_t			cdbp, scbp, tgt;
	int			(*callback)(caddr_t) = (kf == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	struct scsi_address	ap;
	size_t			senselength;
	ddi_dma_attr_t		ext_arq_dma_attr;
	uint_t			cookiec;

	NDBG3(("mptsas_pkt_alloc_extern: "
	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));

	tgt = cdbp = scbp = NULL;
	cmd->cmd_scblen = statuslen;
	cmd->cmd_privlen = (uchar_t)tgtlen;

	if (cmdlen > sizeof (cmd->cmd_cdb)) {
		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
		cmd->cmd_flags |= CFLAG_CDBEXTERN;
	}
	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
		cmd->cmd_pkt->pkt_private = tgt;
	}
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_SCBEXTERN;
		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;

		/* allocate sense data buf for DMA */

		senselength = statuslen - MPTSAS_GET_ITEM_OFF(
		    struct scsi_arq_status, sts_sensedata);
		cmd->cmd_rqslen = (uchar_t)senselength;

		ap.a_hba_tran = mpt->m_tran;
		ap.a_target = 0;
		ap.a_lun = 0;

		cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, senselength, B_READ,
		    callback, NULL);

		if (cmd->cmd_ext_arq_buf == NULL) {
			goto fail;
		}
		/*
		 * allocate a extern arq handle and bind the buf
		 */
		ext_arq_dma_attr = mpt->m_msg_dma_attr;
		ext_arq_dma_attr.dma_attr_sgllen = 1;
		if ((ddi_dma_alloc_handle(mpt->m_dip,
		    &ext_arq_dma_attr, callback,
		    NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
		    cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
		    callback, NULL, &cmd->cmd_ext_arqcookie,
		    &cookiec)
		    != DDI_SUCCESS) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
	}
	return (0);
fail:
	/*
	 * Partial-allocation cleanup: pkt_destroy_extern frees only what
	 * the CFLAG_* bits set above say was allocated, then frees the cmd.
	 */
	mptsas_pkt_destroy_extern(mpt, cmd);
	return (1);
}

/*
 * deallocate external pkt space and deallocate the pkt
 */
static void
mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));

	if (cmd->cmd_flags & CFLAG_FREE) {
		mptsas_log(mpt, CE_PANIC,
		    "mptsas_pkt_destroy_extern: freeing free packet");
		_NOTE(NOT_REACHED)
		/* NOTREACHED */
	}
	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
	}
	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
		if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
			(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
		}
		if (cmd->cmd_ext_arqhandle) {
			ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
			cmd->cmd_ext_arqhandle = NULL;
		}
		if (cmd->cmd_ext_arq_buf)
			scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
	}
	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
	}
	cmd->cmd_flags = CFLAG_FREE;
	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
}

/*
 * tran_sync_pkt(9E) - explicit DMA synchronization
 */
/*ARGSUSED*/
static void
mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);

	NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
	    ap->a_target, (void *)pkt));

	if (cmd->cmd_dmahandle) {
		/* Sync direction follows the data direction of the cmd. */
		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
		    (cmd->cmd_flags & CFLAG_DMASEND) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}

/*
 * tran_dmafree(9E) - deallocate DMA resources allocated for command
 */
/*ARGSUSED*/
static void
mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
	    ap->a_target, (void *)pkt));

	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags &= ~CFLAG_DMAVALID;
	}

	if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
		cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
	}

	mptsas_free_extra_sgl_frame(mpt, cmd);
}

/*
 * Complete a command back to the target driver: sync inbound consistent
 * data for the CPU first, then invoke the packet's completion callback.
 */
static void
mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
{
	if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
	    (!(cmd->cmd_flags & CFLAG_DMASEND))) {
		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}
	(*pkt->pkt_comp)(pkt);
}

static void
mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	uint_t			cookiec;
	mptti_t			*dmap;
	uint32_t		flags;
	pMpi2SGESimple64_t	sge;
	pMpi2SGEChain64_t	sgechain;
	ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));

	/*
	 * Set read/write bit in control.
	 */
	if (cmd->cmd_flags & CFLAG_DMASEND) {
		*control |= MPI2_SCSIIO_CONTROL_WRITE;
	} else {
		*control |= MPI2_SCSIIO_CONTROL_READ;
	}

	ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);

	/*
	 * We have 2 cases here.  First where we can fit all the
	 * SG elements into the main frame, and the case
	 * where we can't.
	 * If we have more cookies than we can attach to a frame
	 * we will need to use a chain element to point
	 * a location of memory where the rest of the S/G
	 * elements reside.
	 */
	if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);
		while (cookiec--) {
			ddi_put32(acc_hdl,
			    &sge->Address.Low, dmap->addr.address64.Low);
			ddi_put32(acc_hdl,
			    &sge->Address.High, dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength,
			    dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)
			    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last cookie, we set the flags
			 * to indicate so
			 */
			if (cookiec == 0) {
				flags |=
				    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
				    | MPI2_SGE_FLAGS_END_OF_BUFFER
				    | MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	} else {
		/*
		 * Hereby we start to deal with multiple frames.
		 * The process is as follows:
		 * 1. Determine how many frames are needed for SGL element
		 *    storage; Note that all frames are stored in contiguous
		 *    memory space and in 64-bit DMA mode each element is
		 *    3 double-words (12 bytes) long.
		 * 2. Fill up the main frame.  We need to do this separately
		 *    since it contains the SCSI IO request header and needs
		 *    dedicated processing.  Note that the last 4 double-words
		 *    of the SCSI IO header is for SGL element storage
		 *    (MPI2_SGE_IO_UNION).
		 * 3. Fill the chain element in the main frame, so the DMA
		 *    engine can use the following frames.
		 * 4. Enter a loop to fill the remaining frames.  Note that the
		 *    last frame contains no chain element.  The remaining
		 *    frames go into the mpt SGL buffer allocated on the fly,
		 *    not immediately following the main message frame, as in
		 *    Gen1.
		 * Some restrictions:
		 * 1. For 64-bit DMA, the simple element and chain element
		 *    are both of 3 double-words (12 bytes) in size, even
		 *    though all frames are stored in the first 4G of mem
		 *    range and the higher 32-bits of the address are always 0.
		 * 2. On some controllers (like the 1064/1068), a frame can
		 *    hold SGL elements with the last 1 or 2 double-words
		 *    (4 or 8 bytes) un-used.  On these controllers, we should
		 *    recognize that there's not enough room for another SGL
		 *    element and move the sge pointer to the next frame.
		 */
		int		i, j, k, l, frames, sgemax;
		int		temp;
		uint8_t		chainflags;
		uint16_t	chainlength;
		mptsas_cache_frames_t *p;

		/*
		 * Sgemax is the number of SGE's that will fit
		 * each extra frame and frames is total
		 * number of frames we'll need.  1 sge entry per
		 * frame is reserved for the chain element thus the -1 below.
		 */
		sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
		    - 1);
		temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

		/*
		 * A little check to see if we need to round up the number
		 * of frames we need
		 */
		if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
		    sgemax) > 1) {
			frames = (temp + 1);
		} else {
			frames = temp;
		}
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);

		/*
		 * First fill in the main frame
		 */
		for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
			ddi_put32(acc_hdl, &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(acc_hdl, &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last SGE of this frame
			 * we set the end of list flag
			 */
			if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}

		/*
		 * Fill in the chain element in the main frame.
		 * About calculation on ChainOffset:
		 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
		 *    in the end reserved for SGL element storage
		 *    (MPI2_SGE_IO_UNION); we should count it in our
		 *    calculation.  See its definition in the header file.
		 * 2.
Constant j is the counter of the current SGL element 4258 * that will be processed, and (j - 1) is the number of 4259 * SGL elements that have been processed (stored in the 4260 * main frame). 4261 * 3. ChainOffset value should be in units of double-words (4 4262 * bytes) so the last value should be divided by 4. 4263 */ 4264 ddi_put8(acc_hdl, &frame->ChainOffset, 4265 (sizeof (MPI2_SCSI_IO_REQUEST) - 4266 sizeof (MPI2_SGE_IO_UNION) + 4267 (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2); 4268 sgechain = (pMpi2SGEChain64_t)sge; 4269 chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT | 4270 MPI2_SGE_FLAGS_SYSTEM_ADDRESS | 4271 MPI2_SGE_FLAGS_64_BIT_ADDRESSING); 4272 ddi_put8(acc_hdl, &sgechain->Flags, chainflags); 4273 4274 /* 4275 * The size of the next frame is the accurate size of space 4276 * (in bytes) used to store the SGL elements. j is the counter 4277 * of SGL elements. (j - 1) is the number of SGL elements that 4278 * have been processed (stored in frames). 4279 */ 4280 if (frames >= 2) { 4281 chainlength = mpt->m_req_frame_size / 4282 sizeof (MPI2_SGE_SIMPLE64) * 4283 sizeof (MPI2_SGE_SIMPLE64); 4284 } else { 4285 chainlength = ((cookiec - (j - 1)) * 4286 sizeof (MPI2_SGE_SIMPLE64)); 4287 } 4288 4289 p = cmd->cmd_extra_frames; 4290 4291 ddi_put16(acc_hdl, &sgechain->Length, chainlength); 4292 ddi_put32(acc_hdl, &sgechain->Address.Low, 4293 p->m_phys_addr); 4294 /* SGL is allocated in the first 4G mem range */ 4295 ddi_put32(acc_hdl, &sgechain->Address.High, 0); 4296 4297 /* 4298 * If there are more than 2 frames left we have to 4299 * fill in the next chain offset to the location of 4300 * the chain element in the next frame. 4301 * sgemax is the number of simple elements in an extra 4302 * frame. Note that the value NextChainOffset should be 4303 * in double-words (4 bytes). 
4304 */ 4305 if (frames >= 2) { 4306 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 4307 (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2); 4308 } else { 4309 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0); 4310 } 4311 4312 /* 4313 * Jump to next frame; 4314 * Starting here, chain buffers go into the per command SGL. 4315 * This buffer is allocated when chain buffers are needed. 4316 */ 4317 sge = (pMpi2SGESimple64_t)p->m_frames_addr; 4318 i = cookiec; 4319 4320 /* 4321 * Start filling in frames with SGE's. If we 4322 * reach the end of frame and still have SGE's 4323 * to fill we need to add a chain element and 4324 * use another frame. j will be our counter 4325 * for what cookie we are at and i will be 4326 * the total cookiec. k is the current frame 4327 */ 4328 for (k = 1; k <= frames; k++) { 4329 for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) { 4330 4331 /* 4332 * If we have reached the end of frame 4333 * and we have more SGE's to fill in 4334 * we have to fill the final entry 4335 * with a chain element and then 4336 * continue to the next frame 4337 */ 4338 if ((l == (sgemax + 1)) && (k != frames)) { 4339 sgechain = (pMpi2SGEChain64_t)sge; 4340 j--; 4341 chainflags = ( 4342 MPI2_SGE_FLAGS_CHAIN_ELEMENT | 4343 MPI2_SGE_FLAGS_SYSTEM_ADDRESS | 4344 MPI2_SGE_FLAGS_64_BIT_ADDRESSING); 4345 ddi_put8(p->m_acc_hdl, 4346 &sgechain->Flags, chainflags); 4347 /* 4348 * k is the frame counter and (k + 1) 4349 * is the number of the next frame. 4350 * Note that frames are in contiguous 4351 * memory space. 
4352 */ 4353 ddi_put32(p->m_acc_hdl, 4354 &sgechain->Address.Low, 4355 (p->m_phys_addr + 4356 (mpt->m_req_frame_size * k))); 4357 ddi_put32(p->m_acc_hdl, 4358 &sgechain->Address.High, 0); 4359 4360 /* 4361 * If there are more than 2 frames left 4362 * we have to next chain offset to 4363 * the location of the chain element 4364 * in the next frame and fill in the 4365 * length of the next chain 4366 */ 4367 if ((frames - k) >= 2) { 4368 ddi_put8(p->m_acc_hdl, 4369 &sgechain->NextChainOffset, 4370 (sgemax * 4371 sizeof (MPI2_SGE_SIMPLE64)) 4372 >> 2); 4373 ddi_put16(p->m_acc_hdl, 4374 &sgechain->Length, 4375 mpt->m_req_frame_size / 4376 sizeof (MPI2_SGE_SIMPLE64) * 4377 sizeof (MPI2_SGE_SIMPLE64)); 4378 } else { 4379 /* 4380 * This is the last frame. Set 4381 * the NextChainOffset to 0 and 4382 * Length is the total size of 4383 * all remaining simple elements 4384 */ 4385 ddi_put8(p->m_acc_hdl, 4386 &sgechain->NextChainOffset, 4387 0); 4388 ddi_put16(p->m_acc_hdl, 4389 &sgechain->Length, 4390 (cookiec - j) * 4391 sizeof (MPI2_SGE_SIMPLE64)); 4392 } 4393 4394 /* Jump to the next frame */ 4395 sge = (pMpi2SGESimple64_t) 4396 ((char *)p->m_frames_addr + 4397 (int)mpt->m_req_frame_size * k); 4398 4399 continue; 4400 } 4401 4402 ddi_put32(p->m_acc_hdl, 4403 &sge->Address.Low, 4404 dmap->addr.address64.Low); 4405 ddi_put32(p->m_acc_hdl, 4406 &sge->Address.High, 4407 dmap->addr.address64.High); 4408 ddi_put32(p->m_acc_hdl, 4409 &sge->FlagsLength, dmap->count); 4410 flags = ddi_get32(p->m_acc_hdl, 4411 &sge->FlagsLength); 4412 flags |= ((uint32_t)( 4413 MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 4414 MPI2_SGE_FLAGS_SYSTEM_ADDRESS | 4415 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 4416 MPI2_SGE_FLAGS_SHIFT); 4417 4418 /* 4419 * If we are at the end of the frame and 4420 * there is another frame to fill in 4421 * we set the last simple element as last 4422 * element 4423 */ 4424 if ((l == sgemax) && (k != frames)) { 4425 flags |= ((uint32_t) 4426 (MPI2_SGE_FLAGS_LAST_ELEMENT) << 4427 
MPI2_SGE_FLAGS_SHIFT); 4428 } 4429 4430 /* 4431 * If this is the final cookie we 4432 * indicate it by setting the flags 4433 */ 4434 if (j == i) { 4435 flags |= ((uint32_t) 4436 (MPI2_SGE_FLAGS_LAST_ELEMENT | 4437 MPI2_SGE_FLAGS_END_OF_BUFFER | 4438 MPI2_SGE_FLAGS_END_OF_LIST) << 4439 MPI2_SGE_FLAGS_SHIFT); 4440 } 4441 if (cmd->cmd_flags & CFLAG_DMASEND) { 4442 flags |= 4443 (MPI2_SGE_FLAGS_HOST_TO_IOC << 4444 MPI2_SGE_FLAGS_SHIFT); 4445 } else { 4446 flags |= 4447 (MPI2_SGE_FLAGS_IOC_TO_HOST << 4448 MPI2_SGE_FLAGS_SHIFT); 4449 } 4450 ddi_put32(p->m_acc_hdl, 4451 &sge->FlagsLength, flags); 4452 dmap++; 4453 sge++; 4454 } 4455 } 4456 4457 /* 4458 * Sync DMA with the chain buffers that were just created 4459 */ 4460 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV); 4461 } 4462 } 4463 4464 /* 4465 * Interrupt handling 4466 * Utility routine. Poll for status of a command sent to HBA 4467 * without interrupts (a FLAG_NOINTR command). 4468 */ 4469 int 4470 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime) 4471 { 4472 int rval = TRUE; 4473 4474 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd)); 4475 4476 /* 4477 * In order to avoid using m_mutex in ISR(a new separate mutex 4478 * m_intr_mutex is introduced) and keep the same lock logic, 4479 * the m_intr_mutex should be used to protect the getting and 4480 * setting of the ReplyDescriptorIndex. 4481 * 4482 * Since the m_intr_mutex would be released during processing the poll 4483 * cmd, so we should set the poll flag earlier here to make sure the 4484 * polled cmd be handled in this thread/context. A side effect is other 4485 * cmds during the period between the flag set and reset are also 4486 * handled in this thread and not the ISR. Since the poll cmd is not 4487 * so common, so the performance degradation in this case is not a big 4488 * issue. 
	 */
	mutex_enter(&mpt->m_intr_mutex);
	mpt->m_polled_intr = 1;
	mutex_exit(&mpt->m_intr_mutex);

	/* TM commands are already in flight; everything else needs a kick */
	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
		mptsas_restart_hba(mpt);
	}

	/*
	 * Wait, using drv_usecwait(), long enough for the command to
	 * reasonably return from the target if the target isn't
	 * "dead".  A polled command may well be sent from scsi_poll, and
	 * there are retries built in to scsi_poll if the transport
	 * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
	 * and retries the transport up to scsi_poll_busycnt times
	 * (currently 60) if
	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
	 *
	 * limit the waiting to avoid a hang in the event that the
	 * cmd never gets started but we are still receiving interrupts
	 */
	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
			NDBG5(("mptsas_poll: command incomplete"));
			rval = FALSE;
			break;
		}
	}

	mutex_enter(&mpt->m_intr_mutex);
	mpt->m_polled_intr = 0;
	mutex_exit(&mpt->m_intr_mutex);

	if (rval == FALSE) {

		/*
		 * this isn't supposed to happen, the hba must be wedged
		 * Mark this cmd as a timeout.
		 */
		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
		    (STAT_TIMEOUT|STAT_ABORTED));

		if (poll_cmd->cmd_queued == FALSE) {

			NDBG5(("mptsas_poll: not on waitq"));

			poll_cmd->cmd_pkt->pkt_state |=
			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
		} else {

			/* find and remove it from the waitq */
			NDBG5(("mptsas_poll: delete from waitq"));
			mptsas_waitq_delete(mpt, poll_cmd);
		}

	}
	mptsas_fma_check(mpt, poll_cmd);
	NDBG5(("mptsas_poll: done"));
	return (rval);
}

/*
 * Used for polling cmds and TM function
 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
	int		cnt;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	Mpi2ReplyDescriptorsUnion_t	reply_desc_union_v;
	uint32_t	int_mask;
	uint8_t		reply_type;

	NDBG5(("mptsas_wait_intr"));

	/*
	 * Get the current interrupt mask and disable interrupts.  When
	 * re-enabling ints, set mask to saved value.
	 */
	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Poll up to 'polltime' iterations, waiting 1000us between empty
	 * checks of the reply post queue, i.e. roughly 'polltime'
	 * milliseconds in total if no reply arrives.
	 */
	for (cnt = 0; cnt < polltime; cnt++) {
		mutex_enter(&mpt->m_intr_mutex);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		/* All-ones means this descriptor slot is still unused */
		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			mutex_exit(&mpt->m_intr_mutex);
			drv_usecwait(1000);
			continue;
		}

		/*
		 * Copy the needed descriptor fields into a local union so
		 * the shared queue slot can be recycled before the reply
		 * is processed (m_intr_mutex is dropped before processing).
		 */
		reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Default.ReplyFlags);
		reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		reply_desc_union_v.Default.ReplyFlags = reply_type;
		if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
			reply_desc_union_v.SCSIIOSuccess.SMID =
			    ddi_get16(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->SCSIIOSuccess.SMID);
		} else if (reply_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply_desc_union_v.AddressReply.ReplyFrameAddress =
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->AddressReply.ReplyFrameAddress);
			reply_desc_union_v.AddressReply.SMID =
			    ddi_get16(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->AddressReply.SMID);
		}
		/*
		 * Clear the reply descriptor for re-use and increment
		 * index.
		 */
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
		    0xFFFFFFFFFFFFFFFF);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}

		/*
		 * Update the global reply index
		 */
		ddi_put32(mpt->m_datap,
		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		mutex_exit(&mpt->m_intr_mutex);

		/*
		 * The reply is valid, process it according to its
		 * type.
		 */
		mptsas_process_intr(mpt, &reply_desc_union_v);

		/*
		 * Re-enable interrupts and quit.
		 */
		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
		    int_mask);
		return (TRUE);

	}

	/*
	 * Clear polling flag, re-enable interrupts and quit.
	 */
	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
	return (FALSE);
}

/*
 * For fastpath, the m_intr_mutex should be held from the beginning to the end,
 * so we only treat those cmds that need not release m_intr_mutex(even just for
 * a moment) as candidate for fast processing.  otherwise, we don't handle them
 * and just return, then in ISR, those cmds would be handled later with m_mutex
 * held and m_intr_mutex not held.
 */
static int
mptsas_handle_io_fastpath(mptsas_t *mpt,
    uint16_t SMID)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd = NULL;
	struct scsi_pkt	*pkt;

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_slots)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (TRUE);
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return (TRUE);
	}

	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/*
	 * If the cmd is a IOC, or a passthrough, then we don't process it in
	 * fastpath, and later it would be handled by mptsas_process_intr()
	 * with m_mutex protected.
	 */
	if (cmd->cmd_flags & (CFLAG_PASSTHRU | CFLAG_CMDIOC)) {
		return (FALSE);
	} else {
		mptsas_remove_cmd0(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add0(mpt, cmd);
	}

	/*
	 * In fastpath, the cmd should only be a context reply, so just check
	 * the post queue of the reply descriptor and the dmahandle of the cmd
	 * is enough.  No sense data in this case and no need to check the dma
	 * handle where sense data dma info is saved, the dma handle of the
	 * reply frame, and the dma handle of the reply free queue.
	 * For the dma handle of the request queue.  Check fma here since we
	 * are sure the request must have already been sent/DMAed correctly.
	 * otherwise checking in mptsas_scsi_start() is not correct since
	 * at that time the dma may not start.
	 */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}

	return (TRUE);
}

/*
 * Handle a context (SCSI IO success) reply descriptor: mark the packet
 * successful, release the slot and queue the command for completion.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
	uint16_t				SMID;
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt				*pkt;

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = scsi_io_success->SMID;

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_slots)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/* Passthrough waiters are woken directly; they own their own slot */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}

/*
 * Handle an address reply descriptor: locate the reply frame in the
 * reply frame pool, dispatch on the MPI function it carries (SCSI IO
 * error, task management, firmware download, event notification, diag
 * buffer post), then return the frame to the reply free queue and
 * complete or retry the associated command.  Must be called with
 * m_mutex held.
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;

	reply_addr = address_reply->ReplyFrameAddress;
	SMID = address_reply->SMID;
	/*
	 * If reply frame is not in the proper range we should ignore this
	 * message and exit the interrupt handler.
	 */
	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    mpt->m_reply_frame_dma_addr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/*
			 * Waiting threads will pick up the reply frame via
			 * cmd_rfm; the slot is released by the waiter, not
			 * here.
			 */
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}
		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		/* taskq thread owns the frame now; do not recycle it here */
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released.  Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* Firmware-internal commands are completed by their issuer */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}

/*
 * Decode a SCSI IO error reply frame: extract SCSI status, IOC status,
 * SCSI state, transfer/sense counts, and translate them into pkt_reason,
 * pkt_state, pkt_resid and (for CHECK CONDITION) auto-request-sense data
 * on the command's packet.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t			scsi_status, scsi_state;
	uint16_t		ioc_status;
	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt		*pkt;
	struct scsi_arq_status	*arqstat;
	struct buf		*bp;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint8_t			*sensedata = NULL;

	/* Pick the external ARQ buffer when one was bound for this cmd */
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		bp = cmd->cmd_ext_arq_buf;
	} else {
		bp = cmd->cmd_arq_buf;
	}

	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progressing, the command need retry later.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* only the low byte of the response info is the code */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;

		/* copy no more sense bytes than the ARQ buffer can hold */
		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
		    cmd->cmd_rqslen));
		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			mutex_enter(&ptgt->m_tgt_intr_mutex);
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			mutex_exit(&ptgt->m_tgt_intr_mutex);
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_BUS_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 */
			ptgt =
(mptsas_target_t *)mptsas_hash_traverse( 5268 &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST); 5269 while (ptgt != NULL) { 5270 mutex_enter(&ptgt->m_tgt_intr_mutex); 5271 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE); 5272 mutex_exit(&ptgt->m_tgt_intr_mutex); 5273 5274 ptgt = (mptsas_target_t *)mptsas_hash_traverse( 5275 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT); 5276 } 5277 5278 /* 5279 * retry command 5280 */ 5281 cmd->cmd_flags |= CFLAG_RETRY; 5282 cmd->cmd_pkt_flags |= FLAG_HEAD; 5283 5284 mutex_exit(&mpt->m_mutex); 5285 (void) mptsas_accept_pkt(mpt, cmd); 5286 mutex_enter(&mpt->m_mutex); 5287 break; 5288 default: 5289 mptsas_log(mpt, CE_WARN, 5290 "unknown ioc_status = %x\n", ioc_status); 5291 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer " 5292 "count = %x, scsi_status = %x", scsi_state, 5293 xferred, scsi_status); 5294 break; 5295 } 5296 break; 5297 case MPI2_SCSI_STATUS_TASK_SET_FULL: 5298 mptsas_handle_qfull(mpt, cmd); 5299 break; 5300 case MPI2_SCSI_STATUS_BUSY: 5301 NDBG31(("scsi_status busy received")); 5302 break; 5303 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: 5304 NDBG31(("scsi_status reservation conflict received")); 5305 break; 5306 default: 5307 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n", 5308 scsi_status, ioc_status); 5309 mptsas_log(mpt, CE_WARN, 5310 "mptsas_process_intr: invalid scsi status\n"); 5311 break; 5312 } 5313 } 5314 5315 static void 5316 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply, 5317 mptsas_cmd_t *cmd) 5318 { 5319 uint8_t task_type; 5320 uint16_t ioc_status; 5321 uint32_t log_info; 5322 uint16_t dev_handle; 5323 struct scsi_pkt *pkt = CMD2PKT(cmd); 5324 5325 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType); 5326 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus); 5327 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo); 5328 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle); 5329 5330 if (ioc_status != 
MPI2_IOCSTATUS_SUCCESS) { 5331 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x " 5332 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n", 5333 task_type, ioc_status, log_info, dev_handle); 5334 pkt->pkt_reason = CMD_INCOMPLETE; 5335 return; 5336 } 5337 5338 switch (task_type) { 5339 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: 5340 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET: 5341 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: 5342 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA: 5343 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET: 5344 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION: 5345 break; 5346 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: 5347 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 5348 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 5349 /* 5350 * Check for invalid DevHandle of 0 in case application 5351 * sends bad command. DevHandle of 0 could cause problems. 5352 */ 5353 if (dev_handle == 0) { 5354 mptsas_log(mpt, CE_WARN, "!Can't flush target with" 5355 " DevHandle of 0."); 5356 } else { 5357 mptsas_flush_target(mpt, dev_handle, Lun(cmd), 5358 task_type); 5359 } 5360 break; 5361 default: 5362 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.", 5363 task_type); 5364 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status); 5365 break; 5366 } 5367 } 5368 5369 static void 5370 mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg) 5371 { 5372 mptsas_t *mpt = arg->mpt; 5373 uint64_t t = arg->t; 5374 mptsas_cmd_t *cmd; 5375 struct scsi_pkt *pkt; 5376 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t]; 5377 5378 mutex_enter(&item->mutex); 5379 while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) { 5380 if (!item->doneq) { 5381 cv_wait(&item->cv, &item->mutex); 5382 } 5383 pkt = NULL; 5384 if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) { 5385 cmd->cmd_flags |= CFLAG_COMPLETED; 5386 pkt = CMD2PKT(cmd); 5387 } 5388 mutex_exit(&item->mutex); 5389 if (pkt) { 5390 mptsas_pkt_comp(pkt, cmd); 5391 } 5392 mutex_enter(&item->mutex); 
5393 } 5394 mutex_exit(&item->mutex); 5395 mutex_enter(&mpt->m_doneq_mutex); 5396 mpt->m_doneq_thread_n--; 5397 cv_broadcast(&mpt->m_doneq_thread_cv); 5398 mutex_exit(&mpt->m_doneq_mutex); 5399 } 5400 5401 /* 5402 * mpt interrupt handler. 5403 */ 5404 static uint_t 5405 mptsas_intr(caddr_t arg1, caddr_t arg2) 5406 { 5407 mptsas_t *mpt = (void *)arg1; 5408 pMpi2ReplyDescriptorsUnion_t reply_desc_union; 5409 uchar_t did_reply = FALSE; 5410 int i = 0, j; 5411 uint8_t reply_type; 5412 uint16_t SMID; 5413 5414 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2)); 5415 5416 /* 5417 * 1. 5418 * To avoid using m_mutex in the ISR(ISR referes not only mptsas_intr, 5419 * but all of the recursive called functions in it. the same below), 5420 * separate mutexs are introduced to protect the elements shown in ISR. 5421 * 3 type of mutex are involved here: 5422 * a)per instance mutex m_intr_mutex. 5423 * b)per target mutex m_tgt_intr_mutex. 5424 * c)mutex that protect the free slot. 5425 * 5426 * a)per instance mutex m_intr_mutex: 5427 * used to protect m_options, m_power, m_waitq, etc that would be 5428 * checked/modified in ISR; protect the getting and setting the reply 5429 * descriptor index; protect the m_slots[]; 5430 * 5431 * b)per target mutex m_tgt_intr_mutex: 5432 * used to protect per target element which has relationship to ISR. 5433 * contention for the new per target mutex is just as high as it in 5434 * sd(7d) driver. 5435 * 5436 * c)mutexs that protect the free slots: 5437 * those mutexs are introduced to minimize the mutex contentions 5438 * between the IO request threads where free slots are allocated 5439 * for sending cmds and ISR where slots holding outstanding cmds 5440 * are returned to the free pool. 5441 * the idea is like this: 5442 * 1) Partition all of the free slot into NCPU groups. 
For example, 5443 * In system where we have 15 slots, and 4 CPU, then slot s1,s5,s9,s13 5444 * are marked belonging to CPU1, s2,s6,s10,s14 to CPU2, s3,s7,s11,s15 5445 * to CPU3, and s4,s8,s12 to CPU4. 5446 * 2) In each of the group, an alloc/release queue pair is created, 5447 * and both the allocq and the releaseq have a dedicated mutex. 5448 * 3) When init, all of the slots in a CPU group are inserted into the 5449 * allocq of its CPU's pair. 5450 * 4) When doing IO, 5451 * mptsas_scsi_start() 5452 * { 5453 * cpuid = the cpu NO of the cpu where this thread is running on 5454 * retry: 5455 * mutex_enter(&allocq[cpuid]); 5456 * if (get free slot = success) { 5457 * remove the slot from the allocq 5458 * mutex_exit(&allocq[cpuid]); 5459 * return(success); 5460 * } else { // exchange allocq and releaseq and try again 5461 * mutex_enter(&releq[cpuid]); 5462 * exchange the allocq and releaseq of this pair; 5463 * mutex_exit(&releq[cpuid]); 5464 * if (try to get free slot again = success) { 5465 * remove the slot from the allocq 5466 * mutex_exit(&allocq[cpuid]); 5467 * return(success); 5468 * } else { 5469 * MOD(cpuid)++; 5470 * goto retry; 5471 * if (all CPU groups tried) 5472 * mutex_exit(&allocq[cpuid]); 5473 * return(failure); 5474 * } 5475 * } 5476 * } 5477 * ISR() 5478 * { 5479 * cpuid = the CPU group id where the slot sending the 5480 * cmd belongs; 5481 * mutex_enter(&releq[cpuid]); 5482 * remove the slot from the releaseq 5483 * mutex_exit(&releq[cpuid]); 5484 * } 5485 * This way, only when the queue pair doing exchange have mutex 5486 * contentions. 5487 * 5488 * For mutex m_intr_mutex and m_tgt_intr_mutex, there are 2 scenarios: 5489 * 5490 * a)If the elements are only checked but not modified in the ISR, then 5491 * only the places where those elements are modifed(outside of ISR) 5492 * need to be protected by the new introduced mutex. 
5493 * For example, data A is only read/checked in ISR, then we need do 5494 * like this: 5495 * In ISR: 5496 * { 5497 * mutex_enter(&new_mutex); 5498 * read(A); 5499 * mutex_exit(&new_mutex); 5500 * //the new_mutex here is either the m_tgt_intr_mutex or 5501 * //the m_intr_mutex. 5502 * } 5503 * In non-ISR 5504 * { 5505 * mutex_enter(&m_mutex); //the stock driver already did this 5506 * mutex_enter(&new_mutex); 5507 * write(A); 5508 * mutex_exit(&new_mutex); 5509 * mutex_exit(&m_mutex); //the stock driver already did this 5510 * 5511 * read(A); 5512 * // read(A) in non-ISR is not required to be protected by new 5513 * // mutex since 'A' has already been protected by m_mutex 5514 * // outside of the ISR 5515 * } 5516 * 5517 * Those fields in mptsas_target_t/ptgt which are only read in ISR 5518 * fall into this catergory. So they, together with the fields which 5519 * are never read in ISR, are not necessary to be protected by 5520 * m_tgt_intr_mutex, don't bother. 5521 * checking of m_waitq also falls into this catergory. so all of the 5522 * place outside of ISR where the m_waitq is modified, such as in 5523 * mptsas_waitq_add(), mptsas_waitq_delete(), mptsas_waitq_rm(), 5524 * m_intr_mutex should be used. 5525 * 5526 * b)If the elements are modified in the ISR, then each place where 5527 * those elements are referred(outside of ISR) need to be protected 5528 * by the new introduced mutex. Of course, if those elements only 5529 * appear in the non-key code path, that is, they don't affect 5530 * performance, then the m_mutex can still be used as before. 
5531 * For example, data B is modified in key code path in ISR, and data C 5532 * is modified in non-key code path in ISR, then we can do like this: 5533 * In ISR: 5534 * { 5535 * mutex_enter(&new_mutex); 5536 * wirte(B); 5537 * mutex_exit(&new_mutex); 5538 * if (seldom happen) { 5539 * mutex_enter(&m_mutex); 5540 * write(C); 5541 * mutex_exit(&m_mutex); 5542 * } 5543 * //the new_mutex here is either the m_tgt_intr_mutex or 5544 * //the m_intr_mutex. 5545 * } 5546 * In non-ISR 5547 * { 5548 * mutex_enter(&new_mutex); 5549 * write(B); 5550 * mutex_exit(&new_mutex); 5551 * 5552 * mutex_enter(&new_mutex); 5553 * read(B); 5554 * mutex_exit(&new_mutex); 5555 * // both write(B) and read(B) in non-ISR is required to be 5556 * // protected by new mutex outside of the ISR 5557 * 5558 * mutex_enter(&m_mutex); //the stock driver already did this 5559 * read(C); 5560 * write(C); 5561 * mutex_exit(&m_mutex); //the stock driver already did this 5562 * // both write(C) and read(C) in non-ISR have been already 5563 * // been protected by m_mutex outside of the ISR 5564 * } 5565 * 5566 * For example, ptgt->m_t_ncmds fall into 'B' of this catergory, and 5567 * elements shown in address reply, restart_hba, passthrough, IOC 5568 * fall into 'C' of this catergory. 5569 * 5570 * In any case where mutexs are nested, make sure in the following 5571 * order: 5572 * m_mutex -> m_intr_mutex -> m_tgt_intr_mutex 5573 * m_intr_mutex -> m_tgt_intr_mutex 5574 * m_mutex -> m_intr_mutex 5575 * m_mutex -> m_tgt_intr_mutex 5576 * 5577 * 2. 5578 * Make sure at any time, getting the ReplyDescriptor by m_post_index 5579 * and setting m_post_index to the ReplyDescriptorIndex register are 5580 * atomic. Since m_mutex is not used for this purpose in ISR, the new 5581 * mutex m_intr_mutex must play this role. So mptsas_poll(), where this 5582 * kind of getting/setting is also performed, must use m_intr_mutex. 
5583 * Note, since context reply in ISR/process_intr is the only code path 5584 * which affect performance, a fast path is introduced to only handle 5585 * the read/write IO having context reply. For other IOs such as 5586 * passthrough and IOC with context reply and all address reply, we 5587 * use the as-is process_intr() to handle them. In order to keep the 5588 * same semantics in process_intr(), make sure any new mutex is not held 5589 * before enterring it. 5590 */ 5591 5592 mutex_enter(&mpt->m_intr_mutex); 5593 5594 /* 5595 * If interrupts are shared by two channels then check whether this 5596 * interrupt is genuinely for this channel by making sure first the 5597 * chip is in high power state. 5598 */ 5599 if ((mpt->m_options & MPTSAS_OPT_PM) && 5600 (mpt->m_power_level != PM_LEVEL_D0)) { 5601 mutex_exit(&mpt->m_intr_mutex); 5602 return (DDI_INTR_UNCLAIMED); 5603 } 5604 5605 /* 5606 * If polling, interrupt was triggered by some shared interrupt because 5607 * IOC interrupts are disabled during polling, so polling routine will 5608 * handle any replies. Considering this, if polling is happening, 5609 * return with interrupt unclaimed. 5610 */ 5611 if (mpt->m_polled_intr) { 5612 mutex_exit(&mpt->m_intr_mutex); 5613 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt"); 5614 return (DDI_INTR_UNCLAIMED); 5615 } 5616 5617 /* 5618 * Read the istat register. 5619 */ 5620 if ((INTPENDING(mpt)) != 0) { 5621 /* 5622 * read fifo until empty. 
5623 */ 5624 #ifndef __lock_lint 5625 _NOTE(CONSTCOND) 5626 #endif 5627 while (TRUE) { 5628 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0, 5629 DDI_DMA_SYNC_FORCPU); 5630 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t) 5631 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index); 5632 5633 if (ddi_get32(mpt->m_acc_post_queue_hdl, 5634 &reply_desc_union->Words.Low) == 0xFFFFFFFF || 5635 ddi_get32(mpt->m_acc_post_queue_hdl, 5636 &reply_desc_union->Words.High) == 0xFFFFFFFF) { 5637 break; 5638 } 5639 5640 /* 5641 * The reply is valid, process it according to its 5642 * type. Also, set a flag for updating the reply index 5643 * after they've all been processed. 5644 */ 5645 did_reply = TRUE; 5646 5647 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl, 5648 &reply_desc_union->Default.ReplyFlags); 5649 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 5650 mpt->m_reply[i].Default.ReplyFlags = reply_type; 5651 if (reply_type == 5652 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { 5653 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, 5654 &reply_desc_union->SCSIIOSuccess.SMID); 5655 if (mptsas_handle_io_fastpath(mpt, SMID) != 5656 TRUE) { 5657 mpt->m_reply[i].SCSIIOSuccess.SMID = 5658 SMID; 5659 i++; 5660 } 5661 } else if (reply_type == 5662 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { 5663 mpt->m_reply[i].AddressReply.ReplyFrameAddress = 5664 ddi_get32(mpt->m_acc_post_queue_hdl, 5665 &reply_desc_union->AddressReply. 5666 ReplyFrameAddress); 5667 mpt->m_reply[i].AddressReply.SMID = 5668 ddi_get16(mpt->m_acc_post_queue_hdl, 5669 &reply_desc_union->AddressReply.SMID); 5670 i++; 5671 } 5672 /* 5673 * Clear the reply descriptor for re-use and increment 5674 * index. 5675 */ 5676 ddi_put64(mpt->m_acc_post_queue_hdl, 5677 &((uint64_t *)(void *)mpt->m_post_queue) 5678 [mpt->m_post_index], 0xFFFFFFFFFFFFFFFF); 5679 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0, 5680 DDI_DMA_SYNC_FORDEV); 5681 5682 /* 5683 * Increment post index and roll over if needed. 
5684 */ 5685 if (++mpt->m_post_index == mpt->m_post_queue_depth) { 5686 mpt->m_post_index = 0; 5687 } 5688 if (i >= MPI_ADDRESS_COALSCE_MAX) 5689 break; 5690 } 5691 5692 /* 5693 * Update the global reply index if at least one reply was 5694 * processed. 5695 */ 5696 if (did_reply) { 5697 ddi_put32(mpt->m_datap, 5698 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index); 5699 5700 /* 5701 * For fma, only check the PIO is required and enough 5702 * here. Those cases where fastpath is not hit, the 5703 * mptsas_fma_check() check all of the types of 5704 * fma. That is not necessary and sometimes not 5705 * correct. fma check should only be done after 5706 * the PIO and/or dma is performed. 5707 */ 5708 if ((mptsas_check_acc_handle(mpt->m_datap) != 5709 DDI_SUCCESS)) { 5710 ddi_fm_service_impact(mpt->m_dip, 5711 DDI_SERVICE_UNAFFECTED); 5712 } 5713 5714 } 5715 } else { 5716 mutex_exit(&mpt->m_intr_mutex); 5717 return (DDI_INTR_UNCLAIMED); 5718 } 5719 NDBG1(("mptsas_intr complete")); 5720 mutex_exit(&mpt->m_intr_mutex); 5721 5722 /* 5723 * Since most of the cmds(read and write IO with success return.) 5724 * have already been processed in fast path in which the m_mutex 5725 * is not held, handling here the address reply and other context reply 5726 * such as passthrough and IOC cmd with m_mutex held should be a big 5727 * issue for performance. 5728 * If holding m_mutex to process these cmds was still an obvious issue, 5729 * we can process them in a taskq. 5730 */ 5731 for (j = 0; j < i; j++) { 5732 mutex_enter(&mpt->m_mutex); 5733 mptsas_process_intr(mpt, &mpt->m_reply[j]); 5734 mutex_exit(&mpt->m_mutex); 5735 } 5736 5737 /* 5738 * If no helper threads are created, process the doneq in ISR. If 5739 * helpers are created, use the doneq length as a metric to measure the 5740 * load on the interrupt CPU. If it is long enough, which indicates the 5741 * load is heavy, then we deliver the IO completions to the helpers. 
5742 * This measurement has some limitations, although it is simple and 5743 * straightforward and works well for most of the cases at present. 5744 */ 5745 if (!mpt->m_doneq_thread_n) { 5746 mptsas_doneq_empty(mpt); 5747 } else { 5748 int helper = 1; 5749 mutex_enter(&mpt->m_intr_mutex); 5750 if (mpt->m_doneq_len <= mpt->m_doneq_length_threshold) 5751 helper = 0; 5752 mutex_exit(&mpt->m_intr_mutex); 5753 if (helper) { 5754 mptsas_deliver_doneq_thread(mpt); 5755 } else { 5756 mptsas_doneq_empty(mpt); 5757 } 5758 } 5759 5760 /* 5761 * If there are queued cmd, start them now. 5762 */ 5763 mutex_enter(&mpt->m_intr_mutex); 5764 if (mpt->m_waitq != NULL) { 5765 mutex_exit(&mpt->m_intr_mutex); 5766 mutex_enter(&mpt->m_mutex); 5767 mptsas_restart_hba(mpt); 5768 mutex_exit(&mpt->m_mutex); 5769 return (DDI_INTR_CLAIMED); 5770 } 5771 mutex_exit(&mpt->m_intr_mutex); 5772 return (DDI_INTR_CLAIMED); 5773 } 5774 5775 /* 5776 * In ISR, the successfully completed read and write IO are processed in a 5777 * fast path. This function is only used to handle non-fastpath IO, including 5778 * all of the address reply, and the context reply for IOC cmd, passthrough, 5779 * etc. 5780 * This function is also used to process polled cmd. 5781 */ 5782 static void 5783 mptsas_process_intr(mptsas_t *mpt, 5784 pMpi2ReplyDescriptorsUnion_t reply_desc_union) 5785 { 5786 uint8_t reply_type; 5787 5788 /* 5789 * The reply is valid, process it according to its 5790 * type. Also, set a flag for updated the reply index 5791 * after they've all been processed. 
5792 */ 5793 reply_type = reply_desc_union->Default.ReplyFlags; 5794 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { 5795 mptsas_handle_scsi_io_success(mpt, reply_desc_union); 5796 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { 5797 mptsas_handle_address_reply(mpt, reply_desc_union); 5798 } else { 5799 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type); 5800 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED); 5801 } 5802 } 5803 5804 /* 5805 * handle qfull condition 5806 */ 5807 static void 5808 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd) 5809 { 5810 mptsas_target_t *ptgt = cmd->cmd_tgt_addr; 5811 5812 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) || 5813 (ptgt->m_qfull_retries == 0)) { 5814 /* 5815 * We have exhausted the retries on QFULL, or, 5816 * the target driver has indicated that it 5817 * wants to handle QFULL itself by setting 5818 * qfull-retries capability to 0. In either case 5819 * we want the target driver's QFULL handling 5820 * to kick in. We do this by having pkt_reason 5821 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL. 
5822 */ 5823 mutex_enter(&ptgt->m_tgt_intr_mutex); 5824 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE); 5825 mutex_exit(&ptgt->m_tgt_intr_mutex); 5826 } else { 5827 mutex_enter(&ptgt->m_tgt_intr_mutex); 5828 if (ptgt->m_reset_delay == 0) { 5829 ptgt->m_t_throttle = 5830 max((ptgt->m_t_ncmds - 2), 0); 5831 } 5832 mutex_exit(&ptgt->m_tgt_intr_mutex); 5833 5834 cmd->cmd_pkt_flags |= FLAG_HEAD; 5835 cmd->cmd_flags &= ~(CFLAG_TRANFLAG); 5836 cmd->cmd_flags |= CFLAG_RETRY; 5837 5838 mutex_exit(&mpt->m_mutex); 5839 (void) mptsas_accept_pkt(mpt, cmd); 5840 mutex_enter(&mpt->m_mutex); 5841 5842 /* 5843 * when target gives queue full status with no commands 5844 * outstanding (m_t_ncmds == 0), throttle is set to 0 5845 * (HOLD_THROTTLE), and the queue full handling start 5846 * (see psarc/1994/313); if there are commands outstanding, 5847 * throttle is set to (m_t_ncmds - 2) 5848 */ 5849 mutex_enter(&ptgt->m_tgt_intr_mutex); 5850 if (ptgt->m_t_throttle == HOLD_THROTTLE) { 5851 /* 5852 * By setting throttle to QFULL_THROTTLE, we 5853 * avoid submitting new commands and in 5854 * mptsas_restart_cmd find out slots which need 5855 * their throttles to be cleared. 5856 */ 5857 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE); 5858 if (mpt->m_restart_cmd_timeid == 0) { 5859 mpt->m_restart_cmd_timeid = 5860 timeout(mptsas_restart_cmd, mpt, 5861 ptgt->m_qfull_retry_interval); 5862 } 5863 } 5864 mutex_exit(&ptgt->m_tgt_intr_mutex); 5865 } 5866 } 5867 5868 mptsas_phymask_t 5869 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport) 5870 { 5871 mptsas_phymask_t phy_mask = 0; 5872 uint8_t i = 0; 5873 5874 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance)); 5875 5876 ASSERT(mutex_owned(&mpt->m_mutex)); 5877 5878 /* 5879 * If physport is 0xFF, this is a RAID volume. Use phymask of 0. 
5880 */ 5881 if (physport == 0xFF) { 5882 return (0); 5883 } 5884 5885 for (i = 0; i < MPTSAS_MAX_PHYS; i++) { 5886 if (mpt->m_phy_info[i].attached_devhdl && 5887 (mpt->m_phy_info[i].phy_mask != 0) && 5888 (mpt->m_phy_info[i].port_num == physport)) { 5889 phy_mask = mpt->m_phy_info[i].phy_mask; 5890 break; 5891 } 5892 } 5893 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ", 5894 mpt->m_instance, physport, phy_mask)); 5895 return (phy_mask); 5896 } 5897 5898 /* 5899 * mpt free device handle after device gone, by use of passthrough 5900 */ 5901 static int 5902 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl) 5903 { 5904 Mpi2SasIoUnitControlRequest_t req; 5905 Mpi2SasIoUnitControlReply_t rep; 5906 int ret; 5907 5908 ASSERT(mutex_owned(&mpt->m_mutex)); 5909 5910 /* 5911 * Need to compose a SAS IO Unit Control request message 5912 * and call mptsas_do_passthru() function 5913 */ 5914 bzero(&req, sizeof (req)); 5915 bzero(&rep, sizeof (rep)); 5916 5917 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 5918 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE; 5919 req.DevHandle = LE_16(devhdl); 5920 5921 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL, 5922 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL); 5923 if (ret != 0) { 5924 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit " 5925 "Control error %d", ret); 5926 return (DDI_FAILURE); 5927 } 5928 5929 /* do passthrough success, check the ioc status */ 5930 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) { 5931 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit " 5932 "Control IOCStatus %d", LE_16(rep.IOCStatus)); 5933 return (DDI_FAILURE); 5934 } 5935 5936 return (DDI_SUCCESS); 5937 } 5938 5939 static void 5940 mptsas_update_phymask(mptsas_t *mpt) 5941 { 5942 mptsas_phymask_t mask = 0, phy_mask; 5943 char *phy_mask_name; 5944 uint8_t current_port; 5945 int i, j; 5946 5947 NDBG20(("mptsas%d update phymask ", mpt->m_instance)); 5948 5949 
ASSERT(mutex_owned(&mpt->m_mutex)); 5950 5951 (void) mptsas_get_sas_io_unit_page(mpt); 5952 5953 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP); 5954 5955 for (i = 0; i < mpt->m_num_phys; i++) { 5956 phy_mask = 0x00; 5957 5958 if (mpt->m_phy_info[i].attached_devhdl == 0) 5959 continue; 5960 5961 bzero(phy_mask_name, sizeof (phy_mask_name)); 5962 5963 current_port = mpt->m_phy_info[i].port_num; 5964 5965 if ((mask & (1 << i)) != 0) 5966 continue; 5967 5968 for (j = 0; j < mpt->m_num_phys; j++) { 5969 if (mpt->m_phy_info[j].attached_devhdl && 5970 (mpt->m_phy_info[j].port_num == current_port)) { 5971 phy_mask |= (1 << j); 5972 } 5973 } 5974 mask = mask | phy_mask; 5975 5976 for (j = 0; j < mpt->m_num_phys; j++) { 5977 if ((phy_mask >> j) & 0x01) { 5978 mpt->m_phy_info[j].phy_mask = phy_mask; 5979 } 5980 } 5981 5982 (void) sprintf(phy_mask_name, "%x", phy_mask); 5983 5984 mutex_exit(&mpt->m_mutex); 5985 /* 5986 * register a iport, if the port has already been existed 5987 * SCSA will do nothing and just return. 5988 */ 5989 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name); 5990 mutex_enter(&mpt->m_mutex); 5991 } 5992 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS); 5993 NDBG20(("mptsas%d update phymask return", mpt->m_instance)); 5994 } 5995 5996 /* 5997 * mptsas_handle_dr is a task handler for DR, the DR action includes: 5998 * 1. Directly attched Device Added/Removed. 5999 * 2. Expander Device Added/Removed. 6000 * 3. Indirectly Attached Device Added/Expander. 6001 * 4. LUNs of a existing device status change. 6002 * 5. RAID volume created/deleted. 6003 * 6. Member of RAID volume is released because of RAID deletion. 6004 * 7. Physical disks are removed because of RAID creation. 
6005 */ 6006 static void 6007 mptsas_handle_dr(void *args) { 6008 mptsas_topo_change_list_t *topo_node = NULL; 6009 mptsas_topo_change_list_t *save_node = NULL; 6010 mptsas_t *mpt; 6011 dev_info_t *parent = NULL; 6012 mptsas_phymask_t phymask = 0; 6013 char *phy_mask_name; 6014 uint8_t flags = 0, physport = 0xff; 6015 uint8_t port_update = 0; 6016 uint_t event; 6017 6018 topo_node = (mptsas_topo_change_list_t *)args; 6019 6020 mpt = topo_node->mpt; 6021 event = topo_node->event; 6022 flags = topo_node->flags; 6023 6024 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP); 6025 6026 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance)); 6027 6028 switch (event) { 6029 case MPTSAS_DR_EVENT_RECONFIG_TARGET: 6030 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) || 6031 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) || 6032 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) { 6033 /* 6034 * Direct attached or expander attached device added 6035 * into system or a Phys Disk that is being unhidden. 6036 */ 6037 port_update = 1; 6038 } 6039 break; 6040 case MPTSAS_DR_EVENT_RECONFIG_SMP: 6041 /* 6042 * New expander added into system, it must be the head 6043 * of topo_change_list_t 6044 */ 6045 port_update = 1; 6046 break; 6047 default: 6048 port_update = 0; 6049 break; 6050 } 6051 /* 6052 * All cases port_update == 1 may cause initiator port form change 6053 */ 6054 mutex_enter(&mpt->m_mutex); 6055 if (mpt->m_port_chng && port_update) { 6056 /* 6057 * mpt->m_port_chng flag indicates some PHYs of initiator 6058 * port have changed to online. So when expander added or 6059 * directly attached device online event come, we force to 6060 * update port information by issueing SAS IO Unit Page and 6061 * update PHYMASKs. 
6062 */ 6063 (void) mptsas_update_phymask(mpt); 6064 mpt->m_port_chng = 0; 6065 6066 } 6067 mutex_exit(&mpt->m_mutex); 6068 while (topo_node) { 6069 phymask = 0; 6070 if (parent == NULL) { 6071 physport = topo_node->un.physport; 6072 event = topo_node->event; 6073 flags = topo_node->flags; 6074 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET | 6075 MPTSAS_DR_EVENT_OFFLINE_SMP)) { 6076 /* 6077 * For all offline events, phymask is known 6078 */ 6079 phymask = topo_node->un.phymask; 6080 goto find_parent; 6081 } 6082 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) { 6083 goto handle_topo_change; 6084 } 6085 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) { 6086 phymask = topo_node->un.phymask; 6087 goto find_parent; 6088 } 6089 6090 if ((flags == 6091 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) && 6092 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) { 6093 /* 6094 * There is no any field in IR_CONFIG_CHANGE 6095 * event indicate physport/phynum, let's get 6096 * parent after SAS Device Page0 request. 6097 */ 6098 goto handle_topo_change; 6099 } 6100 6101 mutex_enter(&mpt->m_mutex); 6102 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) { 6103 /* 6104 * If the direct attached device added or a 6105 * phys disk is being unhidden, argument 6106 * physport actually is PHY#, so we have to get 6107 * phymask according PHY#. 6108 */ 6109 physport = mpt->m_phy_info[physport].port_num; 6110 } 6111 6112 /* 6113 * Translate physport to phymask so that we can search 6114 * parent dip. 6115 */ 6116 phymask = mptsas_physport_to_phymask(mpt, 6117 physport); 6118 mutex_exit(&mpt->m_mutex); 6119 6120 find_parent: 6121 bzero(phy_mask_name, MPTSAS_MAX_PHYS); 6122 /* 6123 * For RAID topology change node, write the iport name 6124 * as v0. 6125 */ 6126 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) { 6127 (void) sprintf(phy_mask_name, "v0"); 6128 } else { 6129 /* 6130 * phymask can bo 0 if the drive has been 6131 * pulled by the time an add event is 6132 * processed. 
If phymask is 0, just skip this 6133 * event and continue. 6134 */ 6135 if (phymask == 0) { 6136 mutex_enter(&mpt->m_mutex); 6137 save_node = topo_node; 6138 topo_node = topo_node->next; 6139 ASSERT(save_node); 6140 kmem_free(save_node, 6141 sizeof (mptsas_topo_change_list_t)); 6142 mutex_exit(&mpt->m_mutex); 6143 6144 parent = NULL; 6145 continue; 6146 } 6147 (void) sprintf(phy_mask_name, "%x", phymask); 6148 } 6149 parent = scsi_hba_iport_find(mpt->m_dip, 6150 phy_mask_name); 6151 if (parent == NULL) { 6152 mptsas_log(mpt, CE_WARN, "Failed to find an " 6153 "iport, should not happen!"); 6154 goto out; 6155 } 6156 6157 } 6158 ASSERT(parent); 6159 handle_topo_change: 6160 6161 mutex_enter(&mpt->m_mutex); 6162 6163 mptsas_handle_topo_change(topo_node, parent); 6164 save_node = topo_node; 6165 topo_node = topo_node->next; 6166 ASSERT(save_node); 6167 kmem_free(save_node, sizeof (mptsas_topo_change_list_t)); 6168 mutex_exit(&mpt->m_mutex); 6169 6170 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) || 6171 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) || 6172 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) { 6173 /* 6174 * If direct attached device associated, make sure 6175 * reset the parent before start the next one. But 6176 * all devices associated with expander shares the 6177 * parent. Also, reset parent if this is for RAID. 
6178 */ 6179 parent = NULL; 6180 } 6181 } 6182 out: 6183 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS); 6184 } 6185 6186 static void 6187 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node, 6188 dev_info_t *parent) 6189 { 6190 mptsas_target_t *ptgt = NULL; 6191 mptsas_smp_t *psmp = NULL; 6192 mptsas_t *mpt = (void *)topo_node->mpt; 6193 uint16_t devhdl; 6194 uint16_t attached_devhdl; 6195 uint64_t sas_wwn = 0; 6196 int rval = 0; 6197 uint32_t page_address; 6198 uint8_t phy, flags; 6199 char *addr = NULL; 6200 dev_info_t *lundip; 6201 int circ = 0, circ1 = 0; 6202 char attached_wwnstr[MPTSAS_WWN_STRLEN]; 6203 6204 NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance)); 6205 6206 ASSERT(mutex_owned(&mpt->m_mutex)); 6207 6208 switch (topo_node->event) { 6209 case MPTSAS_DR_EVENT_RECONFIG_TARGET: 6210 { 6211 char *phy_mask_name; 6212 mptsas_phymask_t phymask = 0; 6213 6214 if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) { 6215 /* 6216 * Get latest RAID info. 6217 */ 6218 (void) mptsas_get_raid_info(mpt); 6219 ptgt = mptsas_search_by_devhdl( 6220 &mpt->m_active->m_tgttbl, topo_node->devhdl); 6221 if (ptgt == NULL) 6222 break; 6223 } else { 6224 ptgt = (void *)topo_node->object; 6225 } 6226 6227 if (ptgt == NULL) { 6228 /* 6229 * If a Phys Disk was deleted, RAID info needs to be 6230 * updated to reflect the new topology. 6231 */ 6232 (void) mptsas_get_raid_info(mpt); 6233 6234 /* 6235 * Get sas device page 0 by DevHandle to make sure if 6236 * SSP/SATA end device exist. 6237 */ 6238 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE & 6239 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | 6240 topo_node->devhdl; 6241 6242 rval = mptsas_get_target_device_info(mpt, page_address, 6243 &devhdl, &ptgt); 6244 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) { 6245 mptsas_log(mpt, CE_NOTE, 6246 "mptsas_handle_topo_change: target %d is " 6247 "not a SAS/SATA device. 
\n", 6248 topo_node->devhdl); 6249 } else if (rval == DEV_INFO_FAIL_ALLOC) { 6250 mptsas_log(mpt, CE_NOTE, 6251 "mptsas_handle_topo_change: could not " 6252 "allocate memory. \n"); 6253 } 6254 /* 6255 * If rval is DEV_INFO_PHYS_DISK than there is nothing 6256 * else to do, just leave. 6257 */ 6258 if (rval != DEV_INFO_SUCCESS) { 6259 return; 6260 } 6261 } 6262 6263 ASSERT(ptgt->m_devhdl == topo_node->devhdl); 6264 6265 mutex_exit(&mpt->m_mutex); 6266 flags = topo_node->flags; 6267 6268 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) { 6269 phymask = ptgt->m_phymask; 6270 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP); 6271 (void) sprintf(phy_mask_name, "%x", phymask); 6272 parent = scsi_hba_iport_find(mpt->m_dip, 6273 phy_mask_name); 6274 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS); 6275 if (parent == NULL) { 6276 mptsas_log(mpt, CE_WARN, "Failed to find a " 6277 "iport for PD, should not happen!"); 6278 mutex_enter(&mpt->m_mutex); 6279 break; 6280 } 6281 } 6282 6283 if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) { 6284 ndi_devi_enter(parent, &circ1); 6285 (void) mptsas_config_raid(parent, topo_node->devhdl, 6286 &lundip); 6287 ndi_devi_exit(parent, circ1); 6288 } else { 6289 /* 6290 * hold nexus for bus configure 6291 */ 6292 ndi_devi_enter(scsi_vhci_dip, &circ); 6293 ndi_devi_enter(parent, &circ1); 6294 rval = mptsas_config_target(parent, ptgt); 6295 /* 6296 * release nexus for bus configure 6297 */ 6298 ndi_devi_exit(parent, circ1); 6299 ndi_devi_exit(scsi_vhci_dip, circ); 6300 6301 /* 6302 * Add parent's props for SMHBA support 6303 */ 6304 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) { 6305 bzero(attached_wwnstr, 6306 sizeof (attached_wwnstr)); 6307 (void) sprintf(attached_wwnstr, "w%016"PRIx64, 6308 ptgt->m_sas_wwn); 6309 if (ddi_prop_update_string(DDI_DEV_T_NONE, 6310 parent, 6311 SCSI_ADDR_PROP_ATTACHED_PORT, 6312 attached_wwnstr) 6313 != DDI_PROP_SUCCESS) { 6314 (void) ddi_prop_remove(DDI_DEV_T_NONE, 6315 parent, 6316 
SCSI_ADDR_PROP_ATTACHED_PORT); 6317 mptsas_log(mpt, CE_WARN, "Failed to" 6318 "attached-port props"); 6319 return; 6320 } 6321 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent, 6322 MPTSAS_NUM_PHYS, 1) != 6323 DDI_PROP_SUCCESS) { 6324 (void) ddi_prop_remove(DDI_DEV_T_NONE, 6325 parent, MPTSAS_NUM_PHYS); 6326 mptsas_log(mpt, CE_WARN, "Failed to" 6327 " create num-phys props"); 6328 return; 6329 } 6330 6331 /* 6332 * Update PHY info for smhba 6333 */ 6334 mutex_enter(&mpt->m_mutex); 6335 if (mptsas_smhba_phy_init(mpt)) { 6336 mutex_exit(&mpt->m_mutex); 6337 mptsas_log(mpt, CE_WARN, "mptsas phy" 6338 " update failed"); 6339 return; 6340 } 6341 mutex_exit(&mpt->m_mutex); 6342 mptsas_smhba_set_phy_props(mpt, 6343 ddi_get_name_addr(parent), parent, 6344 1, &attached_devhdl); 6345 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent, 6346 MPTSAS_VIRTUAL_PORT, 0) != 6347 DDI_PROP_SUCCESS) { 6348 (void) ddi_prop_remove(DDI_DEV_T_NONE, 6349 parent, MPTSAS_VIRTUAL_PORT); 6350 mptsas_log(mpt, CE_WARN, 6351 "mptsas virtual-port" 6352 "port prop update failed"); 6353 return; 6354 } 6355 } 6356 } 6357 mutex_enter(&mpt->m_mutex); 6358 6359 NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, " 6360 "phymask:%x.", mpt->m_instance, ptgt->m_devhdl, 6361 ptgt->m_phymask)); 6362 break; 6363 } 6364 case MPTSAS_DR_EVENT_OFFLINE_TARGET: 6365 { 6366 mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl; 6367 devhdl = topo_node->devhdl; 6368 ptgt = mptsas_search_by_devhdl(tgttbl, devhdl); 6369 if (ptgt == NULL) 6370 break; 6371 6372 sas_wwn = ptgt->m_sas_wwn; 6373 phy = ptgt->m_phynum; 6374 6375 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP); 6376 6377 if (sas_wwn) { 6378 (void) sprintf(addr, "w%016"PRIx64, sas_wwn); 6379 } else { 6380 (void) sprintf(addr, "p%x", phy); 6381 } 6382 ASSERT(ptgt->m_devhdl == devhdl); 6383 6384 if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) || 6385 (topo_node->flags == 6386 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) { 6387 /* 6388 * Get latest RAID 
info if RAID volume status changes 6389 * or Phys Disk status changes 6390 */ 6391 (void) mptsas_get_raid_info(mpt); 6392 } 6393 /* 6394 * Abort all outstanding command on the device 6395 */ 6396 rval = mptsas_do_scsi_reset(mpt, devhdl); 6397 if (rval) { 6398 NDBG20(("mptsas%d handle_topo_change to reset target " 6399 "before offline devhdl:%x, phymask:%x, rval:%x", 6400 mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask, 6401 rval)); 6402 } 6403 6404 mutex_exit(&mpt->m_mutex); 6405 6406 ndi_devi_enter(scsi_vhci_dip, &circ); 6407 ndi_devi_enter(parent, &circ1); 6408 rval = mptsas_offline_target(parent, addr); 6409 ndi_devi_exit(parent, circ1); 6410 ndi_devi_exit(scsi_vhci_dip, circ); 6411 NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, " 6412 "phymask:%x, rval:%x", mpt->m_instance, 6413 ptgt->m_devhdl, ptgt->m_phymask, rval)); 6414 6415 kmem_free(addr, SCSI_MAXNAMELEN); 6416 6417 /* 6418 * Clear parent's props for SMHBA support 6419 */ 6420 flags = topo_node->flags; 6421 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) { 6422 bzero(attached_wwnstr, sizeof (attached_wwnstr)); 6423 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent, 6424 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) != 6425 DDI_PROP_SUCCESS) { 6426 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent, 6427 SCSI_ADDR_PROP_ATTACHED_PORT); 6428 mptsas_log(mpt, CE_WARN, "mptsas attached port " 6429 "prop update failed"); 6430 break; 6431 } 6432 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent, 6433 MPTSAS_NUM_PHYS, 0) != 6434 DDI_PROP_SUCCESS) { 6435 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent, 6436 MPTSAS_NUM_PHYS); 6437 mptsas_log(mpt, CE_WARN, "mptsas num phys " 6438 "prop update failed"); 6439 break; 6440 } 6441 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent, 6442 MPTSAS_VIRTUAL_PORT, 1) != 6443 DDI_PROP_SUCCESS) { 6444 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent, 6445 MPTSAS_VIRTUAL_PORT); 6446 mptsas_log(mpt, CE_WARN, "mptsas virtual port " 6447 "prop update failed"); 6448 break; 6449 } 
6450 } 6451 6452 mutex_enter(&mpt->m_mutex); 6453 if (rval == DDI_SUCCESS) { 6454 mptsas_tgt_free(&mpt->m_active->m_tgttbl, 6455 ptgt->m_sas_wwn, ptgt->m_phymask); 6456 ptgt = NULL; 6457 } else { 6458 /* 6459 * clean DR_INTRANSITION flag to allow I/O down to 6460 * PHCI driver since failover finished. 6461 * Invalidate the devhdl 6462 */ 6463 mutex_enter(&ptgt->m_tgt_intr_mutex); 6464 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL; 6465 ptgt->m_tgt_unconfigured = 0; 6466 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE; 6467 mutex_exit(&ptgt->m_tgt_intr_mutex); 6468 } 6469 6470 /* 6471 * Send SAS IO Unit Control to free the dev handle 6472 */ 6473 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) || 6474 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) { 6475 rval = mptsas_free_devhdl(mpt, devhdl); 6476 6477 NDBG20(("mptsas%d handle_topo_change to remove " 6478 "devhdl:%x, rval:%x", mpt->m_instance, devhdl, 6479 rval)); 6480 } 6481 6482 break; 6483 } 6484 case MPTSAS_TOPO_FLAG_REMOVE_HANDLE: 6485 { 6486 devhdl = topo_node->devhdl; 6487 /* 6488 * If this is the remove handle event, do a reset first. 
6489 */ 6490 if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) { 6491 rval = mptsas_do_scsi_reset(mpt, devhdl); 6492 if (rval) { 6493 NDBG20(("mpt%d reset target before remove " 6494 "devhdl:%x, rval:%x", mpt->m_instance, 6495 devhdl, rval)); 6496 } 6497 } 6498 6499 /* 6500 * Send SAS IO Unit Control to free the dev handle 6501 */ 6502 rval = mptsas_free_devhdl(mpt, devhdl); 6503 NDBG20(("mptsas%d handle_topo_change to remove " 6504 "devhdl:%x, rval:%x", mpt->m_instance, devhdl, 6505 rval)); 6506 break; 6507 } 6508 case MPTSAS_DR_EVENT_RECONFIG_SMP: 6509 { 6510 mptsas_smp_t smp; 6511 dev_info_t *smpdip; 6512 mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl; 6513 6514 devhdl = topo_node->devhdl; 6515 6516 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL & 6517 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl; 6518 rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp); 6519 if (rval != DDI_SUCCESS) { 6520 mptsas_log(mpt, CE_WARN, "failed to online smp, " 6521 "handle %x", devhdl); 6522 return; 6523 } 6524 6525 psmp = mptsas_smp_alloc(smptbl, &smp); 6526 if (psmp == NULL) { 6527 return; 6528 } 6529 6530 mutex_exit(&mpt->m_mutex); 6531 ndi_devi_enter(parent, &circ1); 6532 (void) mptsas_online_smp(parent, psmp, &smpdip); 6533 ndi_devi_exit(parent, circ1); 6534 6535 mutex_enter(&mpt->m_mutex); 6536 break; 6537 } 6538 case MPTSAS_DR_EVENT_OFFLINE_SMP: 6539 { 6540 mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl; 6541 devhdl = topo_node->devhdl; 6542 uint32_t dev_info; 6543 6544 psmp = mptsas_search_by_devhdl(smptbl, devhdl); 6545 if (psmp == NULL) 6546 break; 6547 /* 6548 * The mptsas_smp_t data is released only if the dip is offlined 6549 * successfully. 
6550 */ 6551 mutex_exit(&mpt->m_mutex); 6552 6553 ndi_devi_enter(parent, &circ1); 6554 rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE); 6555 ndi_devi_exit(parent, circ1); 6556 6557 dev_info = psmp->m_deviceinfo; 6558 if ((dev_info & DEVINFO_DIRECT_ATTACHED) == 6559 DEVINFO_DIRECT_ATTACHED) { 6560 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent, 6561 MPTSAS_VIRTUAL_PORT, 1) != 6562 DDI_PROP_SUCCESS) { 6563 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent, 6564 MPTSAS_VIRTUAL_PORT); 6565 mptsas_log(mpt, CE_WARN, "mptsas virtual port " 6566 "prop update failed"); 6567 return; 6568 } 6569 /* 6570 * Check whether the smp connected to the iport, 6571 */ 6572 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent, 6573 MPTSAS_NUM_PHYS, 0) != 6574 DDI_PROP_SUCCESS) { 6575 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent, 6576 MPTSAS_NUM_PHYS); 6577 mptsas_log(mpt, CE_WARN, "mptsas num phys" 6578 "prop update failed"); 6579 return; 6580 } 6581 /* 6582 * Clear parent's attached-port props 6583 */ 6584 bzero(attached_wwnstr, sizeof (attached_wwnstr)); 6585 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent, 6586 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) != 6587 DDI_PROP_SUCCESS) { 6588 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent, 6589 SCSI_ADDR_PROP_ATTACHED_PORT); 6590 mptsas_log(mpt, CE_WARN, "mptsas attached port " 6591 "prop update failed"); 6592 return; 6593 } 6594 } 6595 6596 mutex_enter(&mpt->m_mutex); 6597 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, " 6598 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval)); 6599 if (rval == DDI_SUCCESS) { 6600 mptsas_smp_free(smptbl, psmp->m_sasaddr, 6601 psmp->m_phymask); 6602 } else { 6603 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL; 6604 } 6605 6606 bzero(attached_wwnstr, sizeof (attached_wwnstr)); 6607 6608 break; 6609 } 6610 default: 6611 return; 6612 } 6613 } 6614 6615 /* 6616 * Record the event if its type is enabled in mpt instance by ioctl. 
 */
static void
mptsas_record_event(void *args)
{
	m_replyh_arg_t			*replyh_arg;
	pMpi2EventNotificationReply_t	eventreply;
	uint32_t			event, rfm;
	mptsas_t			*mpt;
	int				i, j;
	uint16_t			event_data_len;
	boolean_t			sendAEN = FALSE;

	replyh_arg = (m_replyh_arg_t *)args;
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	/* Locate the event reply within the reply frame pool via its DMA offset. */
	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);


	/*
	 * Generate a system event to let anyone who cares know that a
	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
	 * event mask is set to.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
		sendAEN = TRUE;
	}

	/*
	 * Record the event only if it is not masked.  Determine which dword
	 * and bit of event mask to test (m_event_mask is 4 dwords of 32
	 * bits each, so only event codes below 128 can be recorded).
	 */
	i = (uint8_t)(event / 32);
	j = (uint8_t)(event % 32);
	if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
		i = mpt->m_event_index;
		mpt->m_events[i].Type = event;
		mpt->m_events[i].Number = ++mpt->m_event_number;
		bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
		event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &eventreply->EventDataLength);

		if (event_data_len > 0) {
			/*
			 * Limit data to size in m_event entry
			 */
			if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
				event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
			}
			for (j = 0; j < event_data_len; j++) {
				mpt->m_events[i].Data[j] =
				    ddi_get32(mpt->m_acc_reply_frame_hdl,
				    &(eventreply->EventData[j]));
			}

			/*
			 * check for index wrap-around
			 * NOTE(review): m_event_index only advances (and
			 * sendAEN is only set) when the event carried data;
			 * a zero-length recorded event is overwritten by the
			 * next one.  Looks intentional -- confirm.
			 */
			if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
				i = 0;
			}
			mpt->m_event_index = (uint8_t)i;

			/*
			 * Set flag to send the event.
			 */
			sendAEN = TRUE;
		}
	}

	/*
	 * Generate a system event if flag is set to let anyone who cares know
	 * that an event has occurred.
	 */
	if (sendAEN) {
		(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
		    "SAS", NULL, NULL, DDI_NOSLEEP);
	}
}

#define	SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
/*
 * handle sync events from ioc in interrupt
 * return value:
 * DDI_SUCCESS: The event is handled by this func
 * DDI_FAILURE: Event is not handled
 *
 * Runs with mpt->m_mutex held (asserted below).  Heavy lifting is deferred:
 * this routine only builds a linked list of mptsas_topo_change_list_t nodes
 * and dispatches it to the DR taskq (mptsas_handle_dr).
 */
static int
mptsas_handle_event_sync(void *args)
{
	m_replyh_arg_t			*replyh_arg;
	pMpi2EventNotificationReply_t	eventreply;
	uint32_t			event, rfm;
	mptsas_t			*mpt;
	uint_t				iocstatus;

	replyh_arg = (m_replyh_arg_t *)args;
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Locate the event reply within the reply frame pool via its DMA offset. */
	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);

	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
	    &eventreply->IOCStatus)) {
		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
			mptsas_log(mpt, CE_WARN,
			    "!mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		} else {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		}
	}

	/*
	 * figure out what kind of event we got and handle accordingly
	 */
	switch (event) {
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		pMpi2EventDataSasTopologyChangeList_t	sas_topo_change_list;
		uint8_t				num_entries, expstatus, phy;
		uint8_t				phystatus, physport, state, i;
		uint8_t				start_phy_num, link_rate;
		uint16_t			dev_handle, reason_code;
		uint16_t			enc_handle, expd_handle;
		char				string[80], curr[80], prev[80];
		mptsas_topo_change_list_t	*topo_head = NULL;
		mptsas_topo_change_list_t	*topo_tail = NULL;
		mptsas_topo_change_list_t	*topo_node = NULL;
		mptsas_target_t			*ptgt;
		mptsas_smp_t			*psmp;
		mptsas_hash_table_t		*tgttbl, *smptbl;
		uint8_t				flags = 0, exp_flag;
		smhba_info_t			*pSmhba = NULL;

		NDBG20(("mptsas_handle_event_sync: SAS topology change"));

		tgttbl = &mpt->m_active->m_tgttbl;
		smptbl = &mpt->m_active->m_smptbl;

		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
		    eventreply->EventData;

		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->EnclosureHandle);
		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpanderDevHandle);
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->NumEntries);
		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->StartPhyNum);
		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpStatus);
		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->PhysicalPort);

		string[0] = 0;
		/* A non-zero expander handle means the change is behind an expander. */
		if (expd_handle) {
			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
			switch (expstatus) {
			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
				(void) sprintf(string, " added");
				/*
				 * New expander device added
				 */
				mpt->m_port_chng = 1;
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
				topo_node->un.physport = physport;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "removed");
				psmp = mptsas_search_by_devhdl(smptbl,
				    expd_handle);
				if (psmp == NULL)
					break;

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = psmp->m_phymask;
				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
				break;
			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "delaying removal");
				break;
			default:
				break;
			}
		} else {
			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
		}

		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
		    enc_handle, expd_handle, string));
		/* Walk each per-PHY entry in the topology change list. */
		for (i = 0; i < num_entries; i++) {
			phy = i + start_phy_num;
			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].PhyStatus);
			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
			reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
			/*
			 * Filter out processing of Phy Vacant Status unless
			 * the reason code is "Not Responding".  Process all
			 * other combinations of Phy Status and Reason Codes.
			 */
			if ((phystatus &
			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
			    (reason_code !=
			    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
				continue;
			}
			curr[0] = 0;
			prev[0] = 0;
			string[0] = 0;
			switch (reason_code) {
			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d added", mpt->m_instance, phy,
				    physport, dev_handle));
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				/* Build a human-readable link state for the debug log. */
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at 6.0 "
					    "Gbps");
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}
				/*
				 * New target device added into the system.
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				if (expd_handle == 0) {
					/*
					 * Per MPI 2, if expander dev handle
					 * is 0, it's a directly attached
					 * device. So driver use PHY to decide
					 * which iport is associated
					 */
					physport = phy;
					mpt->m_port_chng = 1;
				}
				topo_node->un.physport = physport;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d removed", mpt->m_instance,
				    phy, physport, dev_handle));
				/*
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				/*
				 * Target device is removed from the system
				 * Before the device is really offline from
				 * from system.
				 */
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    dev_handle);
				/*
				 * If ptgt is NULL here, it means that the
				 * DevHandle is not in the hash table.  This is
				 * reasonable sometimes.  For example, if a
				 * disk was pulled, then added, then pulled
				 * again, the disk will not have been put into
				 * the hash table because the add event will
				 * have an invalid phymask.  BUT, this does not
				 * mean that the DevHandle is invalid.  The
				 * controller will still have a valid DevHandle
				 * that must be removed.  To do this, use the
				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
				 */
				if (ptgt == NULL) {
					topo_node = kmem_zalloc(
					    sizeof (mptsas_topo_change_list_t),
					    KM_SLEEP);
					topo_node->mpt = mpt;
					topo_node->un.phymask = 0;
					topo_node->event =
					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
					topo_node->devhdl = dev_handle;
					topo_node->flags = flags;
					topo_node->object = NULL;
					if (topo_head == NULL) {
						topo_head = topo_tail =
						    topo_node;
					} else {
						topo_tail->next = topo_node;
						topo_tail = topo_node;
					}
					break;
				}

				/*
				 * Update DR flag immediately avoid I/O failure
				 * before failover finish. Pay attention to the
				 * mutex protect, we need grab the per target
				 * mutex during set m_dr_flag because the
				 * m_mutex would not be held all the time in
				 * mptsas_scsi_start().
				 */
				mutex_enter(&ptgt->m_tgt_intr_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&ptgt->m_tgt_intr_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				pSmhba = &mpt->m_phy_info[i].smhba_info;
				pSmhba->negotiated_link_rate = state;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_REMOVE,
					    &mpt->m_phy_info[i].smhba_info);
					mpt->m_phy_info[i].smhba_info.
					    negotiated_link_rate
					    = 0x1;
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_OFFLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at "
					    "6.0 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}

				/* Decode the previous link rate for the log message. */
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(prev, ", was disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(prev, ", was offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(prev, ", was SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(prev, ", was SMP reset "
					    "in progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(prev, ", was online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(prev, ", was online at "
					    "3.0 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(prev, ", was online at "
					    "6.0 Gbps");
					break;
				default:
					break;
				}
				(void) sprintf(&string[strlen(string)], "link "
				    "changed, ");
				break;
			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
				continue;
			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
				(void) sprintf(&string[strlen(string)],
				    "target not responding, delaying "
				    "removal");
				break;
			}
			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
			    mpt->m_instance, phy, dev_handle, string, curr,
			    prev));
		}
		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
	{
		Mpi2EventDataIrConfigChangeList_t	*irChangeList;
		mptsas_topo_change_list_t		*topo_head = NULL;
		mptsas_topo_change_list_t		*topo_tail = NULL;
		mptsas_topo_change_list_t		*topo_node = NULL;
		mptsas_target_t				*ptgt;
		mptsas_hash_table_t			*tgttbl;
		uint8_t					num_entries, i, reason;
		uint16_t				volhandle, diskhandle;

		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
		    eventreply->EventData;
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &irChangeList->NumElements);

		tgttbl = &mpt->m_active->m_tgttbl;

		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
		    mpt->m_instance));

		/* Queue a DR node for each RAID config element that changed. */
		for (i = 0; i < num_entries; i++) {
			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].ReasonCode);
			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].VolDevHandle);
			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);

			switch (reason) {
			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
			{
				NDBG20(("mptsas %d volume added\n",
				    mpt->m_instance));

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);

				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				topo_node->un.physport = 0xff;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
			{
				NDBG20(("mptsas %d volume deleted\n",
				    mpt->m_instance));
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    volhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Clear any flags related to volume
				 */
				(void) mptsas_delete_volume(mpt, volhandle);

				/*
				 * Update DR flag immediately avoid I/O failure
				 */
				mutex_enter(&ptgt->m_tgt_intr_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&ptgt->m_tgt_intr_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			{
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    diskhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Update DR flag immediately avoid I/O failure
				 */
				mutex_enter(&ptgt->m_tgt_intr_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&ptgt->m_tgt_intr_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			{
				/*
				 * The physical drive is released by a IR
				 * volume. But we cannot get the physport
				 * or phynum from the event data, so we only
				 * can get the physport/phynum after SAS
				 * Device Page0 request for the devhdl.
				 */
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = 0;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = NULL;
				mpt->m_port_chng = 1;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			default:
				break;
			}
		}

		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * handle events from ioc
 */
static void
mptsas_handle_event(void *args)
{
	m_replyh_arg_t			*replyh_arg;
	pMpi2EventNotificationReply_t	eventreply;
	uint32_t			event, iocloginfo, rfm;
	uint32_t			status;
	uint8_t				port;
	mptsas_t			*mpt;
	uint_t				iocstatus;

	replyh_arg = (m_replyh_arg_t *)args;
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	mutex_enter(&mpt->m_mutex);

	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);

	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
	    &eventreply->IOCStatus)) {
		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
			mptsas_log(mpt, CE_WARN,
			    "!mptsas_handle_event: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		} else {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_handle_event: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		}
	}

	/*
	 * figure out what kind of event we got and handle accordingly
	 */
	switch (event) {
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		break;
	case MPI2_EVENT_LOG_DATA:
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &eventreply->IOCLogInfo);
		NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
		    iocloginfo));
		break;
	case MPI2_EVENT_STATE_CHANGE:
		NDBG20(("mptsas%d state change.", mpt->m_instance));
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		NDBG20(("mptsas%d event change.", mpt->m_instance));
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery; 7427 char string[80]; 7428 uint8_t rc; 7429 7430 sasdiscovery = 7431 (pMpi2EventDataSasDiscovery_t)eventreply->EventData; 7432 7433 rc = ddi_get8(mpt->m_acc_reply_frame_hdl, 7434 &sasdiscovery->ReasonCode); 7435 port = ddi_get8(mpt->m_acc_reply_frame_hdl, 7436 &sasdiscovery->PhysicalPort); 7437 status = ddi_get32(mpt->m_acc_reply_frame_hdl, 7438 &sasdiscovery->DiscoveryStatus); 7439 7440 string[0] = 0; 7441 switch (rc) { 7442 case MPI2_EVENT_SAS_DISC_RC_STARTED: 7443 (void) sprintf(string, "STARTING"); 7444 break; 7445 case MPI2_EVENT_SAS_DISC_RC_COMPLETED: 7446 (void) sprintf(string, "COMPLETED"); 7447 break; 7448 default: 7449 (void) sprintf(string, "UNKNOWN"); 7450 break; 7451 } 7452 7453 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string, 7454 port, status)); 7455 7456 break; 7457 } 7458 case MPI2_EVENT_EVENT_CHANGE: 7459 NDBG20(("mptsas%d event change.", mpt->m_instance)); 7460 break; 7461 case MPI2_EVENT_TASK_SET_FULL: 7462 { 7463 pMpi2EventDataTaskSetFull_t taskfull; 7464 7465 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData; 7466 7467 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n", 7468 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl, 7469 &taskfull->CurrentDepth))); 7470 break; 7471 } 7472 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 7473 { 7474 /* 7475 * SAS TOPOLOGY CHANGE LIST Event has already been handled 7476 * in mptsas_handle_event_sync() of interrupt context 7477 */ 7478 break; 7479 } 7480 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 7481 { 7482 pMpi2EventDataSasEnclDevStatusChange_t encstatus; 7483 uint8_t rc; 7484 char string[80]; 7485 7486 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t) 7487 eventreply->EventData; 7488 7489 rc = ddi_get8(mpt->m_acc_reply_frame_hdl, 7490 &encstatus->ReasonCode); 7491 switch (rc) { 7492 case MPI2_EVENT_SAS_ENCL_RC_ADDED: 7493 (void) sprintf(string, "added"); 7494 break; 7495 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: 7496 
(void) sprintf(string, ", not responding"); 7497 break; 7498 default: 7499 break; 7500 } 7501 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n", 7502 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl, 7503 &encstatus->EnclosureHandle), string)); 7504 break; 7505 } 7506 7507 /* 7508 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by 7509 * mptsas_handle_event_sync,in here just send ack message. 7510 */ 7511 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 7512 { 7513 pMpi2EventDataSasDeviceStatusChange_t statuschange; 7514 uint8_t rc; 7515 uint16_t devhdl; 7516 uint64_t wwn = 0; 7517 uint32_t wwn_lo, wwn_hi; 7518 7519 statuschange = (pMpi2EventDataSasDeviceStatusChange_t) 7520 eventreply->EventData; 7521 rc = ddi_get8(mpt->m_acc_reply_frame_hdl, 7522 &statuschange->ReasonCode); 7523 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl, 7524 (uint32_t *)(void *)&statuschange->SASAddress); 7525 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl, 7526 (uint32_t *)(void *)&statuschange->SASAddress + 1); 7527 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo; 7528 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl, 7529 &statuschange->DevHandle); 7530 7531 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64, 7532 wwn)); 7533 7534 switch (rc) { 7535 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 7536 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x", 7537 ddi_get8(mpt->m_acc_reply_frame_hdl, 7538 &statuschange->ASC), 7539 ddi_get8(mpt->m_acc_reply_frame_hdl, 7540 &statuschange->ASCQ))); 7541 break; 7542 7543 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: 7544 NDBG20(("Device not supported")); 7545 break; 7546 7547 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 7548 NDBG20(("IOC internally generated the Target Reset " 7549 "for devhdl:%x", devhdl)); 7550 break; 7551 7552 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: 7553 NDBG20(("IOC's internally generated Target Reset " 7554 "completed for devhdl:%x", devhdl)); 7555 break; 7556 7557 case 
		    MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
			NDBG20(("IOC internally generated Abort Task"));
			break;

		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
			NDBG20(("IOC's internally generated Abort Task "
			    "completed"));
			break;

		case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
			NDBG20(("IOC internally generated Abort Task Set"));
			break;

		case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
			NDBG20(("IOC internally generated Clear Task Set"));
			break;

		case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
			NDBG20(("IOC internally generated Query Task"));
			break;

		case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
			NDBG20(("Device sent an Asynchronous Notification"));
			break;

		default:
			break;
		}
		break;
	}
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
	{
		/*
		 * IR TOPOLOGY CHANGE LIST Event has already been handled
		 * in mpt_handle_event_sync() of interrupt context
		 */
		break;
	}
	case MPI2_EVENT_IR_OPERATION_STATUS:
	{
		Mpi2EventDataIrOperationStatus_t	*irOpStatus;
		char					reason_str[80];
		uint8_t					rc, percent;
		uint16_t				handle;

		irOpStatus = (pMpi2EventDataIrOperationStatus_t)
		    eventreply->EventData;
		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &irOpStatus->RAIDOperation);
		percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &irOpStatus->PercentComplete);
		handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &irOpStatus->VolDevHandle);

		/* map the RAID operation code to a human-readable string */
		switch (rc) {
		case MPI2_EVENT_IR_RAIDOP_RESYNC:
			(void) sprintf(reason_str, "resync");
			break;
		case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
			(void) sprintf(reason_str, "online capacity "
			    "expansion");
			break;
		case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
			(void) sprintf(reason_str, "consistency check");
			break;
		default:
			(void) sprintf(reason_str, "unknown reason %x",
			    rc);
		}

		NDBG20(("mptsas%d raid operational status: (%s)"
		    "\thandle(0x%04x), percent complete(%d)\n",
		    mpt->m_instance, reason_str, handle, percent));
		break;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		pMpi2EventDataSasBroadcastPrimitive_t	sas_broadcast;
		uint8_t					phy_num;
		uint8_t					primitive;

		sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
		    eventreply->EventData;

		phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_broadcast->PhyNum);
		primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_broadcast->Primitive);

		/*
		 * Translate the SAS broadcast primitive into the
		 * corresponding SM-HBA sysevent on the receiving phy.
		 */
		switch (primitive) {
		case MPI2_EVENT_PRIMITIVE_CHANGE:
			mptsas_smhba_log_sysevent(mpt,
			    ESC_SAS_HBA_PORT_BROADCAST,
			    SAS_PORT_BROADCAST_CHANGE,
			    &mpt->m_phy_info[phy_num].smhba_info);
			break;
		case MPI2_EVENT_PRIMITIVE_SES:
			mptsas_smhba_log_sysevent(mpt,
			    ESC_SAS_HBA_PORT_BROADCAST,
			    SAS_PORT_BROADCAST_SES,
			    &mpt->m_phy_info[phy_num].smhba_info);
			break;
		case MPI2_EVENT_PRIMITIVE_EXPANDER:
			mptsas_smhba_log_sysevent(mpt,
			    ESC_SAS_HBA_PORT_BROADCAST,
			    SAS_PORT_BROADCAST_D01_4,
			    &mpt->m_phy_info[phy_num].smhba_info);
			break;
		case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
			mptsas_smhba_log_sysevent(mpt,
			    ESC_SAS_HBA_PORT_BROADCAST,
			    SAS_PORT_BROADCAST_D04_7,
			    &mpt->m_phy_info[phy_num].smhba_info);
			break;
		case MPI2_EVENT_PRIMITIVE_RESERVED3:
			mptsas_smhba_log_sysevent(mpt,
			    ESC_SAS_HBA_PORT_BROADCAST,
			    SAS_PORT_BROADCAST_D16_7,
			    &mpt->m_phy_info[phy_num].smhba_info);
			break;
		case MPI2_EVENT_PRIMITIVE_RESERVED4:
			mptsas_smhba_log_sysevent(mpt,
			    ESC_SAS_HBA_PORT_BROADCAST,
			    SAS_PORT_BROADCAST_D29_7,
			    &mpt->m_phy_info[phy_num].smhba_info);
			break;
		case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
			mptsas_smhba_log_sysevent(mpt,
			    ESC_SAS_HBA_PORT_BROADCAST,
SAS_PORT_BROADCAST_D24_0, 7687 &mpt->m_phy_info[phy_num].smhba_info); 7688 break; 7689 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED: 7690 mptsas_smhba_log_sysevent(mpt, 7691 ESC_SAS_HBA_PORT_BROADCAST, 7692 SAS_PORT_BROADCAST_D27_4, 7693 &mpt->m_phy_info[phy_num].smhba_info); 7694 break; 7695 default: 7696 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE" 7697 " %x received", 7698 mpt->m_instance, primitive)); 7699 break; 7700 } 7701 NDBG20(("mptsas%d sas broadcast primitive: " 7702 "\tprimitive(0x%04x), phy(%d) complete\n", 7703 mpt->m_instance, primitive, phy_num)); 7704 break; 7705 } 7706 case MPI2_EVENT_IR_VOLUME: 7707 { 7708 Mpi2EventDataIrVolume_t *irVolume; 7709 uint16_t devhandle; 7710 uint32_t state; 7711 int config, vol; 7712 mptsas_slots_t *slots = mpt->m_active; 7713 uint8_t found = FALSE; 7714 7715 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData; 7716 state = ddi_get32(mpt->m_acc_reply_frame_hdl, 7717 &irVolume->NewValue); 7718 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl, 7719 &irVolume->VolDevHandle); 7720 7721 NDBG20(("EVENT_IR_VOLUME event is received")); 7722 7723 /* 7724 * Get latest RAID info and then find the DevHandle for this 7725 * event in the configuration. If the DevHandle is not found 7726 * just exit the event. 7727 */ 7728 (void) mptsas_get_raid_info(mpt); 7729 for (config = 0; (config < slots->m_num_raid_configs) && 7730 (!found); config++) { 7731 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) { 7732 if (slots->m_raidconfig[config].m_raidvol[vol]. 
7733 m_raidhandle == devhandle) { 7734 found = TRUE; 7735 break; 7736 } 7737 } 7738 } 7739 if (!found) { 7740 break; 7741 } 7742 7743 switch (irVolume->ReasonCode) { 7744 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED: 7745 { 7746 uint32_t i; 7747 slots->m_raidconfig[config].m_raidvol[vol].m_settings = 7748 state; 7749 7750 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING; 7751 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed" 7752 ", auto-config of hot-swap drives is %s" 7753 ", write caching is %s" 7754 ", hot-spare pool mask is %02x\n", 7755 vol, state & 7756 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE 7757 ? "disabled" : "enabled", 7758 i == MPI2_RAIDVOL0_SETTING_UNCHANGED 7759 ? "controlled by member disks" : 7760 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING 7761 ? "disabled" : 7762 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING 7763 ? "enabled" : 7764 "incorrectly set", 7765 (state >> 16) & 0xff); 7766 break; 7767 } 7768 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED: 7769 { 7770 slots->m_raidconfig[config].m_raidvol[vol].m_state = 7771 (uint8_t)state; 7772 7773 mptsas_log(mpt, CE_NOTE, 7774 "Volume %d is now %s\n", vol, 7775 state == MPI2_RAID_VOL_STATE_OPTIMAL 7776 ? "optimal" : 7777 state == MPI2_RAID_VOL_STATE_DEGRADED 7778 ? "degraded" : 7779 state == MPI2_RAID_VOL_STATE_ONLINE 7780 ? "online" : 7781 state == MPI2_RAID_VOL_STATE_INITIALIZING 7782 ? "initializing" : 7783 state == MPI2_RAID_VOL_STATE_FAILED 7784 ? "failed" : 7785 state == MPI2_RAID_VOL_STATE_MISSING 7786 ? "missing" : 7787 "state unknown"); 7788 break; 7789 } 7790 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED: 7791 { 7792 slots->m_raidconfig[config].m_raidvol[vol]. 7793 m_statusflags = state; 7794 7795 mptsas_log(mpt, CE_NOTE, 7796 " Volume %d is now %s%s%s%s%s%s%s%s%s\n", 7797 vol, 7798 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED 7799 ? ", enabled" : ", disabled", 7800 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED 7801 ? 
", quiesced" : "", 7802 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE 7803 ? ", inactive" : ", active", 7804 state & 7805 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL 7806 ? ", bad block table is full" : "", 7807 state & 7808 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS 7809 ? ", resync in progress" : "", 7810 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT 7811 ? ", background initialization in progress" : "", 7812 state & 7813 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION 7814 ? ", capacity expansion in progress" : "", 7815 state & 7816 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK 7817 ? ", consistency check in progress" : "", 7818 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB 7819 ? ", data scrub in progress" : ""); 7820 break; 7821 } 7822 default: 7823 break; 7824 } 7825 break; 7826 } 7827 case MPI2_EVENT_IR_PHYSICAL_DISK: 7828 { 7829 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk; 7830 uint16_t devhandle, enchandle, slot; 7831 uint32_t status, state; 7832 uint8_t physdisknum, reason; 7833 7834 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *) 7835 eventreply->EventData; 7836 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl, 7837 &irPhysDisk->PhysDiskNum); 7838 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl, 7839 &irPhysDisk->PhysDiskDevHandle); 7840 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl, 7841 &irPhysDisk->EnclosureHandle); 7842 slot = ddi_get16(mpt->m_acc_reply_frame_hdl, 7843 &irPhysDisk->Slot); 7844 state = ddi_get32(mpt->m_acc_reply_frame_hdl, 7845 &irPhysDisk->NewValue); 7846 reason = ddi_get8(mpt->m_acc_reply_frame_hdl, 7847 &irPhysDisk->ReasonCode); 7848 7849 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received")); 7850 7851 switch (reason) { 7852 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED: 7853 mptsas_log(mpt, CE_NOTE, 7854 " PhysDiskNum %d with DevHandle 0x%x in slot %d " 7855 "for enclosure with handle 0x%x is now in hot " 7856 "spare pool %d", 7857 physdisknum, devhandle, slot, enchandle, 7858 (state >> 16) & 0xff); 7859 break; 7860 7861 
case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED: 7862 status = state; 7863 mptsas_log(mpt, CE_NOTE, 7864 " PhysDiskNum %d with DevHandle 0x%x in slot %d " 7865 "for enclosure with handle 0x%x is now " 7866 "%s%s%s%s%s\n", physdisknum, devhandle, slot, 7867 enchandle, 7868 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME 7869 ? ", inactive" : ", active", 7870 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC 7871 ? ", out of sync" : "", 7872 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED 7873 ? ", quiesced" : "", 7874 status & 7875 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED 7876 ? ", write cache enabled" : "", 7877 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET 7878 ? ", capacity expansion target" : ""); 7879 break; 7880 7881 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED: 7882 mptsas_log(mpt, CE_NOTE, 7883 " PhysDiskNum %d with DevHandle 0x%x in slot %d " 7884 "for enclosure with handle 0x%x is now %s\n", 7885 physdisknum, devhandle, slot, enchandle, 7886 state == MPI2_RAID_PD_STATE_OPTIMAL 7887 ? "optimal" : 7888 state == MPI2_RAID_PD_STATE_REBUILDING 7889 ? "rebuilding" : 7890 state == MPI2_RAID_PD_STATE_DEGRADED 7891 ? "degraded" : 7892 state == MPI2_RAID_PD_STATE_HOT_SPARE 7893 ? "a hot spare" : 7894 state == MPI2_RAID_PD_STATE_ONLINE 7895 ? "online" : 7896 state == MPI2_RAID_PD_STATE_OFFLINE 7897 ? "offline" : 7898 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE 7899 ? "not compatible" : 7900 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED 7901 ? "not configured" : 7902 "state unknown"); 7903 break; 7904 } 7905 break; 7906 } 7907 default: 7908 NDBG20(("mptsas%d: unknown event %x received", 7909 mpt->m_instance, event)); 7910 break; 7911 } 7912 7913 /* 7914 * Return the reply frame to the free queue. 
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/* advance the free-queue producer index, wrapping at queue depth */
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	/* tell the IOC the frame is available again */
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);
	mutex_exit(&mpt->m_mutex);
}

/*
 * invoked from timeout() to restart qfull cmds with throttle == 0
 *
 * Walks every target and restores MAX_THROTTLE on those that were
 * throttled down by a QUEUE FULL condition (and are not in reset
 * delay), then restarts the HBA wait queue.
 */
static void
mptsas_restart_cmd(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);

	mpt->m_restart_cmd_timeid = 0;

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		if (ptgt->m_reset_delay == 0) {
			if (ptgt->m_t_throttle == QFULL_THROTTLE) {
				mptsas_set_throttle(mpt, ptgt,
				    MAX_THROTTLE);
			}
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mptsas_restart_hba(mpt);
	mutex_exit(&mpt->m_mutex);
}

/*
 * mptsas_remove_cmd0 is similar to mptsas_remove_cmd except that it is called
 * where m_intr_mutex has already been held.
 *
 * mptsas_remove_cmd: caller holds m_mutex but NOT m_intr_mutex; it links
 * the cmd onto the outstanding slot list (see below) and then takes
 * m_intr_mutex around the real removal work in mptsas_remove_cmd0.
 */
void
mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * With new fine-grained lock mechanism, the outstanding cmd is only
	 * linked to m_active before the dma is triggerred(MPTSAS_START_CMD)
	 * to send it. that is, mptsas_save_cmd() doesn't link the outstanding
	 * cmd now. So when mptsas_remove_cmd is called, a mptsas_save_cmd must
	 * have been called, but the cmd may have not been linked.
	 * For mptsas_remove_cmd0, the cmd must have been linked.
	 * In order to keep the same semantic, we link the cmd to the
	 * outstanding cmd list.
	 */
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;

	mutex_enter(&mpt->m_intr_mutex);
	mptsas_remove_cmd0(mpt, cmd);
	mutex_exit(&mpt->m_intr_mutex);
}

/*
 * Core command-removal path: unlink the cmd from its slot, return the
 * slot entry to the per-CPU release free queue, drop per-target command
 * accounting, and recompute the target's tag-queue timeout base.
 * Caller holds m_intr_mutex (and m_mutex for CFLAG_CMDIOC cmds).
 */
static inline void
mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		slot;
	mptsas_slots_t	*slots = mpt->m_active;
	int		t;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
	mptsas_slot_free_e_t	*pe;

	ASSERT(cmd != NULL);
	ASSERT(cmd->cmd_queued == FALSE);

	/*
	 * Task Management cmds are removed in their own routines.  Also,
	 * we don't want to modify timeout based on TM cmds.
	 */
	if (cmd->cmd_flags & CFLAG_TM_CMD) {
		return;
	}

	t = Tgt(cmd);
	slot = cmd->cmd_slot;
	/* slot free entries are indexed from 0, SMIDs from 1 */
	pe = mpt->m_slot_free_ae + slot - 1;
	ASSERT(cmd == slots->m_slot[slot]);
	ASSERT((slot > 0) && slot < (mpt->m_max_requests - 1));

	/*
	 * remove the cmd.
	 */
	mutex_enter(&mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_mutex);
	NDBG31(("mptsas_remove_cmd0: removing cmd=0x%p", (void *)cmd));
	slots->m_slot[slot] = NULL;
	ASSERT(pe->slot == slot);
	/* give the slot back to the release queue of the owning CPU */
	list_insert_tail(&mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_list, pe);
	mpt->m_slot_freeq_pairp[pe->cpuid].m_slot_releq.s.m_fq_n++;
	ASSERT(mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_n <= mpt->m_max_requests - 2);
	mutex_exit(&mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_mutex);

	/*
	 * only decrement per target ncmds if command
	 * has a target associated with it.
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		ptgt->m_t_ncmds--;
		/*
		 * reset throttle if we just ran an untagged command
		 * to a tagged target
		 */
		if ((ptgt->m_t_ncmds == 0) &&
		    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
	}

	/*
	 * This is all we need to do for ioc commands.
	 * The ioc cmds would never be handled in fastpath in ISR, so we make
	 * sure the mptsas_return_to_pool() would always be called with
	 * m_mutex protected.
	 */
	if (cmd->cmd_flags & CFLAG_CMDIOC) {
		ASSERT(mutex_owned(&mpt->m_mutex));
		mptsas_return_to_pool(mpt, cmd);
		return;
	}

	/*
	 * Figure out what to set tag Q timeout for...
	 *
	 * Optimize: If we have duplicate's of same timeout
	 * we're using, then we'll use it again until we run
	 * out of duplicates.  This should be the normal case
	 * for block and raw I/O.
	 * If no duplicates, we have to scan through tag que and
	 * find the longest timeout value and use it.  This is
	 * going to take a while...
	 * Add 1 to m_n_slots to account for TM request.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
		if (--(ptgt->m_dups) == 0) {
			if (ptgt->m_t_ncmds) {
				mptsas_cmd_t *ssp;
				uint_t n = 0;
				ushort_t nslots = (slots->m_n_slots + 1);
				ushort_t i;
				/*
				 * This crude check assumes we don't do
				 * this too often which seems reasonable
				 * for block and raw I/O.
				 */
				for (i = 0; i < nslots; i++) {
					ssp = slots->m_slot[i];
					if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time > n)) {
						n = ssp->cmd_pkt->pkt_time;
						ptgt->m_dups = 1;
					} else if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time == n)) {
						ptgt->m_dups++;
					}
				}
				ptgt->m_timebase = n;
			} else {
				ptgt->m_dups = 0;
				ptgt->m_timebase = 0;
			}
		}
	}
	ptgt->m_timeout = ptgt->m_timebase;

	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
	mutex_exit(&ptgt->m_tgt_intr_mutex);
}

/*
 * start a fresh request from the top of the device queue.
 *
 * Drains the wait queue: passthru/config/diag cmds are dispatched via
 * their dedicated start routines; ordinary SCSI cmds are started only
 * when their target is below throttle and not in reset delay.
 */
static void
mptsas_restart_hba(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd, *next_cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG1(("mptsas_restart_hba: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * If there is a reset delay, don't start any cmds.  Otherwise, start
	 * as many cmds as possible.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	cmd = mpt->m_waitq;

	while (cmd != NULL) {
		next_cmd = cmd->cmd_linkp;
		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * passthru command get slot need
				 * set CFLAG_PREPARED.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_passthru(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		if (cmd->cmd_flags & CFLAG_CONFIG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the config page request and delete it
				 * from the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_config_page_access(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the FW Diag request and delete if from
				 * the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_diag(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}

		ptgt = cmd->cmd_tgt_addr;
		if (ptgt) {
			mutex_enter(&mpt->m_intr_mutex);
			mutex_enter(&ptgt->m_tgt_intr_mutex);
			/* a drained target with no inflight cmds may resume */
			if ((ptgt->m_t_throttle == DRAIN_THROTTLE) &&
			    (ptgt->m_t_ncmds == 0)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			if ((ptgt->m_reset_delay == 0) &&
			    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
				mutex_exit(&ptgt->m_tgt_intr_mutex);
				mutex_exit(&mpt->m_intr_mutex);
				if (mptsas_save_cmd(mpt, cmd) == TRUE) {
					mptsas_waitq_delete(mpt, cmd);
					(void) mptsas_start_cmd(mpt, cmd);
				}
				goto out;
			}
			mutex_exit(&ptgt->m_tgt_intr_mutex);
			mutex_exit(&mpt->m_intr_mutex);
		}
out:
		/* "out" just advances to the next queued command */
		cmd = next_cmd;
	}
}

/*
 * mpt tag type lookup
 * Indexed by (cmd_pkt_flags & FLAG_TAGMASK) >> 12; maps the SCSA tag
 * flag to the corresponding SCSI queue-tag message code.
 */
static char mptsas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};

/*
 * mptsas_start_cmd0 is similar to mptsas_start_cmd, except that, it is called
 * without ANY mutex protected, while, mptsas_start_cmd is called with m_mutex
 * protected.
 *
 * the relevant field in ptgt should be protected by m_tgt_intr_mutex in both
 * functions.
 *
 * before the cmds are linked on the slot for monitor as outstanding cmds, they
 * are accessed as slab objects, so slab framework ensures the exclusive access,
 * and no other mutex is requireed. Linking for monitor and the trigger of dma
 * must be done exclusively.
 */
static int
mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd0: cmd=0x%p", (void *)cmd));

	/*
	 * Set SMID and increment index.  Rollover to 1 instead of 0 if index
	 * is at the max.  0 is an invalid SMID, so we call the first index 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			mutex_exit(&ptgt->m_tgt_intr_mutex);

			/* requeue at the head until the target has drained */
			mutex_enter(&mpt->m_mutex);
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
			return (DDI_FAILURE);
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&ptgt->m_tgt_intr_mutex);

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/* untagged: serialize the target (except REQUEST SENSE) */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* build the MPI2 SCSI IO request in the frame owned by this SMID */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;

	/* link as outstanding and ring the doorbell under m_mutex */
	mutex_enter(&mpt->m_mutex);
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
	mutex_exit(&mpt->m_mutex);

	/*
	 * Start timeout.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
#ifdef MPTSAS_TEST
	/*
	 * Temporarily set timebase = 0;  needed for
	 * timeout torture test.
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 0;
	}
#endif
	n = pkt->pkt_time - ptgt->m_timebase;

	if (n == 0) {
		/* same timeout as the current base: one more duplicate */
		(ptgt->m_dups)++;
		ptgt->m_timeout = ptgt->m_timebase;
	} else if (n > 0) {
		/* longer timeout: it becomes the new base */
		ptgt->m_timeout =
		    ptgt->m_timebase = pkt->pkt_time;
		ptgt->m_dups = 1;
	} else if (n < 0) {
		ptgt->m_timeout = ptgt->m_timebase;
	}
#ifdef MPTSAS_TEST
	/*
	 * Set back to a number higher than
	 * mptsas_scsi_watchdog_tick
	 * so timeouts will happen in mptsas_watchsubr
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 60;
	}
#endif
	mutex_exit(&ptgt->m_tgt_intr_mutex);

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * m_mutex-protected variant of mptsas_start_cmd0; see the comment on
 * that function for the locking contract.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * Set SMID and increment index.  Rollover to 1 instead of 0 if index
	 * is at the max.  0 is an invalid SMID, so we call the first index 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
8436 */ 8437 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) { 8438 ASSERT(ptgt->m_reset_delay == 0); 8439 } 8440 8441 /* 8442 * if a non-tagged cmd is submitted to an active tagged target 8443 * then drain before submitting this cmd; SCSI-2 allows RQSENSE 8444 * to be untagged 8445 */ 8446 mutex_enter(&ptgt->m_tgt_intr_mutex); 8447 if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) && 8448 (ptgt->m_t_ncmds > 1) && 8449 ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) && 8450 (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) { 8451 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) { 8452 NDBG23(("target=%d, untagged cmd, start draining\n", 8453 ptgt->m_devhdl)); 8454 8455 if (ptgt->m_reset_delay == 0) { 8456 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE); 8457 } 8458 mutex_exit(&ptgt->m_tgt_intr_mutex); 8459 8460 mptsas_remove_cmd(mpt, cmd); 8461 cmd->cmd_pkt_flags |= FLAG_HEAD; 8462 mptsas_waitq_add(mpt, cmd); 8463 return (DDI_FAILURE); 8464 } 8465 mutex_exit(&ptgt->m_tgt_intr_mutex); 8466 return (DDI_FAILURE); 8467 } 8468 mutex_exit(&ptgt->m_tgt_intr_mutex); 8469 8470 /* 8471 * Set correct tag bits. 
8472 */ 8473 if (cmd->cmd_pkt_flags & FLAG_TAGMASK) { 8474 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags & 8475 FLAG_TAGMASK) >> 12)]) { 8476 case MSG_SIMPLE_QTAG: 8477 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 8478 break; 8479 case MSG_HEAD_QTAG: 8480 control |= MPI2_SCSIIO_CONTROL_HEADOFQ; 8481 break; 8482 case MSG_ORDERED_QTAG: 8483 control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; 8484 break; 8485 default: 8486 mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n"); 8487 break; 8488 } 8489 } else { 8490 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) { 8491 ptgt->m_t_throttle = 1; 8492 } 8493 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 8494 } 8495 8496 if (cmd->cmd_pkt_flags & FLAG_TLR) { 8497 control |= MPI2_SCSIIO_CONTROL_TLR_ON; 8498 } 8499 8500 mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID); 8501 io_request = (pMpi2SCSIIORequest_t)mem; 8502 8503 bzero(io_request, sizeof (Mpi2SCSIIORequest_t)); 8504 ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof 8505 (MPI2_SCSI_IO_REQUEST, SGL) / 4); 8506 mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0, 8507 MPI2_FUNCTION_SCSI_IO_REQUEST); 8508 8509 (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp, 8510 io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR); 8511 8512 io_flags = cmd->cmd_cdblen; 8513 ddi_put16(acc_hdl, &io_request->IoFlags, io_flags); 8514 /* 8515 * setup the Scatter/Gather DMA list for this request 8516 */ 8517 if (cmd->cmd_cookiec > 0) { 8518 mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl); 8519 } else { 8520 ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength, 8521 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT | 8522 MPI2_SGE_FLAGS_END_OF_BUFFER | 8523 MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 8524 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT); 8525 } 8526 8527 /* 8528 * save ARQ information 8529 */ 8530 ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen); 8531 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) == 8532 (CFLAG_SCBEXTERN | 
CFLAG_EXTARQBUFVALID)) { 8533 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, 8534 cmd->cmd_ext_arqcookie.dmac_address); 8535 } else { 8536 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, 8537 cmd->cmd_arqcookie.dmac_address); 8538 } 8539 8540 ddi_put32(acc_hdl, &io_request->Control, control); 8541 8542 NDBG31(("starting message=0x%p, with cmd=0x%p", 8543 (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd)); 8544 8545 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV); 8546 8547 /* 8548 * Build request descriptor and write it to the request desc post reg. 8549 */ 8550 request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 8551 request_desc_high = ptgt->m_devhdl << 16; 8552 8553 mpt->m_active->m_slot[cmd->cmd_slot] = cmd; 8554 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high); 8555 8556 /* 8557 * Start timeout. 8558 */ 8559 mutex_enter(&ptgt->m_tgt_intr_mutex); 8560 #ifdef MPTSAS_TEST 8561 /* 8562 * Temporarily set timebase = 0; needed for 8563 * timeout torture test. 
8564 */ 8565 if (mptsas_test_timeouts) { 8566 ptgt->m_timebase = 0; 8567 } 8568 #endif 8569 n = pkt->pkt_time - ptgt->m_timebase; 8570 8571 if (n == 0) { 8572 (ptgt->m_dups)++; 8573 ptgt->m_timeout = ptgt->m_timebase; 8574 } else if (n > 0) { 8575 ptgt->m_timeout = 8576 ptgt->m_timebase = pkt->pkt_time; 8577 ptgt->m_dups = 1; 8578 } else if (n < 0) { 8579 ptgt->m_timeout = ptgt->m_timebase; 8580 } 8581 #ifdef MPTSAS_TEST 8582 /* 8583 * Set back to a number higher than 8584 * mptsas_scsi_watchdog_tick 8585 * so timeouts will happen in mptsas_watchsubr 8586 */ 8587 if (mptsas_test_timeouts) { 8588 ptgt->m_timebase = 60; 8589 } 8590 #endif 8591 mutex_exit(&ptgt->m_tgt_intr_mutex); 8592 8593 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) || 8594 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) { 8595 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED); 8596 return (DDI_FAILURE); 8597 } 8598 return (DDI_SUCCESS); 8599 } 8600 8601 /* 8602 * Select a helper thread to handle current doneq 8603 */ 8604 static void 8605 mptsas_deliver_doneq_thread(mptsas_t *mpt) 8606 { 8607 uint64_t t, i; 8608 uint32_t min = 0xffffffff; 8609 mptsas_doneq_thread_list_t *item; 8610 8611 for (i = 0; i < mpt->m_doneq_thread_n; i++) { 8612 item = &mpt->m_doneq_thread_id[i]; 8613 /* 8614 * If the completed command on help thread[i] less than 8615 * doneq_thread_threshold, then pick the thread[i]. Otherwise 8616 * pick a thread which has least completed command. 
8617 */ 8618 8619 mutex_enter(&item->mutex); 8620 if (item->len < mpt->m_doneq_thread_threshold) { 8621 t = i; 8622 mutex_exit(&item->mutex); 8623 break; 8624 } 8625 if (item->len < min) { 8626 min = item->len; 8627 t = i; 8628 } 8629 mutex_exit(&item->mutex); 8630 } 8631 mutex_enter(&mpt->m_doneq_thread_id[t].mutex); 8632 mptsas_doneq_mv(mpt, t); 8633 cv_signal(&mpt->m_doneq_thread_id[t].cv); 8634 mutex_exit(&mpt->m_doneq_thread_id[t].mutex); 8635 } 8636 8637 /* 8638 * move the current global doneq to the doneq of thread[t] 8639 */ 8640 static void 8641 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t) 8642 { 8643 mptsas_cmd_t *cmd; 8644 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t]; 8645 8646 ASSERT(mutex_owned(&item->mutex)); 8647 mutex_enter(&mpt->m_intr_mutex); 8648 while ((cmd = mpt->m_doneq) != NULL) { 8649 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) { 8650 mpt->m_donetail = &mpt->m_doneq; 8651 } 8652 cmd->cmd_linkp = NULL; 8653 *item->donetail = cmd; 8654 item->donetail = &cmd->cmd_linkp; 8655 mpt->m_doneq_len--; 8656 item->len++; 8657 } 8658 mutex_exit(&mpt->m_intr_mutex); 8659 } 8660 8661 void 8662 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd) 8663 { 8664 struct scsi_pkt *pkt = CMD2PKT(cmd); 8665 8666 /* Check all acc and dma handles */ 8667 if ((mptsas_check_acc_handle(mpt->m_datap) != 8668 DDI_SUCCESS) || 8669 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) != 8670 DDI_SUCCESS) || 8671 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) != 8672 DDI_SUCCESS) || 8673 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) != 8674 DDI_SUCCESS) || 8675 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) != 8676 DDI_SUCCESS) || 8677 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) != 8678 DDI_SUCCESS) || 8679 (mptsas_check_acc_handle(mpt->m_config_handle) != 8680 DDI_SUCCESS)) { 8681 ddi_fm_service_impact(mpt->m_dip, 8682 DDI_SERVICE_UNAFFECTED); 8683 ddi_fm_acc_err_clear(mpt->m_config_handle, 8684 DDI_FME_VER0); 8685 pkt->pkt_reason = 
CMD_TRAN_ERR; 8686 pkt->pkt_statistics = 0; 8687 } 8688 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) != 8689 DDI_SUCCESS) || 8690 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) != 8691 DDI_SUCCESS) || 8692 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) != 8693 DDI_SUCCESS) || 8694 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) != 8695 DDI_SUCCESS) || 8696 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) != 8697 DDI_SUCCESS)) { 8698 ddi_fm_service_impact(mpt->m_dip, 8699 DDI_SERVICE_UNAFFECTED); 8700 pkt->pkt_reason = CMD_TRAN_ERR; 8701 pkt->pkt_statistics = 0; 8702 } 8703 if (cmd->cmd_dmahandle && 8704 (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) { 8705 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED); 8706 pkt->pkt_reason = CMD_TRAN_ERR; 8707 pkt->pkt_statistics = 0; 8708 } 8709 if ((cmd->cmd_extra_frames && 8710 ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) != 8711 DDI_SUCCESS) || 8712 (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) != 8713 DDI_SUCCESS)))) { 8714 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED); 8715 pkt->pkt_reason = CMD_TRAN_ERR; 8716 pkt->pkt_statistics = 0; 8717 } 8718 if (cmd->cmd_arqhandle && 8719 (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) { 8720 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED); 8721 pkt->pkt_reason = CMD_TRAN_ERR; 8722 pkt->pkt_statistics = 0; 8723 } 8724 if (cmd->cmd_ext_arqhandle && 8725 (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) { 8726 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED); 8727 pkt->pkt_reason = CMD_TRAN_ERR; 8728 pkt->pkt_statistics = 0; 8729 } 8730 } 8731 8732 /* 8733 * mptsas_doneq_add0 is similar to mptsas_doneq_add except that it is called 8734 * where m_intr_mutex has already been held. 
8735 */ 8736 static inline void 8737 mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd) 8738 { 8739 struct scsi_pkt *pkt = CMD2PKT(cmd); 8740 8741 NDBG31(("mptsas_doneq_add0: cmd=0x%p", (void *)cmd)); 8742 8743 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0); 8744 cmd->cmd_linkp = NULL; 8745 cmd->cmd_flags |= CFLAG_FINISHED; 8746 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT; 8747 8748 /* 8749 * only add scsi pkts that have completion routines to 8750 * the doneq. no intr cmds do not have callbacks. 8751 */ 8752 if (pkt && (pkt->pkt_comp)) { 8753 *mpt->m_donetail = cmd; 8754 mpt->m_donetail = &cmd->cmd_linkp; 8755 mpt->m_doneq_len++; 8756 } 8757 } 8758 8759 /* 8760 * These routines manipulate the queue of commands that 8761 * are waiting for their completion routines to be called. 8762 * The queue is usually in FIFO order but on an MP system 8763 * it's possible for the completion routines to get out 8764 * of order. If that's a problem you need to add a global 8765 * mutex around the code that calls the completion routine 8766 * in the interrupt handler. 
8767 */ 8768 static void 8769 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd) 8770 { 8771 ASSERT(mutex_owned(&mpt->m_mutex)); 8772 8773 mptsas_fma_check(mpt, cmd); 8774 8775 mutex_enter(&mpt->m_intr_mutex); 8776 mptsas_doneq_add0(mpt, cmd); 8777 mutex_exit(&mpt->m_intr_mutex); 8778 } 8779 8780 static mptsas_cmd_t * 8781 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t) 8782 { 8783 mptsas_cmd_t *cmd; 8784 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t]; 8785 8786 /* pop one off the done queue */ 8787 if ((cmd = item->doneq) != NULL) { 8788 /* if the queue is now empty fix the tail pointer */ 8789 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd)); 8790 if ((item->doneq = cmd->cmd_linkp) == NULL) { 8791 item->donetail = &item->doneq; 8792 } 8793 cmd->cmd_linkp = NULL; 8794 item->len--; 8795 } 8796 return (cmd); 8797 } 8798 8799 static void 8800 mptsas_doneq_empty(mptsas_t *mpt) 8801 { 8802 mutex_enter(&mpt->m_intr_mutex); 8803 if (mpt->m_doneq && !mpt->m_in_callback) { 8804 mptsas_cmd_t *cmd, *next; 8805 struct scsi_pkt *pkt; 8806 8807 mpt->m_in_callback = 1; 8808 cmd = mpt->m_doneq; 8809 mpt->m_doneq = NULL; 8810 mpt->m_donetail = &mpt->m_doneq; 8811 mpt->m_doneq_len = 0; 8812 8813 mutex_exit(&mpt->m_intr_mutex); 8814 8815 /* 8816 * ONLY in ISR, is it called without m_mutex held, otherwise, 8817 * it is always called with m_mutex held. 
8818 */ 8819 if ((curthread->t_flag & T_INTR_THREAD) == 0) 8820 mutex_exit(&mpt->m_mutex); 8821 /* 8822 * run the completion routines of all the 8823 * completed commands 8824 */ 8825 while (cmd != NULL) { 8826 next = cmd->cmd_linkp; 8827 cmd->cmd_linkp = NULL; 8828 /* run this command's completion routine */ 8829 cmd->cmd_flags |= CFLAG_COMPLETED; 8830 pkt = CMD2PKT(cmd); 8831 mptsas_pkt_comp(pkt, cmd); 8832 cmd = next; 8833 } 8834 if ((curthread->t_flag & T_INTR_THREAD) == 0) 8835 mutex_enter(&mpt->m_mutex); 8836 mpt->m_in_callback = 0; 8837 return; 8838 } 8839 mutex_exit(&mpt->m_intr_mutex); 8840 } 8841 8842 /* 8843 * These routines manipulate the target's queue of pending requests 8844 */ 8845 void 8846 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd) 8847 { 8848 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd)); 8849 mptsas_target_t *ptgt = cmd->cmd_tgt_addr; 8850 cmd->cmd_queued = TRUE; 8851 if (ptgt) 8852 ptgt->m_t_nwait++; 8853 if (cmd->cmd_pkt_flags & FLAG_HEAD) { 8854 mutex_enter(&mpt->m_intr_mutex); 8855 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) { 8856 mpt->m_waitqtail = &cmd->cmd_linkp; 8857 } 8858 mpt->m_waitq = cmd; 8859 mutex_exit(&mpt->m_intr_mutex); 8860 } else { 8861 cmd->cmd_linkp = NULL; 8862 *(mpt->m_waitqtail) = cmd; 8863 mpt->m_waitqtail = &cmd->cmd_linkp; 8864 } 8865 } 8866 8867 static mptsas_cmd_t * 8868 mptsas_waitq_rm(mptsas_t *mpt) 8869 { 8870 mptsas_cmd_t *cmd; 8871 mptsas_target_t *ptgt; 8872 NDBG7(("mptsas_waitq_rm")); 8873 8874 mutex_enter(&mpt->m_intr_mutex); 8875 MPTSAS_WAITQ_RM(mpt, cmd); 8876 mutex_exit(&mpt->m_intr_mutex); 8877 8878 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd)); 8879 if (cmd) { 8880 ptgt = cmd->cmd_tgt_addr; 8881 if (ptgt) { 8882 ptgt->m_t_nwait--; 8883 ASSERT(ptgt->m_t_nwait >= 0); 8884 } 8885 } 8886 return (cmd); 8887 } 8888 8889 /* 8890 * remove specified cmd from the middle of the wait queue. 
8891 */ 8892 static void 8893 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd) 8894 { 8895 mptsas_cmd_t *prevp = mpt->m_waitq; 8896 mptsas_target_t *ptgt = cmd->cmd_tgt_addr; 8897 8898 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p", 8899 (void *)mpt, (void *)cmd)); 8900 if (ptgt) { 8901 ptgt->m_t_nwait--; 8902 ASSERT(ptgt->m_t_nwait >= 0); 8903 } 8904 8905 if (prevp == cmd) { 8906 mutex_enter(&mpt->m_intr_mutex); 8907 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL) 8908 mpt->m_waitqtail = &mpt->m_waitq; 8909 mutex_exit(&mpt->m_intr_mutex); 8910 8911 cmd->cmd_linkp = NULL; 8912 cmd->cmd_queued = FALSE; 8913 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p", 8914 (void *)mpt, (void *)cmd)); 8915 return; 8916 } 8917 8918 while (prevp != NULL) { 8919 if (prevp->cmd_linkp == cmd) { 8920 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL) 8921 mpt->m_waitqtail = &prevp->cmd_linkp; 8922 8923 cmd->cmd_linkp = NULL; 8924 cmd->cmd_queued = FALSE; 8925 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p", 8926 (void *)mpt, (void *)cmd)); 8927 return; 8928 } 8929 prevp = prevp->cmd_linkp; 8930 } 8931 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch"); 8932 } 8933 8934 /* 8935 * device and bus reset handling 8936 * 8937 * Notes: 8938 * - RESET_ALL: reset the controller 8939 * - RESET_TARGET: reset the target specified in scsi_address 8940 */ 8941 static int 8942 mptsas_scsi_reset(struct scsi_address *ap, int level) 8943 { 8944 mptsas_t *mpt = ADDR2MPT(ap); 8945 int rval; 8946 mptsas_tgt_private_t *tgt_private; 8947 mptsas_target_t *ptgt = NULL; 8948 8949 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private; 8950 ptgt = tgt_private->t_private; 8951 if (ptgt == NULL) { 8952 return (FALSE); 8953 } 8954 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl, 8955 level)); 8956 8957 mutex_enter(&mpt->m_mutex); 8958 /* 8959 * if we are not in panic set up a reset delay for this target 8960 */ 8961 if (!ddi_in_panic()) { 8962 
mptsas_setup_bus_reset_delay(mpt); 8963 } else { 8964 drv_usecwait(mpt->m_scsi_reset_delay * 1000); 8965 } 8966 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl); 8967 mutex_exit(&mpt->m_mutex); 8968 8969 /* 8970 * The transport layer expect to only see TRUE and 8971 * FALSE. Therefore, we will adjust the return value 8972 * if mptsas_do_scsi_reset returns FAILED. 8973 */ 8974 if (rval == FAILED) 8975 rval = FALSE; 8976 return (rval); 8977 } 8978 8979 static int 8980 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl) 8981 { 8982 int rval = FALSE; 8983 uint8_t config, disk; 8984 mptsas_slots_t *slots = mpt->m_active; 8985 8986 ASSERT(mutex_owned(&mpt->m_mutex)); 8987 8988 if (mptsas_debug_resets) { 8989 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d", 8990 devhdl); 8991 } 8992 8993 /* 8994 * Issue a Target Reset message to the target specified but not to a 8995 * disk making up a raid volume. Just look through the RAID config 8996 * Phys Disk list of DevHandles. If the target's DevHandle is in this 8997 * list, then don't reset this target. 8998 */ 8999 for (config = 0; config < slots->m_num_raid_configs; config++) { 9000 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) { 9001 if (devhdl == slots->m_raidconfig[config]. 
9002 m_physdisk_devhdl[disk]) { 9003 return (TRUE); 9004 } 9005 } 9006 } 9007 9008 rval = mptsas_ioc_task_management(mpt, 9009 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0); 9010 9011 mptsas_doneq_empty(mpt); 9012 return (rval); 9013 } 9014 9015 static int 9016 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag, 9017 void (*callback)(caddr_t), caddr_t arg) 9018 { 9019 mptsas_t *mpt = ADDR2MPT(ap); 9020 9021 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target)); 9022 9023 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg, 9024 &mpt->m_mutex, &mpt->m_reset_notify_listf)); 9025 } 9026 9027 static int 9028 mptsas_get_name(struct scsi_device *sd, char *name, int len) 9029 { 9030 dev_info_t *lun_dip = NULL; 9031 9032 ASSERT(sd != NULL); 9033 ASSERT(name != NULL); 9034 lun_dip = sd->sd_dev; 9035 ASSERT(lun_dip != NULL); 9036 9037 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) { 9038 return (1); 9039 } else { 9040 return (0); 9041 } 9042 } 9043 9044 static int 9045 mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len) 9046 { 9047 return (mptsas_get_name(sd, name, len)); 9048 } 9049 9050 void 9051 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what) 9052 { 9053 9054 NDBG25(("mptsas_set_throttle: throttle=%x", what)); 9055 9056 /* 9057 * if the bus is draining/quiesced, no changes to the throttles 9058 * are allowed. Not allowing change of throttles during draining 9059 * limits error recovery but will reduce draining time 9060 * 9061 * all throttles should have been set to HOLD_THROTTLE 9062 */ 9063 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) { 9064 return; 9065 } 9066 9067 if (what == HOLD_THROTTLE) { 9068 ptgt->m_t_throttle = HOLD_THROTTLE; 9069 } else if (ptgt->m_reset_delay == 0) { 9070 ptgt->m_t_throttle = what; 9071 } 9072 } 9073 9074 /* 9075 * Clean up from a device reset. 
9076 * For the case of target reset, this function clears the waitq of all 9077 * commands for a particular target. For the case of abort task set, this 9078 * function clears the waitq of all commonds for a particular target/lun. 9079 */ 9080 static void 9081 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype) 9082 { 9083 mptsas_slots_t *slots = mpt->m_active; 9084 mptsas_cmd_t *cmd, *next_cmd; 9085 int slot; 9086 uchar_t reason; 9087 uint_t stat; 9088 9089 NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun)); 9090 9091 /* 9092 * Make sure the I/O Controller has flushed all cmds 9093 * that are associated with this target for a target reset 9094 * and target/lun for abort task set. 9095 * Account for TM requests, which use the last SMID. 9096 */ 9097 mutex_enter(&mpt->m_intr_mutex); 9098 for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) { 9099 if ((cmd = slots->m_slot[slot]) == NULL) { 9100 continue; 9101 } 9102 reason = CMD_RESET; 9103 stat = STAT_DEV_RESET; 9104 switch (tasktype) { 9105 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 9106 if (Tgt(cmd) == target) { 9107 NDBG25(("mptsas_flush_target discovered non-" 9108 "NULL cmd in slot %d, tasktype 0x%x", slot, 9109 tasktype)); 9110 mptsas_dump_cmd(mpt, cmd); 9111 mptsas_remove_cmd0(mpt, cmd); 9112 mptsas_set_pkt_reason(mpt, cmd, reason, stat); 9113 mptsas_doneq_add0(mpt, cmd); 9114 } 9115 break; 9116 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: 9117 reason = CMD_ABORTED; 9118 stat = STAT_ABORTED; 9119 /*FALLTHROUGH*/ 9120 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 9121 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) { 9122 9123 NDBG25(("mptsas_flush_target discovered non-" 9124 "NULL cmd in slot %d, tasktype 0x%x", slot, 9125 tasktype)); 9126 mptsas_dump_cmd(mpt, cmd); 9127 mptsas_remove_cmd0(mpt, cmd); 9128 mptsas_set_pkt_reason(mpt, cmd, reason, 9129 stat); 9130 mptsas_doneq_add0(mpt, cmd); 9131 } 9132 break; 9133 default: 9134 break; 9135 } 9136 } 9137 
mutex_exit(&mpt->m_intr_mutex); 9138 9139 /* 9140 * Flush the waitq of this target's cmds 9141 */ 9142 cmd = mpt->m_waitq; 9143 9144 reason = CMD_RESET; 9145 stat = STAT_DEV_RESET; 9146 9147 switch (tasktype) { 9148 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 9149 while (cmd != NULL) { 9150 next_cmd = cmd->cmd_linkp; 9151 if (Tgt(cmd) == target) { 9152 mptsas_waitq_delete(mpt, cmd); 9153 mptsas_set_pkt_reason(mpt, cmd, 9154 reason, stat); 9155 mptsas_doneq_add(mpt, cmd); 9156 } 9157 cmd = next_cmd; 9158 } 9159 break; 9160 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: 9161 reason = CMD_ABORTED; 9162 stat = STAT_ABORTED; 9163 /*FALLTHROUGH*/ 9164 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 9165 while (cmd != NULL) { 9166 next_cmd = cmd->cmd_linkp; 9167 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) { 9168 mptsas_waitq_delete(mpt, cmd); 9169 mptsas_set_pkt_reason(mpt, cmd, 9170 reason, stat); 9171 mptsas_doneq_add(mpt, cmd); 9172 } 9173 cmd = next_cmd; 9174 } 9175 break; 9176 default: 9177 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.", 9178 tasktype); 9179 break; 9180 } 9181 } 9182 9183 /* 9184 * Clean up hba state, abort all outstanding command and commands in waitq 9185 * reset timeout of all targets. 9186 */ 9187 static void 9188 mptsas_flush_hba(mptsas_t *mpt) 9189 { 9190 mptsas_slots_t *slots = mpt->m_active; 9191 mptsas_cmd_t *cmd; 9192 int slot; 9193 9194 NDBG25(("mptsas_flush_hba")); 9195 9196 /* 9197 * The I/O Controller should have already sent back 9198 * all commands via the scsi I/O reply frame. Make 9199 * sure all commands have been flushed. 9200 * Account for TM request, which use the last SMID. 
9201 */ 9202 mutex_enter(&mpt->m_intr_mutex); 9203 for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) { 9204 if ((cmd = slots->m_slot[slot]) == NULL) { 9205 continue; 9206 } 9207 9208 if (cmd->cmd_flags & CFLAG_CMDIOC) { 9209 /* 9210 * Need to make sure to tell everyone that might be 9211 * waiting on this command that it's going to fail. If 9212 * we get here, this command will never timeout because 9213 * the active command table is going to be re-allocated, 9214 * so there will be nothing to check against a time out. 9215 * Instead, mark the command as failed due to reset. 9216 */ 9217 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, 9218 STAT_BUS_RESET); 9219 if ((cmd->cmd_flags & CFLAG_PASSTHRU) || 9220 (cmd->cmd_flags & CFLAG_CONFIG) || 9221 (cmd->cmd_flags & CFLAG_FW_DIAG)) { 9222 cmd->cmd_flags |= CFLAG_FINISHED; 9223 cv_broadcast(&mpt->m_passthru_cv); 9224 cv_broadcast(&mpt->m_config_cv); 9225 cv_broadcast(&mpt->m_fw_diag_cv); 9226 } 9227 continue; 9228 } 9229 9230 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d", 9231 slot)); 9232 mptsas_dump_cmd(mpt, cmd); 9233 9234 mptsas_remove_cmd0(mpt, cmd); 9235 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET); 9236 mptsas_doneq_add0(mpt, cmd); 9237 } 9238 mutex_exit(&mpt->m_intr_mutex); 9239 9240 /* 9241 * Flush the waitq. 
9242 */ 9243 while ((cmd = mptsas_waitq_rm(mpt)) != NULL) { 9244 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET); 9245 if ((cmd->cmd_flags & CFLAG_PASSTHRU) || 9246 (cmd->cmd_flags & CFLAG_CONFIG) || 9247 (cmd->cmd_flags & CFLAG_FW_DIAG)) { 9248 cmd->cmd_flags |= CFLAG_FINISHED; 9249 cv_broadcast(&mpt->m_passthru_cv); 9250 cv_broadcast(&mpt->m_config_cv); 9251 cv_broadcast(&mpt->m_fw_diag_cv); 9252 } else { 9253 mptsas_doneq_add(mpt, cmd); 9254 } 9255 } 9256 } 9257 9258 /* 9259 * set pkt_reason and OR in pkt_statistics flag 9260 */ 9261 static void 9262 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason, 9263 uint_t stat) 9264 { 9265 #ifndef __lock_lint 9266 _NOTE(ARGUNUSED(mpt)) 9267 #endif 9268 9269 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x", 9270 (void *)cmd, reason, stat)); 9271 9272 if (cmd) { 9273 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) { 9274 cmd->cmd_pkt->pkt_reason = reason; 9275 } 9276 cmd->cmd_pkt->pkt_statistics |= stat; 9277 } 9278 } 9279 9280 static void 9281 mptsas_start_watch_reset_delay() 9282 { 9283 NDBG22(("mptsas_start_watch_reset_delay")); 9284 9285 mutex_enter(&mptsas_global_mutex); 9286 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) { 9287 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL, 9288 drv_usectohz((clock_t) 9289 MPTSAS_WATCH_RESET_DELAY_TICK * 1000)); 9290 ASSERT(mptsas_reset_watch != NULL); 9291 } 9292 mutex_exit(&mptsas_global_mutex); 9293 } 9294 9295 static void 9296 mptsas_setup_bus_reset_delay(mptsas_t *mpt) 9297 { 9298 mptsas_target_t *ptgt = NULL; 9299 9300 NDBG22(("mptsas_setup_bus_reset_delay")); 9301 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl, 9302 MPTSAS_HASH_FIRST); 9303 while (ptgt != NULL) { 9304 mutex_enter(&ptgt->m_tgt_intr_mutex); 9305 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE); 9306 ptgt->m_reset_delay = mpt->m_scsi_reset_delay; 9307 mutex_exit(&ptgt->m_tgt_intr_mutex); 9308 9309 ptgt = (mptsas_target_t 
*)mptsas_hash_traverse( 9310 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT); 9311 } 9312 9313 mptsas_start_watch_reset_delay(); 9314 } 9315 9316 /* 9317 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every 9318 * mpt instance for active reset delays 9319 */ 9320 static void 9321 mptsas_watch_reset_delay(void *arg) 9322 { 9323 #ifndef __lock_lint 9324 _NOTE(ARGUNUSED(arg)) 9325 #endif 9326 9327 mptsas_t *mpt; 9328 int not_done = 0; 9329 9330 NDBG22(("mptsas_watch_reset_delay")); 9331 9332 mutex_enter(&mptsas_global_mutex); 9333 mptsas_reset_watch = 0; 9334 mutex_exit(&mptsas_global_mutex); 9335 rw_enter(&mptsas_global_rwlock, RW_READER); 9336 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) { 9337 if (mpt->m_tran == 0) { 9338 continue; 9339 } 9340 mutex_enter(&mpt->m_mutex); 9341 not_done += mptsas_watch_reset_delay_subr(mpt); 9342 mutex_exit(&mpt->m_mutex); 9343 } 9344 rw_exit(&mptsas_global_rwlock); 9345 9346 if (not_done) { 9347 mptsas_start_watch_reset_delay(); 9348 } 9349 } 9350 9351 static int 9352 mptsas_watch_reset_delay_subr(mptsas_t *mpt) 9353 { 9354 int done = 0; 9355 int restart = 0; 9356 mptsas_target_t *ptgt = NULL; 9357 9358 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt)); 9359 9360 ASSERT(mutex_owned(&mpt->m_mutex)); 9361 9362 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl, 9363 MPTSAS_HASH_FIRST); 9364 while (ptgt != NULL) { 9365 mutex_enter(&ptgt->m_tgt_intr_mutex); 9366 if (ptgt->m_reset_delay != 0) { 9367 ptgt->m_reset_delay -= 9368 MPTSAS_WATCH_RESET_DELAY_TICK; 9369 if (ptgt->m_reset_delay <= 0) { 9370 ptgt->m_reset_delay = 0; 9371 mptsas_set_throttle(mpt, ptgt, 9372 MAX_THROTTLE); 9373 restart++; 9374 } else { 9375 done = -1; 9376 } 9377 } 9378 mutex_exit(&ptgt->m_tgt_intr_mutex); 9379 9380 ptgt = (mptsas_target_t *)mptsas_hash_traverse( 9381 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT); 9382 } 9383 9384 if (restart > 0) { 9385 mptsas_restart_hba(mpt); 9386 } 9387 return 
(done); 9388 } 9389 9390 #ifdef MPTSAS_TEST 9391 static void 9392 mptsas_test_reset(mptsas_t *mpt, int target) 9393 { 9394 mptsas_target_t *ptgt = NULL; 9395 9396 if (mptsas_rtest == target) { 9397 if (mptsas_do_scsi_reset(mpt, target) == TRUE) { 9398 mptsas_rtest = -1; 9399 } 9400 if (mptsas_rtest == -1) { 9401 NDBG22(("mptsas_test_reset success")); 9402 } 9403 } 9404 } 9405 #endif 9406 9407 /* 9408 * abort handling: 9409 * 9410 * Notes: 9411 * - if pkt is not NULL, abort just that command 9412 * - if pkt is NULL, abort all outstanding commands for target 9413 */ 9414 static int 9415 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 9416 { 9417 mptsas_t *mpt = ADDR2MPT(ap); 9418 int rval; 9419 mptsas_tgt_private_t *tgt_private; 9420 int target, lun; 9421 9422 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran-> 9423 tran_tgt_private; 9424 ASSERT(tgt_private != NULL); 9425 target = tgt_private->t_private->m_devhdl; 9426 lun = tgt_private->t_lun; 9427 9428 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun)); 9429 9430 mutex_enter(&mpt->m_mutex); 9431 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt); 9432 mutex_exit(&mpt->m_mutex); 9433 return (rval); 9434 } 9435 9436 static int 9437 mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt) 9438 { 9439 mptsas_cmd_t *sp = NULL; 9440 mptsas_slots_t *slots = mpt->m_active; 9441 int rval = FALSE; 9442 9443 ASSERT(mutex_owned(&mpt->m_mutex)); 9444 9445 /* 9446 * Abort the command pkt on the target/lun in ap. If pkt is 9447 * NULL, abort all outstanding commands on that target/lun. 9448 * If you can abort them, return 1, else return 0. 9449 * Each packet that's aborted should be sent back to the target 9450 * driver through the callback routine, with pkt_reason set to 9451 * CMD_ABORTED. 9452 * 9453 * abort cmd pkt on HBA hardware; clean out of outstanding 9454 * command lists, etc. 
9455 */ 9456 if (pkt != NULL) { 9457 /* abort the specified packet */ 9458 sp = PKT2CMD(pkt); 9459 9460 if (sp->cmd_queued) { 9461 NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted", 9462 (void *)sp)); 9463 mptsas_waitq_delete(mpt, sp); 9464 mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED, 9465 STAT_ABORTED); 9466 mptsas_doneq_add(mpt, sp); 9467 rval = TRUE; 9468 goto done; 9469 } 9470 9471 /* 9472 * Have mpt firmware abort this command 9473 */ 9474 mutex_enter(&mpt->m_intr_mutex); 9475 if (slots->m_slot[sp->cmd_slot] != NULL) { 9476 mutex_exit(&mpt->m_intr_mutex); 9477 rval = mptsas_ioc_task_management(mpt, 9478 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target, 9479 lun, NULL, 0, 0); 9480 9481 /* 9482 * The transport layer expects only TRUE and FALSE. 9483 * Therefore, if mptsas_ioc_task_management returns 9484 * FAILED we will return FALSE. 9485 */ 9486 if (rval == FAILED) 9487 rval = FALSE; 9488 goto done; 9489 } 9490 mutex_exit(&mpt->m_intr_mutex); 9491 } 9492 9493 /* 9494 * If pkt is NULL then abort task set 9495 */ 9496 rval = mptsas_ioc_task_management(mpt, 9497 MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0); 9498 9499 /* 9500 * The transport layer expects only TRUE and FALSE. 9501 * Therefore, if mptsas_ioc_task_management returns 9502 * FAILED we will return FALSE. 9503 */ 9504 if (rval == FAILED) 9505 rval = FALSE; 9506 9507 #ifdef MPTSAS_TEST 9508 if (rval && mptsas_test_stop) { 9509 debug_enter("mptsas_do_scsi_abort"); 9510 } 9511 #endif 9512 9513 done: 9514 mptsas_doneq_empty(mpt); 9515 return (rval); 9516 } 9517 9518 /* 9519 * capability handling: 9520 * (*tran_getcap). Get the capability named, and return its value. 
9521 */ 9522 static int 9523 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly) 9524 { 9525 mptsas_t *mpt = ADDR2MPT(ap); 9526 int ckey; 9527 int rval = FALSE; 9528 9529 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x", 9530 ap->a_target, cap, tgtonly)); 9531 9532 mutex_enter(&mpt->m_mutex); 9533 9534 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) { 9535 mutex_exit(&mpt->m_mutex); 9536 return (UNDEFINED); 9537 } 9538 9539 switch (ckey) { 9540 case SCSI_CAP_DMA_MAX: 9541 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer; 9542 break; 9543 case SCSI_CAP_ARQ: 9544 rval = TRUE; 9545 break; 9546 case SCSI_CAP_MSG_OUT: 9547 case SCSI_CAP_PARITY: 9548 case SCSI_CAP_UNTAGGED_QING: 9549 rval = TRUE; 9550 break; 9551 case SCSI_CAP_TAGGED_QING: 9552 rval = TRUE; 9553 break; 9554 case SCSI_CAP_RESET_NOTIFICATION: 9555 rval = TRUE; 9556 break; 9557 case SCSI_CAP_LINKED_CMDS: 9558 rval = FALSE; 9559 break; 9560 case SCSI_CAP_QFULL_RETRIES: 9561 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran-> 9562 tran_tgt_private))->t_private->m_qfull_retries; 9563 break; 9564 case SCSI_CAP_QFULL_RETRY_INTERVAL: 9565 rval = drv_hztousec(((mptsas_tgt_private_t *) 9566 (ap->a_hba_tran->tran_tgt_private))-> 9567 t_private->m_qfull_retry_interval) / 1000; 9568 break; 9569 case SCSI_CAP_CDB_LEN: 9570 rval = CDB_GROUP4; 9571 break; 9572 case SCSI_CAP_INTERCONNECT_TYPE: 9573 rval = INTERCONNECT_SAS; 9574 break; 9575 case SCSI_CAP_TRAN_LAYER_RETRIES: 9576 if (mpt->m_ioc_capabilities & 9577 MPI2_IOCFACTS_CAPABILITY_TLR) 9578 rval = TRUE; 9579 else 9580 rval = FALSE; 9581 break; 9582 default: 9583 rval = UNDEFINED; 9584 break; 9585 } 9586 9587 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval)); 9588 9589 mutex_exit(&mpt->m_mutex); 9590 return (rval); 9591 } 9592 9593 /* 9594 * (*tran_setcap). Set the capability named to the value given. 
9595 */ 9596 static int 9597 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly) 9598 { 9599 mptsas_t *mpt = ADDR2MPT(ap); 9600 int ckey; 9601 int rval = FALSE; 9602 mptsas_target_t *ptgt; 9603 9604 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x", 9605 ap->a_target, cap, value, tgtonly)); 9606 9607 if (!tgtonly) { 9608 return (rval); 9609 } 9610 9611 mutex_enter(&mpt->m_mutex); 9612 9613 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) { 9614 mutex_exit(&mpt->m_mutex); 9615 return (UNDEFINED); 9616 } 9617 9618 switch (ckey) { 9619 case SCSI_CAP_DMA_MAX: 9620 case SCSI_CAP_MSG_OUT: 9621 case SCSI_CAP_PARITY: 9622 case SCSI_CAP_INITIATOR_ID: 9623 case SCSI_CAP_LINKED_CMDS: 9624 case SCSI_CAP_UNTAGGED_QING: 9625 case SCSI_CAP_RESET_NOTIFICATION: 9626 /* 9627 * None of these are settable via 9628 * the capability interface. 9629 */ 9630 break; 9631 case SCSI_CAP_ARQ: 9632 /* 9633 * We cannot turn off arq so return false if asked to 9634 */ 9635 if (value) { 9636 rval = TRUE; 9637 } else { 9638 rval = FALSE; 9639 } 9640 break; 9641 case SCSI_CAP_TAGGED_QING: 9642 ptgt = ((mptsas_tgt_private_t *) 9643 (ap->a_hba_tran->tran_tgt_private))->t_private; 9644 mutex_enter(&ptgt->m_tgt_intr_mutex); 9645 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE); 9646 mutex_exit(&ptgt->m_tgt_intr_mutex); 9647 rval = TRUE; 9648 break; 9649 case SCSI_CAP_QFULL_RETRIES: 9650 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))-> 9651 t_private->m_qfull_retries = (uchar_t)value; 9652 rval = TRUE; 9653 break; 9654 case SCSI_CAP_QFULL_RETRY_INTERVAL: 9655 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))-> 9656 t_private->m_qfull_retry_interval = 9657 drv_usectohz(value * 1000); 9658 rval = TRUE; 9659 break; 9660 default: 9661 rval = UNDEFINED; 9662 break; 9663 } 9664 mutex_exit(&mpt->m_mutex); 9665 return (rval); 9666 } 9667 9668 /* 9669 * Utility routine for mptsas_ifsetcap/ifgetcap 9670 */ 9671 /*ARGSUSED*/ 9672 static int 
9673 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp) 9674 { 9675 NDBG24(("mptsas_scsi_capchk: cap=%s", cap)); 9676 9677 if (!cap) 9678 return (FALSE); 9679 9680 *cidxp = scsi_hba_lookup_capstr(cap); 9681 return (TRUE); 9682 } 9683 9684 static int 9685 mptsas_alloc_active_slots(mptsas_t *mpt, int flag) 9686 { 9687 mptsas_slots_t *old_active = mpt->m_active; 9688 mptsas_slots_t *new_active; 9689 size_t size; 9690 int rval = -1, nslot, i; 9691 mptsas_slot_free_e_t *pe; 9692 9693 if (mptsas_outstanding_cmds_n(mpt)) { 9694 NDBG9(("cannot change size of active slots array")); 9695 return (rval); 9696 } 9697 9698 size = MPTSAS_SLOTS_SIZE(mpt); 9699 new_active = kmem_zalloc(size, flag); 9700 if (new_active == NULL) { 9701 NDBG1(("new active alloc failed")); 9702 return (rval); 9703 } 9704 /* 9705 * Since SMID 0 is reserved and the TM slot is reserved, the 9706 * number of slots that can be used at any one time is 9707 * m_max_requests - 2. 9708 */ 9709 new_active->m_n_slots = nslot = (mpt->m_max_requests - 2); 9710 new_active->m_size = size; 9711 new_active->m_tags = 1; 9712 9713 if (old_active) { 9714 new_active->m_tgttbl = old_active->m_tgttbl; 9715 new_active->m_smptbl = old_active->m_smptbl; 9716 new_active->m_num_raid_configs = 9717 old_active->m_num_raid_configs; 9718 for (i = 0; i < new_active->m_num_raid_configs; i++) { 9719 new_active->m_raidconfig[i] = 9720 old_active->m_raidconfig[i]; 9721 } 9722 mptsas_free_active_slots(mpt); 9723 } 9724 9725 if (max_ncpus & (max_ncpus - 1)) { 9726 mpt->m_slot_freeq_pair_n = (1 << highbit(max_ncpus)); 9727 } else { 9728 mpt->m_slot_freeq_pair_n = max_ncpus; 9729 } 9730 mpt->m_slot_freeq_pairp = kmem_zalloc( 9731 mpt->m_slot_freeq_pair_n * 9732 sizeof (mptsas_slot_freeq_pair_t), KM_SLEEP); 9733 for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) { 9734 list_create(&mpt->m_slot_freeq_pairp[i]. 
		    m_slot_allocq.s.m_fq_list,
		    sizeof (mptsas_slot_free_e_t),
		    offsetof(mptsas_slot_free_e_t, node));
		list_create(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_list,
		    sizeof (mptsas_slot_free_e_t),
		    offsetof(mptsas_slot_free_e_t, node));
		mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n = 0;
		mpt->m_slot_freeq_pairp[i].m_slot_releq.s.m_fq_n = 0;
		mutex_init(&mpt->m_slot_freeq_pairp[i].
		    m_slot_allocq.s.m_fq_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(mpt->m_intr_pri));
		mutex_init(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(mpt->m_intr_pri));
	}
	/* One free-list entry per usable slot, spread across the queues. */
	pe = mpt->m_slot_free_ae = kmem_zalloc(nslot *
	    sizeof (mptsas_slot_free_e_t), KM_SLEEP);
	/*
	 * An array of Mpi2ReplyDescriptorsUnion_t is defined here.
	 * We are trying to eliminate the m_mutex in the context
	 * reply code path in the ISR. Since the read of the
	 * ReplyDescriptor and update/write of the ReplyIndex must
	 * be atomic (since the poll thread may also update them at
	 * the same time) we first read out of the ReplyDescriptor
	 * into this array and update the ReplyIndex register while
	 * protected by the separate mutex m_intr_mutex, and then
	 * release the mutex and process all of them. The length of
	 * the array is capped at 128 (128*64 = 8k), which is assumed
	 * to be the maximum depth of the interrupt coalescing.
	 */
	mpt->m_reply = kmem_zalloc(MPI_ADDRESS_COALSCE_MAX *
	    sizeof (Mpi2ReplyDescriptorsUnion_t), KM_SLEEP);
	for (i = 0; i < nslot; i++, pe++) {
		pe->slot = i + 1; /* SMID 0 is reserved */
		pe->cpuid = i % mpt->m_slot_freeq_pair_n;
		list_insert_tail(&mpt->m_slot_freeq_pairp
		    [i % mpt->m_slot_freeq_pair_n]
		    .m_slot_allocq.s.m_fq_list, pe);
		mpt->m_slot_freeq_pairp[i % mpt->m_slot_freeq_pair_n]
		    .m_slot_allocq.s.m_fq_n++;
		mpt->m_slot_freeq_pairp[i % mpt->m_slot_freeq_pair_n]
		    .m_slot_allocq.s.m_fq_n_init++;
	}

	mpt->m_active = new_active;
	rval = 0;

	return (rval);
}

/*
 * Tear down the active-slot table and the per-CPU slot free queues
 * created by mptsas_alloc_active_slots().  Safe to call when m_active
 * is NULL.
 */
static void
mptsas_free_active_slots(mptsas_t *mpt)
{
	mptsas_slots_t	*active = mpt->m_active;
	size_t		size;
	mptsas_slot_free_e_t	*pe;
	int	i;

	if (active == NULL)
		return;

	if (mpt->m_slot_freeq_pairp) {
		/* Drain and destroy both lists of every free-queue pair. */
		for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
			while ((pe = list_head(&mpt->m_slot_freeq_pairp
			    [i].m_slot_allocq.s.m_fq_list)) != NULL) {
				list_remove(&mpt->m_slot_freeq_pairp[i]
				    .m_slot_allocq.s.m_fq_list, pe);
			}
			list_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_allocq.s.m_fq_list);
			while ((pe = list_head(&mpt->m_slot_freeq_pairp
			    [i].m_slot_releq.s.m_fq_list)) != NULL) {
				list_remove(&mpt->m_slot_freeq_pairp[i]
				    .m_slot_releq.s.m_fq_list, pe);
			}
			list_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_releq.s.m_fq_list);
			mutex_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_allocq.s.m_fq_mutex);
			mutex_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_releq.s.m_fq_mutex);
		}
		kmem_free(mpt->m_slot_freeq_pairp, mpt->m_slot_freeq_pair_n *
		    sizeof (mptsas_slot_freeq_pair_t));
	}
	if (mpt->m_slot_free_ae)
		kmem_free(mpt->m_slot_free_ae, mpt->m_active->m_n_slots *
		    sizeof (mptsas_slot_free_e_t));

	if (mpt->m_reply)
		kmem_free(mpt->m_reply,
MPI_ADDRESS_COALSCE_MAX * 9827 sizeof (Mpi2ReplyDescriptorsUnion_t)); 9828 9829 size = active->m_size; 9830 kmem_free(active, size); 9831 mpt->m_active = NULL; 9832 } 9833 9834 /* 9835 * Error logging, printing, and debug print routines. 9836 */ 9837 static char *mptsas_label = "mpt_sas"; 9838 9839 /*PRINTFLIKE3*/ 9840 void 9841 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...) 9842 { 9843 dev_info_t *dev; 9844 va_list ap; 9845 9846 if (mpt) { 9847 dev = mpt->m_dip; 9848 } else { 9849 dev = 0; 9850 } 9851 9852 mutex_enter(&mptsas_log_mutex); 9853 9854 va_start(ap, fmt); 9855 (void) vsprintf(mptsas_log_buf, fmt, ap); 9856 va_end(ap); 9857 9858 if (level == CE_CONT) { 9859 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf); 9860 } else { 9861 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf); 9862 } 9863 9864 mutex_exit(&mptsas_log_mutex); 9865 } 9866 9867 #ifdef MPTSAS_DEBUG 9868 /*PRINTFLIKE1*/ 9869 void 9870 mptsas_printf(char *fmt, ...) 9871 { 9872 dev_info_t *dev = 0; 9873 va_list ap; 9874 9875 mutex_enter(&mptsas_log_mutex); 9876 9877 va_start(ap, fmt); 9878 (void) vsprintf(mptsas_log_buf, fmt, ap); 9879 va_end(ap); 9880 9881 #ifdef PROM_PRINTF 9882 prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf); 9883 #else 9884 scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf); 9885 #endif 9886 mutex_exit(&mptsas_log_mutex); 9887 } 9888 #endif 9889 9890 /* 9891 * timeout handling 9892 */ 9893 static void 9894 mptsas_watch(void *arg) 9895 { 9896 #ifndef __lock_lint 9897 _NOTE(ARGUNUSED(arg)) 9898 #endif 9899 9900 mptsas_t *mpt; 9901 uint32_t doorbell; 9902 9903 NDBG30(("mptsas_watch")); 9904 9905 rw_enter(&mptsas_global_rwlock, RW_READER); 9906 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) { 9907 9908 mutex_enter(&mpt->m_mutex); 9909 9910 /* Skip device if not powered on */ 9911 if (mpt->m_options & MPTSAS_OPT_PM) { 9912 if (mpt->m_power_level == PM_LEVEL_D0) { 9913 (void) pm_busy_component(mpt->m_dip, 0); 9914 
mpt->m_busy = 1; 9915 } else { 9916 mutex_exit(&mpt->m_mutex); 9917 continue; 9918 } 9919 } 9920 9921 /* 9922 * Check if controller is in a FAULT state. If so, reset it. 9923 */ 9924 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell); 9925 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 9926 doorbell &= MPI2_DOORBELL_DATA_MASK; 9927 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, " 9928 "code: %04x", doorbell); 9929 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET; 9930 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) { 9931 mptsas_log(mpt, CE_WARN, "Reset failed" 9932 "after fault was detected"); 9933 } 9934 } 9935 9936 /* 9937 * For now, always call mptsas_watchsubr. 9938 */ 9939 mptsas_watchsubr(mpt); 9940 9941 if (mpt->m_options & MPTSAS_OPT_PM) { 9942 mpt->m_busy = 0; 9943 (void) pm_idle_component(mpt->m_dip, 0); 9944 } 9945 9946 mutex_exit(&mpt->m_mutex); 9947 } 9948 rw_exit(&mptsas_global_rwlock); 9949 9950 mutex_enter(&mptsas_global_mutex); 9951 if (mptsas_timeouts_enabled) 9952 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick); 9953 mutex_exit(&mptsas_global_mutex); 9954 } 9955 9956 static void 9957 mptsas_watchsubr(mptsas_t *mpt) 9958 { 9959 int i; 9960 mptsas_cmd_t *cmd; 9961 mptsas_target_t *ptgt = NULL; 9962 9963 NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt)); 9964 9965 #ifdef MPTSAS_TEST 9966 if (mptsas_enable_untagged) { 9967 mptsas_test_untagged++; 9968 } 9969 #endif 9970 9971 /* 9972 * Check for commands stuck in active slot 9973 * Account for TM requests, which use the last SMID. 9974 */ 9975 mutex_enter(&mpt->m_intr_mutex); 9976 for (i = 0; i <= mpt->m_active->m_n_slots; i++) { 9977 if ((cmd = mpt->m_active->m_slot[i]) != NULL) { 9978 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) { 9979 cmd->cmd_active_timeout -= 9980 mptsas_scsi_watchdog_tick; 9981 if (cmd->cmd_active_timeout <= 0) { 9982 /* 9983 * There seems to be a command stuck 9984 * in the active slot. Drain throttle. 
					 */
					ptgt = cmd->cmd_tgt_addr;
					mutex_enter(&ptgt->m_tgt_intr_mutex);
					mptsas_set_throttle(mpt, ptgt,
					    DRAIN_THROTTLE);
					mutex_exit(&ptgt->m_tgt_intr_mutex);
				}
			}
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout;
					 * wake every waiter since we cannot
					 * tell which cv the owner sleeps on.
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}
	mutex_exit(&mpt->m_intr_mutex);

	/* Walk every known target and run per-target timeout accounting. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * In order to avoid using m_mutex in the key code path in ISR,
		 * separate mutexes are introduced to protect those elements
		 * shown in ISR.
		 */
		mutex_enter(&ptgt->m_tgt_intr_mutex);

		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				/*
				 * Target still within its first watchdog
				 * interval; just advance the timebase.
				 */
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				mutex_exit(&ptgt->m_tgt_intr_mutex);
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			if (ptgt->m_timeout < 0) {
				/*
				 * Target has timed out; must drop the
				 * per-target mutex before issuing recovery.
				 */
				mutex_exit(&ptgt->m_tgt_intr_mutex);
				mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}

/*
 * timeout recovery
 *
 * Attempt a target reset on the device handle whose command timed out.
 */
static void
mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
{

	NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
	mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
	    "Target %d", devhdl);

	/*
	 * If the current target is not the target passed in,
	 * try to reset that target.
10084 */ 10085 NDBG29(("mptsas_cmd_timeout: device reset")); 10086 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) { 10087 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout " 10088 "recovery failed!", devhdl); 10089 } 10090 } 10091 10092 /* 10093 * Device / Hotplug control 10094 */ 10095 static int 10096 mptsas_scsi_quiesce(dev_info_t *dip) 10097 { 10098 mptsas_t *mpt; 10099 scsi_hba_tran_t *tran; 10100 10101 tran = ddi_get_driver_private(dip); 10102 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL) 10103 return (-1); 10104 10105 return (mptsas_quiesce_bus(mpt)); 10106 } 10107 10108 static int 10109 mptsas_scsi_unquiesce(dev_info_t *dip) 10110 { 10111 mptsas_t *mpt; 10112 scsi_hba_tran_t *tran; 10113 10114 tran = ddi_get_driver_private(dip); 10115 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL) 10116 return (-1); 10117 10118 return (mptsas_unquiesce_bus(mpt)); 10119 } 10120 10121 static int 10122 mptsas_quiesce_bus(mptsas_t *mpt) 10123 { 10124 mptsas_target_t *ptgt = NULL; 10125 10126 NDBG28(("mptsas_quiesce_bus")); 10127 mutex_enter(&mpt->m_mutex); 10128 10129 /* Set all the throttles to zero */ 10130 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl, 10131 MPTSAS_HASH_FIRST); 10132 while (ptgt != NULL) { 10133 mutex_enter(&ptgt->m_tgt_intr_mutex); 10134 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE); 10135 mutex_exit(&ptgt->m_tgt_intr_mutex); 10136 10137 ptgt = (mptsas_target_t *)mptsas_hash_traverse( 10138 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT); 10139 } 10140 10141 /* If there are any outstanding commands in the queue */ 10142 mutex_enter(&mpt->m_intr_mutex); 10143 if (mptsas_outstanding_cmds_n(mpt)) { 10144 mutex_exit(&mpt->m_intr_mutex); 10145 mpt->m_softstate |= MPTSAS_SS_DRAINING; 10146 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain, 10147 mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000))); 10148 if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) { 10149 /* 10150 * Quiesce has been interrupted 10151 */ 
10152 mpt->m_softstate &= ~MPTSAS_SS_DRAINING; 10153 ptgt = (mptsas_target_t *)mptsas_hash_traverse( 10154 &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST); 10155 while (ptgt != NULL) { 10156 mutex_enter(&ptgt->m_tgt_intr_mutex); 10157 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE); 10158 mutex_exit(&ptgt->m_tgt_intr_mutex); 10159 10160 ptgt = (mptsas_target_t *)mptsas_hash_traverse( 10161 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT); 10162 } 10163 mptsas_restart_hba(mpt); 10164 if (mpt->m_quiesce_timeid != 0) { 10165 timeout_id_t tid = mpt->m_quiesce_timeid; 10166 mpt->m_quiesce_timeid = 0; 10167 mutex_exit(&mpt->m_mutex); 10168 (void) untimeout(tid); 10169 return (-1); 10170 } 10171 mutex_exit(&mpt->m_mutex); 10172 return (-1); 10173 } else { 10174 /* Bus has been quiesced */ 10175 ASSERT(mpt->m_quiesce_timeid == 0); 10176 mpt->m_softstate &= ~MPTSAS_SS_DRAINING; 10177 mpt->m_softstate |= MPTSAS_SS_QUIESCED; 10178 mutex_exit(&mpt->m_mutex); 10179 return (0); 10180 } 10181 } 10182 mutex_exit(&mpt->m_intr_mutex); 10183 /* Bus was not busy - QUIESCED */ 10184 mutex_exit(&mpt->m_mutex); 10185 10186 return (0); 10187 } 10188 10189 static int 10190 mptsas_unquiesce_bus(mptsas_t *mpt) 10191 { 10192 mptsas_target_t *ptgt = NULL; 10193 10194 NDBG28(("mptsas_unquiesce_bus")); 10195 mutex_enter(&mpt->m_mutex); 10196 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED; 10197 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl, 10198 MPTSAS_HASH_FIRST); 10199 while (ptgt != NULL) { 10200 mutex_enter(&ptgt->m_tgt_intr_mutex); 10201 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE); 10202 mutex_exit(&ptgt->m_tgt_intr_mutex); 10203 10204 ptgt = (mptsas_target_t *)mptsas_hash_traverse( 10205 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT); 10206 } 10207 mptsas_restart_hba(mpt); 10208 mutex_exit(&mpt->m_mutex); 10209 return (0); 10210 } 10211 10212 static void 10213 mptsas_ncmds_checkdrain(void *arg) 10214 { 10215 mptsas_t *mpt = arg; 10216 mptsas_target_t *ptgt = NULL; 10217 10218 
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		mpt->m_quiesce_timeid = 0;
		mutex_enter(&mpt->m_intr_mutex);
		if (mptsas_outstanding_cmds_n(mpt)) {
			mutex_exit(&mpt->m_intr_mutex);
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mutex_enter(&ptgt->m_tgt_intr_mutex);
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
				mutex_exit(&ptgt->m_tgt_intr_mutex);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			/* Still draining; check again after another period. */
			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		} else {
			mutex_exit(&mpt->m_intr_mutex);
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		}
	}
	mutex_exit(&mpt->m_mutex);
}

/*
 * Debug helper: dump a command's CDB bytes and packet state via NDBG25.
 */
/*ARGSUSED*/
static void
mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int	i;
	uint8_t	*cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
	char	buf[128];

	buf[0] = '\0';
	NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
	    Tgt(cmd), Lun(cmd)));
	(void) sprintf(&buf[0], "\tcdb=[");
	for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
		(void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
	}
	(void) sprintf(&buf[strlen(buf)], " ]");
	NDBG25(("?%s\n", buf));
	NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
	    cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
	    cmd->cmd_pkt->pkt_state));
	NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
	    *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
}

/*
 * Build a passthrough request in the slot's request frame and start it
 * on the IOC.  The request, direction, sizes, and DMA cookies come from
 * the mptsas_pt_request_t hung off the packet's pkt_ha_private.
 */
static void
mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	caddr_t			memp;
	pMPI2RequestHeader_t	request_hdrp;
	struct scsi_pkt		*pkt = cmd->cmd_pkt;
	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
	uint32_t		request_size, data_size, dataout_size;
	uint32_t		direction;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;
	uint32_t		request_desc_low, request_desc_high = 0;
	uint32_t		i, sense_bufp;
	uint8_t			desc_type;
	uint8_t			*request, function;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;

	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	request = pt->request;
	direction = pt->direction;
	request_size = pt->request_size;
	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	/*
	 * Store the passthrough message in memory location
	 * corresponding to our slot number
	 */
	memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
	request_hdrp = (pMPI2RequestHeader_t)memp;
	bzero(memp, mpt->m_req_frame_size);

	for (i = 0; i < request_size; i++) {
		bcopy(request + i, memp + i, 1);
	}

	/* Append simple 64-bit SGEs for the data-out and data-in buffers. */
	if (data_size || dataout_size) {
		pMpi2SGESimple64_t	sgep;
		uint32_t		sge_flags;

		sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
		    request_size);
		if (dataout_size) {

			sge_flags = dataout_size |
			    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_END_OF_BUFFER |
			    MPI2_SGE_FLAGS_HOST_TO_IOC |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);
			ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
			ddi_put32(acc_hdl, &sgep->Address.Low,
			    (uint32_t)(dataout_cookie.dmac_laddress &
			    0xffffffffull));
			ddi_put32(acc_hdl, &sgep->Address.High,
			    (uint32_t)(dataout_cookie.dmac_laddress
			    >> 32));
			sgep++;
		}
		sge_flags = data_size;
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		/* Direction flag on the final SGE follows the transfer. */
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
			    MPI2_SGE_FLAGS_SHIFT);
		} else {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
			    MPI2_SGE_FLAGS_SHIFT);
		}
		ddi_put32(acc_hdl, &sgep->FlagsLength,
		    sge_flags);
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(data_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(data_cookie.dmac_laddress >> 32));
	}

	function = request_hdrp->Function;
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		pMpi2SCSIIORequest_t	scsi_io_req;

		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
		/*
		 * Put SGE for data and data_out buffer at the end of
		 * scsi_io_request message header.(64 bytes in total)
		 * Following above SGEs, the residual space will be
		 * used by sense data.
		 */
		ddi_put8(acc_hdl,
		    &scsi_io_req->SenseBufferLength,
		    (uint8_t)(request_size - 64));

		/*
		 * NOTE(review): only the low 32 bits of the request frame
		 * DMA address are programmed here; presumably the frame pool
		 * is allocated below 4GB or the IOC's SenseBufferHighAddress
		 * was set at init — confirm against the attach path.
		 */
		sense_bufp = mpt->m_req_frame_dma_addr +
		    (mpt->m_req_frame_size * cmd->cmd_slot);
		sense_bufp += 64;
		ddi_put32(acc_hdl,
		    &scsi_io_req->SenseBufferLowAddress, sense_bufp);

		/*
		 * Set SGLOffset0 value
		 */
		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);

		/*
		 * Setup descriptor info. RAID passthrough must use the
		 * default request descriptor which is already set, so if this
		 * is a SCSI IO request, change the descriptor to SCSI IO.
		 */
		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
			request_desc_high = (ddi_get16(acc_hdl,
			    &scsi_io_req->DevHandle) << 16);
		}
	}

	/*
	 * We must wait till the message has been completed before
	 * beginning the next message so we wait for this one to
	 * finish.
	 */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) + desc_type;
	cmd->cmd_rfm = NULL;
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}



/*
 * Execute a user passthrough request synchronously: copy in the request,
 * allocate DMA buffers for the data-in/data-out transfers, run the
 * command (or a task-management request), wait for completion, and copy
 * reply/sense/data back out.  Called and returns with m_mutex held;
 * drops it around copyin/copyout.  Returns 0 or an errno.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t		pt;
	mptsas_dma_alloc_state_t	data_dma_state;
	mptsas_dma_alloc_state_t	dataout_dma_state;
	caddr_t				memp;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	uint32_t			reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t		request_hdrp;
	pMPI2RequestHeader_t		request_msg;
	pMPI2DefaultReply_t		reply_msg;
	Mpi2SCSIIOReply_t		rep_msg;
	int				i, status = 0, pt_flags = 0, rv = 0;
	int				rvalue;
	uint8_t				function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	mutex_enter(&mpt->m_mutex);

	/* Task-management requests take a dedicated path. */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT)
	{
		pMpi2SCSITaskManagementRequest_t	task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		/* For WRITE the "data" buffer carries host-to-IOC payload. */
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	}

	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	}

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/*
	 * NOTE(review): when data_size (or dataout_size) is 0 the
	 * corresponding *_dma_state is never initialized, so the cookie
	 * copied here is stack garbage.  Looks benign because
	 * mptsas_start_passthru() only builds an SGE when the matching
	 * size is non-zero — confirm.
	 */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)&pt;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = timeout;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Sleep until the ISR or the watchdog marks the command finished. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    mpt->m_reply_frame_dma_addr));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		/* SCSI IO replies carry sense data after the fixed reply. */
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = reply_size - reply_len;
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		/* Sense bytes live 64 bytes into the request frame. */
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void)
			    ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd() also returns the command to the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out passthrough leaves the IOC suspect; restart it. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);

	return (status);
}

/*
 * Validate a pass-through ioctl request and hand it to
 * mptsas_do_passthru().  A zero timeout is replaced with the default,
 * and a BOTH-direction request is converted to READ before sending
 * (the data-out side is described separately by PtrDataOut/DataOutSize).
 * Returns the mptsas_do_passthru() result, or EINVAL for an invalid
 * size/direction combination.
 */
static int
mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
{
	/*
	 * If timeout is 0, set timeout to default of 60 seconds.
	 */
	if (data->Timeout == 0) {
		data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
	}

	/*
	 * The request is acceptable when either no data moves
	 * (DataSize == 0 with direction NONE), or data moves with a
	 * READ/WRITE direction, or with direction BOTH provided a
	 * non-zero DataOutSize accompanies it.
	 */
	if (((data->DataSize == 0) &&
	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
	    ((data->DataSize != 0) &&
	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
	    (data->DataOutSize != 0))))) {
		if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
			data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
		} else {
			data->DataOutSize = 0;
		}
		/*
		 * Send passthru request messages
		 */
		return (mptsas_do_passthru(mpt,
		    (uint8_t *)((uintptr_t)data->PtrRequest),
		    (uint8_t *)((uintptr_t)data->PtrReply),
		    (uint8_t *)((uintptr_t)data->PtrData),
		    data->RequestSize, data->ReplySize,
		    data->DataSize, data->DataDirection,
		    (uint8_t *)((uintptr_t)data->PtrDataOut),
		    data->DataOutSize, data->Timeout, mode));
	} else {
		return (EINVAL);
	}
}

/*
 * Look up the FW diagnostic buffer whose unique_id matches the given
 * value.  Returns the buffer list index, or
 * MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND when no buffer carries that id.
 */
static uint8_t
mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
{
	uint8_t index;

	for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
		if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
			return (index);
		}
	}

	return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
}

/*
 * Build a FW diagnostic request in the command's request frame and
 * start it on the hardware.  The mptsas_diag_request_t hung off
 * pkt_ha_private selects between a DIAG_BUFFER_POST and a DIAG_RELEASE
 * message.  Must be called with m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/*
		 * The 64-bit DMA address of the buffer is split into its
		 * low and high 32-bit halves for the message.
		 */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}

/*
 * Post a FW diagnostic buffer to the IOC and wait for the reply.  On
 * success the buffer is marked valid and firmware-owned.  Returns
 * DDI_SUCCESS/DDI_FAILURE; *return_code carries the
 * MPTSAS_FW_DIAG_ERROR_* detail.
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/*
	 * Wait (dropping m_mutex in cv_wait) until the completion path
	 * marks the command finished.
	 */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * A command that got a slot is removed from the slot table instead
	 * of being returned to the pool.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}

/*
 * Ask the IOC to release a FW diagnostic buffer back to host ownership.
 * For MPTSAS_FW_DIAG_TYPE_UNREGISTER the buffer's unique id is also
 * invalidated on success.  Returns DDI_SUCCESS/DDI_FAILURE; *return_code
 * carries the MPTSAS_FW_DIAG_ERROR_* detail.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t	diag;
	int			status, slot_num, rel_flags = 0;
	mptsas_cmd_t		*cmd = NULL;
	struct scsi_pkt		*pkt;
	pMpi2DiagReleaseReply_t	reply;
	uint16_t		iocstatus;
	uint32_t		iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/*
	 * Wait (dropping m_mutex in cv_wait) until the completion path
	 * marks the command finished.
	 */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * A command that got a slot is removed from the slot table instead
	 * of being returned to the pool.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}

/*
 * Handle a FW diag REGISTER action: validate the requested buffer type
 * and unique id, allocate a contiguous DMA buffer of the requested size
 * and post it to the IOC.  Returns DDI_SUCCESS/DDI_FAILURE; *return_code
 * carries the MPTSAS_FW_DIAG_ERROR_* detail.  Must be called with
 * m_mutex held.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				extended_type, buffer_type, i;
	uint32_t			buffer_size;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID. The unique ID
	 * should not be found. If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 *
	 * NOTE(review): pBuffer->unique_id is necessarily
	 * MPTSAS_FW_DIAG_INVALID_UID at this point (checked just above),
	 * so the third term of this condition is always true and the
	 * decision rests on immediate && owned_by_firmware.
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled. The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}

/*
 * Handle a FW diag UNREGISTER action: release the buffer from firmware
 * (when firmware-owned) and free its DMA memory.  If the release fails,
 * the DMA buffer is deliberately left allocated so firmware cannot write
 * into freed memory.  Must be called with m_mutex held.
 */
static int
mptsas_diag_unregister(mptsas_t *mpt,
    mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				i;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	unique_id = diag_unregister->UniqueId;

	/*
	 * Get the current buffer and look up the unique ID. The unique ID
	 * should be there.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	pBuffer = &mpt->m_fw_diag_buffer_list[i];

	/*
	 * Try to release the buffer from FW before freeing it. If release
	 * fails, don't free the DMA buffer in case FW tries to access it
	 * later. If buffer is not owned by firmware, can't release it.
	 */
	if (!pBuffer->owned_by_firmware) {
		status = DDI_SUCCESS;
	} else {
		status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
		    return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
	}

	/*
	 * At this point, return the current status no matter what happens with
	 * the DMA buffer.
	 */
	pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
	if (status == DDI_SUCCESS) {
		if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
		    DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
			    "in mptsas_diag_unregister.");
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
		}
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}

/*
 * Handle a FW diag QUERY action: fill *diag_query with the state of the
 * buffer selected by unique id or, when the id is the invalid sentinel,
 * by buffer type.  Must be called with m_mutex held.
 */
static int
mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				i;
	uint32_t			unique_id;

	ASSERT(mutex_owned(&mpt->m_mutex));

	unique_id = diag_query->UniqueId;

	/*
	 * If ID is valid, query on ID.
	 * If ID is invalid, query on buffer type.
	 */
	if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
		i = diag_query->BufferType;
		if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
			return (DDI_FAILURE);
		}
	} else {
		i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
		if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
			return (DDI_FAILURE);
		}
	}

	/*
	 * Fill query structure with the diag buffer info.
11354 */ 11355 pBuffer = &mpt->m_fw_diag_buffer_list[i]; 11356 diag_query->BufferType = pBuffer->buffer_type; 11357 diag_query->ExtendedType = pBuffer->extended_type; 11358 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) { 11359 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4); 11360 i++) { 11361 diag_query->ProductSpecific[i] = 11362 pBuffer->product_specific[i]; 11363 } 11364 } 11365 diag_query->TotalBufferSize = pBuffer->buffer_data.size; 11366 diag_query->DriverAddedBufferSize = 0; 11367 diag_query->UniqueId = pBuffer->unique_id; 11368 diag_query->ApplicationFlags = 0; 11369 diag_query->DiagnosticFlags = 0; 11370 11371 /* 11372 * Set/Clear application flags 11373 */ 11374 if (pBuffer->immediate) { 11375 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED; 11376 } else { 11377 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED; 11378 } 11379 if (pBuffer->valid_data || pBuffer->owned_by_firmware) { 11380 diag_query->ApplicationFlags |= 11381 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID; 11382 } else { 11383 diag_query->ApplicationFlags &= 11384 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID; 11385 } 11386 if (pBuffer->owned_by_firmware) { 11387 diag_query->ApplicationFlags |= 11388 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS; 11389 } else { 11390 diag_query->ApplicationFlags &= 11391 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS; 11392 } 11393 11394 return (DDI_SUCCESS); 11395 } 11396 11397 static int 11398 mptsas_diag_read_buffer(mptsas_t *mpt, 11399 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf, 11400 uint32_t *return_code, int ioctl_mode) 11401 { 11402 mptsas_fw_diagnostic_buffer_t *pBuffer; 11403 uint8_t i, *pData; 11404 uint32_t unique_id, byte; 11405 int status; 11406 11407 ASSERT(mutex_owned(&mpt->m_mutex)); 11408 11409 unique_id = diag_read_buffer->UniqueId; 11410 11411 /* 11412 * Get the current buffer and look up the unique ID. The unique ID 11413 * should be there. 
11414 */ 11415 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id); 11416 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 11417 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID; 11418 return (DDI_FAILURE); 11419 } 11420 11421 pBuffer = &mpt->m_fw_diag_buffer_list[i]; 11422 11423 /* 11424 * Make sure requested read is within limits 11425 */ 11426 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead > 11427 pBuffer->buffer_data.size) { 11428 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER; 11429 return (DDI_FAILURE); 11430 } 11431 11432 /* 11433 * Copy the requested data from DMA to the diag_read_buffer. The DMA 11434 * buffer that was allocated is one contiguous buffer. 11435 */ 11436 pData = (uint8_t *)(pBuffer->buffer_data.memp + 11437 diag_read_buffer->StartingOffset); 11438 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0, 11439 DDI_DMA_SYNC_FORCPU); 11440 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) { 11441 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode) 11442 != 0) { 11443 return (DDI_FAILURE); 11444 } 11445 } 11446 diag_read_buffer->Status = 0; 11447 11448 /* 11449 * Set or clear the Force Release flag. 11450 */ 11451 if (pBuffer->force_release) { 11452 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE; 11453 } else { 11454 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE; 11455 } 11456 11457 /* 11458 * If buffer is to be reregistered, make sure it's not already owned by 11459 * firmware first. 
11460 */ 11461 status = DDI_SUCCESS; 11462 if (!pBuffer->owned_by_firmware) { 11463 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) { 11464 status = mptsas_post_fw_diag_buffer(mpt, pBuffer, 11465 return_code); 11466 } 11467 } 11468 11469 return (status); 11470 } 11471 11472 static int 11473 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release, 11474 uint32_t *return_code) 11475 { 11476 mptsas_fw_diagnostic_buffer_t *pBuffer; 11477 uint8_t i; 11478 uint32_t unique_id; 11479 int status; 11480 11481 ASSERT(mutex_owned(&mpt->m_mutex)); 11482 11483 unique_id = diag_release->UniqueId; 11484 11485 /* 11486 * Get the current buffer and look up the unique ID. The unique ID 11487 * should be there. 11488 */ 11489 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id); 11490 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 11491 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID; 11492 return (DDI_FAILURE); 11493 } 11494 11495 pBuffer = &mpt->m_fw_diag_buffer_list[i]; 11496 11497 /* 11498 * If buffer is not owned by firmware, it's already been released. 11499 */ 11500 if (!pBuffer->owned_by_firmware) { 11501 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED; 11502 return (DDI_FAILURE); 11503 } 11504 11505 /* 11506 * Release the buffer. 
11507 */ 11508 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code, 11509 MPTSAS_FW_DIAG_TYPE_RELEASE); 11510 return (status); 11511 } 11512 11513 static int 11514 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action, 11515 uint32_t length, uint32_t *return_code, int ioctl_mode) 11516 { 11517 mptsas_fw_diag_register_t diag_register; 11518 mptsas_fw_diag_unregister_t diag_unregister; 11519 mptsas_fw_diag_query_t diag_query; 11520 mptsas_diag_read_buffer_t diag_read_buffer; 11521 mptsas_fw_diag_release_t diag_release; 11522 int status = DDI_SUCCESS; 11523 uint32_t original_return_code, read_buf_len; 11524 11525 ASSERT(mutex_owned(&mpt->m_mutex)); 11526 11527 original_return_code = *return_code; 11528 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS; 11529 11530 switch (action) { 11531 case MPTSAS_FW_DIAG_TYPE_REGISTER: 11532 if (!length) { 11533 *return_code = 11534 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER; 11535 status = DDI_FAILURE; 11536 break; 11537 } 11538 if (ddi_copyin(diag_action, &diag_register, 11539 sizeof (diag_register), ioctl_mode) != 0) { 11540 return (DDI_FAILURE); 11541 } 11542 status = mptsas_diag_register(mpt, &diag_register, 11543 return_code); 11544 break; 11545 11546 case MPTSAS_FW_DIAG_TYPE_UNREGISTER: 11547 if (length < sizeof (diag_unregister)) { 11548 *return_code = 11549 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER; 11550 status = DDI_FAILURE; 11551 break; 11552 } 11553 if (ddi_copyin(diag_action, &diag_unregister, 11554 sizeof (diag_unregister), ioctl_mode) != 0) { 11555 return (DDI_FAILURE); 11556 } 11557 status = mptsas_diag_unregister(mpt, &diag_unregister, 11558 return_code); 11559 break; 11560 11561 case MPTSAS_FW_DIAG_TYPE_QUERY: 11562 if (length < sizeof (diag_query)) { 11563 *return_code = 11564 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER; 11565 status = DDI_FAILURE; 11566 break; 11567 } 11568 if (ddi_copyin(diag_action, &diag_query, 11569 sizeof (diag_query), ioctl_mode) != 0) { 11570 return (DDI_FAILURE); 11571 } 
11572 status = mptsas_diag_query(mpt, &diag_query, 11573 return_code); 11574 if (status == DDI_SUCCESS) { 11575 if (ddi_copyout(&diag_query, diag_action, 11576 sizeof (diag_query), ioctl_mode) != 0) { 11577 return (DDI_FAILURE); 11578 } 11579 } 11580 break; 11581 11582 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER: 11583 if (ddi_copyin(diag_action, &diag_read_buffer, 11584 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) { 11585 return (DDI_FAILURE); 11586 } 11587 read_buf_len = sizeof (diag_read_buffer) - 11588 sizeof (diag_read_buffer.DataBuffer) + 11589 diag_read_buffer.BytesToRead; 11590 if (length < read_buf_len) { 11591 *return_code = 11592 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER; 11593 status = DDI_FAILURE; 11594 break; 11595 } 11596 status = mptsas_diag_read_buffer(mpt, 11597 &diag_read_buffer, diag_action + 11598 sizeof (diag_read_buffer) - 4, return_code, 11599 ioctl_mode); 11600 if (status == DDI_SUCCESS) { 11601 if (ddi_copyout(&diag_read_buffer, diag_action, 11602 sizeof (diag_read_buffer) - 4, ioctl_mode) 11603 != 0) { 11604 return (DDI_FAILURE); 11605 } 11606 } 11607 break; 11608 11609 case MPTSAS_FW_DIAG_TYPE_RELEASE: 11610 if (length < sizeof (diag_release)) { 11611 *return_code = 11612 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER; 11613 status = DDI_FAILURE; 11614 break; 11615 } 11616 if (ddi_copyin(diag_action, &diag_release, 11617 sizeof (diag_release), ioctl_mode) != 0) { 11618 return (DDI_FAILURE); 11619 } 11620 status = mptsas_diag_release(mpt, &diag_release, 11621 return_code); 11622 break; 11623 11624 default: 11625 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER; 11626 status = DDI_FAILURE; 11627 break; 11628 } 11629 11630 if ((status == DDI_FAILURE) && 11631 (original_return_code == MPTSAS_FW_DIAG_NEW) && 11632 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) { 11633 status = DDI_SUCCESS; 11634 } 11635 11636 return (status); 11637 } 11638 11639 static int 11640 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode) 11641 { 
	int			status;
	mptsas_diag_action_t	driver_data;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Copy the user data to a driver data buffer.
	 */
	if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
	    mode) == 0) {
		/*
		 * Send diag action request if Action is valid
		 */
		if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
			status = mptsas_do_diag_action(mpt, driver_data.Action,
			    (void *)(uintptr_t)driver_data.PtrDiagAction,
			    driver_data.Length, &driver_data.ReturnCode,
			    mode);
			if (status == DDI_SUCCESS) {
				/*
				 * Hand the detailed return code back to
				 * the user structure; the function result
				 * becomes a plain errno-style value.
				 */
				if (ddi_copyout(&driver_data.ReturnCode,
				    &user_data->ReturnCode,
				    sizeof (user_data->ReturnCode), mode)
				    != 0) {
					status = EFAULT;
				} else {
					status = 0;
				}
			} else {
				status = EIO;
			}
		} else {
			status = EINVAL;
		}
	} else {
		status = EFAULT;
	}

	return (status);
}

/*
 * This routine handles the "event query" ioctl.
 */
static int
mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
    int *rval)
{
	int			status;
	mptsas_event_query_t	driverdata;
	uint8_t			i;

	driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;

	/* Snapshot the four event-mask words while holding m_mutex. */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < 4; i++) {
		driverdata.Types[i] = mpt->m_event_mask[i];
	}
	mutex_exit(&mpt->m_mutex);

	if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
		status = EFAULT;
	} else {
		*rval = MPTIOCTL_STATUS_GOOD;
		status = 0;
	}

	return (status);
}

/*
 * This routine handles the "event enable" ioctl.
 */
static int
mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
    int *rval)
{
	int			status;
	mptsas_event_enable_t	driverdata;
	uint8_t			i;

	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		/* Install the four event-mask words under m_mutex. */
		mutex_enter(&mpt->m_mutex);
		for (i = 0; i < 4; i++) {
			mpt->m_event_mask[i] = driverdata.Types[i];
		}
		mutex_exit(&mpt->m_mutex);

		*rval = MPTIOCTL_STATUS_GOOD;
		status = 0;
	} else {
		status = EFAULT;
	}
	return (status);
}

/*
 * This routine handles the "event report" ioctl.
11743 */ 11744 static int 11745 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode, 11746 int *rval) 11747 { 11748 int status; 11749 mptsas_event_report_t driverdata; 11750 11751 mutex_enter(&mpt->m_mutex); 11752 11753 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size), 11754 mode) == 0) { 11755 if (driverdata.Size >= sizeof (mpt->m_events)) { 11756 if (ddi_copyout(mpt->m_events, data->Events, 11757 sizeof (mpt->m_events), mode) != 0) { 11758 status = EFAULT; 11759 } else { 11760 if (driverdata.Size > sizeof (mpt->m_events)) { 11761 driverdata.Size = 11762 sizeof (mpt->m_events); 11763 if (ddi_copyout(&driverdata.Size, 11764 &data->Size, 11765 sizeof (driverdata.Size), 11766 mode) != 0) { 11767 status = EFAULT; 11768 } else { 11769 *rval = MPTIOCTL_STATUS_GOOD; 11770 status = 0; 11771 } 11772 } else { 11773 *rval = MPTIOCTL_STATUS_GOOD; 11774 status = 0; 11775 } 11776 } 11777 } else { 11778 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT; 11779 status = 0; 11780 } 11781 } else { 11782 status = EFAULT; 11783 } 11784 11785 mutex_exit(&mpt->m_mutex); 11786 return (status); 11787 } 11788 11789 static void 11790 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data) 11791 { 11792 int *reg_data; 11793 uint_t reglen; 11794 11795 /* 11796 * Lookup the 'reg' property and extract the other data 11797 */ 11798 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip, 11799 DDI_PROP_DONTPASS, "reg", ®_data, ®len) == 11800 DDI_PROP_SUCCESS) { 11801 /* 11802 * Extract the PCI data from the 'reg' property first DWORD. 
11803 * The entry looks like the following: 11804 * First DWORD: 11805 * Bits 0 - 7 8-bit Register number 11806 * Bits 8 - 10 3-bit Function number 11807 * Bits 11 - 15 5-bit Device number 11808 * Bits 16 - 23 8-bit Bus number 11809 * Bits 24 - 25 2-bit Address Space type identifier 11810 * 11811 */ 11812 adapter_data->PciInformation.u.bits.BusNumber = 11813 (reg_data[0] & 0x00FF0000) >> 16; 11814 adapter_data->PciInformation.u.bits.DeviceNumber = 11815 (reg_data[0] & 0x0000F800) >> 11; 11816 adapter_data->PciInformation.u.bits.FunctionNumber = 11817 (reg_data[0] & 0x00000700) >> 8; 11818 ddi_prop_free((void *)reg_data); 11819 } else { 11820 /* 11821 * If we can't determine the PCI data then we fill in FF's for 11822 * the data to indicate this. 11823 */ 11824 adapter_data->PCIDeviceHwId = 0xFFFFFFFF; 11825 adapter_data->MpiPortNumber = 0xFFFFFFFF; 11826 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF; 11827 } 11828 11829 /* 11830 * Saved in the mpt->m_fwversion 11831 */ 11832 adapter_data->MpiFirmwareVersion = mpt->m_fwversion; 11833 } 11834 11835 static void 11836 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data) 11837 { 11838 char *driver_verstr = MPTSAS_MOD_STRING; 11839 11840 mptsas_lookup_pci_data(mpt, adapter_data); 11841 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2; 11842 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid; 11843 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid; 11844 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid; 11845 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid; 11846 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr); 11847 adapter_data->BiosVersion = 0; 11848 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion); 11849 } 11850 11851 static void 11852 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info) 11853 { 11854 int *reg_data, i; 11855 uint_t reglen; 11856 11857 /* 11858 * Lookup the 'reg' property and extract the other data 11859 
*/ 11860 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip, 11861 DDI_PROP_DONTPASS, "reg", ®_data, ®len) == 11862 DDI_PROP_SUCCESS) { 11863 /* 11864 * Extract the PCI data from the 'reg' property first DWORD. 11865 * The entry looks like the following: 11866 * First DWORD: 11867 * Bits 8 - 10 3-bit Function number 11868 * Bits 11 - 15 5-bit Device number 11869 * Bits 16 - 23 8-bit Bus number 11870 */ 11871 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16; 11872 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11; 11873 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8; 11874 ddi_prop_free((void *)reg_data); 11875 } else { 11876 /* 11877 * If we can't determine the PCI info then we fill in FF's for 11878 * the data to indicate this. 11879 */ 11880 pci_info->BusNumber = 0xFFFFFFFF; 11881 pci_info->DeviceNumber = 0xFF; 11882 pci_info->FunctionNumber = 0xFF; 11883 } 11884 11885 /* 11886 * Now get the interrupt vector and the pci header. The vector can 11887 * only be 0 right now. The header is the first 256 bytes of config 11888 * space. 11889 */ 11890 pci_info->InterruptVector = 0; 11891 for (i = 0; i < sizeof (pci_info->PciHeader); i++) { 11892 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle, 11893 i); 11894 } 11895 } 11896 11897 static int 11898 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode) 11899 { 11900 int status = 0; 11901 mptsas_reg_access_t driverdata; 11902 11903 mutex_enter(&mpt->m_mutex); 11904 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) { 11905 switch (driverdata.Command) { 11906 /* 11907 * IO access is not supported. 11908 */ 11909 case REG_IO_READ: 11910 case REG_IO_WRITE: 11911 mptsas_log(mpt, CE_WARN, "IO access is not " 11912 "supported. 
Use memory access."); 11913 status = EINVAL; 11914 break; 11915 11916 case REG_MEM_READ: 11917 driverdata.RegData = ddi_get32(mpt->m_datap, 11918 (uint32_t *)(void *)mpt->m_reg + 11919 driverdata.RegOffset); 11920 if (ddi_copyout(&driverdata.RegData, 11921 &data->RegData, 11922 sizeof (driverdata.RegData), mode) != 0) { 11923 mptsas_log(mpt, CE_WARN, "Register " 11924 "Read Failed"); 11925 status = EFAULT; 11926 } 11927 break; 11928 11929 case REG_MEM_WRITE: 11930 ddi_put32(mpt->m_datap, 11931 (uint32_t *)(void *)mpt->m_reg + 11932 driverdata.RegOffset, 11933 driverdata.RegData); 11934 break; 11935 11936 default: 11937 status = EINVAL; 11938 break; 11939 } 11940 } else { 11941 status = EFAULT; 11942 } 11943 11944 mutex_exit(&mpt->m_mutex); 11945 return (status); 11946 } 11947 11948 static int 11949 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, 11950 int *rval) 11951 { 11952 int status = 0; 11953 mptsas_t *mpt; 11954 mptsas_update_flash_t flashdata; 11955 mptsas_pass_thru_t passthru_data; 11956 mptsas_adapter_data_t adapter_data; 11957 mptsas_pci_info_t pci_info; 11958 int copylen; 11959 11960 int iport_flag = 0; 11961 dev_info_t *dip = NULL; 11962 mptsas_phymask_t phymask = 0; 11963 struct devctl_iocdata *dcp = NULL; 11964 uint32_t slotstatus = 0; 11965 char *addr = NULL; 11966 mptsas_target_t *ptgt = NULL; 11967 11968 *rval = MPTIOCTL_STATUS_GOOD; 11969 if (secpolicy_sys_config(credp, B_FALSE) != 0) { 11970 return (EPERM); 11971 } 11972 11973 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev))); 11974 if (mpt == NULL) { 11975 /* 11976 * Called from iport node, get the states 11977 */ 11978 iport_flag = 1; 11979 dip = mptsas_get_dip_from_dev(dev, &phymask); 11980 if (dip == NULL) { 11981 return (ENXIO); 11982 } 11983 mpt = DIP2MPT(dip); 11984 } 11985 /* Make sure power level is D0 before accessing registers */ 11986 mutex_enter(&mpt->m_mutex); 11987 if (mpt->m_options & MPTSAS_OPT_PM) { 11988 (void) 
pm_busy_component(mpt->m_dip, 0); 11989 if (mpt->m_power_level != PM_LEVEL_D0) { 11990 mutex_exit(&mpt->m_mutex); 11991 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) != 11992 DDI_SUCCESS) { 11993 mptsas_log(mpt, CE_WARN, 11994 "mptsas%d: mptsas_ioctl: Raise power " 11995 "request failed.", mpt->m_instance); 11996 (void) pm_idle_component(mpt->m_dip, 0); 11997 return (ENXIO); 11998 } 11999 } else { 12000 mutex_exit(&mpt->m_mutex); 12001 } 12002 } else { 12003 mutex_exit(&mpt->m_mutex); 12004 } 12005 12006 if (iport_flag) { 12007 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval); 12008 if (status != 0) { 12009 goto out; 12010 } 12011 /* 12012 * The following code control the OK2RM LED, it doesn't affect 12013 * the ioctl return status. 12014 */ 12015 if ((cmd == DEVCTL_DEVICE_ONLINE) || 12016 (cmd == DEVCTL_DEVICE_OFFLINE)) { 12017 if (ndi_dc_allochdl((void *)data, &dcp) != 12018 NDI_SUCCESS) { 12019 goto out; 12020 } 12021 addr = ndi_dc_getaddr(dcp); 12022 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask); 12023 if (ptgt == NULL) { 12024 NDBG14(("mptsas_ioctl led control: tgt %s not " 12025 "found", addr)); 12026 ndi_dc_freehdl(dcp); 12027 goto out; 12028 } 12029 mutex_enter(&mpt->m_mutex); 12030 if (cmd == DEVCTL_DEVICE_ONLINE) { 12031 ptgt->m_tgt_unconfigured = 0; 12032 } else if (cmd == DEVCTL_DEVICE_OFFLINE) { 12033 ptgt->m_tgt_unconfigured = 1; 12034 } 12035 slotstatus = 0; 12036 #ifdef MPTSAS_GET_LED 12037 /* 12038 * The get led status can't get a valid/reasonable 12039 * state, so ignore the get led status, and write the 12040 * required value directly 12041 */ 12042 if (mptsas_get_led_status(mpt, ptgt, &slotstatus) != 12043 DDI_SUCCESS) { 12044 NDBG14(("mptsas_ioctl: get LED for tgt %s " 12045 "failed %x", addr, slotstatus)); 12046 slotstatus = 0; 12047 } 12048 NDBG14(("mptsas_ioctl: LED status %x for %s", 12049 slotstatus, addr)); 12050 #endif 12051 if (cmd == DEVCTL_DEVICE_OFFLINE) { 12052 slotstatus |= 12053 MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE; 
12054 } else { 12055 slotstatus &= 12056 ~MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE; 12057 } 12058 if (mptsas_set_led_status(mpt, ptgt, slotstatus) != 12059 DDI_SUCCESS) { 12060 NDBG14(("mptsas_ioctl: set LED for tgt %s " 12061 "failed %x", addr, slotstatus)); 12062 } 12063 mutex_exit(&mpt->m_mutex); 12064 ndi_dc_freehdl(dcp); 12065 } 12066 goto out; 12067 } 12068 switch (cmd) { 12069 case MPTIOCTL_UPDATE_FLASH: 12070 if (ddi_copyin((void *)data, &flashdata, 12071 sizeof (struct mptsas_update_flash), mode)) { 12072 status = EFAULT; 12073 break; 12074 } 12075 12076 mutex_enter(&mpt->m_mutex); 12077 if (mptsas_update_flash(mpt, 12078 (caddr_t)(long)flashdata.PtrBuffer, 12079 flashdata.ImageSize, flashdata.ImageType, mode)) { 12080 status = EFAULT; 12081 } 12082 12083 /* 12084 * Reset the chip to start using the new 12085 * firmware. Reset if failed also. 12086 */ 12087 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET; 12088 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) { 12089 status = EFAULT; 12090 } 12091 mutex_exit(&mpt->m_mutex); 12092 break; 12093 case MPTIOCTL_PASS_THRU: 12094 /* 12095 * The user has requested to pass through a command to 12096 * be executed by the MPT firmware. Call our routine 12097 * which does this. Only allow one passthru IOCTL at 12098 * one time. Other threads will block on 12099 * m_passthru_mutex, which is of adaptive variant. 12100 */ 12101 if (ddi_copyin((void *)data, &passthru_data, 12102 sizeof (mptsas_pass_thru_t), mode)) { 12103 status = EFAULT; 12104 break; 12105 } 12106 mutex_enter(&mpt->m_passthru_mutex); 12107 mutex_enter(&mpt->m_mutex); 12108 status = mptsas_pass_thru(mpt, &passthru_data, mode); 12109 mutex_exit(&mpt->m_mutex); 12110 mutex_exit(&mpt->m_passthru_mutex); 12111 12112 break; 12113 case MPTIOCTL_GET_ADAPTER_DATA: 12114 /* 12115 * The user has requested to read adapter data. Call 12116 * our routine which does this. 
12117 */ 12118 bzero(&adapter_data, sizeof (mptsas_adapter_data_t)); 12119 if (ddi_copyin((void *)data, (void *)&adapter_data, 12120 sizeof (mptsas_adapter_data_t), mode)) { 12121 status = EFAULT; 12122 break; 12123 } 12124 if (adapter_data.StructureLength >= 12125 sizeof (mptsas_adapter_data_t)) { 12126 adapter_data.StructureLength = (uint32_t) 12127 sizeof (mptsas_adapter_data_t); 12128 copylen = sizeof (mptsas_adapter_data_t); 12129 mutex_enter(&mpt->m_mutex); 12130 mptsas_read_adapter_data(mpt, &adapter_data); 12131 mutex_exit(&mpt->m_mutex); 12132 } else { 12133 adapter_data.StructureLength = (uint32_t) 12134 sizeof (mptsas_adapter_data_t); 12135 copylen = sizeof (adapter_data.StructureLength); 12136 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT; 12137 } 12138 if (ddi_copyout((void *)(&adapter_data), (void *)data, 12139 copylen, mode) != 0) { 12140 status = EFAULT; 12141 } 12142 break; 12143 case MPTIOCTL_GET_PCI_INFO: 12144 /* 12145 * The user has requested to read pci info. Call 12146 * our routine which does this. 12147 */ 12148 bzero(&pci_info, sizeof (mptsas_pci_info_t)); 12149 mutex_enter(&mpt->m_mutex); 12150 mptsas_read_pci_info(mpt, &pci_info); 12151 mutex_exit(&mpt->m_mutex); 12152 if (ddi_copyout((void *)(&pci_info), (void *)data, 12153 sizeof (mptsas_pci_info_t), mode) != 0) { 12154 status = EFAULT; 12155 } 12156 break; 12157 case MPTIOCTL_RESET_ADAPTER: 12158 mutex_enter(&mpt->m_mutex); 12159 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET; 12160 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) { 12161 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL " 12162 "failed"); 12163 status = EFAULT; 12164 } 12165 mutex_exit(&mpt->m_mutex); 12166 break; 12167 case MPTIOCTL_DIAG_ACTION: 12168 /* 12169 * The user has done a diag buffer action. Call our 12170 * routine which does this. Only allow one diag action 12171 * at one time. 
12172 */ 12173 mutex_enter(&mpt->m_mutex); 12174 if (mpt->m_diag_action_in_progress) { 12175 mutex_exit(&mpt->m_mutex); 12176 return (EBUSY); 12177 } 12178 mpt->m_diag_action_in_progress = 1; 12179 status = mptsas_diag_action(mpt, 12180 (mptsas_diag_action_t *)data, mode); 12181 mpt->m_diag_action_in_progress = 0; 12182 mutex_exit(&mpt->m_mutex); 12183 break; 12184 case MPTIOCTL_EVENT_QUERY: 12185 /* 12186 * The user has done an event query. Call our routine 12187 * which does this. 12188 */ 12189 status = mptsas_event_query(mpt, 12190 (mptsas_event_query_t *)data, mode, rval); 12191 break; 12192 case MPTIOCTL_EVENT_ENABLE: 12193 /* 12194 * The user has done an event enable. Call our routine 12195 * which does this. 12196 */ 12197 status = mptsas_event_enable(mpt, 12198 (mptsas_event_enable_t *)data, mode, rval); 12199 break; 12200 case MPTIOCTL_EVENT_REPORT: 12201 /* 12202 * The user has done an event report. Call our routine 12203 * which does this. 12204 */ 12205 status = mptsas_event_report(mpt, 12206 (mptsas_event_report_t *)data, mode, rval); 12207 break; 12208 case MPTIOCTL_REG_ACCESS: 12209 /* 12210 * The user has requested register access. Call our 12211 * routine which does this. 12212 */ 12213 status = mptsas_reg_access(mpt, 12214 (mptsas_reg_access_t *)data, mode); 12215 break; 12216 default: 12217 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, 12218 rval); 12219 break; 12220 } 12221 12222 out: 12223 if (mpt->m_options & MPTSAS_OPT_PM) 12224 (void) pm_idle_component(mpt->m_dip, 0); 12225 return (status); 12226 } 12227 12228 int 12229 mptsas_restart_ioc(mptsas_t *mpt) 12230 { 12231 int rval = DDI_SUCCESS; 12232 mptsas_target_t *ptgt = NULL; 12233 12234 ASSERT(mutex_owned(&mpt->m_mutex)); 12235 12236 /* 12237 * Set a flag telling I/O path that we're processing a reset. This is 12238 * needed because after the reset is complete, the hash table still 12239 * needs to be rebuilt. 
If I/Os are started before the hash table is 12240 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked 12241 * so that they can be retried. 12242 */ 12243 mpt->m_in_reset = TRUE; 12244 12245 /* 12246 * Set all throttles to HOLD 12247 */ 12248 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl, 12249 MPTSAS_HASH_FIRST); 12250 while (ptgt != NULL) { 12251 mutex_enter(&ptgt->m_tgt_intr_mutex); 12252 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE); 12253 mutex_exit(&ptgt->m_tgt_intr_mutex); 12254 12255 ptgt = (mptsas_target_t *)mptsas_hash_traverse( 12256 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT); 12257 } 12258 12259 /* 12260 * Disable interrupts 12261 */ 12262 MPTSAS_DISABLE_INTR(mpt); 12263 12264 /* 12265 * Abort all commands: outstanding commands, commands in waitq 12266 */ 12267 mptsas_flush_hba(mpt); 12268 12269 /* 12270 * Reinitialize the chip. 12271 */ 12272 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) { 12273 rval = DDI_FAILURE; 12274 } 12275 12276 /* 12277 * Enable interrupts again 12278 */ 12279 MPTSAS_ENABLE_INTR(mpt); 12280 12281 /* 12282 * If mptsas_init_chip was successful, update the driver data. 
12283 */ 12284 if (rval == DDI_SUCCESS) { 12285 mptsas_update_driver_data(mpt); 12286 } 12287 12288 /* 12289 * Reset the throttles 12290 */ 12291 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl, 12292 MPTSAS_HASH_FIRST); 12293 while (ptgt != NULL) { 12294 mutex_enter(&ptgt->m_tgt_intr_mutex); 12295 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE); 12296 mutex_exit(&ptgt->m_tgt_intr_mutex); 12297 12298 ptgt = (mptsas_target_t *)mptsas_hash_traverse( 12299 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT); 12300 } 12301 12302 mptsas_doneq_empty(mpt); 12303 mptsas_restart_hba(mpt); 12304 12305 if (rval != DDI_SUCCESS) { 12306 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE); 12307 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST); 12308 } 12309 12310 /* 12311 * Clear the reset flag so that I/Os can continue. 12312 */ 12313 mpt->m_in_reset = FALSE; 12314 12315 return (rval); 12316 } 12317 12318 static int 12319 mptsas_init_chip(mptsas_t *mpt, int first_time) 12320 { 12321 ddi_dma_cookie_t cookie; 12322 uint32_t i; 12323 int rval; 12324 12325 /* 12326 * Check to see if the firmware image is valid 12327 */ 12328 if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) & 12329 MPI2_DIAG_FLASH_BAD_SIG) { 12330 mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!"); 12331 goto fail; 12332 } 12333 12334 /* 12335 * Reset the chip 12336 */ 12337 rval = mptsas_ioc_reset(mpt); 12338 if (rval == MPTSAS_RESET_FAIL) { 12339 mptsas_log(mpt, CE_WARN, "hard reset failed!"); 12340 goto fail; 12341 } 12342 12343 if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) { 12344 goto mur; 12345 } 12346 /* 12347 * Setup configuration space 12348 */ 12349 if (mptsas_config_space_init(mpt) == FALSE) { 12350 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init " 12351 "failed!"); 12352 goto fail; 12353 } 12354 12355 /* 12356 * IOC facts can change after a diag reset so all buffers that are 12357 * based on these numbers must be de-allocated and re-allocated. 
Get 12358 * new IOC facts each time chip is initialized. 12359 */ 12360 if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) { 12361 mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed"); 12362 goto fail; 12363 } 12364 12365 if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) { 12366 goto fail; 12367 } 12368 /* 12369 * Allocate request message frames, reply free queue, reply descriptor 12370 * post queue, and reply message frames using latest IOC facts. 12371 */ 12372 if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) { 12373 mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed"); 12374 goto fail; 12375 } 12376 if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) { 12377 mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!"); 12378 goto fail; 12379 } 12380 if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) { 12381 mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!"); 12382 goto fail; 12383 } 12384 if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) { 12385 mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!"); 12386 goto fail; 12387 } 12388 12389 mur: 12390 /* 12391 * Re-Initialize ioc to operational state 12392 */ 12393 if (mptsas_ioc_init(mpt) == DDI_FAILURE) { 12394 mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed"); 12395 goto fail; 12396 } 12397 12398 mptsas_alloc_reply_args(mpt); 12399 12400 /* 12401 * Initialize reply post index. Reply free index is initialized after 12402 * the next loop. 12403 */ 12404 mpt->m_post_index = 0; 12405 12406 /* 12407 * Initialize the Reply Free Queue with the physical addresses of our 12408 * reply frames. 
12409 */ 12410 cookie.dmac_address = mpt->m_reply_frame_dma_addr; 12411 for (i = 0; i < mpt->m_max_replies; i++) { 12412 ddi_put32(mpt->m_acc_free_queue_hdl, 12413 &((uint32_t *)(void *)mpt->m_free_queue)[i], 12414 cookie.dmac_address); 12415 cookie.dmac_address += mpt->m_reply_frame_size; 12416 } 12417 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0, 12418 DDI_DMA_SYNC_FORDEV); 12419 12420 /* 12421 * Initialize the reply free index to one past the last frame on the 12422 * queue. This will signify that the queue is empty to start with. 12423 */ 12424 mpt->m_free_index = i; 12425 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i); 12426 12427 /* 12428 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's. 12429 */ 12430 for (i = 0; i < mpt->m_post_queue_depth; i++) { 12431 ddi_put64(mpt->m_acc_post_queue_hdl, 12432 &((uint64_t *)(void *)mpt->m_post_queue)[i], 12433 0xFFFFFFFFFFFFFFFF); 12434 } 12435 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0, 12436 DDI_DMA_SYNC_FORDEV); 12437 12438 /* 12439 * Enable ports 12440 */ 12441 if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) { 12442 mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed"); 12443 goto fail; 12444 } 12445 12446 /* 12447 * enable events 12448 */ 12449 if (mptsas_ioc_enable_event_notification(mpt)) { 12450 goto fail; 12451 } 12452 12453 /* 12454 * We need checks in attach and these. 12455 * chip_init is called in mult. 
places 12456 */ 12457 12458 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) != 12459 DDI_SUCCESS) || 12460 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) != 12461 DDI_SUCCESS) || 12462 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) != 12463 DDI_SUCCESS) || 12464 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) != 12465 DDI_SUCCESS) || 12466 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) != 12467 DDI_SUCCESS)) { 12468 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED); 12469 goto fail; 12470 } 12471 12472 /* Check all acc handles */ 12473 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) || 12474 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) != 12475 DDI_SUCCESS) || 12476 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) != 12477 DDI_SUCCESS) || 12478 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) != 12479 DDI_SUCCESS) || 12480 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) != 12481 DDI_SUCCESS) || 12482 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) != 12483 DDI_SUCCESS) || 12484 (mptsas_check_acc_handle(mpt->m_config_handle) != 12485 DDI_SUCCESS)) { 12486 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED); 12487 goto fail; 12488 } 12489 12490 return (DDI_SUCCESS); 12491 12492 fail: 12493 return (DDI_FAILURE); 12494 } 12495 12496 static int 12497 mptsas_get_pci_cap(mptsas_t *mpt) 12498 { 12499 ushort_t caps_ptr, cap, cap_count; 12500 12501 if (mpt->m_config_handle == NULL) 12502 return (FALSE); 12503 /* 12504 * Check if capabilities list is supported and if so, 12505 * get initial capabilities pointer and clear bits 0,1. 12506 */ 12507 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) 12508 & PCI_STAT_CAP) { 12509 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle, 12510 PCI_CONF_CAP_PTR), 4); 12511 } else { 12512 caps_ptr = PCI_CAP_NEXT_PTR_NULL; 12513 } 12514 12515 /* 12516 * Walk capabilities if supported. 
12517 */ 12518 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) { 12519 12520 /* 12521 * Check that we haven't exceeded the maximum number of 12522 * capabilities and that the pointer is in a valid range. 12523 */ 12524 if (++cap_count > 48) { 12525 mptsas_log(mpt, CE_WARN, 12526 "too many device capabilities.\n"); 12527 break; 12528 } 12529 if (caps_ptr < 64) { 12530 mptsas_log(mpt, CE_WARN, 12531 "capabilities pointer 0x%x out of range.\n", 12532 caps_ptr); 12533 break; 12534 } 12535 12536 /* 12537 * Get next capability and check that it is valid. 12538 * For now, we only support power management. 12539 */ 12540 cap = pci_config_get8(mpt->m_config_handle, caps_ptr); 12541 switch (cap) { 12542 case PCI_CAP_ID_PM: 12543 mptsas_log(mpt, CE_NOTE, 12544 "?mptsas%d supports power management.\n", 12545 mpt->m_instance); 12546 mpt->m_options |= MPTSAS_OPT_PM; 12547 12548 /* Save PMCSR offset */ 12549 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR; 12550 break; 12551 /* 12552 * The following capabilities are valid. Any others 12553 * will cause a message to be logged. 12554 */ 12555 case PCI_CAP_ID_VPD: 12556 case PCI_CAP_ID_MSI: 12557 case PCI_CAP_ID_PCIX: 12558 case PCI_CAP_ID_PCI_E: 12559 case PCI_CAP_ID_MSI_X: 12560 break; 12561 default: 12562 mptsas_log(mpt, CE_NOTE, 12563 "?mptsas%d unrecognized capability " 12564 "0x%x.\n", mpt->m_instance, cap); 12565 break; 12566 } 12567 12568 /* 12569 * Get next capabilities pointer and clear bits 0,1. 
12570 */ 12571 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle, 12572 (caps_ptr + PCI_CAP_NEXT_PTR)), 4); 12573 } 12574 return (TRUE); 12575 } 12576 12577 static int 12578 mptsas_init_pm(mptsas_t *mpt) 12579 { 12580 char pmc_name[16]; 12581 char *pmc[] = { 12582 NULL, 12583 "0=Off (PCI D3 State)", 12584 "3=On (PCI D0 State)", 12585 NULL 12586 }; 12587 uint16_t pmcsr_stat; 12588 12589 if (mptsas_get_pci_cap(mpt) == FALSE) { 12590 return (DDI_FAILURE); 12591 } 12592 /* 12593 * If PCI's capability does not support PM, then don't need 12594 * to registe the pm-components 12595 */ 12596 if (!(mpt->m_options & MPTSAS_OPT_PM)) 12597 return (DDI_SUCCESS); 12598 /* 12599 * If power management is supported by this chip, create 12600 * pm-components property for the power management framework 12601 */ 12602 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance); 12603 pmc[0] = pmc_name; 12604 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip, 12605 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) { 12606 mutex_enter(&mpt->m_intr_mutex); 12607 mpt->m_options &= ~MPTSAS_OPT_PM; 12608 mutex_exit(&mpt->m_intr_mutex); 12609 mptsas_log(mpt, CE_WARN, 12610 "mptsas%d: pm-component property creation failed.", 12611 mpt->m_instance); 12612 return (DDI_FAILURE); 12613 } 12614 12615 /* 12616 * Power on device. 
12617 */ 12618 (void) pm_busy_component(mpt->m_dip, 0); 12619 pmcsr_stat = pci_config_get16(mpt->m_config_handle, 12620 mpt->m_pmcsr_offset); 12621 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) { 12622 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device", 12623 mpt->m_instance); 12624 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset, 12625 PCI_PMCSR_D0); 12626 } 12627 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) { 12628 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed"); 12629 return (DDI_FAILURE); 12630 } 12631 mutex_enter(&mpt->m_intr_mutex); 12632 mpt->m_power_level = PM_LEVEL_D0; 12633 mutex_exit(&mpt->m_intr_mutex); 12634 /* 12635 * Set pm idle delay. 12636 */ 12637 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY, 12638 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT); 12639 12640 return (DDI_SUCCESS); 12641 } 12642 12643 static int 12644 mptsas_register_intrs(mptsas_t *mpt) 12645 { 12646 dev_info_t *dip; 12647 int intr_types; 12648 12649 dip = mpt->m_dip; 12650 12651 /* Get supported interrupt types */ 12652 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) { 12653 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types " 12654 "failed\n"); 12655 return (FALSE); 12656 } 12657 12658 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types)); 12659 12660 /* 12661 * Try MSI, but fall back to FIXED 12662 */ 12663 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) { 12664 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) { 12665 NDBG0(("Using MSI interrupt type")); 12666 mpt->m_intr_type = DDI_INTR_TYPE_MSI; 12667 return (TRUE); 12668 } 12669 } 12670 if (intr_types & DDI_INTR_TYPE_FIXED) { 12671 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) { 12672 NDBG0(("Using FIXED interrupt type")); 12673 mpt->m_intr_type = DDI_INTR_TYPE_FIXED; 12674 return (TRUE); 12675 } else { 12676 NDBG0(("FIXED interrupt registration failed")); 12677 
return (FALSE); 12678 } 12679 } 12680 12681 return (FALSE); 12682 } 12683 12684 static void 12685 mptsas_unregister_intrs(mptsas_t *mpt) 12686 { 12687 mptsas_rem_intrs(mpt); 12688 } 12689 12690 /* 12691 * mptsas_add_intrs: 12692 * 12693 * Register FIXED or MSI interrupts. 12694 */ 12695 static int 12696 mptsas_add_intrs(mptsas_t *mpt, int intr_type) 12697 { 12698 dev_info_t *dip = mpt->m_dip; 12699 int avail, actual, count = 0; 12700 int i, flag, ret; 12701 12702 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type)); 12703 12704 /* Get number of interrupts */ 12705 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 12706 if ((ret != DDI_SUCCESS) || (count <= 0)) { 12707 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, " 12708 "ret %d count %d\n", ret, count); 12709 12710 return (DDI_FAILURE); 12711 } 12712 12713 /* Get number of available interrupts */ 12714 ret = ddi_intr_get_navail(dip, intr_type, &avail); 12715 if ((ret != DDI_SUCCESS) || (avail == 0)) { 12716 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, " 12717 "ret %d avail %d\n", ret, avail); 12718 12719 return (DDI_FAILURE); 12720 } 12721 12722 if (avail < count) { 12723 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, " 12724 "navail() returned %d", count, avail); 12725 } 12726 12727 /* Mpt only have one interrupt routine */ 12728 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) { 12729 count = 1; 12730 } 12731 12732 /* Allocate an array of interrupt handles */ 12733 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t); 12734 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP); 12735 12736 flag = DDI_INTR_ALLOC_NORMAL; 12737 12738 /* call ddi_intr_alloc() */ 12739 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0, 12740 count, &actual, flag); 12741 12742 if ((ret != DDI_SUCCESS) || (actual == 0)) { 12743 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n", 12744 ret); 12745 kmem_free(mpt->m_htable, mpt->m_intr_size); 12746 return (DDI_FAILURE); 12747 } 
12748 12749 /* use interrupt count returned or abort? */ 12750 if (actual < count) { 12751 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n", 12752 count, actual); 12753 } 12754 12755 mpt->m_intr_cnt = actual; 12756 12757 /* 12758 * Get priority for first msi, assume remaining are all the same 12759 */ 12760 if ((ret = ddi_intr_get_pri(mpt->m_htable[0], 12761 &mpt->m_intr_pri)) != DDI_SUCCESS) { 12762 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret); 12763 12764 /* Free already allocated intr */ 12765 for (i = 0; i < actual; i++) { 12766 (void) ddi_intr_free(mpt->m_htable[i]); 12767 } 12768 12769 kmem_free(mpt->m_htable, mpt->m_intr_size); 12770 return (DDI_FAILURE); 12771 } 12772 12773 /* Test for high level mutex */ 12774 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) { 12775 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: " 12776 "Hi level interrupt not supported\n"); 12777 12778 /* Free already allocated intr */ 12779 for (i = 0; i < actual; i++) { 12780 (void) ddi_intr_free(mpt->m_htable[i]); 12781 } 12782 12783 kmem_free(mpt->m_htable, mpt->m_intr_size); 12784 return (DDI_FAILURE); 12785 } 12786 12787 /* Call ddi_intr_add_handler() */ 12788 for (i = 0; i < actual; i++) { 12789 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr, 12790 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 12791 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() " 12792 "failed %d\n", ret); 12793 12794 /* Free already allocated intr */ 12795 for (i = 0; i < actual; i++) { 12796 (void) ddi_intr_free(mpt->m_htable[i]); 12797 } 12798 12799 kmem_free(mpt->m_htable, mpt->m_intr_size); 12800 return (DDI_FAILURE); 12801 } 12802 } 12803 12804 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap)) 12805 != DDI_SUCCESS) { 12806 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret); 12807 12808 /* Free already allocated intr */ 12809 for (i = 0; i < actual; i++) { 12810 (void) ddi_intr_free(mpt->m_htable[i]); 12811 } 12812 12813 
kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/*
	 * Enable interrupts
	 */
	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < mpt->m_intr_cnt; i++) {
			(void) ddi_intr_enable(mpt->m_htable[i]);
		}
	}
	return (DDI_SUCCESS);
}

/*
 * mptsas_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts.
 *
 * Disables every interrupt (block-disable when DDI_INTR_FLAG_BLOCK is
 * capable), removes the registered handlers, frees the handles and
 * releases the handle table allocated by mptsas_add_intrs().
 */
static void
mptsas_rem_intrs(mptsas_t *mpt)
{
	int	i;

	NDBG6(("mptsas_rem_intrs"));

	/* Disable all interrupts */
	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
	} else {
		for (i = 0; i < mpt->m_intr_cnt; i++) {
			(void) ddi_intr_disable(mpt->m_htable[i]);
		}
	}

	/* Remove each handler, then free its handle */
	for (i = 0; i < mpt->m_intr_cnt; i++) {
		(void) ddi_intr_remove_handler(mpt->m_htable[i]);
		(void) ddi_intr_free(mpt->m_htable[i]);
	}

	kmem_free(mpt->m_htable, mpt->m_intr_size);
}

/*
 * The IO fault service error handling callback function
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

/*
 * mptsas_fm_init - initialize fma capabilities and register with IO
 * fault services.
 */
static void
mptsas_fm_init(mptsas_t *mpt)
{
	/*
	 * Need to change iblock to priority for new MSI intr
	 */
	ddi_iblock_cookie_t	fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (mpt->m_fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * mpt->m_fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */
		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_setup(mpt->m_dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_register(mpt->m_dip,
			    mptsas_fm_error_cb, (void *) mpt);
		}
	}
}

/*
 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
 * fault services.  Mirrors mptsas_fm_init(), undoing each step in
 * reverse order and restoring the default access/DMA attributes.
 */
static void
mptsas_fm_fini(mptsas_t *mpt)
{
	/* Only unregister FMA capabilities if registered */
	if (mpt->m_fm_capabilities) {

		/*
		 * Un-register error callback if error callback capable.
		 */

		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_unregister(mpt->m_dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */

		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_teardown(mpt->m_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(mpt->m_dip);

		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;

	}
}

/*
 * Fetch the FMA error status of a register access handle; returns
 * DDI_FAILURE for a NULL handle, otherwise the handle's fme_status.
 */
int
mptsas_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t	de;

	if (handle == NULL)
		return (DDI_FAILURE);
	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}

/*
 * Fetch the FMA error status of a DMA handle; returns DDI_FAILURE for
 * a NULL handle, otherwise the handle's fme_status.
 */
int
mptsas_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t	de;

	if (handle == NULL)
		return (DDI_FAILURE);
	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}

/*
 * Post a "DDI_FM_DEVICE.<detail>" ereport for this instance, if the
 * instance is ereport capable.
 */
void
mptsas_fm_ereport(mptsas_t *mpt, char *detail)
{
	uint64_t	ena;
	char		buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
		ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}

/*
 * Read SAS device page 0 for the device selected by page_address and,
 * when it is a supported SSP/SATA/ATAPI target (and not a RAID phys
 * disk), allocate a target structure returned through *pptgt.
 * Returns one of the DEV_INFO_* codes; *dev_handle is updated from
 * the page.  NOTE(review): appears to be called with m_mutex held —
 * it drops and re-takes the mutex around the SATA GUID inquiry below.
 */
static int
mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
    uint16_t *dev_handle, mptsas_target_t **pptgt)
{
	int		rval;
	uint32_t	dev_info;
	uint64_t	sas_wwn;
	mptsas_phymask_t phymask;
	uint8_t		physport, phynum, config, disk;
	mptsas_slots_t
*slots = mpt->m_active; 13008 uint64_t devicename; 13009 uint16_t pdev_hdl; 13010 mptsas_target_t *tmp_tgt = NULL; 13011 uint16_t bay_num, enclosure; 13012 13013 ASSERT(*pptgt == NULL); 13014 13015 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle, 13016 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl, 13017 &bay_num, &enclosure); 13018 if (rval != DDI_SUCCESS) { 13019 rval = DEV_INFO_FAIL_PAGE0; 13020 return (rval); 13021 } 13022 13023 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET | 13024 MPI2_SAS_DEVICE_INFO_SATA_DEVICE | 13025 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) { 13026 rval = DEV_INFO_WRONG_DEVICE_TYPE; 13027 return (rval); 13028 } 13029 13030 /* 13031 * Check if the dev handle is for a Phys Disk. If so, set return value 13032 * and exit. Don't add Phys Disks to hash. 13033 */ 13034 for (config = 0; config < slots->m_num_raid_configs; config++) { 13035 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) { 13036 if (*dev_handle == slots->m_raidconfig[config]. 13037 m_physdisk_devhdl[disk]) { 13038 rval = DEV_INFO_PHYS_DISK; 13039 return (rval); 13040 } 13041 } 13042 } 13043 13044 /* 13045 * Get SATA Device Name from SAS device page0 for 13046 * sata device, if device name doesn't exist, set m_sas_wwn to 13047 * 0 for direct attached SATA. For the device behind the expander 13048 * we still can use STP address assigned by expander. 
13049 */ 13050 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE | 13051 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) { 13052 mutex_exit(&mpt->m_mutex); 13053 /* alloc a tmp_tgt to send the cmd */ 13054 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), 13055 KM_SLEEP); 13056 tmp_tgt->m_devhdl = *dev_handle; 13057 tmp_tgt->m_deviceinfo = dev_info; 13058 tmp_tgt->m_qfull_retries = QFULL_RETRIES; 13059 tmp_tgt->m_qfull_retry_interval = 13060 drv_usectohz(QFULL_RETRY_INTERVAL * 1000); 13061 tmp_tgt->m_t_throttle = MAX_THROTTLE; 13062 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0); 13063 kmem_free(tmp_tgt, sizeof (struct mptsas_target)); 13064 mutex_enter(&mpt->m_mutex); 13065 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) { 13066 sas_wwn = devicename; 13067 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) { 13068 sas_wwn = 0; 13069 } 13070 } 13071 13072 phymask = mptsas_physport_to_phymask(mpt, physport); 13073 *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn, 13074 dev_info, phymask, phynum, mpt); 13075 if (*pptgt == NULL) { 13076 mptsas_log(mpt, CE_WARN, "Failed to allocated target" 13077 "structure!"); 13078 rval = DEV_INFO_FAIL_ALLOC; 13079 return (rval); 13080 } 13081 (*pptgt)->m_enclosure = enclosure; 13082 (*pptgt)->m_slot_num = bay_num; 13083 return (DEV_INFO_SUCCESS); 13084 } 13085 13086 uint64_t 13087 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun) 13088 { 13089 uint64_t sata_guid = 0, *pwwn = NULL; 13090 int target = ptgt->m_devhdl; 13091 uchar_t *inq83 = NULL; 13092 int inq83_len = 0xFF; 13093 uchar_t *dblk = NULL; 13094 int inq83_retry = 3; 13095 int rval = DDI_FAILURE; 13096 13097 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 13098 13099 inq83_retry: 13100 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83, 13101 inq83_len, NULL, 1); 13102 if (rval != DDI_SUCCESS) { 13103 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page " 13104 "0x83 for target:%x, lun:%x failed!", target, lun); 13105 goto out; 
13106 } 13107 /* According to SAT2, the first descriptor is logic unit name */ 13108 dblk = &inq83[4]; 13109 if ((dblk[1] & 0x30) != 0) { 13110 mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated."); 13111 goto out; 13112 } 13113 pwwn = (uint64_t *)(void *)(&dblk[4]); 13114 if ((dblk[4] & 0xf0) == 0x50) { 13115 sata_guid = BE_64(*pwwn); 13116 goto out; 13117 } else if (dblk[4] == 'A') { 13118 NDBG20(("SATA drive has no NAA format GUID.")); 13119 goto out; 13120 } else { 13121 /* The data is not ready, wait and retry */ 13122 inq83_retry--; 13123 if (inq83_retry <= 0) { 13124 goto out; 13125 } 13126 NDBG20(("The GUID is not ready, retry...")); 13127 delay(1 * drv_usectohz(1000000)); 13128 goto inq83_retry; 13129 } 13130 out: 13131 kmem_free(inq83, inq83_len); 13132 return (sata_guid); 13133 } 13134 13135 static int 13136 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page, 13137 unsigned char *buf, int len, int *reallen, uchar_t evpd) 13138 { 13139 uchar_t cdb[CDB_GROUP0]; 13140 struct scsi_address ap; 13141 struct buf *data_bp = NULL; 13142 int resid = 0; 13143 int ret = DDI_FAILURE; 13144 13145 ASSERT(len <= 0xffff); 13146 13147 ap.a_target = MPTSAS_INVALID_DEVHDL; 13148 ap.a_lun = (uchar_t)(lun); 13149 ap.a_hba_tran = mpt->m_tran; 13150 13151 data_bp = scsi_alloc_consistent_buf(&ap, 13152 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL); 13153 if (data_bp == NULL) { 13154 return (ret); 13155 } 13156 bzero(cdb, CDB_GROUP0); 13157 cdb[0] = SCMD_INQUIRY; 13158 cdb[1] = evpd; 13159 cdb[2] = page; 13160 cdb[3] = (len & 0xff00) >> 8; 13161 cdb[4] = (len & 0x00ff); 13162 cdb[5] = 0; 13163 13164 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp, 13165 &resid); 13166 if (ret == DDI_SUCCESS) { 13167 if (reallen) { 13168 *reallen = len - resid; 13169 } 13170 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len); 13171 } 13172 if (data_bp) { 13173 scsi_free_consistent_buf(data_bp); 13174 } 13175 return (ret); 13176 } 13177 13178 
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt		*pktp = NULL;
	scsi_hba_tran_t		*tran_clone = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	int			ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* polled (synchronous) execution — no interrupt completion */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* treat CHECK CONDITION status as failure */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* common cleanup for every exit path */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
/*
 * Parse a unit-address string of the form "w<WWN>,<lun>" or
 * "p<phy>,<lun>" (hex fields).  On success stores the WWN in *wwid
 * (for the 'w' form) or the phy number in *phy (for the 'p' form) and
 * the LUN in *lun, returning DDI_SUCCESS.
 */
static int
mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
{
	char	*cp = NULL;
	char	*ptr = NULL;
	size_t	s = 0;
	char	*wwid_str = NULL;
	char	*lun_str = NULL;
	long	lunnum;
	long	phyid = -1;
	int	rc = DDI_FAILURE;

	ptr = name;
	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
	ptr++;
	if ((cp = strchr(ptr, ',')) == NULL) {
		return (DDI_FAILURE);
	}

	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	/* copy the wwid/phy portion (before the comma) */
	bcopy(ptr, wwid_str, s);
	wwid_str[s] = '\0';

	ptr = ++cp;

	/* locate the end of the lun portion */
	if ((cp = strchr(ptr, '\0')) == NULL) {
		goto out;
	}
	lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, lun_str, s);
	lun_str[s] = '\0';

	if (name[0] == 'p') {
		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
	} else {
		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
	}
	if (rc != DDI_SUCCESS)
		goto out;

	if (phyid != -1) {
		ASSERT(phyid < MPTSAS_MAX_PHYS);
		*phy = (uint8_t)phyid;
	}
	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
	if (rc != 0)
		goto out;

	*lun = (int)lunnum;
	rc = DDI_SUCCESS;
out:
	if (wwid_str)
		kmem_free(wwid_str, SCSI_MAXNAMELEN);
	if (lun_str)
		kmem_free(lun_str, SCSI_MAXNAMELEN);

	return (rc);
}

/*
 * mptsas_parse_smp_name() is to parse sas wwn string
 * which format is "wWWN"
 */
static int
mptsas_parse_smp_name(char *name, uint64_t *wwn)
{
	char	*ptr = name;

	if (*ptr != 'w') {
		return (DDI_FAILURE);
	}

	ptr++;
	if (scsi_wwnstr_to_wwn(ptr, wwn)) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Bus-config entry point for the iport: configures a single named
 * child (BUS_CONFIG_ONE, SMP / wWWN / pPHY forms) or all children
 * (BUS_CONFIG_DRIVER / BUS_CONFIG_ALL), then hands off to
 * ndi_busop_bus_config() unless the config-one path already did the
 * work for an OBP CD/DVD boot path.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1
= 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	/* only iport nodes are configured here */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				/* append ",0" and re-find the address part */
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				/* hold the child since we skip busop config */
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	if ((ret == NDI_SUCCESS) && bconfig) {
		/* use the canonicalized name if one was built above */
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ?
arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}

/*
 * Probe the given lun of a target with a standard INQUIRY and, when it
 * responds as a valid LUN, create the child device node via
 * mptsas_create_lun().  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
    mptsas_target_t *ptgt)
{
	int			rval = DDI_FAILURE;
	struct scsi_inquiry	*sd_inq = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);

	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);

	rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
	    SUN_INQSIZE, 0, (uchar_t)0);

	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
		rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
	} else {
		rval = DDI_FAILURE;
	}

	kmem_free(sd_inq, SUN_INQSIZE);
	return (rval);
}

/*
 * Configure one lun addressed by SAS WWN under this iport.  Looks up
 * the target by WWN, short-circuits when the child already exists,
 * and otherwise probes the lun (or configures the IR volume when the
 * iport's phymask is 0).
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		if (sasaddr != ptgt->m_sas_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}

/*
 * Configure one lun addressed by phy number under this iport;
 * analogous to mptsas_config_one_addr() but for direct-attached
 * devices identified by phy rather than WWN.
 */
static int
mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}

	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun().
	 */

	*lundip = mptsas_find_child_phy(pdip, phy);
	if (*lundip != NULL) {
		return (DDI_SUCCESS);
	}

	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}

/*
 * Decode entry lun_cnt of a REPORT LUNS response buffer into a 16-bit
 * lun number and its addressing method.  Only peripheral, flat-space
 * and logical-unit addressing are accepted; extended addressing fails.
 */
static int
mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
    uint8_t *lun_addr_type)
{
	uint32_t	lun_idx = 0;

	ASSERT(lun_num != NULL);
	ASSERT(lun_addr_type != NULL);

	/* skip the 8-byte REPORT LUNS header, then index 8-byte entries */
	lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
	/* determine report luns addressing type */
	switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
		/*
		 * Vendors in the field have been found to be concatenating
		 * bus/target/lun to equal the complete lun value instead
		 * of switching to flat space addressing
		 */
		/* 00b - peripheral device addressing method */
	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
		/* FALLTHRU */
		/* 10b - logical unit addressing method */
	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
		/* FALLTHRU */
		/* 01b - flat space addressing method */
	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
		/* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
		*lun_addr_type = (buf[lun_idx] &
		    MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
		*lun_num = (buf[lun_idx] & 0x3F) << 8;
		*lun_num |= buf[lun_idx + 1];
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}

/*
 * Enumerate the luns of a target with SCMD_REPORT_LUNS (resizing the
 * response buffer as needed, up to three retries), probe/refresh each
 * reported lun, and offline children whose lun no longer appears.
 */
static int
mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
{
	struct buf		*repluns_bp = NULL;
	struct scsi_address	ap;
	uchar_t			cdb[CDB_GROUP5];
	int			ret = DDI_FAILURE;
	int			retry = 0;
	int			lun_list_len = 0;
	uint16_t		lun_num = 0;
	uint8_t			lun_addr_type = 0;
	uint32_t		lun_cnt = 0;
	uint32_t		lun_total = 0;
	dev_info_t		*cdip = NULL;
	uint16_t		*saved_repluns = NULL;
	char			*buffer =
NULL;
	int		buf_len = 128;
	mptsas_t	*mpt = DIP2MPT(pdip);
	uint64_t	sas_wwn = 0;
	uint8_t		phy = 0xFF;
	uint32_t	dev_info = 0;

	/* snapshot the target identity under m_mutex */
	mutex_enter(&mpt->m_mutex);
	sas_wwn = ptgt->m_sas_wwn;
	phy = ptgt->m_phynum;
	dev_info = ptgt->m_deviceinfo;
	mutex_exit(&mpt->m_mutex);

	if (sas_wwn == 0) {
		/*
		 * It's a SATA without Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_phy(pdip, phy)) {
			return (DDI_SUCCESS);
		} else {
			/*
			 * need configure and create node
			 */
			return (DDI_FAILURE);
		}
	}

	/*
	 * WWN (SAS address or Device Name exist)
	 */
	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		/*
		 * SATA device with Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
			return (DDI_SUCCESS);
		} else {
			return (DDI_FAILURE);
		}
	}

	/*
	 * Issue REPORT LUNS, growing the buffer when the device reports
	 * a longer lun list than fits; up to three retries on failure.
	 */
	do {
		ap.a_target = MPTSAS_INVALID_DEVHDL;
		ap.a_lun = 0;
		ap.a_hba_tran = mpt->m_tran;
		repluns_bp = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
		if (repluns_bp == NULL) {
			retry++;
			continue;
		}
		bzero(cdb, CDB_GROUP5);
		cdb[0] = SCMD_REPORT_LUNS;
		/* allocation length, big-endian in bytes 6-9 of the CDB */
		cdb[6] = (buf_len & 0xff000000) >> 24;
		cdb[7] = (buf_len & 0x00ff0000) >> 16;
		cdb[8] = (buf_len & 0x0000ff00) >> 8;
		cdb[9] = (buf_len & 0x000000ff);

		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
		    repluns_bp, NULL);
		if (ret != DDI_SUCCESS) {
			scsi_free_consistent_buf(repluns_bp);
			retry++;
			continue;
		}
		lun_list_len = BE_32(*(int *)((void *)(
		    repluns_bp->b_un.b_addr)));
		if (buf_len >= lun_list_len + 8) {
			ret = DDI_SUCCESS;
			break;
		}
		scsi_free_consistent_buf(repluns_bp);
		buf_len = lun_list_len + 8;

	} while (retry < 3);

	if (ret != DDI_SUCCESS)
		return (ret);
	buffer = (char *)repluns_bp->b_un.b_addr;
	/*
	 * find out the number of luns returned by the SCSI ReportLun call
	 * and allocate buffer space
	 */
	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
	if (saved_repluns == NULL) {
		scsi_free_consistent_buf(repluns_bp);
		return (DDI_FAILURE);
	}
	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
			continue;
		}
		saved_repluns[lun_cnt] = lun_num;
		if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
			ret = DDI_SUCCESS;
		else
			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
			    ptgt);
		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
			/* lun is present again — clear any "gone" marker */
			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
			    MPTSAS_DEV_GONE);
		}
	}
	/* offline children whose lun is no longer reported */
	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
	scsi_free_consistent_buf(repluns_bp);
	return (DDI_SUCCESS);
}

/*
 * Configure an IR (RAID) volume identified by its VolDevHandle:
 * look up the target, INQUIRY lun 0 and create the physical lun node.
 */
static int
mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
{
	int			rval = DDI_FAILURE;
	struct scsi_inquiry	*sd_inq = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);
	mptsas_target_t		*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
		    "not found.", target);
		return (rval);
	}

	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
	rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t
*)sd_inq,
	    SUN_INQSIZE, 0, (uchar_t)0);

	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
		    0);
	} else {
		rval = DDI_FAILURE;
	}

	kmem_free(sd_inq, SUN_INQSIZE);
	return (rval);
}

/*
 * configure all RAID volumes for virtual iport
 */
static void
mptsas_config_all_viport(dev_info_t *pdip)
{
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		config, vol;
	int		target;
	dev_info_t	*lundip = NULL;
	mptsas_slots_t	*slots = mpt->m_active;

	/*
	 * Get latest RAID info and search for any Volume DevHandles. If any
	 * are found, configure the volume.
	 */
	mutex_enter(&mpt->m_mutex);
	for (config = 0; config < slots->m_num_raid_configs; config++) {
		for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
			if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
			    == 1) {
				target = slots->m_raidconfig[config].
				    m_raidvol[vol].m_raidhandle;
				/* drop the mutex around the config call */
				mutex_exit(&mpt->m_mutex);
				(void) mptsas_config_raid(pdip, target,
				    &lundip);
				mutex_enter(&mpt->m_mutex);
			}
		}
	}
	mutex_exit(&mpt->m_mutex);
}

/*
 * Offline any child node or mdi pathinfo node of this target whose lun
 * is not present in the repluns[] list just returned by REPORT LUNS.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_sas_wwn;
	mutex_exit(&mpt->m_mutex);

	/* pass 1: plain child dev_info nodes */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		/* fetch the sibling first — offline may remove savechild */
		savechild = child;
		child = ddi_get_next_sibling(child);

		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/* pass 2: mdi (multipath) client path nodes */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}

/*
 * Walk the firmware's SMP expander and SAS device pages by
 * GetNextHandle and (re)populate the smp and target hash tables.
 * The traversal cursors (m_smp_devhdl / m_dev_handle) and the
 * m_done_traverse_* flags live in the softstate so an interrupted
 * walk can resume.  NOTE(review): the m_done_traverse_* flags are
 * presumably set by the page0 fetch helpers when the last handle is
 * reached — confirm against those routines.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t	page_address;
	int		rval = 0;
	uint16_t	dev_handle;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}

/*
 * Mark every entry of a target/smp hash table invalid (after a hard
 * reset): devhdl becomes MPTSAS_INVALID_DEVHDL, device_info 0 and
 * dr_flag inactive; entries are refreshed by mptsas_update_hashtab().
 */
void
mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
{
	mptsas_hash_data_t *data;
	data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
	while (data != NULL) {
		data->devhdl = MPTSAS_INVALID_DEVHDL;
		data->device_info = 0;
		/*
		 * For tgttbl, clear dr_flag.
		 */
		data->dr_flag = MPTSAS_DR_INACTIVE;
		data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
	}
}

/*
 * Resynchronize driver data structures with the hardware after a
 * hard reset: refresh the phymask mapping, invalidate both hash
 * tables, rewind the traversal cursors and re-walk the firmware pages.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *	m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3.
call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);
	/*
	 * Invalid the existing entries
	 */
	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
	/* Restart both traversals from scratch on the next update. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}

/*
 * Configure every device (SMP expanders and targets) that belongs to
 * the iport identified by pdip.  The iport's "phymask" property selects
 * which entries of the shared hash tables apply; phymask == 0 is the
 * virtual RAID iport and is handed off to mptsas_config_all_viport().
 * m_mutex is dropped around the online/config calls because they may
 * re-enter the driver, then reacquired to continue the traversal.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Make sure the hash tables are fully populated first. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/* Online each SMP expander whose phymask matches this iport. */
	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
	    MPTSAS_HASH_FIRST);
	while (psmp != NULL) {
		phy_mask = psmp->m_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
	}

	/* Configure each target whose phymask matches this iport. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		phy_mask = ptgt->m_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);
}

/*
 * Configure all LUNs of one target.  First try REPORT LUNS based
 * enumeration; if that fails (target may not support the command),
 * fall back to probing LUN 0 only.
 */
static int
mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
{
	int		rval = DDI_FAILURE;
	dev_info_t	*tdip;

	rval = mptsas_config_luns(pdip, ptgt);
	if (rval != DDI_SUCCESS) {
		/*
		 * The return value means the SCMD_REPORT_LUNS
		 * did not execute successfully. The target maybe
		 * doesn't support such command.
		 */
		rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
	}
	return (rval);
}

/*
 * Return fail if not all the childs/paths are freed.
 * if there is any path under the HBA, the return value will be always fail
 * because we didn't call mdi_pi_free for path
 */
static int
mptsas_offline_target(dev_info_t *pdip, char *name)
{
	dev_info_t		*child = NULL, *prechild = NULL;
	mdi_pathinfo_t		*pip = NULL, *savepip = NULL;
	int			tmp_rval, rval = DDI_SUCCESS;
	char			*addr, *cp;
	size_t			s;
	mptsas_t		*mpt = DIP2MPT(pdip);

	/*
	 * First pass: walk the non-MPxIO children, matching on the
	 * portion of the unit-address before the ',' (the wwn/phy part,
	 * excluding the lun).
	 */
	child = ddi_get_child(pdip);
	while (child) {
		addr = ddi_get_name_addr(child);
		/* Grab the sibling before the child may be removed. */
		prechild = child;
		child = ddi_get_next_sibling(child);

		if (addr == NULL) {
			continue;
		}
		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
		    NDI_DEVI_REMOVE);
		if (tmp_rval != DDI_SUCCESS) {
			rval = DDI_FAILURE;
			/*
			 * Mark the node dev-gone so later lookups know
			 * the device has been removed.
			 */
			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
			    prechild, MPTSAS_DEV_GONE) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver "
				    "unable to create property for "
				    "SAS %s (MPTSAS_DEV_GONE)", addr);
			}
		}
	}

	pip =
mdi_get_next_client_path(pdip, NULL);
	/*
	 * Second pass: walk the MPxIO client paths under this iport,
	 * matching the same way on the address prefix before ','.
	 */
	while (pip) {
		addr = MDI_PI(pip)->pi_addr;
		/* Save the current path before advancing the iterator. */
		savepip = pip;
		pip = mdi_get_next_client_path(pdip, pip);
		if (addr == NULL) {
			continue;
		}

		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		(void) mptsas_offline_lun(pdip, NULL, savepip,
		    NDI_DEVI_REMOVE);
		/*
		 * driver will not invoke mdi_pi_free, so path will not
		 * be freed forever, return DDI_FAILURE.
		 */
		rval = DDI_FAILURE;
	}
	return (rval);
}

/*
 * Offline a single LUN, identified either by an MPxIO pathinfo node
 * (rpip) or by a regular child devinfo node (rdip); exactly one of the
 * two should be non-NULL, rpip taking precedence.  The devfs name
 * cache for the node is flushed first so the offline can succeed.
 * Returns the result of mdi_pi_offline()/ndi_devi_offline(), or
 * DDI_FAILURE if neither node was supplied.
 */
static int
mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
    mdi_pathinfo_t *rpip, uint_t flags)
{
	int		rval = DDI_FAILURE;
	char		*devname;
	dev_info_t	*cdip, *parent;

	if (rpip != NULL) {
		/* MPxIO path: the client node lives under scsi_vhci. */
		parent = scsi_vhci_dip;
		cdip = mdi_pi_get_client(rpip);
	} else if (rdip != NULL) {
		parent = pdip;
		cdip = rdip;
	} else {
		return (DDI_FAILURE);
	}

	/*
	 * Make sure node is attached otherwise
	 * it won't have related cache nodes to
	 * clean up. i_ddi_devi_attached is
	 * similiar to i_ddi_node_state(cdip) >=
	 * DS_ATTACHED.
	 */
	if (i_ddi_devi_attached(cdip)) {

		/* Get full devname */
		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
		(void) ddi_deviname(cdip, devname);
		/* Clean cache */
		(void) devfs_clean(parent, devname + 1,
		    DV_CLEAN_FORCE);
		kmem_free(devname, MAXNAMELEN + 1);
	}
	if (rpip != NULL) {
		if (MDI_PI_IS_OFFLINE(rpip)) {
			/* Already offline; nothing to do. */
			rval = DDI_SUCCESS;
		} else {
			rval = mdi_pi_offline(rpip, 0);
		}
	} else {
		rval = ndi_devi_offline(cdip, flags);
	}

	return (rval);
}

/*
 * Find the child SMP node of `parent' whose SMP_WWN string property
 * equals str_wwn.  Returns the matching devinfo node, or NULL when no
 * child matches.
 */
static dev_info_t *
mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
{
	dev_info_t	*child = NULL;
	char		*smp_wwn = NULL;

	child = ddi_get_child(parent);
	while (child) {
		/* Children without an SMP_WWN property are skipped. */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
		    != DDI_SUCCESS) {
			child = ddi_get_next_sibling(child);
			continue;
		}

		if (strcmp(smp_wwn, str_wwn) == 0) {
			ddi_prop_free(smp_wwn);
			break;
		}
		child = ddi_get_next_sibling(child);
		ddi_prop_free(smp_wwn);
	}
	return (child);
}

/*
 * Offline the child devinfo node for the given SMP expander, looked up
 * by its SAS address.  Returns DDI_SUCCESS if no such child exists.
 */
static int
mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
{
	int		rval = DDI_FAILURE;
	char		*devname;
	char		wwn_str[MPTSAS_WWN_STRLEN];
	dev_info_t	*cdip;

	/* Note: no zero-padding here, matching the SMP_WWN format. */
	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);

	cdip = mptsas_find_smp_child(pdip, wwn_str);

	if (cdip == NULL)
		return (DDI_SUCCESS);

	/*
	 * Make sure node is attached otherwise
	 * it won't have related cache nodes to
	 * clean up. i_ddi_devi_attached is
	 * similiar to i_ddi_node_state(cdip) >=
	 * DS_ATTACHED.
	 */
	if (i_ddi_devi_attached(cdip)) {

		/* Get full devname */
		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
		(void) ddi_deviname(cdip, devname);
		/* Clean cache */
		(void) devfs_clean(pdip, devname + 1,
		    DV_CLEAN_FORCE);
		kmem_free(devname, MAXNAMELEN + 1);
	}

	rval = ndi_devi_offline(cdip, flags);

	return (rval);
}

/*
 * Find the child of pdip whose name@address (as produced by
 * mptsas_name_child) matches `name' exactly.  Returns NULL when no
 * child matches.
 */
static dev_info_t *
mptsas_find_child(dev_info_t *pdip, char *name)
{
	dev_info_t	*child = NULL;
	char		*rname = NULL;
	int		rval = DDI_FAILURE;

	rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

	child = ddi_get_child(pdip);
	while (child) {
		rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
		if (rval != DDI_SUCCESS) {
			child = ddi_get_next_sibling(child);
			bzero(rname, SCSI_MAXNAMELEN);
			continue;
		}

		if (strcmp(rname, name) == 0) {
			break;
		}
		child = ddi_get_next_sibling(child);
		bzero(rname, SCSI_MAXNAMELEN);
	}

	kmem_free(rname, SCSI_MAXNAMELEN);

	return (child);
}


/*
 * Find a child node by SAS address and LUN, using the WWN-based
 * unit-address form "w<16-hex-wwn>,<lun>".
 */
static dev_info_t *
mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
{
	dev_info_t	*child = NULL;
	char		*name = NULL;
	char		*addr = NULL;

	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	(void) sprintf(name, "%016"PRIx64, sasaddr);
	(void) sprintf(addr, "w%s,%x", name, lun);
	child = mptsas_find_child(pdip, addr);
	kmem_free(name, SCSI_MAXNAMELEN);
	kmem_free(addr, SCSI_MAXNAMELEN);
	return (child);
}

/*
 * Find a child node by phy number, using the phy-based unit-address
 * form "p<phy>,0" (direct-attached device without a WWN, LUN 0).
 */
static dev_info_t *
mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
{
	dev_info_t	*child;
	char		*addr;

	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	(void) sprintf(addr, "p%x,0", phy);
	child = mptsas_find_child(pdip, addr);
	kmem_free(addr, SCSI_MAXNAMELEN);
	return (child);
}

/*
 * Find an MPxIO pathinfo node by phy number ("p<phy>,0" address form).
 */
static mdi_pathinfo_t *
mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
{
	mdi_pathinfo_t	*path;
	char		*addr = NULL;

	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	(void) sprintf(addr, "p%x,0", phy);
	path = mdi_pi_find(pdip, NULL, addr);
	kmem_free(addr, SCSI_MAXNAMELEN);
	return (path);
}

/*
 * Find an MPxIO pathinfo node by SAS address and LUN
 * ("w<16-hex-wwn>,<lun>" address form).
 */
static mdi_pathinfo_t *
mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
{
	mdi_pathinfo_t	*path;
	char		*name = NULL;
	char		*addr = NULL;

	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	(void) sprintf(name, "%016"PRIx64, sasaddr);
	(void) sprintf(addr, "w%s,%x", name, lun);
	path = mdi_pi_find(parent, NULL, addr);
	kmem_free(name, SCSI_MAXNAMELEN);
	kmem_free(addr, SCSI_MAXNAMELEN);

	return (path);
}

/*
 * Create the devinfo node for one LUN of a target.  A device GUID is
 * derived from INQUIRY VPD page 0x83 so the LUN can be enumerated
 * under scsi_vhci when MPxIO is enabled; if no usable GUID can be
 * obtained (or the device class is excluded from MPxIO below) the LUN
 * is created as a physical child of the iport instead.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int		i = 0;
	uchar_t		*inq83 = NULL;
	int		inq83_len1 = 0xFF;
	int		inq83_len = 0;
	int		rval = DDI_FAILURE;
	ddi_devid_t	devid;
	char		*guid = NULL;
	int		target = ptgt->m_devhdl;
	mdi_pathinfo_t	*pip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried.
The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Optionally fall back to a physical node when
			 * page 0x83 cannot be read at all.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			/* Wait one second between retries. */
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/*
	 * rval is recomputed below by the create calls; reset it so a
	 * fall-through with no virtual-lun attempt tries the physical
	 * path.
	 */
	rval = DDI_FAILURE;

create_lun:
	/* Prefer a scsi_vhci path when we have a GUID and MPxIO is on. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}

/*
 * Create (or re-online) an MPxIO pathinfo node for one LUN under
 * scsi_vhci.  Handles the case where a path with the same address
 * already exists: the same GUID means the path simply went away and
 * came back, a different GUID means a different volume was mapped to
 * the same LUN and the stale path must be offlined and freed first.
 */
static int
mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
    dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
{
	int			target;
	char			*nodename = NULL;
	char			**compatible = NULL;
	int			ncompatible = 0;
	int			mdi_rtn = MDI_FAILURE;
	int			rval = DDI_FAILURE;
	char			*old_guid = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);
	char			*lun_addr = NULL;
	char			*wwn_str = NULL;
	char			*attached_wwn_str = NULL;
	char			*component = NULL;
	uint8_t			phy = 0xFF;
	uint64_t		sas_wwn;
	int64_t			lun64 = 0;
	uint32_t		devinfo;
	uint16_t		dev_hdl;
	uint16_t		pdev_hdl;
	uint64_t		dev_sas_wwn;
	uint64_t		pdev_sas_wwn;
	uint32_t		pdev_info;
	uint8_t			physport;
	uint8_t			phy_id;
	uint32_t		page_address;
	uint16_t		bay_num, enclosure;
	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
	uint32_t
dev_info; 14499 14500 mutex_enter(&mpt->m_mutex); 14501 target = ptgt->m_devhdl; 14502 sas_wwn = ptgt->m_sas_wwn; 14503 devinfo = ptgt->m_deviceinfo; 14504 phy = ptgt->m_phynum; 14505 mutex_exit(&mpt->m_mutex); 14506 14507 if (sas_wwn) { 14508 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun); 14509 } else { 14510 *pip = mptsas_find_path_phy(pdip, phy); 14511 } 14512 14513 if (*pip != NULL) { 14514 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip; 14515 ASSERT(*lun_dip != NULL); 14516 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip, 14517 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM), 14518 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) { 14519 if (strncmp(guid, old_guid, strlen(guid)) == 0) { 14520 /* 14521 * Same path back online again. 14522 */ 14523 (void) ddi_prop_free(old_guid); 14524 if ((!MDI_PI_IS_ONLINE(*pip)) && 14525 (!MDI_PI_IS_STANDBY(*pip)) && 14526 (ptgt->m_tgt_unconfigured == 0)) { 14527 rval = mdi_pi_online(*pip, 0); 14528 mutex_enter(&mpt->m_mutex); 14529 (void) mptsas_set_led_status(mpt, ptgt, 14530 0); 14531 mutex_exit(&mpt->m_mutex); 14532 } else { 14533 rval = DDI_SUCCESS; 14534 } 14535 if (rval != DDI_SUCCESS) { 14536 mptsas_log(mpt, CE_WARN, "path:target: " 14537 "%x, lun:%x online failed!", target, 14538 lun); 14539 *pip = NULL; 14540 *lun_dip = NULL; 14541 } 14542 return (rval); 14543 } else { 14544 /* 14545 * The GUID of the LUN has changed which maybe 14546 * because customer mapped another volume to the 14547 * same LUN. 
14548 */ 14549 mptsas_log(mpt, CE_WARN, "The GUID of the " 14550 "target:%x, lun:%x was changed, maybe " 14551 "because someone mapped another volume " 14552 "to the same LUN", target, lun); 14553 (void) ddi_prop_free(old_guid); 14554 if (!MDI_PI_IS_OFFLINE(*pip)) { 14555 rval = mdi_pi_offline(*pip, 0); 14556 if (rval != MDI_SUCCESS) { 14557 mptsas_log(mpt, CE_WARN, "path:" 14558 "target:%x, lun:%x offline " 14559 "failed!", target, lun); 14560 *pip = NULL; 14561 *lun_dip = NULL; 14562 return (DDI_FAILURE); 14563 } 14564 } 14565 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) { 14566 mptsas_log(mpt, CE_WARN, "path:target:" 14567 "%x, lun:%x free failed!", target, 14568 lun); 14569 *pip = NULL; 14570 *lun_dip = NULL; 14571 return (DDI_FAILURE); 14572 } 14573 } 14574 } else { 14575 mptsas_log(mpt, CE_WARN, "Can't get client-guid " 14576 "property for path:target:%x, lun:%x", target, lun); 14577 *pip = NULL; 14578 *lun_dip = NULL; 14579 return (DDI_FAILURE); 14580 } 14581 } 14582 scsi_hba_nodename_compatible_get(inq, NULL, 14583 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible); 14584 14585 /* 14586 * if nodename can't be determined then print a message and skip it 14587 */ 14588 if (nodename == NULL) { 14589 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible " 14590 "driver for target%d lun %d dtype:0x%02x", target, lun, 14591 inq->inq_dtype); 14592 return (DDI_FAILURE); 14593 } 14594 14595 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP); 14596 /* The property is needed by MPAPI */ 14597 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn); 14598 14599 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP); 14600 if (guid) { 14601 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun); 14602 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn); 14603 } else { 14604 (void) sprintf(lun_addr, "p%x,%x", phy, lun); 14605 (void) sprintf(wwn_str, "p%x", phy); 14606 } 14607 14608 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename, 14609 guid, lun_addr, compatible, ncompatible, 14610 
0, pip); 14611 if (mdi_rtn == MDI_SUCCESS) { 14612 14613 if (mdi_prop_update_string(*pip, MDI_GUID, 14614 guid) != DDI_SUCCESS) { 14615 mptsas_log(mpt, CE_WARN, "mptsas driver unable to " 14616 "create prop for target %d lun %d (MDI_GUID)", 14617 target, lun); 14618 mdi_rtn = MDI_FAILURE; 14619 goto virt_create_done; 14620 } 14621 14622 if (mdi_prop_update_int(*pip, LUN_PROP, 14623 lun) != DDI_SUCCESS) { 14624 mptsas_log(mpt, CE_WARN, "mptsas driver unable to " 14625 "create prop for target %d lun %d (LUN_PROP)", 14626 target, lun); 14627 mdi_rtn = MDI_FAILURE; 14628 goto virt_create_done; 14629 } 14630 lun64 = (int64_t)lun; 14631 if (mdi_prop_update_int64(*pip, LUN64_PROP, 14632 lun64) != DDI_SUCCESS) { 14633 mptsas_log(mpt, CE_WARN, "mptsas driver unable to " 14634 "create prop for target %d (LUN64_PROP)", 14635 target); 14636 mdi_rtn = MDI_FAILURE; 14637 goto virt_create_done; 14638 } 14639 if (mdi_prop_update_string_array(*pip, "compatible", 14640 compatible, ncompatible) != 14641 DDI_PROP_SUCCESS) { 14642 mptsas_log(mpt, CE_WARN, "mptsas driver unable to " 14643 "create prop for target %d lun %d (COMPATIBLE)", 14644 target, lun); 14645 mdi_rtn = MDI_FAILURE; 14646 goto virt_create_done; 14647 } 14648 if (sas_wwn && (mdi_prop_update_string(*pip, 14649 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) { 14650 mptsas_log(mpt, CE_WARN, "mptsas driver unable to " 14651 "create prop for target %d lun %d " 14652 "(target-port)", target, lun); 14653 mdi_rtn = MDI_FAILURE; 14654 goto virt_create_done; 14655 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip, 14656 "sata-phy", phy) != DDI_PROP_SUCCESS)) { 14657 /* 14658 * Direct attached SATA device without DeviceName 14659 */ 14660 mptsas_log(mpt, CE_WARN, "mptsas driver unable to " 14661 "create prop for SAS target %d lun %d " 14662 "(sata-phy)", target, lun); 14663 mdi_rtn = MDI_FAILURE; 14664 goto virt_create_done; 14665 } 14666 mutex_enter(&mpt->m_mutex); 14667 14668 page_address = 
(MPI2_SAS_DEVICE_PGAD_FORM_HANDLE & 14669 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | 14670 (uint32_t)ptgt->m_devhdl; 14671 rval = mptsas_get_sas_device_page0(mpt, page_address, 14672 &dev_hdl, &dev_sas_wwn, &dev_info, &physport, 14673 &phy_id, &pdev_hdl, &bay_num, &enclosure); 14674 if (rval != DDI_SUCCESS) { 14675 mutex_exit(&mpt->m_mutex); 14676 mptsas_log(mpt, CE_WARN, "mptsas unable to get " 14677 "parent device for handle %d", page_address); 14678 mdi_rtn = MDI_FAILURE; 14679 goto virt_create_done; 14680 } 14681 14682 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE & 14683 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl; 14684 rval = mptsas_get_sas_device_page0(mpt, page_address, 14685 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport, 14686 &phy_id, &pdev_hdl, &bay_num, &enclosure); 14687 if (rval != DDI_SUCCESS) { 14688 mutex_exit(&mpt->m_mutex); 14689 mptsas_log(mpt, CE_WARN, "mptsas unable to get" 14690 "device info for handle %d", page_address); 14691 mdi_rtn = MDI_FAILURE; 14692 goto virt_create_done; 14693 } 14694 14695 mutex_exit(&mpt->m_mutex); 14696 14697 /* 14698 * If this device direct attached to the controller 14699 * set the attached-port to the base wwid 14700 */ 14701 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) 14702 != DEVINFO_DIRECT_ATTACHED) { 14703 (void) sprintf(pdev_wwn_str, "w%016"PRIx64, 14704 pdev_sas_wwn); 14705 } else { 14706 /* 14707 * Update the iport's attached-port to guid 14708 */ 14709 if (sas_wwn == 0) { 14710 (void) sprintf(wwn_str, "p%x", phy); 14711 } else { 14712 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn); 14713 } 14714 if (ddi_prop_update_string(DDI_DEV_T_NONE, 14715 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) != 14716 DDI_PROP_SUCCESS) { 14717 mptsas_log(mpt, CE_WARN, 14718 "mptsas unable to create " 14719 "property for iport target-port" 14720 " %s (sas_wwn)", 14721 wwn_str); 14722 mdi_rtn = MDI_FAILURE; 14723 goto virt_create_done; 14724 } 14725 14726 (void) sprintf(pdev_wwn_str, "w%016"PRIx64, 14727 
mpt->un.m_base_wwid); 14728 } 14729 14730 if (mdi_prop_update_string(*pip, 14731 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) != 14732 DDI_PROP_SUCCESS) { 14733 mptsas_log(mpt, CE_WARN, "mptsas unable to create " 14734 "property for iport attached-port %s (sas_wwn)", 14735 attached_wwn_str); 14736 mdi_rtn = MDI_FAILURE; 14737 goto virt_create_done; 14738 } 14739 14740 14741 if (inq->inq_dtype == 0) { 14742 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 14743 /* 14744 * set obp path for pathinfo 14745 */ 14746 (void) snprintf(component, MAXPATHLEN, 14747 "disk@%s", lun_addr); 14748 14749 if (mdi_pi_pathname_obp_set(*pip, component) != 14750 DDI_SUCCESS) { 14751 mptsas_log(mpt, CE_WARN, "mpt_sas driver " 14752 "unable to set obp-path for object %s", 14753 component); 14754 mdi_rtn = MDI_FAILURE; 14755 goto virt_create_done; 14756 } 14757 } 14758 14759 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip; 14760 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE | 14761 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) { 14762 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip, 14763 "pm-capable", 1)) != 14764 DDI_PROP_SUCCESS) { 14765 mptsas_log(mpt, CE_WARN, "mptsas driver" 14766 "failed to create pm-capable " 14767 "property, target %d", target); 14768 mdi_rtn = MDI_FAILURE; 14769 goto virt_create_done; 14770 } 14771 } 14772 /* 14773 * Create the phy-num property 14774 */ 14775 if (mdi_prop_update_int(*pip, "phy-num", 14776 ptgt->m_phynum) != DDI_SUCCESS) { 14777 mptsas_log(mpt, CE_WARN, "mptsas driver unable to " 14778 "create phy-num property for target %d lun %d", 14779 target, lun); 14780 mdi_rtn = MDI_FAILURE; 14781 goto virt_create_done; 14782 } 14783 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr)); 14784 mdi_rtn = mdi_pi_online(*pip, 0); 14785 if (mdi_rtn == MDI_SUCCESS) { 14786 mutex_enter(&mpt->m_mutex); 14787 if (mptsas_set_led_status(mpt, ptgt, 0) != 14788 DDI_SUCCESS) { 14789 NDBG14(("mptsas: clear LED for slot %x " 14790 "failed", ptgt->m_slot_num)); 14791 } 14792 
			mutex_exit(&mpt->m_mutex);
		}
		if (mdi_rtn == MDI_NOT_SUPPORTED) {
			mdi_rtn = MDI_FAILURE;
		}
virt_create_done:
		/* On any failure after allocation, free the new path. */
		if (*pip && mdi_rtn != MDI_SUCCESS) {
			(void) mdi_pi_free(*pip, 0);
			*pip = NULL;
			*lun_dip = NULL;
		}
	}

	scsi_hba_nodename_compatible_free(nodename, compatible);
	if (lun_addr != NULL) {
		kmem_free(lun_addr, SCSI_MAXNAMELEN);
	}
	if (wwn_str != NULL) {
		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
	}
	if (component != NULL) {
		kmem_free(component, MAXPATHLEN);
	}

	return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}

/*
 * Create a physical (non-MPxIO) devinfo node for one LUN under the
 * iport and decorate it with the properties the SCSA framework and
 * SM-HBA consumers expect (lun/lun64, compatible, target-port,
 * port-wwn, attached-port, variant, pm-capable, phy-num, obp-path).
 */
static int
mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
    char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int			target;
	int			rval;
	int			ndi_rtn = NDI_FAILURE;
	uint64_t		be_sas_wwn;
	char			*nodename = NULL;
	char			**compatible = NULL;
	int			ncompatible = 0;
	int			instance = 0;
	mptsas_t		*mpt = DIP2MPT(pdip);
	char			*wwn_str = NULL;
	char			*component = NULL;
	char			*attached_wwn_str = NULL;
	uint8_t			phy = 0xFF;
	uint64_t		sas_wwn;
	uint32_t		devinfo;
	uint16_t		dev_hdl;
	uint16_t		pdev_hdl;
	uint64_t		pdev_sas_wwn;
	uint64_t		dev_sas_wwn;
	uint32_t		pdev_info;
	uint8_t			physport;
	uint8_t			phy_id;
	uint32_t		page_address;
	uint16_t		bay_num, enclosure;
	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
	uint32_t		dev_info;
	int64_t			lun64 = 0;

	/* Snapshot the target attributes under the driver mutex. */
	mutex_enter(&mpt->m_mutex);
	target = ptgt->m_devhdl;
	sas_wwn = ptgt->m_sas_wwn;
	devinfo = ptgt->m_deviceinfo;
	phy = ptgt->m_phynum;
	mutex_exit(&mpt->m_mutex);

	/*
	 * generate compatible property with binding-set "mpt"
	 */
	scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
	    &nodename, &compatible, &ncompatible);

	/*
	 * if nodename can't be determined then print a message and skip it
	 */
	if (nodename == NULL) {
		mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
		    "for target %d lun %d", target, lun);
		return (DDI_FAILURE);
	}

	ndi_rtn = ndi_devi_alloc(pdip, nodename,
	    DEVI_SID_NODEID, lun_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {

		if (ndi_prop_update_int(DDI_DEV_T_NONE,
		    *lun_dip, LUN_PROP, lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun %d (LUN_PROP)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		lun64 = (int64_t)lun;
		if (ndi_prop_update_int64(DDI_DEV_T_NONE,
		    *lun_dip, LUN64_PROP, lun64) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun64 %d (LUN64_PROP)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
		    *lun_dip, "compatible", compatible, ncompatible)
		    != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun %d (COMPATIBLE)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/*
		 * We need the SAS WWN for non-multipath devices, so
		 * we'll use the same property as that multipathing
		 * devices need to present for MPAPI. If we don't have
		 * a WWN (e.g. parallel SCSI), don't create the prop.
		 */
		wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
		if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
		    != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(target-port)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		be_sas_wwn = BE_64(sas_wwn);
		if (sas_wwn && ndi_prop_update_byte_array(
		    DDI_DEV_T_NONE, *lun_dip, "port-wwn",
		    (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(port-wwn)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		} else if ((sas_wwn == 0) && (ndi_prop_update_int(
		    DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
		    DDI_PROP_SUCCESS)) {
			/*
			 * Direct attached SATA device without DeviceName
			 */
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(sata-phy)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to"
			    "create property for SAS target %d lun %d"
			    " (SAS_PROP)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
			mptsas_log(mpt, CE_WARN, "mptsas unable "
			    "to create guid property for target %d "
			    "lun %d", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/*
		 * The following code is to set properties for SM-HBA support,
		 * it doesn't apply to RAID volumes
		 */
		if (ptgt->m_phymask == 0)
			goto phys_raid_lun;

		mutex_enter(&mpt->m_mutex);

		/* Fetch device page 0 for the target itself... */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)ptgt->m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &dev_sas_wwn, &dev_info,
		    &physport, &phy_id, &pdev_hdl,
		    &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get"
			    "parent device for handle %d.", page_address);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/* ... and for its parent (expander or HBA) handle. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &pdev_sas_wwn, &pdev_info,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "device for handle %d.", page_address);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		mutex_exit(&mpt->m_mutex);

		/*
		 * If this device direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
			    pdev_sas_wwn);
		} else {
			/*
			 * Update the iport's attached-port to guid
			 */
			if (sas_wwn == 0) {
				(void) sprintf(wwn_str, "p%x", phy);
			} else {
				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
			}
			if (ddi_prop_update_string(DDI_DEV_T_NONE,
			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas unable to create "
				    "property for iport target-port"
				    " %s (sas_wwn)",
				    wwn_str);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}

			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			/*
			 * NOTE(review): attached_wwn_str is never assigned
			 * in this function (always NULL), so this %s logs a
			 * NULL pointer; pdev_wwn_str appears to be the
			 * intended argument — confirm and fix.
			 */
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to create "
			    "property for iport attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		if (IS_ATAPI_DEVICE(dev_info)) {
			if (ndi_prop_update_string(DDI_DEV_T_NONE,
			    *lun_dip, MPTSAS_VARIANT, "atapi") !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas unable to create "
				    "property for device variant ");
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}

		if (IS_SATA_DEVICE(dev_info)) {
			if (ndi_prop_update_string(DDI_DEV_T_NONE,
			    *lun_dip, MPTSAS_VARIANT, "sata") !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas unable to create "
				    "property for device variant ");
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}
phys_raid_lun:
		/*
		 * if this is a SAS controller, and the target is a SATA
		 * drive, set the 'pm-capable' property for sd and if on
		 * an OPL platform, also check if this is an ATAPI
		 * device.
		 */
		instance = ddi_get_instance(mpt->m_dip);
		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
			NDBG2(("mptsas%d: creating pm-capable property, "
			    "target %d", instance, target));

			if ((ndi_prop_update_int(DDI_DEV_T_NONE,
			    *lun_dip, "pm-capable", 1)) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas "
				    "failed to create pm-capable "
				    "property, target %d", target);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}

		}

		if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
			/*
			 * add 'obp-path' properties for devinfo
			 */
			/*
			 * NOTE(review): wwn_str is a char * here, so
			 * sizeof (wwn_str) is the pointer size, not
			 * MPTSAS_WWN_STRLEN — this bzero only clears a few
			 * bytes.  Benign because the sprintf below rewrites
			 * the string, but the length looks unintended.
			 */
			bzero(wwn_str, sizeof (wwn_str));
			(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			if (guid) {
				(void) snprintf(component, MAXPATHLEN,
				    "disk@w%s,%x", wwn_str, lun);
			} else {
				(void) snprintf(component, MAXPATHLEN,
				    "disk@p%x,%x", phy, lun);
			}
			if (ddi_pathname_obp_set(*lun_dip, component)
			    != DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
				    "unable to set obp-path for SAS "
				    "object %s", component);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}
		/*
		 * Create the phy-num property for non-raid disk
		 */
		if (ptgt->m_phymask != 0) {
			if (ndi_prop_update_int(DDI_DEV_T_NONE,
			    *lun_dip, "phy-num", ptgt->m_phynum) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver "
				    "failed to create phy-num property for "
				    "target %d", target);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}
phys_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
		}
		if (ndi_rtn ==
NDI_SUCCESS) { 15147 mutex_enter(&mpt->m_mutex); 15148 if (mptsas_set_led_status(mpt, ptgt, 0) != 15149 DDI_SUCCESS) { 15150 NDBG14(("mptsas: clear LED for tgt %x " 15151 "failed", ptgt->m_slot_num)); 15152 } 15153 mutex_exit(&mpt->m_mutex); 15154 } 15155 15156 /* 15157 * If success set rtn flag, else unwire alloc'd lun 15158 */ 15159 if (ndi_rtn != NDI_SUCCESS) { 15160 NDBG12(("mptsas driver unable to online " 15161 "target %d lun %d", target, lun)); 15162 ndi_prop_remove_all(*lun_dip); 15163 (void) ndi_devi_free(*lun_dip); 15164 *lun_dip = NULL; 15165 } 15166 } 15167 15168 scsi_hba_nodename_compatible_free(nodename, compatible); 15169 15170 if (wwn_str != NULL) { 15171 kmem_free(wwn_str, MPTSAS_WWN_STRLEN); 15172 } 15173 if (component != NULL) { 15174 kmem_free(component, MAXPATHLEN); 15175 } 15176 15177 15178 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE); 15179 } 15180 15181 static int 15182 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn) 15183 { 15184 mptsas_t *mpt = DIP2MPT(pdip); 15185 struct smp_device smp_sd; 15186 15187 /* XXX An HBA driver should not be allocating an smp_device. 
*/ 15188 bzero(&smp_sd, sizeof (struct smp_device)); 15189 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran; 15190 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE); 15191 15192 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS) 15193 return (NDI_FAILURE); 15194 return (NDI_SUCCESS); 15195 } 15196 15197 static int 15198 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip) 15199 { 15200 mptsas_t *mpt = DIP2MPT(pdip); 15201 mptsas_smp_t *psmp = NULL; 15202 int rval; 15203 int phymask; 15204 15205 /* 15206 * Get the physical port associated to the iport 15207 * PHYMASK TODO 15208 */ 15209 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0, 15210 "phymask", 0); 15211 /* 15212 * Find the smp node in hash table with specified sas address and 15213 * physical port 15214 */ 15215 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn); 15216 if (psmp == NULL) { 15217 return (DDI_FAILURE); 15218 } 15219 15220 rval = mptsas_online_smp(pdip, psmp, smp_dip); 15221 15222 return (rval); 15223 } 15224 15225 static int 15226 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, 15227 dev_info_t **smp_dip) 15228 { 15229 char wwn_str[MPTSAS_WWN_STRLEN]; 15230 char attached_wwn_str[MPTSAS_WWN_STRLEN]; 15231 int ndi_rtn = NDI_FAILURE; 15232 int rval = 0; 15233 mptsas_smp_t dev_info; 15234 uint32_t page_address; 15235 mptsas_t *mpt = DIP2MPT(pdip); 15236 uint16_t dev_hdl; 15237 uint64_t sas_wwn; 15238 uint64_t smp_sas_wwn; 15239 uint8_t physport; 15240 uint8_t phy_id; 15241 uint16_t pdev_hdl; 15242 uint8_t numphys = 0; 15243 uint16_t i = 0; 15244 char phymask[MPTSAS_MAX_PHYS]; 15245 char *iport = NULL; 15246 mptsas_phymask_t phy_mask = 0; 15247 uint16_t attached_devhdl; 15248 uint16_t bay_num, enclosure; 15249 15250 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr); 15251 15252 /* 15253 * Probe smp device, prevent the node of removed device from being 15254 * configured succesfully 15255 */ 15256 if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) 
!= NDI_SUCCESS) { 15257 return (DDI_FAILURE); 15258 } 15259 15260 if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) { 15261 return (DDI_SUCCESS); 15262 } 15263 15264 ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip); 15265 15266 /* 15267 * if lun alloc success, set props 15268 */ 15269 if (ndi_rtn == NDI_SUCCESS) { 15270 /* 15271 * Set the flavor of the child to be SMP flavored 15272 */ 15273 ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP); 15274 15275 if (ndi_prop_update_string(DDI_DEV_T_NONE, 15276 *smp_dip, SMP_WWN, wwn_str) != 15277 DDI_PROP_SUCCESS) { 15278 mptsas_log(mpt, CE_WARN, "mptsas unable to create " 15279 "property for smp device %s (sas_wwn)", 15280 wwn_str); 15281 ndi_rtn = NDI_FAILURE; 15282 goto smp_create_done; 15283 } 15284 (void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr); 15285 if (ndi_prop_update_string(DDI_DEV_T_NONE, 15286 *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != 15287 DDI_PROP_SUCCESS) { 15288 mptsas_log(mpt, CE_WARN, "mptsas unable to create " 15289 "property for iport target-port %s (sas_wwn)", 15290 wwn_str); 15291 ndi_rtn = NDI_FAILURE; 15292 goto smp_create_done; 15293 } 15294 15295 mutex_enter(&mpt->m_mutex); 15296 15297 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL & 15298 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl; 15299 rval = mptsas_get_sas_expander_page0(mpt, page_address, 15300 &dev_info); 15301 if (rval != DDI_SUCCESS) { 15302 mutex_exit(&mpt->m_mutex); 15303 mptsas_log(mpt, CE_WARN, 15304 "mptsas unable to get expander " 15305 "parent device info for %x", page_address); 15306 ndi_rtn = NDI_FAILURE; 15307 goto smp_create_done; 15308 } 15309 15310 smp_node->m_pdevhdl = dev_info.m_pdevhdl; 15311 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE & 15312 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | 15313 (uint32_t)dev_info.m_pdevhdl; 15314 rval = mptsas_get_sas_device_page0(mpt, page_address, 15315 &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, 15316 &physport, &phy_id, &pdev_hdl, &bay_num, 
&enclosure); 15317 if (rval != DDI_SUCCESS) { 15318 mutex_exit(&mpt->m_mutex); 15319 mptsas_log(mpt, CE_WARN, "mptsas unable to get " 15320 "device info for %x", page_address); 15321 ndi_rtn = NDI_FAILURE; 15322 goto smp_create_done; 15323 } 15324 15325 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE & 15326 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | 15327 (uint32_t)dev_info.m_devhdl; 15328 rval = mptsas_get_sas_device_page0(mpt, page_address, 15329 &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo, 15330 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure); 15331 if (rval != DDI_SUCCESS) { 15332 mutex_exit(&mpt->m_mutex); 15333 mptsas_log(mpt, CE_WARN, "mptsas unable to get " 15334 "device info for %x", page_address); 15335 ndi_rtn = NDI_FAILURE; 15336 goto smp_create_done; 15337 } 15338 mutex_exit(&mpt->m_mutex); 15339 15340 /* 15341 * If this smp direct attached to the controller 15342 * set the attached-port to the base wwid 15343 */ 15344 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) 15345 != DEVINFO_DIRECT_ATTACHED) { 15346 (void) sprintf(attached_wwn_str, "w%016"PRIx64, 15347 sas_wwn); 15348 } else { 15349 (void) sprintf(attached_wwn_str, "w%016"PRIx64, 15350 mpt->un.m_base_wwid); 15351 } 15352 15353 if (ndi_prop_update_string(DDI_DEV_T_NONE, 15354 *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) != 15355 DDI_PROP_SUCCESS) { 15356 mptsas_log(mpt, CE_WARN, "mptsas unable to create " 15357 "property for smp attached-port %s (sas_wwn)", 15358 attached_wwn_str); 15359 ndi_rtn = NDI_FAILURE; 15360 goto smp_create_done; 15361 } 15362 15363 if (ndi_prop_create_boolean(DDI_DEV_T_NONE, 15364 *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) { 15365 mptsas_log(mpt, CE_WARN, "mptsas unable to " 15366 "create property for SMP %s (SMP_PROP) ", 15367 wwn_str); 15368 ndi_rtn = NDI_FAILURE; 15369 goto smp_create_done; 15370 } 15371 15372 /* 15373 * check the smp to see whether it direct 15374 * attached to the controller 15375 */ 15376 if ((smp_node->m_deviceinfo & 
DEVINFO_DIRECT_ATTACHED) 15377 != DEVINFO_DIRECT_ATTACHED) { 15378 goto smp_create_done; 15379 } 15380 numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 15381 DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1); 15382 if (numphys > 0) { 15383 goto smp_create_done; 15384 } 15385 /* 15386 * this iport is an old iport, we need to 15387 * reconfig the props for it. 15388 */ 15389 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip, 15390 MPTSAS_VIRTUAL_PORT, 0) != 15391 DDI_PROP_SUCCESS) { 15392 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip, 15393 MPTSAS_VIRTUAL_PORT); 15394 mptsas_log(mpt, CE_WARN, "mptsas virtual port " 15395 "prop update failed"); 15396 goto smp_create_done; 15397 } 15398 15399 mutex_enter(&mpt->m_mutex); 15400 numphys = 0; 15401 iport = ddi_get_name_addr(pdip); 15402 for (i = 0; i < MPTSAS_MAX_PHYS; i++) { 15403 bzero(phymask, sizeof (phymask)); 15404 (void) sprintf(phymask, 15405 "%x", mpt->m_phy_info[i].phy_mask); 15406 if (strcmp(phymask, iport) == 0) { 15407 phy_mask = mpt->m_phy_info[i].phy_mask; 15408 break; 15409 } 15410 } 15411 15412 for (i = 0; i < MPTSAS_MAX_PHYS; i++) { 15413 if ((phy_mask >> i) & 0x01) { 15414 numphys++; 15415 } 15416 } 15417 /* 15418 * Update PHY info for smhba 15419 */ 15420 if (mptsas_smhba_phy_init(mpt)) { 15421 mutex_exit(&mpt->m_mutex); 15422 mptsas_log(mpt, CE_WARN, "mptsas phy update " 15423 "failed"); 15424 goto smp_create_done; 15425 } 15426 mutex_exit(&mpt->m_mutex); 15427 15428 mptsas_smhba_set_phy_props(mpt, iport, pdip, 15429 numphys, &attached_devhdl); 15430 15431 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip, 15432 MPTSAS_NUM_PHYS, numphys) != 15433 DDI_PROP_SUCCESS) { 15434 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip, 15435 MPTSAS_NUM_PHYS); 15436 mptsas_log(mpt, CE_WARN, "mptsas update " 15437 "num phys props failed"); 15438 goto smp_create_done; 15439 } 15440 /* 15441 * Add parent's props for SMHBA support 15442 */ 15443 if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip, 15444 SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) != 15445 
DDI_PROP_SUCCESS) { 15446 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip, 15447 SCSI_ADDR_PROP_ATTACHED_PORT); 15448 mptsas_log(mpt, CE_WARN, "mptsas update iport" 15449 "attached-port failed"); 15450 goto smp_create_done; 15451 } 15452 15453 smp_create_done: 15454 /* 15455 * If props were setup ok, online the lun 15456 */ 15457 if (ndi_rtn == NDI_SUCCESS) { 15458 /* 15459 * Try to online the new node 15460 */ 15461 ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH); 15462 } 15463 15464 /* 15465 * If success set rtn flag, else unwire alloc'd lun 15466 */ 15467 if (ndi_rtn != NDI_SUCCESS) { 15468 NDBG12(("mptsas unable to online " 15469 "SMP target %s", wwn_str)); 15470 ndi_prop_remove_all(*smp_dip); 15471 (void) ndi_devi_free(*smp_dip); 15472 } 15473 } 15474 15475 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE); 15476 } 15477 15478 /* smp transport routine */ 15479 static int mptsas_smp_start(struct smp_pkt *smp_pkt) 15480 { 15481 uint64_t wwn; 15482 Mpi2SmpPassthroughRequest_t req; 15483 Mpi2SmpPassthroughReply_t rep; 15484 uint32_t direction = 0; 15485 mptsas_t *mpt; 15486 int ret; 15487 uint64_t tmp64; 15488 15489 mpt = (mptsas_t *)smp_pkt->smp_pkt_address-> 15490 smp_a_hba_tran->smp_tran_hba_private; 15491 15492 bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE); 15493 /* 15494 * Need to compose a SMP request message 15495 * and call mptsas_do_passthru() function 15496 */ 15497 bzero(&req, sizeof (req)); 15498 bzero(&rep, sizeof (rep)); 15499 req.PassthroughFlags = 0; 15500 req.PhysicalPort = 0xff; 15501 req.ChainOffset = 0; 15502 req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH; 15503 15504 if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) { 15505 smp_pkt->smp_pkt_reason = ERANGE; 15506 return (DDI_FAILURE); 15507 } 15508 req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4)); 15509 15510 req.MsgFlags = 0; 15511 tmp64 = LE_64(wwn); 15512 bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE); 15513 if 
(smp_pkt->smp_pkt_rspsize > 0) { 15514 direction |= MPTSAS_PASS_THRU_DIRECTION_READ; 15515 } 15516 if (smp_pkt->smp_pkt_reqsize > 0) { 15517 direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE; 15518 } 15519 15520 mutex_enter(&mpt->m_mutex); 15521 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, 15522 (uint8_t *)smp_pkt->smp_pkt_rsp, 15523 offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep), 15524 smp_pkt->smp_pkt_rspsize - 4, direction, 15525 (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4, 15526 smp_pkt->smp_pkt_timeout, FKIOCTL); 15527 mutex_exit(&mpt->m_mutex); 15528 if (ret != 0) { 15529 cmn_err(CE_WARN, "smp_start do passthru error %d", ret); 15530 smp_pkt->smp_pkt_reason = (uchar_t)(ret); 15531 return (DDI_FAILURE); 15532 } 15533 /* do passthrough success, check the smp status */ 15534 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) { 15535 switch (LE_16(rep.IOCStatus)) { 15536 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 15537 smp_pkt->smp_pkt_reason = ENODEV; 15538 break; 15539 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN: 15540 smp_pkt->smp_pkt_reason = EOVERFLOW; 15541 break; 15542 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED: 15543 smp_pkt->smp_pkt_reason = EIO; 15544 break; 15545 default: 15546 mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc" 15547 "status:%x", LE_16(rep.IOCStatus)); 15548 smp_pkt->smp_pkt_reason = EIO; 15549 break; 15550 } 15551 return (DDI_FAILURE); 15552 } 15553 if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) { 15554 mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x", 15555 rep.SASStatus); 15556 smp_pkt->smp_pkt_reason = EIO; 15557 return (DDI_FAILURE); 15558 } 15559 15560 return (DDI_SUCCESS); 15561 } 15562 15563 /* 15564 * If we didn't get a match, we need to get sas page0 for each device, and 15565 * untill we get a match. 
If failed, return NULL 15566 */ 15567 static mptsas_target_t * 15568 mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy) 15569 { 15570 int i, j = 0; 15571 int rval = 0; 15572 uint16_t cur_handle; 15573 uint32_t page_address; 15574 mptsas_target_t *ptgt = NULL; 15575 15576 /* 15577 * PHY named device must be direct attached and attaches to 15578 * narrow port, if the iport is not parent of the device which 15579 * we are looking for. 15580 */ 15581 for (i = 0; i < MPTSAS_MAX_PHYS; i++) { 15582 if ((1 << i) & phymask) 15583 j++; 15584 } 15585 15586 if (j > 1) 15587 return (NULL); 15588 15589 /* 15590 * Must be a narrow port and single device attached to the narrow port 15591 * So the physical port num of device which is equal to the iport's 15592 * port num is the device what we are looking for. 15593 */ 15594 15595 if (mpt->m_phy_info[phy].phy_mask != phymask) 15596 return (NULL); 15597 15598 mutex_enter(&mpt->m_mutex); 15599 15600 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl, 15601 MPTSAS_HASH_FIRST); 15602 while (ptgt != NULL) { 15603 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) { 15604 mutex_exit(&mpt->m_mutex); 15605 return (ptgt); 15606 } 15607 15608 ptgt = (mptsas_target_t *)mptsas_hash_traverse( 15609 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT); 15610 } 15611 15612 if (mpt->m_done_traverse_dev) { 15613 mutex_exit(&mpt->m_mutex); 15614 return (NULL); 15615 } 15616 15617 /* If didn't get a match, come here */ 15618 cur_handle = mpt->m_dev_handle; 15619 for (; ; ) { 15620 ptgt = NULL; 15621 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE & 15622 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle; 15623 rval = mptsas_get_target_device_info(mpt, page_address, 15624 &cur_handle, &ptgt); 15625 if ((rval == DEV_INFO_FAIL_PAGE0) || 15626 (rval == DEV_INFO_FAIL_ALLOC)) { 15627 break; 15628 } 15629 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) || 15630 (rval == DEV_INFO_PHYS_DISK)) { 15631 continue; 15632 } 15633 
mpt->m_dev_handle = cur_handle; 15634 15635 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) { 15636 break; 15637 } 15638 } 15639 15640 mutex_exit(&mpt->m_mutex); 15641 return (ptgt); 15642 } 15643 15644 /* 15645 * The ptgt->m_sas_wwn contains the wwid for each disk. 15646 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid 15647 * If we didn't get a match, we need to get sas page0 for each device, and 15648 * untill we get a match 15649 * If failed, return NULL 15650 */ 15651 static mptsas_target_t * 15652 mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid) 15653 { 15654 int rval = 0; 15655 uint16_t cur_handle; 15656 uint32_t page_address; 15657 mptsas_target_t *tmp_tgt = NULL; 15658 15659 mutex_enter(&mpt->m_mutex); 15660 tmp_tgt = (struct mptsas_target *)mptsas_hash_search( 15661 &mpt->m_active->m_tgttbl, wwid, phymask); 15662 if (tmp_tgt != NULL) { 15663 mutex_exit(&mpt->m_mutex); 15664 return (tmp_tgt); 15665 } 15666 15667 if (phymask == 0) { 15668 /* 15669 * It's IR volume 15670 */ 15671 rval = mptsas_get_raid_info(mpt); 15672 if (rval) { 15673 tmp_tgt = (struct mptsas_target *)mptsas_hash_search( 15674 &mpt->m_active->m_tgttbl, wwid, phymask); 15675 } 15676 mutex_exit(&mpt->m_mutex); 15677 return (tmp_tgt); 15678 } 15679 15680 if (mpt->m_done_traverse_dev) { 15681 mutex_exit(&mpt->m_mutex); 15682 return (NULL); 15683 } 15684 15685 /* If didn't get a match, come here */ 15686 cur_handle = mpt->m_dev_handle; 15687 for (; ; ) { 15688 tmp_tgt = NULL; 15689 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE & 15690 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle; 15691 rval = mptsas_get_target_device_info(mpt, page_address, 15692 &cur_handle, &tmp_tgt); 15693 if ((rval == DEV_INFO_FAIL_PAGE0) || 15694 (rval == DEV_INFO_FAIL_ALLOC)) { 15695 tmp_tgt = NULL; 15696 break; 15697 } 15698 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) || 15699 (rval == DEV_INFO_PHYS_DISK)) { 15700 continue; 15701 } 15702 mpt->m_dev_handle = cur_handle; 15703 if 
((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) && 15704 (tmp_tgt->m_phymask == phymask)) { 15705 break; 15706 } 15707 } 15708 15709 mutex_exit(&mpt->m_mutex); 15710 return (tmp_tgt); 15711 } 15712 15713 static mptsas_smp_t * 15714 mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid) 15715 { 15716 int rval = 0; 15717 uint16_t cur_handle; 15718 uint32_t page_address; 15719 mptsas_smp_t smp_node, *psmp = NULL; 15720 15721 mutex_enter(&mpt->m_mutex); 15722 psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl, 15723 wwid, phymask); 15724 if (psmp != NULL) { 15725 mutex_exit(&mpt->m_mutex); 15726 return (psmp); 15727 } 15728 15729 if (mpt->m_done_traverse_smp) { 15730 mutex_exit(&mpt->m_mutex); 15731 return (NULL); 15732 } 15733 15734 /* If didn't get a match, come here */ 15735 cur_handle = mpt->m_smp_devhdl; 15736 for (; ; ) { 15737 psmp = NULL; 15738 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL & 15739 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle; 15740 rval = mptsas_get_sas_expander_page0(mpt, page_address, 15741 &smp_node); 15742 if (rval != DDI_SUCCESS) { 15743 break; 15744 } 15745 mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl; 15746 psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node); 15747 ASSERT(psmp); 15748 if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) && 15749 (psmp->m_phymask == phymask)) { 15750 break; 15751 } 15752 } 15753 15754 mutex_exit(&mpt->m_mutex); 15755 return (psmp); 15756 } 15757 15758 /* helper functions using hash */ 15759 15760 /* 15761 * Can't have duplicate entries for same devhdl, 15762 * if there are invalid entries, the devhdl should be set to 0xffff 15763 */ 15764 static void * 15765 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl) 15766 { 15767 mptsas_hash_data_t *data; 15768 15769 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST); 15770 while (data != NULL) { 15771 if (data->devhdl == devhdl) { 15772 break; 15773 } 15774 data = 
mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT); 15775 } 15776 return (data); 15777 } 15778 15779 mptsas_target_t * 15780 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid, 15781 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum, mptsas_t *mpt) 15782 { 15783 mptsas_target_t *tmp_tgt = NULL; 15784 15785 tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask); 15786 if (tmp_tgt != NULL) { 15787 NDBG20(("Hash item already exist")); 15788 tmp_tgt->m_deviceinfo = devinfo; 15789 tmp_tgt->m_devhdl = devhdl; 15790 return (tmp_tgt); 15791 } 15792 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP); 15793 if (tmp_tgt == NULL) { 15794 cmn_err(CE_WARN, "Fatal, allocated tgt failed"); 15795 return (NULL); 15796 } 15797 tmp_tgt->m_devhdl = devhdl; 15798 tmp_tgt->m_sas_wwn = wwid; 15799 tmp_tgt->m_deviceinfo = devinfo; 15800 tmp_tgt->m_phymask = phymask; 15801 tmp_tgt->m_phynum = phynum; 15802 /* Initialized the tgt structure */ 15803 tmp_tgt->m_qfull_retries = QFULL_RETRIES; 15804 tmp_tgt->m_qfull_retry_interval = 15805 drv_usectohz(QFULL_RETRY_INTERVAL * 1000); 15806 tmp_tgt->m_t_throttle = MAX_THROTTLE; 15807 mutex_init(&tmp_tgt->m_tgt_intr_mutex, NULL, MUTEX_DRIVER, 15808 DDI_INTR_PRI(mpt->m_intr_pri)); 15809 15810 mptsas_hash_add(hashtab, tmp_tgt); 15811 15812 return (tmp_tgt); 15813 } 15814 15815 static void 15816 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid, 15817 mptsas_phymask_t phymask) 15818 { 15819 mptsas_target_t *tmp_tgt; 15820 tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask); 15821 if (tmp_tgt == NULL) { 15822 cmn_err(CE_WARN, "Tgt not found, nothing to free"); 15823 } else { 15824 mutex_destroy(&tmp_tgt->m_tgt_intr_mutex); 15825 kmem_free(tmp_tgt, sizeof (struct mptsas_target)); 15826 } 15827 } 15828 15829 /* 15830 * Return the entry in the hash table 15831 */ 15832 static mptsas_smp_t * 15833 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data) 15834 { 15835 uint64_t key1 = 
data->m_sasaddr; 15836 mptsas_phymask_t key2 = data->m_phymask; 15837 mptsas_smp_t *ret_data; 15838 15839 ret_data = mptsas_hash_search(hashtab, key1, key2); 15840 if (ret_data != NULL) { 15841 bcopy(data, ret_data, sizeof (mptsas_smp_t)); 15842 return (ret_data); 15843 } 15844 15845 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP); 15846 bcopy(data, ret_data, sizeof (mptsas_smp_t)); 15847 mptsas_hash_add(hashtab, ret_data); 15848 return (ret_data); 15849 } 15850 15851 static void 15852 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid, 15853 mptsas_phymask_t phymask) 15854 { 15855 mptsas_smp_t *tmp_smp; 15856 tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask); 15857 if (tmp_smp == NULL) { 15858 cmn_err(CE_WARN, "Smp element not found, nothing to free"); 15859 } else { 15860 kmem_free(tmp_smp, sizeof (struct mptsas_smp)); 15861 } 15862 } 15863 15864 /* 15865 * Hash operation functions 15866 * key1 is the sas_wwn, key2 is the phymask 15867 */ 15868 static void 15869 mptsas_hash_init(mptsas_hash_table_t *hashtab) 15870 { 15871 if (hashtab == NULL) { 15872 return; 15873 } 15874 bzero(hashtab->head, sizeof (mptsas_hash_node_t) * 15875 MPTSAS_HASH_ARRAY_SIZE); 15876 hashtab->cur = NULL; 15877 hashtab->line = 0; 15878 } 15879 15880 static void 15881 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen) 15882 { 15883 uint16_t line = 0; 15884 mptsas_hash_node_t *cur = NULL, *last = NULL; 15885 15886 if (hashtab == NULL) { 15887 return; 15888 } 15889 for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) { 15890 cur = hashtab->head[line]; 15891 while (cur != NULL) { 15892 last = cur; 15893 cur = cur->next; 15894 kmem_free(last->data, datalen); 15895 kmem_free(last, sizeof (mptsas_hash_node_t)); 15896 } 15897 } 15898 } 15899 15900 /* 15901 * You must guarantee the element doesn't exist in the hash table 15902 * before you call mptsas_hash_add() 15903 */ 15904 static void 15905 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data) 15906 { 15907 
	/*
	 * (tail of mptsas_hash_add) Insert "data" at the head of the chain
	 * selected by key1.  Duplicate (key1, key2) pairs are a caller bug
	 * (see ASSERT below).
	 */
	uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
	mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
	mptsas_hash_node_t **head = NULL;
	mptsas_hash_node_t *node = NULL;

	if (hashtab == NULL) {
		return;
	}
	ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
	/*
	 * NOTE(review): kmem_zalloc(..., KM_NOSLEEP) may return NULL under
	 * memory pressure; the dereference on the next line would then
	 * panic.  Worth adding a NULL check — confirm callers' context.
	 */
	node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
	node->data = data;

	/* Chain is selected by key1 only; key2 disambiguates on lookup. */
	head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
	if (*head == NULL) {
		*head = node;
	} else {
		node->next = *head;
		*head = node;
	}
}

/*
 * Remove and return the entry matching (key1, key2), or NULL if the
 * table is NULL or no such entry exists.  Frees the hash node itself;
 * ownership of the returned data stays with the caller.
 */
static void *
mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
    mptsas_phymask_t key2)
{
	mptsas_hash_node_t **head = NULL;
	mptsas_hash_node_t *last = NULL, *cur = NULL;
	mptsas_hash_data_t *data;
	if (hashtab == NULL) {
		return (NULL);
	}
	head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
	cur = *head;
	while (cur != NULL) {
		data = cur->data;
		if ((data->key1 == key1) && (data->key2 == key2)) {
			/* Unlink: "last" is the predecessor, if any. */
			if (last == NULL) {
				(*head) = cur->next;
			} else {
				last->next = cur->next;
			}
			kmem_free(cur, sizeof (mptsas_hash_node_t));
			return (data);
		} else {
			last = cur;
			cur = cur->next;
		}
	}
	return (NULL);
}

/*
 * Look up the entry matching (key1, key2) without modifying the table.
 * Returns the stored data pointer, or NULL if absent or hashtab is NULL.
 */
static void *
mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
    mptsas_phymask_t key2)
{
	mptsas_hash_node_t *cur = NULL;
	mptsas_hash_data_t *data;
	if (hashtab == NULL) {
		return (NULL);
	}
	cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
	while (cur != NULL) {
		data = cur->data;
		if ((data->key1 == key1) && (data->key2 == key2)) {
			return (data);
		} else {
			cur = cur->next;
		}
	}
	return (NULL);
}

/*
 * Iterate over every entry in the table.  Call with pos ==
 * MPTSAS_HASH_FIRST to (re)start; each subsequent call returns the next
 * entry's data, or NULL when the traversal is exhausted.  Iteration
 * state (line/cur) lives in the table itself, so only one traversal may
 * be in progress per table at a time, and the table must not be
 * modified mid-traversal.
 */
static void *
mptsas_hash_traverse(mptsas_hash_table_t *hashtab,
    int pos)
{
	mptsas_hash_node_t *this = NULL;

	if (hashtab == NULL) {
		return (NULL);
	}

	if (pos == MPTSAS_HASH_FIRST) {
		/* Reset iteration state to the first bucket. */
		hashtab->line = 0;
		hashtab->cur = NULL;
		this = hashtab->head[0];
	} else {
		if (hashtab->cur == NULL) {
			return (NULL);
		} else {
			this = hashtab->cur->next;
		}
	}

	/* Skip empty buckets until the next node or the end of the table. */
	while (this == NULL) {
		hashtab->line++;
		if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
			/* the traverse reaches the end */
			hashtab->cur = NULL;
			return (NULL);
		} else {
			this = hashtab->head[hashtab->line];
		}
	}
	hashtab->cur = this;
	return (this->data);
}

/*
 * Functions for SGPIO LED support
 */
/*
 * Map a dev_t to its dev_info node and fetch the node's "phymask"
 * property into *phymask (0 if the property is absent).  Returns NULL
 * if the device cannot be held.
 *
 * NOTE(review): the hold from e_ddi_hold_devi_by_dev() is released
 * before dip is returned, so the caller receives an unheld pointer —
 * presumably safe only while the device cannot detach; confirm against
 * the callers' locking.
 */
static dev_info_t *
mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
{
	dev_info_t *dip;
	int prop;
	dip = e_ddi_hold_devi_by_dev(dev, 0);
	if (dip == NULL)
		return (dip);
	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "phymask", 0);
	*phymask = (mptsas_phymask_t)prop;
	ddi_release_devi(dip);
	return (dip);
}

/*
 * Resolve a unit-address string to its target structure.  Addresses of
 * the form "w<wwn>,<lun>" are looked up by WWN; otherwise by PHY
 * number.  Returns NULL if the address does not parse or no target
 * matches.
 */
static mptsas_target_t *
mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
{
	uint8_t phynum;
	uint64_t wwn;
	int lun;
	mptsas_target_t *ptgt = NULL;

	if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
		return (NULL);
	}
	if (addr[0] == 'w') {
		ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
	} else {
		ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
	}
	return (ptgt);
}

#ifdef MPTSAS_GET_LED
/*
 * Read the current SEP slot status (LED state) for ptgt into
 * *slotstatus.  Thin wrapper over mptsas_send_sep().
 */
static int
mptsas_get_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *slotstatus)
{
	return (mptsas_send_sep(mpt, ptgt, slotstatus,
	    MPI2_SEP_REQ_ACTION_READ_STATUS));
}
#endif

/*
 * Write a new SEP slot status (LED state) for ptgt.  Thin wrapper over
 * mptsas_send_sep().
 */
static int
mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt, uint32_t slotstatus)
{
	NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
	    slotstatus, ptgt->m_slot_num));
	return (mptsas_send_sep(mpt, ptgt, &slotstatus,
	    MPI2_SEP_REQ_ACTION_WRITE_STATUS));
}

/*
 * send sep request, use enclosure/slot addressing
 *
 * Builds an MPI2 SCSI Enclosure Processor request addressed by the
 * target's enclosure handle and slot number and issues it via the
 * passthrough path (60-second timeout).  For WRITE_STATUS, *status is
 * sent; for other actions, *status is filled in from the reply.
 * Returns DDI_SUCCESS/DDI_FAILURE.  Caller must hold mpt->m_mutex.
 */
static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *status, uint8_t act)
{
	Mpi2SepRequest_t req;
	Mpi2SepReply_t rep;
	int ret;

	ASSERT(mutex_owned(&mpt->m_mutex));

	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));

	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	req.Action = act;
	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	/* Fields are little-endian on the wire; swap as needed. */
	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
	req.Slot = LE_16(ptgt->m_slot_num);
	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		req.SlotStatus = LE_32(*status);
	}
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
	if (ret != 0) {
		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
		    "Processor Request message error %d", ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the ioc status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* INVALID_FIELD means the IOC does not support this action. */
		if ((LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_INVALID_FIELD) {
			mptsas_log(mpt, CE_NOTE, "send sep act %x: Not "
			    "supported action, loginfo %x", act,
			    LE_32(rep.IOCLogInfo));
			return (DDI_FAILURE);
		}
		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
		    "status:%x", act, LE_16(rep.IOCStatus));
		return (DDI_FAILURE);
	}
	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		*status = LE_32(rep.SlotStatus);
	}

	return (DDI_SUCCESS);
}
16117 int 16118 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr, 16119 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp, 16120 uint32_t alloc_size, ddi_dma_cookie_t *cookiep) 16121 { 16122 ddi_dma_cookie_t new_cookie; 16123 size_t alloc_len; 16124 uint_t ncookie; 16125 16126 if (cookiep == NULL) 16127 cookiep = &new_cookie; 16128 16129 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP, 16130 NULL, dma_hdp) != DDI_SUCCESS) { 16131 dma_hdp = NULL; 16132 return (FALSE); 16133 } 16134 16135 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr, 16136 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len, 16137 acc_hdp) != DDI_SUCCESS) { 16138 ddi_dma_free_handle(dma_hdp); 16139 dma_hdp = NULL; 16140 return (FALSE); 16141 } 16142 16143 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len, 16144 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL, 16145 cookiep, &ncookie) != DDI_DMA_MAPPED) { 16146 (void) ddi_dma_mem_free(acc_hdp); 16147 ddi_dma_free_handle(dma_hdp); 16148 dma_hdp = NULL; 16149 return (FALSE); 16150 } 16151 16152 return (TRUE); 16153 } 16154 16155 void 16156 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp) 16157 { 16158 if (*dma_hdp == NULL) 16159 return; 16160 16161 (void) ddi_dma_unbind_handle(*dma_hdp); 16162 (void) ddi_dma_mem_free(acc_hdp); 16163 ddi_dma_free_handle(dma_hdp); 16164 dma_hdp = NULL; 16165 } 16166 16167 static int 16168 mptsas_outstanding_cmds_n(mptsas_t *mpt) 16169 { 16170 int n = 0, i; 16171 for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) { 16172 mutex_enter(&mpt->m_slot_freeq_pairp[i]. 16173 m_slot_allocq.s.m_fq_mutex); 16174 mutex_enter(&mpt->m_slot_freeq_pairp[i]. 
16175 m_slot_releq.s.m_fq_mutex); 16176 n += (mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n_init - 16177 mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n - 16178 mpt->m_slot_freeq_pairp[i].m_slot_releq.s.m_fq_n); 16179 mutex_exit(&mpt->m_slot_freeq_pairp[i]. 16180 m_slot_releq.s.m_fq_mutex); 16181 mutex_exit(&mpt->m_slot_freeq_pairp[i]. 16182 m_slot_allocq.s.m_fq_mutex); 16183 } 16184 if (mpt->m_max_requests - 2 < n) 16185 panic("mptsas: free slot allocq and releq crazy"); 16186 return (n); 16187 } 16188