// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/inet.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#include "ql4_83xx.h"

/*
 * Driver version
 */
static char qla4xxx_version_str[40];

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * Module parameter information and variables
 */
static int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
		 " Set to disable exporting boot targets to sysfs.\n"
		 "\t\t 0 - Export boot targets\n"
		 "\t\t 1 - Do not export boot targets (Default)");

int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
		 " Don't reset the HBA for driver recovery.\n"
		 "\t\t 0 - It will reset HBA (Default)\n"
		 "\t\t 1 - It will NOT reset HBA");

int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
		 " Option to enable extended error logging.\n"
		 "\t\t 0 - no logging (Default)\n"
		 "\t\t 2 - debug logging");

int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
		 "\t\t 0 = enable INTx interrupt mechanism.\n"
		 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
		 "\t\t 2 = enable MSI interrupt mechanism.");

#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
		 " Maximum queue depth to report for target devices.\n"
		 "\t\t Default: 32.");

static int ql4xqfulltracking = 1;
module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xqfulltracking,
		 " Enable or disable dynamic tracking and adjustment of\n"
		 "\t\t scsi device queue depth.\n"
		 "\t\t 0 - Disable.\n"
		 "\t\t 1 - Enable. (Default)");
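
/*
 * Illustrative usage sketch for the parameters above (assuming the module
 * is loaded under its usual name, qla4xxx):
 *
 *	# load with extended error logging and MSI interrupts
 *	modprobe qla4xxx ql4xextended_error_logging=2 ql4xenablemsix=2
 *
 *	# parameters declared with S_IWUSR are also writable through sysfs
 *	echo 64 > /sys/module/qla4xxx/parameters/ql4xmaxqdepth
 *
 * Parameters registered with S_IRUGO only (for example ql4xsess_recovery_tmo
 * and ql4xmdcapmask below) are read-only once the module is loaded.
 */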
(Default)"); 78 79 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 80 module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 81 MODULE_PARM_DESC(ql4xsess_recovery_tmo, 82 " Target Session Recovery Timeout.\n" 83 "\t\t Default: 120 sec."); 84 85 int ql4xmdcapmask = 0; 86 module_param(ql4xmdcapmask, int, S_IRUGO); 87 MODULE_PARM_DESC(ql4xmdcapmask, 88 " Set the Minidump driver capture mask level.\n" 89 "\t\t Default is 0 (firmware default capture mask)\n" 90 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); 91 92 int ql4xenablemd = 1; 93 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); 94 MODULE_PARM_DESC(ql4xenablemd, 95 " Set to enable minidump.\n" 96 "\t\t 0 - disable minidump\n" 97 "\t\t 1 - enable minidump (Default)"); 98 99 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 100 /* 101 * SCSI host template entry points 102 */ 103 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); 104 105 /* 106 * iSCSI template entry points 107 */ 108 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 109 enum iscsi_param param, char *buf); 110 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, 111 enum iscsi_param param, char *buf); 112 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 113 enum iscsi_host_param param, char *buf); 114 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, 115 uint32_t len); 116 static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 117 enum iscsi_param_type param_type, 118 int param, char *buf); 119 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); 120 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, 121 struct sockaddr *dst_addr, 122 int non_blocking); 123 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); 124 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); 125 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 126 enum iscsi_param param, char *buf); 127 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); 128 static struct iscsi_cls_conn * 129 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); 130 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 131 struct iscsi_cls_conn *cls_conn, 132 uint64_t transport_fd, int is_leading); 133 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); 134 static struct iscsi_cls_session * 135 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, 136 uint16_t qdepth, uint32_t initial_cmdsn); 137 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); 138 static void qla4xxx_task_work(struct work_struct *wdata); 139 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t); 140 static int qla4xxx_task_xmit(struct iscsi_task *); 141 static void qla4xxx_task_cleanup(struct iscsi_task *); 142 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); 143 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 144 struct iscsi_stats *stats); 145 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 146 uint32_t iface_type, uint32_t payload_size, 147 uint32_t pid, struct sockaddr *dst_addr); 148 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 149 uint32_t *num_entries, char *buf); 150 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); 151 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, 152 int len); 153 static int 
qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len); 154 155 /* 156 * SCSI host template entry points 157 */ 158 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); 159 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); 160 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); 161 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); 162 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); 163 static int qla4xxx_slave_alloc(struct scsi_device *device); 164 static umode_t qla4_attr_is_visible(int param_type, int param); 165 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 166 167 /* 168 * iSCSI Flash DDB sysfs entry points 169 */ 170 static int 171 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 172 struct iscsi_bus_flash_conn *fnode_conn, 173 void *data, int len); 174 static int 175 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 176 int param, char *buf); 177 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 178 int len); 179 static int 180 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess); 181 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 182 struct iscsi_bus_flash_conn *fnode_conn); 183 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 184 struct iscsi_bus_flash_conn *fnode_conn); 185 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess); 186 187 static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 188 QLA82XX_LEGACY_INTR_CONFIG; 189 190 static const uint32_t qla4_82xx_reg_tbl[] = { 191 QLA82XX_PEG_HALT_STATUS1, 192 QLA82XX_PEG_HALT_STATUS2, 193 QLA82XX_PEG_ALIVE_COUNTER, 194 QLA82XX_CRB_DRV_ACTIVE, 195 QLA82XX_CRB_DEV_STATE, 196 QLA82XX_CRB_DRV_STATE, 197 QLA82XX_CRB_DRV_SCRATCH, 198 QLA82XX_CRB_DEV_PART_INFO, 199 QLA82XX_CRB_DRV_IDC_VERSION, 200 QLA82XX_FW_VERSION_MAJOR, 201 QLA82XX_FW_VERSION_MINOR, 202 QLA82XX_FW_VERSION_SUB, 203 CRB_CMDPEG_STATE, 204 CRB_TEMP_STATE, 205 }; 206 207 static const uint32_t qla4_83xx_reg_tbl[] = { 208 QLA83XX_PEG_HALT_STATUS1, 209 QLA83XX_PEG_HALT_STATUS2, 210 QLA83XX_PEG_ALIVE_COUNTER, 211 QLA83XX_CRB_DRV_ACTIVE, 212 QLA83XX_CRB_DEV_STATE, 213 QLA83XX_CRB_DRV_STATE, 214 QLA83XX_CRB_DRV_SCRATCH, 215 QLA83XX_CRB_DEV_PART_INFO1, 216 QLA83XX_CRB_IDC_VER_MAJOR, 217 QLA83XX_FW_VER_MAJOR, 218 QLA83XX_FW_VER_MINOR, 219 QLA83XX_FW_VER_SUB, 220 QLA83XX_CMDPEG_STATE, 221 QLA83XX_ASIC_TEMP, 222 }; 223 224 static struct scsi_host_template qla4xxx_driver_template = { 225 .module = THIS_MODULE, 226 .name = DRIVER_NAME, 227 .proc_name = DRIVER_NAME, 228 .queuecommand = qla4xxx_queuecommand, 229 230 .eh_abort_handler = qla4xxx_eh_abort, 231 .eh_device_reset_handler = qla4xxx_eh_device_reset, 232 .eh_target_reset_handler = qla4xxx_eh_target_reset, 233 .eh_host_reset_handler = qla4xxx_eh_host_reset, 234 .eh_timed_out = qla4xxx_eh_cmd_timed_out, 235 236 .slave_alloc = qla4xxx_slave_alloc, 237 .change_queue_depth = scsi_change_queue_depth, 238 239 .this_id = -1, 240 .cmd_per_lun = 3, 241 .sg_tablesize = SG_ALL, 242 243 .max_sectors = 0xFFFF, 244 .shost_attrs = qla4xxx_host_attrs, 245 .host_reset = qla4xxx_host_reset, 246 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, 247 }; 248 249 static struct iscsi_transport qla4xxx_iscsi_transport = { 250 .owner = THIS_MODULE, 251 .name = DRIVER_NAME, 252 .caps = CAP_TEXT_NEGO | 253 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | 254 CAP_DATADGST | CAP_LOGIN_OFFLOAD | 255 
CAP_MULTI_R2T, 256 .attr_is_visible = qla4_attr_is_visible, 257 .create_session = qla4xxx_session_create, 258 .destroy_session = qla4xxx_session_destroy, 259 .start_conn = qla4xxx_conn_start, 260 .create_conn = qla4xxx_conn_create, 261 .bind_conn = qla4xxx_conn_bind, 262 .stop_conn = iscsi_conn_stop, 263 .destroy_conn = qla4xxx_conn_destroy, 264 .set_param = iscsi_set_param, 265 .get_conn_param = qla4xxx_conn_get_param, 266 .get_session_param = qla4xxx_session_get_param, 267 .get_ep_param = qla4xxx_get_ep_param, 268 .ep_connect = qla4xxx_ep_connect, 269 .ep_poll = qla4xxx_ep_poll, 270 .ep_disconnect = qla4xxx_ep_disconnect, 271 .get_stats = qla4xxx_conn_get_stats, 272 .send_pdu = iscsi_conn_send_pdu, 273 .xmit_task = qla4xxx_task_xmit, 274 .cleanup_task = qla4xxx_task_cleanup, 275 .alloc_pdu = qla4xxx_alloc_pdu, 276 277 .get_host_param = qla4xxx_host_get_param, 278 .set_iface_param = qla4xxx_iface_set_param, 279 .get_iface_param = qla4xxx_get_iface_param, 280 .bsg_request = qla4xxx_bsg_request, 281 .send_ping = qla4xxx_send_ping, 282 .get_chap = qla4xxx_get_chap_list, 283 .delete_chap = qla4xxx_delete_chap, 284 .set_chap = qla4xxx_set_chap_entry, 285 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param, 286 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param, 287 .new_flashnode = qla4xxx_sysfs_ddb_add, 288 .del_flashnode = qla4xxx_sysfs_ddb_delete, 289 .login_flashnode = qla4xxx_sysfs_ddb_login, 290 .logout_flashnode = qla4xxx_sysfs_ddb_logout, 291 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid, 292 .get_host_stats = qla4xxx_get_host_stats, 293 }; 294 295 static struct scsi_transport_template *qla4xxx_scsi_transport; 296 297 static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) 298 { 299 u32 reg_val = 0; 300 int rval = QLA_SUCCESS; 301 302 if (is_qla8022(ha)) 303 reg_val = readl(&ha->qla4_82xx_reg->host_status); 304 else if (is_qla8032(ha) || is_qla8042(ha)) 305 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); 306 else 307 reg_val = readw(&ha->reg->ctrl_status); 308 309 if (reg_val == QL4_ISP_REG_DISCONNECT) 310 rval = QLA_ERROR; 311 312 return rval; 313 } 314 315 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 316 uint32_t iface_type, uint32_t payload_size, 317 uint32_t pid, struct sockaddr *dst_addr) 318 { 319 struct scsi_qla_host *ha = to_qla_host(shost); 320 struct sockaddr_in *addr; 321 struct sockaddr_in6 *addr6; 322 uint32_t options = 0; 323 uint8_t ipaddr[IPv6_ADDR_LEN]; 324 int rval; 325 326 memset(ipaddr, 0, IPv6_ADDR_LEN); 327 /* IPv4 to IPv4 */ 328 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) && 329 (dst_addr->sa_family == AF_INET)) { 330 addr = (struct sockaddr_in *)dst_addr; 331 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN); 332 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 " 333 "dest: %pI4\n", __func__, 334 &ha->ip_config.ip_address, ipaddr)); 335 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, 336 ipaddr); 337 if (rval) 338 rval = -EINVAL; 339 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) && 340 (dst_addr->sa_family == AF_INET6)) { 341 /* IPv6 to IPv6 */ 342 addr6 = (struct sockaddr_in6 *)dst_addr; 343 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN); 344 345 options |= PING_IPV6_PROTOCOL_ENABLE; 346 347 /* Ping using LinkLocal address */ 348 if ((iface_num == 0) || (iface_num == 1)) { 349 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping " 350 "src: %pI6 dest: %pI6\n", __func__, 351 &ha->ip_config.ipv6_link_local_addr, 352 ipaddr)); 353 options |= PING_IPV6_LINKLOCAL_ADDR; 354 rval 
= qla4xxx_ping_iocb(ha, options, payload_size, 355 pid, ipaddr); 356 } else { 357 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d " 358 "not supported\n", __func__, iface_num); 359 rval = -ENOSYS; 360 goto exit_send_ping; 361 } 362 363 /* 364 * If ping using LinkLocal address fails, try ping using 365 * IPv6 address 366 */ 367 if (rval != QLA_SUCCESS) { 368 options &= ~PING_IPV6_LINKLOCAL_ADDR; 369 if (iface_num == 0) { 370 options |= PING_IPV6_ADDR0; 371 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " 372 "Ping src: %pI6 " 373 "dest: %pI6\n", __func__, 374 &ha->ip_config.ipv6_addr0, 375 ipaddr)); 376 } else if (iface_num == 1) { 377 options |= PING_IPV6_ADDR1; 378 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " 379 "Ping src: %pI6 " 380 "dest: %pI6\n", __func__, 381 &ha->ip_config.ipv6_addr1, 382 ipaddr)); 383 } 384 rval = qla4xxx_ping_iocb(ha, options, payload_size, 385 pid, ipaddr); 386 if (rval) 387 rval = -EINVAL; 388 } 389 } else 390 rval = -ENOSYS; 391 exit_send_ping: 392 return rval; 393 } 394 395 static umode_t qla4_attr_is_visible(int param_type, int param) 396 { 397 switch (param_type) { 398 case ISCSI_HOST_PARAM: 399 switch (param) { 400 case ISCSI_HOST_PARAM_HWADDRESS: 401 case ISCSI_HOST_PARAM_IPADDRESS: 402 case ISCSI_HOST_PARAM_INITIATOR_NAME: 403 case ISCSI_HOST_PARAM_PORT_STATE: 404 case ISCSI_HOST_PARAM_PORT_SPEED: 405 return S_IRUGO; 406 default: 407 return 0; 408 } 409 case ISCSI_PARAM: 410 switch (param) { 411 case ISCSI_PARAM_PERSISTENT_ADDRESS: 412 case ISCSI_PARAM_PERSISTENT_PORT: 413 case ISCSI_PARAM_CONN_ADDRESS: 414 case ISCSI_PARAM_CONN_PORT: 415 case ISCSI_PARAM_TARGET_NAME: 416 case ISCSI_PARAM_TPGT: 417 case ISCSI_PARAM_TARGET_ALIAS: 418 case ISCSI_PARAM_MAX_BURST: 419 case ISCSI_PARAM_MAX_R2T: 420 case ISCSI_PARAM_FIRST_BURST: 421 case ISCSI_PARAM_MAX_RECV_DLENGTH: 422 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 423 case ISCSI_PARAM_IFACE_NAME: 424 case ISCSI_PARAM_CHAP_OUT_IDX: 425 case ISCSI_PARAM_CHAP_IN_IDX: 426 case ISCSI_PARAM_USERNAME: 427 case ISCSI_PARAM_PASSWORD: 428 case ISCSI_PARAM_USERNAME_IN: 429 case ISCSI_PARAM_PASSWORD_IN: 430 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: 431 case ISCSI_PARAM_DISCOVERY_SESS: 432 case ISCSI_PARAM_PORTAL_TYPE: 433 case ISCSI_PARAM_CHAP_AUTH_EN: 434 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: 435 case ISCSI_PARAM_BIDI_CHAP_EN: 436 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: 437 case ISCSI_PARAM_DEF_TIME2WAIT: 438 case ISCSI_PARAM_DEF_TIME2RETAIN: 439 case ISCSI_PARAM_HDRDGST_EN: 440 case ISCSI_PARAM_DATADGST_EN: 441 case ISCSI_PARAM_INITIAL_R2T_EN: 442 case ISCSI_PARAM_IMM_DATA_EN: 443 case ISCSI_PARAM_PDU_INORDER_EN: 444 case ISCSI_PARAM_DATASEQ_INORDER_EN: 445 case ISCSI_PARAM_MAX_SEGMENT_SIZE: 446 case ISCSI_PARAM_TCP_TIMESTAMP_STAT: 447 case ISCSI_PARAM_TCP_WSF_DISABLE: 448 case ISCSI_PARAM_TCP_NAGLE_DISABLE: 449 case ISCSI_PARAM_TCP_TIMER_SCALE: 450 case ISCSI_PARAM_TCP_TIMESTAMP_EN: 451 case ISCSI_PARAM_TCP_XMIT_WSF: 452 case ISCSI_PARAM_TCP_RECV_WSF: 453 case ISCSI_PARAM_IP_FRAGMENT_DISABLE: 454 case ISCSI_PARAM_IPV4_TOS: 455 case ISCSI_PARAM_IPV6_TC: 456 case ISCSI_PARAM_IPV6_FLOW_LABEL: 457 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: 458 case ISCSI_PARAM_KEEPALIVE_TMO: 459 case ISCSI_PARAM_LOCAL_PORT: 460 case ISCSI_PARAM_ISID: 461 case ISCSI_PARAM_TSID: 462 case ISCSI_PARAM_DEF_TASKMGMT_TMO: 463 case ISCSI_PARAM_ERL: 464 case ISCSI_PARAM_STATSN: 465 case ISCSI_PARAM_EXP_STATSN: 466 case ISCSI_PARAM_DISCOVERY_PARENT_IDX: 467 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: 468 case ISCSI_PARAM_LOCAL_IPADDR: 469 return S_IRUGO; 470 default: 471 
return 0; 472 } 473 case ISCSI_NET_PARAM: 474 switch (param) { 475 case ISCSI_NET_PARAM_IPV4_ADDR: 476 case ISCSI_NET_PARAM_IPV4_SUBNET: 477 case ISCSI_NET_PARAM_IPV4_GW: 478 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 479 case ISCSI_NET_PARAM_IFACE_ENABLE: 480 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 481 case ISCSI_NET_PARAM_IPV6_ADDR: 482 case ISCSI_NET_PARAM_IPV6_ROUTER: 483 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 484 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 485 case ISCSI_NET_PARAM_VLAN_ID: 486 case ISCSI_NET_PARAM_VLAN_PRIORITY: 487 case ISCSI_NET_PARAM_VLAN_ENABLED: 488 case ISCSI_NET_PARAM_MTU: 489 case ISCSI_NET_PARAM_PORT: 490 case ISCSI_NET_PARAM_IPADDR_STATE: 491 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 492 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 493 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 494 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 495 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 496 case ISCSI_NET_PARAM_TCP_WSF: 497 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 498 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 499 case ISCSI_NET_PARAM_CACHE_ID: 500 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 501 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 502 case ISCSI_NET_PARAM_IPV4_TOS_EN: 503 case ISCSI_NET_PARAM_IPV4_TOS: 504 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 505 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 506 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 507 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 508 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 509 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 510 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 511 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 512 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 513 case ISCSI_NET_PARAM_REDIRECT_EN: 514 case ISCSI_NET_PARAM_IPV4_TTL: 515 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 516 case ISCSI_NET_PARAM_IPV6_MLD_EN: 517 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 518 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 519 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 520 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 521 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 522 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 523 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 524 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 525 return S_IRUGO; 526 default: 527 return 0; 528 } 529 case ISCSI_IFACE_PARAM: 530 switch (param) { 531 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 532 case ISCSI_IFACE_PARAM_HDRDGST_EN: 533 case ISCSI_IFACE_PARAM_DATADGST_EN: 534 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 535 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 536 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 537 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 538 case ISCSI_IFACE_PARAM_ERL: 539 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 540 case ISCSI_IFACE_PARAM_FIRST_BURST: 541 case ISCSI_IFACE_PARAM_MAX_R2T: 542 case ISCSI_IFACE_PARAM_MAX_BURST: 543 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 544 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 545 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 546 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 547 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 548 case ISCSI_IFACE_PARAM_INITIATOR_NAME: 549 return S_IRUGO; 550 default: 551 return 0; 552 } 553 case ISCSI_FLASHNODE_PARAM: 554 switch (param) { 555 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 556 case ISCSI_FLASHNODE_PORTAL_TYPE: 557 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 558 case ISCSI_FLASHNODE_DISCOVERY_SESS: 559 case ISCSI_FLASHNODE_ENTRY_EN: 560 case ISCSI_FLASHNODE_HDR_DGST_EN: 561 case ISCSI_FLASHNODE_DATA_DGST_EN: 562 case ISCSI_FLASHNODE_IMM_DATA_EN: 563 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 564 case ISCSI_FLASHNODE_DATASEQ_INORDER: 
		case ISCSI_FLASHNODE_PDU_INORDER:
		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
		case ISCSI_FLASHNODE_SNACK_REQ_EN:
		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
		case ISCSI_FLASHNODE_ERL:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
		case ISCSI_FLASHNODE_FIRST_BURST:
		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
		case ISCSI_FLASHNODE_MAX_R2T:
		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
		case ISCSI_FLASHNODE_ISID:
		case ISCSI_FLASHNODE_TSID:
		case ISCSI_FLASHNODE_PORT:
		case ISCSI_FLASHNODE_MAX_BURST:
		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
		case ISCSI_FLASHNODE_IPADDR:
		case ISCSI_FLASHNODE_ALIAS:
		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
		case ISCSI_FLASHNODE_LOCAL_PORT:
		case ISCSI_FLASHNODE_IPV4_TOS:
		case ISCSI_FLASHNODE_IPV6_TC:
		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
		case ISCSI_FLASHNODE_NAME:
		case ISCSI_FLASHNODE_TPGT:
		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
		case ISCSI_FLASHNODE_TCP_RECV_WSF:
		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
		case ISCSI_FLASHNODE_USERNAME:
		case ISCSI_FLASHNODE_PASSWORD:
		case ISCSI_FLASHNODE_STATSN:
		case ISCSI_FLASHNODE_EXP_STATSN:
		case ISCSI_FLASHNODE_IS_BOOT_TGT:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

/**
 * qla4xxx_create_chap_list - Create CHAP list from FLASH
 * @ha: pointer to adapter structure
 *
 * Read flash and build a list of CHAP entries. During login, when a CHAP
 * entry is received it is looked up in this list. If the entry exists, its
 * index is set in the DDB; if it does not, a new entry is added to the CHAP
 * table in FLASH and the index obtained is used in the DDB.
 **/
static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
{
	int rval = 0;
	uint8_t *chap_flash_data = NULL;
	uint32_t offset;
	dma_addr_t chap_dma;
	uint32_t chap_size = 0;

	if (is_qla40XX(ha))
		chap_size = MAX_CHAP_ENTRIES_40XX *
			    sizeof(struct ql4_chap_table);
	else	/* Single region contains CHAP info for both
		 * ports which is divided into half for each port.
		 */
		chap_size = ha->hw.flt_chap_size / 2;

	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
					     &chap_dma, GFP_KERNEL);
	if (!chap_flash_data) {
		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
		return;
	}

	if (is_qla40XX(ha)) {
		offset = FLASH_CHAP_OFFSET;
	} else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (rval != QLA_SUCCESS)
		goto exit_chap_list;

	if (ha->chap_list == NULL)
		ha->chap_list = vmalloc(chap_size);
	if (ha->chap_list == NULL) {
		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
		goto exit_chap_list;
	}

	memset(ha->chap_list, 0, chap_size);
	memcpy(ha->chap_list, chap_flash_data, chap_size);

exit_chap_list:
	dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
}
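
/*
 * A minimal illustrative sketch, kept under #if 0 and never compiled: the
 * flash-offset arithmetic above is repeated by other CHAP helpers in this
 * file, so it is collected here in one hypothetical helper for clarity.
 * The name ql4x_chap_region_offset is invented; the fields and macros it
 * uses are the ones referenced by qla4xxx_create_chap_list() above.
 */
#if 0
static uint32_t ql4x_chap_region_offset(struct scsi_qla_host *ha)
{
	uint32_t offset;

	/* ISP4xxx keeps its CHAP table at a fixed flash offset */
	if (is_qla40XX(ha))
		return FLASH_CHAP_OFFSET;

	/* ISP8xxx shares one CHAP region between both ports ... */
	offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);

	/* ... with the second port using the upper half of that region */
	if (ha->port_num == 1)
		offset += ha->hw.flt_chap_size / 2;

	return offset;
}
#endif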

static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
				     int16_t chap_index,
				     struct ql4_chap_table **chap_entry)
{
	int rval = QLA_ERROR;
	int max_chap_entries;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
		goto exit_get_chap;
	}

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_index > max_chap_entries) {
		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
		goto exit_get_chap;
	}

	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
	if ((*chap_entry)->cookie !=
	    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
		*chap_entry = NULL;
	} else {
		rval = QLA_SUCCESS;
	}

exit_get_chap:
	return rval;
}

/**
 * qla4xxx_find_free_chap_index - Find the first free chap index
 * @ha: pointer to adapter structure
 * @chap_index: CHAP index to be returned
 *
 * Find the first free chap index available in the chap table
 *
 * Note: Caller should acquire the chap lock before getting here.
722 **/ 723 static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, 724 uint16_t *chap_index) 725 { 726 int i, rval; 727 int free_index = -1; 728 int max_chap_entries = 0; 729 struct ql4_chap_table *chap_table; 730 731 if (is_qla80XX(ha)) 732 max_chap_entries = (ha->hw.flt_chap_size / 2) / 733 sizeof(struct ql4_chap_table); 734 else 735 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 736 737 if (!ha->chap_list) { 738 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); 739 rval = QLA_ERROR; 740 goto exit_find_chap; 741 } 742 743 for (i = 0; i < max_chap_entries; i++) { 744 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 745 746 if ((chap_table->cookie != 747 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) && 748 (i > MAX_RESRV_CHAP_IDX)) { 749 free_index = i; 750 break; 751 } 752 } 753 754 if (free_index != -1) { 755 *chap_index = free_index; 756 rval = QLA_SUCCESS; 757 } else { 758 rval = QLA_ERROR; 759 } 760 761 exit_find_chap: 762 return rval; 763 } 764 765 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 766 uint32_t *num_entries, char *buf) 767 { 768 struct scsi_qla_host *ha = to_qla_host(shost); 769 struct ql4_chap_table *chap_table; 770 struct iscsi_chap_rec *chap_rec; 771 int max_chap_entries = 0; 772 int valid_chap_entries = 0; 773 int ret = 0, i; 774 775 if (is_qla80XX(ha)) 776 max_chap_entries = (ha->hw.flt_chap_size / 2) / 777 sizeof(struct ql4_chap_table); 778 else 779 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 780 781 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n", 782 __func__, *num_entries, chap_tbl_idx); 783 784 if (!buf) { 785 ret = -ENOMEM; 786 goto exit_get_chap_list; 787 } 788 789 qla4xxx_create_chap_list(ha); 790 791 chap_rec = (struct iscsi_chap_rec *) buf; 792 mutex_lock(&ha->chap_sem); 793 for (i = chap_tbl_idx; i < max_chap_entries; i++) { 794 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 795 if (chap_table->cookie != 796 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) 797 continue; 798 799 chap_rec->chap_tbl_idx = i; 800 strlcpy(chap_rec->username, chap_table->name, 801 ISCSI_CHAP_AUTH_NAME_MAX_LEN); 802 strlcpy(chap_rec->password, chap_table->secret, 803 QL4_CHAP_MAX_SECRET_LEN); 804 chap_rec->password_length = chap_table->secret_len; 805 806 if (chap_table->flags & BIT_7) /* local */ 807 chap_rec->chap_type = CHAP_TYPE_OUT; 808 809 if (chap_table->flags & BIT_6) /* peer */ 810 chap_rec->chap_type = CHAP_TYPE_IN; 811 812 chap_rec++; 813 814 valid_chap_entries++; 815 if (valid_chap_entries == *num_entries) 816 break; 817 else 818 continue; 819 } 820 mutex_unlock(&ha->chap_sem); 821 822 exit_get_chap_list: 823 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n", 824 __func__, valid_chap_entries); 825 *num_entries = valid_chap_entries; 826 return ret; 827 } 828 829 static int __qla4xxx_is_chap_active(struct device *dev, void *data) 830 { 831 int ret = 0; 832 uint16_t *chap_tbl_idx = (uint16_t *) data; 833 struct iscsi_cls_session *cls_session; 834 struct iscsi_session *sess; 835 struct ddb_entry *ddb_entry; 836 837 if (!iscsi_is_session_dev(dev)) 838 goto exit_is_chap_active; 839 840 cls_session = iscsi_dev_to_session(dev); 841 sess = cls_session->dd_data; 842 ddb_entry = sess->dd_data; 843 844 if (iscsi_is_session_online(cls_session)) 845 goto exit_is_chap_active; 846 847 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx) 848 ret = 1; 849 850 exit_is_chap_active: 851 return ret; 852 } 853 854 static int qla4xxx_is_chap_active(struct Scsi_Host *shost, 855 uint16_t chap_tbl_idx) 856 { 857 int ret 
= 0; 858 859 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx, 860 __qla4xxx_is_chap_active); 861 862 return ret; 863 } 864 865 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx) 866 { 867 struct scsi_qla_host *ha = to_qla_host(shost); 868 struct ql4_chap_table *chap_table; 869 dma_addr_t chap_dma; 870 int max_chap_entries = 0; 871 uint32_t offset = 0; 872 uint32_t chap_size; 873 int ret = 0; 874 875 chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); 876 if (chap_table == NULL) 877 return -ENOMEM; 878 879 if (is_qla80XX(ha)) 880 max_chap_entries = (ha->hw.flt_chap_size / 2) / 881 sizeof(struct ql4_chap_table); 882 else 883 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 884 885 if (chap_tbl_idx > max_chap_entries) { 886 ret = -EINVAL; 887 goto exit_delete_chap; 888 } 889 890 /* Check if chap index is in use. 891 * If chap is in use don't delet chap entry */ 892 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx); 893 if (ret) { 894 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot " 895 "delete from flash\n", chap_tbl_idx); 896 ret = -EBUSY; 897 goto exit_delete_chap; 898 } 899 900 chap_size = sizeof(struct ql4_chap_table); 901 if (is_qla40XX(ha)) 902 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size); 903 else { 904 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); 905 /* flt_chap_size is CHAP table size for both ports 906 * so divide it by 2 to calculate the offset for second port 907 */ 908 if (ha->port_num == 1) 909 offset += (ha->hw.flt_chap_size / 2); 910 offset += (chap_tbl_idx * chap_size); 911 } 912 913 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); 914 if (ret != QLA_SUCCESS) { 915 ret = -EINVAL; 916 goto exit_delete_chap; 917 } 918 919 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n", 920 __le16_to_cpu(chap_table->cookie))); 921 922 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) { 923 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n"); 924 goto exit_delete_chap; 925 } 926 927 chap_table->cookie = __constant_cpu_to_le16(0xFFFF); 928 929 offset = FLASH_CHAP_OFFSET | 930 (chap_tbl_idx * sizeof(struct ql4_chap_table)); 931 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size, 932 FLASH_OPT_RMW_COMMIT); 933 if (ret == QLA_SUCCESS && ha->chap_list) { 934 mutex_lock(&ha->chap_sem); 935 /* Update ha chap_list cache */ 936 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx, 937 chap_table, sizeof(struct ql4_chap_table)); 938 mutex_unlock(&ha->chap_sem); 939 } 940 if (ret != QLA_SUCCESS) 941 ret = -EINVAL; 942 943 exit_delete_chap: 944 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); 945 return ret; 946 } 947 948 /** 949 * qla4xxx_set_chap_entry - Make chap entry with given information 950 * @shost: pointer to host 951 * @data: chap info - credentials, index and type to make chap entry 952 * @len: length of data 953 * 954 * Add or update chap entry with the given information 955 **/ 956 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) 957 { 958 struct scsi_qla_host *ha = to_qla_host(shost); 959 struct iscsi_chap_rec chap_rec; 960 struct ql4_chap_table *chap_entry = NULL; 961 struct iscsi_param_info *param_info; 962 struct nlattr *attr; 963 int max_chap_entries = 0; 964 int type; 965 int rem = len; 966 int rc = 0; 967 int size; 968 969 memset(&chap_rec, 0, sizeof(chap_rec)); 970 971 nla_for_each_attr(attr, data, len, rem) { 972 param_info = nla_data(attr); 973 974 switch (param_info->param) { 975 case 
ISCSI_CHAP_PARAM_INDEX: 976 chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value; 977 break; 978 case ISCSI_CHAP_PARAM_CHAP_TYPE: 979 chap_rec.chap_type = param_info->value[0]; 980 break; 981 case ISCSI_CHAP_PARAM_USERNAME: 982 size = min_t(size_t, sizeof(chap_rec.username), 983 param_info->len); 984 memcpy(chap_rec.username, param_info->value, size); 985 break; 986 case ISCSI_CHAP_PARAM_PASSWORD: 987 size = min_t(size_t, sizeof(chap_rec.password), 988 param_info->len); 989 memcpy(chap_rec.password, param_info->value, size); 990 break; 991 case ISCSI_CHAP_PARAM_PASSWORD_LEN: 992 chap_rec.password_length = param_info->value[0]; 993 break; 994 default: 995 ql4_printk(KERN_ERR, ha, 996 "%s: No such sysfs attribute\n", __func__); 997 rc = -ENOSYS; 998 goto exit_set_chap; 999 } 1000 } 1001 1002 if (chap_rec.chap_type == CHAP_TYPE_IN) 1003 type = BIDI_CHAP; 1004 else 1005 type = LOCAL_CHAP; 1006 1007 if (is_qla80XX(ha)) 1008 max_chap_entries = (ha->hw.flt_chap_size / 2) / 1009 sizeof(struct ql4_chap_table); 1010 else 1011 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 1012 1013 mutex_lock(&ha->chap_sem); 1014 if (chap_rec.chap_tbl_idx < max_chap_entries) { 1015 rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, 1016 &chap_entry); 1017 if (!rc) { 1018 if (!(type == qla4xxx_get_chap_type(chap_entry))) { 1019 ql4_printk(KERN_INFO, ha, 1020 "Type mismatch for CHAP entry %d\n", 1021 chap_rec.chap_tbl_idx); 1022 rc = -EINVAL; 1023 goto exit_unlock_chap; 1024 } 1025 1026 /* If chap index is in use then don't modify it */ 1027 rc = qla4xxx_is_chap_active(shost, 1028 chap_rec.chap_tbl_idx); 1029 if (rc) { 1030 ql4_printk(KERN_INFO, ha, 1031 "CHAP entry %d is in use\n", 1032 chap_rec.chap_tbl_idx); 1033 rc = -EBUSY; 1034 goto exit_unlock_chap; 1035 } 1036 } 1037 } else { 1038 rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); 1039 if (rc) { 1040 ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); 1041 rc = -EBUSY; 1042 goto exit_unlock_chap; 1043 } 1044 } 1045 1046 rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, 1047 chap_rec.chap_tbl_idx, type); 1048 1049 exit_unlock_chap: 1050 mutex_unlock(&ha->chap_sem); 1051 1052 exit_set_chap: 1053 return rc; 1054 } 1055 1056 1057 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) 1058 { 1059 struct scsi_qla_host *ha = to_qla_host(shost); 1060 struct iscsi_offload_host_stats *host_stats = NULL; 1061 int host_stats_size; 1062 int ret = 0; 1063 int ddb_idx = 0; 1064 struct ql_iscsi_stats *ql_iscsi_stats = NULL; 1065 int stats_size; 1066 dma_addr_t iscsi_stats_dma; 1067 1068 DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__)); 1069 1070 host_stats_size = sizeof(struct iscsi_offload_host_stats); 1071 1072 if (host_stats_size != len) { 1073 ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n", 1074 __func__, len, host_stats_size); 1075 ret = -EINVAL; 1076 goto exit_host_stats; 1077 } 1078 host_stats = (struct iscsi_offload_host_stats *)buf; 1079 1080 if (!buf) { 1081 ret = -ENOMEM; 1082 goto exit_host_stats; 1083 } 1084 1085 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1086 1087 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1088 &iscsi_stats_dma, GFP_KERNEL); 1089 if (!ql_iscsi_stats) { 1090 ql4_printk(KERN_ERR, ha, 1091 "Unable to allocate memory for iscsi stats\n"); 1092 ret = -ENOMEM; 1093 goto exit_host_stats; 1094 } 1095 1096 ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size, 1097 iscsi_stats_dma); 1098 if (ret != QLA_SUCCESS) { 1099 
ql4_printk(KERN_ERR, ha, 1100 "Unable to retrieve iscsi stats\n"); 1101 ret = -EIO; 1102 goto exit_host_stats; 1103 } 1104 host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); 1105 host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes); 1106 host_stats->mactx_multicast_frames = 1107 le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames); 1108 host_stats->mactx_broadcast_frames = 1109 le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames); 1110 host_stats->mactx_pause_frames = 1111 le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames); 1112 host_stats->mactx_control_frames = 1113 le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames); 1114 host_stats->mactx_deferral = 1115 le64_to_cpu(ql_iscsi_stats->mac_tx_deferral); 1116 host_stats->mactx_excess_deferral = 1117 le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral); 1118 host_stats->mactx_late_collision = 1119 le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision); 1120 host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort); 1121 host_stats->mactx_single_collision = 1122 le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision); 1123 host_stats->mactx_multiple_collision = 1124 le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision); 1125 host_stats->mactx_collision = 1126 le64_to_cpu(ql_iscsi_stats->mac_tx_collision); 1127 host_stats->mactx_frames_dropped = 1128 le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped); 1129 host_stats->mactx_jumbo_frames = 1130 le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames); 1131 host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames); 1132 host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes); 1133 host_stats->macrx_unknown_control_frames = 1134 le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames); 1135 host_stats->macrx_pause_frames = 1136 le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames); 1137 host_stats->macrx_control_frames = 1138 le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames); 1139 host_stats->macrx_dribble = 1140 le64_to_cpu(ql_iscsi_stats->mac_rx_dribble); 1141 host_stats->macrx_frame_length_error = 1142 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error); 1143 host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber); 1144 host_stats->macrx_carrier_sense_error = 1145 le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error); 1146 host_stats->macrx_frame_discarded = 1147 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded); 1148 host_stats->macrx_frames_dropped = 1149 le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped); 1150 host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error); 1151 host_stats->mac_encoding_error = 1152 le64_to_cpu(ql_iscsi_stats->mac_encoding_error); 1153 host_stats->macrx_length_error_large = 1154 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large); 1155 host_stats->macrx_length_error_small = 1156 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small); 1157 host_stats->macrx_multicast_frames = 1158 le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames); 1159 host_stats->macrx_broadcast_frames = 1160 le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames); 1161 host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets); 1162 host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes); 1163 host_stats->iptx_fragments = 1164 le64_to_cpu(ql_iscsi_stats->ip_tx_fragments); 1165 host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets); 1166 host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes); 1167 host_stats->iprx_fragments = 1168 
le64_to_cpu(ql_iscsi_stats->ip_rx_fragments); 1169 host_stats->ip_datagram_reassembly = 1170 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly); 1171 host_stats->ip_invalid_address_error = 1172 le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error); 1173 host_stats->ip_error_packets = 1174 le64_to_cpu(ql_iscsi_stats->ip_error_packets); 1175 host_stats->ip_fragrx_overlap = 1176 le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap); 1177 host_stats->ip_fragrx_outoforder = 1178 le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder); 1179 host_stats->ip_datagram_reassembly_timeout = 1180 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout); 1181 host_stats->ipv6tx_packets = 1182 le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets); 1183 host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes); 1184 host_stats->ipv6tx_fragments = 1185 le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments); 1186 host_stats->ipv6rx_packets = 1187 le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets); 1188 host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes); 1189 host_stats->ipv6rx_fragments = 1190 le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments); 1191 host_stats->ipv6_datagram_reassembly = 1192 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly); 1193 host_stats->ipv6_invalid_address_error = 1194 le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error); 1195 host_stats->ipv6_error_packets = 1196 le64_to_cpu(ql_iscsi_stats->ipv6_error_packets); 1197 host_stats->ipv6_fragrx_overlap = 1198 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap); 1199 host_stats->ipv6_fragrx_outoforder = 1200 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder); 1201 host_stats->ipv6_datagram_reassembly_timeout = 1202 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout); 1203 host_stats->tcptx_segments = 1204 le64_to_cpu(ql_iscsi_stats->tcp_tx_segments); 1205 host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes); 1206 host_stats->tcprx_segments = 1207 le64_to_cpu(ql_iscsi_stats->tcp_rx_segments); 1208 host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte); 1209 host_stats->tcp_duplicate_ack_retx = 1210 le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx); 1211 host_stats->tcp_retx_timer_expired = 1212 le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired); 1213 host_stats->tcprx_duplicate_ack = 1214 le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack); 1215 host_stats->tcprx_pure_ackr = 1216 le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr); 1217 host_stats->tcptx_delayed_ack = 1218 le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack); 1219 host_stats->tcptx_pure_ack = 1220 le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack); 1221 host_stats->tcprx_segment_error = 1222 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error); 1223 host_stats->tcprx_segment_outoforder = 1224 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder); 1225 host_stats->tcprx_window_probe = 1226 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe); 1227 host_stats->tcprx_window_update = 1228 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update); 1229 host_stats->tcptx_window_probe_persist = 1230 le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist); 1231 host_stats->ecc_error_correction = 1232 le64_to_cpu(ql_iscsi_stats->ecc_error_correction); 1233 host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx); 1234 host_stats->iscsi_data_bytes_tx = 1235 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx); 1236 host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx); 1237 host_stats->iscsi_data_bytes_rx = 1238 
le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx); 1239 host_stats->iscsi_io_completed = 1240 le64_to_cpu(ql_iscsi_stats->iscsi_io_completed); 1241 host_stats->iscsi_unexpected_io_rx = 1242 le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx); 1243 host_stats->iscsi_format_error = 1244 le64_to_cpu(ql_iscsi_stats->iscsi_format_error); 1245 host_stats->iscsi_hdr_digest_error = 1246 le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error); 1247 host_stats->iscsi_data_digest_error = 1248 le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error); 1249 host_stats->iscsi_sequence_error = 1250 le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); 1251 exit_host_stats: 1252 if (ql_iscsi_stats) 1253 dma_free_coherent(&ha->pdev->dev, stats_size, 1254 ql_iscsi_stats, iscsi_stats_dma); 1255 1256 ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", 1257 __func__); 1258 return ret; 1259 } 1260 1261 static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 1262 enum iscsi_param_type param_type, 1263 int param, char *buf) 1264 { 1265 struct Scsi_Host *shost = iscsi_iface_to_shost(iface); 1266 struct scsi_qla_host *ha = to_qla_host(shost); 1267 int ival; 1268 char *pval = NULL; 1269 int len = -ENOSYS; 1270 1271 if (param_type == ISCSI_NET_PARAM) { 1272 switch (param) { 1273 case ISCSI_NET_PARAM_IPV4_ADDR: 1274 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1275 break; 1276 case ISCSI_NET_PARAM_IPV4_SUBNET: 1277 len = sprintf(buf, "%pI4\n", 1278 &ha->ip_config.subnet_mask); 1279 break; 1280 case ISCSI_NET_PARAM_IPV4_GW: 1281 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); 1282 break; 1283 case ISCSI_NET_PARAM_IFACE_ENABLE: 1284 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1285 OP_STATE(ha->ip_config.ipv4_options, 1286 IPOPT_IPV4_PROTOCOL_ENABLE, pval); 1287 } else { 1288 OP_STATE(ha->ip_config.ipv6_options, 1289 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval); 1290 } 1291 1292 len = sprintf(buf, "%s\n", pval); 1293 break; 1294 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 1295 len = sprintf(buf, "%s\n", 1296 (ha->ip_config.tcp_options & 1297 TCPOPT_DHCP_ENABLE) ? 1298 "dhcp" : "static"); 1299 break; 1300 case ISCSI_NET_PARAM_IPV6_ADDR: 1301 if (iface->iface_num == 0) 1302 len = sprintf(buf, "%pI6\n", 1303 &ha->ip_config.ipv6_addr0); 1304 if (iface->iface_num == 1) 1305 len = sprintf(buf, "%pI6\n", 1306 &ha->ip_config.ipv6_addr1); 1307 break; 1308 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 1309 len = sprintf(buf, "%pI6\n", 1310 &ha->ip_config.ipv6_link_local_addr); 1311 break; 1312 case ISCSI_NET_PARAM_IPV6_ROUTER: 1313 len = sprintf(buf, "%pI6\n", 1314 &ha->ip_config.ipv6_default_router_addr); 1315 break; 1316 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 1317 pval = (ha->ip_config.ipv6_addl_options & 1318 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? 1319 "nd" : "static"; 1320 1321 len = sprintf(buf, "%s\n", pval); 1322 break; 1323 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 1324 pval = (ha->ip_config.ipv6_addl_options & 1325 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? 
1326 "auto" : "static"; 1327 1328 len = sprintf(buf, "%s\n", pval); 1329 break; 1330 case ISCSI_NET_PARAM_VLAN_ID: 1331 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1332 ival = ha->ip_config.ipv4_vlan_tag & 1333 ISCSI_MAX_VLAN_ID; 1334 else 1335 ival = ha->ip_config.ipv6_vlan_tag & 1336 ISCSI_MAX_VLAN_ID; 1337 1338 len = sprintf(buf, "%d\n", ival); 1339 break; 1340 case ISCSI_NET_PARAM_VLAN_PRIORITY: 1341 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1342 ival = (ha->ip_config.ipv4_vlan_tag >> 13) & 1343 ISCSI_MAX_VLAN_PRIORITY; 1344 else 1345 ival = (ha->ip_config.ipv6_vlan_tag >> 13) & 1346 ISCSI_MAX_VLAN_PRIORITY; 1347 1348 len = sprintf(buf, "%d\n", ival); 1349 break; 1350 case ISCSI_NET_PARAM_VLAN_ENABLED: 1351 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1352 OP_STATE(ha->ip_config.ipv4_options, 1353 IPOPT_VLAN_TAGGING_ENABLE, pval); 1354 } else { 1355 OP_STATE(ha->ip_config.ipv6_options, 1356 IPV6_OPT_VLAN_TAGGING_ENABLE, pval); 1357 } 1358 len = sprintf(buf, "%s\n", pval); 1359 break; 1360 case ISCSI_NET_PARAM_MTU: 1361 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); 1362 break; 1363 case ISCSI_NET_PARAM_PORT: 1364 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1365 len = sprintf(buf, "%d\n", 1366 ha->ip_config.ipv4_port); 1367 else 1368 len = sprintf(buf, "%d\n", 1369 ha->ip_config.ipv6_port); 1370 break; 1371 case ISCSI_NET_PARAM_IPADDR_STATE: 1372 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1373 pval = iscsi_get_ipaddress_state_name( 1374 ha->ip_config.ipv4_addr_state); 1375 } else { 1376 if (iface->iface_num == 0) 1377 pval = iscsi_get_ipaddress_state_name( 1378 ha->ip_config.ipv6_addr0_state); 1379 else if (iface->iface_num == 1) 1380 pval = iscsi_get_ipaddress_state_name( 1381 ha->ip_config.ipv6_addr1_state); 1382 } 1383 1384 len = sprintf(buf, "%s\n", pval); 1385 break; 1386 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 1387 pval = iscsi_get_ipaddress_state_name( 1388 ha->ip_config.ipv6_link_local_state); 1389 len = sprintf(buf, "%s\n", pval); 1390 break; 1391 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 1392 pval = iscsi_get_router_state_name( 1393 ha->ip_config.ipv6_default_router_state); 1394 len = sprintf(buf, "%s\n", pval); 1395 break; 1396 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 1397 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1398 OP_STATE(~ha->ip_config.tcp_options, 1399 TCPOPT_DELAYED_ACK_DISABLE, pval); 1400 } else { 1401 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1402 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); 1403 } 1404 len = sprintf(buf, "%s\n", pval); 1405 break; 1406 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 1407 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1408 OP_STATE(~ha->ip_config.tcp_options, 1409 TCPOPT_NAGLE_ALGO_DISABLE, pval); 1410 } else { 1411 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1412 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); 1413 } 1414 len = sprintf(buf, "%s\n", pval); 1415 break; 1416 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 1417 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1418 OP_STATE(~ha->ip_config.tcp_options, 1419 TCPOPT_WINDOW_SCALE_DISABLE, pval); 1420 } else { 1421 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1422 IPV6_TCPOPT_WINDOW_SCALE_DISABLE, 1423 pval); 1424 } 1425 len = sprintf(buf, "%s\n", pval); 1426 break; 1427 case ISCSI_NET_PARAM_TCP_WSF: 1428 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1429 len = sprintf(buf, "%d\n", 1430 ha->ip_config.tcp_wsf); 1431 else 1432 len = sprintf(buf, "%d\n", 1433 ha->ip_config.ipv6_tcp_wsf); 1434 break; 1435 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 1436 if 
(iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1437 ival = (ha->ip_config.tcp_options & 1438 TCPOPT_TIMER_SCALE) >> 1; 1439 else 1440 ival = (ha->ip_config.ipv6_tcp_options & 1441 IPV6_TCPOPT_TIMER_SCALE) >> 1; 1442 1443 len = sprintf(buf, "%d\n", ival); 1444 break; 1445 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 1446 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1447 OP_STATE(ha->ip_config.tcp_options, 1448 TCPOPT_TIMESTAMP_ENABLE, pval); 1449 } else { 1450 OP_STATE(ha->ip_config.ipv6_tcp_options, 1451 IPV6_TCPOPT_TIMESTAMP_EN, pval); 1452 } 1453 len = sprintf(buf, "%s\n", pval); 1454 break; 1455 case ISCSI_NET_PARAM_CACHE_ID: 1456 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1457 len = sprintf(buf, "%d\n", 1458 ha->ip_config.ipv4_cache_id); 1459 else 1460 len = sprintf(buf, "%d\n", 1461 ha->ip_config.ipv6_cache_id); 1462 break; 1463 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 1464 OP_STATE(ha->ip_config.tcp_options, 1465 TCPOPT_DNS_SERVER_IP_EN, pval); 1466 1467 len = sprintf(buf, "%s\n", pval); 1468 break; 1469 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 1470 OP_STATE(ha->ip_config.tcp_options, 1471 TCPOPT_SLP_DA_INFO_EN, pval); 1472 1473 len = sprintf(buf, "%s\n", pval); 1474 break; 1475 case ISCSI_NET_PARAM_IPV4_TOS_EN: 1476 OP_STATE(ha->ip_config.ipv4_options, 1477 IPOPT_IPV4_TOS_EN, pval); 1478 1479 len = sprintf(buf, "%s\n", pval); 1480 break; 1481 case ISCSI_NET_PARAM_IPV4_TOS: 1482 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos); 1483 break; 1484 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 1485 OP_STATE(ha->ip_config.ipv4_options, 1486 IPOPT_GRAT_ARP_EN, pval); 1487 1488 len = sprintf(buf, "%s\n", pval); 1489 break; 1490 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 1491 OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN, 1492 pval); 1493 1494 len = sprintf(buf, "%s\n", pval); 1495 break; 1496 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 1497 pval = (ha->ip_config.ipv4_alt_cid_len) ? 1498 (char *)ha->ip_config.ipv4_alt_cid : ""; 1499 1500 len = sprintf(buf, "%s\n", pval); 1501 break; 1502 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 1503 OP_STATE(ha->ip_config.ipv4_options, 1504 IPOPT_REQ_VID_EN, pval); 1505 1506 len = sprintf(buf, "%s\n", pval); 1507 break; 1508 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 1509 OP_STATE(ha->ip_config.ipv4_options, 1510 IPOPT_USE_VID_EN, pval); 1511 1512 len = sprintf(buf, "%s\n", pval); 1513 break; 1514 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 1515 pval = (ha->ip_config.ipv4_vid_len) ? 
1516 (char *)ha->ip_config.ipv4_vid : ""; 1517 1518 len = sprintf(buf, "%s\n", pval); 1519 break; 1520 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 1521 OP_STATE(ha->ip_config.ipv4_options, 1522 IPOPT_LEARN_IQN_EN, pval); 1523 1524 len = sprintf(buf, "%s\n", pval); 1525 break; 1526 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 1527 OP_STATE(~ha->ip_config.ipv4_options, 1528 IPOPT_FRAGMENTATION_DISABLE, pval); 1529 1530 len = sprintf(buf, "%s\n", pval); 1531 break; 1532 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 1533 OP_STATE(ha->ip_config.ipv4_options, 1534 IPOPT_IN_FORWARD_EN, pval); 1535 1536 len = sprintf(buf, "%s\n", pval); 1537 break; 1538 case ISCSI_NET_PARAM_REDIRECT_EN: 1539 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1540 OP_STATE(ha->ip_config.ipv4_options, 1541 IPOPT_ARP_REDIRECT_EN, pval); 1542 } else { 1543 OP_STATE(ha->ip_config.ipv6_options, 1544 IPV6_OPT_REDIRECT_EN, pval); 1545 } 1546 len = sprintf(buf, "%s\n", pval); 1547 break; 1548 case ISCSI_NET_PARAM_IPV4_TTL: 1549 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl); 1550 break; 1551 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 1552 OP_STATE(ha->ip_config.ipv6_options, 1553 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval); 1554 1555 len = sprintf(buf, "%s\n", pval); 1556 break; 1557 case ISCSI_NET_PARAM_IPV6_MLD_EN: 1558 OP_STATE(ha->ip_config.ipv6_addl_options, 1559 IPV6_ADDOPT_MLD_EN, pval); 1560 1561 len = sprintf(buf, "%s\n", pval); 1562 break; 1563 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 1564 len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl); 1565 break; 1566 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 1567 len = sprintf(buf, "%d\n", 1568 ha->ip_config.ipv6_traffic_class); 1569 break; 1570 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 1571 len = sprintf(buf, "%d\n", 1572 ha->ip_config.ipv6_hop_limit); 1573 break; 1574 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 1575 len = sprintf(buf, "%d\n", 1576 ha->ip_config.ipv6_nd_reach_time); 1577 break; 1578 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 1579 len = sprintf(buf, "%d\n", 1580 ha->ip_config.ipv6_nd_rexmit_timer); 1581 break; 1582 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 1583 len = sprintf(buf, "%d\n", 1584 ha->ip_config.ipv6_nd_stale_timeout); 1585 break; 1586 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 1587 len = sprintf(buf, "%d\n", 1588 ha->ip_config.ipv6_dup_addr_detect_count); 1589 break; 1590 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 1591 len = sprintf(buf, "%d\n", 1592 ha->ip_config.ipv6_gw_advrt_mtu); 1593 break; 1594 default: 1595 len = -ENOSYS; 1596 } 1597 } else if (param_type == ISCSI_IFACE_PARAM) { 1598 switch (param) { 1599 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 1600 len = sprintf(buf, "%d\n", ha->ip_config.def_timeout); 1601 break; 1602 case ISCSI_IFACE_PARAM_HDRDGST_EN: 1603 OP_STATE(ha->ip_config.iscsi_options, 1604 ISCSIOPTS_HEADER_DIGEST_EN, pval); 1605 1606 len = sprintf(buf, "%s\n", pval); 1607 break; 1608 case ISCSI_IFACE_PARAM_DATADGST_EN: 1609 OP_STATE(ha->ip_config.iscsi_options, 1610 ISCSIOPTS_DATA_DIGEST_EN, pval); 1611 1612 len = sprintf(buf, "%s\n", pval); 1613 break; 1614 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 1615 OP_STATE(ha->ip_config.iscsi_options, 1616 ISCSIOPTS_IMMEDIATE_DATA_EN, pval); 1617 1618 len = sprintf(buf, "%s\n", pval); 1619 break; 1620 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 1621 OP_STATE(ha->ip_config.iscsi_options, 1622 ISCSIOPTS_INITIAL_R2T_EN, pval); 1623 1624 len = sprintf(buf, "%s\n", pval); 1625 break; 1626 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 1627 OP_STATE(ha->ip_config.iscsi_options, 1628 
ISCSIOPTS_DATA_SEQ_INORDER_EN, pval); 1629 1630 len = sprintf(buf, "%s\n", pval); 1631 break; 1632 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 1633 OP_STATE(ha->ip_config.iscsi_options, 1634 ISCSIOPTS_DATA_PDU_INORDER_EN, pval); 1635 1636 len = sprintf(buf, "%s\n", pval); 1637 break; 1638 case ISCSI_IFACE_PARAM_ERL: 1639 len = sprintf(buf, "%d\n", 1640 (ha->ip_config.iscsi_options & 1641 ISCSIOPTS_ERL)); 1642 break; 1643 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 1644 len = sprintf(buf, "%u\n", 1645 ha->ip_config.iscsi_max_pdu_size * 1646 BYTE_UNITS); 1647 break; 1648 case ISCSI_IFACE_PARAM_FIRST_BURST: 1649 len = sprintf(buf, "%u\n", 1650 ha->ip_config.iscsi_first_burst_len * 1651 BYTE_UNITS); 1652 break; 1653 case ISCSI_IFACE_PARAM_MAX_R2T: 1654 len = sprintf(buf, "%d\n", 1655 ha->ip_config.iscsi_max_outstnd_r2t); 1656 break; 1657 case ISCSI_IFACE_PARAM_MAX_BURST: 1658 len = sprintf(buf, "%u\n", 1659 ha->ip_config.iscsi_max_burst_len * 1660 BYTE_UNITS); 1661 break; 1662 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 1663 OP_STATE(ha->ip_config.iscsi_options, 1664 ISCSIOPTS_CHAP_AUTH_EN, pval); 1665 1666 len = sprintf(buf, "%s\n", pval); 1667 break; 1668 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 1669 OP_STATE(ha->ip_config.iscsi_options, 1670 ISCSIOPTS_BIDI_CHAP_EN, pval); 1671 1672 len = sprintf(buf, "%s\n", pval); 1673 break; 1674 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 1675 OP_STATE(ha->ip_config.iscsi_options, 1676 ISCSIOPTS_DISCOVERY_AUTH_EN, pval); 1677 1678 len = sprintf(buf, "%s\n", pval); 1679 break; 1680 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 1681 OP_STATE(ha->ip_config.iscsi_options, 1682 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval); 1683 1684 len = sprintf(buf, "%s\n", pval); 1685 break; 1686 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 1687 OP_STATE(ha->ip_config.iscsi_options, 1688 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval); 1689 1690 len = sprintf(buf, "%s\n", pval); 1691 break; 1692 case ISCSI_IFACE_PARAM_INITIATOR_NAME: 1693 len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name); 1694 break; 1695 default: 1696 len = -ENOSYS; 1697 } 1698 } 1699 1700 return len; 1701 } 1702 1703 static struct iscsi_endpoint * 1704 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, 1705 int non_blocking) 1706 { 1707 int ret; 1708 struct iscsi_endpoint *ep; 1709 struct qla_endpoint *qla_ep; 1710 struct scsi_qla_host *ha; 1711 struct sockaddr_in *addr; 1712 struct sockaddr_in6 *addr6; 1713 1714 if (!shost) { 1715 ret = -ENXIO; 1716 pr_err("%s: shost is NULL\n", __func__); 1717 return ERR_PTR(ret); 1718 } 1719 1720 ha = iscsi_host_priv(shost); 1721 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); 1722 if (!ep) { 1723 ret = -ENOMEM; 1724 return ERR_PTR(ret); 1725 } 1726 1727 qla_ep = ep->dd_data; 1728 memset(qla_ep, 0, sizeof(struct qla_endpoint)); 1729 if (dst_addr->sa_family == AF_INET) { 1730 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in)); 1731 addr = (struct sockaddr_in *)&qla_ep->dst_addr; 1732 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__, 1733 (char *)&addr->sin_addr)); 1734 } else if (dst_addr->sa_family == AF_INET6) { 1735 memcpy(&qla_ep->dst_addr, dst_addr, 1736 sizeof(struct sockaddr_in6)); 1737 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; 1738 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, 1739 (char *)&addr6->sin6_addr)); 1740 } else { 1741 ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n", 1742 __func__); 1743 } 1744 1745 qla_ep->host = shost; 1746 1747 return ep; 1748 } 1749 1750 static int qla4xxx_ep_poll(struct iscsi_endpoint 
*ep, int timeout_ms)
{
	struct qla_endpoint *qla_ep;
	struct scsi_qla_host *ha;
	int ret = 0;

	qla_ep = ep->dd_data;
	ha = to_qla_host(qla_ep->host);
	DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));

	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
		ret = 1;

	return ret;
}

static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct qla_endpoint *qla_ep;
	struct scsi_qla_host *ha;

	qla_ep = ep->dd_data;
	ha = to_qla_host(qla_ep->host);
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));
	iscsi_destroy_endpoint(ep);
}

static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
				enum iscsi_param param,
				char *buf)
{
	struct qla_endpoint *qla_ep = ep->dd_data;
	struct sockaddr *dst_addr;
	struct scsi_qla_host *ha;

	if (!qla_ep)
		return -ENOTCONN;

	ha = to_qla_host(qla_ep->host);
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
		if (!dst_addr)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &qla_ep->dst_addr, param, buf);
	default:
		return -ENOSYS;
	}
}

static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				   struct iscsi_stats *stats)
{
	struct iscsi_session *sess;
	struct iscsi_cls_session *cls_sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct ql_iscsi_stats *ql_iscsi_stats;
	int stats_size;
	int ret;
	dma_addr_t iscsi_stats_dma;

	cls_sess = iscsi_conn_to_session(cls_conn);
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));
	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
	/* Allocate memory */
	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
					    &iscsi_stats_dma, GFP_KERNEL);
	if (!ql_iscsi_stats) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to allocate memory for iscsi stats\n");
		goto exit_get_stats;
	}

	ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
				    iscsi_stats_dma);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to retrieve iscsi stats\n");
		goto free_stats;
	}

	/* octets */
	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
	/* xmit pdus */
	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
	/* recv pdus */
	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
	stats->scsirsp_pdus =
			le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
	stats->logoutrsp_pdus =
			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);

free_stats:
	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
			  iscsi_stats_dma);
exit_get_stats:
	return;
}

static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *session;
	unsigned long flags;
	enum blk_eh_timer_return ret = BLK_EH_DONE;

	session = starget_to_session(scsi_target(sc->device));

	spin_lock_irqsave(&session->lock, flags);
	if (session->state == ISCSI_SESSION_FAILED)
		ret = BLK_EH_RESET_TIMER;
	spin_unlock_irqrestore(&session->lock, flags);

	return ret;
}

static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;

	qla4xxx_get_firmware_state(ha);

	switch (ha->addl_fw_state & 0x0F00) {
	case FW_ADDSTATE_LINK_SPEED_10MBPS:
		speed = ISCSI_PORT_SPEED_10MBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_100MBPS:
		speed = ISCSI_PORT_SPEED_100MBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_1GBPS:
		speed = ISCSI_PORT_SPEED_1GBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_10GBPS:
		speed = ISCSI_PORT_SPEED_10GBPS;
		break;
	}
	ihost->port_speed = speed;
}

static void qla4xxx_set_port_state(struct Scsi_Host *shost)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t state = ISCSI_PORT_STATE_DOWN;

	if (test_bit(AF_LINK_UP, &ha->flags))
		state = ISCSI_PORT_STATE_UP;

	ihost->port_state = state;
}

static int qla4xxx_host_get_param(struct Scsi_Host *shost,
				  enum iscsi_host_param param, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
		break;
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		len = sprintf(buf, "%s\n", ha->name_string);
		break;
	case ISCSI_HOST_PARAM_PORT_STATE:
		qla4xxx_set_port_state(shost);
		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
		break;
	case ISCSI_HOST_PARAM_PORT_SPEED:
		qla4xxx_set_port_speed(shost);
		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
		break;
	default:
		return -ENOSYS;
	}

	return len;
}

static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
{
	if (ha->iface_ipv4)
		return;

	/* IPv4 */
	ha->iface_ipv4 = iscsi_create_iface(ha->host,
					    &qla4xxx_iscsi_transport,
					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
	if (!ha->iface_ipv4)
		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
			   "iface0.\n");
}

static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
{
	if (!ha->iface_ipv6_0)
		/* IPv6 iface-0 */
		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
						      &qla4xxx_iscsi_transport,
						      ISCSI_IFACE_TYPE_IPV6, 0,
						      0);
	if (!ha->iface_ipv6_0)
		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
			   "iface0.\n");

	if (!ha->iface_ipv6_1)
		/* IPv6 iface-1 */
		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
						      &qla4xxx_iscsi_transport,
						      ISCSI_IFACE_TYPE_IPV6, 1,
						      0);
	if (!ha->iface_ipv6_1)
		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
			   "iface1.\n");
}

static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
{
	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
		qla4xxx_create_ipv4_iface(ha);

	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
		qla4xxx_create_ipv6_iface(ha);
}

static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
{
	if (ha->iface_ipv4) {
		iscsi_destroy_iface(ha->iface_ipv4);
		ha->iface_ipv4 = NULL;
	}
}

static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
{
	if (ha->iface_ipv6_0) {
		iscsi_destroy_iface(ha->iface_ipv6_0);
		ha->iface_ipv6_0 = NULL;
	}
	if (ha->iface_ipv6_1) {
		iscsi_destroy_iface(ha->iface_ipv6_1);
		ha->iface_ipv6_1 = NULL;
	}
}

static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
{
	qla4xxx_destroy_ipv4_iface(ha);
	qla4xxx_destroy_ipv6_iface(ha);
}

static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
			     struct iscsi_iface_param_info *iface_param,
			     struct addr_ctrl_blk *init_fw_cb)
{
	/*
	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
	 * iface_num 1 is valid only for IPv6 Addr.
2038 */ 2039 switch (iface_param->param) { 2040 case ISCSI_NET_PARAM_IPV6_ADDR: 2041 if (iface_param->iface_num & 0x1) 2042 /* IPv6 Addr 1 */ 2043 memcpy(init_fw_cb->ipv6_addr1, iface_param->value, 2044 sizeof(init_fw_cb->ipv6_addr1)); 2045 else 2046 /* IPv6 Addr 0 */ 2047 memcpy(init_fw_cb->ipv6_addr0, iface_param->value, 2048 sizeof(init_fw_cb->ipv6_addr0)); 2049 break; 2050 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 2051 if (iface_param->iface_num & 0x1) 2052 break; 2053 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], 2054 sizeof(init_fw_cb->ipv6_if_id)); 2055 break; 2056 case ISCSI_NET_PARAM_IPV6_ROUTER: 2057 if (iface_param->iface_num & 0x1) 2058 break; 2059 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, 2060 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2061 break; 2062 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 2063 /* Autocfg applies to even interface */ 2064 if (iface_param->iface_num & 0x1) 2065 break; 2066 2067 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) 2068 init_fw_cb->ipv6_addtl_opts &= 2069 cpu_to_le16( 2070 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2071 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) 2072 init_fw_cb->ipv6_addtl_opts |= 2073 cpu_to_le16( 2074 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2075 else 2076 ql4_printk(KERN_ERR, ha, 2077 "Invalid autocfg setting for IPv6 addr\n"); 2078 break; 2079 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 2080 /* Autocfg applies to even interface */ 2081 if (iface_param->iface_num & 0x1) 2082 break; 2083 2084 if (iface_param->value[0] == 2085 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) 2086 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( 2087 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2088 else if (iface_param->value[0] == 2089 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) 2090 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( 2091 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2092 else 2093 ql4_printk(KERN_ERR, ha, 2094 "Invalid autocfg setting for IPv6 linklocal addr\n"); 2095 break; 2096 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: 2097 /* Autocfg applies to even interface */ 2098 if (iface_param->iface_num & 0x1) 2099 break; 2100 2101 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) 2102 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, 2103 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2104 break; 2105 case ISCSI_NET_PARAM_IFACE_ENABLE: 2106 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2107 init_fw_cb->ipv6_opts |= 2108 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); 2109 qla4xxx_create_ipv6_iface(ha); 2110 } else { 2111 init_fw_cb->ipv6_opts &= 2112 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & 2113 0xFFFF); 2114 qla4xxx_destroy_ipv6_iface(ha); 2115 } 2116 break; 2117 case ISCSI_NET_PARAM_VLAN_TAG: 2118 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) 2119 break; 2120 init_fw_cb->ipv6_vlan_tag = 2121 cpu_to_be16(*(uint16_t *)iface_param->value); 2122 break; 2123 case ISCSI_NET_PARAM_VLAN_ENABLED: 2124 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2125 init_fw_cb->ipv6_opts |= 2126 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); 2127 else 2128 init_fw_cb->ipv6_opts &= 2129 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); 2130 break; 2131 case ISCSI_NET_PARAM_MTU: 2132 init_fw_cb->eth_mtu_size = 2133 cpu_to_le16(*(uint16_t *)iface_param->value); 2134 break; 2135 case ISCSI_NET_PARAM_PORT: 2136 /* Autocfg applies to even interface */ 2137 if (iface_param->iface_num & 0x1) 2138 break; 2139 2140 init_fw_cb->ipv6_port = 2141 cpu_to_le16(*(uint16_t *)iface_param->value); 2142 break; 2143 case 
ISCSI_NET_PARAM_DELAYED_ACK_EN: 2144 if (iface_param->iface_num & 0x1) 2145 break; 2146 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2147 init_fw_cb->ipv6_tcp_opts |= 2148 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); 2149 else 2150 init_fw_cb->ipv6_tcp_opts &= 2151 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & 2152 0xFFFF); 2153 break; 2154 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2155 if (iface_param->iface_num & 0x1) 2156 break; 2157 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2158 init_fw_cb->ipv6_tcp_opts |= 2159 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2160 else 2161 init_fw_cb->ipv6_tcp_opts &= 2162 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2163 break; 2164 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2165 if (iface_param->iface_num & 0x1) 2166 break; 2167 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2168 init_fw_cb->ipv6_tcp_opts |= 2169 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2170 else 2171 init_fw_cb->ipv6_tcp_opts &= 2172 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2173 break; 2174 case ISCSI_NET_PARAM_TCP_WSF: 2175 if (iface_param->iface_num & 0x1) 2176 break; 2177 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; 2178 break; 2179 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2180 if (iface_param->iface_num & 0x1) 2181 break; 2182 init_fw_cb->ipv6_tcp_opts &= 2183 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); 2184 init_fw_cb->ipv6_tcp_opts |= 2185 cpu_to_le16((iface_param->value[0] << 1) & 2186 IPV6_TCPOPT_TIMER_SCALE); 2187 break; 2188 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2189 if (iface_param->iface_num & 0x1) 2190 break; 2191 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2192 init_fw_cb->ipv6_tcp_opts |= 2193 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); 2194 else 2195 init_fw_cb->ipv6_tcp_opts &= 2196 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); 2197 break; 2198 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 2199 if (iface_param->iface_num & 0x1) 2200 break; 2201 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2202 init_fw_cb->ipv6_opts |= 2203 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2204 else 2205 init_fw_cb->ipv6_opts &= 2206 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2207 break; 2208 case ISCSI_NET_PARAM_REDIRECT_EN: 2209 if (iface_param->iface_num & 0x1) 2210 break; 2211 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2212 init_fw_cb->ipv6_opts |= 2213 cpu_to_le16(IPV6_OPT_REDIRECT_EN); 2214 else 2215 init_fw_cb->ipv6_opts &= 2216 cpu_to_le16(~IPV6_OPT_REDIRECT_EN); 2217 break; 2218 case ISCSI_NET_PARAM_IPV6_MLD_EN: 2219 if (iface_param->iface_num & 0x1) 2220 break; 2221 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2222 init_fw_cb->ipv6_addtl_opts |= 2223 cpu_to_le16(IPV6_ADDOPT_MLD_EN); 2224 else 2225 init_fw_cb->ipv6_addtl_opts &= 2226 cpu_to_le16(~IPV6_ADDOPT_MLD_EN); 2227 break; 2228 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 2229 if (iface_param->iface_num & 0x1) 2230 break; 2231 init_fw_cb->ipv6_flow_lbl = 2232 cpu_to_le16(*(uint16_t *)iface_param->value); 2233 break; 2234 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 2235 if (iface_param->iface_num & 0x1) 2236 break; 2237 init_fw_cb->ipv6_traffic_class = iface_param->value[0]; 2238 break; 2239 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 2240 if (iface_param->iface_num & 0x1) 2241 break; 2242 init_fw_cb->ipv6_hop_limit = iface_param->value[0]; 2243 break; 2244 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 2245 if (iface_param->iface_num & 0x1) 2246 break; 2247 init_fw_cb->ipv6_nd_reach_time = 2248 cpu_to_le32(*(uint32_t *)iface_param->value); 2249 break; 2250 case 
ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 2251 if (iface_param->iface_num & 0x1) 2252 break; 2253 init_fw_cb->ipv6_nd_rexmit_timer = 2254 cpu_to_le32(*(uint32_t *)iface_param->value); 2255 break; 2256 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 2257 if (iface_param->iface_num & 0x1) 2258 break; 2259 init_fw_cb->ipv6_nd_stale_timeout = 2260 cpu_to_le32(*(uint32_t *)iface_param->value); 2261 break; 2262 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 2263 if (iface_param->iface_num & 0x1) 2264 break; 2265 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; 2266 break; 2267 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 2268 if (iface_param->iface_num & 0x1) 2269 break; 2270 init_fw_cb->ipv6_gw_advrt_mtu = 2271 cpu_to_le32(*(uint32_t *)iface_param->value); 2272 break; 2273 default: 2274 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", 2275 iface_param->param); 2276 break; 2277 } 2278 } 2279 2280 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, 2281 struct iscsi_iface_param_info *iface_param, 2282 struct addr_ctrl_blk *init_fw_cb) 2283 { 2284 switch (iface_param->param) { 2285 case ISCSI_NET_PARAM_IPV4_ADDR: 2286 memcpy(init_fw_cb->ipv4_addr, iface_param->value, 2287 sizeof(init_fw_cb->ipv4_addr)); 2288 break; 2289 case ISCSI_NET_PARAM_IPV4_SUBNET: 2290 memcpy(init_fw_cb->ipv4_subnet, iface_param->value, 2291 sizeof(init_fw_cb->ipv4_subnet)); 2292 break; 2293 case ISCSI_NET_PARAM_IPV4_GW: 2294 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, 2295 sizeof(init_fw_cb->ipv4_gw_addr)); 2296 break; 2297 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 2298 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) 2299 init_fw_cb->ipv4_tcp_opts |= 2300 cpu_to_le16(TCPOPT_DHCP_ENABLE); 2301 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) 2302 init_fw_cb->ipv4_tcp_opts &= 2303 cpu_to_le16(~TCPOPT_DHCP_ENABLE); 2304 else 2305 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); 2306 break; 2307 case ISCSI_NET_PARAM_IFACE_ENABLE: 2308 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2309 init_fw_cb->ipv4_ip_opts |= 2310 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); 2311 qla4xxx_create_ipv4_iface(ha); 2312 } else { 2313 init_fw_cb->ipv4_ip_opts &= 2314 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & 2315 0xFFFF); 2316 qla4xxx_destroy_ipv4_iface(ha); 2317 } 2318 break; 2319 case ISCSI_NET_PARAM_VLAN_TAG: 2320 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) 2321 break; 2322 init_fw_cb->ipv4_vlan_tag = 2323 cpu_to_be16(*(uint16_t *)iface_param->value); 2324 break; 2325 case ISCSI_NET_PARAM_VLAN_ENABLED: 2326 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2327 init_fw_cb->ipv4_ip_opts |= 2328 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); 2329 else 2330 init_fw_cb->ipv4_ip_opts &= 2331 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); 2332 break; 2333 case ISCSI_NET_PARAM_MTU: 2334 init_fw_cb->eth_mtu_size = 2335 cpu_to_le16(*(uint16_t *)iface_param->value); 2336 break; 2337 case ISCSI_NET_PARAM_PORT: 2338 init_fw_cb->ipv4_port = 2339 cpu_to_le16(*(uint16_t *)iface_param->value); 2340 break; 2341 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 2342 if (iface_param->iface_num & 0x1) 2343 break; 2344 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2345 init_fw_cb->ipv4_tcp_opts |= 2346 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); 2347 else 2348 init_fw_cb->ipv4_tcp_opts &= 2349 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & 2350 0xFFFF); 2351 break; 2352 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2353 if (iface_param->iface_num & 0x1) 2354 break; 2355 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2356 init_fw_cb->ipv4_tcp_opts 
|= 2357 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); 2358 else 2359 init_fw_cb->ipv4_tcp_opts &= 2360 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); 2361 break; 2362 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2363 if (iface_param->iface_num & 0x1) 2364 break; 2365 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2366 init_fw_cb->ipv4_tcp_opts |= 2367 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); 2368 else 2369 init_fw_cb->ipv4_tcp_opts &= 2370 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); 2371 break; 2372 case ISCSI_NET_PARAM_TCP_WSF: 2373 if (iface_param->iface_num & 0x1) 2374 break; 2375 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; 2376 break; 2377 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2378 if (iface_param->iface_num & 0x1) 2379 break; 2380 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); 2381 init_fw_cb->ipv4_tcp_opts |= 2382 cpu_to_le16((iface_param->value[0] << 1) & 2383 TCPOPT_TIMER_SCALE); 2384 break; 2385 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2386 if (iface_param->iface_num & 0x1) 2387 break; 2388 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2389 init_fw_cb->ipv4_tcp_opts |= 2390 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); 2391 else 2392 init_fw_cb->ipv4_tcp_opts &= 2393 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); 2394 break; 2395 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 2396 if (iface_param->iface_num & 0x1) 2397 break; 2398 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2399 init_fw_cb->ipv4_tcp_opts |= 2400 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); 2401 else 2402 init_fw_cb->ipv4_tcp_opts &= 2403 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); 2404 break; 2405 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 2406 if (iface_param->iface_num & 0x1) 2407 break; 2408 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2409 init_fw_cb->ipv4_tcp_opts |= 2410 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); 2411 else 2412 init_fw_cb->ipv4_tcp_opts &= 2413 cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); 2414 break; 2415 case ISCSI_NET_PARAM_IPV4_TOS_EN: 2416 if (iface_param->iface_num & 0x1) 2417 break; 2418 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2419 init_fw_cb->ipv4_ip_opts |= 2420 cpu_to_le16(IPOPT_IPV4_TOS_EN); 2421 else 2422 init_fw_cb->ipv4_ip_opts &= 2423 cpu_to_le16(~IPOPT_IPV4_TOS_EN); 2424 break; 2425 case ISCSI_NET_PARAM_IPV4_TOS: 2426 if (iface_param->iface_num & 0x1) 2427 break; 2428 init_fw_cb->ipv4_tos = iface_param->value[0]; 2429 break; 2430 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 2431 if (iface_param->iface_num & 0x1) 2432 break; 2433 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2434 init_fw_cb->ipv4_ip_opts |= 2435 cpu_to_le16(IPOPT_GRAT_ARP_EN); 2436 else 2437 init_fw_cb->ipv4_ip_opts &= 2438 cpu_to_le16(~IPOPT_GRAT_ARP_EN); 2439 break; 2440 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 2441 if (iface_param->iface_num & 0x1) 2442 break; 2443 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2444 init_fw_cb->ipv4_ip_opts |= 2445 cpu_to_le16(IPOPT_ALT_CID_EN); 2446 else 2447 init_fw_cb->ipv4_ip_opts &= 2448 cpu_to_le16(~IPOPT_ALT_CID_EN); 2449 break; 2450 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 2451 if (iface_param->iface_num & 0x1) 2452 break; 2453 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, 2454 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); 2455 init_fw_cb->ipv4_dhcp_alt_cid_len = 2456 strlen(init_fw_cb->ipv4_dhcp_alt_cid); 2457 break; 2458 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 2459 if (iface_param->iface_num & 0x1) 2460 break; 2461 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2462 init_fw_cb->ipv4_ip_opts |= 2463 
cpu_to_le16(IPOPT_REQ_VID_EN); 2464 else 2465 init_fw_cb->ipv4_ip_opts &= 2466 cpu_to_le16(~IPOPT_REQ_VID_EN); 2467 break; 2468 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 2469 if (iface_param->iface_num & 0x1) 2470 break; 2471 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2472 init_fw_cb->ipv4_ip_opts |= 2473 cpu_to_le16(IPOPT_USE_VID_EN); 2474 else 2475 init_fw_cb->ipv4_ip_opts &= 2476 cpu_to_le16(~IPOPT_USE_VID_EN); 2477 break; 2478 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 2479 if (iface_param->iface_num & 0x1) 2480 break; 2481 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, 2482 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); 2483 init_fw_cb->ipv4_dhcp_vid_len = 2484 strlen(init_fw_cb->ipv4_dhcp_vid); 2485 break; 2486 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 2487 if (iface_param->iface_num & 0x1) 2488 break; 2489 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2490 init_fw_cb->ipv4_ip_opts |= 2491 cpu_to_le16(IPOPT_LEARN_IQN_EN); 2492 else 2493 init_fw_cb->ipv4_ip_opts &= 2494 cpu_to_le16(~IPOPT_LEARN_IQN_EN); 2495 break; 2496 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 2497 if (iface_param->iface_num & 0x1) 2498 break; 2499 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2500 init_fw_cb->ipv4_ip_opts |= 2501 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); 2502 else 2503 init_fw_cb->ipv4_ip_opts &= 2504 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); 2505 break; 2506 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 2507 if (iface_param->iface_num & 0x1) 2508 break; 2509 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2510 init_fw_cb->ipv4_ip_opts |= 2511 cpu_to_le16(IPOPT_IN_FORWARD_EN); 2512 else 2513 init_fw_cb->ipv4_ip_opts &= 2514 cpu_to_le16(~IPOPT_IN_FORWARD_EN); 2515 break; 2516 case ISCSI_NET_PARAM_REDIRECT_EN: 2517 if (iface_param->iface_num & 0x1) 2518 break; 2519 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2520 init_fw_cb->ipv4_ip_opts |= 2521 cpu_to_le16(IPOPT_ARP_REDIRECT_EN); 2522 else 2523 init_fw_cb->ipv4_ip_opts &= 2524 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); 2525 break; 2526 case ISCSI_NET_PARAM_IPV4_TTL: 2527 if (iface_param->iface_num & 0x1) 2528 break; 2529 init_fw_cb->ipv4_ttl = iface_param->value[0]; 2530 break; 2531 default: 2532 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", 2533 iface_param->param); 2534 break; 2535 } 2536 } 2537 2538 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, 2539 struct iscsi_iface_param_info *iface_param, 2540 struct addr_ctrl_blk *init_fw_cb) 2541 { 2542 switch (iface_param->param) { 2543 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 2544 if (iface_param->iface_num & 0x1) 2545 break; 2546 init_fw_cb->def_timeout = 2547 cpu_to_le16(*(uint16_t *)iface_param->value); 2548 break; 2549 case ISCSI_IFACE_PARAM_HDRDGST_EN: 2550 if (iface_param->iface_num & 0x1) 2551 break; 2552 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2553 init_fw_cb->iscsi_opts |= 2554 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); 2555 else 2556 init_fw_cb->iscsi_opts &= 2557 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); 2558 break; 2559 case ISCSI_IFACE_PARAM_DATADGST_EN: 2560 if (iface_param->iface_num & 0x1) 2561 break; 2562 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2563 init_fw_cb->iscsi_opts |= 2564 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); 2565 else 2566 init_fw_cb->iscsi_opts &= 2567 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); 2568 break; 2569 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 2570 if (iface_param->iface_num & 0x1) 2571 break; 2572 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2573 init_fw_cb->iscsi_opts |= 2574 
cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); 2575 else 2576 init_fw_cb->iscsi_opts &= 2577 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); 2578 break; 2579 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 2580 if (iface_param->iface_num & 0x1) 2581 break; 2582 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2583 init_fw_cb->iscsi_opts |= 2584 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); 2585 else 2586 init_fw_cb->iscsi_opts &= 2587 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); 2588 break; 2589 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 2590 if (iface_param->iface_num & 0x1) 2591 break; 2592 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2593 init_fw_cb->iscsi_opts |= 2594 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); 2595 else 2596 init_fw_cb->iscsi_opts &= 2597 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); 2598 break; 2599 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 2600 if (iface_param->iface_num & 0x1) 2601 break; 2602 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2603 init_fw_cb->iscsi_opts |= 2604 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); 2605 else 2606 init_fw_cb->iscsi_opts &= 2607 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); 2608 break; 2609 case ISCSI_IFACE_PARAM_ERL: 2610 if (iface_param->iface_num & 0x1) 2611 break; 2612 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); 2613 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & 2614 ISCSIOPTS_ERL); 2615 break; 2616 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 2617 if (iface_param->iface_num & 0x1) 2618 break; 2619 init_fw_cb->iscsi_max_pdu_size = 2620 cpu_to_le32(*(uint32_t *)iface_param->value) / 2621 BYTE_UNITS; 2622 break; 2623 case ISCSI_IFACE_PARAM_FIRST_BURST: 2624 if (iface_param->iface_num & 0x1) 2625 break; 2626 init_fw_cb->iscsi_fburst_len = 2627 cpu_to_le32(*(uint32_t *)iface_param->value) / 2628 BYTE_UNITS; 2629 break; 2630 case ISCSI_IFACE_PARAM_MAX_R2T: 2631 if (iface_param->iface_num & 0x1) 2632 break; 2633 init_fw_cb->iscsi_max_outstnd_r2t = 2634 cpu_to_le16(*(uint16_t *)iface_param->value); 2635 break; 2636 case ISCSI_IFACE_PARAM_MAX_BURST: 2637 if (iface_param->iface_num & 0x1) 2638 break; 2639 init_fw_cb->iscsi_max_burst_len = 2640 cpu_to_le32(*(uint32_t *)iface_param->value) / 2641 BYTE_UNITS; 2642 break; 2643 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 2644 if (iface_param->iface_num & 0x1) 2645 break; 2646 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2647 init_fw_cb->iscsi_opts |= 2648 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); 2649 else 2650 init_fw_cb->iscsi_opts &= 2651 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); 2652 break; 2653 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 2654 if (iface_param->iface_num & 0x1) 2655 break; 2656 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2657 init_fw_cb->iscsi_opts |= 2658 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); 2659 else 2660 init_fw_cb->iscsi_opts &= 2661 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); 2662 break; 2663 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 2664 if (iface_param->iface_num & 0x1) 2665 break; 2666 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2667 init_fw_cb->iscsi_opts |= 2668 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); 2669 else 2670 init_fw_cb->iscsi_opts &= 2671 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); 2672 break; 2673 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 2674 if (iface_param->iface_num & 0x1) 2675 break; 2676 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2677 init_fw_cb->iscsi_opts |= 2678 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2679 else 2680 init_fw_cb->iscsi_opts &= 2681 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2682 break; 2683 case 
ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 2684 if (iface_param->iface_num & 0x1) 2685 break; 2686 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2687 init_fw_cb->iscsi_opts |= 2688 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2689 else 2690 init_fw_cb->iscsi_opts &= 2691 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2692 break; 2693 default: 2694 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", 2695 iface_param->param); 2696 break; 2697 } 2698 } 2699 2700 static void 2701 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) 2702 { 2703 struct addr_ctrl_blk_def *acb; 2704 acb = (struct addr_ctrl_blk_def *)init_fw_cb; 2705 memset(acb->reserved1, 0, sizeof(acb->reserved1)); 2706 memset(acb->reserved2, 0, sizeof(acb->reserved2)); 2707 memset(acb->reserved3, 0, sizeof(acb->reserved3)); 2708 memset(acb->reserved4, 0, sizeof(acb->reserved4)); 2709 memset(acb->reserved5, 0, sizeof(acb->reserved5)); 2710 memset(acb->reserved6, 0, sizeof(acb->reserved6)); 2711 memset(acb->reserved7, 0, sizeof(acb->reserved7)); 2712 memset(acb->reserved8, 0, sizeof(acb->reserved8)); 2713 memset(acb->reserved9, 0, sizeof(acb->reserved9)); 2714 memset(acb->reserved10, 0, sizeof(acb->reserved10)); 2715 memset(acb->reserved11, 0, sizeof(acb->reserved11)); 2716 memset(acb->reserved12, 0, sizeof(acb->reserved12)); 2717 memset(acb->reserved13, 0, sizeof(acb->reserved13)); 2718 memset(acb->reserved14, 0, sizeof(acb->reserved14)); 2719 memset(acb->reserved15, 0, sizeof(acb->reserved15)); 2720 } 2721 2722 static int 2723 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) 2724 { 2725 struct scsi_qla_host *ha = to_qla_host(shost); 2726 int rval = 0; 2727 struct iscsi_iface_param_info *iface_param = NULL; 2728 struct addr_ctrl_blk *init_fw_cb = NULL; 2729 dma_addr_t init_fw_cb_dma; 2730 uint32_t mbox_cmd[MBOX_REG_COUNT]; 2731 uint32_t mbox_sts[MBOX_REG_COUNT]; 2732 uint32_t rem = len; 2733 struct nlattr *attr; 2734 2735 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 2736 sizeof(struct addr_ctrl_blk), 2737 &init_fw_cb_dma, GFP_KERNEL); 2738 if (!init_fw_cb) { 2739 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2740 __func__); 2741 return -ENOMEM; 2742 } 2743 2744 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 2745 memset(&mbox_sts, 0, sizeof(mbox_sts)); 2746 2747 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { 2748 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); 2749 rval = -EIO; 2750 goto exit_init_fw_cb; 2751 } 2752 2753 nla_for_each_attr(attr, data, len, rem) { 2754 iface_param = nla_data(attr); 2755 2756 if (iface_param->param_type == ISCSI_NET_PARAM) { 2757 switch (iface_param->iface_type) { 2758 case ISCSI_IFACE_TYPE_IPV4: 2759 switch (iface_param->iface_num) { 2760 case 0: 2761 qla4xxx_set_ipv4(ha, iface_param, 2762 init_fw_cb); 2763 break; 2764 default: 2765 /* Cannot have more than one IPv4 interface */ 2766 ql4_printk(KERN_ERR, ha, 2767 "Invalid IPv4 iface number = %d\n", 2768 iface_param->iface_num); 2769 break; 2770 } 2771 break; 2772 case ISCSI_IFACE_TYPE_IPV6: 2773 switch (iface_param->iface_num) { 2774 case 0: 2775 case 1: 2776 qla4xxx_set_ipv6(ha, iface_param, 2777 init_fw_cb); 2778 break; 2779 default: 2780 /* Cannot have more than two IPv6 interface */ 2781 ql4_printk(KERN_ERR, ha, 2782 "Invalid IPv6 iface number = %d\n", 2783 iface_param->iface_num); 2784 break; 2785 } 2786 break; 2787 default: 2788 ql4_printk(KERN_ERR, ha, 2789 "Invalid iface type\n"); 2790 break; 2791 } 2792 } else if (iface_param->param_type == ISCSI_IFACE_PARAM) { 
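			/*
			 * iSCSI-level options (digests, R2T/burst limits,
			 * CHAP and discovery settings) are global to the
			 * port rather than per network interface, so they
			 * are all folded into the single firmware address
			 * control block here.
			 */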
2793 qla4xxx_set_iscsi_param(ha, iface_param, 2794 init_fw_cb); 2795 } else { 2796 continue; 2797 } 2798 } 2799 2800 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); 2801 2802 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, 2803 sizeof(struct addr_ctrl_blk), 2804 FLASH_OPT_RMW_COMMIT); 2805 if (rval != QLA_SUCCESS) { 2806 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", 2807 __func__); 2808 rval = -EIO; 2809 goto exit_init_fw_cb; 2810 } 2811 2812 rval = qla4xxx_disable_acb(ha); 2813 if (rval != QLA_SUCCESS) { 2814 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", 2815 __func__); 2816 rval = -EIO; 2817 goto exit_init_fw_cb; 2818 } 2819 2820 wait_for_completion_timeout(&ha->disable_acb_comp, 2821 DISABLE_ACB_TOV * HZ); 2822 2823 qla4xxx_initcb_to_acb(init_fw_cb); 2824 2825 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); 2826 if (rval != QLA_SUCCESS) { 2827 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", 2828 __func__); 2829 rval = -EIO; 2830 goto exit_init_fw_cb; 2831 } 2832 2833 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2834 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, 2835 init_fw_cb_dma); 2836 2837 exit_init_fw_cb: 2838 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), 2839 init_fw_cb, init_fw_cb_dma); 2840 2841 return rval; 2842 } 2843 2844 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 2845 enum iscsi_param param, char *buf) 2846 { 2847 struct iscsi_session *sess = cls_sess->dd_data; 2848 struct ddb_entry *ddb_entry = sess->dd_data; 2849 struct scsi_qla_host *ha = ddb_entry->ha; 2850 struct iscsi_cls_conn *cls_conn = ddb_entry->conn; 2851 struct ql4_chap_table chap_tbl; 2852 int rval, len; 2853 uint16_t idx; 2854 2855 memset(&chap_tbl, 0, sizeof(chap_tbl)); 2856 switch (param) { 2857 case ISCSI_PARAM_CHAP_IN_IDX: 2858 rval = qla4xxx_get_chap_index(ha, sess->username_in, 2859 sess->password_in, BIDI_CHAP, 2860 &idx); 2861 if (rval) 2862 len = sprintf(buf, "\n"); 2863 else 2864 len = sprintf(buf, "%hu\n", idx); 2865 break; 2866 case ISCSI_PARAM_CHAP_OUT_IDX: 2867 if (ddb_entry->ddb_type == FLASH_DDB) { 2868 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 2869 idx = ddb_entry->chap_tbl_idx; 2870 rval = QLA_SUCCESS; 2871 } else { 2872 rval = QLA_ERROR; 2873 } 2874 } else { 2875 rval = qla4xxx_get_chap_index(ha, sess->username, 2876 sess->password, 2877 LOCAL_CHAP, &idx); 2878 } 2879 if (rval) 2880 len = sprintf(buf, "\n"); 2881 else 2882 len = sprintf(buf, "%hu\n", idx); 2883 break; 2884 case ISCSI_PARAM_USERNAME: 2885 case ISCSI_PARAM_PASSWORD: 2886 /* First, populate session username and password for FLASH DDB, 2887 * if not already done. This happens when session login fails 2888 * for a FLASH DDB. 
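	 * In that case the CHAP name and secret are read back from the
	 * adapter's CHAP table entry (chap_tbl_idx) and cached on the
	 * session before falling through to iscsi_session_get_param().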
2889 */ 2890 if (ddb_entry->ddb_type == FLASH_DDB && 2891 ddb_entry->chap_tbl_idx != INVALID_ENTRY && 2892 !sess->username && !sess->password) { 2893 idx = ddb_entry->chap_tbl_idx; 2894 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 2895 chap_tbl.secret, 2896 idx); 2897 if (!rval) { 2898 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 2899 (char *)chap_tbl.name, 2900 strlen((char *)chap_tbl.name)); 2901 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 2902 (char *)chap_tbl.secret, 2903 chap_tbl.secret_len); 2904 } 2905 } 2906 fallthrough; 2907 default: 2908 return iscsi_session_get_param(cls_sess, param, buf); 2909 } 2910 2911 return len; 2912 } 2913 2914 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, 2915 enum iscsi_param param, char *buf) 2916 { 2917 struct iscsi_conn *conn; 2918 struct qla_conn *qla_conn; 2919 struct sockaddr *dst_addr; 2920 2921 conn = cls_conn->dd_data; 2922 qla_conn = conn->dd_data; 2923 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; 2924 2925 switch (param) { 2926 case ISCSI_PARAM_CONN_PORT: 2927 case ISCSI_PARAM_CONN_ADDRESS: 2928 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2929 dst_addr, param, buf); 2930 default: 2931 return iscsi_conn_get_param(cls_conn, param, buf); 2932 } 2933 } 2934 2935 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) 2936 { 2937 uint32_t mbx_sts = 0; 2938 uint16_t tmp_ddb_index; 2939 int ret; 2940 2941 get_ddb_index: 2942 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); 2943 2944 if (tmp_ddb_index >= MAX_DDB_ENTRIES) { 2945 DEBUG2(ql4_printk(KERN_INFO, ha, 2946 "Free DDB index not available\n")); 2947 ret = QLA_ERROR; 2948 goto exit_get_ddb_index; 2949 } 2950 2951 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map)) 2952 goto get_ddb_index; 2953 2954 DEBUG2(ql4_printk(KERN_INFO, ha, 2955 "Found a free DDB index at %d\n", tmp_ddb_index)); 2956 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts); 2957 if (ret == QLA_ERROR) { 2958 if (mbx_sts == MBOX_STS_COMMAND_ERROR) { 2959 ql4_printk(KERN_INFO, ha, 2960 "DDB index = %d not available trying next\n", 2961 tmp_ddb_index); 2962 goto get_ddb_index; 2963 } 2964 DEBUG2(ql4_printk(KERN_INFO, ha, 2965 "Free FW DDB not available\n")); 2966 } 2967 2968 *ddb_index = tmp_ddb_index; 2969 2970 exit_get_ddb_index: 2971 return ret; 2972 } 2973 2974 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha, 2975 struct ddb_entry *ddb_entry, 2976 char *existing_ipaddr, 2977 char *user_ipaddr) 2978 { 2979 uint8_t dst_ipaddr[IPv6_ADDR_LEN]; 2980 char formatted_ipaddr[DDB_IPADDR_LEN]; 2981 int status = QLA_SUCCESS, ret = 0; 2982 2983 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) { 2984 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2985 '\0', NULL); 2986 if (ret == 0) { 2987 status = QLA_ERROR; 2988 goto out_match; 2989 } 2990 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr); 2991 } else { 2992 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2993 '\0', NULL); 2994 if (ret == 0) { 2995 status = QLA_ERROR; 2996 goto out_match; 2997 } 2998 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr); 2999 } 3000 3001 if (strcmp(existing_ipaddr, formatted_ipaddr)) 3002 status = QLA_ERROR; 3003 3004 out_match: 3005 return status; 3006 } 3007 3008 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha, 3009 struct iscsi_cls_conn *cls_conn) 3010 { 3011 int idx = 0, max_ddbs, rval; 3012 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3013 struct 
iscsi_session *sess, *existing_sess; 3014 struct iscsi_conn *conn, *existing_conn; 3015 struct ddb_entry *ddb_entry; 3016 3017 sess = cls_sess->dd_data; 3018 conn = cls_conn->dd_data; 3019 3020 if (sess->targetname == NULL || 3021 conn->persistent_address == NULL || 3022 conn->persistent_port == 0) 3023 return QLA_ERROR; 3024 3025 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 3026 MAX_DEV_DB_ENTRIES; 3027 3028 for (idx = 0; idx < max_ddbs; idx++) { 3029 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 3030 if (ddb_entry == NULL) 3031 continue; 3032 3033 if (ddb_entry->ddb_type != FLASH_DDB) 3034 continue; 3035 3036 existing_sess = ddb_entry->sess->dd_data; 3037 existing_conn = ddb_entry->conn->dd_data; 3038 3039 if (existing_sess->targetname == NULL || 3040 existing_conn->persistent_address == NULL || 3041 existing_conn->persistent_port == 0) 3042 continue; 3043 3044 DEBUG2(ql4_printk(KERN_INFO, ha, 3045 "IQN = %s User IQN = %s\n", 3046 existing_sess->targetname, 3047 sess->targetname)); 3048 3049 DEBUG2(ql4_printk(KERN_INFO, ha, 3050 "IP = %s User IP = %s\n", 3051 existing_conn->persistent_address, 3052 conn->persistent_address)); 3053 3054 DEBUG2(ql4_printk(KERN_INFO, ha, 3055 "Port = %d User Port = %d\n", 3056 existing_conn->persistent_port, 3057 conn->persistent_port)); 3058 3059 if (strcmp(existing_sess->targetname, sess->targetname)) 3060 continue; 3061 rval = qla4xxx_match_ipaddress(ha, ddb_entry, 3062 existing_conn->persistent_address, 3063 conn->persistent_address); 3064 if (rval == QLA_ERROR) 3065 continue; 3066 if (existing_conn->persistent_port != conn->persistent_port) 3067 continue; 3068 break; 3069 } 3070 3071 if (idx == max_ddbs) 3072 return QLA_ERROR; 3073 3074 DEBUG2(ql4_printk(KERN_INFO, ha, 3075 "Match found in fwdb sessions\n")); 3076 return QLA_SUCCESS; 3077 } 3078 3079 static struct iscsi_cls_session * 3080 qla4xxx_session_create(struct iscsi_endpoint *ep, 3081 uint16_t cmds_max, uint16_t qdepth, 3082 uint32_t initial_cmdsn) 3083 { 3084 struct iscsi_cls_session *cls_sess; 3085 struct scsi_qla_host *ha; 3086 struct qla_endpoint *qla_ep; 3087 struct ddb_entry *ddb_entry; 3088 uint16_t ddb_index; 3089 struct iscsi_session *sess; 3090 int ret; 3091 3092 if (!ep) { 3093 printk(KERN_ERR "qla4xxx: missing ep.\n"); 3094 return NULL; 3095 } 3096 3097 qla_ep = ep->dd_data; 3098 ha = to_qla_host(qla_ep->host); 3099 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3100 ha->host_no)); 3101 3102 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 3103 if (ret == QLA_ERROR) 3104 return NULL; 3105 3106 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, 3107 cmds_max, sizeof(struct ddb_entry), 3108 sizeof(struct ql4_task_data), 3109 initial_cmdsn, ddb_index); 3110 if (!cls_sess) 3111 return NULL; 3112 3113 sess = cls_sess->dd_data; 3114 ddb_entry = sess->dd_data; 3115 ddb_entry->fw_ddb_index = ddb_index; 3116 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3117 ddb_entry->ha = ha; 3118 ddb_entry->sess = cls_sess; 3119 ddb_entry->unblock_sess = qla4xxx_unblock_ddb; 3120 ddb_entry->ddb_change = qla4xxx_ddb_change; 3121 clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); 3122 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 3123 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; 3124 ha->tot_ddbs++; 3125 3126 return cls_sess; 3127 } 3128 3129 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) 3130 { 3131 struct iscsi_session *sess; 3132 struct ddb_entry *ddb_entry; 3133 struct scsi_qla_host *ha; 3134 
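	/*
	 * Tear-down below first polls the firmware DDB state (for up to
	 * LOGOUT_TOV seconds) so the logout can complete before the DDB
	 * index and driver resources are released.
	 */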
unsigned long flags, wtime; 3135 struct dev_db_entry *fw_ddb_entry = NULL; 3136 dma_addr_t fw_ddb_entry_dma; 3137 uint32_t ddb_state; 3138 int ret; 3139 3140 sess = cls_sess->dd_data; 3141 ddb_entry = sess->dd_data; 3142 ha = ddb_entry->ha; 3143 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3144 ha->host_no)); 3145 3146 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3147 &fw_ddb_entry_dma, GFP_KERNEL); 3148 if (!fw_ddb_entry) { 3149 ql4_printk(KERN_ERR, ha, 3150 "%s: Unable to allocate dma buffer\n", __func__); 3151 goto destroy_session; 3152 } 3153 3154 wtime = jiffies + (HZ * LOGOUT_TOV); 3155 do { 3156 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 3157 fw_ddb_entry, fw_ddb_entry_dma, 3158 NULL, NULL, &ddb_state, NULL, 3159 NULL, NULL); 3160 if (ret == QLA_ERROR) 3161 goto destroy_session; 3162 3163 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 3164 (ddb_state == DDB_DS_SESSION_FAILED)) 3165 goto destroy_session; 3166 3167 schedule_timeout_uninterruptible(HZ); 3168 } while ((time_after(wtime, jiffies))); 3169 3170 destroy_session: 3171 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 3172 if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags)) 3173 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 3174 spin_lock_irqsave(&ha->hardware_lock, flags); 3175 qla4xxx_free_ddb(ha, ddb_entry); 3176 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3177 3178 iscsi_session_teardown(cls_sess); 3179 3180 if (fw_ddb_entry) 3181 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3182 fw_ddb_entry, fw_ddb_entry_dma); 3183 } 3184 3185 static struct iscsi_cls_conn * 3186 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx) 3187 { 3188 struct iscsi_cls_conn *cls_conn; 3189 struct iscsi_session *sess; 3190 struct ddb_entry *ddb_entry; 3191 struct scsi_qla_host *ha; 3192 3193 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), 3194 conn_idx); 3195 if (!cls_conn) { 3196 pr_info("%s: Can not create connection for conn_idx = %u\n", 3197 __func__, conn_idx); 3198 return NULL; 3199 } 3200 3201 sess = cls_sess->dd_data; 3202 ddb_entry = sess->dd_data; 3203 ddb_entry->conn = cls_conn; 3204 3205 ha = ddb_entry->ha; 3206 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__, 3207 conn_idx)); 3208 return cls_conn; 3209 } 3210 3211 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 3212 struct iscsi_cls_conn *cls_conn, 3213 uint64_t transport_fd, int is_leading) 3214 { 3215 struct iscsi_conn *conn; 3216 struct qla_conn *qla_conn; 3217 struct iscsi_endpoint *ep; 3218 struct ddb_entry *ddb_entry; 3219 struct scsi_qla_host *ha; 3220 struct iscsi_session *sess; 3221 3222 sess = cls_session->dd_data; 3223 ddb_entry = sess->dd_data; 3224 ha = ddb_entry->ha; 3225 3226 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, 3227 cls_session->sid, cls_conn->cid)); 3228 3229 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) 3230 return -EINVAL; 3231 ep = iscsi_lookup_endpoint(transport_fd); 3232 if (!ep) 3233 return -EINVAL; 3234 conn = cls_conn->dd_data; 3235 qla_conn = conn->dd_data; 3236 qla_conn->qla_ep = ep->dd_data; 3237 return 0; 3238 } 3239 3240 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn) 3241 { 3242 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3243 struct iscsi_session *sess; 3244 struct ddb_entry *ddb_entry; 3245 struct scsi_qla_host *ha; 3246 struct dev_db_entry *fw_ddb_entry = NULL; 3247 dma_addr_t 
fw_ddb_entry_dma; 3248 uint32_t mbx_sts = 0; 3249 int ret = 0; 3250 int status = QLA_SUCCESS; 3251 3252 sess = cls_sess->dd_data; 3253 ddb_entry = sess->dd_data; 3254 ha = ddb_entry->ha; 3255 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, 3256 cls_sess->sid, cls_conn->cid)); 3257 3258 /* Check if we have matching FW DDB, if yes then do not 3259 * login to this target. This could cause target to logout previous 3260 * connection 3261 */ 3262 ret = qla4xxx_match_fwdb_session(ha, cls_conn); 3263 if (ret == QLA_SUCCESS) { 3264 ql4_printk(KERN_INFO, ha, 3265 "Session already exist in FW.\n"); 3266 ret = -EEXIST; 3267 goto exit_conn_start; 3268 } 3269 3270 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3271 &fw_ddb_entry_dma, GFP_KERNEL); 3272 if (!fw_ddb_entry) { 3273 ql4_printk(KERN_ERR, ha, 3274 "%s: Unable to allocate dma buffer\n", __func__); 3275 ret = -ENOMEM; 3276 goto exit_conn_start; 3277 } 3278 3279 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts); 3280 if (ret) { 3281 /* If iscsid is stopped and started then no need to do 3282 * set param again since ddb state will be already 3283 * active and FW does not allow set ddb to an 3284 * active session. 3285 */ 3286 if (mbx_sts) 3287 if (ddb_entry->fw_ddb_device_state == 3288 DDB_DS_SESSION_ACTIVE) { 3289 ddb_entry->unblock_sess(ddb_entry->sess); 3290 goto exit_set_param; 3291 } 3292 3293 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n", 3294 __func__, ddb_entry->fw_ddb_index); 3295 goto exit_conn_start; 3296 } 3297 3298 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); 3299 if (status == QLA_ERROR) { 3300 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, 3301 sess->targetname); 3302 ret = -EINVAL; 3303 goto exit_conn_start; 3304 } 3305 3306 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) 3307 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; 3308 3309 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__, 3310 ddb_entry->fw_ddb_device_state)); 3311 3312 exit_set_param: 3313 ret = 0; 3314 3315 exit_conn_start: 3316 if (fw_ddb_entry) 3317 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3318 fw_ddb_entry, fw_ddb_entry_dma); 3319 return ret; 3320 } 3321 3322 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn) 3323 { 3324 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3325 struct iscsi_session *sess; 3326 struct scsi_qla_host *ha; 3327 struct ddb_entry *ddb_entry; 3328 int options; 3329 3330 sess = cls_sess->dd_data; 3331 ddb_entry = sess->dd_data; 3332 ha = ddb_entry->ha; 3333 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__, 3334 cls_conn->cid)); 3335 3336 options = LOGOUT_OPTION_CLOSE_SESSION; 3337 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) 3338 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 3339 } 3340 3341 static void qla4xxx_task_work(struct work_struct *wdata) 3342 { 3343 struct ql4_task_data *task_data; 3344 struct scsi_qla_host *ha; 3345 struct passthru_status *sts; 3346 struct iscsi_task *task; 3347 struct iscsi_hdr *hdr; 3348 uint8_t *data; 3349 uint32_t data_len; 3350 struct iscsi_conn *conn; 3351 int hdr_len; 3352 itt_t itt; 3353 3354 task_data = container_of(wdata, struct ql4_task_data, task_work); 3355 ha = task_data->ha; 3356 task = task_data->task; 3357 sts = &task_data->sts; 3358 hdr_len = sizeof(struct iscsi_hdr); 3359 3360 DEBUG3(printk(KERN_INFO "Status returned\n")); 3361 
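	/* When level-3 debugging is enabled, dump the raw passthru status
	 * IOCB and the first 64 bytes of the response buffer.
	 */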
DEBUG3(qla4xxx_dump_buffer(sts, 64)); 3362 DEBUG3(printk(KERN_INFO "Response buffer")); 3363 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64)); 3364 3365 conn = task->conn; 3366 3367 switch (sts->completionStatus) { 3368 case PASSTHRU_STATUS_COMPLETE: 3369 hdr = (struct iscsi_hdr *)task_data->resp_buffer; 3370 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */ 3371 itt = sts->handle; 3372 hdr->itt = itt; 3373 data = task_data->resp_buffer + hdr_len; 3374 data_len = task_data->resp_len - hdr_len; 3375 iscsi_complete_pdu(conn, hdr, data, data_len); 3376 break; 3377 default: 3378 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n", 3379 sts->completionStatus); 3380 break; 3381 } 3382 return; 3383 } 3384 3385 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 3386 { 3387 struct ql4_task_data *task_data; 3388 struct iscsi_session *sess; 3389 struct ddb_entry *ddb_entry; 3390 struct scsi_qla_host *ha; 3391 int hdr_len; 3392 3393 sess = task->conn->session; 3394 ddb_entry = sess->dd_data; 3395 ha = ddb_entry->ha; 3396 task_data = task->dd_data; 3397 memset(task_data, 0, sizeof(struct ql4_task_data)); 3398 3399 if (task->sc) { 3400 ql4_printk(KERN_INFO, ha, 3401 "%s: SCSI Commands not implemented\n", __func__); 3402 return -EINVAL; 3403 } 3404 3405 hdr_len = sizeof(struct iscsi_hdr); 3406 task_data->ha = ha; 3407 task_data->task = task; 3408 3409 if (task->data_count) { 3410 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, 3411 task->data_count, 3412 DMA_TO_DEVICE); 3413 } 3414 3415 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3416 __func__, task->conn->max_recv_dlength, hdr_len)); 3417 3418 task_data->resp_len = task->conn->max_recv_dlength + hdr_len; 3419 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev, 3420 task_data->resp_len, 3421 &task_data->resp_dma, 3422 GFP_ATOMIC); 3423 if (!task_data->resp_buffer) 3424 goto exit_alloc_pdu; 3425 3426 task_data->req_len = task->data_count + hdr_len; 3427 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev, 3428 task_data->req_len, 3429 &task_data->req_dma, 3430 GFP_ATOMIC); 3431 if (!task_data->req_buffer) 3432 goto exit_alloc_pdu; 3433 3434 task->hdr = task_data->req_buffer; 3435 3436 INIT_WORK(&task_data->task_work, qla4xxx_task_work); 3437 3438 return 0; 3439 3440 exit_alloc_pdu: 3441 if (task_data->resp_buffer) 3442 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3443 task_data->resp_buffer, task_data->resp_dma); 3444 3445 if (task_data->req_buffer) 3446 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3447 task_data->req_buffer, task_data->req_dma); 3448 return -ENOMEM; 3449 } 3450 3451 static void qla4xxx_task_cleanup(struct iscsi_task *task) 3452 { 3453 struct ql4_task_data *task_data; 3454 struct iscsi_session *sess; 3455 struct ddb_entry *ddb_entry; 3456 struct scsi_qla_host *ha; 3457 int hdr_len; 3458 3459 hdr_len = sizeof(struct iscsi_hdr); 3460 sess = task->conn->session; 3461 ddb_entry = sess->dd_data; 3462 ha = ddb_entry->ha; 3463 task_data = task->dd_data; 3464 3465 if (task->data_count) { 3466 dma_unmap_single(&ha->pdev->dev, task_data->data_dma, 3467 task->data_count, DMA_TO_DEVICE); 3468 } 3469 3470 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3471 __func__, task->conn->max_recv_dlength, hdr_len)); 3472 3473 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3474 task_data->resp_buffer, task_data->resp_dma); 3475 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3476 task_data->req_buffer, 
task_data->req_dma); 3477 return; 3478 } 3479 3480 static int qla4xxx_task_xmit(struct iscsi_task *task) 3481 { 3482 struct scsi_cmnd *sc = task->sc; 3483 struct iscsi_session *sess = task->conn->session; 3484 struct ddb_entry *ddb_entry = sess->dd_data; 3485 struct scsi_qla_host *ha = ddb_entry->ha; 3486 3487 if (!sc) 3488 return qla4xxx_send_passthru0(task); 3489 3490 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", 3491 __func__); 3492 return -ENOSYS; 3493 } 3494 3495 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, 3496 struct iscsi_bus_flash_conn *conn, 3497 struct dev_db_entry *fw_ddb_entry) 3498 { 3499 unsigned long options = 0; 3500 int rc = 0; 3501 3502 options = le16_to_cpu(fw_ddb_entry->options); 3503 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3504 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3505 rc = iscsi_switch_str_param(&sess->portal_type, 3506 PORTAL_TYPE_IPV6); 3507 if (rc) 3508 goto exit_copy; 3509 } else { 3510 rc = iscsi_switch_str_param(&sess->portal_type, 3511 PORTAL_TYPE_IPV4); 3512 if (rc) 3513 goto exit_copy; 3514 } 3515 3516 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3517 &options); 3518 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3519 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); 3520 3521 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3522 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3523 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3524 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3525 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3526 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3527 &options); 3528 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3529 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3530 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); 3531 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3532 &options); 3533 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3534 sess->discovery_auth_optional = 3535 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3536 if (test_bit(ISCSIOPT_ERL1, &options)) 3537 sess->erl |= BIT_1; 3538 if (test_bit(ISCSIOPT_ERL0, &options)) 3539 sess->erl |= BIT_0; 3540 3541 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3542 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3543 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3544 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3545 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3546 conn->tcp_timer_scale |= BIT_3; 3547 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3548 conn->tcp_timer_scale |= BIT_2; 3549 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3550 conn->tcp_timer_scale |= BIT_1; 3551 3552 conn->tcp_timer_scale >>= 1; 3553 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3554 3555 options = le16_to_cpu(fw_ddb_entry->ip_options); 3556 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3557 3558 conn->max_recv_dlength = BYTE_UNITS * 3559 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3560 conn->max_xmit_dlength = BYTE_UNITS * 3561 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3562 sess->first_burst = BYTE_UNITS * 3563 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3564 sess->max_burst = BYTE_UNITS * 3565 
le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3566 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3567 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3568 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3569 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3570 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3571 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3572 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3573 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); 3574 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); 3575 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3576 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3577 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3578 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); 3579 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); 3580 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3581 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3582 3583 sess->default_taskmgmt_timeout = 3584 le16_to_cpu(fw_ddb_entry->def_timeout); 3585 conn->port = le16_to_cpu(fw_ddb_entry->port); 3586 3587 options = le16_to_cpu(fw_ddb_entry->options); 3588 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3589 if (!conn->ipaddress) { 3590 rc = -ENOMEM; 3591 goto exit_copy; 3592 } 3593 3594 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3595 if (!conn->redirect_ipaddr) { 3596 rc = -ENOMEM; 3597 goto exit_copy; 3598 } 3599 3600 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 3601 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); 3602 3603 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3604 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; 3605 3606 conn->link_local_ipv6_addr = kmemdup( 3607 fw_ddb_entry->link_local_ipv6_addr, 3608 IPv6_ADDR_LEN, GFP_KERNEL); 3609 if (!conn->link_local_ipv6_addr) { 3610 rc = -ENOMEM; 3611 goto exit_copy; 3612 } 3613 } else { 3614 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3615 } 3616 3617 if (fw_ddb_entry->iscsi_name[0]) { 3618 rc = iscsi_switch_str_param(&sess->targetname, 3619 (char *)fw_ddb_entry->iscsi_name); 3620 if (rc) 3621 goto exit_copy; 3622 } 3623 3624 if (fw_ddb_entry->iscsi_alias[0]) { 3625 rc = iscsi_switch_str_param(&sess->targetalias, 3626 (char *)fw_ddb_entry->iscsi_alias); 3627 if (rc) 3628 goto exit_copy; 3629 } 3630 3631 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3632 3633 exit_copy: 3634 return rc; 3635 } 3636 3637 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, 3638 struct iscsi_bus_flash_conn *conn, 3639 struct dev_db_entry *fw_ddb_entry) 3640 { 3641 uint16_t options; 3642 int rc = 0; 3643 3644 options = le16_to_cpu(fw_ddb_entry->options); 3645 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); 3646 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3647 options |= BIT_8; 3648 else 3649 options &= ~BIT_8; 3650 3651 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); 3652 SET_BITVAL(sess->discovery_sess, options, BIT_4); 3653 SET_BITVAL(sess->entry_state, options, BIT_3); 3654 fw_ddb_entry->options = cpu_to_le16(options); 3655 3656 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3657 SET_BITVAL(conn->hdrdgst_en, options, BIT_13); 3658 SET_BITVAL(conn->datadgst_en, options, BIT_12); 3659 SET_BITVAL(sess->imm_data_en, options, BIT_11); 3660 SET_BITVAL(sess->initial_r2t_en, options, BIT_10); 3661 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9); 
3662 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); 3663 SET_BITVAL(sess->chap_auth_en, options, BIT_7); 3664 SET_BITVAL(conn->snack_req_en, options, BIT_6); 3665 SET_BITVAL(sess->discovery_logout_en, options, BIT_5); 3666 SET_BITVAL(sess->bidi_chap_en, options, BIT_4); 3667 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); 3668 SET_BITVAL(sess->erl & BIT_1, options, BIT_1); 3669 SET_BITVAL(sess->erl & BIT_0, options, BIT_0); 3670 fw_ddb_entry->iscsi_options = cpu_to_le16(options); 3671 3672 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3673 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); 3674 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); 3675 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); 3676 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); 3677 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); 3678 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); 3679 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); 3680 fw_ddb_entry->tcp_options = cpu_to_le16(options); 3681 3682 options = le16_to_cpu(fw_ddb_entry->ip_options); 3683 SET_BITVAL(conn->fragment_disable, options, BIT_4); 3684 fw_ddb_entry->ip_options = cpu_to_le16(options); 3685 3686 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); 3687 fw_ddb_entry->iscsi_max_rcv_data_seg_len = 3688 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); 3689 fw_ddb_entry->iscsi_max_snd_data_seg_len = 3690 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); 3691 fw_ddb_entry->iscsi_first_burst_len = 3692 cpu_to_le16(sess->first_burst / BYTE_UNITS); 3693 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / 3694 BYTE_UNITS); 3695 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); 3696 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 3697 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 3698 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 3699 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); 3700 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); 3701 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 3702 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 3703 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 3704 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); 3705 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); 3706 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); 3707 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 3708 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 3709 fw_ddb_entry->port = cpu_to_le16(conn->port); 3710 fw_ddb_entry->def_timeout = 3711 cpu_to_le16(sess->default_taskmgmt_timeout); 3712 3713 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3714 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; 3715 else 3716 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 3717 3718 if (conn->ipaddress) 3719 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, 3720 sizeof(fw_ddb_entry->ip_addr)); 3721 3722 if (conn->redirect_ipaddr) 3723 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, 3724 sizeof(fw_ddb_entry->tgt_addr)); 3725 3726 if (conn->link_local_ipv6_addr) 3727 memcpy(fw_ddb_entry->link_local_ipv6_addr, 3728 conn->link_local_ipv6_addr, 3729 sizeof(fw_ddb_entry->link_local_ipv6_addr)); 3730 3731 if (sess->targetname) 3732 memcpy(fw_ddb_entry->iscsi_name, sess->targetname, 3733 sizeof(fw_ddb_entry->iscsi_name)); 3734 3735 if (sess->targetalias) 3736 memcpy(fw_ddb_entry->iscsi_alias, 
sess->targetalias, 3737 sizeof(fw_ddb_entry->iscsi_alias)); 3738 3739 COPY_ISID(fw_ddb_entry->isid, sess->isid); 3740 3741 return rc; 3742 } 3743 3744 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, 3745 struct iscsi_session *sess, 3746 struct dev_db_entry *fw_ddb_entry) 3747 { 3748 unsigned long options = 0; 3749 uint16_t ddb_link; 3750 uint16_t disc_parent; 3751 char ip_addr[DDB_IPADDR_LEN]; 3752 3753 options = le16_to_cpu(fw_ddb_entry->options); 3754 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3755 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3756 &options); 3757 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3758 3759 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3760 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3761 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3762 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3763 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3764 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3765 &options); 3766 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3767 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3768 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3769 &options); 3770 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3771 sess->discovery_auth_optional = 3772 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3773 if (test_bit(ISCSIOPT_ERL1, &options)) 3774 sess->erl |= BIT_1; 3775 if (test_bit(ISCSIOPT_ERL0, &options)) 3776 sess->erl |= BIT_0; 3777 3778 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3779 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3780 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3781 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3782 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3783 conn->tcp_timer_scale |= BIT_3; 3784 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3785 conn->tcp_timer_scale |= BIT_2; 3786 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3787 conn->tcp_timer_scale |= BIT_1; 3788 3789 conn->tcp_timer_scale >>= 1; 3790 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3791 3792 options = le16_to_cpu(fw_ddb_entry->ip_options); 3793 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3794 3795 conn->max_recv_dlength = BYTE_UNITS * 3796 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3797 conn->max_xmit_dlength = BYTE_UNITS * 3798 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3799 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3800 sess->first_burst = BYTE_UNITS * 3801 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3802 sess->max_burst = BYTE_UNITS * 3803 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3804 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3805 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3806 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3807 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3808 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3809 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3810 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3811 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); 3812 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3813 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3814 conn->exp_statsn = 
le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3815 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3816 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3817 3818 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 3819 if (ddb_link == DDB_ISNS) 3820 disc_parent = ISCSI_DISC_PARENT_ISNS; 3821 else if (ddb_link == DDB_NO_LINK) 3822 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3823 else if (ddb_link < MAX_DDB_ENTRIES) 3824 disc_parent = ISCSI_DISC_PARENT_SENDTGT; 3825 else 3826 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3827 3828 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 3829 iscsi_get_discovery_parent_name(disc_parent), 0); 3830 3831 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, 3832 (char *)fw_ddb_entry->iscsi_alias, 0); 3833 3834 options = le16_to_cpu(fw_ddb_entry->options); 3835 if (options & DDB_OPT_IPV6_DEVICE) { 3836 memset(ip_addr, 0, sizeof(ip_addr)); 3837 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); 3838 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, 3839 (char *)ip_addr, 0); 3840 } 3841 } 3842 3843 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 3844 struct dev_db_entry *fw_ddb_entry, 3845 struct iscsi_cls_session *cls_sess, 3846 struct iscsi_cls_conn *cls_conn) 3847 { 3848 int buflen = 0; 3849 struct iscsi_session *sess; 3850 struct ddb_entry *ddb_entry; 3851 struct ql4_chap_table chap_tbl; 3852 struct iscsi_conn *conn; 3853 char ip_addr[DDB_IPADDR_LEN]; 3854 uint16_t options = 0; 3855 3856 sess = cls_sess->dd_data; 3857 ddb_entry = sess->dd_data; 3858 conn = cls_conn->dd_data; 3859 memset(&chap_tbl, 0, sizeof(chap_tbl)); 3860 3861 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3862 3863 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3864 3865 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); 3866 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); 3867 3868 memset(ip_addr, 0, sizeof(ip_addr)); 3869 options = le16_to_cpu(fw_ddb_entry->options); 3870 if (options & DDB_OPT_IPV6_DEVICE) { 3871 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); 3872 3873 memset(ip_addr, 0, sizeof(ip_addr)); 3874 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); 3875 } else { 3876 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); 3877 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); 3878 } 3879 3880 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, 3881 (char *)ip_addr, buflen); 3882 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, 3883 (char *)fw_ddb_entry->iscsi_name, buflen); 3884 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 3885 (char *)ha->name_string, buflen); 3886 3887 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 3888 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 3889 chap_tbl.secret, 3890 ddb_entry->chap_tbl_idx)) { 3891 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 3892 (char *)chap_tbl.name, 3893 strlen((char *)chap_tbl.name)); 3894 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 3895 (char *)chap_tbl.secret, 3896 chap_tbl.secret_len); 3897 } 3898 } 3899 } 3900 3901 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 3902 struct ddb_entry *ddb_entry) 3903 { 3904 struct iscsi_cls_session *cls_sess; 3905 struct iscsi_cls_conn *cls_conn; 3906 uint32_t ddb_state; 3907 dma_addr_t fw_ddb_entry_dma; 3908 struct dev_db_entry *fw_ddb_entry; 3909 3910 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3911 &fw_ddb_entry_dma, GFP_KERNEL); 3912 if (!fw_ddb_entry) { 3913 ql4_printk(KERN_ERR, ha, 3914 
"%s: Unable to allocate dma buffer\n", __func__); 3915 goto exit_session_conn_fwddb_param; 3916 } 3917 3918 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3919 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3920 NULL, NULL, NULL) == QLA_ERROR) { 3921 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3922 "get_ddb_entry for fw_ddb_index %d\n", 3923 ha->host_no, __func__, 3924 ddb_entry->fw_ddb_index)); 3925 goto exit_session_conn_fwddb_param; 3926 } 3927 3928 cls_sess = ddb_entry->sess; 3929 3930 cls_conn = ddb_entry->conn; 3931 3932 /* Update params */ 3933 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 3934 3935 exit_session_conn_fwddb_param: 3936 if (fw_ddb_entry) 3937 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3938 fw_ddb_entry, fw_ddb_entry_dma); 3939 } 3940 3941 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 3942 struct ddb_entry *ddb_entry) 3943 { 3944 struct iscsi_cls_session *cls_sess; 3945 struct iscsi_cls_conn *cls_conn; 3946 struct iscsi_session *sess; 3947 struct iscsi_conn *conn; 3948 uint32_t ddb_state; 3949 dma_addr_t fw_ddb_entry_dma; 3950 struct dev_db_entry *fw_ddb_entry; 3951 3952 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3953 &fw_ddb_entry_dma, GFP_KERNEL); 3954 if (!fw_ddb_entry) { 3955 ql4_printk(KERN_ERR, ha, 3956 "%s: Unable to allocate dma buffer\n", __func__); 3957 goto exit_session_conn_param; 3958 } 3959 3960 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3961 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3962 NULL, NULL, NULL) == QLA_ERROR) { 3963 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3964 "get_ddb_entry for fw_ddb_index %d\n", 3965 ha->host_no, __func__, 3966 ddb_entry->fw_ddb_index)); 3967 goto exit_session_conn_param; 3968 } 3969 3970 cls_sess = ddb_entry->sess; 3971 sess = cls_sess->dd_data; 3972 3973 cls_conn = ddb_entry->conn; 3974 conn = cls_conn->dd_data; 3975 3976 /* Update timers after login */ 3977 ddb_entry->default_relogin_timeout = 3978 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && 3979 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? 
3980 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
3981 ddb_entry->default_time2wait =
3982 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3983
3984 /* Update params */
3985 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3986 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
3987
3988 memcpy(sess->initiatorname, ha->name_string,
3989 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
3990
3991 exit_session_conn_param:
3992 if (fw_ddb_entry)
3993 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3994 fw_ddb_entry, fw_ddb_entry_dma);
3995 }
3996
3997 /*
3998 * Timer routines
3999 */
4000 static void qla4xxx_timer(struct timer_list *t);
4001
4002 static void qla4xxx_start_timer(struct scsi_qla_host *ha,
4003 unsigned long interval)
4004 {
4005 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
4006 __func__, ha->host->host_no));
4007 timer_setup(&ha->timer, qla4xxx_timer, 0);
4008 ha->timer.expires = jiffies + interval * HZ;
4009 add_timer(&ha->timer);
4010 ha->timer_active = 1;
4011 }
4012
4013 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
4014 {
4015 del_timer_sync(&ha->timer);
4016 ha->timer_active = 0;
4017 }
4018
4019 /**
4020 * qla4xxx_mark_device_missing - blocks the session
4021 * @cls_session: Pointer to the session to be blocked
4023 *
4024 * This routine marks a device missing by blocking its iSCSI session.
4025 **/
4026 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
4027 {
4028 iscsi_block_session(cls_session);
4029 }
4030
4031 /**
4032 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
4033 * @ha: Pointer to host adapter structure.
4034 *
4035 * This routine marks every device missing by blocking its iSCSI session.
4036 **/
4037 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
4038 {
4039 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
4040 }
4041
4042 static struct srb *qla4xxx_get_new_srb(struct scsi_qla_host *ha,
4043 struct ddb_entry *ddb_entry,
4044 struct scsi_cmnd *cmd)
4045 {
4046 struct srb *srb;
4047
4048 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
4049 if (!srb)
4050 return srb;
4051
4052 kref_init(&srb->srb_ref);
4053 srb->ha = ha;
4054 srb->ddb = ddb_entry;
4055 srb->cmd = cmd;
4056 srb->flags = 0;
4057 CMD_SP(cmd) = (void *)srb;
4058
4059 return srb;
4060 }
4061
4062 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
4063 {
4064 struct scsi_cmnd *cmd = srb->cmd;
4065
4066 if (srb->flags & SRB_DMA_VALID) {
4067 scsi_dma_unmap(cmd);
4068 srb->flags &= ~SRB_DMA_VALID;
4069 }
4070 CMD_SP(cmd) = NULL;
4071 }
4072
4073 void qla4xxx_srb_compl(struct kref *ref)
4074 {
4075 struct srb *srb = container_of(ref, struct srb, srb_ref);
4076 struct scsi_cmnd *cmd = srb->cmd;
4077 struct scsi_qla_host *ha = srb->ha;
4078
4079 qla4xxx_srb_free_dma(ha, srb);
4080
4081 mempool_free(srb, ha->srb_mempool);
4082
4083 cmd->scsi_done(cmd);
4084 }
4085
4086 /**
4087 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
4088 * @host: scsi host
4089 * @cmd: Pointer to Linux's SCSI command structure
4090 *
4091 * Remarks:
4092 * This routine is invoked by Linux to send a SCSI command to the driver.
4093 * The mid-level driver tries to ensure that queuecommand never gets
4094 * invoked concurrently with itself or the interrupt handler (although
4095 * the interrupt handler may call this routine as part of request-
4096 * completion handling).
Unfortunely, it sometimes calls the scheduler 4097 * in interrupt context which is a big NO! NO!. 4098 **/ 4099 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 4100 { 4101 struct scsi_qla_host *ha = to_qla_host(host); 4102 struct ddb_entry *ddb_entry = cmd->device->hostdata; 4103 struct iscsi_cls_session *sess = ddb_entry->sess; 4104 struct srb *srb; 4105 int rval; 4106 4107 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4108 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) 4109 cmd->result = DID_NO_CONNECT << 16; 4110 else 4111 cmd->result = DID_REQUEUE << 16; 4112 goto qc_fail_command; 4113 } 4114 4115 if (!sess) { 4116 cmd->result = DID_IMM_RETRY << 16; 4117 goto qc_fail_command; 4118 } 4119 4120 rval = iscsi_session_chkready(sess); 4121 if (rval) { 4122 cmd->result = rval; 4123 goto qc_fail_command; 4124 } 4125 4126 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4127 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4128 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4129 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4130 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4131 !test_bit(AF_ONLINE, &ha->flags) || 4132 !test_bit(AF_LINK_UP, &ha->flags) || 4133 test_bit(AF_LOOPBACK, &ha->flags) || 4134 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || 4135 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || 4136 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 4137 goto qc_host_busy; 4138 4139 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); 4140 if (!srb) 4141 goto qc_host_busy; 4142 4143 rval = qla4xxx_send_command_to_isp(ha, srb); 4144 if (rval != QLA_SUCCESS) 4145 goto qc_host_busy_free_sp; 4146 4147 return 0; 4148 4149 qc_host_busy_free_sp: 4150 qla4xxx_srb_free_dma(ha, srb); 4151 mempool_free(srb, ha->srb_mempool); 4152 4153 qc_host_busy: 4154 return SCSI_MLQUEUE_HOST_BUSY; 4155 4156 qc_fail_command: 4157 cmd->scsi_done(cmd); 4158 4159 return 0; 4160 } 4161 4162 /** 4163 * qla4xxx_mem_free - frees memory allocated to adapter 4164 * @ha: Pointer to host adapter structure. 4165 * 4166 * Frees memory previously allocated by qla4xxx_mem_alloc 4167 **/ 4168 static void qla4xxx_mem_free(struct scsi_qla_host *ha) 4169 { 4170 if (ha->queues) 4171 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 4172 ha->queues_dma); 4173 4174 vfree(ha->fw_dump); 4175 4176 ha->queues_len = 0; 4177 ha->queues = NULL; 4178 ha->queues_dma = 0; 4179 ha->request_ring = NULL; 4180 ha->request_dma = 0; 4181 ha->response_ring = NULL; 4182 ha->response_dma = 0; 4183 ha->shadow_regs = NULL; 4184 ha->shadow_regs_dma = 0; 4185 ha->fw_dump = NULL; 4186 ha->fw_dump_size = 0; 4187 4188 /* Free srb pool. */ 4189 mempool_destroy(ha->srb_mempool); 4190 ha->srb_mempool = NULL; 4191 4192 dma_pool_destroy(ha->chap_dma_pool); 4193 4194 vfree(ha->chap_list); 4195 ha->chap_list = NULL; 4196 4197 dma_pool_destroy(ha->fw_ddb_dma_pool); 4198 4199 /* release io space registers */ 4200 if (is_qla8022(ha)) { 4201 if (ha->nx_pcibase) 4202 iounmap( 4203 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 4204 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4205 if (ha->nx_pcibase) 4206 iounmap( 4207 (struct device_reg_83xx __iomem *)ha->nx_pcibase); 4208 } else if (ha->reg) { 4209 iounmap(ha->reg); 4210 } 4211 4212 vfree(ha->reset_tmplt.buff); 4213 4214 pci_release_regions(ha->pdev); 4215 } 4216 4217 /** 4218 * qla4xxx_mem_alloc - allocates memory for use by adapter. 4219 * @ha: Pointer to host adapter structure 4220 * 4221 * Allocates DMA memory for request and response queues. 
Also allocates memory 4222 * for srbs. 4223 **/ 4224 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) 4225 { 4226 unsigned long align; 4227 4228 /* Allocate contiguous block of DMA memory for queues. */ 4229 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4230 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + 4231 sizeof(struct shadow_regs) + 4232 MEM_ALIGN_VALUE + 4233 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4234 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4235 &ha->queues_dma, GFP_KERNEL); 4236 if (ha->queues == NULL) { 4237 ql4_printk(KERN_WARNING, ha, 4238 "Memory Allocation failed - queues.\n"); 4239 4240 goto mem_alloc_error_exit; 4241 } 4242 4243 /* 4244 * As per RISC alignment requirements -- the bus-address must be a 4245 * multiple of the request-ring size (in bytes). 4246 */ 4247 align = 0; 4248 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) 4249 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & 4250 (MEM_ALIGN_VALUE - 1)); 4251 4252 /* Update request and response queue pointers. */ 4253 ha->request_dma = ha->queues_dma + align; 4254 ha->request_ring = (struct queue_entry *) (ha->queues + align); 4255 ha->response_dma = ha->queues_dma + align + 4256 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); 4257 ha->response_ring = (struct queue_entry *) (ha->queues + align + 4258 (REQUEST_QUEUE_DEPTH * 4259 QUEUE_SIZE)); 4260 ha->shadow_regs_dma = ha->queues_dma + align + 4261 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4262 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); 4263 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + 4264 (REQUEST_QUEUE_DEPTH * 4265 QUEUE_SIZE) + 4266 (RESPONSE_QUEUE_DEPTH * 4267 QUEUE_SIZE)); 4268 4269 /* Allocate memory for srb pool. */ 4270 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 4271 mempool_free_slab, srb_cachep); 4272 if (ha->srb_mempool == NULL) { 4273 ql4_printk(KERN_WARNING, ha, 4274 "Memory Allocation failed - SRB Pool.\n"); 4275 4276 goto mem_alloc_error_exit; 4277 } 4278 4279 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, 4280 CHAP_DMA_BLOCK_SIZE, 8, 0); 4281 4282 if (ha->chap_dma_pool == NULL) { 4283 ql4_printk(KERN_WARNING, ha, 4284 "%s: chap_dma_pool allocation failed..\n", __func__); 4285 goto mem_alloc_error_exit; 4286 } 4287 4288 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, 4289 DDB_DMA_BLOCK_SIZE, 8, 0); 4290 4291 if (ha->fw_ddb_dma_pool == NULL) { 4292 ql4_printk(KERN_WARNING, ha, 4293 "%s: fw_ddb_dma_pool allocation failed..\n", 4294 __func__); 4295 goto mem_alloc_error_exit; 4296 } 4297 4298 return QLA_SUCCESS; 4299 4300 mem_alloc_error_exit: 4301 return QLA_ERROR; 4302 } 4303 4304 /** 4305 * qla4_8xxx_check_temp - Check the ISP82XX temperature. 4306 * @ha: adapter block pointer. 4307 * 4308 * Note: The caller should not hold the idc lock. 4309 **/ 4310 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) 4311 { 4312 uint32_t temp, temp_state, temp_val; 4313 int status = QLA_SUCCESS; 4314 4315 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); 4316 4317 temp_state = qla82xx_get_temp_state(temp); 4318 temp_val = qla82xx_get_temp_val(temp); 4319 4320 if (temp_state == QLA82XX_TEMP_PANIC) { 4321 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" 4322 " exceeds maximum allowed. 
Hardware has been shut" 4323 " down.\n", temp_val); 4324 status = QLA_ERROR; 4325 } else if (temp_state == QLA82XX_TEMP_WARN) { 4326 if (ha->temperature == QLA82XX_TEMP_NORMAL) 4327 ql4_printk(KERN_WARNING, ha, "Device temperature %d" 4328 " degrees C exceeds operating range." 4329 " Immediate action needed.\n", temp_val); 4330 } else { 4331 if (ha->temperature == QLA82XX_TEMP_WARN) 4332 ql4_printk(KERN_INFO, ha, "Device temperature is" 4333 " now %d degrees C in normal range.\n", 4334 temp_val); 4335 } 4336 ha->temperature = temp_state; 4337 return status; 4338 } 4339 4340 /** 4341 * qla4_8xxx_check_fw_alive - Check firmware health 4342 * @ha: Pointer to host adapter structure. 4343 * 4344 * Context: Interrupt 4345 **/ 4346 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) 4347 { 4348 uint32_t fw_heartbeat_counter; 4349 int status = QLA_SUCCESS; 4350 4351 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, 4352 QLA8XXX_PEG_ALIVE_COUNTER); 4353 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 4354 if (fw_heartbeat_counter == 0xffffffff) { 4355 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 4356 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", 4357 ha->host_no, __func__)); 4358 return status; 4359 } 4360 4361 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { 4362 ha->seconds_since_last_heartbeat++; 4363 /* FW not alive after 2 seconds */ 4364 if (ha->seconds_since_last_heartbeat == 2) { 4365 ha->seconds_since_last_heartbeat = 0; 4366 qla4_8xxx_dump_peg_reg(ha); 4367 status = QLA_ERROR; 4368 } 4369 } else 4370 ha->seconds_since_last_heartbeat = 0; 4371 4372 ha->fw_heartbeat_counter = fw_heartbeat_counter; 4373 return status; 4374 } 4375 4376 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) 4377 { 4378 uint32_t halt_status; 4379 int halt_status_unrecoverable = 0; 4380 4381 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); 4382 4383 if (is_qla8022(ha)) { 4384 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4385 __func__); 4386 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4387 CRB_NIU_XG_PAUSE_CTL_P0 | 4388 CRB_NIU_XG_PAUSE_CTL_P1); 4389 4390 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) 4391 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", 4392 __func__); 4393 if (halt_status & HALT_STATUS_UNRECOVERABLE) 4394 halt_status_unrecoverable = 1; 4395 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4396 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) 4397 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", 4398 __func__); 4399 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) 4400 halt_status_unrecoverable = 1; 4401 } 4402 4403 /* 4404 * Since we cannot change dev_state in interrupt context, 4405 * set appropriate DPC flag then wakeup DPC 4406 */ 4407 if (halt_status_unrecoverable) { 4408 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4409 } else { 4410 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", 4411 __func__); 4412 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4413 } 4414 qla4xxx_mailbox_premature_completion(ha); 4415 qla4xxx_wake_dpc(ha); 4416 } 4417 4418 /** 4419 * qla4_8xxx_watchdog - Poll dev state 4420 * @ha: Pointer to host adapter structure. 
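* Polls QLA8XXX_CRB_DEV_STATE and the on-chip temperature; when a reset,
* quiescent or unrecoverable condition is seen, the matching DPC flag
* (DPC_RESET_HA, DPC_HA_NEED_QUIESCENT, DPC_HA_UNRECOVERABLE) is set and
* the DPC thread is woken.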
4421 * 4422 * Context: Interrupt 4423 **/ 4424 void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 4425 { 4426 uint32_t dev_state; 4427 uint32_t idc_ctrl; 4428 4429 if (is_qla8032(ha) && 4430 (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) 4431 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", 4432 __func__, ha->func_num); 4433 4434 /* don't poll if reset is going on */ 4435 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4436 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4437 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 4438 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); 4439 4440 if (qla4_8xxx_check_temp(ha)) { 4441 if (is_qla8022(ha)) { 4442 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); 4443 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4444 CRB_NIU_XG_PAUSE_CTL_P0 | 4445 CRB_NIU_XG_PAUSE_CTL_P1); 4446 } 4447 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4448 qla4xxx_wake_dpc(ha); 4449 } else if (dev_state == QLA8XXX_DEV_NEED_RESET && 4450 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 4451 4452 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", 4453 __func__); 4454 4455 if (is_qla8032(ha) || is_qla8042(ha)) { 4456 idc_ctrl = qla4_83xx_rd_reg(ha, 4457 QLA83XX_IDC_DRV_CTRL); 4458 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { 4459 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", 4460 __func__); 4461 qla4xxx_mailbox_premature_completion( 4462 ha); 4463 } 4464 } 4465 4466 if ((is_qla8032(ha) || is_qla8042(ha)) || 4467 (is_qla8022(ha) && !ql4xdontresethba)) { 4468 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4469 qla4xxx_wake_dpc(ha); 4470 } 4471 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && 4472 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 4473 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 4474 __func__); 4475 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); 4476 qla4xxx_wake_dpc(ha); 4477 } else { 4478 /* Check firmware health */ 4479 if (qla4_8xxx_check_fw_alive(ha)) 4480 qla4_8xxx_process_fw_error(ha); 4481 } 4482 } 4483 } 4484 4485 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 4486 { 4487 struct iscsi_session *sess; 4488 struct ddb_entry *ddb_entry; 4489 struct scsi_qla_host *ha; 4490 4491 sess = cls_sess->dd_data; 4492 ddb_entry = sess->dd_data; 4493 ha = ddb_entry->ha; 4494 4495 if (!(ddb_entry->ddb_type == FLASH_DDB)) 4496 return; 4497 4498 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && 4499 !iscsi_is_session_online(cls_sess)) { 4500 if (atomic_read(&ddb_entry->retry_relogin_timer) != 4501 INVALID_ENTRY) { 4502 if (atomic_read(&ddb_entry->retry_relogin_timer) == 4503 0) { 4504 atomic_set(&ddb_entry->retry_relogin_timer, 4505 INVALID_ENTRY); 4506 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4507 set_bit(DF_RELOGIN, &ddb_entry->flags); 4508 DEBUG2(ql4_printk(KERN_INFO, ha, 4509 "%s: index [%d] login device\n", 4510 __func__, ddb_entry->fw_ddb_index)); 4511 } else 4512 atomic_dec(&ddb_entry->retry_relogin_timer); 4513 } 4514 } 4515 4516 /* Wait for relogin to timeout */ 4517 if (atomic_read(&ddb_entry->relogin_timer) && 4518 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { 4519 /* 4520 * If the relogin times out and the device is 4521 * still NOT ONLINE then try and relogin again. 
4522 */ 4523 if (!iscsi_is_session_online(cls_sess)) { 4524 /* Reset retry relogin timer */ 4525 atomic_inc(&ddb_entry->relogin_retry_count); 4526 DEBUG2(ql4_printk(KERN_INFO, ha, 4527 "%s: index[%d] relogin timed out-retrying" 4528 " relogin (%d), retry (%d)\n", __func__, 4529 ddb_entry->fw_ddb_index, 4530 atomic_read(&ddb_entry->relogin_retry_count), 4531 ddb_entry->default_time2wait + 4)); 4532 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4533 atomic_set(&ddb_entry->retry_relogin_timer, 4534 ddb_entry->default_time2wait + 4); 4535 } 4536 } 4537 } 4538 4539 /** 4540 * qla4xxx_timer - checks every second for work to do. 4541 * @t: Context to obtain pointer to host adapter structure. 4542 **/ 4543 static void qla4xxx_timer(struct timer_list *t) 4544 { 4545 struct scsi_qla_host *ha = from_timer(ha, t, timer); 4546 int start_dpc = 0; 4547 uint16_t w; 4548 4549 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); 4550 4551 /* If we are in the middle of AER/EEH processing 4552 * skip any processing and reschedule the timer 4553 */ 4554 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4555 mod_timer(&ha->timer, jiffies + HZ); 4556 return; 4557 } 4558 4559 /* Hardware read to trigger an EEH error during mailbox waits. */ 4560 if (!pci_channel_offline(ha->pdev)) 4561 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 4562 4563 if (is_qla80XX(ha)) 4564 qla4_8xxx_watchdog(ha); 4565 4566 if (is_qla40XX(ha)) { 4567 /* Check for heartbeat interval. */ 4568 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 4569 ha->heartbeat_interval != 0) { 4570 ha->seconds_since_last_heartbeat++; 4571 if (ha->seconds_since_last_heartbeat > 4572 ha->heartbeat_interval + 2) 4573 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4574 } 4575 } 4576 4577 /* Process any deferred work. */ 4578 if (!list_empty(&ha->work_list)) 4579 start_dpc++; 4580 4581 /* Wakeup the dpc routine for this adapter, if needed. */ 4582 if (start_dpc || 4583 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4584 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 4585 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 4586 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 4587 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4588 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 4589 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 4590 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4591 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4592 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || 4593 test_bit(DPC_AEN, &ha->dpc_flags)) { 4594 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 4595 " - dpc flags = 0x%lx\n", 4596 ha->host_no, __func__, ha->dpc_flags)); 4597 qla4xxx_wake_dpc(ha); 4598 } 4599 4600 /* Reschedule timer thread to call us back in one second */ 4601 mod_timer(&ha->timer, jiffies + HZ); 4602 4603 DEBUG2(ha->seconds_since_last_intr++); 4604 } 4605 4606 /** 4607 * qla4xxx_cmd_wait - waits for all outstanding commands to complete 4608 * @ha: Pointer to host adapter structure. 4609 * 4610 * This routine stalls the driver until all outstanding commands are returned. 4611 * Caller must release the Hardware Lock prior to calling this routine. 
4612 **/ 4613 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) 4614 { 4615 uint32_t index = 0; 4616 unsigned long flags; 4617 struct scsi_cmnd *cmd; 4618 unsigned long wtime; 4619 uint32_t wtmo; 4620 4621 if (is_qla40XX(ha)) 4622 wtmo = WAIT_CMD_TOV; 4623 else 4624 wtmo = ha->nx_reset_timeout / 2; 4625 4626 wtime = jiffies + (wtmo * HZ); 4627 4628 DEBUG2(ql4_printk(KERN_INFO, ha, 4629 "Wait up to %u seconds for cmds to complete\n", 4630 wtmo)); 4631 4632 while (!time_after_eq(jiffies, wtime)) { 4633 spin_lock_irqsave(&ha->hardware_lock, flags); 4634 /* Find a command that hasn't completed. */ 4635 for (index = 0; index < ha->host->can_queue; index++) { 4636 cmd = scsi_host_find_tag(ha->host, index); 4637 /* 4638 * We cannot just check if the index is valid, 4639 * becase if we are run from the scsi eh, then 4640 * the scsi/block layer is going to prevent 4641 * the tag from being released. 4642 */ 4643 if (cmd != NULL && CMD_SP(cmd)) 4644 break; 4645 } 4646 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4647 4648 /* If No Commands are pending, wait is complete */ 4649 if (index == ha->host->can_queue) 4650 return QLA_SUCCESS; 4651 4652 msleep(1000); 4653 } 4654 /* If we timed out on waiting for commands to come back 4655 * return ERROR. */ 4656 return QLA_ERROR; 4657 } 4658 4659 int qla4xxx_hw_reset(struct scsi_qla_host *ha) 4660 { 4661 uint32_t ctrl_status; 4662 unsigned long flags = 0; 4663 4664 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); 4665 4666 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) 4667 return QLA_ERROR; 4668 4669 spin_lock_irqsave(&ha->hardware_lock, flags); 4670 4671 /* 4672 * If the SCSI Reset Interrupt bit is set, clear it. 4673 * Otherwise, the Soft Reset won't work. 4674 */ 4675 ctrl_status = readw(&ha->reg->ctrl_status); 4676 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) 4677 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4678 4679 /* Issue Soft Reset */ 4680 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); 4681 readl(&ha->reg->ctrl_status); 4682 4683 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4684 return QLA_SUCCESS; 4685 } 4686 4687 /** 4688 * qla4xxx_soft_reset - performs soft reset. 4689 * @ha: Pointer to host adapter structure. 
4690 **/ 4691 int qla4xxx_soft_reset(struct scsi_qla_host *ha) 4692 { 4693 uint32_t max_wait_time; 4694 unsigned long flags = 0; 4695 int status; 4696 uint32_t ctrl_status; 4697 4698 status = qla4xxx_hw_reset(ha); 4699 if (status != QLA_SUCCESS) 4700 return status; 4701 4702 status = QLA_ERROR; 4703 /* Wait until the Network Reset Intr bit is cleared */ 4704 max_wait_time = RESET_INTR_TOV; 4705 do { 4706 spin_lock_irqsave(&ha->hardware_lock, flags); 4707 ctrl_status = readw(&ha->reg->ctrl_status); 4708 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4709 4710 if ((ctrl_status & CSR_NET_RESET_INTR) == 0) 4711 break; 4712 4713 msleep(1000); 4714 } while ((--max_wait_time)); 4715 4716 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { 4717 DEBUG2(printk(KERN_WARNING 4718 "scsi%ld: Network Reset Intr not cleared by " 4719 "Network function, clearing it now!\n", 4720 ha->host_no)); 4721 spin_lock_irqsave(&ha->hardware_lock, flags); 4722 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); 4723 readl(&ha->reg->ctrl_status); 4724 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4725 } 4726 4727 /* Wait until the firmware tells us the Soft Reset is done */ 4728 max_wait_time = SOFT_RESET_TOV; 4729 do { 4730 spin_lock_irqsave(&ha->hardware_lock, flags); 4731 ctrl_status = readw(&ha->reg->ctrl_status); 4732 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4733 4734 if ((ctrl_status & CSR_SOFT_RESET) == 0) { 4735 status = QLA_SUCCESS; 4736 break; 4737 } 4738 4739 msleep(1000); 4740 } while ((--max_wait_time)); 4741 4742 /* 4743 * Also, make sure that the SCSI Reset Interrupt bit has been cleared 4744 * after the soft reset has taken place. 4745 */ 4746 spin_lock_irqsave(&ha->hardware_lock, flags); 4747 ctrl_status = readw(&ha->reg->ctrl_status); 4748 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { 4749 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4750 readl(&ha->reg->ctrl_status); 4751 } 4752 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4753 4754 /* If soft reset fails then most probably the bios on other 4755 * function is also enabled. 4756 * Since the initialization is sequential the other fn 4757 * wont be able to acknowledge the soft reset. 4758 * Issue a force soft reset to workaround this scenario. 4759 */ 4760 if (max_wait_time == 0) { 4761 /* Issue Force Soft Reset */ 4762 spin_lock_irqsave(&ha->hardware_lock, flags); 4763 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); 4764 readl(&ha->reg->ctrl_status); 4765 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4766 /* Wait until the firmware tells us the Soft Reset is done */ 4767 max_wait_time = SOFT_RESET_TOV; 4768 do { 4769 spin_lock_irqsave(&ha->hardware_lock, flags); 4770 ctrl_status = readw(&ha->reg->ctrl_status); 4771 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4772 4773 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { 4774 status = QLA_SUCCESS; 4775 break; 4776 } 4777 4778 msleep(1000); 4779 } while ((--max_wait_time)); 4780 } 4781 4782 return status; 4783 } 4784 4785 /** 4786 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. 4787 * @ha: Pointer to host adapter structure. 4788 * @res: returned scsi status 4789 * 4790 * This routine is called just prior to a HARD RESET to return all 4791 * outstanding commands back to the Operating System. 4792 * Caller should make sure that the following locks are released 4793 * before this calling routine: Hardware lock, and io_request_lock. 
4794 **/ 4795 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) 4796 { 4797 struct srb *srb; 4798 int i; 4799 unsigned long flags; 4800 4801 spin_lock_irqsave(&ha->hardware_lock, flags); 4802 for (i = 0; i < ha->host->can_queue; i++) { 4803 srb = qla4xxx_del_from_active_array(ha, i); 4804 if (srb != NULL) { 4805 srb->cmd->result = res; 4806 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 4807 } 4808 } 4809 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4810 } 4811 4812 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) 4813 { 4814 clear_bit(AF_ONLINE, &ha->flags); 4815 4816 /* Disable the board */ 4817 ql4_printk(KERN_INFO, ha, "Disabling the board\n"); 4818 4819 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 4820 qla4xxx_mark_all_devices_missing(ha); 4821 clear_bit(AF_INIT_DONE, &ha->flags); 4822 } 4823 4824 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) 4825 { 4826 struct iscsi_session *sess; 4827 struct ddb_entry *ddb_entry; 4828 4829 sess = cls_session->dd_data; 4830 ddb_entry = sess->dd_data; 4831 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; 4832 4833 if (ddb_entry->ddb_type == FLASH_DDB) 4834 iscsi_block_session(ddb_entry->sess); 4835 else 4836 iscsi_session_failure(cls_session->dd_data, 4837 ISCSI_ERR_CONN_FAILED); 4838 } 4839 4840 /** 4841 * qla4xxx_recover_adapter - recovers adapter after a fatal error 4842 * @ha: Pointer to host adapter structure. 4843 **/ 4844 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) 4845 { 4846 int status = QLA_ERROR; 4847 uint8_t reset_chip = 0; 4848 uint32_t dev_state; 4849 unsigned long wait; 4850 4851 /* Stall incoming I/O until we are done */ 4852 scsi_block_requests(ha->host); 4853 clear_bit(AF_ONLINE, &ha->flags); 4854 clear_bit(AF_LINK_UP, &ha->flags); 4855 4856 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); 4857 4858 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 4859 4860 if ((is_qla8032(ha) || is_qla8042(ha)) && 4861 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4862 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4863 __func__); 4864 /* disable pause frame for ISP83xx */ 4865 qla4_83xx_disable_pause(ha); 4866 } 4867 4868 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 4869 4870 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 4871 reset_chip = 1; 4872 4873 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) 4874 * do not reset adapter, jump to initialize_adapter */ 4875 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4876 status = QLA_SUCCESS; 4877 goto recover_ha_init_adapter; 4878 } 4879 4880 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked 4881 * from eh_host_reset or ioctl module */ 4882 if (is_qla80XX(ha) && !reset_chip && 4883 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4884 4885 DEBUG2(ql4_printk(KERN_INFO, ha, 4886 "scsi%ld: %s - Performing stop_firmware...\n", 4887 ha->host_no, __func__)); 4888 status = ha->isp_ops->reset_firmware(ha); 4889 if (status == QLA_SUCCESS) { 4890 ha->isp_ops->disable_intrs(ha); 4891 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4892 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4893 } else { 4894 /* If the stop_firmware fails then 4895 * reset the entire chip */ 4896 reset_chip = 1; 4897 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4898 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4899 } 4900 } 4901 4902 /* Issue full chip reset if recovering from a catastrophic error, 4903 * or if stop_firmware fails for ISP-8xxx. 
4904 * This is the default case for ISP-4xxx */ 4905 if (is_qla40XX(ha) || reset_chip) { 4906 if (is_qla40XX(ha)) 4907 goto chip_reset; 4908 4909 /* Check if 8XXX firmware is alive or not 4910 * We may have arrived here from NEED_RESET 4911 * detection only */ 4912 if (test_bit(AF_FW_RECOVERY, &ha->flags)) 4913 goto chip_reset; 4914 4915 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ); 4916 while (time_before(jiffies, wait)) { 4917 if (qla4_8xxx_check_fw_alive(ha)) { 4918 qla4xxx_mailbox_premature_completion(ha); 4919 break; 4920 } 4921 4922 set_current_state(TASK_UNINTERRUPTIBLE); 4923 schedule_timeout(HZ); 4924 } 4925 chip_reset: 4926 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 4927 qla4xxx_cmd_wait(ha); 4928 4929 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4930 DEBUG2(ql4_printk(KERN_INFO, ha, 4931 "scsi%ld: %s - Performing chip reset..\n", 4932 ha->host_no, __func__)); 4933 status = ha->isp_ops->reset_chip(ha); 4934 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4935 } 4936 4937 /* Flush any pending ddb changed AENs */ 4938 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4939 4940 recover_ha_init_adapter: 4941 /* Upon successful firmware/chip reset, re-initialize the adapter */ 4942 if (status == QLA_SUCCESS) { 4943 /* For ISP-4xxx, force function 1 to always initialize 4944 * before function 3 to prevent both funcions from 4945 * stepping on top of the other */ 4946 if (is_qla40XX(ha) && (ha->mac_index == 3)) 4947 ssleep(6); 4948 4949 /* NOTE: AF_ONLINE flag set upon successful completion of 4950 * qla4xxx_initialize_adapter */ 4951 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 4952 if (is_qla80XX(ha) && (status == QLA_ERROR)) { 4953 status = qla4_8xxx_check_init_adapter_retry(ha); 4954 if (status == QLA_ERROR) { 4955 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", 4956 ha->host_no, __func__); 4957 qla4xxx_dead_adapter_cleanup(ha); 4958 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4959 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4960 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4961 &ha->dpc_flags); 4962 goto exit_recover; 4963 } 4964 } 4965 } 4966 4967 /* Retry failed adapter initialization, if necessary 4968 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific) 4969 * case to prevent ping-pong resets between functions */ 4970 if (!test_bit(AF_ONLINE, &ha->flags) && 4971 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4972 /* Adapter initialization failed, see if we can retry 4973 * resetting the ha. 4974 * Since we don't want to block the DPC for too long 4975 * with multiple resets in the same thread, 4976 * utilize DPC to retry */ 4977 if (is_qla80XX(ha)) { 4978 ha->isp_ops->idc_lock(ha); 4979 dev_state = qla4_8xxx_rd_direct(ha, 4980 QLA8XXX_CRB_DEV_STATE); 4981 ha->isp_ops->idc_unlock(ha); 4982 if (dev_state == QLA8XXX_DEV_FAILED) { 4983 ql4_printk(KERN_INFO, ha, "%s: don't retry " 4984 "recover adapter. 
H/W is in Failed " 4985 "state\n", __func__); 4986 qla4xxx_dead_adapter_cleanup(ha); 4987 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4988 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4989 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4990 &ha->dpc_flags); 4991 status = QLA_ERROR; 4992 4993 goto exit_recover; 4994 } 4995 } 4996 4997 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { 4998 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; 4999 DEBUG2(printk("scsi%ld: recover adapter - retrying " 5000 "(%d) more times\n", ha->host_no, 5001 ha->retry_reset_ha_cnt)); 5002 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5003 status = QLA_ERROR; 5004 } else { 5005 if (ha->retry_reset_ha_cnt > 0) { 5006 /* Schedule another Reset HA--DPC will retry */ 5007 ha->retry_reset_ha_cnt--; 5008 DEBUG2(printk("scsi%ld: recover adapter - " 5009 "retry remaining %d\n", 5010 ha->host_no, 5011 ha->retry_reset_ha_cnt)); 5012 status = QLA_ERROR; 5013 } 5014 5015 if (ha->retry_reset_ha_cnt == 0) { 5016 /* Recover adapter retries have been exhausted. 5017 * Adapter DEAD */ 5018 DEBUG2(printk("scsi%ld: recover adapter " 5019 "failed - board disabled\n", 5020 ha->host_no)); 5021 qla4xxx_dead_adapter_cleanup(ha); 5022 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5023 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5024 clear_bit(DPC_RESET_HA_FW_CONTEXT, 5025 &ha->dpc_flags); 5026 status = QLA_ERROR; 5027 } 5028 } 5029 } else { 5030 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5031 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5032 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5033 } 5034 5035 exit_recover: 5036 ha->adapter_error_count++; 5037 5038 if (test_bit(AF_ONLINE, &ha->flags)) 5039 ha->isp_ops->enable_intrs(ha); 5040 5041 scsi_unblock_requests(ha->host); 5042 5043 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 5044 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, 5045 status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED")); 5046 5047 return status; 5048 } 5049 5050 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) 5051 { 5052 struct iscsi_session *sess; 5053 struct ddb_entry *ddb_entry; 5054 struct scsi_qla_host *ha; 5055 5056 sess = cls_session->dd_data; 5057 ddb_entry = sess->dd_data; 5058 ha = ddb_entry->ha; 5059 if (!iscsi_is_session_online(cls_session)) { 5060 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 5061 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5062 " unblock session\n", ha->host_no, __func__, 5063 ddb_entry->fw_ddb_index); 5064 iscsi_unblock_session(ddb_entry->sess); 5065 } else { 5066 /* Trigger relogin */ 5067 if (ddb_entry->ddb_type == FLASH_DDB) { 5068 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || 5069 test_bit(DF_DISABLE_RELOGIN, 5070 &ddb_entry->flags))) 5071 qla4xxx_arm_relogin_timer(ddb_entry); 5072 } else 5073 iscsi_session_failure(cls_session->dd_data, 5074 ISCSI_ERR_CONN_FAILED); 5075 } 5076 } 5077 } 5078 5079 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) 5080 { 5081 struct iscsi_session *sess; 5082 struct ddb_entry *ddb_entry; 5083 struct scsi_qla_host *ha; 5084 5085 sess = cls_session->dd_data; 5086 ddb_entry = sess->dd_data; 5087 ha = ddb_entry->ha; 5088 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5089 " unblock session\n", ha->host_no, __func__, 5090 ddb_entry->fw_ddb_index); 5091 5092 iscsi_unblock_session(ddb_entry->sess); 5093 5094 /* Start scan target */ 5095 if (test_bit(AF_ONLINE, &ha->flags)) { 5096 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5097 " start scan\n", ha->host_no, __func__, 5098 ddb_entry->fw_ddb_index); 5099 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work); 5100 } 5101 return QLA_SUCCESS; 5102 } 5103 5104 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) 5105 { 5106 struct iscsi_session *sess; 5107 struct ddb_entry *ddb_entry; 5108 struct scsi_qla_host *ha; 5109 int status = QLA_SUCCESS; 5110 5111 sess = cls_session->dd_data; 5112 ddb_entry = sess->dd_data; 5113 ha = ddb_entry->ha; 5114 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5115 " unblock user space session\n", ha->host_no, __func__, 5116 ddb_entry->fw_ddb_index); 5117 5118 if (!iscsi_is_session_online(cls_session)) { 5119 iscsi_conn_start(ddb_entry->conn); 5120 iscsi_conn_login_event(ddb_entry->conn, 5121 ISCSI_CONN_STATE_LOGGED_IN); 5122 } else { 5123 ql4_printk(KERN_INFO, ha, 5124 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", 5125 ha->host_no, __func__, ddb_entry->fw_ddb_index, 5126 cls_session->sid); 5127 status = QLA_ERROR; 5128 } 5129 5130 return status; 5131 } 5132 5133 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 5134 { 5135 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); 5136 } 5137 5138 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 5139 { 5140 uint16_t relogin_timer; 5141 struct iscsi_session *sess; 5142 struct ddb_entry *ddb_entry; 5143 struct scsi_qla_host *ha; 5144 5145 sess = cls_sess->dd_data; 5146 ddb_entry = sess->dd_data; 5147 ha = ddb_entry->ha; 5148 5149 relogin_timer = max(ddb_entry->default_relogin_timeout, 5150 (uint16_t)RELOGIN_TOV); 5151 atomic_set(&ddb_entry->relogin_timer, relogin_timer); 5152 5153 DEBUG2(ql4_printk(KERN_INFO, ha, 5154 "scsi%ld: Relogin index [%d]. 
TOV=%d\n", ha->host_no, 5155 ddb_entry->fw_ddb_index, relogin_timer)); 5156 5157 qla4xxx_login_flash_ddb(cls_sess); 5158 } 5159 5160 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) 5161 { 5162 struct iscsi_session *sess; 5163 struct ddb_entry *ddb_entry; 5164 struct scsi_qla_host *ha; 5165 5166 sess = cls_sess->dd_data; 5167 ddb_entry = sess->dd_data; 5168 ha = ddb_entry->ha; 5169 5170 if (!(ddb_entry->ddb_type == FLASH_DDB)) 5171 return; 5172 5173 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 5174 return; 5175 5176 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 5177 !iscsi_is_session_online(cls_sess)) { 5178 DEBUG2(ql4_printk(KERN_INFO, ha, 5179 "relogin issued\n")); 5180 qla4xxx_relogin_flash_ddb(cls_sess); 5181 } 5182 } 5183 5184 void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 5185 { 5186 if (ha->dpc_thread) 5187 queue_work(ha->dpc_thread, &ha->dpc_work); 5188 } 5189 5190 static struct qla4_work_evt * 5191 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, 5192 enum qla4_work_type type) 5193 { 5194 struct qla4_work_evt *e; 5195 uint32_t size = sizeof(struct qla4_work_evt) + data_size; 5196 5197 e = kzalloc(size, GFP_ATOMIC); 5198 if (!e) 5199 return NULL; 5200 5201 INIT_LIST_HEAD(&e->list); 5202 e->type = type; 5203 return e; 5204 } 5205 5206 static void qla4xxx_post_work(struct scsi_qla_host *ha, 5207 struct qla4_work_evt *e) 5208 { 5209 unsigned long flags; 5210 5211 spin_lock_irqsave(&ha->work_lock, flags); 5212 list_add_tail(&e->list, &ha->work_list); 5213 spin_unlock_irqrestore(&ha->work_lock, flags); 5214 qla4xxx_wake_dpc(ha); 5215 } 5216 5217 int qla4xxx_post_aen_work(struct scsi_qla_host *ha, 5218 enum iscsi_host_event_code aen_code, 5219 uint32_t data_size, uint8_t *data) 5220 { 5221 struct qla4_work_evt *e; 5222 5223 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); 5224 if (!e) 5225 return QLA_ERROR; 5226 5227 e->u.aen.code = aen_code; 5228 e->u.aen.data_size = data_size; 5229 memcpy(e->u.aen.data, data, data_size); 5230 5231 qla4xxx_post_work(ha, e); 5232 5233 return QLA_SUCCESS; 5234 } 5235 5236 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, 5237 uint32_t status, uint32_t pid, 5238 uint32_t data_size, uint8_t *data) 5239 { 5240 struct qla4_work_evt *e; 5241 5242 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); 5243 if (!e) 5244 return QLA_ERROR; 5245 5246 e->u.ping.status = status; 5247 e->u.ping.pid = pid; 5248 e->u.ping.data_size = data_size; 5249 memcpy(e->u.ping.data, data, data_size); 5250 5251 qla4xxx_post_work(ha, e); 5252 5253 return QLA_SUCCESS; 5254 } 5255 5256 static void qla4xxx_do_work(struct scsi_qla_host *ha) 5257 { 5258 struct qla4_work_evt *e, *tmp; 5259 unsigned long flags; 5260 LIST_HEAD(work); 5261 5262 spin_lock_irqsave(&ha->work_lock, flags); 5263 list_splice_init(&ha->work_list, &work); 5264 spin_unlock_irqrestore(&ha->work_lock, flags); 5265 5266 list_for_each_entry_safe(e, tmp, &work, list) { 5267 list_del_init(&e->list); 5268 5269 switch (e->type) { 5270 case QLA4_EVENT_AEN: 5271 iscsi_post_host_event(ha->host_no, 5272 &qla4xxx_iscsi_transport, 5273 e->u.aen.code, 5274 e->u.aen.data_size, 5275 e->u.aen.data); 5276 break; 5277 case QLA4_EVENT_PING_STATUS: 5278 iscsi_ping_comp_event(ha->host_no, 5279 &qla4xxx_iscsi_transport, 5280 e->u.ping.status, 5281 e->u.ping.pid, 5282 e->u.ping.data_size, 5283 e->u.ping.data); 5284 break; 5285 default: 5286 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " 5287 "supported", e->type); 5288 } 5289 kfree(e); 5290 } 5291 } 5292 5293 
/** 5294 * qla4xxx_do_dpc - dpc routine 5295 * @work: Context to obtain pointer to host adapter structure. 5296 * 5297 * This routine is a task that is schedule by the interrupt handler 5298 * to perform the background processing for interrupts. We put it 5299 * on a task queue that is consumed whenever the scheduler runs; that's 5300 * so you can do anything (i.e. put the process to sleep etc). In fact, 5301 * the mid-level tries to sleep when it reaches the driver threshold 5302 * "host->can_queue". This can cause a panic if we were in our interrupt code. 5303 **/ 5304 static void qla4xxx_do_dpc(struct work_struct *work) 5305 { 5306 struct scsi_qla_host *ha = 5307 container_of(work, struct scsi_qla_host, dpc_work); 5308 int status = QLA_ERROR; 5309 5310 DEBUG2(ql4_printk(KERN_INFO, ha, 5311 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", 5312 ha->host_no, __func__, ha->flags, ha->dpc_flags)); 5313 5314 /* Initialization not yet finished. Don't do anything yet. */ 5315 if (!test_bit(AF_INIT_DONE, &ha->flags)) 5316 return; 5317 5318 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 5319 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 5320 ha->host_no, __func__, ha->flags)); 5321 return; 5322 } 5323 5324 /* post events to application */ 5325 qla4xxx_do_work(ha); 5326 5327 if (is_qla80XX(ha)) { 5328 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 5329 if (is_qla8032(ha) || is_qla8042(ha)) { 5330 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 5331 __func__); 5332 /* disable pause frame for ISP83xx */ 5333 qla4_83xx_disable_pause(ha); 5334 } 5335 5336 ha->isp_ops->idc_lock(ha); 5337 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 5338 QLA8XXX_DEV_FAILED); 5339 ha->isp_ops->idc_unlock(ha); 5340 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 5341 qla4_8xxx_device_state_handler(ha); 5342 } 5343 5344 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { 5345 if (is_qla8042(ha)) { 5346 if (ha->idc_info.info2 & 5347 ENABLE_INTERNAL_LOOPBACK) { 5348 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", 5349 __func__); 5350 status = qla4_84xx_config_acb(ha, 5351 ACB_CONFIG_DISABLE); 5352 if (status != QLA_SUCCESS) { 5353 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", 5354 __func__); 5355 } 5356 } 5357 } 5358 qla4_83xx_post_idc_ack(ha); 5359 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); 5360 } 5361 5362 if (is_qla8042(ha) && 5363 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { 5364 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", 5365 __func__); 5366 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != 5367 QLA_SUCCESS) { 5368 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", 5369 __func__); 5370 } 5371 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); 5372 } 5373 5374 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 5375 qla4_8xxx_need_qsnt_handler(ha); 5376 } 5377 } 5378 5379 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && 5380 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 5381 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 5382 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 5383 if ((is_qla8022(ha) && ql4xdontresethba) || 5384 ((is_qla8032(ha) || is_qla8042(ha)) && 5385 qla4_83xx_idc_dontreset(ha))) { 5386 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5387 ha->host_no, __func__)); 5388 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5389 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5390 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5391 goto dpc_post_reset_ha; 5392 } 5393 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 
5394 test_bit(DPC_RESET_HA, &ha->dpc_flags)) 5395 qla4xxx_recover_adapter(ha); 5396 5397 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 5398 uint8_t wait_time = RESET_INTR_TOV; 5399 5400 while ((readw(&ha->reg->ctrl_status) & 5401 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { 5402 if (--wait_time == 0) 5403 break; 5404 msleep(1000); 5405 } 5406 if (wait_time == 0) 5407 DEBUG2(printk("scsi%ld: %s: SR|FSR " 5408 "bit not cleared-- resetting\n", 5409 ha->host_no, __func__)); 5410 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 5411 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { 5412 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 5413 status = qla4xxx_recover_adapter(ha); 5414 } 5415 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5416 if (status == QLA_SUCCESS) 5417 ha->isp_ops->enable_intrs(ha); 5418 } 5419 } 5420 5421 dpc_post_reset_ha: 5422 /* ---- process AEN? --- */ 5423 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 5424 qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 5425 5426 /* ---- Get DHCP IP Address? --- */ 5427 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 5428 qla4xxx_get_dhcp_ip_address(ha); 5429 5430 /* ---- relogin device? --- */ 5431 if (adapter_up(ha) && 5432 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { 5433 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); 5434 } 5435 5436 /* ---- link change? --- */ 5437 if (!test_bit(AF_LOOPBACK, &ha->flags) && 5438 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 5439 if (!test_bit(AF_LINK_UP, &ha->flags)) { 5440 /* ---- link down? --- */ 5441 qla4xxx_mark_all_devices_missing(ha); 5442 } else { 5443 /* ---- link up? --- * 5444 * F/W will auto login to all devices ONLY ONCE after 5445 * link up during driver initialization and runtime 5446 * fatal error recovery. Therefore, the driver must 5447 * manually relogin to devices when recovering from 5448 * connection failures, logouts, expired KATO, etc. */ 5449 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { 5450 qla4xxx_build_ddb_list(ha, ha->is_reset); 5451 iscsi_host_for_each_session(ha->host, 5452 qla4xxx_login_flash_ddb); 5453 } else 5454 qla4xxx_relogin_all_devices(ha); 5455 } 5456 } 5457 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { 5458 if (qla4xxx_sysfs_ddb_export(ha)) 5459 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", 5460 __func__); 5461 } 5462 } 5463 5464 /** 5465 * qla4xxx_free_adapter - release the adapter 5466 * @ha: pointer to adapter structure 5467 **/ 5468 static void qla4xxx_free_adapter(struct scsi_qla_host *ha) 5469 { 5470 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 5471 5472 /* Turn-off interrupts on the card. 
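In addition to isp_ops->disable_intrs() below, each ISP family also has its interrupt register written directly: ctrl_status on ISP4xxx, host_int on ISP8022, risc_intr on ISP8032/8042.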
*/ 5473 ha->isp_ops->disable_intrs(ha); 5474 5475 if (is_qla40XX(ha)) { 5476 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 5477 &ha->reg->ctrl_status); 5478 readl(&ha->reg->ctrl_status); 5479 } else if (is_qla8022(ha)) { 5480 writel(0, &ha->qla4_82xx_reg->host_int); 5481 readl(&ha->qla4_82xx_reg->host_int); 5482 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5483 writel(0, &ha->qla4_83xx_reg->risc_intr); 5484 readl(&ha->qla4_83xx_reg->risc_intr); 5485 } 5486 5487 /* Remove timer thread, if present */ 5488 if (ha->timer_active) 5489 qla4xxx_stop_timer(ha); 5490 5491 /* Kill the kernel thread for this host */ 5492 if (ha->dpc_thread) 5493 destroy_workqueue(ha->dpc_thread); 5494 5495 /* Kill the kernel thread for this host */ 5496 if (ha->task_wq) 5497 destroy_workqueue(ha->task_wq); 5498 5499 /* Put firmware in known state */ 5500 ha->isp_ops->reset_firmware(ha); 5501 5502 if (is_qla80XX(ha)) { 5503 ha->isp_ops->idc_lock(ha); 5504 qla4_8xxx_clear_drv_active(ha); 5505 ha->isp_ops->idc_unlock(ha); 5506 } 5507 5508 /* Detach interrupts */ 5509 qla4xxx_free_irqs(ha); 5510 5511 /* free extra memory */ 5512 qla4xxx_mem_free(ha); 5513 } 5514 5515 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) 5516 { 5517 int status = 0; 5518 unsigned long mem_base, mem_len; 5519 struct pci_dev *pdev = ha->pdev; 5520 5521 status = pci_request_regions(pdev, DRIVER_NAME); 5522 if (status) { 5523 printk(KERN_WARNING 5524 "scsi(%ld) Failed to reserve PIO regions (%s) " 5525 "status=%d\n", ha->host_no, pci_name(pdev), status); 5526 goto iospace_error_exit; 5527 } 5528 5529 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", 5530 __func__, pdev->revision)); 5531 ha->revision_id = pdev->revision; 5532 5533 /* remap phys address */ 5534 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 5535 mem_len = pci_resource_len(pdev, 0); 5536 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", 5537 __func__, mem_base, mem_len)); 5538 5539 /* mapping of pcibase pointer */ 5540 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); 5541 if (!ha->nx_pcibase) { 5542 printk(KERN_ERR 5543 "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); 5544 pci_release_regions(ha->pdev); 5545 goto iospace_error_exit; 5546 } 5547 5548 /* Mapping of IO base pointer, door bell read and write pointer */ 5549 5550 /* mapping of IO base pointer */ 5551 if (is_qla8022(ha)) { 5552 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) 5553 ((uint8_t *)ha->nx_pcibase + 0xbc000 + 5554 (ha->pdev->devfn << 11)); 5555 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : 5556 QLA82XX_CAM_RAM_DB2); 5557 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5558 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) 5559 ((uint8_t *)ha->nx_pcibase); 5560 } 5561 5562 return 0; 5563 iospace_error_exit: 5564 return -ENOMEM; 5565 } 5566 5567 /*** 5568 * qla4xxx_iospace_config - maps registers 5569 * @ha: pointer to adapter structure 5570 * 5571 * This routines maps HBA's registers from the pci address space 5572 * into the kernel virtual address space for memory mapped i/o. 
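* Returns 0 on success and -ENOMEM if the MMIO region is invalid, cannot be reserved or cannot be remapped.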
5573 **/ 5574 int qla4xxx_iospace_config(struct scsi_qla_host *ha) 5575 { 5576 unsigned long pio, pio_len, pio_flags; 5577 unsigned long mmio, mmio_len, mmio_flags; 5578 5579 pio = pci_resource_start(ha->pdev, 0); 5580 pio_len = pci_resource_len(ha->pdev, 0); 5581 pio_flags = pci_resource_flags(ha->pdev, 0); 5582 if (pio_flags & IORESOURCE_IO) { 5583 if (pio_len < MIN_IOBASE_LEN) { 5584 ql4_printk(KERN_WARNING, ha, 5585 "Invalid PCI I/O region size\n"); 5586 pio = 0; 5587 } 5588 } else { 5589 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); 5590 pio = 0; 5591 } 5592 5593 /* Use MMIO operations for all accesses. */ 5594 mmio = pci_resource_start(ha->pdev, 1); 5595 mmio_len = pci_resource_len(ha->pdev, 1); 5596 mmio_flags = pci_resource_flags(ha->pdev, 1); 5597 5598 if (!(mmio_flags & IORESOURCE_MEM)) { 5599 ql4_printk(KERN_ERR, ha, 5600 "region #0 not an MMIO resource, aborting\n"); 5601 5602 goto iospace_error_exit; 5603 } 5604 5605 if (mmio_len < MIN_IOBASE_LEN) { 5606 ql4_printk(KERN_ERR, ha, 5607 "Invalid PCI mem region size, aborting\n"); 5608 goto iospace_error_exit; 5609 } 5610 5611 if (pci_request_regions(ha->pdev, DRIVER_NAME)) { 5612 ql4_printk(KERN_WARNING, ha, 5613 "Failed to reserve PIO/MMIO regions\n"); 5614 5615 goto iospace_error_exit; 5616 } 5617 5618 ha->pio_address = pio; 5619 ha->pio_length = pio_len; 5620 ha->reg = ioremap(mmio, MIN_IOBASE_LEN); 5621 if (!ha->reg) { 5622 ql4_printk(KERN_ERR, ha, 5623 "cannot remap MMIO, aborting\n"); 5624 5625 goto iospace_error_exit; 5626 } 5627 5628 return 0; 5629 5630 iospace_error_exit: 5631 return -ENOMEM; 5632 } 5633 5634 static struct isp_operations qla4xxx_isp_ops = { 5635 .iospace_config = qla4xxx_iospace_config, 5636 .pci_config = qla4xxx_pci_config, 5637 .disable_intrs = qla4xxx_disable_intrs, 5638 .enable_intrs = qla4xxx_enable_intrs, 5639 .start_firmware = qla4xxx_start_firmware, 5640 .intr_handler = qla4xxx_intr_handler, 5641 .interrupt_service_routine = qla4xxx_interrupt_service_routine, 5642 .reset_chip = qla4xxx_soft_reset, 5643 .reset_firmware = qla4xxx_hw_reset, 5644 .queue_iocb = qla4xxx_queue_iocb, 5645 .complete_iocb = qla4xxx_complete_iocb, 5646 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5647 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5648 .get_sys_info = qla4xxx_get_sys_info, 5649 .queue_mailbox_command = qla4xxx_queue_mbox_cmd, 5650 .process_mailbox_interrupt = qla4xxx_process_mbox_intr, 5651 }; 5652 5653 static struct isp_operations qla4_82xx_isp_ops = { 5654 .iospace_config = qla4_8xxx_iospace_config, 5655 .pci_config = qla4_8xxx_pci_config, 5656 .disable_intrs = qla4_82xx_disable_intrs, 5657 .enable_intrs = qla4_82xx_enable_intrs, 5658 .start_firmware = qla4_8xxx_load_risc, 5659 .restart_firmware = qla4_82xx_try_start_fw, 5660 .intr_handler = qla4_82xx_intr_handler, 5661 .interrupt_service_routine = qla4_82xx_interrupt_service_routine, 5662 .need_reset = qla4_8xxx_need_reset, 5663 .reset_chip = qla4_82xx_isp_reset, 5664 .reset_firmware = qla4_8xxx_stop_firmware, 5665 .queue_iocb = qla4_82xx_queue_iocb, 5666 .complete_iocb = qla4_82xx_complete_iocb, 5667 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, 5668 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, 5669 .get_sys_info = qla4_8xxx_get_sys_info, 5670 .rd_reg_direct = qla4_82xx_rd_32, 5671 .wr_reg_direct = qla4_82xx_wr_32, 5672 .rd_reg_indirect = qla4_82xx_md_rd_32, 5673 .wr_reg_indirect = qla4_82xx_md_wr_32, 5674 .idc_lock = qla4_82xx_idc_lock, 5675 .idc_unlock = qla4_82xx_idc_unlock, 5676 .rom_lock_recovery = 
qla4_82xx_rom_lock_recovery, 5677 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, 5678 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr, 5679 }; 5680 5681 static struct isp_operations qla4_83xx_isp_ops = { 5682 .iospace_config = qla4_8xxx_iospace_config, 5683 .pci_config = qla4_8xxx_pci_config, 5684 .disable_intrs = qla4_83xx_disable_intrs, 5685 .enable_intrs = qla4_83xx_enable_intrs, 5686 .start_firmware = qla4_8xxx_load_risc, 5687 .restart_firmware = qla4_83xx_start_firmware, 5688 .intr_handler = qla4_83xx_intr_handler, 5689 .interrupt_service_routine = qla4_83xx_interrupt_service_routine, 5690 .need_reset = qla4_8xxx_need_reset, 5691 .reset_chip = qla4_83xx_isp_reset, 5692 .reset_firmware = qla4_8xxx_stop_firmware, 5693 .queue_iocb = qla4_83xx_queue_iocb, 5694 .complete_iocb = qla4_83xx_complete_iocb, 5695 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5696 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5697 .get_sys_info = qla4_8xxx_get_sys_info, 5698 .rd_reg_direct = qla4_83xx_rd_reg, 5699 .wr_reg_direct = qla4_83xx_wr_reg, 5700 .rd_reg_indirect = qla4_83xx_rd_reg_indirect, 5701 .wr_reg_indirect = qla4_83xx_wr_reg_indirect, 5702 .idc_lock = qla4_83xx_drv_lock, 5703 .idc_unlock = qla4_83xx_drv_unlock, 5704 .rom_lock_recovery = qla4_83xx_rom_lock_recovery, 5705 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, 5706 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, 5707 }; 5708 5709 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5710 { 5711 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 5712 } 5713 5714 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5715 { 5716 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); 5717 } 5718 5719 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5720 { 5721 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 5722 } 5723 5724 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5725 { 5726 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); 5727 } 5728 5729 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 5730 { 5731 struct scsi_qla_host *ha = data; 5732 char *str = buf; 5733 int rc; 5734 5735 switch (type) { 5736 case ISCSI_BOOT_ETH_FLAGS: 5737 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5738 break; 5739 case ISCSI_BOOT_ETH_INDEX: 5740 rc = sprintf(str, "0\n"); 5741 break; 5742 case ISCSI_BOOT_ETH_MAC: 5743 rc = sysfs_format_mac(str, ha->my_mac, 5744 MAC_ADDR_LEN); 5745 break; 5746 default: 5747 rc = -ENOSYS; 5748 break; 5749 } 5750 return rc; 5751 } 5752 5753 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) 5754 { 5755 int rc; 5756 5757 switch (type) { 5758 case ISCSI_BOOT_ETH_FLAGS: 5759 case ISCSI_BOOT_ETH_MAC: 5760 case ISCSI_BOOT_ETH_INDEX: 5761 rc = S_IRUGO; 5762 break; 5763 default: 5764 rc = 0; 5765 break; 5766 } 5767 return rc; 5768 } 5769 5770 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) 5771 { 5772 struct scsi_qla_host *ha = data; 5773 char *str = buf; 5774 int rc; 5775 5776 switch (type) { 5777 case ISCSI_BOOT_INI_INITIATOR_NAME: 5778 rc = sprintf(str, "%s\n", ha->name_string); 5779 break; 5780 default: 5781 rc = -ENOSYS; 5782 break; 5783 } 5784 return rc; 5785 } 5786 5787 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) 5788 { 5789 int rc; 5790 5791 switch (type) { 5792 case ISCSI_BOOT_INI_INITIATOR_NAME: 5793 rc = S_IRUGO; 5794 break; 5795 default: 5796 rc = 0; 5797 break; 5798 } 5799 return rc; 5800 } 5801 5802 static ssize_t 
5803 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, 5804 char *buf) 5805 { 5806 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 5807 char *str = buf; 5808 int rc; 5809 5810 switch (type) { 5811 case ISCSI_BOOT_TGT_NAME: 5812 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); 5813 break; 5814 case ISCSI_BOOT_TGT_IP_ADDR: 5815 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) 5816 rc = sprintf(buf, "%pI4\n", 5817 &boot_conn->dest_ipaddr.ip_address); 5818 else 5819 rc = sprintf(str, "%pI6\n", 5820 &boot_conn->dest_ipaddr.ip_address); 5821 break; 5822 case ISCSI_BOOT_TGT_PORT: 5823 rc = sprintf(str, "%d\n", boot_conn->dest_port); 5824 break; 5825 case ISCSI_BOOT_TGT_CHAP_NAME: 5826 rc = sprintf(str, "%.*s\n", 5827 boot_conn->chap.target_chap_name_length, 5828 (char *)&boot_conn->chap.target_chap_name); 5829 break; 5830 case ISCSI_BOOT_TGT_CHAP_SECRET: 5831 rc = sprintf(str, "%.*s\n", 5832 boot_conn->chap.target_secret_length, 5833 (char *)&boot_conn->chap.target_secret); 5834 break; 5835 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5836 rc = sprintf(str, "%.*s\n", 5837 boot_conn->chap.intr_chap_name_length, 5838 (char *)&boot_conn->chap.intr_chap_name); 5839 break; 5840 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5841 rc = sprintf(str, "%.*s\n", 5842 boot_conn->chap.intr_secret_length, 5843 (char *)&boot_conn->chap.intr_secret); 5844 break; 5845 case ISCSI_BOOT_TGT_FLAGS: 5846 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5847 break; 5848 case ISCSI_BOOT_TGT_NIC_ASSOC: 5849 rc = sprintf(str, "0\n"); 5850 break; 5851 default: 5852 rc = -ENOSYS; 5853 break; 5854 } 5855 return rc; 5856 } 5857 5858 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) 5859 { 5860 struct scsi_qla_host *ha = data; 5861 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); 5862 5863 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5864 } 5865 5866 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) 5867 { 5868 struct scsi_qla_host *ha = data; 5869 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); 5870 5871 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5872 } 5873 5874 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) 5875 { 5876 int rc; 5877 5878 switch (type) { 5879 case ISCSI_BOOT_TGT_NAME: 5880 case ISCSI_BOOT_TGT_IP_ADDR: 5881 case ISCSI_BOOT_TGT_PORT: 5882 case ISCSI_BOOT_TGT_CHAP_NAME: 5883 case ISCSI_BOOT_TGT_CHAP_SECRET: 5884 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5885 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5886 case ISCSI_BOOT_TGT_NIC_ASSOC: 5887 case ISCSI_BOOT_TGT_FLAGS: 5888 rc = S_IRUGO; 5889 break; 5890 default: 5891 rc = 0; 5892 break; 5893 } 5894 return rc; 5895 } 5896 5897 static void qla4xxx_boot_release(void *data) 5898 { 5899 struct scsi_qla_host *ha = data; 5900 5901 scsi_host_put(ha->host); 5902 } 5903 5904 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) 5905 { 5906 dma_addr_t buf_dma; 5907 uint32_t addr, pri_addr, sec_addr; 5908 uint32_t offset; 5909 uint16_t func_num; 5910 uint8_t val; 5911 uint8_t *buf = NULL; 5912 size_t size = 13 * sizeof(uint8_t); 5913 int ret = QLA_SUCCESS; 5914 5915 func_num = PCI_FUNC(ha->pdev->devfn); 5916 5917 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", 5918 __func__, ha->pdev->device, func_num); 5919 5920 if (is_qla40XX(ha)) { 5921 if (func_num == 1) { 5922 addr = NVRAM_PORT0_BOOT_MODE; 5923 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; 5924 sec_addr = 
NVRAM_PORT0_BOOT_SEC_TGT; 5925 } else if (func_num == 3) { 5926 addr = NVRAM_PORT1_BOOT_MODE; 5927 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; 5928 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; 5929 } else { 5930 ret = QLA_ERROR; 5931 goto exit_boot_info; 5932 } 5933 5934 /* Check Boot Mode */ 5935 val = rd_nvram_byte(ha, addr); 5936 if (!(val & 0x07)) { 5937 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " 5938 "options : 0x%x\n", __func__, val)); 5939 ret = QLA_ERROR; 5940 goto exit_boot_info; 5941 } 5942 5943 /* get primary valid target index */ 5944 val = rd_nvram_byte(ha, pri_addr); 5945 if (val & BIT_7) 5946 ddb_index[0] = (val & 0x7f); 5947 5948 /* get secondary valid target index */ 5949 val = rd_nvram_byte(ha, sec_addr); 5950 if (val & BIT_7) 5951 ddb_index[1] = (val & 0x7f); 5952 goto exit_boot_info; 5953 } else if (is_qla80XX(ha)) { 5954 buf = dma_alloc_coherent(&ha->pdev->dev, size, 5955 &buf_dma, GFP_KERNEL); 5956 if (!buf) { 5957 DEBUG2(ql4_printk(KERN_ERR, ha, 5958 "%s: Unable to allocate dma buffer\n", 5959 __func__)); 5960 ret = QLA_ERROR; 5961 goto exit_boot_info; 5962 } 5963 5964 if (ha->port_num == 0) 5965 offset = BOOT_PARAM_OFFSET_PORT0; 5966 else if (ha->port_num == 1) 5967 offset = BOOT_PARAM_OFFSET_PORT1; 5968 else { 5969 ret = QLA_ERROR; 5970 goto exit_boot_info_free; 5971 } 5972 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + 5973 offset; 5974 if (qla4xxx_get_flash(ha, buf_dma, addr, 5975 13 * sizeof(uint8_t)) != QLA_SUCCESS) { 5976 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" 5977 " failed\n", ha->host_no, __func__)); 5978 ret = QLA_ERROR; 5979 goto exit_boot_info_free; 5980 } 5981 /* Check Boot Mode */ 5982 if (!(buf[1] & 0x07)) { 5983 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" 5984 " : 0x%x\n", buf[1])); 5985 ret = QLA_ERROR; 5986 goto exit_boot_info_free; 5987 } 5988 5989 /* get primary valid target index */ 5990 if (buf[2] & BIT_7) 5991 ddb_index[0] = buf[2] & 0x7f; 5992 5993 /* get secondary valid target index */ 5994 if (buf[11] & BIT_7) 5995 ddb_index[1] = buf[11] & 0x7f; 5996 } else { 5997 ret = QLA_ERROR; 5998 goto exit_boot_info; 5999 } 6000 6001 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" 6002 " target ID %d\n", __func__, ddb_index[0], 6003 ddb_index[1])); 6004 6005 exit_boot_info_free: 6006 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 6007 exit_boot_info: 6008 ha->pri_ddb_idx = ddb_index[0]; 6009 ha->sec_ddb_idx = ddb_index[1]; 6010 return ret; 6011 } 6012 6013 /** 6014 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password 6015 * @ha: pointer to adapter structure 6016 * @username: CHAP username to be returned 6017 * @password: CHAP password to be returned 6018 * 6019 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP 6020 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. 6021 * So from the CHAP cache find the first BIDI CHAP entry and set it 6022 * to the boot record in sysfs. 
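* Returns 0 if a BIDI CHAP entry is found and copied, otherwise -EINVAL.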
6023 **/ 6024 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, 6025 char *password) 6026 { 6027 int i, ret = -EINVAL; 6028 int max_chap_entries = 0; 6029 struct ql4_chap_table *chap_table; 6030 6031 if (is_qla80XX(ha)) 6032 max_chap_entries = (ha->hw.flt_chap_size / 2) / 6033 sizeof(struct ql4_chap_table); 6034 else 6035 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 6036 6037 if (!ha->chap_list) { 6038 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); 6039 return ret; 6040 } 6041 6042 mutex_lock(&ha->chap_sem); 6043 for (i = 0; i < max_chap_entries; i++) { 6044 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 6045 if (chap_table->cookie != 6046 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 6047 continue; 6048 } 6049 6050 if (chap_table->flags & BIT_7) /* local */ 6051 continue; 6052 6053 if (!(chap_table->flags & BIT_6)) /* Not BIDI */ 6054 continue; 6055 6056 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); 6057 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); 6058 ret = 0; 6059 break; 6060 } 6061 mutex_unlock(&ha->chap_sem); 6062 6063 return ret; 6064 } 6065 6066 6067 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, 6068 struct ql4_boot_session_info *boot_sess, 6069 uint16_t ddb_index) 6070 { 6071 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 6072 struct dev_db_entry *fw_ddb_entry; 6073 dma_addr_t fw_ddb_entry_dma; 6074 uint16_t idx; 6075 uint16_t options; 6076 int ret = QLA_SUCCESS; 6077 6078 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6079 &fw_ddb_entry_dma, GFP_KERNEL); 6080 if (!fw_ddb_entry) { 6081 DEBUG2(ql4_printk(KERN_ERR, ha, 6082 "%s: Unable to allocate dma buffer.\n", 6083 __func__)); 6084 ret = QLA_ERROR; 6085 return ret; 6086 } 6087 6088 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, 6089 fw_ddb_entry_dma, ddb_index)) { 6090 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " 6091 "index [%d]\n", __func__, ddb_index)); 6092 ret = QLA_ERROR; 6093 goto exit_boot_target; 6094 } 6095 6096 /* Update target name and IP from DDB */ 6097 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, 6098 min(sizeof(boot_sess->target_name), 6099 sizeof(fw_ddb_entry->iscsi_name))); 6100 6101 options = le16_to_cpu(fw_ddb_entry->options); 6102 if (options & DDB_OPT_IPV6_DEVICE) { 6103 memcpy(&boot_conn->dest_ipaddr.ip_address, 6104 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); 6105 } else { 6106 boot_conn->dest_ipaddr.ip_type = 0x1; 6107 memcpy(&boot_conn->dest_ipaddr.ip_address, 6108 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); 6109 } 6110 6111 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); 6112 6113 /* update chap information */ 6114 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 6115 6116 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6117 6118 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); 6119 6120 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. 
6121 target_chap_name, 6122 (char *)&boot_conn->chap.target_secret, 6123 idx); 6124 if (ret) { 6125 ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); 6126 ret = QLA_ERROR; 6127 goto exit_boot_target; 6128 } 6129 6130 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6131 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6132 } 6133 6134 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6135 6136 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); 6137 6138 ret = qla4xxx_get_bidi_chap(ha, 6139 (char *)&boot_conn->chap.intr_chap_name, 6140 (char *)&boot_conn->chap.intr_secret); 6141 6142 if (ret) { 6143 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); 6144 ret = QLA_ERROR; 6145 goto exit_boot_target; 6146 } 6147 6148 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6149 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6150 } 6151 6152 exit_boot_target: 6153 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6154 fw_ddb_entry, fw_ddb_entry_dma); 6155 return ret; 6156 } 6157 6158 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) 6159 { 6160 uint16_t ddb_index[2]; 6161 int ret = QLA_ERROR; 6162 int rval; 6163 6164 memset(ddb_index, 0, sizeof(ddb_index)); 6165 ddb_index[0] = 0xffff; 6166 ddb_index[1] = 0xffff; 6167 ret = get_fw_boot_info(ha, ddb_index); 6168 if (ret != QLA_SUCCESS) { 6169 DEBUG2(ql4_printk(KERN_INFO, ha, 6170 "%s: No boot target configured.\n", __func__)); 6171 return ret; 6172 } 6173 6174 if (ql4xdisablesysfsboot) 6175 return QLA_SUCCESS; 6176 6177 if (ddb_index[0] == 0xffff) 6178 goto sec_target; 6179 6180 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 6181 ddb_index[0]); 6182 if (rval != QLA_SUCCESS) { 6183 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " 6184 "configured\n", __func__)); 6185 } else 6186 ret = QLA_SUCCESS; 6187 6188 sec_target: 6189 if (ddb_index[1] == 0xffff) 6190 goto exit_get_boot_info; 6191 6192 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), 6193 ddb_index[1]); 6194 if (rval != QLA_SUCCESS) { 6195 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" 6196 " configured\n", __func__)); 6197 } else 6198 ret = QLA_SUCCESS; 6199 6200 exit_get_boot_info: 6201 return ret; 6202 } 6203 6204 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) 6205 { 6206 struct iscsi_boot_kobj *boot_kobj; 6207 6208 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) 6209 return QLA_ERROR; 6210 6211 if (ql4xdisablesysfsboot) { 6212 ql4_printk(KERN_INFO, ha, 6213 "%s: syfsboot disabled - driver will trigger login " 6214 "and publish session for discovery .\n", __func__); 6215 return QLA_SUCCESS; 6216 } 6217 6218 6219 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); 6220 if (!ha->boot_kset) 6221 goto kset_free; 6222 6223 if (!scsi_host_get(ha->host)) 6224 goto kset_free; 6225 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, 6226 qla4xxx_show_boot_tgt_pri_info, 6227 qla4xxx_tgt_get_attr_visibility, 6228 qla4xxx_boot_release); 6229 if (!boot_kobj) 6230 goto put_host; 6231 6232 if (!scsi_host_get(ha->host)) 6233 goto kset_free; 6234 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, 6235 qla4xxx_show_boot_tgt_sec_info, 6236 qla4xxx_tgt_get_attr_visibility, 6237 qla4xxx_boot_release); 6238 if (!boot_kobj) 6239 goto put_host; 6240 6241 if (!scsi_host_get(ha->host)) 6242 goto kset_free; 6243 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, 6244 qla4xxx_show_boot_ini_info, 6245 
qla4xxx_ini_get_attr_visibility, 6246 qla4xxx_boot_release); 6247 if (!boot_kobj) 6248 goto put_host; 6249 6250 if (!scsi_host_get(ha->host)) 6251 goto kset_free; 6252 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, 6253 qla4xxx_show_boot_eth_info, 6254 qla4xxx_eth_get_attr_visibility, 6255 qla4xxx_boot_release); 6256 if (!boot_kobj) 6257 goto put_host; 6258 6259 return QLA_SUCCESS; 6260 6261 put_host: 6262 scsi_host_put(ha->host); 6263 kset_free: 6264 iscsi_boot_destroy_kset(ha->boot_kset); 6265 return -ENOMEM; 6266 } 6267 6268 6269 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, 6270 struct ql4_tuple_ddb *tddb) 6271 { 6272 struct iscsi_cls_session *cls_sess; 6273 struct iscsi_cls_conn *cls_conn; 6274 struct iscsi_session *sess; 6275 struct iscsi_conn *conn; 6276 6277 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 6278 cls_sess = ddb_entry->sess; 6279 sess = cls_sess->dd_data; 6280 cls_conn = ddb_entry->conn; 6281 conn = cls_conn->dd_data; 6282 6283 tddb->tpgt = sess->tpgt; 6284 tddb->port = conn->persistent_port; 6285 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); 6286 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); 6287 } 6288 6289 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, 6290 struct ql4_tuple_ddb *tddb, 6291 uint8_t *flash_isid) 6292 { 6293 uint16_t options = 0; 6294 6295 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 6296 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 6297 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); 6298 6299 options = le16_to_cpu(fw_ddb_entry->options); 6300 if (options & DDB_OPT_IPV6_DEVICE) 6301 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); 6302 else 6303 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); 6304 6305 tddb->port = le16_to_cpu(fw_ddb_entry->port); 6306 6307 if (flash_isid == NULL) 6308 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], 6309 sizeof(tddb->isid)); 6310 else 6311 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); 6312 } 6313 6314 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, 6315 struct ql4_tuple_ddb *old_tddb, 6316 struct ql4_tuple_ddb *new_tddb, 6317 uint8_t is_isid_compare) 6318 { 6319 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6320 return QLA_ERROR; 6321 6322 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) 6323 return QLA_ERROR; 6324 6325 if (old_tddb->port != new_tddb->port) 6326 return QLA_ERROR; 6327 6328 /* For multi sessions, driver generates the ISID, so do not compare 6329 * ISID in reset path since it would be a comparison between the 6330 * driver generated ISID and firmware generated ISID. This could 6331 * lead to adding duplicated DDBs in the list as driver generated 6332 * ISID would not match firmware generated ISID. 
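* (This is why is_isid_compare is false when called from qla4xxx_is_session_exists() and true when called from qla4xxx_is_flash_ddb_exists().)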
6333 */ 6334 if (is_isid_compare) { 6335 DEBUG2(ql4_printk(KERN_INFO, ha, 6336 "%s: old ISID [%pmR] New ISID [%pmR]\n", 6337 __func__, old_tddb->isid, new_tddb->isid)); 6338 6339 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6340 sizeof(old_tddb->isid))) 6341 return QLA_ERROR; 6342 } 6343 6344 DEBUG2(ql4_printk(KERN_INFO, ha, 6345 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", 6346 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, 6347 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, 6348 new_tddb->ip_addr, new_tddb->iscsi_name)); 6349 6350 return QLA_SUCCESS; 6351 } 6352 6353 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, 6354 struct dev_db_entry *fw_ddb_entry, 6355 uint32_t *index) 6356 { 6357 struct ddb_entry *ddb_entry; 6358 struct ql4_tuple_ddb *fw_tddb = NULL; 6359 struct ql4_tuple_ddb *tmp_tddb = NULL; 6360 int idx; 6361 int ret = QLA_ERROR; 6362 6363 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6364 if (!fw_tddb) { 6365 DEBUG2(ql4_printk(KERN_WARNING, ha, 6366 "Memory Allocation failed.\n")); 6367 ret = QLA_SUCCESS; 6368 goto exit_check; 6369 } 6370 6371 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6372 if (!tmp_tddb) { 6373 DEBUG2(ql4_printk(KERN_WARNING, ha, 6374 "Memory Allocation failed.\n")); 6375 ret = QLA_SUCCESS; 6376 goto exit_check; 6377 } 6378 6379 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6380 6381 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 6382 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 6383 if (ddb_entry == NULL) 6384 continue; 6385 6386 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); 6387 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { 6388 ret = QLA_SUCCESS; /* found */ 6389 if (index != NULL) 6390 *index = idx; 6391 goto exit_check; 6392 } 6393 } 6394 6395 exit_check: 6396 vfree(fw_tddb); 6397 vfree(tmp_tddb); 6398 return ret; 6399 } 6400 6401 /** 6402 * qla4xxx_check_existing_isid - check if target with same isid exist 6403 * in target list 6404 * @list_nt: list of target 6405 * @isid: isid to check 6406 * 6407 * This routine return QLA_SUCCESS if target with same isid exist 6408 **/ 6409 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) 6410 { 6411 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6412 struct dev_db_entry *fw_ddb_entry; 6413 6414 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6415 fw_ddb_entry = &nt_ddb_idx->fw_ddb; 6416 6417 if (memcmp(&fw_ddb_entry->isid[0], &isid[0], 6418 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { 6419 return QLA_SUCCESS; 6420 } 6421 } 6422 return QLA_ERROR; 6423 } 6424 6425 /** 6426 * qla4xxx_update_isid - compare ddbs and updated isid 6427 * @ha: Pointer to host adapter structure. 6428 * @list_nt: list of nt target 6429 * @fw_ddb_entry: firmware ddb entry 6430 * 6431 * This routine update isid if ddbs have same iqn, same isid and 6432 * different IP addr. 6433 * Return QLA_SUCCESS if isid is updated. 
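* Only bits 5-7 of isid[1] are varied; QLA_ERROR is returned if every candidate value is already present in list_nt.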
6434 **/ 6435 static int qla4xxx_update_isid(struct scsi_qla_host *ha, 6436 struct list_head *list_nt, 6437 struct dev_db_entry *fw_ddb_entry) 6438 { 6439 uint8_t base_value, i; 6440 6441 base_value = fw_ddb_entry->isid[1] & 0x1f; 6442 for (i = 0; i < 8; i++) { 6443 fw_ddb_entry->isid[1] = (base_value | (i << 5)); 6444 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6445 break; 6446 } 6447 6448 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6449 return QLA_ERROR; 6450 6451 return QLA_SUCCESS; 6452 } 6453 6454 /** 6455 * qla4xxx_should_update_isid - check if the isid needs to be updated 6456 * @ha: Pointer to host adapter structure. 6457 * @old_tddb: ddb tuple 6458 * @new_tddb: ddb tuple 6459 * 6460 * Return QLA_SUCCESS if the entries have the same iqn and isid but 6461 * differ in IP address or port 6462 **/ 6463 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, 6464 struct ql4_tuple_ddb *old_tddb, 6465 struct ql4_tuple_ddb *new_tddb) 6466 { 6467 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { 6468 /* Same ip */ 6469 if (old_tddb->port == new_tddb->port) 6470 return QLA_ERROR; 6471 } 6472 6473 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6474 /* different iqn */ 6475 return QLA_ERROR; 6476 6477 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6478 sizeof(old_tddb->isid))) 6479 /* different isid */ 6480 return QLA_ERROR; 6481 6482 return QLA_SUCCESS; 6483 } 6484 6485 /** 6486 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt 6487 * @ha: Pointer to host adapter structure. 6488 * @list_nt: list of nt target. 6489 * @fw_ddb_entry: firmware ddb entry. 6490 * 6491 * This routine checks if fw_ddb_entry already exists in list_nt to avoid 6492 * adding a duplicate ddb to list_nt. 6493 * Return QLA_SUCCESS if a duplicate ddb exists in list_nt. 6494 * Note: This function also updates the isid of the DDB if required. 
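* Callers treat QLA_SUCCESS as a duplicate and skip adding the entry (see qla4xxx_build_nt_list()).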
6495 **/ 6496 6497 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, 6498 struct list_head *list_nt, 6499 struct dev_db_entry *fw_ddb_entry) 6500 { 6501 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6502 struct ql4_tuple_ddb *fw_tddb = NULL; 6503 struct ql4_tuple_ddb *tmp_tddb = NULL; 6504 int rval, ret = QLA_ERROR; 6505 6506 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6507 if (!fw_tddb) { 6508 DEBUG2(ql4_printk(KERN_WARNING, ha, 6509 "Memory Allocation failed.\n")); 6510 ret = QLA_SUCCESS; 6511 goto exit_check; 6512 } 6513 6514 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6515 if (!tmp_tddb) { 6516 DEBUG2(ql4_printk(KERN_WARNING, ha, 6517 "Memory Allocation failed.\n")); 6518 ret = QLA_SUCCESS; 6519 goto exit_check; 6520 } 6521 6522 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6523 6524 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6525 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, 6526 nt_ddb_idx->flash_isid); 6527 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); 6528 /* found duplicate ddb */ 6529 if (ret == QLA_SUCCESS) 6530 goto exit_check; 6531 } 6532 6533 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6534 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); 6535 6536 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); 6537 if (ret == QLA_SUCCESS) { 6538 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); 6539 if (rval == QLA_SUCCESS) 6540 ret = QLA_ERROR; 6541 else 6542 ret = QLA_SUCCESS; 6543 6544 goto exit_check; 6545 } 6546 } 6547 6548 exit_check: 6549 vfree(fw_tddb); 6550 vfree(tmp_tddb); 6551 return ret; 6552 } 6553 6554 static void qla4xxx_free_ddb_list(struct list_head *list_ddb) 6555 { 6556 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6557 6558 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6559 list_del_init(&ddb_idx->list); 6560 vfree(ddb_idx); 6561 } 6562 } 6563 6564 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 6565 struct dev_db_entry *fw_ddb_entry) 6566 { 6567 struct iscsi_endpoint *ep; 6568 struct sockaddr_in *addr; 6569 struct sockaddr_in6 *addr6; 6570 struct sockaddr *t_addr; 6571 struct sockaddr_storage *dst_addr; 6572 char *ip; 6573 6574 /* TODO: need to destroy on unload iscsi_endpoint*/ 6575 dst_addr = vmalloc(sizeof(*dst_addr)); 6576 if (!dst_addr) 6577 return NULL; 6578 6579 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { 6580 t_addr = (struct sockaddr *)dst_addr; 6581 t_addr->sa_family = AF_INET6; 6582 addr6 = (struct sockaddr_in6 *)dst_addr; 6583 ip = (char *)&addr6->sin6_addr; 6584 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 6585 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6586 6587 } else { 6588 t_addr = (struct sockaddr *)dst_addr; 6589 t_addr->sa_family = AF_INET; 6590 addr = (struct sockaddr_in *)dst_addr; 6591 ip = (char *)&addr->sin_addr; 6592 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); 6593 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6594 } 6595 6596 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); 6597 vfree(dst_addr); 6598 return ep; 6599 } 6600 6601 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) 6602 { 6603 if (ql4xdisablesysfsboot) 6604 return QLA_SUCCESS; 6605 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) 6606 return QLA_ERROR; 6607 return QLA_SUCCESS; 6608 } 6609 6610 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 6611 struct ddb_entry *ddb_entry, 6612 uint16_t idx) 6613 { 
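	/*
	 * Initialize the driver-private ddb_entry for a flash-resident DDB:
	 * mark it FLASH_DDB, reset the relogin book-keeping, and use the
	 * timeout from the flash entry only when it lies between LOGIN_TOV
	 * and LOGIN_TOV * 10, falling back to LOGIN_TOV otherwise.
	 */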
6614 uint16_t def_timeout; 6615 6616 ddb_entry->ddb_type = FLASH_DDB; 6617 ddb_entry->fw_ddb_index = INVALID_ENTRY; 6618 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 6619 ddb_entry->ha = ha; 6620 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; 6621 ddb_entry->ddb_change = qla4xxx_flash_ddb_change; 6622 ddb_entry->chap_tbl_idx = INVALID_ENTRY; 6623 6624 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 6625 atomic_set(&ddb_entry->relogin_timer, 0); 6626 atomic_set(&ddb_entry->relogin_retry_count, 0); 6627 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 6628 ddb_entry->default_relogin_timeout = 6629 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? 6630 def_timeout : LOGIN_TOV; 6631 ddb_entry->default_time2wait = 6632 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 6633 6634 if (ql4xdisablesysfsboot && 6635 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) 6636 set_bit(DF_BOOT_TGT, &ddb_entry->flags); 6637 } 6638 6639 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) 6640 { 6641 uint32_t idx = 0; 6642 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ 6643 uint32_t sts[MBOX_REG_COUNT]; 6644 uint32_t ip_state; 6645 unsigned long wtime; 6646 int ret; 6647 6648 wtime = jiffies + (HZ * IP_CONFIG_TOV); 6649 do { 6650 for (idx = 0; idx < IP_ADDR_COUNT; idx++) { 6651 if (ip_idx[idx] == -1) 6652 continue; 6653 6654 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); 6655 6656 if (ret == QLA_ERROR) { 6657 ip_idx[idx] = -1; 6658 continue; 6659 } 6660 6661 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; 6662 6663 DEBUG2(ql4_printk(KERN_INFO, ha, 6664 "Waiting for IP state for idx = %d, state = 0x%x\n", 6665 ip_idx[idx], ip_state)); 6666 if (ip_state == IP_ADDRSTATE_UNCONFIGURED || 6667 ip_state == IP_ADDRSTATE_INVALID || 6668 ip_state == IP_ADDRSTATE_PREFERRED || 6669 ip_state == IP_ADDRSTATE_DEPRICATED || 6670 ip_state == IP_ADDRSTATE_DISABLING) 6671 ip_idx[idx] = -1; 6672 } 6673 6674 /* Break if all IP states checked */ 6675 if ((ip_idx[0] == -1) && 6676 (ip_idx[1] == -1) && 6677 (ip_idx[2] == -1) && 6678 (ip_idx[3] == -1)) 6679 break; 6680 schedule_timeout_uninterruptible(HZ); 6681 } while (time_after(wtime, jiffies)); 6682 } 6683 6684 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, 6685 struct dev_db_entry *flash_ddb_entry) 6686 { 6687 uint16_t options = 0; 6688 size_t ip_len = IP_ADDR_LEN; 6689 6690 options = le16_to_cpu(fw_ddb_entry->options); 6691 if (options & DDB_OPT_IPV6_DEVICE) 6692 ip_len = IPv6_ADDR_LEN; 6693 6694 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) 6695 return QLA_ERROR; 6696 6697 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], 6698 sizeof(fw_ddb_entry->isid))) 6699 return QLA_ERROR; 6700 6701 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, 6702 sizeof(fw_ddb_entry->port))) 6703 return QLA_ERROR; 6704 6705 return QLA_SUCCESS; 6706 } 6707 6708 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, 6709 struct dev_db_entry *fw_ddb_entry, 6710 uint32_t fw_idx, uint32_t *flash_index) 6711 { 6712 struct dev_db_entry *flash_ddb_entry; 6713 dma_addr_t flash_ddb_entry_dma; 6714 uint32_t idx = 0; 6715 int max_ddbs; 6716 int ret = QLA_ERROR, status; 6717 6718 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6719 MAX_DEV_DB_ENTRIES; 6720 6721 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6722 &flash_ddb_entry_dma); 6723 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { 6724 ql4_printk(KERN_ERR, ha, "Out of memory\n"); 6725 goto exit_find_st_idx; 6726 } 6727 6728 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6729 flash_ddb_entry_dma, fw_idx); 6730 if (status == QLA_SUCCESS) { 6731 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6732 if (status == QLA_SUCCESS) { 6733 *flash_index = fw_idx; 6734 ret = QLA_SUCCESS; 6735 goto exit_find_st_idx; 6736 } 6737 } 6738 6739 for (idx = 0; idx < max_ddbs; idx++) { 6740 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6741 flash_ddb_entry_dma, idx); 6742 if (status == QLA_ERROR) 6743 continue; 6744 6745 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6746 if (status == QLA_SUCCESS) { 6747 *flash_index = idx; 6748 ret = QLA_SUCCESS; 6749 goto exit_find_st_idx; 6750 } 6751 } 6752 6753 if (idx == max_ddbs) 6754 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", 6755 fw_idx); 6756 6757 exit_find_st_idx: 6758 if (flash_ddb_entry) 6759 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, 6760 flash_ddb_entry_dma); 6761 6762 return ret; 6763 } 6764 6765 static void qla4xxx_build_st_list(struct scsi_qla_host *ha, 6766 struct list_head *list_st) 6767 { 6768 struct qla_ddb_index *st_ddb_idx; 6769 int max_ddbs; 6770 int fw_idx_size; 6771 struct dev_db_entry *fw_ddb_entry; 6772 dma_addr_t fw_ddb_dma; 6773 int ret; 6774 uint32_t idx = 0, next_idx = 0; 6775 uint32_t state = 0, conn_err = 0; 6776 uint32_t flash_index = -1; 6777 uint16_t conn_id = 0; 6778 6779 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6780 &fw_ddb_dma); 6781 if (fw_ddb_entry == NULL) { 6782 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6783 goto exit_st_list; 6784 } 6785 6786 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6787 MAX_DEV_DB_ENTRIES; 6788 fw_idx_size = sizeof(struct qla_ddb_index); 6789 6790 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6791 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 6792 NULL, &next_idx, &state, 6793 &conn_err, NULL, &conn_id); 6794 if (ret == QLA_ERROR) 6795 break; 6796 6797 /* Ignore DDB if invalid state (unassigned) */ 6798 if (state == DDB_DS_UNASSIGNED) 6799 goto continue_next_st; 6800 6801 /* Check if ST, add to the list_st */ 6802 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 6803 goto continue_next_st; 6804 6805 st_ddb_idx = vzalloc(fw_idx_size); 6806 if (!st_ddb_idx) 6807 break; 6808 6809 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, 6810 &flash_index); 6811 if (ret == QLA_ERROR) { 6812 ql4_printk(KERN_ERR, ha, 6813 "No flash entry for ST at idx [%d]\n", idx); 6814 st_ddb_idx->flash_ddb_idx = idx; 6815 } else { 6816 ql4_printk(KERN_INFO, ha, 6817 "ST at idx [%d] is stored at flash [%d]\n", 6818 idx, flash_index); 6819 st_ddb_idx->flash_ddb_idx = flash_index; 6820 } 6821 6822 st_ddb_idx->fw_ddb_idx = idx; 6823 6824 list_add_tail(&st_ddb_idx->list, list_st); 6825 continue_next_st: 6826 if (next_idx == 0) 6827 break; 6828 } 6829 6830 exit_st_list: 6831 if (fw_ddb_entry) 6832 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 6833 } 6834 6835 /** 6836 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list 6837 * @ha: pointer to adapter structure 6838 * @list_ddb: List from which failed ddb to be removed 6839 * 6840 * Iterate over the list of DDBs and find and remove DDBs that are either in 6841 * no connection active state or failed state 6842 **/ 6843 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, 6844 struct list_head *list_ddb) 6845 { 6846 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6847 uint32_t next_idx = 0; 6848 uint32_t state = 0, conn_err = 0; 6849 int ret; 6850 6851 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6852 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, 6853 NULL, 0, NULL, &next_idx, &state, 6854 &conn_err, NULL, NULL); 6855 if (ret == QLA_ERROR) 6856 continue; 6857 6858 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 6859 state == DDB_DS_SESSION_FAILED) { 6860 list_del_init(&ddb_idx->list); 6861 vfree(ddb_idx); 6862 } 6863 } 6864 } 6865 6866 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, 6867 struct ddb_entry *ddb_entry, 6868 struct dev_db_entry *fw_ddb_entry) 6869 { 6870 struct iscsi_cls_session *cls_sess; 6871 struct iscsi_session *sess; 6872 uint32_t max_ddbs = 0; 6873 uint16_t ddb_link = -1; 6874 6875 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6876 MAX_DEV_DB_ENTRIES; 6877 6878 cls_sess = ddb_entry->sess; 6879 sess = cls_sess->dd_data; 6880 6881 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6882 if (ddb_link < max_ddbs) 6883 sess->discovery_parent_idx = ddb_link; 6884 else 6885 sess->discovery_parent_idx = DDB_NO_LINK; 6886 } 6887 6888 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, 6889 struct dev_db_entry *fw_ddb_entry, 6890 int is_reset, uint16_t idx) 6891 { 6892 struct iscsi_cls_session *cls_sess; 6893 struct iscsi_session *sess; 6894 struct iscsi_cls_conn *cls_conn; 6895 struct iscsi_endpoint *ep; 6896 uint16_t cmds_max = 32; 6897 uint16_t conn_id = 0; 6898 uint32_t initial_cmdsn = 0; 6899 int ret = QLA_SUCCESS; 6900 6901 struct ddb_entry *ddb_entry = NULL; 6902 6903 /* Create session object, with INVALID_ENTRY, 6904 * the targer_id would get set when we issue the login 6905 */ 6906 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, 6907 cmds_max, sizeof(struct ddb_entry), 6908 sizeof(struct ql4_task_data), 6909 initial_cmdsn, INVALID_ENTRY); 6910 if (!cls_sess) { 6911 ret = QLA_ERROR; 6912 goto exit_setup; 6913 } 6914 6915 /* 6916 * so calling module_put function to decrement the 6917 * reference count. 6918 **/ 6919 module_put(qla4xxx_iscsi_transport.owner); 6920 sess = cls_sess->dd_data; 6921 ddb_entry = sess->dd_data; 6922 ddb_entry->sess = cls_sess; 6923 6924 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 6925 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, 6926 sizeof(struct dev_db_entry)); 6927 6928 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); 6929 6930 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); 6931 6932 if (!cls_conn) { 6933 ret = QLA_ERROR; 6934 goto exit_setup; 6935 } 6936 6937 ddb_entry->conn = cls_conn; 6938 6939 /* Setup ep, for displaying attributes in sysfs */ 6940 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); 6941 if (ep) { 6942 ep->conn = cls_conn; 6943 cls_conn->ep = ep; 6944 } else { 6945 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); 6946 ret = QLA_ERROR; 6947 goto exit_setup; 6948 } 6949 6950 /* Update sess/conn params */ 6951 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 6952 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); 6953 6954 if (is_reset == RESET_ADAPTER) { 6955 iscsi_block_session(cls_sess); 6956 /* Use the relogin path to discover new devices 6957 * by short-circuiting the logic of setting 6958 * timer to relogin - instead set the flags 6959 * to initiate login right away. 
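* The next DPC pass then sees DPC_RELOGIN_DEVICE, walks the sessions via qla4xxx_dpc_relogin() and issues the flash DDB login.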
6960 */ 6961 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 6962 set_bit(DF_RELOGIN, &ddb_entry->flags); 6963 } 6964 6965 exit_setup: 6966 return ret; 6967 } 6968 6969 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, 6970 struct list_head *list_ddb, 6971 struct dev_db_entry *fw_ddb_entry) 6972 { 6973 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6974 uint16_t ddb_link; 6975 6976 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6977 6978 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6979 if (ddb_idx->fw_ddb_idx == ddb_link) { 6980 DEBUG2(ql4_printk(KERN_INFO, ha, 6981 "Updating NT parent idx from [%d] to [%d]\n", 6982 ddb_link, ddb_idx->flash_ddb_idx)); 6983 fw_ddb_entry->ddb_link = 6984 cpu_to_le16(ddb_idx->flash_ddb_idx); 6985 return; 6986 } 6987 } 6988 } 6989 6990 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, 6991 struct list_head *list_nt, 6992 struct list_head *list_st, 6993 int is_reset) 6994 { 6995 struct dev_db_entry *fw_ddb_entry; 6996 struct ddb_entry *ddb_entry = NULL; 6997 dma_addr_t fw_ddb_dma; 6998 int max_ddbs; 6999 int fw_idx_size; 7000 int ret; 7001 uint32_t idx = 0, next_idx = 0; 7002 uint32_t state = 0, conn_err = 0; 7003 uint32_t ddb_idx = -1; 7004 uint16_t conn_id = 0; 7005 uint16_t ddb_link = -1; 7006 struct qla_ddb_index *nt_ddb_idx; 7007 7008 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7009 &fw_ddb_dma); 7010 if (fw_ddb_entry == NULL) { 7011 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7012 goto exit_nt_list; 7013 } 7014 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 7015 MAX_DEV_DB_ENTRIES; 7016 fw_idx_size = sizeof(struct qla_ddb_index); 7017 7018 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7019 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7020 NULL, &next_idx, &state, 7021 &conn_err, NULL, &conn_id); 7022 if (ret == QLA_ERROR) 7023 break; 7024 7025 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) 7026 goto continue_next_nt; 7027 7028 /* Check if NT, then add to list it */ 7029 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 7030 goto continue_next_nt; 7031 7032 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 7033 if (ddb_link < max_ddbs) 7034 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); 7035 7036 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || 7037 state == DDB_DS_SESSION_FAILED) && 7038 (is_reset == INIT_ADAPTER)) 7039 goto continue_next_nt; 7040 7041 DEBUG2(ql4_printk(KERN_INFO, ha, 7042 "Adding DDB to session = 0x%x\n", idx)); 7043 7044 if (is_reset == INIT_ADAPTER) { 7045 nt_ddb_idx = vmalloc(fw_idx_size); 7046 if (!nt_ddb_idx) 7047 break; 7048 7049 nt_ddb_idx->fw_ddb_idx = idx; 7050 7051 /* Copy original isid as it may get updated in function 7052 * qla4xxx_update_isid(). 
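* (the flash copy is preserved in nt_ddb_idx->flash_isid by the memcpy below).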
We need original isid in 7053 * function qla4xxx_compare_tuple_ddb to find duplicate 7054 * target */ 7055 memcpy(&nt_ddb_idx->flash_isid[0], 7056 &fw_ddb_entry->isid[0], 7057 sizeof(nt_ddb_idx->flash_isid)); 7058 7059 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, 7060 fw_ddb_entry); 7061 if (ret == QLA_SUCCESS) { 7062 /* free nt_ddb_idx and do not add to list_nt */ 7063 vfree(nt_ddb_idx); 7064 goto continue_next_nt; 7065 } 7066 7067 /* Copy updated isid */ 7068 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, 7069 sizeof(struct dev_db_entry)); 7070 7071 list_add_tail(&nt_ddb_idx->list, list_nt); 7072 } else if (is_reset == RESET_ADAPTER) { 7073 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 7074 &ddb_idx); 7075 if (ret == QLA_SUCCESS) { 7076 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 7077 ddb_idx); 7078 if (ddb_entry != NULL) 7079 qla4xxx_update_sess_disc_idx(ha, 7080 ddb_entry, 7081 fw_ddb_entry); 7082 goto continue_next_nt; 7083 } 7084 } 7085 7086 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); 7087 if (ret == QLA_ERROR) 7088 goto exit_nt_list; 7089 7090 continue_next_nt: 7091 if (next_idx == 0) 7092 break; 7093 } 7094 7095 exit_nt_list: 7096 if (fw_ddb_entry) 7097 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7098 } 7099 7100 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, 7101 struct list_head *list_nt, 7102 uint16_t target_id) 7103 { 7104 struct dev_db_entry *fw_ddb_entry; 7105 dma_addr_t fw_ddb_dma; 7106 int max_ddbs; 7107 int fw_idx_size; 7108 int ret; 7109 uint32_t idx = 0, next_idx = 0; 7110 uint32_t state = 0, conn_err = 0; 7111 uint16_t conn_id = 0; 7112 struct qla_ddb_index *nt_ddb_idx; 7113 7114 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7115 &fw_ddb_dma); 7116 if (fw_ddb_entry == NULL) { 7117 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7118 goto exit_new_nt_list; 7119 } 7120 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 7121 MAX_DEV_DB_ENTRIES; 7122 fw_idx_size = sizeof(struct qla_ddb_index); 7123 7124 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7125 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7126 NULL, &next_idx, &state, 7127 &conn_err, NULL, &conn_id); 7128 if (ret == QLA_ERROR) 7129 break; 7130 7131 /* Check if NT, then add it to list */ 7132 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7133 goto continue_next_new_nt; 7134 7135 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) 7136 goto continue_next_new_nt; 7137 7138 DEBUG2(ql4_printk(KERN_INFO, ha, 7139 "Adding DDB to session = 0x%x\n", idx)); 7140 7141 nt_ddb_idx = vmalloc(fw_idx_size); 7142 if (!nt_ddb_idx) 7143 break; 7144 7145 nt_ddb_idx->fw_ddb_idx = idx; 7146 7147 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 7148 if (ret == QLA_SUCCESS) { 7149 /* free nt_ddb_idx and do not add to list_nt */ 7150 vfree(nt_ddb_idx); 7151 goto continue_next_new_nt; 7152 } 7153 7154 if (target_id < max_ddbs) 7155 fw_ddb_entry->ddb_link = cpu_to_le16(target_id); 7156 7157 list_add_tail(&nt_ddb_idx->list, list_nt); 7158 7159 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7160 idx); 7161 if (ret == QLA_ERROR) 7162 goto exit_new_nt_list; 7163 7164 continue_next_new_nt: 7165 if (next_idx == 0) 7166 break; 7167 } 7168 7169 exit_new_nt_list: 7170 if (fw_ddb_entry) 7171 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7172 } 7173 7174 /** 7175 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry 7176 * @dev: dev associated with the sysfs entry 7177 * @data: pointer to flashnode session object 7178 * 7179 * Returns: 7180 * 1: if flashnode entry is non-persistent 7181 * 0: if flashnode entry is persistent 7182 **/ 7183 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) 7184 { 7185 struct iscsi_bus_flash_session *fnode_sess; 7186 7187 if (!iscsi_flashnode_bus_match(dev, NULL)) 7188 return 0; 7189 7190 fnode_sess = iscsi_dev_to_flash_session(dev); 7191 7192 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); 7193 } 7194 7195 /** 7196 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target 7197 * @ha: pointer to host 7198 * @fw_ddb_entry: flash ddb data 7199 * @idx: target index 7200 * @user: if set then this call is made from userland else from kernel 7201 * 7202 * Returns: 7203 * On sucess: QLA_SUCCESS 7204 * On failure: QLA_ERROR 7205 * 7206 * This create separate sysfs entries for session and connection attributes of 7207 * the given fw ddb entry. 7208 * If this is invoked as a result of a userspace call then the entry is marked 7209 * as nonpersistent using flash_state field. 
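* For kernel-initiated (non-user) creation the entry is marked persistent and, when idx matches pri_ddb_idx or sec_ddb_idx, flagged as a boot target.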
7210 **/ 7211 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 7212 struct dev_db_entry *fw_ddb_entry, 7213 uint16_t *idx, int user) 7214 { 7215 struct iscsi_bus_flash_session *fnode_sess = NULL; 7216 struct iscsi_bus_flash_conn *fnode_conn = NULL; 7217 int rc = QLA_ERROR; 7218 7219 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, 7220 &qla4xxx_iscsi_transport, 0); 7221 if (!fnode_sess) { 7222 ql4_printk(KERN_ERR, ha, 7223 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", 7224 __func__, *idx, ha->host_no); 7225 goto exit_tgt_create; 7226 } 7227 7228 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, 7229 &qla4xxx_iscsi_transport, 0); 7230 if (!fnode_conn) { 7231 ql4_printk(KERN_ERR, ha, 7232 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", 7233 __func__, *idx, ha->host_no); 7234 goto free_sess; 7235 } 7236 7237 if (user) { 7238 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; 7239 } else { 7240 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7241 7242 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) 7243 fnode_sess->is_boot_target = 1; 7244 else 7245 fnode_sess->is_boot_target = 0; 7246 } 7247 7248 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7249 fw_ddb_entry); 7250 if (rc) 7251 goto free_sess; 7252 7253 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7254 __func__, fnode_sess->dev.kobj.name); 7255 7256 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7257 __func__, fnode_conn->dev.kobj.name); 7258 7259 return QLA_SUCCESS; 7260 7261 free_sess: 7262 iscsi_destroy_flashnode_sess(fnode_sess); 7263 7264 exit_tgt_create: 7265 return QLA_ERROR; 7266 } 7267 7268 /** 7269 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash 7270 * @shost: pointer to host 7271 * @buf: type of ddb entry (ipv4/ipv6) 7272 * @len: length of buf 7273 * 7274 * This creates new ddb entry in the flash by finding first free index and 7275 * storing default ddb there. And then create sysfs entry for the new ddb entry. 7276 **/ 7277 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 7278 int len) 7279 { 7280 struct scsi_qla_host *ha = to_qla_host(shost); 7281 struct dev_db_entry *fw_ddb_entry = NULL; 7282 dma_addr_t fw_ddb_entry_dma; 7283 struct device *dev; 7284 uint16_t idx = 0; 7285 uint16_t max_ddbs = 0; 7286 uint32_t options = 0; 7287 uint32_t rval = QLA_ERROR; 7288 7289 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && 7290 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { 7291 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", 7292 __func__)); 7293 goto exit_ddb_add; 7294 } 7295 7296 max_ddbs = is_qla40XX(ha) ? 
MAX_PRST_DEV_DB_ENTRIES : 7297 MAX_DEV_DB_ENTRIES; 7298 7299 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7300 &fw_ddb_entry_dma, GFP_KERNEL); 7301 if (!fw_ddb_entry) { 7302 DEBUG2(ql4_printk(KERN_ERR, ha, 7303 "%s: Unable to allocate dma buffer\n", 7304 __func__)); 7305 goto exit_ddb_add; 7306 } 7307 7308 dev = iscsi_find_flashnode_sess(ha->host, NULL, 7309 qla4xxx_sysfs_ddb_is_non_persistent); 7310 if (dev) { 7311 ql4_printk(KERN_ERR, ha, 7312 "%s: A non-persistent entry %s found\n", 7313 __func__, dev->kobj.name); 7314 put_device(dev); 7315 goto exit_ddb_add; 7316 } 7317 7318 /* Index 0 and 1 are reserved for boot target entries */ 7319 for (idx = 2; idx < max_ddbs; idx++) { 7320 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, 7321 fw_ddb_entry_dma, idx)) 7322 break; 7323 } 7324 7325 if (idx == max_ddbs) 7326 goto exit_ddb_add; 7327 7328 if (!strncasecmp("ipv6", buf, 4)) 7329 options |= IPV6_DEFAULT_DDB_ENTRY; 7330 7331 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7332 if (rval == QLA_ERROR) 7333 goto exit_ddb_add; 7334 7335 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); 7336 7337 exit_ddb_add: 7338 if (fw_ddb_entry) 7339 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7340 fw_ddb_entry, fw_ddb_entry_dma); 7341 if (rval == QLA_SUCCESS) 7342 return idx; 7343 else 7344 return -EIO; 7345 } 7346 7347 /** 7348 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash 7349 * @fnode_sess: pointer to session attrs of flash ddb entry 7350 * @fnode_conn: pointer to connection attrs of flash ddb entry 7351 * 7352 * This writes the contents of target ddb buffer to Flash with a valid cookie 7353 * value in order to make the ddb entry persistent. 7354 **/ 7355 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, 7356 struct iscsi_bus_flash_conn *fnode_conn) 7357 { 7358 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7359 struct scsi_qla_host *ha = to_qla_host(shost); 7360 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; 7361 struct dev_db_entry *fw_ddb_entry = NULL; 7362 dma_addr_t fw_ddb_entry_dma; 7363 uint32_t options = 0; 7364 int rval = 0; 7365 7366 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7367 &fw_ddb_entry_dma, GFP_KERNEL); 7368 if (!fw_ddb_entry) { 7369 DEBUG2(ql4_printk(KERN_ERR, ha, 7370 "%s: Unable to allocate dma buffer\n", 7371 __func__)); 7372 rval = -ENOMEM; 7373 goto exit_ddb_apply; 7374 } 7375 7376 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7377 options |= IPV6_DEFAULT_DDB_ENTRY; 7378 7379 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7380 if (rval == QLA_ERROR) 7381 goto exit_ddb_apply; 7382 7383 dev_db_start_offset += (fnode_sess->target_id * 7384 sizeof(*fw_ddb_entry)); 7385 7386 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7387 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7388 7389 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 7390 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); 7391 7392 if (rval == QLA_SUCCESS) { 7393 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7394 ql4_printk(KERN_INFO, ha, 7395 "%s: flash node %u of host %lu written to flash\n", 7396 __func__, fnode_sess->target_id, ha->host_no); 7397 } else { 7398 rval = -EIO; 7399 ql4_printk(KERN_ERR, ha, 7400 "%s: Error while writing flash node %u of host %lu to flash\n", 7401 __func__, fnode_sess->target_id, ha->host_no); 7402 } 7403 7404 exit_ddb_apply: 7405 if (fw_ddb_entry) 7406 
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7407 fw_ddb_entry, fw_ddb_entry_dma); 7408 return rval; 7409 } 7410 7411 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, 7412 struct dev_db_entry *fw_ddb_entry, 7413 uint16_t idx) 7414 { 7415 struct dev_db_entry *ddb_entry = NULL; 7416 dma_addr_t ddb_entry_dma; 7417 unsigned long wtime; 7418 uint32_t mbx_sts = 0; 7419 uint32_t state = 0, conn_err = 0; 7420 uint16_t tmo = 0; 7421 int ret = 0; 7422 7423 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7424 &ddb_entry_dma, GFP_KERNEL); 7425 if (!ddb_entry) { 7426 DEBUG2(ql4_printk(KERN_ERR, ha, 7427 "%s: Unable to allocate dma buffer\n", 7428 __func__)); 7429 return QLA_ERROR; 7430 } 7431 7432 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); 7433 7434 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); 7435 if (ret != QLA_SUCCESS) { 7436 DEBUG2(ql4_printk(KERN_ERR, ha, 7437 "%s: Unable to set ddb entry for index %d\n", 7438 __func__, idx)); 7439 goto exit_ddb_conn_open; 7440 } 7441 7442 qla4xxx_conn_open(ha, idx); 7443 7444 /* To ensure that sendtargets is done, wait for at least 12 secs */ 7445 tmo = ((ha->def_timeout > LOGIN_TOV) && 7446 (ha->def_timeout < LOGIN_TOV * 10) ? 7447 ha->def_timeout : LOGIN_TOV); 7448 7449 DEBUG2(ql4_printk(KERN_INFO, ha, 7450 "Default time to wait for login to ddb %d\n", tmo)); 7451 7452 wtime = jiffies + (HZ * tmo); 7453 do { 7454 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, 7455 NULL, &state, &conn_err, NULL, 7456 NULL); 7457 if (ret == QLA_ERROR) 7458 continue; 7459 7460 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 7461 state == DDB_DS_SESSION_FAILED) 7462 break; 7463 7464 schedule_timeout_uninterruptible(HZ / 10); 7465 } while (time_after(wtime, jiffies)); 7466 7467 exit_ddb_conn_open: 7468 if (ddb_entry) 7469 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7470 ddb_entry, ddb_entry_dma); 7471 return ret; 7472 } 7473 7474 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, 7475 struct dev_db_entry *fw_ddb_entry, 7476 uint16_t target_id) 7477 { 7478 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 7479 struct list_head list_nt; 7480 uint16_t ddb_index; 7481 int ret = 0; 7482 7483 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { 7484 ql4_printk(KERN_WARNING, ha, 7485 "%s: A discovery already in progress!\n", __func__); 7486 return QLA_ERROR; 7487 } 7488 7489 INIT_LIST_HEAD(&list_nt); 7490 7491 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7492 7493 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 7494 if (ret == QLA_ERROR) 7495 goto exit_login_st_clr_bit; 7496 7497 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); 7498 if (ret == QLA_ERROR) 7499 goto exit_login_st; 7500 7501 qla4xxx_build_new_nt_list(ha, &list_nt, target_id); 7502 7503 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { 7504 list_del_init(&ddb_idx->list); 7505 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); 7506 vfree(ddb_idx); 7507 } 7508 7509 exit_login_st: 7510 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { 7511 ql4_printk(KERN_ERR, ha, 7512 "Unable to clear DDB index = 0x%x\n", ddb_index); 7513 } 7514 7515 clear_bit(ddb_index, ha->ddb_idx_map); 7516 7517 exit_login_st_clr_bit: 7518 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7519 return ret; 7520 } 7521 7522 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, 7523 struct dev_db_entry *fw_ddb_entry, 7524 uint16_t idx) 7525 { 7526 int ret = QLA_ERROR; 7527 7528 ret = qla4xxx_is_session_exists(ha, 
fw_ddb_entry, NULL); 7529 if (ret != QLA_SUCCESS) 7530 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7531 idx); 7532 else 7533 ret = -EPERM; 7534 7535 return ret; 7536 } 7537 7538 /** 7539 * qla4xxx_sysfs_ddb_login - Login to the specified target 7540 * @fnode_sess: pointer to session attrs of flash ddb entry 7541 * @fnode_conn: pointer to connection attrs of flash ddb entry 7542 * 7543 * This logs in to the specified target 7544 **/ 7545 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 7546 struct iscsi_bus_flash_conn *fnode_conn) 7547 { 7548 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7549 struct scsi_qla_host *ha = to_qla_host(shost); 7550 struct dev_db_entry *fw_ddb_entry = NULL; 7551 dma_addr_t fw_ddb_entry_dma; 7552 uint32_t options = 0; 7553 int ret = 0; 7554 7555 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { 7556 ql4_printk(KERN_ERR, ha, 7557 "%s: Target info is not persistent\n", __func__); 7558 ret = -EIO; 7559 goto exit_ddb_login; 7560 } 7561 7562 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7563 &fw_ddb_entry_dma, GFP_KERNEL); 7564 if (!fw_ddb_entry) { 7565 DEBUG2(ql4_printk(KERN_ERR, ha, 7566 "%s: Unable to allocate dma buffer\n", 7567 __func__)); 7568 ret = -ENOMEM; 7569 goto exit_ddb_login; 7570 } 7571 7572 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7573 options |= IPV6_DEFAULT_DDB_ENTRY; 7574 7575 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7576 if (ret == QLA_ERROR) 7577 goto exit_ddb_login; 7578 7579 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7580 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7581 7582 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7583 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, 7584 fnode_sess->target_id); 7585 else 7586 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, 7587 fnode_sess->target_id); 7588 7589 if (ret > 0) 7590 ret = -EIO; 7591 7592 exit_ddb_login: 7593 if (fw_ddb_entry) 7594 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7595 fw_ddb_entry, fw_ddb_entry_dma); 7596 return ret; 7597 } 7598 7599 /** 7600 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target 7601 * @cls_sess: pointer to session to be logged out 7602 * 7603 * This performs session log out from the specified target 7604 **/ 7605 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) 7606 { 7607 struct iscsi_session *sess; 7608 struct ddb_entry *ddb_entry = NULL; 7609 struct scsi_qla_host *ha; 7610 struct dev_db_entry *fw_ddb_entry = NULL; 7611 dma_addr_t fw_ddb_entry_dma; 7612 unsigned long flags; 7613 unsigned long wtime; 7614 uint32_t ddb_state; 7615 int options; 7616 int ret = 0; 7617 7618 sess = cls_sess->dd_data; 7619 ddb_entry = sess->dd_data; 7620 ha = ddb_entry->ha; 7621 7622 if (ddb_entry->ddb_type != FLASH_DDB) { 7623 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", 7624 __func__); 7625 ret = -ENXIO; 7626 goto exit_ddb_logout; 7627 } 7628 7629 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 7630 ql4_printk(KERN_ERR, ha, 7631 "%s: Logout from boot target entry is not permitted.\n", 7632 __func__); 7633 ret = -EPERM; 7634 goto exit_ddb_logout; 7635 } 7636 7637 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7638 &fw_ddb_entry_dma, GFP_KERNEL); 7639 if (!fw_ddb_entry) { 7640 ql4_printk(KERN_ERR, ha, 7641 "%s: Unable to allocate dma buffer\n", __func__); 7642 ret = -ENOMEM; 7643 goto exit_ddb_logout; 7644 } 7645 7646 if 
(test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 7647 goto ddb_logout_init; 7648 7649 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7650 fw_ddb_entry, fw_ddb_entry_dma, 7651 NULL, NULL, &ddb_state, NULL, 7652 NULL, NULL); 7653 if (ret == QLA_ERROR) 7654 goto ddb_logout_init; 7655 7656 if (ddb_state == DDB_DS_SESSION_ACTIVE) 7657 goto ddb_logout_init; 7658 7659 /* wait until next relogin is triggered using DF_RELOGIN and 7660 * clear DF_RELOGIN to avoid invocation of further relogin 7661 */ 7662 wtime = jiffies + (HZ * RELOGIN_TOV); 7663 do { 7664 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) 7665 goto ddb_logout_init; 7666 7667 schedule_timeout_uninterruptible(HZ); 7668 } while ((time_after(wtime, jiffies))); 7669 7670 ddb_logout_init: 7671 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 7672 atomic_set(&ddb_entry->relogin_timer, 0); 7673 7674 options = LOGOUT_OPTION_CLOSE_SESSION; 7675 qla4xxx_session_logout_ddb(ha, ddb_entry, options); 7676 7677 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); 7678 wtime = jiffies + (HZ * LOGOUT_TOV); 7679 do { 7680 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7681 fw_ddb_entry, fw_ddb_entry_dma, 7682 NULL, NULL, &ddb_state, NULL, 7683 NULL, NULL); 7684 if (ret == QLA_ERROR) 7685 goto ddb_logout_clr_sess; 7686 7687 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 7688 (ddb_state == DDB_DS_SESSION_FAILED)) 7689 goto ddb_logout_clr_sess; 7690 7691 schedule_timeout_uninterruptible(HZ); 7692 } while ((time_after(wtime, jiffies))); 7693 7694 ddb_logout_clr_sess: 7695 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 7696 /* 7697 * we have decremented the reference count of the driver 7698 * when we setup the session to have the driver unload 7699 * to be seamless without actually destroying the 7700 * session 7701 **/ 7702 try_module_get(qla4xxx_iscsi_transport.owner); 7703 iscsi_destroy_endpoint(ddb_entry->conn->ep); 7704 7705 spin_lock_irqsave(&ha->hardware_lock, flags); 7706 qla4xxx_free_ddb(ha, ddb_entry); 7707 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 7708 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7709 7710 iscsi_session_teardown(ddb_entry->sess); 7711 7712 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); 7713 ret = QLA_SUCCESS; 7714 7715 exit_ddb_logout: 7716 if (fw_ddb_entry) 7717 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7718 fw_ddb_entry, fw_ddb_entry_dma); 7719 return ret; 7720 } 7721 7722 /** 7723 * qla4xxx_sysfs_ddb_logout - Logout from the specified target 7724 * @fnode_sess: pointer to session attrs of flash ddb entry 7725 * @fnode_conn: pointer to connection attrs of flash ddb entry 7726 * 7727 * This performs log out from the specified target 7728 **/ 7729 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 7730 struct iscsi_bus_flash_conn *fnode_conn) 7731 { 7732 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7733 struct scsi_qla_host *ha = to_qla_host(shost); 7734 struct ql4_tuple_ddb *flash_tddb = NULL; 7735 struct ql4_tuple_ddb *tmp_tddb = NULL; 7736 struct dev_db_entry *fw_ddb_entry = NULL; 7737 struct ddb_entry *ddb_entry = NULL; 7738 dma_addr_t fw_ddb_dma; 7739 uint32_t next_idx = 0; 7740 uint32_t state = 0, conn_err = 0; 7741 uint16_t conn_id = 0; 7742 int idx, index; 7743 int status, ret = 0; 7744 7745 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7746 &fw_ddb_dma); 7747 if (fw_ddb_entry == NULL) { 7748 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); 7749 ret = 
-ENOMEM; 7750 goto exit_ddb_logout; 7751 } 7752 7753 flash_tddb = vzalloc(sizeof(*flash_tddb)); 7754 if (!flash_tddb) { 7755 ql4_printk(KERN_WARNING, ha, 7756 "%s:Memory Allocation failed.\n", __func__); 7757 ret = -ENOMEM; 7758 goto exit_ddb_logout; 7759 } 7760 7761 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 7762 if (!tmp_tddb) { 7763 ql4_printk(KERN_WARNING, ha, 7764 "%s:Memory Allocation failed.\n", __func__); 7765 ret = -ENOMEM; 7766 goto exit_ddb_logout; 7767 } 7768 7769 if (!fnode_sess->targetname) { 7770 ql4_printk(KERN_ERR, ha, 7771 "%s:Cannot logout from SendTarget entry\n", 7772 __func__); 7773 ret = -EPERM; 7774 goto exit_ddb_logout; 7775 } 7776 7777 if (fnode_sess->is_boot_target) { 7778 ql4_printk(KERN_ERR, ha, 7779 "%s: Logout from boot target entry is not permitted.\n", 7780 __func__); 7781 ret = -EPERM; 7782 goto exit_ddb_logout; 7783 } 7784 7785 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, 7786 ISCSI_NAME_SIZE); 7787 7788 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7789 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); 7790 else 7791 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); 7792 7793 flash_tddb->tpgt = fnode_sess->tpgt; 7794 flash_tddb->port = fnode_conn->port; 7795 7796 COPY_ISID(flash_tddb->isid, fnode_sess->isid); 7797 7798 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 7799 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 7800 if (ddb_entry == NULL) 7801 continue; 7802 7803 if (ddb_entry->ddb_type != FLASH_DDB) 7804 continue; 7805 7806 index = ddb_entry->sess->target_id; 7807 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, 7808 fw_ddb_dma, NULL, &next_idx, 7809 &state, &conn_err, NULL, 7810 &conn_id); 7811 if (status == QLA_ERROR) { 7812 ret = -ENOMEM; 7813 break; 7814 } 7815 7816 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); 7817 7818 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, 7819 true); 7820 if (status == QLA_SUCCESS) { 7821 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); 7822 break; 7823 } 7824 } 7825 7826 if (idx == MAX_DDB_ENTRIES) 7827 ret = -ESRCH; 7828 7829 exit_ddb_logout: 7830 vfree(flash_tddb); 7831 vfree(tmp_tddb); 7832 if (fw_ddb_entry) 7833 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7834 7835 return ret; 7836 } 7837 7838 static int 7839 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 7840 int param, char *buf) 7841 { 7842 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7843 struct scsi_qla_host *ha = to_qla_host(shost); 7844 struct iscsi_bus_flash_conn *fnode_conn; 7845 struct ql4_chap_table chap_tbl; 7846 struct device *dev; 7847 int parent_type; 7848 int rc = 0; 7849 7850 dev = iscsi_find_flashnode_conn(fnode_sess); 7851 if (!dev) 7852 return -EIO; 7853 7854 fnode_conn = iscsi_dev_to_flash_conn(dev); 7855 7856 switch (param) { 7857 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 7858 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); 7859 break; 7860 case ISCSI_FLASHNODE_PORTAL_TYPE: 7861 rc = sprintf(buf, "%s\n", fnode_sess->portal_type); 7862 break; 7863 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 7864 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); 7865 break; 7866 case ISCSI_FLASHNODE_DISCOVERY_SESS: 7867 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); 7868 break; 7869 case ISCSI_FLASHNODE_ENTRY_EN: 7870 rc = sprintf(buf, "%u\n", fnode_sess->entry_state); 7871 break; 7872 case ISCSI_FLASHNODE_HDR_DGST_EN: 7873 rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); 7874 
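		/*
		 * Note added for clarity (a sketch of how this getter is
		 * reached, not new driver logic): each case in this switch
		 * formats exactly one flashnode attribute into @buf for a
		 * sysfs read; the iscsi transport class routes reads of the
		 * flashnode attribute files (typically exposed under
		 * /sys/bus/iscsi_flashnode/devices/, though the exact layout
		 * depends on the kernel version) to this callback with the
		 * matching ISCSI_FLASHNODE_* param.
		 */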
break; 7875 case ISCSI_FLASHNODE_DATA_DGST_EN: 7876 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); 7877 break; 7878 case ISCSI_FLASHNODE_IMM_DATA_EN: 7879 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); 7880 break; 7881 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 7882 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); 7883 break; 7884 case ISCSI_FLASHNODE_DATASEQ_INORDER: 7885 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); 7886 break; 7887 case ISCSI_FLASHNODE_PDU_INORDER: 7888 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); 7889 break; 7890 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 7891 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); 7892 break; 7893 case ISCSI_FLASHNODE_SNACK_REQ_EN: 7894 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); 7895 break; 7896 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 7897 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); 7898 break; 7899 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 7900 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); 7901 break; 7902 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 7903 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); 7904 break; 7905 case ISCSI_FLASHNODE_ERL: 7906 rc = sprintf(buf, "%u\n", fnode_sess->erl); 7907 break; 7908 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 7909 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); 7910 break; 7911 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 7912 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); 7913 break; 7914 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 7915 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); 7916 break; 7917 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 7918 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); 7919 break; 7920 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 7921 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); 7922 break; 7923 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 7924 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); 7925 break; 7926 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 7927 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); 7928 break; 7929 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 7930 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); 7931 break; 7932 case ISCSI_FLASHNODE_FIRST_BURST: 7933 rc = sprintf(buf, "%u\n", fnode_sess->first_burst); 7934 break; 7935 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 7936 rc = sprintf(buf, "%u\n", fnode_sess->time2wait); 7937 break; 7938 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 7939 rc = sprintf(buf, "%u\n", fnode_sess->time2retain); 7940 break; 7941 case ISCSI_FLASHNODE_MAX_R2T: 7942 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); 7943 break; 7944 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 7945 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); 7946 break; 7947 case ISCSI_FLASHNODE_ISID: 7948 rc = sprintf(buf, "%pm\n", fnode_sess->isid); 7949 break; 7950 case ISCSI_FLASHNODE_TSID: 7951 rc = sprintf(buf, "%u\n", fnode_sess->tsid); 7952 break; 7953 case ISCSI_FLASHNODE_PORT: 7954 rc = sprintf(buf, "%d\n", fnode_conn->port); 7955 break; 7956 case ISCSI_FLASHNODE_MAX_BURST: 7957 rc = sprintf(buf, "%u\n", fnode_sess->max_burst); 7958 break; 7959 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 7960 rc = sprintf(buf, "%u\n", 7961 fnode_sess->default_taskmgmt_timeout); 7962 break; 7963 case ISCSI_FLASHNODE_IPADDR: 7964 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7965 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress); 7966 else 7967 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); 7968 break; 7969 case ISCSI_FLASHNODE_ALIAS: 7970 if (fnode_sess->targetalias) 
7971 rc = sprintf(buf, "%s\n", fnode_sess->targetalias); 7972 else 7973 rc = sprintf(buf, "\n"); 7974 break; 7975 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 7976 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7977 rc = sprintf(buf, "%pI6\n", 7978 fnode_conn->redirect_ipaddr); 7979 else 7980 rc = sprintf(buf, "%pI4\n", 7981 fnode_conn->redirect_ipaddr); 7982 break; 7983 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 7984 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); 7985 break; 7986 case ISCSI_FLASHNODE_LOCAL_PORT: 7987 rc = sprintf(buf, "%u\n", fnode_conn->local_port); 7988 break; 7989 case ISCSI_FLASHNODE_IPV4_TOS: 7990 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); 7991 break; 7992 case ISCSI_FLASHNODE_IPV6_TC: 7993 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7994 rc = sprintf(buf, "%u\n", 7995 fnode_conn->ipv6_traffic_class); 7996 else 7997 rc = sprintf(buf, "\n"); 7998 break; 7999 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8000 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); 8001 break; 8002 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8003 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 8004 rc = sprintf(buf, "%pI6\n", 8005 fnode_conn->link_local_ipv6_addr); 8006 else 8007 rc = sprintf(buf, "\n"); 8008 break; 8009 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8010 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); 8011 break; 8012 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 8013 if (fnode_sess->discovery_parent_type == DDB_ISNS) 8014 parent_type = ISCSI_DISC_PARENT_ISNS; 8015 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 8016 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8017 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) 8018 parent_type = ISCSI_DISC_PARENT_SENDTGT; 8019 else 8020 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8021 8022 rc = sprintf(buf, "%s\n", 8023 iscsi_get_discovery_parent_name(parent_type)); 8024 break; 8025 case ISCSI_FLASHNODE_NAME: 8026 if (fnode_sess->targetname) 8027 rc = sprintf(buf, "%s\n", fnode_sess->targetname); 8028 else 8029 rc = sprintf(buf, "\n"); 8030 break; 8031 case ISCSI_FLASHNODE_TPGT: 8032 rc = sprintf(buf, "%u\n", fnode_sess->tpgt); 8033 break; 8034 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8035 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); 8036 break; 8037 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8038 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); 8039 break; 8040 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8041 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); 8042 break; 8043 case ISCSI_FLASHNODE_USERNAME: 8044 if (fnode_sess->chap_auth_en) { 8045 qla4xxx_get_uni_chap_at_index(ha, 8046 chap_tbl.name, 8047 chap_tbl.secret, 8048 fnode_sess->chap_out_idx); 8049 rc = sprintf(buf, "%s\n", chap_tbl.name); 8050 } else { 8051 rc = sprintf(buf, "\n"); 8052 } 8053 break; 8054 case ISCSI_FLASHNODE_PASSWORD: 8055 if (fnode_sess->chap_auth_en) { 8056 qla4xxx_get_uni_chap_at_index(ha, 8057 chap_tbl.name, 8058 chap_tbl.secret, 8059 fnode_sess->chap_out_idx); 8060 rc = sprintf(buf, "%s\n", chap_tbl.secret); 8061 } else { 8062 rc = sprintf(buf, "\n"); 8063 } 8064 break; 8065 case ISCSI_FLASHNODE_STATSN: 8066 rc = sprintf(buf, "%u\n", fnode_conn->statsn); 8067 break; 8068 case ISCSI_FLASHNODE_EXP_STATSN: 8069 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); 8070 break; 8071 case ISCSI_FLASHNODE_IS_BOOT_TGT: 8072 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); 8073 break; 8074 default: 8075 rc = -ENOSYS; 8076 break; 8077 } 8078 8079 put_device(dev); 8080 return rc; 8081 } 8082 8083 /** 8084 * 
qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry 8085 * @fnode_sess: pointer to session attrs of flash ddb entry 8086 * @fnode_conn: pointer to connection attrs of flash ddb entry 8087 * @data: Parameters and their values to update 8088 * @len: len of data 8089 * 8090 * This sets the parameter of flash ddb entry and writes them to flash 8091 **/ 8092 static int 8093 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 8094 struct iscsi_bus_flash_conn *fnode_conn, 8095 void *data, int len) 8096 { 8097 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8098 struct scsi_qla_host *ha = to_qla_host(shost); 8099 struct iscsi_flashnode_param_info *fnode_param; 8100 struct ql4_chap_table chap_tbl; 8101 struct nlattr *attr; 8102 uint16_t chap_out_idx = INVALID_ENTRY; 8103 int rc = QLA_ERROR; 8104 uint32_t rem = len; 8105 8106 memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); 8107 nla_for_each_attr(attr, data, len, rem) { 8108 fnode_param = nla_data(attr); 8109 8110 switch (fnode_param->param) { 8111 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 8112 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; 8113 break; 8114 case ISCSI_FLASHNODE_PORTAL_TYPE: 8115 memcpy(fnode_sess->portal_type, fnode_param->value, 8116 strlen(fnode_sess->portal_type)); 8117 break; 8118 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 8119 fnode_sess->auto_snd_tgt_disable = 8120 fnode_param->value[0]; 8121 break; 8122 case ISCSI_FLASHNODE_DISCOVERY_SESS: 8123 fnode_sess->discovery_sess = fnode_param->value[0]; 8124 break; 8125 case ISCSI_FLASHNODE_ENTRY_EN: 8126 fnode_sess->entry_state = fnode_param->value[0]; 8127 break; 8128 case ISCSI_FLASHNODE_HDR_DGST_EN: 8129 fnode_conn->hdrdgst_en = fnode_param->value[0]; 8130 break; 8131 case ISCSI_FLASHNODE_DATA_DGST_EN: 8132 fnode_conn->datadgst_en = fnode_param->value[0]; 8133 break; 8134 case ISCSI_FLASHNODE_IMM_DATA_EN: 8135 fnode_sess->imm_data_en = fnode_param->value[0]; 8136 break; 8137 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 8138 fnode_sess->initial_r2t_en = fnode_param->value[0]; 8139 break; 8140 case ISCSI_FLASHNODE_DATASEQ_INORDER: 8141 fnode_sess->dataseq_inorder_en = fnode_param->value[0]; 8142 break; 8143 case ISCSI_FLASHNODE_PDU_INORDER: 8144 fnode_sess->pdu_inorder_en = fnode_param->value[0]; 8145 break; 8146 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 8147 fnode_sess->chap_auth_en = fnode_param->value[0]; 8148 /* Invalidate chap index if chap auth is disabled */ 8149 if (!fnode_sess->chap_auth_en) 8150 fnode_sess->chap_out_idx = INVALID_ENTRY; 8151 8152 break; 8153 case ISCSI_FLASHNODE_SNACK_REQ_EN: 8154 fnode_conn->snack_req_en = fnode_param->value[0]; 8155 break; 8156 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 8157 fnode_sess->discovery_logout_en = fnode_param->value[0]; 8158 break; 8159 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 8160 fnode_sess->bidi_chap_en = fnode_param->value[0]; 8161 break; 8162 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 8163 fnode_sess->discovery_auth_optional = 8164 fnode_param->value[0]; 8165 break; 8166 case ISCSI_FLASHNODE_ERL: 8167 fnode_sess->erl = fnode_param->value[0]; 8168 break; 8169 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 8170 fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; 8171 break; 8172 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 8173 fnode_conn->tcp_nagle_disable = fnode_param->value[0]; 8174 break; 8175 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 8176 fnode_conn->tcp_wsf_disable = fnode_param->value[0]; 8177 break; 8178 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 8179 fnode_conn->tcp_timer_scale = 
fnode_param->value[0]; 8180 break; 8181 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 8182 fnode_conn->tcp_timestamp_en = fnode_param->value[0]; 8183 break; 8184 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 8185 fnode_conn->fragment_disable = fnode_param->value[0]; 8186 break; 8187 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 8188 fnode_conn->max_recv_dlength = 8189 *(unsigned *)fnode_param->value; 8190 break; 8191 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 8192 fnode_conn->max_xmit_dlength = 8193 *(unsigned *)fnode_param->value; 8194 break; 8195 case ISCSI_FLASHNODE_FIRST_BURST: 8196 fnode_sess->first_burst = 8197 *(unsigned *)fnode_param->value; 8198 break; 8199 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 8200 fnode_sess->time2wait = *(uint16_t *)fnode_param->value; 8201 break; 8202 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 8203 fnode_sess->time2retain = 8204 *(uint16_t *)fnode_param->value; 8205 break; 8206 case ISCSI_FLASHNODE_MAX_R2T: 8207 fnode_sess->max_r2t = 8208 *(uint16_t *)fnode_param->value; 8209 break; 8210 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 8211 fnode_conn->keepalive_timeout = 8212 *(uint16_t *)fnode_param->value; 8213 break; 8214 case ISCSI_FLASHNODE_ISID: 8215 memcpy(fnode_sess->isid, fnode_param->value, 8216 sizeof(fnode_sess->isid)); 8217 break; 8218 case ISCSI_FLASHNODE_TSID: 8219 fnode_sess->tsid = *(uint16_t *)fnode_param->value; 8220 break; 8221 case ISCSI_FLASHNODE_PORT: 8222 fnode_conn->port = *(uint16_t *)fnode_param->value; 8223 break; 8224 case ISCSI_FLASHNODE_MAX_BURST: 8225 fnode_sess->max_burst = *(unsigned *)fnode_param->value; 8226 break; 8227 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 8228 fnode_sess->default_taskmgmt_timeout = 8229 *(uint16_t *)fnode_param->value; 8230 break; 8231 case ISCSI_FLASHNODE_IPADDR: 8232 memcpy(fnode_conn->ipaddress, fnode_param->value, 8233 IPv6_ADDR_LEN); 8234 break; 8235 case ISCSI_FLASHNODE_ALIAS: 8236 rc = iscsi_switch_str_param(&fnode_sess->targetalias, 8237 (char *)fnode_param->value); 8238 break; 8239 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 8240 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, 8241 IPv6_ADDR_LEN); 8242 break; 8243 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 8244 fnode_conn->max_segment_size = 8245 *(unsigned *)fnode_param->value; 8246 break; 8247 case ISCSI_FLASHNODE_LOCAL_PORT: 8248 fnode_conn->local_port = 8249 *(uint16_t *)fnode_param->value; 8250 break; 8251 case ISCSI_FLASHNODE_IPV4_TOS: 8252 fnode_conn->ipv4_tos = fnode_param->value[0]; 8253 break; 8254 case ISCSI_FLASHNODE_IPV6_TC: 8255 fnode_conn->ipv6_traffic_class = fnode_param->value[0]; 8256 break; 8257 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8258 fnode_conn->ipv6_flow_label = fnode_param->value[0]; 8259 break; 8260 case ISCSI_FLASHNODE_NAME: 8261 rc = iscsi_switch_str_param(&fnode_sess->targetname, 8262 (char *)fnode_param->value); 8263 break; 8264 case ISCSI_FLASHNODE_TPGT: 8265 fnode_sess->tpgt = *(uint16_t *)fnode_param->value; 8266 break; 8267 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8268 memcpy(fnode_conn->link_local_ipv6_addr, 8269 fnode_param->value, IPv6_ADDR_LEN); 8270 break; 8271 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8272 fnode_sess->discovery_parent_idx = 8273 *(uint16_t *)fnode_param->value; 8274 break; 8275 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8276 fnode_conn->tcp_xmit_wsf = 8277 *(uint8_t *)fnode_param->value; 8278 break; 8279 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8280 fnode_conn->tcp_recv_wsf = 8281 *(uint8_t *)fnode_param->value; 8282 break; 8283 case ISCSI_FLASHNODE_STATSN: 8284 fnode_conn->statsn = *(uint32_t *)fnode_param->value; 8285 break; 8286 case 
ISCSI_FLASHNODE_EXP_STATSN: 8287 fnode_conn->exp_statsn = 8288 *(uint32_t *)fnode_param->value; 8289 break; 8290 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8291 chap_out_idx = *(uint16_t *)fnode_param->value; 8292 if (!qla4xxx_get_uni_chap_at_index(ha, 8293 chap_tbl.name, 8294 chap_tbl.secret, 8295 chap_out_idx)) { 8296 fnode_sess->chap_out_idx = chap_out_idx; 8297 /* Enable chap auth if chap index is valid */ 8298 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; 8299 } 8300 break; 8301 default: 8302 ql4_printk(KERN_ERR, ha, 8303 "%s: No such sysfs attribute\n", __func__); 8304 rc = -ENOSYS; 8305 goto exit_set_param; 8306 } 8307 } 8308 8309 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); 8310 8311 exit_set_param: 8312 return rc; 8313 } 8314 8315 /** 8316 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry 8317 * @fnode_sess: pointer to session attrs of flash ddb entry 8318 * 8319 * This invalidates the flash ddb entry at the given index 8320 **/ 8321 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) 8322 { 8323 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8324 struct scsi_qla_host *ha = to_qla_host(shost); 8325 uint32_t dev_db_start_offset; 8326 uint32_t dev_db_end_offset; 8327 struct dev_db_entry *fw_ddb_entry = NULL; 8328 dma_addr_t fw_ddb_entry_dma; 8329 uint16_t *ddb_cookie = NULL; 8330 size_t ddb_size = 0; 8331 void *pddb = NULL; 8332 int target_id; 8333 int rc = 0; 8334 8335 if (fnode_sess->is_boot_target) { 8336 rc = -EPERM; 8337 DEBUG2(ql4_printk(KERN_ERR, ha, 8338 "%s: Deletion of boot target entry is not permitted.\n", 8339 __func__)); 8340 goto exit_ddb_del; 8341 } 8342 8343 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) 8344 goto sysfs_ddb_del; 8345 8346 if (is_qla40XX(ha)) { 8347 dev_db_start_offset = FLASH_OFFSET_DB_INFO; 8348 dev_db_end_offset = FLASH_OFFSET_DB_END; 8349 dev_db_start_offset += (fnode_sess->target_id * 8350 sizeof(*fw_ddb_entry)); 8351 ddb_size = sizeof(*fw_ddb_entry); 8352 } else { 8353 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + 8354 (ha->hw.flt_region_ddb << 2); 8355 /* flt_ddb_size is DDB table size for both ports 8356 * so divide it by 2 to calculate the offset for second port 8357 */ 8358 if (ha->port_num == 1) 8359 dev_db_start_offset += (ha->hw.flt_ddb_size / 2); 8360 8361 dev_db_end_offset = dev_db_start_offset + 8362 (ha->hw.flt_ddb_size / 2); 8363 8364 dev_db_start_offset += (fnode_sess->target_id * 8365 sizeof(*fw_ddb_entry)); 8366 dev_db_start_offset += offsetof(struct dev_db_entry, cookie); 8367 8368 ddb_size = sizeof(*ddb_cookie); 8369 } 8370 8371 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", 8372 __func__, dev_db_start_offset, dev_db_end_offset)); 8373 8374 if (dev_db_start_offset > dev_db_end_offset) { 8375 rc = -EIO; 8376 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", 8377 __func__, fnode_sess->target_id)); 8378 goto exit_ddb_del; 8379 } 8380 8381 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, 8382 &fw_ddb_entry_dma, GFP_KERNEL); 8383 if (!pddb) { 8384 rc = -ENOMEM; 8385 DEBUG2(ql4_printk(KERN_ERR, ha, 8386 "%s: Unable to allocate dma buffer\n", 8387 __func__)); 8388 goto exit_ddb_del; 8389 } 8390 8391 if (is_qla40XX(ha)) { 8392 fw_ddb_entry = pddb; 8393 memset(fw_ddb_entry, 0, ddb_size); 8394 ddb_cookie = &fw_ddb_entry->cookie; 8395 } else { 8396 ddb_cookie = pddb; 8397 } 8398 8399 /* invalidate the cookie */ 8400 *ddb_cookie = 0xFFEE; 8401 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 8402 ddb_size, FLASH_OPT_RMW_COMMIT); 
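	/*
	 * Summary of the delete path above (added for readability, no
	 * functional change implied): the flash entry is invalidated by
	 * rewriting its cookie as 0xFFEE instead of DDB_VALID_COOKIE; on
	 * ISP4xxx a zeroed full dev_db_entry carrying that cookie is written
	 * back, on ISP8xxx only the 16-bit cookie field at its offsetof()
	 * location is rewritten.  The flashnode sysfs objects are then
	 * destroyed below.
	 */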
8403 8404 sysfs_ddb_del: 8405 target_id = fnode_sess->target_id; 8406 iscsi_destroy_flashnode_sess(fnode_sess); 8407 ql4_printk(KERN_INFO, ha, 8408 "%s: session and conn entries for flashnode %u of host %lu deleted\n", 8409 __func__, target_id, ha->host_no); 8410 exit_ddb_del: 8411 if (pddb) 8412 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, 8413 fw_ddb_entry_dma); 8414 return rc; 8415 } 8416 8417 /** 8418 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs 8419 * @ha: pointer to adapter structure 8420 * 8421 * Export the firmware DDB for all send targets and normal targets to sysfs. 8422 **/ 8423 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) 8424 { 8425 struct dev_db_entry *fw_ddb_entry = NULL; 8426 dma_addr_t fw_ddb_entry_dma; 8427 uint16_t max_ddbs; 8428 uint16_t idx = 0; 8429 int ret = QLA_SUCCESS; 8430 8431 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 8432 sizeof(*fw_ddb_entry), 8433 &fw_ddb_entry_dma, GFP_KERNEL); 8434 if (!fw_ddb_entry) { 8435 DEBUG2(ql4_printk(KERN_ERR, ha, 8436 "%s: Unable to allocate dma buffer\n", 8437 __func__)); 8438 return -ENOMEM; 8439 } 8440 8441 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 8442 MAX_DEV_DB_ENTRIES; 8443 8444 for (idx = 0; idx < max_ddbs; idx++) { 8445 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, 8446 idx)) 8447 continue; 8448 8449 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); 8450 if (ret) { 8451 ret = -EIO; 8452 break; 8453 } 8454 } 8455 8456 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, 8457 fw_ddb_entry_dma); 8458 8459 return ret; 8460 } 8461 8462 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) 8463 { 8464 iscsi_destroy_all_flashnode(ha->host); 8465 } 8466 8467 /** 8468 * qla4xxx_build_ddb_list - Build ddb list and setup sessions 8469 * @ha: pointer to adapter structure 8470 * @is_reset: Is this init path or reset path 8471 * 8472 * Create a list of sendtargets (st) from firmware DDBs, issue send targets 8473 * using connection open, then create the list of normal targets (nt) 8474 * from firmware DDBs. Based on the list of nt setup session and connection 8475 * objects. 8476 **/ 8477 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 8478 { 8479 uint16_t tmo = 0; 8480 struct list_head list_st, list_nt; 8481 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; 8482 unsigned long wtime; 8483 8484 if (!test_bit(AF_LINK_UP, &ha->flags)) { 8485 set_bit(AF_BUILD_DDB_LIST, &ha->flags); 8486 ha->is_reset = is_reset; 8487 return; 8488 } 8489 8490 INIT_LIST_HEAD(&list_st); 8491 INIT_LIST_HEAD(&list_nt); 8492 8493 qla4xxx_build_st_list(ha, &list_st); 8494 8495 /* Before issuing conn open mbox, ensure all IPs states are configured 8496 * Note, conn open fails if IPs are not configured 8497 */ 8498 qla4xxx_wait_for_ip_configuration(ha); 8499 8500 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 8501 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 8502 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 8503 } 8504 8505 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 8506 tmo = ((ha->def_timeout > LOGIN_TOV) && 8507 (ha->def_timeout < LOGIN_TOV * 10) ? 
8508 ha->def_timeout : LOGIN_TOV); 8509 8510 DEBUG2(ql4_printk(KERN_INFO, ha, 8511 "Default time to wait for build ddb %d\n", tmo)); 8512 8513 wtime = jiffies + (HZ * tmo); 8514 do { 8515 if (list_empty(&list_st)) 8516 break; 8517 8518 qla4xxx_remove_failed_ddb(ha, &list_st); 8519 schedule_timeout_uninterruptible(HZ / 10); 8520 } while (time_after(wtime, jiffies)); 8521 8522 8523 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); 8524 8525 qla4xxx_free_ddb_list(&list_st); 8526 qla4xxx_free_ddb_list(&list_nt); 8527 8528 qla4xxx_free_ddb_index(ha); 8529 } 8530 8531 /** 8532 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login 8533 * response. 8534 * @ha: pointer to adapter structure 8535 * 8536 * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be 8537 * set in DDB and we will wait for login response of boot targets during 8538 * probe. 8539 **/ 8540 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha) 8541 { 8542 struct ddb_entry *ddb_entry; 8543 struct dev_db_entry *fw_ddb_entry = NULL; 8544 dma_addr_t fw_ddb_entry_dma; 8545 unsigned long wtime; 8546 uint32_t ddb_state; 8547 int max_ddbs, idx, ret; 8548 8549 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 8550 MAX_DEV_DB_ENTRIES; 8551 8552 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8553 &fw_ddb_entry_dma, GFP_KERNEL); 8554 if (!fw_ddb_entry) { 8555 ql4_printk(KERN_ERR, ha, 8556 "%s: Unable to allocate dma buffer\n", __func__); 8557 goto exit_login_resp; 8558 } 8559 8560 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV); 8561 8562 for (idx = 0; idx < max_ddbs; idx++) { 8563 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8564 if (ddb_entry == NULL) 8565 continue; 8566 8567 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 8568 DEBUG2(ql4_printk(KERN_INFO, ha, 8569 "%s: DDB index [%d]\n", __func__, 8570 ddb_entry->fw_ddb_index)); 8571 do { 8572 ret = qla4xxx_get_fwddb_entry(ha, 8573 ddb_entry->fw_ddb_index, 8574 fw_ddb_entry, fw_ddb_entry_dma, 8575 NULL, NULL, &ddb_state, NULL, 8576 NULL, NULL); 8577 if (ret == QLA_ERROR) 8578 goto exit_login_resp; 8579 8580 if ((ddb_state == DDB_DS_SESSION_ACTIVE) || 8581 (ddb_state == DDB_DS_SESSION_FAILED)) 8582 break; 8583 8584 schedule_timeout_uninterruptible(HZ); 8585 8586 } while ((time_after(wtime, jiffies))); 8587 8588 if (!time_after(wtime, jiffies)) { 8589 DEBUG2(ql4_printk(KERN_INFO, ha, 8590 "%s: Login response wait timer expired\n", 8591 __func__)); 8592 goto exit_login_resp; 8593 } 8594 } 8595 } 8596 8597 exit_login_resp: 8598 if (fw_ddb_entry) 8599 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8600 fw_ddb_entry, fw_ddb_entry_dma); 8601 } 8602 8603 /** 8604 * qla4xxx_probe_adapter - callback function to probe HBA 8605 * @pdev: pointer to pci_dev structure 8606 * @ent: pointer to pci_device entry 8607 * 8608 * This routine will probe for Qlogic 4xxx iSCSI host adapters. 8609 * It returns zero if successful. It also initializes all data necessary for 8610 * the driver. 
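 *
 * Rough sequence implemented below (summary only): enable the PCI device,
 * allocate the iSCSI Scsi_Host, select the ISP-specific isp_ops and map
 * I/O space, allocate DMA and srb resources, initialize the adapter
 * (retrying up to MAX_INIT_RETRIES), start the DPC and task workqueues,
 * request IRQs directly only on ISP4xxx parts, then set up boot info,
 * build and log in the flash DDB list and create the network ifaces in
 * sysfs.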
8611 **/ 8612 static int qla4xxx_probe_adapter(struct pci_dev *pdev, 8613 const struct pci_device_id *ent) 8614 { 8615 int ret = -ENODEV, status; 8616 struct Scsi_Host *host; 8617 struct scsi_qla_host *ha; 8618 uint8_t init_retry_count = 0; 8619 char buf[34]; 8620 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; 8621 uint32_t dev_state; 8622 8623 if (pci_enable_device(pdev)) 8624 return -1; 8625 8626 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); 8627 if (host == NULL) { 8628 printk(KERN_WARNING 8629 "qla4xxx: Couldn't allocate host from scsi layer!\n"); 8630 goto probe_disable_device; 8631 } 8632 8633 /* Clear our data area */ 8634 ha = to_qla_host(host); 8635 memset(ha, 0, sizeof(*ha)); 8636 8637 /* Save the information from PCI BIOS. */ 8638 ha->pdev = pdev; 8639 ha->host = host; 8640 ha->host_no = host->host_no; 8641 ha->func_num = PCI_FUNC(ha->pdev->devfn); 8642 8643 pci_enable_pcie_error_reporting(pdev); 8644 8645 /* Setup Runtime configurable options */ 8646 if (is_qla8022(ha)) { 8647 ha->isp_ops = &qla4_82xx_isp_ops; 8648 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; 8649 ha->qdr_sn_window = -1; 8650 ha->ddr_mn_window = -1; 8651 ha->curr_window = 255; 8652 nx_legacy_intr = &legacy_intr[ha->func_num]; 8653 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 8654 ha->nx_legacy_intr.tgt_status_reg = 8655 nx_legacy_intr->tgt_status_reg; 8656 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 8657 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 8658 } else if (is_qla8032(ha) || is_qla8042(ha)) { 8659 ha->isp_ops = &qla4_83xx_isp_ops; 8660 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; 8661 } else { 8662 ha->isp_ops = &qla4xxx_isp_ops; 8663 } 8664 8665 if (is_qla80XX(ha)) { 8666 rwlock_init(&ha->hw_lock); 8667 ha->pf_bit = ha->func_num << 16; 8668 /* Set EEH reset type to fundamental if required by hba */ 8669 pdev->needs_freset = 1; 8670 } 8671 8672 /* Configure PCI I/O space. */ 8673 ret = ha->isp_ops->iospace_config(ha); 8674 if (ret) 8675 goto probe_failed_ioconfig; 8676 8677 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", 8678 pdev->device, pdev->irq, ha->reg); 8679 8680 qla4xxx_config_dma_addressing(ha); 8681 8682 /* Initialize lists and spinlocks. */ 8683 INIT_LIST_HEAD(&ha->free_srb_q); 8684 8685 mutex_init(&ha->mbox_sem); 8686 mutex_init(&ha->chap_sem); 8687 init_completion(&ha->mbx_intr_comp); 8688 init_completion(&ha->disable_acb_comp); 8689 init_completion(&ha->idc_comp); 8690 init_completion(&ha->link_up_comp); 8691 8692 spin_lock_init(&ha->hardware_lock); 8693 spin_lock_init(&ha->work_lock); 8694 8695 /* Initialize work list */ 8696 INIT_LIST_HEAD(&ha->work_list); 8697 8698 /* Allocate dma buffers */ 8699 if (qla4xxx_mem_alloc(ha)) { 8700 ql4_printk(KERN_WARNING, ha, 8701 "[ERROR] Failed to allocate memory for adapter\n"); 8702 8703 ret = -ENOMEM; 8704 goto probe_failed; 8705 } 8706 8707 host->cmd_per_lun = 3; 8708 host->max_channel = 0; 8709 host->max_lun = MAX_LUNS - 1; 8710 host->max_id = MAX_TARGETS; 8711 host->max_cmd_len = IOCB_MAX_CDB_LEN; 8712 host->can_queue = MAX_SRBS ; 8713 host->transportt = qla4xxx_scsi_transport; 8714 8715 pci_set_drvdata(pdev, ha); 8716 8717 ret = scsi_add_host(host, &pdev->dev); 8718 if (ret) 8719 goto probe_failed; 8720 8721 if (is_qla80XX(ha)) 8722 qla4_8xxx_get_flash_info(ha); 8723 8724 if (is_qla8032(ha) || is_qla8042(ha)) { 8725 qla4_83xx_read_reset_template(ha); 8726 /* 8727 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. 
8728 * If DONRESET_BIT0 is set, drivers should not set dev_state 8729 * to NEED_RESET. But if NEED_RESET is set, drivers should 8730 * should honor the reset. 8731 */ 8732 if (ql4xdontresethba == 1) 8733 qla4_83xx_set_idc_dontreset(ha); 8734 } 8735 8736 /* 8737 * Initialize the Host adapter request/response queues and 8738 * firmware 8739 * NOTE: interrupts enabled upon successful completion 8740 */ 8741 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 8742 8743 /* Dont retry adapter initialization if IRQ allocation failed */ 8744 if (is_qla80XX(ha) && (status == QLA_ERROR)) 8745 goto skip_retry_init; 8746 8747 while ((!test_bit(AF_ONLINE, &ha->flags)) && 8748 init_retry_count++ < MAX_INIT_RETRIES) { 8749 8750 if (is_qla80XX(ha)) { 8751 ha->isp_ops->idc_lock(ha); 8752 dev_state = qla4_8xxx_rd_direct(ha, 8753 QLA8XXX_CRB_DEV_STATE); 8754 ha->isp_ops->idc_unlock(ha); 8755 if (dev_state == QLA8XXX_DEV_FAILED) { 8756 ql4_printk(KERN_WARNING, ha, "%s: don't retry " 8757 "initialize adapter. H/W is in failed state\n", 8758 __func__); 8759 break; 8760 } 8761 } 8762 DEBUG2(printk("scsi: %s: retrying adapter initialization " 8763 "(%d)\n", __func__, init_retry_count)); 8764 8765 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) 8766 continue; 8767 8768 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 8769 if (is_qla80XX(ha) && (status == QLA_ERROR)) { 8770 if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR) 8771 goto skip_retry_init; 8772 } 8773 } 8774 8775 skip_retry_init: 8776 if (!test_bit(AF_ONLINE, &ha->flags)) { 8777 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); 8778 8779 if ((is_qla8022(ha) && ql4xdontresethba) || 8780 ((is_qla8032(ha) || is_qla8042(ha)) && 8781 qla4_83xx_idc_dontreset(ha))) { 8782 /* Put the device in failed state. */ 8783 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); 8784 ha->isp_ops->idc_lock(ha); 8785 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 8786 QLA8XXX_DEV_FAILED); 8787 ha->isp_ops->idc_unlock(ha); 8788 } 8789 ret = -ENODEV; 8790 goto remove_host; 8791 } 8792 8793 /* Startup the kernel thread for this host adapter. */ 8794 DEBUG2(printk("scsi: %s: Starting kernel thread for " 8795 "qla4xxx_dpc\n", __func__)); 8796 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); 8797 ha->dpc_thread = create_singlethread_workqueue(buf); 8798 if (!ha->dpc_thread) { 8799 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); 8800 ret = -ENODEV; 8801 goto remove_host; 8802 } 8803 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 8804 8805 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, 8806 ha->host_no); 8807 if (!ha->task_wq) { 8808 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); 8809 ret = -ENODEV; 8810 goto remove_host; 8811 } 8812 8813 /* 8814 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc 8815 * (which is called indirectly by qla4xxx_initialize_adapter), 8816 * so that irqs will be registered after crbinit but before 8817 * mbx_intr_enable. 8818 */ 8819 if (is_qla40XX(ha)) { 8820 ret = qla4xxx_request_irqs(ha); 8821 if (ret) { 8822 ql4_printk(KERN_WARNING, ha, "Failed to reserve " 8823 "interrupt %d already in use.\n", pdev->irq); 8824 goto remove_host; 8825 } 8826 } 8827 8828 pci_save_state(ha->pdev); 8829 ha->isp_ops->enable_intrs(ha); 8830 8831 /* Start timer thread. 
*/ 8832 qla4xxx_start_timer(ha, 1); 8833 8834 set_bit(AF_INIT_DONE, &ha->flags); 8835 8836 qla4_8xxx_alloc_sysfs_attr(ha); 8837 8838 printk(KERN_INFO 8839 " QLogic iSCSI HBA Driver version: %s\n" 8840 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 8841 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 8842 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, 8843 ha->fw_info.fw_patch, ha->fw_info.fw_build); 8844 8845 /* Set the driver version */ 8846 if (is_qla80XX(ha)) 8847 qla4_8xxx_set_param(ha, SET_DRVR_VERSION); 8848 8849 if (qla4xxx_setup_boot_info(ha)) 8850 ql4_printk(KERN_ERR, ha, 8851 "%s: No iSCSI boot target configured\n", __func__); 8852 8853 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); 8854 /* Perform the build ddb list and login to each */ 8855 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 8856 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 8857 qla4xxx_wait_login_resp_boot_tgt(ha); 8858 8859 qla4xxx_create_chap_list(ha); 8860 8861 qla4xxx_create_ifaces(ha); 8862 return 0; 8863 8864 remove_host: 8865 scsi_remove_host(ha->host); 8866 8867 probe_failed: 8868 qla4xxx_free_adapter(ha); 8869 8870 probe_failed_ioconfig: 8871 pci_disable_pcie_error_reporting(pdev); 8872 scsi_host_put(ha->host); 8873 8874 probe_disable_device: 8875 pci_disable_device(pdev); 8876 8877 return ret; 8878 } 8879 8880 /** 8881 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize 8882 * @ha: pointer to adapter structure 8883 * 8884 * Mark the other ISP-4xxx port to indicate that the driver is being removed, 8885 * so that the other port will not re-initialize while in the process of 8886 * removing the ha due to driver unload or hba hotplug. 8887 **/ 8888 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) 8889 { 8890 struct scsi_qla_host *other_ha = NULL; 8891 struct pci_dev *other_pdev = NULL; 8892 int fn = ISP4XXX_PCI_FN_2; 8893 8894 /*iscsi function numbers for ISP4xxx is 1 and 3*/ 8895 if (PCI_FUNC(ha->pdev->devfn) & BIT_1) 8896 fn = ISP4XXX_PCI_FN_1; 8897 8898 other_pdev = 8899 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 8900 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 8901 fn)); 8902 8903 /* Get other_ha if other_pdev is valid and state is enable*/ 8904 if (other_pdev) { 8905 if (atomic_read(&other_pdev->enable_cnt)) { 8906 other_ha = pci_get_drvdata(other_pdev); 8907 if (other_ha) { 8908 set_bit(AF_HA_REMOVAL, &other_ha->flags); 8909 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " 8910 "Prevent %s reinit\n", __func__, 8911 dev_name(&other_ha->pdev->dev))); 8912 } 8913 } 8914 pci_dev_put(other_pdev); 8915 } 8916 } 8917 8918 static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha, 8919 struct ddb_entry *ddb_entry) 8920 { 8921 struct dev_db_entry *fw_ddb_entry = NULL; 8922 dma_addr_t fw_ddb_entry_dma; 8923 unsigned long wtime; 8924 uint32_t ddb_state; 8925 int options; 8926 int status; 8927 8928 options = LOGOUT_OPTION_CLOSE_SESSION; 8929 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { 8930 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 8931 goto clear_ddb; 8932 } 8933 8934 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8935 &fw_ddb_entry_dma, GFP_KERNEL); 8936 if (!fw_ddb_entry) { 8937 ql4_printk(KERN_ERR, ha, 8938 "%s: Unable to allocate dma buffer\n", __func__); 8939 goto clear_ddb; 8940 } 8941 8942 wtime = jiffies + (HZ * LOGOUT_TOV); 8943 do { 8944 status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 8945 fw_ddb_entry, 
fw_ddb_entry_dma, 8946 NULL, NULL, &ddb_state, NULL, 8947 NULL, NULL); 8948 if (status == QLA_ERROR) 8949 goto free_ddb; 8950 8951 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 8952 (ddb_state == DDB_DS_SESSION_FAILED)) 8953 goto free_ddb; 8954 8955 schedule_timeout_uninterruptible(HZ); 8956 } while ((time_after(wtime, jiffies))); 8957 8958 free_ddb: 8959 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8960 fw_ddb_entry, fw_ddb_entry_dma); 8961 clear_ddb: 8962 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 8963 } 8964 8965 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) 8966 { 8967 struct ddb_entry *ddb_entry; 8968 int idx; 8969 8970 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 8971 8972 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8973 if ((ddb_entry != NULL) && 8974 (ddb_entry->ddb_type == FLASH_DDB)) { 8975 8976 qla4xxx_destroy_ddb(ha, ddb_entry); 8977 /* 8978 * we have decremented the reference count of the driver 8979 * when we setup the session to have the driver unload 8980 * to be seamless without actually destroying the 8981 * session 8982 **/ 8983 try_module_get(qla4xxx_iscsi_transport.owner); 8984 iscsi_destroy_endpoint(ddb_entry->conn->ep); 8985 qla4xxx_free_ddb(ha, ddb_entry); 8986 iscsi_session_teardown(ddb_entry->sess); 8987 } 8988 } 8989 } 8990 /** 8991 * qla4xxx_remove_adapter - callback function to remove adapter. 8992 * @pdev: PCI device pointer 8993 **/ 8994 static void qla4xxx_remove_adapter(struct pci_dev *pdev) 8995 { 8996 struct scsi_qla_host *ha; 8997 8998 /* 8999 * If the PCI device is disabled then it means probe_adapter had 9000 * failed and resources already cleaned up on probe_adapter exit. 9001 */ 9002 if (!pci_is_enabled(pdev)) 9003 return; 9004 9005 ha = pci_get_drvdata(pdev); 9006 9007 if (is_qla40XX(ha)) 9008 qla4xxx_prevent_other_port_reinit(ha); 9009 9010 /* destroy iface from sysfs */ 9011 qla4xxx_destroy_ifaces(ha); 9012 9013 if ((!ql4xdisablesysfsboot) && ha->boot_kset) 9014 iscsi_boot_destroy_kset(ha->boot_kset); 9015 9016 qla4xxx_destroy_fw_ddb_session(ha); 9017 qla4_8xxx_free_sysfs_attr(ha); 9018 9019 qla4xxx_sysfs_ddb_remove(ha); 9020 scsi_remove_host(ha->host); 9021 9022 qla4xxx_free_adapter(ha); 9023 9024 scsi_host_put(ha->host); 9025 9026 pci_disable_pcie_error_reporting(pdev); 9027 pci_disable_device(pdev); 9028 } 9029 9030 /** 9031 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. 9032 * @ha: HA context 9033 */ 9034 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 9035 { 9036 /* Update our PCI device dma_mask for full 64 bit mask */ 9037 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { 9038 dev_dbg(&ha->pdev->dev, 9039 "Failed to set 64 bit PCI consistent mask; " 9040 "using 32 bit.\n"); 9041 dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32)); 9042 } 9043 } 9044 9045 static int qla4xxx_slave_alloc(struct scsi_device *sdev) 9046 { 9047 struct iscsi_cls_session *cls_sess; 9048 struct iscsi_session *sess; 9049 struct ddb_entry *ddb; 9050 int queue_depth = QL4_DEF_QDEPTH; 9051 9052 cls_sess = starget_to_session(sdev->sdev_target); 9053 sess = cls_sess->dd_data; 9054 ddb = sess->dd_data; 9055 9056 sdev->hostdata = ddb; 9057 9058 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 9059 queue_depth = ql4xmaxqdepth; 9060 9061 scsi_change_queue_depth(sdev, queue_depth); 9062 return 0; 9063 } 9064 9065 /** 9066 * qla4xxx_del_from_active_array - returns an active srb 9067 * @ha: Pointer to host adapter structure. 
9068 * @index: index into the active_array 9069 * 9070 * This routine removes and returns the srb at the specified index 9071 **/ 9072 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, 9073 uint32_t index) 9074 { 9075 struct srb *srb = NULL; 9076 struct scsi_cmnd *cmd = NULL; 9077 9078 cmd = scsi_host_find_tag(ha->host, index); 9079 if (!cmd) 9080 return srb; 9081 9082 srb = (struct srb *)CMD_SP(cmd); 9083 if (!srb) 9084 return srb; 9085 9086 /* update counters */ 9087 if (srb->flags & SRB_DMA_VALID) { 9088 ha->iocb_cnt -= srb->iocb_cnt; 9089 if (srb->cmd) 9090 srb->cmd->host_scribble = 9091 (unsigned char *)(unsigned long) MAX_SRBS; 9092 } 9093 return srb; 9094 } 9095 9096 /** 9097 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 9098 * @ha: Pointer to host adapter structure. 9099 * @cmd: Scsi Command to wait on. 9100 * 9101 * This routine waits for the command to be returned by the Firmware 9102 * for some max time. 9103 **/ 9104 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, 9105 struct scsi_cmnd *cmd) 9106 { 9107 int done = 0; 9108 struct srb *rp; 9109 uint32_t max_wait_time = EH_WAIT_CMD_TOV; 9110 int ret = SUCCESS; 9111 9112 /* Dont wait on command if PCI error is being handled 9113 * by PCI AER driver 9114 */ 9115 if (unlikely(pci_channel_offline(ha->pdev)) || 9116 (test_bit(AF_EEH_BUSY, &ha->flags))) { 9117 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n", 9118 ha->host_no, __func__); 9119 return ret; 9120 } 9121 9122 do { 9123 /* Checking to see if its returned to OS */ 9124 rp = (struct srb *) CMD_SP(cmd); 9125 if (rp == NULL) { 9126 done++; 9127 break; 9128 } 9129 9130 msleep(2000); 9131 } while (max_wait_time--); 9132 9133 return done; 9134 } 9135 9136 /** 9137 * qla4xxx_wait_for_hba_online - waits for HBA to come online 9138 * @ha: Pointer to host adapter structure 9139 **/ 9140 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) 9141 { 9142 unsigned long wait_online; 9143 9144 wait_online = jiffies + (HBA_ONLINE_TOV * HZ); 9145 while (time_before(jiffies, wait_online)) { 9146 9147 if (adapter_up(ha)) 9148 return QLA_SUCCESS; 9149 9150 msleep(2000); 9151 } 9152 9153 return QLA_ERROR; 9154 } 9155 9156 /** 9157 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish. 9158 * @ha: pointer to HBA 9159 * @stgt: pointer to SCSI target 9160 * @sdev: pointer to SCSI device 9161 * 9162 * This function waits for all outstanding commands to a lun to complete. It 9163 * returns 0 if all pending commands are returned and 1 otherwise. 9164 **/ 9165 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha, 9166 struct scsi_target *stgt, 9167 struct scsi_device *sdev) 9168 { 9169 int cnt; 9170 int status = 0; 9171 struct scsi_cmnd *cmd; 9172 9173 /* 9174 * Waiting for all commands for the designated target or dev 9175 * in the active array 9176 */ 9177 for (cnt = 0; cnt < ha->host->can_queue; cnt++) { 9178 cmd = scsi_host_find_tag(ha->host, cnt); 9179 if (cmd && stgt == scsi_target(cmd->device) && 9180 (!sdev || sdev == cmd->device)) { 9181 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9182 status++; 9183 break; 9184 } 9185 } 9186 } 9187 return status; 9188 } 9189 9190 /** 9191 * qla4xxx_eh_abort - callback for abort task. 9192 * @cmd: Pointer to Linux's SCSI command structure 9193 * 9194 * This routine is called by the Linux OS to abort the specified 9195 * command. 
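 *
 * Return: SUCCESS if the command was aborted or had already completed,
 * FAILED if the registers are unreachable, the abort mailbox command fails,
 * or the command does not complete within the abort wait time.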
9196 **/ 9197 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) 9198 { 9199 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9200 unsigned int id = cmd->device->id; 9201 uint64_t lun = cmd->device->lun; 9202 unsigned long flags; 9203 struct srb *srb = NULL; 9204 int ret = SUCCESS; 9205 int wait = 0; 9206 int rval; 9207 9208 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", 9209 ha->host_no, id, lun, cmd, cmd->cmnd[0]); 9210 9211 rval = qla4xxx_isp_check_reg(ha); 9212 if (rval != QLA_SUCCESS) { 9213 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9214 return FAILED; 9215 } 9216 9217 spin_lock_irqsave(&ha->hardware_lock, flags); 9218 srb = (struct srb *) CMD_SP(cmd); 9219 if (!srb) { 9220 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9221 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n", 9222 ha->host_no, id, lun); 9223 return SUCCESS; 9224 } 9225 kref_get(&srb->srb_ref); 9226 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9227 9228 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { 9229 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n", 9230 ha->host_no, id, lun)); 9231 ret = FAILED; 9232 } else { 9233 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n", 9234 ha->host_no, id, lun)); 9235 wait = 1; 9236 } 9237 9238 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 9239 9240 /* Wait for command to complete */ 9241 if (wait) { 9242 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9243 DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n", 9244 ha->host_no, id, lun)); 9245 ret = FAILED; 9246 } 9247 } 9248 9249 ql4_printk(KERN_INFO, ha, 9250 "scsi%ld:%d:%llu: Abort command - %s\n", 9251 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed"); 9252 9253 return ret; 9254 } 9255 9256 /** 9257 * qla4xxx_eh_device_reset - callback for target reset. 9258 * @cmd: Pointer to Linux's SCSI command structure 9259 * 9260 * This routine is called by the Linux OS to reset all luns on the 9261 * specified target. 9262 **/ 9263 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) 9264 { 9265 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9266 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9267 int ret = FAILED, stat; 9268 int rval; 9269 9270 if (!ddb_entry) 9271 return ret; 9272 9273 ret = iscsi_block_scsi_eh(cmd); 9274 if (ret) 9275 return ret; 9276 ret = FAILED; 9277 9278 ql4_printk(KERN_INFO, ha, 9279 "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no, 9280 cmd->device->channel, cmd->device->id, cmd->device->lun); 9281 9282 DEBUG2(printk(KERN_INFO 9283 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 9284 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 9285 cmd, jiffies, cmd->request->timeout / HZ, 9286 ha->dpc_flags, cmd->result, cmd->allowed)); 9287 9288 rval = qla4xxx_isp_check_reg(ha); 9289 if (rval != QLA_SUCCESS) { 9290 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9291 return FAILED; 9292 } 9293 9294 /* FIXME: wait for hba to go online */ 9295 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); 9296 if (stat != QLA_SUCCESS) { 9297 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat); 9298 goto eh_dev_reset_done; 9299 } 9300 9301 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9302 cmd->device)) { 9303 ql4_printk(KERN_INFO, ha, 9304 "DEVICE RESET FAILED - waiting for " 9305 "commands.\n"); 9306 goto eh_dev_reset_done; 9307 } 9308 9309 /* Send marker. 
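 * The marker IOCB notifies the firmware that the LUN reset has completed
 * so normal command processing can resume on this LUN; if it cannot be
 * sent, the device reset is reported as failed.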
*/ 9310 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9311 MM_LUN_RESET) != QLA_SUCCESS) 9312 goto eh_dev_reset_done; 9313 9314 ql4_printk(KERN_INFO, ha, 9315 "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", 9316 ha->host_no, cmd->device->channel, cmd->device->id, 9317 cmd->device->lun); 9318 9319 ret = SUCCESS; 9320 9321 eh_dev_reset_done: 9322 9323 return ret; 9324 } 9325 9326 /** 9327 * qla4xxx_eh_target_reset - callback for target reset. 9328 * @cmd: Pointer to Linux's SCSI command structure 9329 * 9330 * This routine is called by the Linux OS to reset the target. 9331 **/ 9332 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) 9333 { 9334 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9335 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9336 int stat, ret; 9337 int rval; 9338 9339 if (!ddb_entry) 9340 return FAILED; 9341 9342 ret = iscsi_block_scsi_eh(cmd); 9343 if (ret) 9344 return ret; 9345 9346 starget_printk(KERN_INFO, scsi_target(cmd->device), 9347 "WARM TARGET RESET ISSUED.\n"); 9348 9349 DEBUG2(printk(KERN_INFO 9350 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 9351 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 9352 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, 9353 ha->dpc_flags, cmd->result, cmd->allowed)); 9354 9355 rval = qla4xxx_isp_check_reg(ha); 9356 if (rval != QLA_SUCCESS) { 9357 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9358 return FAILED; 9359 } 9360 9361 stat = qla4xxx_reset_target(ha, ddb_entry); 9362 if (stat != QLA_SUCCESS) { 9363 starget_printk(KERN_INFO, scsi_target(cmd->device), 9364 "WARM TARGET RESET FAILED.\n"); 9365 return FAILED; 9366 } 9367 9368 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9369 NULL)) { 9370 starget_printk(KERN_INFO, scsi_target(cmd->device), 9371 "WARM TARGET DEVICE RESET FAILED - " 9372 "waiting for commands.\n"); 9373 return FAILED; 9374 } 9375 9376 /* Send marker. */ 9377 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9378 MM_TGT_WARM_RESET) != QLA_SUCCESS) { 9379 starget_printk(KERN_INFO, scsi_target(cmd->device), 9380 "WARM TARGET DEVICE RESET FAILED - " 9381 "marker iocb failed.\n"); 9382 return FAILED; 9383 } 9384 9385 starget_printk(KERN_INFO, scsi_target(cmd->device), 9386 "WARM TARGET RESET SUCCEEDED.\n"); 9387 return SUCCESS; 9388 } 9389 9390 /** 9391 * qla4xxx_is_eh_active - check if error handler is running 9392 * @shost: Pointer to SCSI Host struct 9393 * 9394 * This routine finds that if reset host is called in EH 9395 * scenario or from some application like sg_reset 9396 **/ 9397 static int qla4xxx_is_eh_active(struct Scsi_Host *shost) 9398 { 9399 if (shost->shost_state == SHOST_RECOVERY) 9400 return 1; 9401 return 0; 9402 } 9403 9404 /** 9405 * qla4xxx_eh_host_reset - kernel callback 9406 * @cmd: Pointer to Linux's SCSI command structure 9407 * 9408 * This routine is invoked by the Linux kernel to perform fatal error 9409 * recovery on the specified adapter. 
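 *
 * Return: SUCCESS if the adapter was recovered, FAILED otherwise (for
 * example when ql4xdontresethba forbids the reset, the registers are
 * unreachable, or the adapter never comes back online).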
9410 **/ 9411 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) 9412 { 9413 int return_status = FAILED; 9414 struct scsi_qla_host *ha; 9415 int rval; 9416 9417 ha = to_qla_host(cmd->device->host); 9418 9419 rval = qla4xxx_isp_check_reg(ha); 9420 if (rval != QLA_SUCCESS) { 9421 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9422 return FAILED; 9423 } 9424 9425 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) 9426 qla4_83xx_set_idc_dontreset(ha); 9427 9428 /* 9429 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other 9430 * protocol drivers, we should not set device_state to NEED_RESET 9431 */ 9432 if (ql4xdontresethba || 9433 ((is_qla8032(ha) || is_qla8042(ha)) && 9434 qla4_83xx_idc_dontreset(ha))) { 9435 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 9436 ha->host_no, __func__)); 9437 9438 /* Clear outstanding srb in queues */ 9439 if (qla4xxx_is_eh_active(cmd->device->host)) 9440 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16); 9441 9442 return FAILED; 9443 } 9444 9445 ql4_printk(KERN_INFO, ha, 9446 "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, 9447 cmd->device->channel, cmd->device->id, cmd->device->lun); 9448 9449 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { 9450 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter " 9451 "DEAD.\n", ha->host_no, cmd->device->channel, 9452 __func__)); 9453 9454 return FAILED; 9455 } 9456 9457 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9458 if (is_qla80XX(ha)) 9459 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 9460 else 9461 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9462 } 9463 9464 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS) 9465 return_status = SUCCESS; 9466 9467 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n", 9468 return_status == FAILED ? "FAILED" : "SUCCEEDED"); 9469 9470 return return_status; 9471 } 9472 9473 static int qla4xxx_context_reset(struct scsi_qla_host *ha) 9474 { 9475 uint32_t mbox_cmd[MBOX_REG_COUNT]; 9476 uint32_t mbox_sts[MBOX_REG_COUNT]; 9477 struct addr_ctrl_blk_def *acb = NULL; 9478 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def); 9479 int rval = QLA_SUCCESS; 9480 dma_addr_t acb_dma; 9481 9482 acb = dma_alloc_coherent(&ha->pdev->dev, 9483 sizeof(struct addr_ctrl_blk_def), 9484 &acb_dma, GFP_KERNEL); 9485 if (!acb) { 9486 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", 9487 __func__); 9488 rval = -ENOMEM; 9489 goto exit_port_reset; 9490 } 9491 9492 memset(acb, 0, acb_len); 9493 9494 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len); 9495 if (rval != QLA_SUCCESS) { 9496 rval = -EIO; 9497 goto exit_free_acb; 9498 } 9499 9500 rval = qla4xxx_disable_acb(ha); 9501 if (rval != QLA_SUCCESS) { 9502 rval = -EIO; 9503 goto exit_free_acb; 9504 } 9505 9506 wait_for_completion_timeout(&ha->disable_acb_comp, 9507 DISABLE_ACB_TOV * HZ); 9508 9509 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); 9510 if (rval != QLA_SUCCESS) { 9511 rval = -EIO; 9512 goto exit_free_acb; 9513 } 9514 9515 exit_free_acb: 9516 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), 9517 acb, acb_dma); 9518 exit_port_reset: 9519 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, 9520 rval == QLA_SUCCESS ? 
"SUCCEEDED" : "FAILED")); 9521 return rval; 9522 } 9523 9524 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) 9525 { 9526 struct scsi_qla_host *ha = to_qla_host(shost); 9527 int rval = QLA_SUCCESS; 9528 uint32_t idc_ctrl; 9529 9530 if (ql4xdontresethba) { 9531 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", 9532 __func__)); 9533 rval = -EPERM; 9534 goto exit_host_reset; 9535 } 9536 9537 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 9538 goto recover_adapter; 9539 9540 switch (reset_type) { 9541 case SCSI_ADAPTER_RESET: 9542 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9543 break; 9544 case SCSI_FIRMWARE_RESET: 9545 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9546 if (is_qla80XX(ha)) 9547 /* set firmware context reset */ 9548 set_bit(DPC_RESET_HA_FW_CONTEXT, 9549 &ha->dpc_flags); 9550 else { 9551 rval = qla4xxx_context_reset(ha); 9552 goto exit_host_reset; 9553 } 9554 } 9555 break; 9556 } 9557 9558 recover_adapter: 9559 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if 9560 * reset is issued by application */ 9561 if ((is_qla8032(ha) || is_qla8042(ha)) && 9562 test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9563 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); 9564 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, 9565 (idc_ctrl | GRACEFUL_RESET_BIT1)); 9566 } 9567 9568 rval = qla4xxx_recover_adapter(ha); 9569 if (rval != QLA_SUCCESS) { 9570 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", 9571 __func__)); 9572 rval = -EIO; 9573 } 9574 9575 exit_host_reset: 9576 return rval; 9577 } 9578 9579 /* PCI AER driver recovers from all correctable errors w/o 9580 * driver intervention. For uncorrectable errors PCI AER 9581 * driver calls the following device driver's callbacks 9582 * 9583 * - Fatal Errors - link_reset 9584 * - Non-Fatal Errors - driver's error_detected() which 9585 * returns CAN_RECOVER, NEED_RESET or DISCONNECT. 9586 * 9587 * PCI AER driver calls 9588 * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled() 9589 * returns RECOVERED or NEED_RESET if fw_hung 9590 * NEED_RESET - driver's slot_reset() 9591 * DISCONNECT - device is dead & cannot recover 9592 * RECOVERED - driver's resume() 9593 */ 9594 static pci_ers_result_t 9595 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9596 { 9597 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9598 9599 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", 9600 ha->host_no, __func__, state); 9601 9602 if (!is_aer_supported(ha)) 9603 return PCI_ERS_RESULT_NONE; 9604 9605 switch (state) { 9606 case pci_channel_io_normal: 9607 clear_bit(AF_EEH_BUSY, &ha->flags); 9608 return PCI_ERS_RESULT_CAN_RECOVER; 9609 case pci_channel_io_frozen: 9610 set_bit(AF_EEH_BUSY, &ha->flags); 9611 qla4xxx_mailbox_premature_completion(ha); 9612 qla4xxx_free_irqs(ha); 9613 pci_disable_device(pdev); 9614 /* Return back all IOs */ 9615 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 9616 return PCI_ERS_RESULT_NEED_RESET; 9617 case pci_channel_io_perm_failure: 9618 set_bit(AF_EEH_BUSY, &ha->flags); 9619 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); 9620 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 9621 return PCI_ERS_RESULT_DISCONNECT; 9622 } 9623 return PCI_ERS_RESULT_NEED_RESET; 9624 } 9625 9626 /** 9627 * qla4xxx_pci_mmio_enabled() - gets called if 9628 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER 9629 * and read/write to the device still works. 
9630 * @pdev: PCI device pointer 9631 **/ 9632 static pci_ers_result_t 9633 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) 9634 { 9635 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9636 9637 if (!is_aer_supported(ha)) 9638 return PCI_ERS_RESULT_NONE; 9639 9640 return PCI_ERS_RESULT_RECOVERED; 9641 } 9642 9643 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) 9644 { 9645 uint32_t rval = QLA_ERROR; 9646 int fn; 9647 struct pci_dev *other_pdev = NULL; 9648 9649 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__); 9650 9651 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9652 9653 if (test_bit(AF_ONLINE, &ha->flags)) { 9654 clear_bit(AF_ONLINE, &ha->flags); 9655 clear_bit(AF_LINK_UP, &ha->flags); 9656 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 9657 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 9658 } 9659 9660 fn = PCI_FUNC(ha->pdev->devfn); 9661 if (is_qla8022(ha)) { 9662 while (fn > 0) { 9663 fn--; 9664 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", 9665 ha->host_no, __func__, fn); 9666 /* Get the pci device given the domain, bus, 9667 * slot/function number */ 9668 other_pdev = pci_get_domain_bus_and_slot( 9669 pci_domain_nr(ha->pdev->bus), 9670 ha->pdev->bus->number, 9671 PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 9672 fn)); 9673 9674 if (!other_pdev) 9675 continue; 9676 9677 if (atomic_read(&other_pdev->enable_cnt)) { 9678 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", 9679 ha->host_no, __func__, fn); 9680 pci_dev_put(other_pdev); 9681 break; 9682 } 9683 pci_dev_put(other_pdev); 9684 } 9685 } else { 9686 /* this case is meant for ISP83xx/ISP84xx only */ 9687 if (qla4_83xx_can_perform_reset(ha)) { 9688 /* reset fn as iSCSI is going to perform the reset */ 9689 fn = 0; 9690 } 9691 } 9692 9693 /* The first function on the card, the reset owner will 9694 * start & initialize the firmware. 
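 * (below, the reset owner moves the IDC device state to COLD and, once the
 * firmware is initialized again, to READY).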
The other functions 9695 * on the card will reset the firmware context 9696 */ 9697 if (!fn) { 9698 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " 9699 "0x%x is the owner\n", ha->host_no, __func__, 9700 ha->pdev->devfn); 9701 9702 ha->isp_ops->idc_lock(ha); 9703 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9704 QLA8XXX_DEV_COLD); 9705 ha->isp_ops->idc_unlock(ha); 9706 9707 rval = qla4_8xxx_update_idc_reg(ha); 9708 if (rval == QLA_ERROR) { 9709 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n", 9710 ha->host_no, __func__); 9711 ha->isp_ops->idc_lock(ha); 9712 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9713 QLA8XXX_DEV_FAILED); 9714 ha->isp_ops->idc_unlock(ha); 9715 goto exit_error_recovery; 9716 } 9717 9718 clear_bit(AF_FW_RECOVERY, &ha->flags); 9719 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9720 9721 if (rval != QLA_SUCCESS) { 9722 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9723 "FAILED\n", ha->host_no, __func__); 9724 qla4xxx_free_irqs(ha); 9725 ha->isp_ops->idc_lock(ha); 9726 qla4_8xxx_clear_drv_active(ha); 9727 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9728 QLA8XXX_DEV_FAILED); 9729 ha->isp_ops->idc_unlock(ha); 9730 } else { 9731 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9732 "READY\n", ha->host_no, __func__); 9733 ha->isp_ops->idc_lock(ha); 9734 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9735 QLA8XXX_DEV_READY); 9736 /* Clear driver state register */ 9737 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); 9738 qla4_8xxx_set_drv_active(ha); 9739 ha->isp_ops->idc_unlock(ha); 9740 ha->isp_ops->enable_intrs(ha); 9741 } 9742 } else { 9743 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 9744 "the reset owner\n", ha->host_no, __func__, 9745 ha->pdev->devfn); 9746 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) == 9747 QLA8XXX_DEV_READY)) { 9748 clear_bit(AF_FW_RECOVERY, &ha->flags); 9749 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9750 if (rval == QLA_SUCCESS) 9751 ha->isp_ops->enable_intrs(ha); 9752 else 9753 qla4xxx_free_irqs(ha); 9754 9755 ha->isp_ops->idc_lock(ha); 9756 qla4_8xxx_set_drv_active(ha); 9757 ha->isp_ops->idc_unlock(ha); 9758 } 9759 } 9760 exit_error_recovery: 9761 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9762 return rval; 9763 } 9764 9765 static pci_ers_result_t 9766 qla4xxx_pci_slot_reset(struct pci_dev *pdev) 9767 { 9768 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 9769 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9770 int rc; 9771 9772 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", 9773 ha->host_no, __func__); 9774 9775 if (!is_aer_supported(ha)) 9776 return PCI_ERS_RESULT_NONE; 9777 9778 /* Restore the saved state of PCIe device - 9779 * BAR registers, PCI Config space, PCIX, MSI, 9780 * IOV states 9781 */ 9782 pci_restore_state(pdev); 9783 9784 /* pci_restore_state() clears the saved_state flag of the device 9785 * save restored state which resets saved_state flag 9786 */ 9787 pci_save_state(pdev); 9788 9789 /* Initialize device or resume if in suspended state */ 9790 rc = pci_enable_device(pdev); 9791 if (rc) { 9792 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable " 9793 "device after reset\n", ha->host_no, __func__); 9794 goto exit_slot_reset; 9795 } 9796 9797 ha->isp_ops->disable_intrs(ha); 9798 9799 if (is_qla80XX(ha)) { 9800 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 9801 ret = PCI_ERS_RESULT_RECOVERED; 9802 goto exit_slot_reset; 9803 } else 9804 goto exit_slot_reset; 9805 } 9806 9807 exit_slot_reset: 9808 ql4_printk(KERN_WARNING, 
ha, "scsi%ld: %s: Return=%x\n" 9809 "device after reset\n", ha->host_no, __func__, ret); 9810 return ret; 9811 } 9812 9813 static void 9814 qla4xxx_pci_resume(struct pci_dev *pdev) 9815 { 9816 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9817 int ret; 9818 9819 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", 9820 ha->host_no, __func__); 9821 9822 ret = qla4xxx_wait_for_hba_online(ha); 9823 if (ret != QLA_SUCCESS) { 9824 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " 9825 "resume I/O from slot/link_reset\n", ha->host_no, 9826 __func__); 9827 } 9828 9829 clear_bit(AF_EEH_BUSY, &ha->flags); 9830 } 9831 9832 static const struct pci_error_handlers qla4xxx_err_handler = { 9833 .error_detected = qla4xxx_pci_error_detected, 9834 .mmio_enabled = qla4xxx_pci_mmio_enabled, 9835 .slot_reset = qla4xxx_pci_slot_reset, 9836 .resume = qla4xxx_pci_resume, 9837 }; 9838 9839 static struct pci_device_id qla4xxx_pci_tbl[] = { 9840 { 9841 .vendor = PCI_VENDOR_ID_QLOGIC, 9842 .device = PCI_DEVICE_ID_QLOGIC_ISP4010, 9843 .subvendor = PCI_ANY_ID, 9844 .subdevice = PCI_ANY_ID, 9845 }, 9846 { 9847 .vendor = PCI_VENDOR_ID_QLOGIC, 9848 .device = PCI_DEVICE_ID_QLOGIC_ISP4022, 9849 .subvendor = PCI_ANY_ID, 9850 .subdevice = PCI_ANY_ID, 9851 }, 9852 { 9853 .vendor = PCI_VENDOR_ID_QLOGIC, 9854 .device = PCI_DEVICE_ID_QLOGIC_ISP4032, 9855 .subvendor = PCI_ANY_ID, 9856 .subdevice = PCI_ANY_ID, 9857 }, 9858 { 9859 .vendor = PCI_VENDOR_ID_QLOGIC, 9860 .device = PCI_DEVICE_ID_QLOGIC_ISP8022, 9861 .subvendor = PCI_ANY_ID, 9862 .subdevice = PCI_ANY_ID, 9863 }, 9864 { 9865 .vendor = PCI_VENDOR_ID_QLOGIC, 9866 .device = PCI_DEVICE_ID_QLOGIC_ISP8324, 9867 .subvendor = PCI_ANY_ID, 9868 .subdevice = PCI_ANY_ID, 9869 }, 9870 { 9871 .vendor = PCI_VENDOR_ID_QLOGIC, 9872 .device = PCI_DEVICE_ID_QLOGIC_ISP8042, 9873 .subvendor = PCI_ANY_ID, 9874 .subdevice = PCI_ANY_ID, 9875 }, 9876 {0, 0}, 9877 }; 9878 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 9879 9880 static struct pci_driver qla4xxx_pci_driver = { 9881 .name = DRIVER_NAME, 9882 .id_table = qla4xxx_pci_tbl, 9883 .probe = qla4xxx_probe_adapter, 9884 .remove = qla4xxx_remove_adapter, 9885 .err_handler = &qla4xxx_err_handler, 9886 }; 9887 9888 static int __init qla4xxx_module_init(void) 9889 { 9890 int ret; 9891 9892 if (ql4xqfulltracking) 9893 qla4xxx_driver_template.track_queue_depth = 1; 9894 9895 /* Allocate cache for SRBs. */ 9896 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 9897 SLAB_HWCACHE_ALIGN, NULL); 9898 if (srb_cachep == NULL) { 9899 printk(KERN_ERR 9900 "%s: Unable to allocate SRB cache..." 9901 "Failing load!\n", DRIVER_NAME); 9902 ret = -ENOMEM; 9903 goto no_srp_cache; 9904 } 9905 9906 /* Derive version string. 
*/ 9907 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION); 9908 if (ql4xextended_error_logging) 9909 strcat(qla4xxx_version_str, "-debug"); 9910 9911 qla4xxx_scsi_transport = 9912 iscsi_register_transport(&qla4xxx_iscsi_transport); 9913 if (!qla4xxx_scsi_transport){ 9914 ret = -ENODEV; 9915 goto release_srb_cache; 9916 } 9917 9918 ret = pci_register_driver(&qla4xxx_pci_driver); 9919 if (ret) 9920 goto unregister_transport; 9921 9922 printk(KERN_INFO "QLogic iSCSI HBA Driver\n"); 9923 return 0; 9924 9925 unregister_transport: 9926 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9927 release_srb_cache: 9928 kmem_cache_destroy(srb_cachep); 9929 no_srp_cache: 9930 return ret; 9931 } 9932 9933 static void __exit qla4xxx_module_exit(void) 9934 { 9935 pci_unregister_driver(&qla4xxx_pci_driver); 9936 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9937 kmem_cache_destroy(srb_cachep); 9938 } 9939 9940 module_init(qla4xxx_module_init); 9941 module_exit(qla4xxx_module_exit); 9942 9943 MODULE_AUTHOR("QLogic Corporation"); 9944 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver"); 9945 MODULE_LICENSE("GPL"); 9946 MODULE_VERSION(QLA4XXX_DRIVER_VERSION); 9947
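
/*
 * Illustrative note (not driver code): qla4xxx_host_reset() above serves as
 * the SCSI host template's host_reset hook, so an adapter or firmware reset
 * can also be requested from user space through the midlayer's sysfs
 * attribute, e.g.:
 *
 *	echo adapter  > /sys/class/scsi_host/hostN/host_reset
 *	echo firmware > /sys/class/scsi_host/hostN/host_reset
 *
 * "hostN" is a placeholder for the qla4xxx host instance; the two strings
 * map to SCSI_ADAPTER_RESET and SCSI_FIRMWARE_RESET respectively.
 */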