1 /* 2 * QLogic iSCSI HBA Driver 3 * Copyright (c) 2003-2013 QLogic Corporation 4 * 5 * See LICENSE.qla4xxx for copyright and licensing details. 6 */ 7 #include <linux/moduleparam.h> 8 #include <linux/slab.h> 9 #include <linux/blkdev.h> 10 #include <linux/iscsi_boot_sysfs.h> 11 #include <linux/inet.h> 12 13 #include <scsi/scsi_tcq.h> 14 #include <scsi/scsicam.h> 15 16 #include "ql4_def.h" 17 #include "ql4_version.h" 18 #include "ql4_glbl.h" 19 #include "ql4_dbg.h" 20 #include "ql4_inline.h" 21 #include "ql4_83xx.h" 22 23 /* 24 * Driver version 25 */ 26 static char qla4xxx_version_str[40]; 27 28 /* 29 * SRB allocation cache 30 */ 31 static struct kmem_cache *srb_cachep; 32 33 /* 34 * Module parameter information and variables 35 */ 36 static int ql4xdisablesysfsboot = 1; 37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); 38 MODULE_PARM_DESC(ql4xdisablesysfsboot, 39 " Set to disable exporting boot targets to sysfs.\n" 40 "\t\t 0 - Export boot targets\n" 41 "\t\t 1 - Do not export boot targets (Default)"); 42 43 int ql4xdontresethba; 44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); 45 MODULE_PARM_DESC(ql4xdontresethba, 46 " Don't reset the HBA for driver recovery.\n" 47 "\t\t 0 - It will reset HBA (Default)\n" 48 "\t\t 1 - It will NOT reset HBA"); 49 50 int ql4xextended_error_logging; 51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); 52 MODULE_PARM_DESC(ql4xextended_error_logging, 53 " Option to enable extended error logging.\n" 54 "\t\t 0 - no logging (Default)\n" 55 "\t\t 2 - debug logging"); 56 57 int ql4xenablemsix = 1; 58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); 59 MODULE_PARM_DESC(ql4xenablemsix, 60 " Set to enable MSI or MSI-X interrupt mechanism.\n" 61 "\t\t 0 = enable INTx interrupt mechanism.\n" 62 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n" 63 "\t\t 2 = enable MSI interrupt mechanism."); 64 65 #define QL4_DEF_QDEPTH 32 66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH; 67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); 68 MODULE_PARM_DESC(ql4xmaxqdepth, 69 " Maximum queue depth to report for target devices.\n" 70 "\t\t Default: 32."); 71 72 static int ql4xqfulltracking = 1; 73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR); 74 MODULE_PARM_DESC(ql4xqfulltracking, 75 " Enable or disable dynamic tracking and adjustment of\n" 76 "\t\t scsi device queue depth.\n" 77 "\t\t 0 - Disable.\n" 78 "\t\t 1 - Enable. 
(Default)"); 79 80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 82 MODULE_PARM_DESC(ql4xsess_recovery_tmo, 83 " Target Session Recovery Timeout.\n" 84 "\t\t Default: 120 sec."); 85 86 int ql4xmdcapmask = 0; 87 module_param(ql4xmdcapmask, int, S_IRUGO); 88 MODULE_PARM_DESC(ql4xmdcapmask, 89 " Set the Minidump driver capture mask level.\n" 90 "\t\t Default is 0 (firmware default capture mask)\n" 91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); 92 93 int ql4xenablemd = 1; 94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); 95 MODULE_PARM_DESC(ql4xenablemd, 96 " Set to enable minidump.\n" 97 "\t\t 0 - disable minidump\n" 98 "\t\t 1 - enable minidump (Default)"); 99 100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 101 /* 102 * SCSI host template entry points 103 */ 104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); 105 106 /* 107 * iSCSI template entry points 108 */ 109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 110 enum iscsi_param param, char *buf); 111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, 112 enum iscsi_param param, char *buf); 113 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 114 enum iscsi_host_param param, char *buf); 115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, 116 uint32_t len); 117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 118 enum iscsi_param_type param_type, 119 int param, char *buf); 120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); 121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, 122 struct sockaddr *dst_addr, 123 int non_blocking); 124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); 125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); 126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 127 enum iscsi_param param, char *buf); 128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); 129 static struct iscsi_cls_conn * 130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); 131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, 132 struct iscsi_cls_conn *cls_conn, 133 uint64_t transport_fd, int is_leading); 134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); 135 static struct iscsi_cls_session * 136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, 137 uint16_t qdepth, uint32_t initial_cmdsn); 138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); 139 static void qla4xxx_task_work(struct work_struct *wdata); 140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t); 141 static int qla4xxx_task_xmit(struct iscsi_task *); 142 static void qla4xxx_task_cleanup(struct iscsi_task *); 143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); 144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 145 struct iscsi_stats *stats); 146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 147 uint32_t iface_type, uint32_t payload_size, 148 uint32_t pid, struct sockaddr *dst_addr); 149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 150 uint32_t *num_entries, char *buf); 151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); 152 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, 153 int len); 154 static int 
qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len); 155 156 /* 157 * SCSI host template entry points 158 */ 159 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); 160 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); 161 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); 162 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); 163 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); 164 static int qla4xxx_slave_alloc(struct scsi_device *device); 165 static umode_t qla4_attr_is_visible(int param_type, int param); 166 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 167 168 /* 169 * iSCSI Flash DDB sysfs entry points 170 */ 171 static int 172 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 173 struct iscsi_bus_flash_conn *fnode_conn, 174 void *data, int len); 175 static int 176 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 177 int param, char *buf); 178 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 179 int len); 180 static int 181 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess); 182 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 183 struct iscsi_bus_flash_conn *fnode_conn); 184 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 185 struct iscsi_bus_flash_conn *fnode_conn); 186 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess); 187 188 static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 189 QLA82XX_LEGACY_INTR_CONFIG; 190 191 static struct scsi_host_template qla4xxx_driver_template = { 192 .module = THIS_MODULE, 193 .name = DRIVER_NAME, 194 .proc_name = DRIVER_NAME, 195 .queuecommand = qla4xxx_queuecommand, 196 197 .eh_abort_handler = qla4xxx_eh_abort, 198 .eh_device_reset_handler = qla4xxx_eh_device_reset, 199 .eh_target_reset_handler = qla4xxx_eh_target_reset, 200 .eh_host_reset_handler = qla4xxx_eh_host_reset, 201 .eh_timed_out = qla4xxx_eh_cmd_timed_out, 202 203 .slave_alloc = qla4xxx_slave_alloc, 204 .change_queue_depth = scsi_change_queue_depth, 205 206 .this_id = -1, 207 .cmd_per_lun = 3, 208 .use_clustering = ENABLE_CLUSTERING, 209 .sg_tablesize = SG_ALL, 210 211 .max_sectors = 0xFFFF, 212 .shost_attrs = qla4xxx_host_attrs, 213 .host_reset = qla4xxx_host_reset, 214 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, 215 }; 216 217 static struct iscsi_transport qla4xxx_iscsi_transport = { 218 .owner = THIS_MODULE, 219 .name = DRIVER_NAME, 220 .caps = CAP_TEXT_NEGO | 221 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | 222 CAP_DATADGST | CAP_LOGIN_OFFLOAD | 223 CAP_MULTI_R2T, 224 .attr_is_visible = qla4_attr_is_visible, 225 .create_session = qla4xxx_session_create, 226 .destroy_session = qla4xxx_session_destroy, 227 .start_conn = qla4xxx_conn_start, 228 .create_conn = qla4xxx_conn_create, 229 .bind_conn = qla4xxx_conn_bind, 230 .stop_conn = iscsi_conn_stop, 231 .destroy_conn = qla4xxx_conn_destroy, 232 .set_param = iscsi_set_param, 233 .get_conn_param = qla4xxx_conn_get_param, 234 .get_session_param = qla4xxx_session_get_param, 235 .get_ep_param = qla4xxx_get_ep_param, 236 .ep_connect = qla4xxx_ep_connect, 237 .ep_poll = qla4xxx_ep_poll, 238 .ep_disconnect = qla4xxx_ep_disconnect, 239 .get_stats = qla4xxx_conn_get_stats, 240 .send_pdu = iscsi_conn_send_pdu, 241 .xmit_task = qla4xxx_task_xmit, 242 .cleanup_task = qla4xxx_task_cleanup, 243 .alloc_pdu = qla4xxx_alloc_pdu, 244 245 .get_host_param = qla4xxx_host_get_param, 
246 .set_iface_param = qla4xxx_iface_set_param, 247 .get_iface_param = qla4xxx_get_iface_param, 248 .bsg_request = qla4xxx_bsg_request, 249 .send_ping = qla4xxx_send_ping, 250 .get_chap = qla4xxx_get_chap_list, 251 .delete_chap = qla4xxx_delete_chap, 252 .set_chap = qla4xxx_set_chap_entry, 253 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param, 254 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param, 255 .new_flashnode = qla4xxx_sysfs_ddb_add, 256 .del_flashnode = qla4xxx_sysfs_ddb_delete, 257 .login_flashnode = qla4xxx_sysfs_ddb_login, 258 .logout_flashnode = qla4xxx_sysfs_ddb_logout, 259 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid, 260 .get_host_stats = qla4xxx_get_host_stats, 261 }; 262 263 static struct scsi_transport_template *qla4xxx_scsi_transport; 264 265 static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) 266 { 267 u32 reg_val = 0; 268 int rval = QLA_SUCCESS; 269 270 if (is_qla8022(ha)) 271 reg_val = readl(&ha->qla4_82xx_reg->host_status); 272 else if (is_qla8032(ha) || is_qla8042(ha)) 273 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); 274 else 275 reg_val = readw(&ha->reg->ctrl_status); 276 277 if (reg_val == QL4_ISP_REG_DISCONNECT) 278 rval = QLA_ERROR; 279 280 return rval; 281 } 282 283 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 284 uint32_t iface_type, uint32_t payload_size, 285 uint32_t pid, struct sockaddr *dst_addr) 286 { 287 struct scsi_qla_host *ha = to_qla_host(shost); 288 struct sockaddr_in *addr; 289 struct sockaddr_in6 *addr6; 290 uint32_t options = 0; 291 uint8_t ipaddr[IPv6_ADDR_LEN]; 292 int rval; 293 294 memset(ipaddr, 0, IPv6_ADDR_LEN); 295 /* IPv4 to IPv4 */ 296 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) && 297 (dst_addr->sa_family == AF_INET)) { 298 addr = (struct sockaddr_in *)dst_addr; 299 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN); 300 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 " 301 "dest: %pI4\n", __func__, 302 &ha->ip_config.ip_address, ipaddr)); 303 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, 304 ipaddr); 305 if (rval) 306 rval = -EINVAL; 307 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) && 308 (dst_addr->sa_family == AF_INET6)) { 309 /* IPv6 to IPv6 */ 310 addr6 = (struct sockaddr_in6 *)dst_addr; 311 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN); 312 313 options |= PING_IPV6_PROTOCOL_ENABLE; 314 315 /* Ping using LinkLocal address */ 316 if ((iface_num == 0) || (iface_num == 1)) { 317 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping " 318 "src: %pI6 dest: %pI6\n", __func__, 319 &ha->ip_config.ipv6_link_local_addr, 320 ipaddr)); 321 options |= PING_IPV6_LINKLOCAL_ADDR; 322 rval = qla4xxx_ping_iocb(ha, options, payload_size, 323 pid, ipaddr); 324 } else { 325 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d " 326 "not supported\n", __func__, iface_num); 327 rval = -ENOSYS; 328 goto exit_send_ping; 329 } 330 331 /* 332 * If ping using LinkLocal address fails, try ping using 333 * IPv6 address 334 */ 335 if (rval != QLA_SUCCESS) { 336 options &= ~PING_IPV6_LINKLOCAL_ADDR; 337 if (iface_num == 0) { 338 options |= PING_IPV6_ADDR0; 339 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " 340 "Ping src: %pI6 " 341 "dest: %pI6\n", __func__, 342 &ha->ip_config.ipv6_addr0, 343 ipaddr)); 344 } else if (iface_num == 1) { 345 options |= PING_IPV6_ADDR1; 346 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " 347 "Ping src: %pI6 " 348 "dest: %pI6\n", __func__, 349 &ha->ip_config.ipv6_addr1, 350 ipaddr)); 351 } 352 rval = qla4xxx_ping_iocb(ha, options, 
payload_size, 353 pid, ipaddr); 354 if (rval) 355 rval = -EINVAL; 356 } 357 } else 358 rval = -ENOSYS; 359 exit_send_ping: 360 return rval; 361 } 362 363 static umode_t qla4_attr_is_visible(int param_type, int param) 364 { 365 switch (param_type) { 366 case ISCSI_HOST_PARAM: 367 switch (param) { 368 case ISCSI_HOST_PARAM_HWADDRESS: 369 case ISCSI_HOST_PARAM_IPADDRESS: 370 case ISCSI_HOST_PARAM_INITIATOR_NAME: 371 case ISCSI_HOST_PARAM_PORT_STATE: 372 case ISCSI_HOST_PARAM_PORT_SPEED: 373 return S_IRUGO; 374 default: 375 return 0; 376 } 377 case ISCSI_PARAM: 378 switch (param) { 379 case ISCSI_PARAM_PERSISTENT_ADDRESS: 380 case ISCSI_PARAM_PERSISTENT_PORT: 381 case ISCSI_PARAM_CONN_ADDRESS: 382 case ISCSI_PARAM_CONN_PORT: 383 case ISCSI_PARAM_TARGET_NAME: 384 case ISCSI_PARAM_TPGT: 385 case ISCSI_PARAM_TARGET_ALIAS: 386 case ISCSI_PARAM_MAX_BURST: 387 case ISCSI_PARAM_MAX_R2T: 388 case ISCSI_PARAM_FIRST_BURST: 389 case ISCSI_PARAM_MAX_RECV_DLENGTH: 390 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 391 case ISCSI_PARAM_IFACE_NAME: 392 case ISCSI_PARAM_CHAP_OUT_IDX: 393 case ISCSI_PARAM_CHAP_IN_IDX: 394 case ISCSI_PARAM_USERNAME: 395 case ISCSI_PARAM_PASSWORD: 396 case ISCSI_PARAM_USERNAME_IN: 397 case ISCSI_PARAM_PASSWORD_IN: 398 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: 399 case ISCSI_PARAM_DISCOVERY_SESS: 400 case ISCSI_PARAM_PORTAL_TYPE: 401 case ISCSI_PARAM_CHAP_AUTH_EN: 402 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: 403 case ISCSI_PARAM_BIDI_CHAP_EN: 404 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: 405 case ISCSI_PARAM_DEF_TIME2WAIT: 406 case ISCSI_PARAM_DEF_TIME2RETAIN: 407 case ISCSI_PARAM_HDRDGST_EN: 408 case ISCSI_PARAM_DATADGST_EN: 409 case ISCSI_PARAM_INITIAL_R2T_EN: 410 case ISCSI_PARAM_IMM_DATA_EN: 411 case ISCSI_PARAM_PDU_INORDER_EN: 412 case ISCSI_PARAM_DATASEQ_INORDER_EN: 413 case ISCSI_PARAM_MAX_SEGMENT_SIZE: 414 case ISCSI_PARAM_TCP_TIMESTAMP_STAT: 415 case ISCSI_PARAM_TCP_WSF_DISABLE: 416 case ISCSI_PARAM_TCP_NAGLE_DISABLE: 417 case ISCSI_PARAM_TCP_TIMER_SCALE: 418 case ISCSI_PARAM_TCP_TIMESTAMP_EN: 419 case ISCSI_PARAM_TCP_XMIT_WSF: 420 case ISCSI_PARAM_TCP_RECV_WSF: 421 case ISCSI_PARAM_IP_FRAGMENT_DISABLE: 422 case ISCSI_PARAM_IPV4_TOS: 423 case ISCSI_PARAM_IPV6_TC: 424 case ISCSI_PARAM_IPV6_FLOW_LABEL: 425 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: 426 case ISCSI_PARAM_KEEPALIVE_TMO: 427 case ISCSI_PARAM_LOCAL_PORT: 428 case ISCSI_PARAM_ISID: 429 case ISCSI_PARAM_TSID: 430 case ISCSI_PARAM_DEF_TASKMGMT_TMO: 431 case ISCSI_PARAM_ERL: 432 case ISCSI_PARAM_STATSN: 433 case ISCSI_PARAM_EXP_STATSN: 434 case ISCSI_PARAM_DISCOVERY_PARENT_IDX: 435 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: 436 case ISCSI_PARAM_LOCAL_IPADDR: 437 return S_IRUGO; 438 default: 439 return 0; 440 } 441 case ISCSI_NET_PARAM: 442 switch (param) { 443 case ISCSI_NET_PARAM_IPV4_ADDR: 444 case ISCSI_NET_PARAM_IPV4_SUBNET: 445 case ISCSI_NET_PARAM_IPV4_GW: 446 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 447 case ISCSI_NET_PARAM_IFACE_ENABLE: 448 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 449 case ISCSI_NET_PARAM_IPV6_ADDR: 450 case ISCSI_NET_PARAM_IPV6_ROUTER: 451 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 452 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 453 case ISCSI_NET_PARAM_VLAN_ID: 454 case ISCSI_NET_PARAM_VLAN_PRIORITY: 455 case ISCSI_NET_PARAM_VLAN_ENABLED: 456 case ISCSI_NET_PARAM_MTU: 457 case ISCSI_NET_PARAM_PORT: 458 case ISCSI_NET_PARAM_IPADDR_STATE: 459 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 460 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 461 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 462 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 463 case 
ISCSI_NET_PARAM_TCP_WSF_DISABLE: 464 case ISCSI_NET_PARAM_TCP_WSF: 465 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 466 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 467 case ISCSI_NET_PARAM_CACHE_ID: 468 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 469 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 470 case ISCSI_NET_PARAM_IPV4_TOS_EN: 471 case ISCSI_NET_PARAM_IPV4_TOS: 472 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 473 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 474 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 475 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 476 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 477 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 478 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 479 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 480 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 481 case ISCSI_NET_PARAM_REDIRECT_EN: 482 case ISCSI_NET_PARAM_IPV4_TTL: 483 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 484 case ISCSI_NET_PARAM_IPV6_MLD_EN: 485 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 486 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 487 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 488 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 489 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 490 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 491 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 492 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 493 return S_IRUGO; 494 default: 495 return 0; 496 } 497 case ISCSI_IFACE_PARAM: 498 switch (param) { 499 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 500 case ISCSI_IFACE_PARAM_HDRDGST_EN: 501 case ISCSI_IFACE_PARAM_DATADGST_EN: 502 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 503 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 504 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 505 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 506 case ISCSI_IFACE_PARAM_ERL: 507 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 508 case ISCSI_IFACE_PARAM_FIRST_BURST: 509 case ISCSI_IFACE_PARAM_MAX_R2T: 510 case ISCSI_IFACE_PARAM_MAX_BURST: 511 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 512 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 513 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 514 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 515 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 516 case ISCSI_IFACE_PARAM_INITIATOR_NAME: 517 return S_IRUGO; 518 default: 519 return 0; 520 } 521 case ISCSI_FLASHNODE_PARAM: 522 switch (param) { 523 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 524 case ISCSI_FLASHNODE_PORTAL_TYPE: 525 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 526 case ISCSI_FLASHNODE_DISCOVERY_SESS: 527 case ISCSI_FLASHNODE_ENTRY_EN: 528 case ISCSI_FLASHNODE_HDR_DGST_EN: 529 case ISCSI_FLASHNODE_DATA_DGST_EN: 530 case ISCSI_FLASHNODE_IMM_DATA_EN: 531 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 532 case ISCSI_FLASHNODE_DATASEQ_INORDER: 533 case ISCSI_FLASHNODE_PDU_INORDER: 534 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 535 case ISCSI_FLASHNODE_SNACK_REQ_EN: 536 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 537 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 538 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 539 case ISCSI_FLASHNODE_ERL: 540 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 541 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 542 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 543 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 544 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 545 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 546 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 547 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 548 case ISCSI_FLASHNODE_FIRST_BURST: 549 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 550 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 551 case ISCSI_FLASHNODE_MAX_R2T: 552 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 553 case ISCSI_FLASHNODE_ISID: 554 
case ISCSI_FLASHNODE_TSID: 555 case ISCSI_FLASHNODE_PORT: 556 case ISCSI_FLASHNODE_MAX_BURST: 557 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 558 case ISCSI_FLASHNODE_IPADDR: 559 case ISCSI_FLASHNODE_ALIAS: 560 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 561 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 562 case ISCSI_FLASHNODE_LOCAL_PORT: 563 case ISCSI_FLASHNODE_IPV4_TOS: 564 case ISCSI_FLASHNODE_IPV6_TC: 565 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 566 case ISCSI_FLASHNODE_NAME: 567 case ISCSI_FLASHNODE_TPGT: 568 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 569 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 570 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 571 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 572 case ISCSI_FLASHNODE_TCP_RECV_WSF: 573 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 574 case ISCSI_FLASHNODE_USERNAME: 575 case ISCSI_FLASHNODE_PASSWORD: 576 case ISCSI_FLASHNODE_STATSN: 577 case ISCSI_FLASHNODE_EXP_STATSN: 578 case ISCSI_FLASHNODE_IS_BOOT_TGT: 579 return S_IRUGO; 580 default: 581 return 0; 582 } 583 } 584 585 return 0; 586 } 587 588 /** 589 * qla4xxx_create chap_list - Create CHAP list from FLASH 590 * @ha: pointer to adapter structure 591 * 592 * Read flash and make a list of CHAP entries, during login when a CHAP entry 593 * is received, it will be checked in this list. If entry exist then the CHAP 594 * entry index is set in the DDB. If CHAP entry does not exist in this list 595 * then a new entry is added in FLASH in CHAP table and the index obtained is 596 * used in the DDB. 597 **/ 598 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) 599 { 600 int rval = 0; 601 uint8_t *chap_flash_data = NULL; 602 uint32_t offset; 603 dma_addr_t chap_dma; 604 uint32_t chap_size = 0; 605 606 if (is_qla40XX(ha)) 607 chap_size = MAX_CHAP_ENTRIES_40XX * 608 sizeof(struct ql4_chap_table); 609 else /* Single region contains CHAP info for both 610 * ports which is divided into half for each port. 
611 */ 612 chap_size = ha->hw.flt_chap_size / 2; 613 614 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, 615 &chap_dma, GFP_KERNEL); 616 if (!chap_flash_data) { 617 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); 618 return; 619 } 620 621 if (is_qla40XX(ha)) { 622 offset = FLASH_CHAP_OFFSET; 623 } else { 624 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); 625 if (ha->port_num == 1) 626 offset += chap_size; 627 } 628 629 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); 630 if (rval != QLA_SUCCESS) 631 goto exit_chap_list; 632 633 if (ha->chap_list == NULL) 634 ha->chap_list = vmalloc(chap_size); 635 if (ha->chap_list == NULL) { 636 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); 637 goto exit_chap_list; 638 } 639 640 memset(ha->chap_list, 0, chap_size); 641 memcpy(ha->chap_list, chap_flash_data, chap_size); 642 643 exit_chap_list: 644 dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma); 645 } 646 647 static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, 648 int16_t chap_index, 649 struct ql4_chap_table **chap_entry) 650 { 651 int rval = QLA_ERROR; 652 int max_chap_entries; 653 654 if (!ha->chap_list) { 655 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); 656 rval = QLA_ERROR; 657 goto exit_get_chap; 658 } 659 660 if (is_qla80XX(ha)) 661 max_chap_entries = (ha->hw.flt_chap_size / 2) / 662 sizeof(struct ql4_chap_table); 663 else 664 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 665 666 if (chap_index > max_chap_entries) { 667 ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); 668 rval = QLA_ERROR; 669 goto exit_get_chap; 670 } 671 672 *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; 673 if ((*chap_entry)->cookie != 674 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 675 rval = QLA_ERROR; 676 *chap_entry = NULL; 677 } else { 678 rval = QLA_SUCCESS; 679 } 680 681 exit_get_chap: 682 return rval; 683 } 684 685 /** 686 * qla4xxx_find_free_chap_index - Find the first free chap index 687 * @ha: pointer to adapter structure 688 * @chap_index: CHAP index to be returned 689 * 690 * Find the first free chap index available in the chap table 691 * 692 * Note: Caller should acquire the chap lock before getting here. 
693 **/ 694 static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, 695 uint16_t *chap_index) 696 { 697 int i, rval; 698 int free_index = -1; 699 int max_chap_entries = 0; 700 struct ql4_chap_table *chap_table; 701 702 if (is_qla80XX(ha)) 703 max_chap_entries = (ha->hw.flt_chap_size / 2) / 704 sizeof(struct ql4_chap_table); 705 else 706 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 707 708 if (!ha->chap_list) { 709 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); 710 rval = QLA_ERROR; 711 goto exit_find_chap; 712 } 713 714 for (i = 0; i < max_chap_entries; i++) { 715 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 716 717 if ((chap_table->cookie != 718 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) && 719 (i > MAX_RESRV_CHAP_IDX)) { 720 free_index = i; 721 break; 722 } 723 } 724 725 if (free_index != -1) { 726 *chap_index = free_index; 727 rval = QLA_SUCCESS; 728 } else { 729 rval = QLA_ERROR; 730 } 731 732 exit_find_chap: 733 return rval; 734 } 735 736 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, 737 uint32_t *num_entries, char *buf) 738 { 739 struct scsi_qla_host *ha = to_qla_host(shost); 740 struct ql4_chap_table *chap_table; 741 struct iscsi_chap_rec *chap_rec; 742 int max_chap_entries = 0; 743 int valid_chap_entries = 0; 744 int ret = 0, i; 745 746 if (is_qla80XX(ha)) 747 max_chap_entries = (ha->hw.flt_chap_size / 2) / 748 sizeof(struct ql4_chap_table); 749 else 750 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 751 752 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n", 753 __func__, *num_entries, chap_tbl_idx); 754 755 if (!buf) { 756 ret = -ENOMEM; 757 goto exit_get_chap_list; 758 } 759 760 qla4xxx_create_chap_list(ha); 761 762 chap_rec = (struct iscsi_chap_rec *) buf; 763 mutex_lock(&ha->chap_sem); 764 for (i = chap_tbl_idx; i < max_chap_entries; i++) { 765 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 766 if (chap_table->cookie != 767 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) 768 continue; 769 770 chap_rec->chap_tbl_idx = i; 771 strlcpy(chap_rec->username, chap_table->name, 772 ISCSI_CHAP_AUTH_NAME_MAX_LEN); 773 strlcpy(chap_rec->password, chap_table->secret, 774 QL4_CHAP_MAX_SECRET_LEN); 775 chap_rec->password_length = chap_table->secret_len; 776 777 if (chap_table->flags & BIT_7) /* local */ 778 chap_rec->chap_type = CHAP_TYPE_OUT; 779 780 if (chap_table->flags & BIT_6) /* peer */ 781 chap_rec->chap_type = CHAP_TYPE_IN; 782 783 chap_rec++; 784 785 valid_chap_entries++; 786 if (valid_chap_entries == *num_entries) 787 break; 788 else 789 continue; 790 } 791 mutex_unlock(&ha->chap_sem); 792 793 exit_get_chap_list: 794 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n", 795 __func__, valid_chap_entries); 796 *num_entries = valid_chap_entries; 797 return ret; 798 } 799 800 static int __qla4xxx_is_chap_active(struct device *dev, void *data) 801 { 802 int ret = 0; 803 uint16_t *chap_tbl_idx = (uint16_t *) data; 804 struct iscsi_cls_session *cls_session; 805 struct iscsi_session *sess; 806 struct ddb_entry *ddb_entry; 807 808 if (!iscsi_is_session_dev(dev)) 809 goto exit_is_chap_active; 810 811 cls_session = iscsi_dev_to_session(dev); 812 sess = cls_session->dd_data; 813 ddb_entry = sess->dd_data; 814 815 if (iscsi_session_chkready(cls_session)) 816 goto exit_is_chap_active; 817 818 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx) 819 ret = 1; 820 821 exit_is_chap_active: 822 return ret; 823 } 824 825 static int qla4xxx_is_chap_active(struct Scsi_Host *shost, 826 uint16_t chap_tbl_idx) 827 { 828 int ret = 
0; 829 830 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx, 831 __qla4xxx_is_chap_active); 832 833 return ret; 834 } 835 836 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx) 837 { 838 struct scsi_qla_host *ha = to_qla_host(shost); 839 struct ql4_chap_table *chap_table; 840 dma_addr_t chap_dma; 841 int max_chap_entries = 0; 842 uint32_t offset = 0; 843 uint32_t chap_size; 844 int ret = 0; 845 846 chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); 847 if (chap_table == NULL) 848 return -ENOMEM; 849 850 if (is_qla80XX(ha)) 851 max_chap_entries = (ha->hw.flt_chap_size / 2) / 852 sizeof(struct ql4_chap_table); 853 else 854 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 855 856 if (chap_tbl_idx > max_chap_entries) { 857 ret = -EINVAL; 858 goto exit_delete_chap; 859 } 860 861 /* Check if chap index is in use. 862 * If chap is in use don't delet chap entry */ 863 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx); 864 if (ret) { 865 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot " 866 "delete from flash\n", chap_tbl_idx); 867 ret = -EBUSY; 868 goto exit_delete_chap; 869 } 870 871 chap_size = sizeof(struct ql4_chap_table); 872 if (is_qla40XX(ha)) 873 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size); 874 else { 875 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); 876 /* flt_chap_size is CHAP table size for both ports 877 * so divide it by 2 to calculate the offset for second port 878 */ 879 if (ha->port_num == 1) 880 offset += (ha->hw.flt_chap_size / 2); 881 offset += (chap_tbl_idx * chap_size); 882 } 883 884 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); 885 if (ret != QLA_SUCCESS) { 886 ret = -EINVAL; 887 goto exit_delete_chap; 888 } 889 890 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n", 891 __le16_to_cpu(chap_table->cookie))); 892 893 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) { 894 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n"); 895 goto exit_delete_chap; 896 } 897 898 chap_table->cookie = __constant_cpu_to_le16(0xFFFF); 899 900 offset = FLASH_CHAP_OFFSET | 901 (chap_tbl_idx * sizeof(struct ql4_chap_table)); 902 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size, 903 FLASH_OPT_RMW_COMMIT); 904 if (ret == QLA_SUCCESS && ha->chap_list) { 905 mutex_lock(&ha->chap_sem); 906 /* Update ha chap_list cache */ 907 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx, 908 chap_table, sizeof(struct ql4_chap_table)); 909 mutex_unlock(&ha->chap_sem); 910 } 911 if (ret != QLA_SUCCESS) 912 ret = -EINVAL; 913 914 exit_delete_chap: 915 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); 916 return ret; 917 } 918 919 /** 920 * qla4xxx_set_chap_entry - Make chap entry with given information 921 * @shost: pointer to host 922 * @data: chap info - credentials, index and type to make chap entry 923 * @len: length of data 924 * 925 * Add or update chap entry with the given information 926 **/ 927 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) 928 { 929 struct scsi_qla_host *ha = to_qla_host(shost); 930 struct iscsi_chap_rec chap_rec; 931 struct ql4_chap_table *chap_entry = NULL; 932 struct iscsi_param_info *param_info; 933 struct nlattr *attr; 934 int max_chap_entries = 0; 935 int type; 936 int rem = len; 937 int rc = 0; 938 int size; 939 940 memset(&chap_rec, 0, sizeof(chap_rec)); 941 942 nla_for_each_attr(attr, data, len, rem) { 943 param_info = nla_data(attr); 944 945 switch (param_info->param) { 946 case ISCSI_CHAP_PARAM_INDEX: 
947 chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value; 948 break; 949 case ISCSI_CHAP_PARAM_CHAP_TYPE: 950 chap_rec.chap_type = param_info->value[0]; 951 break; 952 case ISCSI_CHAP_PARAM_USERNAME: 953 size = min_t(size_t, sizeof(chap_rec.username), 954 param_info->len); 955 memcpy(chap_rec.username, param_info->value, size); 956 break; 957 case ISCSI_CHAP_PARAM_PASSWORD: 958 size = min_t(size_t, sizeof(chap_rec.password), 959 param_info->len); 960 memcpy(chap_rec.password, param_info->value, size); 961 break; 962 case ISCSI_CHAP_PARAM_PASSWORD_LEN: 963 chap_rec.password_length = param_info->value[0]; 964 break; 965 default: 966 ql4_printk(KERN_ERR, ha, 967 "%s: No such sysfs attribute\n", __func__); 968 rc = -ENOSYS; 969 goto exit_set_chap; 970 }; 971 } 972 973 if (chap_rec.chap_type == CHAP_TYPE_IN) 974 type = BIDI_CHAP; 975 else 976 type = LOCAL_CHAP; 977 978 if (is_qla80XX(ha)) 979 max_chap_entries = (ha->hw.flt_chap_size / 2) / 980 sizeof(struct ql4_chap_table); 981 else 982 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 983 984 mutex_lock(&ha->chap_sem); 985 if (chap_rec.chap_tbl_idx < max_chap_entries) { 986 rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, 987 &chap_entry); 988 if (!rc) { 989 if (!(type == qla4xxx_get_chap_type(chap_entry))) { 990 ql4_printk(KERN_INFO, ha, 991 "Type mismatch for CHAP entry %d\n", 992 chap_rec.chap_tbl_idx); 993 rc = -EINVAL; 994 goto exit_unlock_chap; 995 } 996 997 /* If chap index is in use then don't modify it */ 998 rc = qla4xxx_is_chap_active(shost, 999 chap_rec.chap_tbl_idx); 1000 if (rc) { 1001 ql4_printk(KERN_INFO, ha, 1002 "CHAP entry %d is in use\n", 1003 chap_rec.chap_tbl_idx); 1004 rc = -EBUSY; 1005 goto exit_unlock_chap; 1006 } 1007 } 1008 } else { 1009 rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); 1010 if (rc) { 1011 ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); 1012 rc = -EBUSY; 1013 goto exit_unlock_chap; 1014 } 1015 } 1016 1017 rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, 1018 chap_rec.chap_tbl_idx, type); 1019 1020 exit_unlock_chap: 1021 mutex_unlock(&ha->chap_sem); 1022 1023 exit_set_chap: 1024 return rc; 1025 } 1026 1027 1028 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) 1029 { 1030 struct scsi_qla_host *ha = to_qla_host(shost); 1031 struct iscsi_offload_host_stats *host_stats = NULL; 1032 int host_stats_size; 1033 int ret = 0; 1034 int ddb_idx = 0; 1035 struct ql_iscsi_stats *ql_iscsi_stats = NULL; 1036 int stats_size; 1037 dma_addr_t iscsi_stats_dma; 1038 1039 DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__)); 1040 1041 host_stats_size = sizeof(struct iscsi_offload_host_stats); 1042 1043 if (host_stats_size != len) { 1044 ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n", 1045 __func__, len, host_stats_size); 1046 ret = -EINVAL; 1047 goto exit_host_stats; 1048 } 1049 host_stats = (struct iscsi_offload_host_stats *)buf; 1050 1051 if (!buf) { 1052 ret = -ENOMEM; 1053 goto exit_host_stats; 1054 } 1055 1056 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1057 1058 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1059 &iscsi_stats_dma, GFP_KERNEL); 1060 if (!ql_iscsi_stats) { 1061 ql4_printk(KERN_ERR, ha, 1062 "Unable to allocate memory for iscsi stats\n"); 1063 ret = -ENOMEM; 1064 goto exit_host_stats; 1065 } 1066 1067 ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size, 1068 iscsi_stats_dma); 1069 if (ret != QLA_SUCCESS) { 1070 ql4_printk(KERN_ERR, ha, 1071 "Unable to retrieve iscsi 
stats\n"); 1072 ret = -EIO; 1073 goto exit_host_stats; 1074 } 1075 host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); 1076 host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes); 1077 host_stats->mactx_multicast_frames = 1078 le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames); 1079 host_stats->mactx_broadcast_frames = 1080 le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames); 1081 host_stats->mactx_pause_frames = 1082 le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames); 1083 host_stats->mactx_control_frames = 1084 le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames); 1085 host_stats->mactx_deferral = 1086 le64_to_cpu(ql_iscsi_stats->mac_tx_deferral); 1087 host_stats->mactx_excess_deferral = 1088 le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral); 1089 host_stats->mactx_late_collision = 1090 le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision); 1091 host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort); 1092 host_stats->mactx_single_collision = 1093 le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision); 1094 host_stats->mactx_multiple_collision = 1095 le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision); 1096 host_stats->mactx_collision = 1097 le64_to_cpu(ql_iscsi_stats->mac_tx_collision); 1098 host_stats->mactx_frames_dropped = 1099 le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped); 1100 host_stats->mactx_jumbo_frames = 1101 le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames); 1102 host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames); 1103 host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes); 1104 host_stats->macrx_unknown_control_frames = 1105 le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames); 1106 host_stats->macrx_pause_frames = 1107 le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames); 1108 host_stats->macrx_control_frames = 1109 le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames); 1110 host_stats->macrx_dribble = 1111 le64_to_cpu(ql_iscsi_stats->mac_rx_dribble); 1112 host_stats->macrx_frame_length_error = 1113 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error); 1114 host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber); 1115 host_stats->macrx_carrier_sense_error = 1116 le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error); 1117 host_stats->macrx_frame_discarded = 1118 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded); 1119 host_stats->macrx_frames_dropped = 1120 le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped); 1121 host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error); 1122 host_stats->mac_encoding_error = 1123 le64_to_cpu(ql_iscsi_stats->mac_encoding_error); 1124 host_stats->macrx_length_error_large = 1125 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large); 1126 host_stats->macrx_length_error_small = 1127 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small); 1128 host_stats->macrx_multicast_frames = 1129 le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames); 1130 host_stats->macrx_broadcast_frames = 1131 le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames); 1132 host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets); 1133 host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes); 1134 host_stats->iptx_fragments = 1135 le64_to_cpu(ql_iscsi_stats->ip_tx_fragments); 1136 host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets); 1137 host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes); 1138 host_stats->iprx_fragments = 1139 le64_to_cpu(ql_iscsi_stats->ip_rx_fragments); 1140 host_stats->ip_datagram_reassembly = 
1141 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly); 1142 host_stats->ip_invalid_address_error = 1143 le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error); 1144 host_stats->ip_error_packets = 1145 le64_to_cpu(ql_iscsi_stats->ip_error_packets); 1146 host_stats->ip_fragrx_overlap = 1147 le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap); 1148 host_stats->ip_fragrx_outoforder = 1149 le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder); 1150 host_stats->ip_datagram_reassembly_timeout = 1151 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout); 1152 host_stats->ipv6tx_packets = 1153 le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets); 1154 host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes); 1155 host_stats->ipv6tx_fragments = 1156 le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments); 1157 host_stats->ipv6rx_packets = 1158 le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets); 1159 host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes); 1160 host_stats->ipv6rx_fragments = 1161 le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments); 1162 host_stats->ipv6_datagram_reassembly = 1163 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly); 1164 host_stats->ipv6_invalid_address_error = 1165 le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error); 1166 host_stats->ipv6_error_packets = 1167 le64_to_cpu(ql_iscsi_stats->ipv6_error_packets); 1168 host_stats->ipv6_fragrx_overlap = 1169 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap); 1170 host_stats->ipv6_fragrx_outoforder = 1171 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder); 1172 host_stats->ipv6_datagram_reassembly_timeout = 1173 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout); 1174 host_stats->tcptx_segments = 1175 le64_to_cpu(ql_iscsi_stats->tcp_tx_segments); 1176 host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes); 1177 host_stats->tcprx_segments = 1178 le64_to_cpu(ql_iscsi_stats->tcp_rx_segments); 1179 host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte); 1180 host_stats->tcp_duplicate_ack_retx = 1181 le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx); 1182 host_stats->tcp_retx_timer_expired = 1183 le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired); 1184 host_stats->tcprx_duplicate_ack = 1185 le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack); 1186 host_stats->tcprx_pure_ackr = 1187 le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr); 1188 host_stats->tcptx_delayed_ack = 1189 le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack); 1190 host_stats->tcptx_pure_ack = 1191 le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack); 1192 host_stats->tcprx_segment_error = 1193 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error); 1194 host_stats->tcprx_segment_outoforder = 1195 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder); 1196 host_stats->tcprx_window_probe = 1197 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe); 1198 host_stats->tcprx_window_update = 1199 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update); 1200 host_stats->tcptx_window_probe_persist = 1201 le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist); 1202 host_stats->ecc_error_correction = 1203 le64_to_cpu(ql_iscsi_stats->ecc_error_correction); 1204 host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx); 1205 host_stats->iscsi_data_bytes_tx = 1206 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx); 1207 host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx); 1208 host_stats->iscsi_data_bytes_rx = 1209 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx); 1210 host_stats->iscsi_io_completed = 1211 
le64_to_cpu(ql_iscsi_stats->iscsi_io_completed); 1212 host_stats->iscsi_unexpected_io_rx = 1213 le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx); 1214 host_stats->iscsi_format_error = 1215 le64_to_cpu(ql_iscsi_stats->iscsi_format_error); 1216 host_stats->iscsi_hdr_digest_error = 1217 le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error); 1218 host_stats->iscsi_data_digest_error = 1219 le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error); 1220 host_stats->iscsi_sequence_error = 1221 le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); 1222 exit_host_stats: 1223 if (ql_iscsi_stats) 1224 dma_free_coherent(&ha->pdev->dev, host_stats_size, 1225 ql_iscsi_stats, iscsi_stats_dma); 1226 1227 ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", 1228 __func__); 1229 return ret; 1230 } 1231 1232 static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 1233 enum iscsi_param_type param_type, 1234 int param, char *buf) 1235 { 1236 struct Scsi_Host *shost = iscsi_iface_to_shost(iface); 1237 struct scsi_qla_host *ha = to_qla_host(shost); 1238 int ival; 1239 char *pval = NULL; 1240 int len = -ENOSYS; 1241 1242 if (param_type == ISCSI_NET_PARAM) { 1243 switch (param) { 1244 case ISCSI_NET_PARAM_IPV4_ADDR: 1245 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1246 break; 1247 case ISCSI_NET_PARAM_IPV4_SUBNET: 1248 len = sprintf(buf, "%pI4\n", 1249 &ha->ip_config.subnet_mask); 1250 break; 1251 case ISCSI_NET_PARAM_IPV4_GW: 1252 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); 1253 break; 1254 case ISCSI_NET_PARAM_IFACE_ENABLE: 1255 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1256 OP_STATE(ha->ip_config.ipv4_options, 1257 IPOPT_IPV4_PROTOCOL_ENABLE, pval); 1258 } else { 1259 OP_STATE(ha->ip_config.ipv6_options, 1260 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval); 1261 } 1262 1263 len = sprintf(buf, "%s\n", pval); 1264 break; 1265 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 1266 len = sprintf(buf, "%s\n", 1267 (ha->ip_config.tcp_options & 1268 TCPOPT_DHCP_ENABLE) ? 1269 "dhcp" : "static"); 1270 break; 1271 case ISCSI_NET_PARAM_IPV6_ADDR: 1272 if (iface->iface_num == 0) 1273 len = sprintf(buf, "%pI6\n", 1274 &ha->ip_config.ipv6_addr0); 1275 if (iface->iface_num == 1) 1276 len = sprintf(buf, "%pI6\n", 1277 &ha->ip_config.ipv6_addr1); 1278 break; 1279 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 1280 len = sprintf(buf, "%pI6\n", 1281 &ha->ip_config.ipv6_link_local_addr); 1282 break; 1283 case ISCSI_NET_PARAM_IPV6_ROUTER: 1284 len = sprintf(buf, "%pI6\n", 1285 &ha->ip_config.ipv6_default_router_addr); 1286 break; 1287 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 1288 pval = (ha->ip_config.ipv6_addl_options & 1289 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? 1290 "nd" : "static"; 1291 1292 len = sprintf(buf, "%s\n", pval); 1293 break; 1294 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 1295 pval = (ha->ip_config.ipv6_addl_options & 1296 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? 
1297 "auto" : "static"; 1298 1299 len = sprintf(buf, "%s\n", pval); 1300 break; 1301 case ISCSI_NET_PARAM_VLAN_ID: 1302 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1303 ival = ha->ip_config.ipv4_vlan_tag & 1304 ISCSI_MAX_VLAN_ID; 1305 else 1306 ival = ha->ip_config.ipv6_vlan_tag & 1307 ISCSI_MAX_VLAN_ID; 1308 1309 len = sprintf(buf, "%d\n", ival); 1310 break; 1311 case ISCSI_NET_PARAM_VLAN_PRIORITY: 1312 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1313 ival = (ha->ip_config.ipv4_vlan_tag >> 13) & 1314 ISCSI_MAX_VLAN_PRIORITY; 1315 else 1316 ival = (ha->ip_config.ipv6_vlan_tag >> 13) & 1317 ISCSI_MAX_VLAN_PRIORITY; 1318 1319 len = sprintf(buf, "%d\n", ival); 1320 break; 1321 case ISCSI_NET_PARAM_VLAN_ENABLED: 1322 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1323 OP_STATE(ha->ip_config.ipv4_options, 1324 IPOPT_VLAN_TAGGING_ENABLE, pval); 1325 } else { 1326 OP_STATE(ha->ip_config.ipv6_options, 1327 IPV6_OPT_VLAN_TAGGING_ENABLE, pval); 1328 } 1329 len = sprintf(buf, "%s\n", pval); 1330 break; 1331 case ISCSI_NET_PARAM_MTU: 1332 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); 1333 break; 1334 case ISCSI_NET_PARAM_PORT: 1335 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1336 len = sprintf(buf, "%d\n", 1337 ha->ip_config.ipv4_port); 1338 else 1339 len = sprintf(buf, "%d\n", 1340 ha->ip_config.ipv6_port); 1341 break; 1342 case ISCSI_NET_PARAM_IPADDR_STATE: 1343 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1344 pval = iscsi_get_ipaddress_state_name( 1345 ha->ip_config.ipv4_addr_state); 1346 } else { 1347 if (iface->iface_num == 0) 1348 pval = iscsi_get_ipaddress_state_name( 1349 ha->ip_config.ipv6_addr0_state); 1350 else if (iface->iface_num == 1) 1351 pval = iscsi_get_ipaddress_state_name( 1352 ha->ip_config.ipv6_addr1_state); 1353 } 1354 1355 len = sprintf(buf, "%s\n", pval); 1356 break; 1357 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: 1358 pval = iscsi_get_ipaddress_state_name( 1359 ha->ip_config.ipv6_link_local_state); 1360 len = sprintf(buf, "%s\n", pval); 1361 break; 1362 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: 1363 pval = iscsi_get_router_state_name( 1364 ha->ip_config.ipv6_default_router_state); 1365 len = sprintf(buf, "%s\n", pval); 1366 break; 1367 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 1368 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1369 OP_STATE(~ha->ip_config.tcp_options, 1370 TCPOPT_DELAYED_ACK_DISABLE, pval); 1371 } else { 1372 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1373 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); 1374 } 1375 len = sprintf(buf, "%s\n", pval); 1376 break; 1377 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 1378 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1379 OP_STATE(~ha->ip_config.tcp_options, 1380 TCPOPT_NAGLE_ALGO_DISABLE, pval); 1381 } else { 1382 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1383 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); 1384 } 1385 len = sprintf(buf, "%s\n", pval); 1386 break; 1387 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 1388 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1389 OP_STATE(~ha->ip_config.tcp_options, 1390 TCPOPT_WINDOW_SCALE_DISABLE, pval); 1391 } else { 1392 OP_STATE(~ha->ip_config.ipv6_tcp_options, 1393 IPV6_TCPOPT_WINDOW_SCALE_DISABLE, 1394 pval); 1395 } 1396 len = sprintf(buf, "%s\n", pval); 1397 break; 1398 case ISCSI_NET_PARAM_TCP_WSF: 1399 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1400 len = sprintf(buf, "%d\n", 1401 ha->ip_config.tcp_wsf); 1402 else 1403 len = sprintf(buf, "%d\n", 1404 ha->ip_config.ipv6_tcp_wsf); 1405 break; 1406 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 1407 if 
(iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1408 ival = (ha->ip_config.tcp_options & 1409 TCPOPT_TIMER_SCALE) >> 1; 1410 else 1411 ival = (ha->ip_config.ipv6_tcp_options & 1412 IPV6_TCPOPT_TIMER_SCALE) >> 1; 1413 1414 len = sprintf(buf, "%d\n", ival); 1415 break; 1416 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 1417 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1418 OP_STATE(ha->ip_config.tcp_options, 1419 TCPOPT_TIMESTAMP_ENABLE, pval); 1420 } else { 1421 OP_STATE(ha->ip_config.ipv6_tcp_options, 1422 IPV6_TCPOPT_TIMESTAMP_EN, pval); 1423 } 1424 len = sprintf(buf, "%s\n", pval); 1425 break; 1426 case ISCSI_NET_PARAM_CACHE_ID: 1427 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) 1428 len = sprintf(buf, "%d\n", 1429 ha->ip_config.ipv4_cache_id); 1430 else 1431 len = sprintf(buf, "%d\n", 1432 ha->ip_config.ipv6_cache_id); 1433 break; 1434 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 1435 OP_STATE(ha->ip_config.tcp_options, 1436 TCPOPT_DNS_SERVER_IP_EN, pval); 1437 1438 len = sprintf(buf, "%s\n", pval); 1439 break; 1440 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 1441 OP_STATE(ha->ip_config.tcp_options, 1442 TCPOPT_SLP_DA_INFO_EN, pval); 1443 1444 len = sprintf(buf, "%s\n", pval); 1445 break; 1446 case ISCSI_NET_PARAM_IPV4_TOS_EN: 1447 OP_STATE(ha->ip_config.ipv4_options, 1448 IPOPT_IPV4_TOS_EN, pval); 1449 1450 len = sprintf(buf, "%s\n", pval); 1451 break; 1452 case ISCSI_NET_PARAM_IPV4_TOS: 1453 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos); 1454 break; 1455 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 1456 OP_STATE(ha->ip_config.ipv4_options, 1457 IPOPT_GRAT_ARP_EN, pval); 1458 1459 len = sprintf(buf, "%s\n", pval); 1460 break; 1461 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 1462 OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN, 1463 pval); 1464 1465 len = sprintf(buf, "%s\n", pval); 1466 break; 1467 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 1468 pval = (ha->ip_config.ipv4_alt_cid_len) ? 1469 (char *)ha->ip_config.ipv4_alt_cid : ""; 1470 1471 len = sprintf(buf, "%s\n", pval); 1472 break; 1473 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 1474 OP_STATE(ha->ip_config.ipv4_options, 1475 IPOPT_REQ_VID_EN, pval); 1476 1477 len = sprintf(buf, "%s\n", pval); 1478 break; 1479 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 1480 OP_STATE(ha->ip_config.ipv4_options, 1481 IPOPT_USE_VID_EN, pval); 1482 1483 len = sprintf(buf, "%s\n", pval); 1484 break; 1485 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 1486 pval = (ha->ip_config.ipv4_vid_len) ? 
1487 (char *)ha->ip_config.ipv4_vid : ""; 1488 1489 len = sprintf(buf, "%s\n", pval); 1490 break; 1491 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 1492 OP_STATE(ha->ip_config.ipv4_options, 1493 IPOPT_LEARN_IQN_EN, pval); 1494 1495 len = sprintf(buf, "%s\n", pval); 1496 break; 1497 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 1498 OP_STATE(~ha->ip_config.ipv4_options, 1499 IPOPT_FRAGMENTATION_DISABLE, pval); 1500 1501 len = sprintf(buf, "%s\n", pval); 1502 break; 1503 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 1504 OP_STATE(ha->ip_config.ipv4_options, 1505 IPOPT_IN_FORWARD_EN, pval); 1506 1507 len = sprintf(buf, "%s\n", pval); 1508 break; 1509 case ISCSI_NET_PARAM_REDIRECT_EN: 1510 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { 1511 OP_STATE(ha->ip_config.ipv4_options, 1512 IPOPT_ARP_REDIRECT_EN, pval); 1513 } else { 1514 OP_STATE(ha->ip_config.ipv6_options, 1515 IPV6_OPT_REDIRECT_EN, pval); 1516 } 1517 len = sprintf(buf, "%s\n", pval); 1518 break; 1519 case ISCSI_NET_PARAM_IPV4_TTL: 1520 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl); 1521 break; 1522 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 1523 OP_STATE(ha->ip_config.ipv6_options, 1524 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval); 1525 1526 len = sprintf(buf, "%s\n", pval); 1527 break; 1528 case ISCSI_NET_PARAM_IPV6_MLD_EN: 1529 OP_STATE(ha->ip_config.ipv6_addl_options, 1530 IPV6_ADDOPT_MLD_EN, pval); 1531 1532 len = sprintf(buf, "%s\n", pval); 1533 break; 1534 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 1535 len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl); 1536 break; 1537 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 1538 len = sprintf(buf, "%d\n", 1539 ha->ip_config.ipv6_traffic_class); 1540 break; 1541 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 1542 len = sprintf(buf, "%d\n", 1543 ha->ip_config.ipv6_hop_limit); 1544 break; 1545 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 1546 len = sprintf(buf, "%d\n", 1547 ha->ip_config.ipv6_nd_reach_time); 1548 break; 1549 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 1550 len = sprintf(buf, "%d\n", 1551 ha->ip_config.ipv6_nd_rexmit_timer); 1552 break; 1553 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 1554 len = sprintf(buf, "%d\n", 1555 ha->ip_config.ipv6_nd_stale_timeout); 1556 break; 1557 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 1558 len = sprintf(buf, "%d\n", 1559 ha->ip_config.ipv6_dup_addr_detect_count); 1560 break; 1561 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 1562 len = sprintf(buf, "%d\n", 1563 ha->ip_config.ipv6_gw_advrt_mtu); 1564 break; 1565 default: 1566 len = -ENOSYS; 1567 } 1568 } else if (param_type == ISCSI_IFACE_PARAM) { 1569 switch (param) { 1570 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 1571 len = sprintf(buf, "%d\n", ha->ip_config.def_timeout); 1572 break; 1573 case ISCSI_IFACE_PARAM_HDRDGST_EN: 1574 OP_STATE(ha->ip_config.iscsi_options, 1575 ISCSIOPTS_HEADER_DIGEST_EN, pval); 1576 1577 len = sprintf(buf, "%s\n", pval); 1578 break; 1579 case ISCSI_IFACE_PARAM_DATADGST_EN: 1580 OP_STATE(ha->ip_config.iscsi_options, 1581 ISCSIOPTS_DATA_DIGEST_EN, pval); 1582 1583 len = sprintf(buf, "%s\n", pval); 1584 break; 1585 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 1586 OP_STATE(ha->ip_config.iscsi_options, 1587 ISCSIOPTS_IMMEDIATE_DATA_EN, pval); 1588 1589 len = sprintf(buf, "%s\n", pval); 1590 break; 1591 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 1592 OP_STATE(ha->ip_config.iscsi_options, 1593 ISCSIOPTS_INITIAL_R2T_EN, pval); 1594 1595 len = sprintf(buf, "%s\n", pval); 1596 break; 1597 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 1598 OP_STATE(ha->ip_config.iscsi_options, 1599 
ISCSIOPTS_DATA_SEQ_INORDER_EN, pval); 1600 1601 len = sprintf(buf, "%s\n", pval); 1602 break; 1603 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 1604 OP_STATE(ha->ip_config.iscsi_options, 1605 ISCSIOPTS_DATA_PDU_INORDER_EN, pval); 1606 1607 len = sprintf(buf, "%s\n", pval); 1608 break; 1609 case ISCSI_IFACE_PARAM_ERL: 1610 len = sprintf(buf, "%d\n", 1611 (ha->ip_config.iscsi_options & 1612 ISCSIOPTS_ERL)); 1613 break; 1614 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 1615 len = sprintf(buf, "%u\n", 1616 ha->ip_config.iscsi_max_pdu_size * 1617 BYTE_UNITS); 1618 break; 1619 case ISCSI_IFACE_PARAM_FIRST_BURST: 1620 len = sprintf(buf, "%u\n", 1621 ha->ip_config.iscsi_first_burst_len * 1622 BYTE_UNITS); 1623 break; 1624 case ISCSI_IFACE_PARAM_MAX_R2T: 1625 len = sprintf(buf, "%d\n", 1626 ha->ip_config.iscsi_max_outstnd_r2t); 1627 break; 1628 case ISCSI_IFACE_PARAM_MAX_BURST: 1629 len = sprintf(buf, "%u\n", 1630 ha->ip_config.iscsi_max_burst_len * 1631 BYTE_UNITS); 1632 break; 1633 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 1634 OP_STATE(ha->ip_config.iscsi_options, 1635 ISCSIOPTS_CHAP_AUTH_EN, pval); 1636 1637 len = sprintf(buf, "%s\n", pval); 1638 break; 1639 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 1640 OP_STATE(ha->ip_config.iscsi_options, 1641 ISCSIOPTS_BIDI_CHAP_EN, pval); 1642 1643 len = sprintf(buf, "%s\n", pval); 1644 break; 1645 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 1646 OP_STATE(ha->ip_config.iscsi_options, 1647 ISCSIOPTS_DISCOVERY_AUTH_EN, pval); 1648 1649 len = sprintf(buf, "%s\n", pval); 1650 break; 1651 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 1652 OP_STATE(ha->ip_config.iscsi_options, 1653 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval); 1654 1655 len = sprintf(buf, "%s\n", pval); 1656 break; 1657 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 1658 OP_STATE(ha->ip_config.iscsi_options, 1659 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval); 1660 1661 len = sprintf(buf, "%s\n", pval); 1662 break; 1663 case ISCSI_IFACE_PARAM_INITIATOR_NAME: 1664 len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name); 1665 break; 1666 default: 1667 len = -ENOSYS; 1668 } 1669 } 1670 1671 return len; 1672 } 1673 1674 static struct iscsi_endpoint * 1675 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, 1676 int non_blocking) 1677 { 1678 int ret; 1679 struct iscsi_endpoint *ep; 1680 struct qla_endpoint *qla_ep; 1681 struct scsi_qla_host *ha; 1682 struct sockaddr_in *addr; 1683 struct sockaddr_in6 *addr6; 1684 1685 if (!shost) { 1686 ret = -ENXIO; 1687 pr_err("%s: shost is NULL\n", __func__); 1688 return ERR_PTR(ret); 1689 } 1690 1691 ha = iscsi_host_priv(shost); 1692 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); 1693 if (!ep) { 1694 ret = -ENOMEM; 1695 return ERR_PTR(ret); 1696 } 1697 1698 qla_ep = ep->dd_data; 1699 memset(qla_ep, 0, sizeof(struct qla_endpoint)); 1700 if (dst_addr->sa_family == AF_INET) { 1701 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in)); 1702 addr = (struct sockaddr_in *)&qla_ep->dst_addr; 1703 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__, 1704 (char *)&addr->sin_addr)); 1705 } else if (dst_addr->sa_family == AF_INET6) { 1706 memcpy(&qla_ep->dst_addr, dst_addr, 1707 sizeof(struct sockaddr_in6)); 1708 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; 1709 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, 1710 (char *)&addr6->sin6_addr)); 1711 } else { 1712 ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n", 1713 __func__); 1714 } 1715 1716 qla_ep->host = shost; 1717 1718 return ep; 1719 } 1720 1721 static int qla4xxx_ep_poll(struct iscsi_endpoint 
*ep, int timeout_ms) 1722 { 1723 struct qla_endpoint *qla_ep; 1724 struct scsi_qla_host *ha; 1725 int ret = 0; 1726 1727 qla_ep = ep->dd_data; 1728 ha = to_qla_host(qla_ep->host); 1729 DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no)); 1730 1731 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) 1732 ret = 1; 1733 1734 return ret; 1735 } 1736 1737 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) 1738 { 1739 struct qla_endpoint *qla_ep; 1740 struct scsi_qla_host *ha; 1741 1742 qla_ep = ep->dd_data; 1743 ha = to_qla_host(qla_ep->host); 1744 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1745 ha->host_no)); 1746 iscsi_destroy_endpoint(ep); 1747 } 1748 1749 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, 1750 enum iscsi_param param, 1751 char *buf) 1752 { 1753 struct qla_endpoint *qla_ep = ep->dd_data; 1754 struct sockaddr *dst_addr; 1755 struct scsi_qla_host *ha; 1756 1757 if (!qla_ep) 1758 return -ENOTCONN; 1759 1760 ha = to_qla_host(qla_ep->host); 1761 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1762 ha->host_no)); 1763 1764 switch (param) { 1765 case ISCSI_PARAM_CONN_PORT: 1766 case ISCSI_PARAM_CONN_ADDRESS: 1767 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 1768 if (!dst_addr) 1769 return -ENOTCONN; 1770 1771 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 1772 &qla_ep->dst_addr, param, buf); 1773 default: 1774 return -ENOSYS; 1775 } 1776 } 1777 1778 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 1779 struct iscsi_stats *stats) 1780 { 1781 struct iscsi_session *sess; 1782 struct iscsi_cls_session *cls_sess; 1783 struct ddb_entry *ddb_entry; 1784 struct scsi_qla_host *ha; 1785 struct ql_iscsi_stats *ql_iscsi_stats; 1786 int stats_size; 1787 int ret; 1788 dma_addr_t iscsi_stats_dma; 1789 1790 cls_sess = iscsi_conn_to_session(cls_conn); 1791 sess = cls_sess->dd_data; 1792 ddb_entry = sess->dd_data; 1793 ha = ddb_entry->ha; 1794 1795 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 1796 ha->host_no)); 1797 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); 1798 /* Allocate memory */ 1799 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, 1800 &iscsi_stats_dma, GFP_KERNEL); 1801 if (!ql_iscsi_stats) { 1802 ql4_printk(KERN_ERR, ha, 1803 "Unable to allocate memory for iscsi stats\n"); 1804 goto exit_get_stats; 1805 } 1806 1807 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size, 1808 iscsi_stats_dma); 1809 if (ret != QLA_SUCCESS) { 1810 ql4_printk(KERN_ERR, ha, 1811 "Unable to retrieve iscsi stats\n"); 1812 goto free_stats; 1813 } 1814 1815 /* octets */ 1816 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets); 1817 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets); 1818 /* xmit pdus */ 1819 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus); 1820 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus); 1821 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus); 1822 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus); 1823 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus); 1824 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus); 1825 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus); 1826 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus); 1827 /* recv pdus */ 1828 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus); 1829 stats->scsirsp_pdus = 
le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus); 1830 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus); 1831 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus); 1832 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus); 1833 stats->logoutrsp_pdus = 1834 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus); 1835 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus); 1836 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus); 1837 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus); 1838 1839 free_stats: 1840 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, 1841 iscsi_stats_dma); 1842 exit_get_stats: 1843 return; 1844 } 1845 1846 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) 1847 { 1848 struct iscsi_cls_session *session; 1849 struct iscsi_session *sess; 1850 unsigned long flags; 1851 enum blk_eh_timer_return ret = BLK_EH_DONE; 1852 1853 session = starget_to_session(scsi_target(sc->device)); 1854 sess = session->dd_data; 1855 1856 spin_lock_irqsave(&session->lock, flags); 1857 if (session->state == ISCSI_SESSION_FAILED) 1858 ret = BLK_EH_RESET_TIMER; 1859 spin_unlock_irqrestore(&session->lock, flags); 1860 1861 return ret; 1862 } 1863 1864 static void qla4xxx_set_port_speed(struct Scsi_Host *shost) 1865 { 1866 struct scsi_qla_host *ha = to_qla_host(shost); 1867 struct iscsi_cls_host *ihost = shost->shost_data; 1868 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; 1869 1870 qla4xxx_get_firmware_state(ha); 1871 1872 switch (ha->addl_fw_state & 0x0F00) { 1873 case FW_ADDSTATE_LINK_SPEED_10MBPS: 1874 speed = ISCSI_PORT_SPEED_10MBPS; 1875 break; 1876 case FW_ADDSTATE_LINK_SPEED_100MBPS: 1877 speed = ISCSI_PORT_SPEED_100MBPS; 1878 break; 1879 case FW_ADDSTATE_LINK_SPEED_1GBPS: 1880 speed = ISCSI_PORT_SPEED_1GBPS; 1881 break; 1882 case FW_ADDSTATE_LINK_SPEED_10GBPS: 1883 speed = ISCSI_PORT_SPEED_10GBPS; 1884 break; 1885 } 1886 ihost->port_speed = speed; 1887 } 1888 1889 static void qla4xxx_set_port_state(struct Scsi_Host *shost) 1890 { 1891 struct scsi_qla_host *ha = to_qla_host(shost); 1892 struct iscsi_cls_host *ihost = shost->shost_data; 1893 uint32_t state = ISCSI_PORT_STATE_DOWN; 1894 1895 if (test_bit(AF_LINK_UP, &ha->flags)) 1896 state = ISCSI_PORT_STATE_UP; 1897 1898 ihost->port_state = state; 1899 } 1900 1901 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 1902 enum iscsi_host_param param, char *buf) 1903 { 1904 struct scsi_qla_host *ha = to_qla_host(shost); 1905 int len; 1906 1907 switch (param) { 1908 case ISCSI_HOST_PARAM_HWADDRESS: 1909 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); 1910 break; 1911 case ISCSI_HOST_PARAM_IPADDRESS: 1912 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); 1913 break; 1914 case ISCSI_HOST_PARAM_INITIATOR_NAME: 1915 len = sprintf(buf, "%s\n", ha->name_string); 1916 break; 1917 case ISCSI_HOST_PARAM_PORT_STATE: 1918 qla4xxx_set_port_state(shost); 1919 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); 1920 break; 1921 case ISCSI_HOST_PARAM_PORT_SPEED: 1922 qla4xxx_set_port_speed(shost); 1923 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); 1924 break; 1925 default: 1926 return -ENOSYS; 1927 } 1928 1929 return len; 1930 } 1931 1932 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) 1933 { 1934 if (ha->iface_ipv4) 1935 return; 1936 1937 /* IPv4 */ 1938 ha->iface_ipv4 = iscsi_create_iface(ha->host, 1939 &qla4xxx_iscsi_transport, 1940 ISCSI_IFACE_TYPE_IPV4, 0, 0); 1941 if (!ha->iface_ipv4) 
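		/*
		 * iscsi_create_iface() only builds the transport-class iface
		 * object (the sysfs/netlink view used for ISCSI_NET_PARAM
		 * reads and writes); failing here loses that view but does
		 * not change whatever IPv4 configuration the firmware
		 * already holds.
		 */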
1942 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " 1943 "iface0.\n"); 1944 } 1945 1946 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) 1947 { 1948 if (!ha->iface_ipv6_0) 1949 /* IPv6 iface-0 */ 1950 ha->iface_ipv6_0 = iscsi_create_iface(ha->host, 1951 &qla4xxx_iscsi_transport, 1952 ISCSI_IFACE_TYPE_IPV6, 0, 1953 0); 1954 if (!ha->iface_ipv6_0) 1955 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1956 "iface0.\n"); 1957 1958 if (!ha->iface_ipv6_1) 1959 /* IPv6 iface-1 */ 1960 ha->iface_ipv6_1 = iscsi_create_iface(ha->host, 1961 &qla4xxx_iscsi_transport, 1962 ISCSI_IFACE_TYPE_IPV6, 1, 1963 0); 1964 if (!ha->iface_ipv6_1) 1965 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " 1966 "iface1.\n"); 1967 } 1968 1969 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) 1970 { 1971 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) 1972 qla4xxx_create_ipv4_iface(ha); 1973 1974 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) 1975 qla4xxx_create_ipv6_iface(ha); 1976 } 1977 1978 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) 1979 { 1980 if (ha->iface_ipv4) { 1981 iscsi_destroy_iface(ha->iface_ipv4); 1982 ha->iface_ipv4 = NULL; 1983 } 1984 } 1985 1986 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) 1987 { 1988 if (ha->iface_ipv6_0) { 1989 iscsi_destroy_iface(ha->iface_ipv6_0); 1990 ha->iface_ipv6_0 = NULL; 1991 } 1992 if (ha->iface_ipv6_1) { 1993 iscsi_destroy_iface(ha->iface_ipv6_1); 1994 ha->iface_ipv6_1 = NULL; 1995 } 1996 } 1997 1998 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) 1999 { 2000 qla4xxx_destroy_ipv4_iface(ha); 2001 qla4xxx_destroy_ipv6_iface(ha); 2002 } 2003 2004 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, 2005 struct iscsi_iface_param_info *iface_param, 2006 struct addr_ctrl_blk *init_fw_cb) 2007 { 2008 /* 2009 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. 2010 * iface_num 1 is valid only for IPv6 Addr. 
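	 * iface_num 1 only selects the second IPv6 address slot (ipv6_addr1);
	 * every other case below bails out early on "iface_num & 0x1", so e.g.
	 * an ISCSI_NET_PARAM_IPV6_ROUTER write on iface 1 is silently ignored.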
2011 */ 2012 switch (iface_param->param) { 2013 case ISCSI_NET_PARAM_IPV6_ADDR: 2014 if (iface_param->iface_num & 0x1) 2015 /* IPv6 Addr 1 */ 2016 memcpy(init_fw_cb->ipv6_addr1, iface_param->value, 2017 sizeof(init_fw_cb->ipv6_addr1)); 2018 else 2019 /* IPv6 Addr 0 */ 2020 memcpy(init_fw_cb->ipv6_addr0, iface_param->value, 2021 sizeof(init_fw_cb->ipv6_addr0)); 2022 break; 2023 case ISCSI_NET_PARAM_IPV6_LINKLOCAL: 2024 if (iface_param->iface_num & 0x1) 2025 break; 2026 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], 2027 sizeof(init_fw_cb->ipv6_if_id)); 2028 break; 2029 case ISCSI_NET_PARAM_IPV6_ROUTER: 2030 if (iface_param->iface_num & 0x1) 2031 break; 2032 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, 2033 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2034 break; 2035 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: 2036 /* Autocfg applies to even interface */ 2037 if (iface_param->iface_num & 0x1) 2038 break; 2039 2040 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) 2041 init_fw_cb->ipv6_addtl_opts &= 2042 cpu_to_le16( 2043 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2044 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) 2045 init_fw_cb->ipv6_addtl_opts |= 2046 cpu_to_le16( 2047 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); 2048 else 2049 ql4_printk(KERN_ERR, ha, 2050 "Invalid autocfg setting for IPv6 addr\n"); 2051 break; 2052 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: 2053 /* Autocfg applies to even interface */ 2054 if (iface_param->iface_num & 0x1) 2055 break; 2056 2057 if (iface_param->value[0] == 2058 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) 2059 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( 2060 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2061 else if (iface_param->value[0] == 2062 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) 2063 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( 2064 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); 2065 else 2066 ql4_printk(KERN_ERR, ha, 2067 "Invalid autocfg setting for IPv6 linklocal addr\n"); 2068 break; 2069 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: 2070 /* Autocfg applies to even interface */ 2071 if (iface_param->iface_num & 0x1) 2072 break; 2073 2074 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) 2075 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, 2076 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); 2077 break; 2078 case ISCSI_NET_PARAM_IFACE_ENABLE: 2079 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2080 init_fw_cb->ipv6_opts |= 2081 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); 2082 qla4xxx_create_ipv6_iface(ha); 2083 } else { 2084 init_fw_cb->ipv6_opts &= 2085 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & 2086 0xFFFF); 2087 qla4xxx_destroy_ipv6_iface(ha); 2088 } 2089 break; 2090 case ISCSI_NET_PARAM_VLAN_TAG: 2091 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) 2092 break; 2093 init_fw_cb->ipv6_vlan_tag = 2094 cpu_to_be16(*(uint16_t *)iface_param->value); 2095 break; 2096 case ISCSI_NET_PARAM_VLAN_ENABLED: 2097 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2098 init_fw_cb->ipv6_opts |= 2099 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); 2100 else 2101 init_fw_cb->ipv6_opts &= 2102 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); 2103 break; 2104 case ISCSI_NET_PARAM_MTU: 2105 init_fw_cb->eth_mtu_size = 2106 cpu_to_le16(*(uint16_t *)iface_param->value); 2107 break; 2108 case ISCSI_NET_PARAM_PORT: 2109 /* Autocfg applies to even interface */ 2110 if (iface_param->iface_num & 0x1) 2111 break; 2112 2113 init_fw_cb->ipv6_port = 2114 cpu_to_le16(*(uint16_t *)iface_param->value); 2115 break; 2116 case 
ISCSI_NET_PARAM_DELAYED_ACK_EN: 2117 if (iface_param->iface_num & 0x1) 2118 break; 2119 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2120 init_fw_cb->ipv6_tcp_opts |= 2121 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); 2122 else 2123 init_fw_cb->ipv6_tcp_opts &= 2124 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & 2125 0xFFFF); 2126 break; 2127 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2128 if (iface_param->iface_num & 0x1) 2129 break; 2130 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2131 init_fw_cb->ipv6_tcp_opts |= 2132 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2133 else 2134 init_fw_cb->ipv6_tcp_opts &= 2135 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); 2136 break; 2137 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2138 if (iface_param->iface_num & 0x1) 2139 break; 2140 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2141 init_fw_cb->ipv6_tcp_opts |= 2142 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2143 else 2144 init_fw_cb->ipv6_tcp_opts &= 2145 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); 2146 break; 2147 case ISCSI_NET_PARAM_TCP_WSF: 2148 if (iface_param->iface_num & 0x1) 2149 break; 2150 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; 2151 break; 2152 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2153 if (iface_param->iface_num & 0x1) 2154 break; 2155 init_fw_cb->ipv6_tcp_opts &= 2156 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); 2157 init_fw_cb->ipv6_tcp_opts |= 2158 cpu_to_le16((iface_param->value[0] << 1) & 2159 IPV6_TCPOPT_TIMER_SCALE); 2160 break; 2161 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2162 if (iface_param->iface_num & 0x1) 2163 break; 2164 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2165 init_fw_cb->ipv6_tcp_opts |= 2166 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); 2167 else 2168 init_fw_cb->ipv6_tcp_opts &= 2169 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); 2170 break; 2171 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: 2172 if (iface_param->iface_num & 0x1) 2173 break; 2174 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2175 init_fw_cb->ipv6_opts |= 2176 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2177 else 2178 init_fw_cb->ipv6_opts &= 2179 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); 2180 break; 2181 case ISCSI_NET_PARAM_REDIRECT_EN: 2182 if (iface_param->iface_num & 0x1) 2183 break; 2184 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2185 init_fw_cb->ipv6_opts |= 2186 cpu_to_le16(IPV6_OPT_REDIRECT_EN); 2187 else 2188 init_fw_cb->ipv6_opts &= 2189 cpu_to_le16(~IPV6_OPT_REDIRECT_EN); 2190 break; 2191 case ISCSI_NET_PARAM_IPV6_MLD_EN: 2192 if (iface_param->iface_num & 0x1) 2193 break; 2194 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2195 init_fw_cb->ipv6_addtl_opts |= 2196 cpu_to_le16(IPV6_ADDOPT_MLD_EN); 2197 else 2198 init_fw_cb->ipv6_addtl_opts &= 2199 cpu_to_le16(~IPV6_ADDOPT_MLD_EN); 2200 break; 2201 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: 2202 if (iface_param->iface_num & 0x1) 2203 break; 2204 init_fw_cb->ipv6_flow_lbl = 2205 cpu_to_le16(*(uint16_t *)iface_param->value); 2206 break; 2207 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: 2208 if (iface_param->iface_num & 0x1) 2209 break; 2210 init_fw_cb->ipv6_traffic_class = iface_param->value[0]; 2211 break; 2212 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: 2213 if (iface_param->iface_num & 0x1) 2214 break; 2215 init_fw_cb->ipv6_hop_limit = iface_param->value[0]; 2216 break; 2217 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: 2218 if (iface_param->iface_num & 0x1) 2219 break; 2220 init_fw_cb->ipv6_nd_reach_time = 2221 cpu_to_le32(*(uint32_t *)iface_param->value); 2222 break; 2223 case 
ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: 2224 if (iface_param->iface_num & 0x1) 2225 break; 2226 init_fw_cb->ipv6_nd_rexmit_timer = 2227 cpu_to_le32(*(uint32_t *)iface_param->value); 2228 break; 2229 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: 2230 if (iface_param->iface_num & 0x1) 2231 break; 2232 init_fw_cb->ipv6_nd_stale_timeout = 2233 cpu_to_le32(*(uint32_t *)iface_param->value); 2234 break; 2235 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: 2236 if (iface_param->iface_num & 0x1) 2237 break; 2238 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; 2239 break; 2240 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: 2241 if (iface_param->iface_num & 0x1) 2242 break; 2243 init_fw_cb->ipv6_gw_advrt_mtu = 2244 cpu_to_le32(*(uint32_t *)iface_param->value); 2245 break; 2246 default: 2247 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", 2248 iface_param->param); 2249 break; 2250 } 2251 } 2252 2253 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, 2254 struct iscsi_iface_param_info *iface_param, 2255 struct addr_ctrl_blk *init_fw_cb) 2256 { 2257 switch (iface_param->param) { 2258 case ISCSI_NET_PARAM_IPV4_ADDR: 2259 memcpy(init_fw_cb->ipv4_addr, iface_param->value, 2260 sizeof(init_fw_cb->ipv4_addr)); 2261 break; 2262 case ISCSI_NET_PARAM_IPV4_SUBNET: 2263 memcpy(init_fw_cb->ipv4_subnet, iface_param->value, 2264 sizeof(init_fw_cb->ipv4_subnet)); 2265 break; 2266 case ISCSI_NET_PARAM_IPV4_GW: 2267 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, 2268 sizeof(init_fw_cb->ipv4_gw_addr)); 2269 break; 2270 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 2271 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) 2272 init_fw_cb->ipv4_tcp_opts |= 2273 cpu_to_le16(TCPOPT_DHCP_ENABLE); 2274 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) 2275 init_fw_cb->ipv4_tcp_opts &= 2276 cpu_to_le16(~TCPOPT_DHCP_ENABLE); 2277 else 2278 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); 2279 break; 2280 case ISCSI_NET_PARAM_IFACE_ENABLE: 2281 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { 2282 init_fw_cb->ipv4_ip_opts |= 2283 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); 2284 qla4xxx_create_ipv4_iface(ha); 2285 } else { 2286 init_fw_cb->ipv4_ip_opts &= 2287 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & 2288 0xFFFF); 2289 qla4xxx_destroy_ipv4_iface(ha); 2290 } 2291 break; 2292 case ISCSI_NET_PARAM_VLAN_TAG: 2293 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) 2294 break; 2295 init_fw_cb->ipv4_vlan_tag = 2296 cpu_to_be16(*(uint16_t *)iface_param->value); 2297 break; 2298 case ISCSI_NET_PARAM_VLAN_ENABLED: 2299 if (iface_param->value[0] == ISCSI_VLAN_ENABLE) 2300 init_fw_cb->ipv4_ip_opts |= 2301 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); 2302 else 2303 init_fw_cb->ipv4_ip_opts &= 2304 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); 2305 break; 2306 case ISCSI_NET_PARAM_MTU: 2307 init_fw_cb->eth_mtu_size = 2308 cpu_to_le16(*(uint16_t *)iface_param->value); 2309 break; 2310 case ISCSI_NET_PARAM_PORT: 2311 init_fw_cb->ipv4_port = 2312 cpu_to_le16(*(uint16_t *)iface_param->value); 2313 break; 2314 case ISCSI_NET_PARAM_DELAYED_ACK_EN: 2315 if (iface_param->iface_num & 0x1) 2316 break; 2317 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2318 init_fw_cb->ipv4_tcp_opts |= 2319 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); 2320 else 2321 init_fw_cb->ipv4_tcp_opts &= 2322 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & 2323 0xFFFF); 2324 break; 2325 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: 2326 if (iface_param->iface_num & 0x1) 2327 break; 2328 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2329 init_fw_cb->ipv4_tcp_opts 
|= 2330 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); 2331 else 2332 init_fw_cb->ipv4_tcp_opts &= 2333 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); 2334 break; 2335 case ISCSI_NET_PARAM_TCP_WSF_DISABLE: 2336 if (iface_param->iface_num & 0x1) 2337 break; 2338 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2339 init_fw_cb->ipv4_tcp_opts |= 2340 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); 2341 else 2342 init_fw_cb->ipv4_tcp_opts &= 2343 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); 2344 break; 2345 case ISCSI_NET_PARAM_TCP_WSF: 2346 if (iface_param->iface_num & 0x1) 2347 break; 2348 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; 2349 break; 2350 case ISCSI_NET_PARAM_TCP_TIMER_SCALE: 2351 if (iface_param->iface_num & 0x1) 2352 break; 2353 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); 2354 init_fw_cb->ipv4_tcp_opts |= 2355 cpu_to_le16((iface_param->value[0] << 1) & 2356 TCPOPT_TIMER_SCALE); 2357 break; 2358 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: 2359 if (iface_param->iface_num & 0x1) 2360 break; 2361 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2362 init_fw_cb->ipv4_tcp_opts |= 2363 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); 2364 else 2365 init_fw_cb->ipv4_tcp_opts &= 2366 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); 2367 break; 2368 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: 2369 if (iface_param->iface_num & 0x1) 2370 break; 2371 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2372 init_fw_cb->ipv4_tcp_opts |= 2373 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); 2374 else 2375 init_fw_cb->ipv4_tcp_opts &= 2376 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); 2377 break; 2378 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: 2379 if (iface_param->iface_num & 0x1) 2380 break; 2381 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2382 init_fw_cb->ipv4_tcp_opts |= 2383 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); 2384 else 2385 init_fw_cb->ipv4_tcp_opts &= 2386 cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); 2387 break; 2388 case ISCSI_NET_PARAM_IPV4_TOS_EN: 2389 if (iface_param->iface_num & 0x1) 2390 break; 2391 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2392 init_fw_cb->ipv4_ip_opts |= 2393 cpu_to_le16(IPOPT_IPV4_TOS_EN); 2394 else 2395 init_fw_cb->ipv4_ip_opts &= 2396 cpu_to_le16(~IPOPT_IPV4_TOS_EN); 2397 break; 2398 case ISCSI_NET_PARAM_IPV4_TOS: 2399 if (iface_param->iface_num & 0x1) 2400 break; 2401 init_fw_cb->ipv4_tos = iface_param->value[0]; 2402 break; 2403 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: 2404 if (iface_param->iface_num & 0x1) 2405 break; 2406 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2407 init_fw_cb->ipv4_ip_opts |= 2408 cpu_to_le16(IPOPT_GRAT_ARP_EN); 2409 else 2410 init_fw_cb->ipv4_ip_opts &= 2411 cpu_to_le16(~IPOPT_GRAT_ARP_EN); 2412 break; 2413 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: 2414 if (iface_param->iface_num & 0x1) 2415 break; 2416 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2417 init_fw_cb->ipv4_ip_opts |= 2418 cpu_to_le16(IPOPT_ALT_CID_EN); 2419 else 2420 init_fw_cb->ipv4_ip_opts &= 2421 cpu_to_le16(~IPOPT_ALT_CID_EN); 2422 break; 2423 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: 2424 if (iface_param->iface_num & 0x1) 2425 break; 2426 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, 2427 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); 2428 init_fw_cb->ipv4_dhcp_alt_cid_len = 2429 strlen(init_fw_cb->ipv4_dhcp_alt_cid); 2430 break; 2431 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: 2432 if (iface_param->iface_num & 0x1) 2433 break; 2434 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2435 init_fw_cb->ipv4_ip_opts |= 2436 
cpu_to_le16(IPOPT_REQ_VID_EN); 2437 else 2438 init_fw_cb->ipv4_ip_opts &= 2439 cpu_to_le16(~IPOPT_REQ_VID_EN); 2440 break; 2441 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: 2442 if (iface_param->iface_num & 0x1) 2443 break; 2444 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2445 init_fw_cb->ipv4_ip_opts |= 2446 cpu_to_le16(IPOPT_USE_VID_EN); 2447 else 2448 init_fw_cb->ipv4_ip_opts &= 2449 cpu_to_le16(~IPOPT_USE_VID_EN); 2450 break; 2451 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: 2452 if (iface_param->iface_num & 0x1) 2453 break; 2454 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, 2455 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); 2456 init_fw_cb->ipv4_dhcp_vid_len = 2457 strlen(init_fw_cb->ipv4_dhcp_vid); 2458 break; 2459 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: 2460 if (iface_param->iface_num & 0x1) 2461 break; 2462 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2463 init_fw_cb->ipv4_ip_opts |= 2464 cpu_to_le16(IPOPT_LEARN_IQN_EN); 2465 else 2466 init_fw_cb->ipv4_ip_opts &= 2467 cpu_to_le16(~IPOPT_LEARN_IQN_EN); 2468 break; 2469 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: 2470 if (iface_param->iface_num & 0x1) 2471 break; 2472 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) 2473 init_fw_cb->ipv4_ip_opts |= 2474 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); 2475 else 2476 init_fw_cb->ipv4_ip_opts &= 2477 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); 2478 break; 2479 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: 2480 if (iface_param->iface_num & 0x1) 2481 break; 2482 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2483 init_fw_cb->ipv4_ip_opts |= 2484 cpu_to_le16(IPOPT_IN_FORWARD_EN); 2485 else 2486 init_fw_cb->ipv4_ip_opts &= 2487 cpu_to_le16(~IPOPT_IN_FORWARD_EN); 2488 break; 2489 case ISCSI_NET_PARAM_REDIRECT_EN: 2490 if (iface_param->iface_num & 0x1) 2491 break; 2492 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2493 init_fw_cb->ipv4_ip_opts |= 2494 cpu_to_le16(IPOPT_ARP_REDIRECT_EN); 2495 else 2496 init_fw_cb->ipv4_ip_opts &= 2497 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); 2498 break; 2499 case ISCSI_NET_PARAM_IPV4_TTL: 2500 if (iface_param->iface_num & 0x1) 2501 break; 2502 init_fw_cb->ipv4_ttl = iface_param->value[0]; 2503 break; 2504 default: 2505 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", 2506 iface_param->param); 2507 break; 2508 } 2509 } 2510 2511 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, 2512 struct iscsi_iface_param_info *iface_param, 2513 struct addr_ctrl_blk *init_fw_cb) 2514 { 2515 switch (iface_param->param) { 2516 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: 2517 if (iface_param->iface_num & 0x1) 2518 break; 2519 init_fw_cb->def_timeout = 2520 cpu_to_le16(*(uint16_t *)iface_param->value); 2521 break; 2522 case ISCSI_IFACE_PARAM_HDRDGST_EN: 2523 if (iface_param->iface_num & 0x1) 2524 break; 2525 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2526 init_fw_cb->iscsi_opts |= 2527 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); 2528 else 2529 init_fw_cb->iscsi_opts &= 2530 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); 2531 break; 2532 case ISCSI_IFACE_PARAM_DATADGST_EN: 2533 if (iface_param->iface_num & 0x1) 2534 break; 2535 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2536 init_fw_cb->iscsi_opts |= 2537 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); 2538 else 2539 init_fw_cb->iscsi_opts &= 2540 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); 2541 break; 2542 case ISCSI_IFACE_PARAM_IMM_DATA_EN: 2543 if (iface_param->iface_num & 0x1) 2544 break; 2545 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2546 init_fw_cb->iscsi_opts |= 2547 
cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); 2548 else 2549 init_fw_cb->iscsi_opts &= 2550 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); 2551 break; 2552 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: 2553 if (iface_param->iface_num & 0x1) 2554 break; 2555 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2556 init_fw_cb->iscsi_opts |= 2557 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); 2558 else 2559 init_fw_cb->iscsi_opts &= 2560 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); 2561 break; 2562 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: 2563 if (iface_param->iface_num & 0x1) 2564 break; 2565 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2566 init_fw_cb->iscsi_opts |= 2567 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); 2568 else 2569 init_fw_cb->iscsi_opts &= 2570 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); 2571 break; 2572 case ISCSI_IFACE_PARAM_PDU_INORDER_EN: 2573 if (iface_param->iface_num & 0x1) 2574 break; 2575 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2576 init_fw_cb->iscsi_opts |= 2577 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); 2578 else 2579 init_fw_cb->iscsi_opts &= 2580 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); 2581 break; 2582 case ISCSI_IFACE_PARAM_ERL: 2583 if (iface_param->iface_num & 0x1) 2584 break; 2585 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); 2586 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & 2587 ISCSIOPTS_ERL); 2588 break; 2589 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: 2590 if (iface_param->iface_num & 0x1) 2591 break; 2592 init_fw_cb->iscsi_max_pdu_size = 2593 cpu_to_le32(*(uint32_t *)iface_param->value) / 2594 BYTE_UNITS; 2595 break; 2596 case ISCSI_IFACE_PARAM_FIRST_BURST: 2597 if (iface_param->iface_num & 0x1) 2598 break; 2599 init_fw_cb->iscsi_fburst_len = 2600 cpu_to_le32(*(uint32_t *)iface_param->value) / 2601 BYTE_UNITS; 2602 break; 2603 case ISCSI_IFACE_PARAM_MAX_R2T: 2604 if (iface_param->iface_num & 0x1) 2605 break; 2606 init_fw_cb->iscsi_max_outstnd_r2t = 2607 cpu_to_le16(*(uint16_t *)iface_param->value); 2608 break; 2609 case ISCSI_IFACE_PARAM_MAX_BURST: 2610 if (iface_param->iface_num & 0x1) 2611 break; 2612 init_fw_cb->iscsi_max_burst_len = 2613 cpu_to_le32(*(uint32_t *)iface_param->value) / 2614 BYTE_UNITS; 2615 break; 2616 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: 2617 if (iface_param->iface_num & 0x1) 2618 break; 2619 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2620 init_fw_cb->iscsi_opts |= 2621 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); 2622 else 2623 init_fw_cb->iscsi_opts &= 2624 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); 2625 break; 2626 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: 2627 if (iface_param->iface_num & 0x1) 2628 break; 2629 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2630 init_fw_cb->iscsi_opts |= 2631 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); 2632 else 2633 init_fw_cb->iscsi_opts &= 2634 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); 2635 break; 2636 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: 2637 if (iface_param->iface_num & 0x1) 2638 break; 2639 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2640 init_fw_cb->iscsi_opts |= 2641 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); 2642 else 2643 init_fw_cb->iscsi_opts &= 2644 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); 2645 break; 2646 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: 2647 if (iface_param->iface_num & 0x1) 2648 break; 2649 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2650 init_fw_cb->iscsi_opts |= 2651 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2652 else 2653 init_fw_cb->iscsi_opts &= 2654 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); 2655 break; 2656 case 
ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: 2657 if (iface_param->iface_num & 0x1) 2658 break; 2659 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) 2660 init_fw_cb->iscsi_opts |= 2661 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2662 else 2663 init_fw_cb->iscsi_opts &= 2664 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); 2665 break; 2666 default: 2667 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", 2668 iface_param->param); 2669 break; 2670 } 2671 } 2672 2673 static void 2674 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) 2675 { 2676 struct addr_ctrl_blk_def *acb; 2677 acb = (struct addr_ctrl_blk_def *)init_fw_cb; 2678 memset(acb->reserved1, 0, sizeof(acb->reserved1)); 2679 memset(acb->reserved2, 0, sizeof(acb->reserved2)); 2680 memset(acb->reserved3, 0, sizeof(acb->reserved3)); 2681 memset(acb->reserved4, 0, sizeof(acb->reserved4)); 2682 memset(acb->reserved5, 0, sizeof(acb->reserved5)); 2683 memset(acb->reserved6, 0, sizeof(acb->reserved6)); 2684 memset(acb->reserved7, 0, sizeof(acb->reserved7)); 2685 memset(acb->reserved8, 0, sizeof(acb->reserved8)); 2686 memset(acb->reserved9, 0, sizeof(acb->reserved9)); 2687 memset(acb->reserved10, 0, sizeof(acb->reserved10)); 2688 memset(acb->reserved11, 0, sizeof(acb->reserved11)); 2689 memset(acb->reserved12, 0, sizeof(acb->reserved12)); 2690 memset(acb->reserved13, 0, sizeof(acb->reserved13)); 2691 memset(acb->reserved14, 0, sizeof(acb->reserved14)); 2692 memset(acb->reserved15, 0, sizeof(acb->reserved15)); 2693 } 2694 2695 static int 2696 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) 2697 { 2698 struct scsi_qla_host *ha = to_qla_host(shost); 2699 int rval = 0; 2700 struct iscsi_iface_param_info *iface_param = NULL; 2701 struct addr_ctrl_blk *init_fw_cb = NULL; 2702 dma_addr_t init_fw_cb_dma; 2703 uint32_t mbox_cmd[MBOX_REG_COUNT]; 2704 uint32_t mbox_sts[MBOX_REG_COUNT]; 2705 uint32_t rem = len; 2706 struct nlattr *attr; 2707 2708 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 2709 sizeof(struct addr_ctrl_blk), 2710 &init_fw_cb_dma, GFP_KERNEL); 2711 if (!init_fw_cb) { 2712 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2713 __func__); 2714 return -ENOMEM; 2715 } 2716 2717 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 2718 memset(&mbox_sts, 0, sizeof(mbox_sts)); 2719 2720 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { 2721 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); 2722 rval = -EIO; 2723 goto exit_init_fw_cb; 2724 } 2725 2726 nla_for_each_attr(attr, data, len, rem) { 2727 iface_param = nla_data(attr); 2728 2729 if (iface_param->param_type == ISCSI_NET_PARAM) { 2730 switch (iface_param->iface_type) { 2731 case ISCSI_IFACE_TYPE_IPV4: 2732 switch (iface_param->iface_num) { 2733 case 0: 2734 qla4xxx_set_ipv4(ha, iface_param, 2735 init_fw_cb); 2736 break; 2737 default: 2738 /* Cannot have more than one IPv4 interface */ 2739 ql4_printk(KERN_ERR, ha, 2740 "Invalid IPv4 iface number = %d\n", 2741 iface_param->iface_num); 2742 break; 2743 } 2744 break; 2745 case ISCSI_IFACE_TYPE_IPV6: 2746 switch (iface_param->iface_num) { 2747 case 0: 2748 case 1: 2749 qla4xxx_set_ipv6(ha, iface_param, 2750 init_fw_cb); 2751 break; 2752 default: 2753 /* Cannot have more than two IPv6 interface */ 2754 ql4_printk(KERN_ERR, ha, 2755 "Invalid IPv6 iface number = %d\n", 2756 iface_param->iface_num); 2757 break; 2758 } 2759 break; 2760 default: 2761 ql4_printk(KERN_ERR, ha, 2762 "Invalid iface type\n"); 2763 break; 2764 } 2765 } else if (iface_param->param_type == ISCSI_IFACE_PARAM) 
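			/*
			 * iSCSI operational parameters (digests, R2T and
			 * burst limits, CHAP policy, ...) live in the single
			 * firmware init control block, so no per-iface
			 * dispatch is needed here; qla4xxx_set_iscsi_param()
			 * itself ignores odd iface numbers.
			 */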
{ 2766 qla4xxx_set_iscsi_param(ha, iface_param, 2767 init_fw_cb); 2768 } else { 2769 continue; 2770 } 2771 } 2772 2773 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); 2774 2775 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, 2776 sizeof(struct addr_ctrl_blk), 2777 FLASH_OPT_RMW_COMMIT); 2778 if (rval != QLA_SUCCESS) { 2779 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", 2780 __func__); 2781 rval = -EIO; 2782 goto exit_init_fw_cb; 2783 } 2784 2785 rval = qla4xxx_disable_acb(ha); 2786 if (rval != QLA_SUCCESS) { 2787 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", 2788 __func__); 2789 rval = -EIO; 2790 goto exit_init_fw_cb; 2791 } 2792 2793 wait_for_completion_timeout(&ha->disable_acb_comp, 2794 DISABLE_ACB_TOV * HZ); 2795 2796 qla4xxx_initcb_to_acb(init_fw_cb); 2797 2798 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); 2799 if (rval != QLA_SUCCESS) { 2800 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", 2801 __func__); 2802 rval = -EIO; 2803 goto exit_init_fw_cb; 2804 } 2805 2806 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); 2807 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, 2808 init_fw_cb_dma); 2809 2810 exit_init_fw_cb: 2811 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), 2812 init_fw_cb, init_fw_cb_dma); 2813 2814 return rval; 2815 } 2816 2817 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, 2818 enum iscsi_param param, char *buf) 2819 { 2820 struct iscsi_session *sess = cls_sess->dd_data; 2821 struct ddb_entry *ddb_entry = sess->dd_data; 2822 struct scsi_qla_host *ha = ddb_entry->ha; 2823 struct iscsi_cls_conn *cls_conn = ddb_entry->conn; 2824 struct ql4_chap_table chap_tbl; 2825 int rval, len; 2826 uint16_t idx; 2827 2828 memset(&chap_tbl, 0, sizeof(chap_tbl)); 2829 switch (param) { 2830 case ISCSI_PARAM_CHAP_IN_IDX: 2831 rval = qla4xxx_get_chap_index(ha, sess->username_in, 2832 sess->password_in, BIDI_CHAP, 2833 &idx); 2834 if (rval) 2835 len = sprintf(buf, "\n"); 2836 else 2837 len = sprintf(buf, "%hu\n", idx); 2838 break; 2839 case ISCSI_PARAM_CHAP_OUT_IDX: 2840 if (ddb_entry->ddb_type == FLASH_DDB) { 2841 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 2842 idx = ddb_entry->chap_tbl_idx; 2843 rval = QLA_SUCCESS; 2844 } else { 2845 rval = QLA_ERROR; 2846 } 2847 } else { 2848 rval = qla4xxx_get_chap_index(ha, sess->username, 2849 sess->password, 2850 LOCAL_CHAP, &idx); 2851 } 2852 if (rval) 2853 len = sprintf(buf, "\n"); 2854 else 2855 len = sprintf(buf, "%hu\n", idx); 2856 break; 2857 case ISCSI_PARAM_USERNAME: 2858 case ISCSI_PARAM_PASSWORD: 2859 /* First, populate session username and password for FLASH DDB, 2860 * if not already done. This happens when session login fails 2861 * for a FLASH DDB. 
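		 * The name/secret pair is read back from the adapter's CHAP
		 * table at chap_tbl_idx and pushed into the session via
		 * iscsi_set_param(), so a later ISCSI_PARAM_USERNAME or
		 * ISCSI_PARAM_PASSWORD query returns the real credentials.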
2862 */ 2863 if (ddb_entry->ddb_type == FLASH_DDB && 2864 ddb_entry->chap_tbl_idx != INVALID_ENTRY && 2865 !sess->username && !sess->password) { 2866 idx = ddb_entry->chap_tbl_idx; 2867 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 2868 chap_tbl.secret, 2869 idx); 2870 if (!rval) { 2871 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 2872 (char *)chap_tbl.name, 2873 strlen((char *)chap_tbl.name)); 2874 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 2875 (char *)chap_tbl.secret, 2876 chap_tbl.secret_len); 2877 } 2878 } 2879 /* allow fall-through */ 2880 default: 2881 return iscsi_session_get_param(cls_sess, param, buf); 2882 } 2883 2884 return len; 2885 } 2886 2887 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, 2888 enum iscsi_param param, char *buf) 2889 { 2890 struct iscsi_conn *conn; 2891 struct qla_conn *qla_conn; 2892 struct sockaddr *dst_addr; 2893 2894 conn = cls_conn->dd_data; 2895 qla_conn = conn->dd_data; 2896 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; 2897 2898 switch (param) { 2899 case ISCSI_PARAM_CONN_PORT: 2900 case ISCSI_PARAM_CONN_ADDRESS: 2901 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2902 dst_addr, param, buf); 2903 default: 2904 return iscsi_conn_get_param(cls_conn, param, buf); 2905 } 2906 } 2907 2908 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) 2909 { 2910 uint32_t mbx_sts = 0; 2911 uint16_t tmp_ddb_index; 2912 int ret; 2913 2914 get_ddb_index: 2915 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); 2916 2917 if (tmp_ddb_index >= MAX_DDB_ENTRIES) { 2918 DEBUG2(ql4_printk(KERN_INFO, ha, 2919 "Free DDB index not available\n")); 2920 ret = QLA_ERROR; 2921 goto exit_get_ddb_index; 2922 } 2923 2924 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map)) 2925 goto get_ddb_index; 2926 2927 DEBUG2(ql4_printk(KERN_INFO, ha, 2928 "Found a free DDB index at %d\n", tmp_ddb_index)); 2929 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts); 2930 if (ret == QLA_ERROR) { 2931 if (mbx_sts == MBOX_STS_COMMAND_ERROR) { 2932 ql4_printk(KERN_INFO, ha, 2933 "DDB index = %d not available trying next\n", 2934 tmp_ddb_index); 2935 goto get_ddb_index; 2936 } 2937 DEBUG2(ql4_printk(KERN_INFO, ha, 2938 "Free FW DDB not available\n")); 2939 } 2940 2941 *ddb_index = tmp_ddb_index; 2942 2943 exit_get_ddb_index: 2944 return ret; 2945 } 2946 2947 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha, 2948 struct ddb_entry *ddb_entry, 2949 char *existing_ipaddr, 2950 char *user_ipaddr) 2951 { 2952 uint8_t dst_ipaddr[IPv6_ADDR_LEN]; 2953 char formatted_ipaddr[DDB_IPADDR_LEN]; 2954 int status = QLA_SUCCESS, ret = 0; 2955 2956 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) { 2957 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2958 '\0', NULL); 2959 if (ret == 0) { 2960 status = QLA_ERROR; 2961 goto out_match; 2962 } 2963 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr); 2964 } else { 2965 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, 2966 '\0', NULL); 2967 if (ret == 0) { 2968 status = QLA_ERROR; 2969 goto out_match; 2970 } 2971 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr); 2972 } 2973 2974 if (strcmp(existing_ipaddr, formatted_ipaddr)) 2975 status = QLA_ERROR; 2976 2977 out_match: 2978 return status; 2979 } 2980 2981 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha, 2982 struct iscsi_cls_conn *cls_conn) 2983 { 2984 int idx = 0, max_ddbs, rval; 2985 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 
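	/*
	 * Walk every flash DDB the firmware already owns and compare it with
	 * the session userspace wants to log in, roughly:
	 *
	 *   match = !strcmp(existing->targetname, new->targetname) &&
	 *           addresses_equal(existing, new) &&
	 *           existing->persistent_port == new->persistent_port;
	 *
	 * (addresses_equal() above stands in for qla4xxx_match_ipaddress().)
	 * A QLA_SUCCESS return makes qla4xxx_conn_start() refuse the request,
	 * since a duplicate login could make the target drop the existing
	 * firmware-owned connection.
	 */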
2986 struct iscsi_session *sess, *existing_sess; 2987 struct iscsi_conn *conn, *existing_conn; 2988 struct ddb_entry *ddb_entry; 2989 2990 sess = cls_sess->dd_data; 2991 conn = cls_conn->dd_data; 2992 2993 if (sess->targetname == NULL || 2994 conn->persistent_address == NULL || 2995 conn->persistent_port == 0) 2996 return QLA_ERROR; 2997 2998 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 2999 MAX_DEV_DB_ENTRIES; 3000 3001 for (idx = 0; idx < max_ddbs; idx++) { 3002 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 3003 if (ddb_entry == NULL) 3004 continue; 3005 3006 if (ddb_entry->ddb_type != FLASH_DDB) 3007 continue; 3008 3009 existing_sess = ddb_entry->sess->dd_data; 3010 existing_conn = ddb_entry->conn->dd_data; 3011 3012 if (existing_sess->targetname == NULL || 3013 existing_conn->persistent_address == NULL || 3014 existing_conn->persistent_port == 0) 3015 continue; 3016 3017 DEBUG2(ql4_printk(KERN_INFO, ha, 3018 "IQN = %s User IQN = %s\n", 3019 existing_sess->targetname, 3020 sess->targetname)); 3021 3022 DEBUG2(ql4_printk(KERN_INFO, ha, 3023 "IP = %s User IP = %s\n", 3024 existing_conn->persistent_address, 3025 conn->persistent_address)); 3026 3027 DEBUG2(ql4_printk(KERN_INFO, ha, 3028 "Port = %d User Port = %d\n", 3029 existing_conn->persistent_port, 3030 conn->persistent_port)); 3031 3032 if (strcmp(existing_sess->targetname, sess->targetname)) 3033 continue; 3034 rval = qla4xxx_match_ipaddress(ha, ddb_entry, 3035 existing_conn->persistent_address, 3036 conn->persistent_address); 3037 if (rval == QLA_ERROR) 3038 continue; 3039 if (existing_conn->persistent_port != conn->persistent_port) 3040 continue; 3041 break; 3042 } 3043 3044 if (idx == max_ddbs) 3045 return QLA_ERROR; 3046 3047 DEBUG2(ql4_printk(KERN_INFO, ha, 3048 "Match found in fwdb sessions\n")); 3049 return QLA_SUCCESS; 3050 } 3051 3052 static struct iscsi_cls_session * 3053 qla4xxx_session_create(struct iscsi_endpoint *ep, 3054 uint16_t cmds_max, uint16_t qdepth, 3055 uint32_t initial_cmdsn) 3056 { 3057 struct iscsi_cls_session *cls_sess; 3058 struct scsi_qla_host *ha; 3059 struct qla_endpoint *qla_ep; 3060 struct ddb_entry *ddb_entry; 3061 uint16_t ddb_index; 3062 struct iscsi_session *sess; 3063 struct sockaddr *dst_addr; 3064 int ret; 3065 3066 if (!ep) { 3067 printk(KERN_ERR "qla4xxx: missing ep.\n"); 3068 return NULL; 3069 } 3070 3071 qla_ep = ep->dd_data; 3072 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 3073 ha = to_qla_host(qla_ep->host); 3074 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, 3075 ha->host_no)); 3076 3077 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 3078 if (ret == QLA_ERROR) 3079 return NULL; 3080 3081 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, 3082 cmds_max, sizeof(struct ddb_entry), 3083 sizeof(struct ql4_task_data), 3084 initial_cmdsn, ddb_index); 3085 if (!cls_sess) 3086 return NULL; 3087 3088 sess = cls_sess->dd_data; 3089 ddb_entry = sess->dd_data; 3090 ddb_entry->fw_ddb_index = ddb_index; 3091 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3092 ddb_entry->ha = ha; 3093 ddb_entry->sess = cls_sess; 3094 ddb_entry->unblock_sess = qla4xxx_unblock_ddb; 3095 ddb_entry->ddb_change = qla4xxx_ddb_change; 3096 clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); 3097 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 3098 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; 3099 ha->tot_ddbs++; 3100 3101 return cls_sess; 3102 } 3103 3104 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) 3105 { 3106 
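	/*
	 * Wait up to LOGOUT_TOV seconds for the firmware DDB to reach
	 * DDB_DS_NO_CONNECTION_ACTIVE or DDB_DS_SESSION_FAILED before
	 * clearing the DDB entry and tearing down the iscsi class session.
	 */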
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	unsigned long flags, wtime;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t ddb_state;
	int ret;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
			  ha->host_no));

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto destroy_session;
	}

	wtime = jiffies + (HZ * LOGOUT_TOV);
	do {
		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
					      fw_ddb_entry, fw_ddb_entry_dma,
					      NULL, NULL, &ddb_state, NULL,
					      NULL, NULL);
		if (ret == QLA_ERROR)
			goto destroy_session;

		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
		    (ddb_state == DDB_DS_SESSION_FAILED))
			goto destroy_session;

		schedule_timeout_uninterruptible(HZ);
	} while ((time_after(wtime, jiffies)));

destroy_session:
	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
	if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
		clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_free_ddb(ha, ddb_entry);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	iscsi_session_teardown(cls_sess);

	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}

static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;

	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
				    conn_idx);
	if (!cls_conn) {
		pr_info("%s: Can not create connection for conn_idx = %u\n",
			__func__, conn_idx);
		return NULL;
	}

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->conn = cls_conn;

	ha = ddb_entry->ha;
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
			  conn_idx));
	return cls_conn;
}

static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
			     struct iscsi_cls_conn *cls_conn,
			     uint64_t transport_fd, int is_leading)
{
	struct iscsi_conn *conn;
	struct qla_conn *qla_conn;
	struct iscsi_endpoint *ep;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct iscsi_session *sess;

	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
			  cls_session->sid, cls_conn->cid));

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;
	ep = iscsi_lookup_endpoint(transport_fd);
	/* transport_fd may name an endpoint that was already torn down */
	if (!ep)
		return -EINVAL;
	conn = cls_conn->dd_data;
	qla_conn = conn->dd_data;
	qla_conn->qla_ep = ep->dd_data;
	return 0;
}

static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct
dev_db_entry *fw_ddb_entry = NULL; 3220 dma_addr_t fw_ddb_entry_dma; 3221 uint32_t mbx_sts = 0; 3222 int ret = 0; 3223 int status = QLA_SUCCESS; 3224 3225 sess = cls_sess->dd_data; 3226 ddb_entry = sess->dd_data; 3227 ha = ddb_entry->ha; 3228 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, 3229 cls_sess->sid, cls_conn->cid)); 3230 3231 /* Check if we have matching FW DDB, if yes then do not 3232 * login to this target. This could cause target to logout previous 3233 * connection 3234 */ 3235 ret = qla4xxx_match_fwdb_session(ha, cls_conn); 3236 if (ret == QLA_SUCCESS) { 3237 ql4_printk(KERN_INFO, ha, 3238 "Session already exist in FW.\n"); 3239 ret = -EEXIST; 3240 goto exit_conn_start; 3241 } 3242 3243 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3244 &fw_ddb_entry_dma, GFP_KERNEL); 3245 if (!fw_ddb_entry) { 3246 ql4_printk(KERN_ERR, ha, 3247 "%s: Unable to allocate dma buffer\n", __func__); 3248 ret = -ENOMEM; 3249 goto exit_conn_start; 3250 } 3251 3252 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts); 3253 if (ret) { 3254 /* If iscsid is stopped and started then no need to do 3255 * set param again since ddb state will be already 3256 * active and FW does not allow set ddb to an 3257 * active session. 3258 */ 3259 if (mbx_sts) 3260 if (ddb_entry->fw_ddb_device_state == 3261 DDB_DS_SESSION_ACTIVE) { 3262 ddb_entry->unblock_sess(ddb_entry->sess); 3263 goto exit_set_param; 3264 } 3265 3266 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n", 3267 __func__, ddb_entry->fw_ddb_index); 3268 goto exit_conn_start; 3269 } 3270 3271 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); 3272 if (status == QLA_ERROR) { 3273 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, 3274 sess->targetname); 3275 ret = -EINVAL; 3276 goto exit_conn_start; 3277 } 3278 3279 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) 3280 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; 3281 3282 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__, 3283 ddb_entry->fw_ddb_device_state)); 3284 3285 exit_set_param: 3286 ret = 0; 3287 3288 exit_conn_start: 3289 if (fw_ddb_entry) 3290 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3291 fw_ddb_entry, fw_ddb_entry_dma); 3292 return ret; 3293 } 3294 3295 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn) 3296 { 3297 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); 3298 struct iscsi_session *sess; 3299 struct scsi_qla_host *ha; 3300 struct ddb_entry *ddb_entry; 3301 int options; 3302 3303 sess = cls_sess->dd_data; 3304 ddb_entry = sess->dd_data; 3305 ha = ddb_entry->ha; 3306 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__, 3307 cls_conn->cid)); 3308 3309 options = LOGOUT_OPTION_CLOSE_SESSION; 3310 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) 3311 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 3312 } 3313 3314 static void qla4xxx_task_work(struct work_struct *wdata) 3315 { 3316 struct ql4_task_data *task_data; 3317 struct scsi_qla_host *ha; 3318 struct passthru_status *sts; 3319 struct iscsi_task *task; 3320 struct iscsi_hdr *hdr; 3321 uint8_t *data; 3322 uint32_t data_len; 3323 struct iscsi_conn *conn; 3324 int hdr_len; 3325 itt_t itt; 3326 3327 task_data = container_of(wdata, struct ql4_task_data, task_work); 3328 ha = task_data->ha; 3329 task = task_data->task; 3330 sts = &task_data->sts; 3331 hdr_len = sizeof(struct iscsi_hdr); 3332 3333 DEBUG3(printk(KERN_INFO 
"Status returned\n")); 3334 DEBUG3(qla4xxx_dump_buffer(sts, 64)); 3335 DEBUG3(printk(KERN_INFO "Response buffer")); 3336 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64)); 3337 3338 conn = task->conn; 3339 3340 switch (sts->completionStatus) { 3341 case PASSTHRU_STATUS_COMPLETE: 3342 hdr = (struct iscsi_hdr *)task_data->resp_buffer; 3343 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */ 3344 itt = sts->handle; 3345 hdr->itt = itt; 3346 data = task_data->resp_buffer + hdr_len; 3347 data_len = task_data->resp_len - hdr_len; 3348 iscsi_complete_pdu(conn, hdr, data, data_len); 3349 break; 3350 default: 3351 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n", 3352 sts->completionStatus); 3353 break; 3354 } 3355 return; 3356 } 3357 3358 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 3359 { 3360 struct ql4_task_data *task_data; 3361 struct iscsi_session *sess; 3362 struct ddb_entry *ddb_entry; 3363 struct scsi_qla_host *ha; 3364 int hdr_len; 3365 3366 sess = task->conn->session; 3367 ddb_entry = sess->dd_data; 3368 ha = ddb_entry->ha; 3369 task_data = task->dd_data; 3370 memset(task_data, 0, sizeof(struct ql4_task_data)); 3371 3372 if (task->sc) { 3373 ql4_printk(KERN_INFO, ha, 3374 "%s: SCSI Commands not implemented\n", __func__); 3375 return -EINVAL; 3376 } 3377 3378 hdr_len = sizeof(struct iscsi_hdr); 3379 task_data->ha = ha; 3380 task_data->task = task; 3381 3382 if (task->data_count) { 3383 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, 3384 task->data_count, 3385 PCI_DMA_TODEVICE); 3386 } 3387 3388 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3389 __func__, task->conn->max_recv_dlength, hdr_len)); 3390 3391 task_data->resp_len = task->conn->max_recv_dlength + hdr_len; 3392 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev, 3393 task_data->resp_len, 3394 &task_data->resp_dma, 3395 GFP_ATOMIC); 3396 if (!task_data->resp_buffer) 3397 goto exit_alloc_pdu; 3398 3399 task_data->req_len = task->data_count + hdr_len; 3400 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev, 3401 task_data->req_len, 3402 &task_data->req_dma, 3403 GFP_ATOMIC); 3404 if (!task_data->req_buffer) 3405 goto exit_alloc_pdu; 3406 3407 task->hdr = task_data->req_buffer; 3408 3409 INIT_WORK(&task_data->task_work, qla4xxx_task_work); 3410 3411 return 0; 3412 3413 exit_alloc_pdu: 3414 if (task_data->resp_buffer) 3415 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3416 task_data->resp_buffer, task_data->resp_dma); 3417 3418 if (task_data->req_buffer) 3419 dma_free_coherent(&ha->pdev->dev, task_data->req_len, 3420 task_data->req_buffer, task_data->req_dma); 3421 return -ENOMEM; 3422 } 3423 3424 static void qla4xxx_task_cleanup(struct iscsi_task *task) 3425 { 3426 struct ql4_task_data *task_data; 3427 struct iscsi_session *sess; 3428 struct ddb_entry *ddb_entry; 3429 struct scsi_qla_host *ha; 3430 int hdr_len; 3431 3432 hdr_len = sizeof(struct iscsi_hdr); 3433 sess = task->conn->session; 3434 ddb_entry = sess->dd_data; 3435 ha = ddb_entry->ha; 3436 task_data = task->dd_data; 3437 3438 if (task->data_count) { 3439 dma_unmap_single(&ha->pdev->dev, task_data->data_dma, 3440 task->data_count, PCI_DMA_TODEVICE); 3441 } 3442 3443 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3444 __func__, task->conn->max_recv_dlength, hdr_len)); 3445 3446 dma_free_coherent(&ha->pdev->dev, task_data->resp_len, 3447 task_data->resp_buffer, task_data->resp_dma); 3448 dma_free_coherent(&ha->pdev->dev, 
task_data->req_len, 3449 task_data->req_buffer, task_data->req_dma); 3450 return; 3451 } 3452 3453 static int qla4xxx_task_xmit(struct iscsi_task *task) 3454 { 3455 struct scsi_cmnd *sc = task->sc; 3456 struct iscsi_session *sess = task->conn->session; 3457 struct ddb_entry *ddb_entry = sess->dd_data; 3458 struct scsi_qla_host *ha = ddb_entry->ha; 3459 3460 if (!sc) 3461 return qla4xxx_send_passthru0(task); 3462 3463 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", 3464 __func__); 3465 return -ENOSYS; 3466 } 3467 3468 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, 3469 struct iscsi_bus_flash_conn *conn, 3470 struct dev_db_entry *fw_ddb_entry) 3471 { 3472 unsigned long options = 0; 3473 int rc = 0; 3474 3475 options = le16_to_cpu(fw_ddb_entry->options); 3476 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3477 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3478 rc = iscsi_switch_str_param(&sess->portal_type, 3479 PORTAL_TYPE_IPV6); 3480 if (rc) 3481 goto exit_copy; 3482 } else { 3483 rc = iscsi_switch_str_param(&sess->portal_type, 3484 PORTAL_TYPE_IPV4); 3485 if (rc) 3486 goto exit_copy; 3487 } 3488 3489 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3490 &options); 3491 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3492 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); 3493 3494 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3495 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3496 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3497 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3498 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3499 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3500 &options); 3501 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3502 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3503 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); 3504 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3505 &options); 3506 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3507 sess->discovery_auth_optional = 3508 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3509 if (test_bit(ISCSIOPT_ERL1, &options)) 3510 sess->erl |= BIT_1; 3511 if (test_bit(ISCSIOPT_ERL0, &options)) 3512 sess->erl |= BIT_0; 3513 3514 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3515 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3516 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3517 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3518 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3519 conn->tcp_timer_scale |= BIT_3; 3520 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3521 conn->tcp_timer_scale |= BIT_2; 3522 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3523 conn->tcp_timer_scale |= BIT_1; 3524 3525 conn->tcp_timer_scale >>= 1; 3526 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3527 3528 options = le16_to_cpu(fw_ddb_entry->ip_options); 3529 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3530 3531 conn->max_recv_dlength = BYTE_UNITS * 3532 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3533 conn->max_xmit_dlength = BYTE_UNITS * 3534 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3535 sess->first_burst = BYTE_UNITS * 3536 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3537 sess->max_burst = 
BYTE_UNITS * 3538 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3539 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3540 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3541 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3542 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3543 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3544 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3545 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3546 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); 3547 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); 3548 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3549 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3550 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3551 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); 3552 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); 3553 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3554 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3555 3556 sess->default_taskmgmt_timeout = 3557 le16_to_cpu(fw_ddb_entry->def_timeout); 3558 conn->port = le16_to_cpu(fw_ddb_entry->port); 3559 3560 options = le16_to_cpu(fw_ddb_entry->options); 3561 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3562 if (!conn->ipaddress) { 3563 rc = -ENOMEM; 3564 goto exit_copy; 3565 } 3566 3567 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); 3568 if (!conn->redirect_ipaddr) { 3569 rc = -ENOMEM; 3570 goto exit_copy; 3571 } 3572 3573 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 3574 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); 3575 3576 if (test_bit(OPT_IPV6_DEVICE, &options)) { 3577 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; 3578 3579 conn->link_local_ipv6_addr = kmemdup( 3580 fw_ddb_entry->link_local_ipv6_addr, 3581 IPv6_ADDR_LEN, GFP_KERNEL); 3582 if (!conn->link_local_ipv6_addr) { 3583 rc = -ENOMEM; 3584 goto exit_copy; 3585 } 3586 } else { 3587 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3588 } 3589 3590 if (fw_ddb_entry->iscsi_name[0]) { 3591 rc = iscsi_switch_str_param(&sess->targetname, 3592 (char *)fw_ddb_entry->iscsi_name); 3593 if (rc) 3594 goto exit_copy; 3595 } 3596 3597 if (fw_ddb_entry->iscsi_alias[0]) { 3598 rc = iscsi_switch_str_param(&sess->targetalias, 3599 (char *)fw_ddb_entry->iscsi_alias); 3600 if (rc) 3601 goto exit_copy; 3602 } 3603 3604 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3605 3606 exit_copy: 3607 return rc; 3608 } 3609 3610 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, 3611 struct iscsi_bus_flash_conn *conn, 3612 struct dev_db_entry *fw_ddb_entry) 3613 { 3614 uint16_t options; 3615 int rc = 0; 3616 3617 options = le16_to_cpu(fw_ddb_entry->options); 3618 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); 3619 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3620 options |= BIT_8; 3621 else 3622 options &= ~BIT_8; 3623 3624 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); 3625 SET_BITVAL(sess->discovery_sess, options, BIT_4); 3626 SET_BITVAL(sess->entry_state, options, BIT_3); 3627 fw_ddb_entry->options = cpu_to_le16(options); 3628 3629 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3630 SET_BITVAL(conn->hdrdgst_en, options, BIT_13); 3631 SET_BITVAL(conn->datadgst_en, options, BIT_12); 3632 SET_BITVAL(sess->imm_data_en, options, BIT_11); 3633 SET_BITVAL(sess->initial_r2t_en, options, BIT_10); 3634 SET_BITVAL(sess->dataseq_inorder_en, 
options, BIT_9); 3635 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); 3636 SET_BITVAL(sess->chap_auth_en, options, BIT_7); 3637 SET_BITVAL(conn->snack_req_en, options, BIT_6); 3638 SET_BITVAL(sess->discovery_logout_en, options, BIT_5); 3639 SET_BITVAL(sess->bidi_chap_en, options, BIT_4); 3640 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); 3641 SET_BITVAL(sess->erl & BIT_1, options, BIT_1); 3642 SET_BITVAL(sess->erl & BIT_0, options, BIT_0); 3643 fw_ddb_entry->iscsi_options = cpu_to_le16(options); 3644 3645 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3646 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); 3647 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); 3648 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); 3649 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); 3650 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); 3651 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); 3652 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); 3653 fw_ddb_entry->tcp_options = cpu_to_le16(options); 3654 3655 options = le16_to_cpu(fw_ddb_entry->ip_options); 3656 SET_BITVAL(conn->fragment_disable, options, BIT_4); 3657 fw_ddb_entry->ip_options = cpu_to_le16(options); 3658 3659 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); 3660 fw_ddb_entry->iscsi_max_rcv_data_seg_len = 3661 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); 3662 fw_ddb_entry->iscsi_max_snd_data_seg_len = 3663 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); 3664 fw_ddb_entry->iscsi_first_burst_len = 3665 cpu_to_le16(sess->first_burst / BYTE_UNITS); 3666 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / 3667 BYTE_UNITS); 3668 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); 3669 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 3670 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 3671 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 3672 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); 3673 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); 3674 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 3675 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 3676 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 3677 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); 3678 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); 3679 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); 3680 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 3681 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 3682 fw_ddb_entry->port = cpu_to_le16(conn->port); 3683 fw_ddb_entry->def_timeout = 3684 cpu_to_le16(sess->default_taskmgmt_timeout); 3685 3686 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) 3687 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; 3688 else 3689 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 3690 3691 if (conn->ipaddress) 3692 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, 3693 sizeof(fw_ddb_entry->ip_addr)); 3694 3695 if (conn->redirect_ipaddr) 3696 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, 3697 sizeof(fw_ddb_entry->tgt_addr)); 3698 3699 if (conn->link_local_ipv6_addr) 3700 memcpy(fw_ddb_entry->link_local_ipv6_addr, 3701 conn->link_local_ipv6_addr, 3702 sizeof(fw_ddb_entry->link_local_ipv6_addr)); 3703 3704 if (sess->targetname) 3705 memcpy(fw_ddb_entry->iscsi_name, sess->targetname, 3706 sizeof(fw_ddb_entry->iscsi_name)); 3707 3708 if (sess->targetalias) 3709 memcpy(fw_ddb_entry->iscsi_alias, 
sess->targetalias, 3710 sizeof(fw_ddb_entry->iscsi_alias)); 3711 3712 COPY_ISID(fw_ddb_entry->isid, sess->isid); 3713 3714 return rc; 3715 } 3716 3717 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, 3718 struct iscsi_session *sess, 3719 struct dev_db_entry *fw_ddb_entry) 3720 { 3721 unsigned long options = 0; 3722 uint16_t ddb_link; 3723 uint16_t disc_parent; 3724 char ip_addr[DDB_IPADDR_LEN]; 3725 3726 options = le16_to_cpu(fw_ddb_entry->options); 3727 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); 3728 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, 3729 &options); 3730 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); 3731 3732 options = le16_to_cpu(fw_ddb_entry->iscsi_options); 3733 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); 3734 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); 3735 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); 3736 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); 3737 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, 3738 &options); 3739 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); 3740 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); 3741 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, 3742 &options); 3743 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); 3744 sess->discovery_auth_optional = 3745 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); 3746 if (test_bit(ISCSIOPT_ERL1, &options)) 3747 sess->erl |= BIT_1; 3748 if (test_bit(ISCSIOPT_ERL0, &options)) 3749 sess->erl |= BIT_0; 3750 3751 options = le16_to_cpu(fw_ddb_entry->tcp_options); 3752 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 3753 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); 3754 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); 3755 if (test_bit(TCPOPT_TIMER_SCALE3, &options)) 3756 conn->tcp_timer_scale |= BIT_3; 3757 if (test_bit(TCPOPT_TIMER_SCALE2, &options)) 3758 conn->tcp_timer_scale |= BIT_2; 3759 if (test_bit(TCPOPT_TIMER_SCALE1, &options)) 3760 conn->tcp_timer_scale |= BIT_1; 3761 3762 conn->tcp_timer_scale >>= 1; 3763 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); 3764 3765 options = le16_to_cpu(fw_ddb_entry->ip_options); 3766 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); 3767 3768 conn->max_recv_dlength = BYTE_UNITS * 3769 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 3770 conn->max_xmit_dlength = BYTE_UNITS * 3771 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); 3772 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); 3773 sess->first_burst = BYTE_UNITS * 3774 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); 3775 sess->max_burst = BYTE_UNITS * 3776 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); 3777 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3778 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); 3779 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 3780 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); 3781 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; 3782 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; 3783 conn->ipv4_tos = fw_ddb_entry->ipv4_tos; 3784 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); 3785 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); 3786 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); 3787 conn->exp_statsn = 
le32_to_cpu(fw_ddb_entry->exp_stat_sn); 3788 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); 3789 COPY_ISID(sess->isid, fw_ddb_entry->isid); 3790 3791 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 3792 if (ddb_link == DDB_ISNS) 3793 disc_parent = ISCSI_DISC_PARENT_ISNS; 3794 else if (ddb_link == DDB_NO_LINK) 3795 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3796 else if (ddb_link < MAX_DDB_ENTRIES) 3797 disc_parent = ISCSI_DISC_PARENT_SENDTGT; 3798 else 3799 disc_parent = ISCSI_DISC_PARENT_UNKNOWN; 3800 3801 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 3802 iscsi_get_discovery_parent_name(disc_parent), 0); 3803 3804 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, 3805 (char *)fw_ddb_entry->iscsi_alias, 0); 3806 3807 options = le16_to_cpu(fw_ddb_entry->options); 3808 if (options & DDB_OPT_IPV6_DEVICE) { 3809 memset(ip_addr, 0, sizeof(ip_addr)); 3810 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); 3811 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, 3812 (char *)ip_addr, 0); 3813 } 3814 } 3815 3816 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 3817 struct dev_db_entry *fw_ddb_entry, 3818 struct iscsi_cls_session *cls_sess, 3819 struct iscsi_cls_conn *cls_conn) 3820 { 3821 int buflen = 0; 3822 struct iscsi_session *sess; 3823 struct ddb_entry *ddb_entry; 3824 struct ql4_chap_table chap_tbl; 3825 struct iscsi_conn *conn; 3826 char ip_addr[DDB_IPADDR_LEN]; 3827 uint16_t options = 0; 3828 3829 sess = cls_sess->dd_data; 3830 ddb_entry = sess->dd_data; 3831 conn = cls_conn->dd_data; 3832 memset(&chap_tbl, 0, sizeof(chap_tbl)); 3833 3834 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3835 3836 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3837 3838 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); 3839 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); 3840 3841 memset(ip_addr, 0, sizeof(ip_addr)); 3842 options = le16_to_cpu(fw_ddb_entry->options); 3843 if (options & DDB_OPT_IPV6_DEVICE) { 3844 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); 3845 3846 memset(ip_addr, 0, sizeof(ip_addr)); 3847 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); 3848 } else { 3849 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); 3850 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); 3851 } 3852 3853 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, 3854 (char *)ip_addr, buflen); 3855 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, 3856 (char *)fw_ddb_entry->iscsi_name, buflen); 3857 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 3858 (char *)ha->name_string, buflen); 3859 3860 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { 3861 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, 3862 chap_tbl.secret, 3863 ddb_entry->chap_tbl_idx)) { 3864 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, 3865 (char *)chap_tbl.name, 3866 strlen((char *)chap_tbl.name)); 3867 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, 3868 (char *)chap_tbl.secret, 3869 chap_tbl.secret_len); 3870 } 3871 } 3872 } 3873 3874 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 3875 struct ddb_entry *ddb_entry) 3876 { 3877 struct iscsi_cls_session *cls_sess; 3878 struct iscsi_cls_conn *cls_conn; 3879 uint32_t ddb_state; 3880 dma_addr_t fw_ddb_entry_dma; 3881 struct dev_db_entry *fw_ddb_entry; 3882 3883 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3884 &fw_ddb_entry_dma, GFP_KERNEL); 3885 if (!fw_ddb_entry) { 3886 ql4_printk(KERN_ERR, ha, 3887 
"%s: Unable to allocate dma buffer\n", __func__); 3888 goto exit_session_conn_fwddb_param; 3889 } 3890 3891 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3892 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3893 NULL, NULL, NULL) == QLA_ERROR) { 3894 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3895 "get_ddb_entry for fw_ddb_index %d\n", 3896 ha->host_no, __func__, 3897 ddb_entry->fw_ddb_index)); 3898 goto exit_session_conn_fwddb_param; 3899 } 3900 3901 cls_sess = ddb_entry->sess; 3902 3903 cls_conn = ddb_entry->conn; 3904 3905 /* Update params */ 3906 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 3907 3908 exit_session_conn_fwddb_param: 3909 if (fw_ddb_entry) 3910 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3911 fw_ddb_entry, fw_ddb_entry_dma); 3912 } 3913 3914 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 3915 struct ddb_entry *ddb_entry) 3916 { 3917 struct iscsi_cls_session *cls_sess; 3918 struct iscsi_cls_conn *cls_conn; 3919 struct iscsi_session *sess; 3920 struct iscsi_conn *conn; 3921 uint32_t ddb_state; 3922 dma_addr_t fw_ddb_entry_dma; 3923 struct dev_db_entry *fw_ddb_entry; 3924 3925 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3926 &fw_ddb_entry_dma, GFP_KERNEL); 3927 if (!fw_ddb_entry) { 3928 ql4_printk(KERN_ERR, ha, 3929 "%s: Unable to allocate dma buffer\n", __func__); 3930 goto exit_session_conn_param; 3931 } 3932 3933 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 3934 fw_ddb_entry_dma, NULL, NULL, &ddb_state, 3935 NULL, NULL, NULL) == QLA_ERROR) { 3936 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " 3937 "get_ddb_entry for fw_ddb_index %d\n", 3938 ha->host_no, __func__, 3939 ddb_entry->fw_ddb_index)); 3940 goto exit_session_conn_param; 3941 } 3942 3943 cls_sess = ddb_entry->sess; 3944 sess = cls_sess->dd_data; 3945 3946 cls_conn = ddb_entry->conn; 3947 conn = cls_conn->dd_data; 3948 3949 /* Update timers after login */ 3950 ddb_entry->default_relogin_timeout = 3951 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && 3952 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? 
3953 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV; 3954 ddb_entry->default_time2wait = 3955 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 3956 3957 /* Update params */ 3958 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 3959 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); 3960 3961 memcpy(sess->initiatorname, ha->name_string, 3962 min(sizeof(ha->name_string), sizeof(sess->initiatorname))); 3963 3964 exit_session_conn_param: 3965 if (fw_ddb_entry) 3966 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 3967 fw_ddb_entry, fw_ddb_entry_dma); 3968 } 3969 3970 /* 3971 * Timer routines 3972 */ 3973 static void qla4xxx_timer(struct timer_list *t); 3974 3975 static void qla4xxx_start_timer(struct scsi_qla_host *ha, 3976 unsigned long interval) 3977 { 3978 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", 3979 __func__, ha->host->host_no)); 3980 timer_setup(&ha->timer, qla4xxx_timer, 0); 3981 ha->timer.expires = jiffies + interval * HZ; 3982 add_timer(&ha->timer); 3983 ha->timer_active = 1; 3984 } 3985 3986 static void qla4xxx_stop_timer(struct scsi_qla_host *ha) 3987 { 3988 del_timer_sync(&ha->timer); 3989 ha->timer_active = 0; 3990 } 3991 3992 /*** 3993 * qla4xxx_mark_device_missing - blocks the session 3994 * @cls_session: Pointer to the session to be blocked 3995 * @ddb_entry: Pointer to device database entry 3996 * 3997 * This routine marks a device missing and close connection. 3998 **/ 3999 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session) 4000 { 4001 iscsi_block_session(cls_session); 4002 } 4003 4004 /** 4005 * qla4xxx_mark_all_devices_missing - mark all devices as missing. 4006 * @ha: Pointer to host adapter structure. 4007 * 4008 * This routine marks a device missing and resets the relogin retry count. 4009 **/ 4010 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha) 4011 { 4012 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing); 4013 } 4014 4015 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, 4016 struct ddb_entry *ddb_entry, 4017 struct scsi_cmnd *cmd) 4018 { 4019 struct srb *srb; 4020 4021 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 4022 if (!srb) 4023 return srb; 4024 4025 kref_init(&srb->srb_ref); 4026 srb->ha = ha; 4027 srb->ddb = ddb_entry; 4028 srb->cmd = cmd; 4029 srb->flags = 0; 4030 CMD_SP(cmd) = (void *)srb; 4031 4032 return srb; 4033 } 4034 4035 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb) 4036 { 4037 struct scsi_cmnd *cmd = srb->cmd; 4038 4039 if (srb->flags & SRB_DMA_VALID) { 4040 scsi_dma_unmap(cmd); 4041 srb->flags &= ~SRB_DMA_VALID; 4042 } 4043 CMD_SP(cmd) = NULL; 4044 } 4045 4046 void qla4xxx_srb_compl(struct kref *ref) 4047 { 4048 struct srb *srb = container_of(ref, struct srb, srb_ref); 4049 struct scsi_cmnd *cmd = srb->cmd; 4050 struct scsi_qla_host *ha = srb->ha; 4051 4052 qla4xxx_srb_free_dma(ha, srb); 4053 4054 mempool_free(srb, ha->srb_mempool); 4055 4056 cmd->scsi_done(cmd); 4057 } 4058 4059 /** 4060 * qla4xxx_queuecommand - scsi layer issues scsi command to driver. 4061 * @host: scsi host 4062 * @cmd: Pointer to Linux's SCSI command structure 4063 * 4064 * Remarks: 4065 * This routine is invoked by Linux to send a SCSI command to the driver. 4066 * The mid-level driver tries to ensure that queuecommand never gets 4067 * invoked concurrently with itself or the interrupt handler (although 4068 * the interrupt handler may call this routine as part of request- 4069 * completion handling). 
Unfortunely, it sometimes calls the scheduler 4070 * in interrupt context which is a big NO! NO!. 4071 **/ 4072 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 4073 { 4074 struct scsi_qla_host *ha = to_qla_host(host); 4075 struct ddb_entry *ddb_entry = cmd->device->hostdata; 4076 struct iscsi_cls_session *sess = ddb_entry->sess; 4077 struct srb *srb; 4078 int rval; 4079 4080 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4081 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) 4082 cmd->result = DID_NO_CONNECT << 16; 4083 else 4084 cmd->result = DID_REQUEUE << 16; 4085 goto qc_fail_command; 4086 } 4087 4088 if (!sess) { 4089 cmd->result = DID_IMM_RETRY << 16; 4090 goto qc_fail_command; 4091 } 4092 4093 rval = iscsi_session_chkready(sess); 4094 if (rval) { 4095 cmd->result = rval; 4096 goto qc_fail_command; 4097 } 4098 4099 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4100 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4101 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4102 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4103 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4104 !test_bit(AF_ONLINE, &ha->flags) || 4105 !test_bit(AF_LINK_UP, &ha->flags) || 4106 test_bit(AF_LOOPBACK, &ha->flags) || 4107 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || 4108 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || 4109 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 4110 goto qc_host_busy; 4111 4112 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); 4113 if (!srb) 4114 goto qc_host_busy; 4115 4116 rval = qla4xxx_send_command_to_isp(ha, srb); 4117 if (rval != QLA_SUCCESS) 4118 goto qc_host_busy_free_sp; 4119 4120 return 0; 4121 4122 qc_host_busy_free_sp: 4123 qla4xxx_srb_free_dma(ha, srb); 4124 mempool_free(srb, ha->srb_mempool); 4125 4126 qc_host_busy: 4127 return SCSI_MLQUEUE_HOST_BUSY; 4128 4129 qc_fail_command: 4130 cmd->scsi_done(cmd); 4131 4132 return 0; 4133 } 4134 4135 /** 4136 * qla4xxx_mem_free - frees memory allocated to adapter 4137 * @ha: Pointer to host adapter structure. 4138 * 4139 * Frees memory previously allocated by qla4xxx_mem_alloc 4140 **/ 4141 static void qla4xxx_mem_free(struct scsi_qla_host *ha) 4142 { 4143 if (ha->queues) 4144 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 4145 ha->queues_dma); 4146 4147 if (ha->fw_dump) 4148 vfree(ha->fw_dump); 4149 4150 ha->queues_len = 0; 4151 ha->queues = NULL; 4152 ha->queues_dma = 0; 4153 ha->request_ring = NULL; 4154 ha->request_dma = 0; 4155 ha->response_ring = NULL; 4156 ha->response_dma = 0; 4157 ha->shadow_regs = NULL; 4158 ha->shadow_regs_dma = 0; 4159 ha->fw_dump = NULL; 4160 ha->fw_dump_size = 0; 4161 4162 /* Free srb pool. 
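Destroying the pool releases its reserved elements; the pointer is cleared afterwards so a repeat call skips this step.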
*/ 4163 if (ha->srb_mempool) 4164 mempool_destroy(ha->srb_mempool); 4165 4166 ha->srb_mempool = NULL; 4167 4168 if (ha->chap_dma_pool) 4169 dma_pool_destroy(ha->chap_dma_pool); 4170 4171 if (ha->chap_list) 4172 vfree(ha->chap_list); 4173 ha->chap_list = NULL; 4174 4175 if (ha->fw_ddb_dma_pool) 4176 dma_pool_destroy(ha->fw_ddb_dma_pool); 4177 4178 /* release io space registers */ 4179 if (is_qla8022(ha)) { 4180 if (ha->nx_pcibase) 4181 iounmap( 4182 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 4183 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4184 if (ha->nx_pcibase) 4185 iounmap( 4186 (struct device_reg_83xx __iomem *)ha->nx_pcibase); 4187 } else if (ha->reg) { 4188 iounmap(ha->reg); 4189 } 4190 4191 if (ha->reset_tmplt.buff) 4192 vfree(ha->reset_tmplt.buff); 4193 4194 pci_release_regions(ha->pdev); 4195 } 4196 4197 /** 4198 * qla4xxx_mem_alloc - allocates memory for use by adapter. 4199 * @ha: Pointer to host adapter structure 4200 * 4201 * Allocates DMA memory for request and response queues. Also allocates memory 4202 * for srbs. 4203 **/ 4204 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) 4205 { 4206 unsigned long align; 4207 4208 /* Allocate contiguous block of DMA memory for queues. */ 4209 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4210 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + 4211 sizeof(struct shadow_regs) + 4212 MEM_ALIGN_VALUE + 4213 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4214 ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len, 4215 &ha->queues_dma, GFP_KERNEL); 4216 if (ha->queues == NULL) { 4217 ql4_printk(KERN_WARNING, ha, 4218 "Memory Allocation failed - queues.\n"); 4219 4220 goto mem_alloc_error_exit; 4221 } 4222 4223 /* 4224 * As per RISC alignment requirements -- the bus-address must be a 4225 * multiple of the request-ring size (in bytes). 4226 */ 4227 align = 0; 4228 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) 4229 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & 4230 (MEM_ALIGN_VALUE - 1)); 4231 4232 /* Update request and response queue pointers. */ 4233 ha->request_dma = ha->queues_dma + align; 4234 ha->request_ring = (struct queue_entry *) (ha->queues + align); 4235 ha->response_dma = ha->queues_dma + align + 4236 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); 4237 ha->response_ring = (struct queue_entry *) (ha->queues + align + 4238 (REQUEST_QUEUE_DEPTH * 4239 QUEUE_SIZE)); 4240 ha->shadow_regs_dma = ha->queues_dma + align + 4241 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + 4242 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); 4243 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + 4244 (REQUEST_QUEUE_DEPTH * 4245 QUEUE_SIZE) + 4246 (RESPONSE_QUEUE_DEPTH * 4247 QUEUE_SIZE)); 4248 4249 /* Allocate memory for srb pool. 
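The pool is backed by the global srb_cachep slab cache and keeps SRB_MIN_REQ elements in reserve so srb allocation can make progress under memory pressure.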
*/ 4250 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 4251 mempool_free_slab, srb_cachep); 4252 if (ha->srb_mempool == NULL) { 4253 ql4_printk(KERN_WARNING, ha, 4254 "Memory Allocation failed - SRB Pool.\n"); 4255 4256 goto mem_alloc_error_exit; 4257 } 4258 4259 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, 4260 CHAP_DMA_BLOCK_SIZE, 8, 0); 4261 4262 if (ha->chap_dma_pool == NULL) { 4263 ql4_printk(KERN_WARNING, ha, 4264 "%s: chap_dma_pool allocation failed..\n", __func__); 4265 goto mem_alloc_error_exit; 4266 } 4267 4268 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, 4269 DDB_DMA_BLOCK_SIZE, 8, 0); 4270 4271 if (ha->fw_ddb_dma_pool == NULL) { 4272 ql4_printk(KERN_WARNING, ha, 4273 "%s: fw_ddb_dma_pool allocation failed..\n", 4274 __func__); 4275 goto mem_alloc_error_exit; 4276 } 4277 4278 return QLA_SUCCESS; 4279 4280 mem_alloc_error_exit: 4281 qla4xxx_mem_free(ha); 4282 return QLA_ERROR; 4283 } 4284 4285 /** 4286 * qla4_8xxx_check_temp - Check the ISP82XX temperature. 4287 * @ha: adapter block pointer. 4288 * 4289 * Note: The caller should not hold the idc lock. 4290 **/ 4291 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) 4292 { 4293 uint32_t temp, temp_state, temp_val; 4294 int status = QLA_SUCCESS; 4295 4296 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); 4297 4298 temp_state = qla82xx_get_temp_state(temp); 4299 temp_val = qla82xx_get_temp_val(temp); 4300 4301 if (temp_state == QLA82XX_TEMP_PANIC) { 4302 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" 4303 " exceeds maximum allowed. Hardware has been shut" 4304 " down.\n", temp_val); 4305 status = QLA_ERROR; 4306 } else if (temp_state == QLA82XX_TEMP_WARN) { 4307 if (ha->temperature == QLA82XX_TEMP_NORMAL) 4308 ql4_printk(KERN_WARNING, ha, "Device temperature %d" 4309 " degrees C exceeds operating range." 4310 " Immediate action needed.\n", temp_val); 4311 } else { 4312 if (ha->temperature == QLA82XX_TEMP_WARN) 4313 ql4_printk(KERN_INFO, ha, "Device temperature is" 4314 " now %d degrees C in normal range.\n", 4315 temp_val); 4316 } 4317 ha->temperature = temp_state; 4318 return status; 4319 } 4320 4321 /** 4322 * qla4_8xxx_check_fw_alive - Check firmware health 4323 * @ha: Pointer to host adapter structure. 
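 *
 * Samples the PEG alive counter on each one-second timer pass; if the
 * counter has not advanced after two consecutive polls, the peg
 * registers are dumped and QLA_ERROR is returned. A counter value of
 * 0xffffffff indicates AER/EEH is in progress and is ignored.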
4324 * 4325 * Context: Interrupt 4326 **/ 4327 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) 4328 { 4329 uint32_t fw_heartbeat_counter; 4330 int status = QLA_SUCCESS; 4331 4332 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, 4333 QLA8XXX_PEG_ALIVE_COUNTER); 4334 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 4335 if (fw_heartbeat_counter == 0xffffffff) { 4336 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 4337 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", 4338 ha->host_no, __func__)); 4339 return status; 4340 } 4341 4342 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { 4343 ha->seconds_since_last_heartbeat++; 4344 /* FW not alive after 2 seconds */ 4345 if (ha->seconds_since_last_heartbeat == 2) { 4346 ha->seconds_since_last_heartbeat = 0; 4347 qla4_8xxx_dump_peg_reg(ha); 4348 status = QLA_ERROR; 4349 } 4350 } else 4351 ha->seconds_since_last_heartbeat = 0; 4352 4353 ha->fw_heartbeat_counter = fw_heartbeat_counter; 4354 return status; 4355 } 4356 4357 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) 4358 { 4359 uint32_t halt_status; 4360 int halt_status_unrecoverable = 0; 4361 4362 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); 4363 4364 if (is_qla8022(ha)) { 4365 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4366 __func__); 4367 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4368 CRB_NIU_XG_PAUSE_CTL_P0 | 4369 CRB_NIU_XG_PAUSE_CTL_P1); 4370 4371 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) 4372 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", 4373 __func__); 4374 if (halt_status & HALT_STATUS_UNRECOVERABLE) 4375 halt_status_unrecoverable = 1; 4376 } else if (is_qla8032(ha) || is_qla8042(ha)) { 4377 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) 4378 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", 4379 __func__); 4380 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) 4381 halt_status_unrecoverable = 1; 4382 } 4383 4384 /* 4385 * Since we cannot change dev_state in interrupt context, 4386 * set appropriate DPC flag then wakeup DPC 4387 */ 4388 if (halt_status_unrecoverable) { 4389 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4390 } else { 4391 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", 4392 __func__); 4393 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4394 } 4395 qla4xxx_mailbox_premature_completion(ha); 4396 qla4xxx_wake_dpc(ha); 4397 } 4398 4399 /** 4400 * qla4_8xxx_watchdog - Poll dev state 4401 * @ha: Pointer to host adapter structure. 
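 *
 * Polling is skipped while a reset is active. Otherwise the routine
 * checks the board temperature, reacts to the NEED_RESET and
 * NEED_QUIESCENT IDC device states, and verifies the firmware
 * heartbeat, setting the appropriate DPC flags and waking the DPC
 * thread.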
4402 * 4403 * Context: Interrupt 4404 **/ 4405 void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 4406 { 4407 uint32_t dev_state; 4408 uint32_t idc_ctrl; 4409 4410 if (is_qla8032(ha) && 4411 (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) 4412 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", 4413 __func__, ha->func_num); 4414 4415 /* don't poll if reset is going on */ 4416 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 4417 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4418 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 4419 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); 4420 4421 if (qla4_8xxx_check_temp(ha)) { 4422 if (is_qla8022(ha)) { 4423 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); 4424 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 4425 CRB_NIU_XG_PAUSE_CTL_P0 | 4426 CRB_NIU_XG_PAUSE_CTL_P1); 4427 } 4428 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 4429 qla4xxx_wake_dpc(ha); 4430 } else if (dev_state == QLA8XXX_DEV_NEED_RESET && 4431 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 4432 4433 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", 4434 __func__); 4435 4436 if (is_qla8032(ha) || is_qla8042(ha)) { 4437 idc_ctrl = qla4_83xx_rd_reg(ha, 4438 QLA83XX_IDC_DRV_CTRL); 4439 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { 4440 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", 4441 __func__); 4442 qla4xxx_mailbox_premature_completion( 4443 ha); 4444 } 4445 } 4446 4447 if ((is_qla8032(ha) || is_qla8042(ha)) || 4448 (is_qla8022(ha) && !ql4xdontresethba)) { 4449 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4450 qla4xxx_wake_dpc(ha); 4451 } 4452 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && 4453 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 4454 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 4455 __func__); 4456 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); 4457 qla4xxx_wake_dpc(ha); 4458 } else { 4459 /* Check firmware health */ 4460 if (qla4_8xxx_check_fw_alive(ha)) 4461 qla4_8xxx_process_fw_error(ha); 4462 } 4463 } 4464 } 4465 4466 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 4467 { 4468 struct iscsi_session *sess; 4469 struct ddb_entry *ddb_entry; 4470 struct scsi_qla_host *ha; 4471 4472 sess = cls_sess->dd_data; 4473 ddb_entry = sess->dd_data; 4474 ha = ddb_entry->ha; 4475 4476 if (!(ddb_entry->ddb_type == FLASH_DDB)) 4477 return; 4478 4479 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && 4480 !iscsi_is_session_online(cls_sess)) { 4481 if (atomic_read(&ddb_entry->retry_relogin_timer) != 4482 INVALID_ENTRY) { 4483 if (atomic_read(&ddb_entry->retry_relogin_timer) == 4484 0) { 4485 atomic_set(&ddb_entry->retry_relogin_timer, 4486 INVALID_ENTRY); 4487 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4488 set_bit(DF_RELOGIN, &ddb_entry->flags); 4489 DEBUG2(ql4_printk(KERN_INFO, ha, 4490 "%s: index [%d] login device\n", 4491 __func__, ddb_entry->fw_ddb_index)); 4492 } else 4493 atomic_dec(&ddb_entry->retry_relogin_timer); 4494 } 4495 } 4496 4497 /* Wait for relogin to timeout */ 4498 if (atomic_read(&ddb_entry->relogin_timer) && 4499 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { 4500 /* 4501 * If the relogin times out and the device is 4502 * still NOT ONLINE then try and relogin again. 
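 * The retry timer is re-armed to default_time2wait plus four seconds
 * and DPC_RELOGIN_DEVICE is set so the DPC thread performs the login.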
4503 */ 4504 if (!iscsi_is_session_online(cls_sess)) { 4505 /* Reset retry relogin timer */ 4506 atomic_inc(&ddb_entry->relogin_retry_count); 4507 DEBUG2(ql4_printk(KERN_INFO, ha, 4508 "%s: index[%d] relogin timed out-retrying" 4509 " relogin (%d), retry (%d)\n", __func__, 4510 ddb_entry->fw_ddb_index, 4511 atomic_read(&ddb_entry->relogin_retry_count), 4512 ddb_entry->default_time2wait + 4)); 4513 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 4514 atomic_set(&ddb_entry->retry_relogin_timer, 4515 ddb_entry->default_time2wait + 4); 4516 } 4517 } 4518 } 4519 4520 /** 4521 * qla4xxx_timer - checks every second for work to do. 4522 * @ha: Pointer to host adapter structure. 4523 **/ 4524 static void qla4xxx_timer(struct timer_list *t) 4525 { 4526 struct scsi_qla_host *ha = from_timer(ha, t, timer); 4527 int start_dpc = 0; 4528 uint16_t w; 4529 4530 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); 4531 4532 /* If we are in the middle of AER/EEH processing 4533 * skip any processing and reschedule the timer 4534 */ 4535 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 4536 mod_timer(&ha->timer, jiffies + HZ); 4537 return; 4538 } 4539 4540 /* Hardware read to trigger an EEH error during mailbox waits. */ 4541 if (!pci_channel_offline(ha->pdev)) 4542 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 4543 4544 if (is_qla80XX(ha)) 4545 qla4_8xxx_watchdog(ha); 4546 4547 if (is_qla40XX(ha)) { 4548 /* Check for heartbeat interval. */ 4549 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 4550 ha->heartbeat_interval != 0) { 4551 ha->seconds_since_last_heartbeat++; 4552 if (ha->seconds_since_last_heartbeat > 4553 ha->heartbeat_interval + 2) 4554 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4555 } 4556 } 4557 4558 /* Process any deferred work. */ 4559 if (!list_empty(&ha->work_list)) 4560 start_dpc++; 4561 4562 /* Wakeup the dpc routine for this adapter, if needed. */ 4563 if (start_dpc || 4564 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 4565 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 4566 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 4567 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 4568 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 4569 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 4570 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 4571 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 4572 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 4573 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || 4574 test_bit(DPC_AEN, &ha->dpc_flags)) { 4575 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 4576 " - dpc flags = 0x%lx\n", 4577 ha->host_no, __func__, ha->dpc_flags)); 4578 qla4xxx_wake_dpc(ha); 4579 } 4580 4581 /* Reschedule timer thread to call us back in one second */ 4582 mod_timer(&ha->timer, jiffies + HZ); 4583 4584 DEBUG2(ha->seconds_since_last_intr++); 4585 } 4586 4587 /** 4588 * qla4xxx_cmd_wait - waits for all outstanding commands to complete 4589 * @ha: Pointer to host adapter structure. 4590 * 4591 * This routine stalls the driver until all outstanding commands are returned. 4592 * Caller must release the Hardware Lock prior to calling this routine. 
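 *
 * Returns QLA_SUCCESS once no outstanding commands remain, or QLA_ERROR
 * if the wait (WAIT_CMD_TOV seconds on ISP-4xxx, half of
 * nx_reset_timeout on ISP-8xxx) expires first.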
4593 **/ 4594 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) 4595 { 4596 uint32_t index = 0; 4597 unsigned long flags; 4598 struct scsi_cmnd *cmd; 4599 unsigned long wtime; 4600 uint32_t wtmo; 4601 4602 if (is_qla40XX(ha)) 4603 wtmo = WAIT_CMD_TOV; 4604 else 4605 wtmo = ha->nx_reset_timeout / 2; 4606 4607 wtime = jiffies + (wtmo * HZ); 4608 4609 DEBUG2(ql4_printk(KERN_INFO, ha, 4610 "Wait up to %u seconds for cmds to complete\n", 4611 wtmo)); 4612 4613 while (!time_after_eq(jiffies, wtime)) { 4614 spin_lock_irqsave(&ha->hardware_lock, flags); 4615 /* Find a command that hasn't completed. */ 4616 for (index = 0; index < ha->host->can_queue; index++) { 4617 cmd = scsi_host_find_tag(ha->host, index); 4618 /* 4619 * We cannot just check if the index is valid, 4620 * becase if we are run from the scsi eh, then 4621 * the scsi/block layer is going to prevent 4622 * the tag from being released. 4623 */ 4624 if (cmd != NULL && CMD_SP(cmd)) 4625 break; 4626 } 4627 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4628 4629 /* If No Commands are pending, wait is complete */ 4630 if (index == ha->host->can_queue) 4631 return QLA_SUCCESS; 4632 4633 msleep(1000); 4634 } 4635 /* If we timed out on waiting for commands to come back 4636 * return ERROR. */ 4637 return QLA_ERROR; 4638 } 4639 4640 int qla4xxx_hw_reset(struct scsi_qla_host *ha) 4641 { 4642 uint32_t ctrl_status; 4643 unsigned long flags = 0; 4644 4645 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); 4646 4647 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) 4648 return QLA_ERROR; 4649 4650 spin_lock_irqsave(&ha->hardware_lock, flags); 4651 4652 /* 4653 * If the SCSI Reset Interrupt bit is set, clear it. 4654 * Otherwise, the Soft Reset won't work. 4655 */ 4656 ctrl_status = readw(&ha->reg->ctrl_status); 4657 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) 4658 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4659 4660 /* Issue Soft Reset */ 4661 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); 4662 readl(&ha->reg->ctrl_status); 4663 4664 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4665 return QLA_SUCCESS; 4666 } 4667 4668 /** 4669 * qla4xxx_soft_reset - performs soft reset. 4670 * @ha: Pointer to host adapter structure. 
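 *
 * Issues the reset via qla4xxx_hw_reset(), waits for the Network Reset
 * interrupt and Soft Reset bits to clear, and falls back to a force
 * soft reset if the other function does not acknowledge within
 * SOFT_RESET_TOV.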
4671 **/ 4672 int qla4xxx_soft_reset(struct scsi_qla_host *ha) 4673 { 4674 uint32_t max_wait_time; 4675 unsigned long flags = 0; 4676 int status; 4677 uint32_t ctrl_status; 4678 4679 status = qla4xxx_hw_reset(ha); 4680 if (status != QLA_SUCCESS) 4681 return status; 4682 4683 status = QLA_ERROR; 4684 /* Wait until the Network Reset Intr bit is cleared */ 4685 max_wait_time = RESET_INTR_TOV; 4686 do { 4687 spin_lock_irqsave(&ha->hardware_lock, flags); 4688 ctrl_status = readw(&ha->reg->ctrl_status); 4689 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4690 4691 if ((ctrl_status & CSR_NET_RESET_INTR) == 0) 4692 break; 4693 4694 msleep(1000); 4695 } while ((--max_wait_time)); 4696 4697 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { 4698 DEBUG2(printk(KERN_WARNING 4699 "scsi%ld: Network Reset Intr not cleared by " 4700 "Network function, clearing it now!\n", 4701 ha->host_no)); 4702 spin_lock_irqsave(&ha->hardware_lock, flags); 4703 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); 4704 readl(&ha->reg->ctrl_status); 4705 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4706 } 4707 4708 /* Wait until the firmware tells us the Soft Reset is done */ 4709 max_wait_time = SOFT_RESET_TOV; 4710 do { 4711 spin_lock_irqsave(&ha->hardware_lock, flags); 4712 ctrl_status = readw(&ha->reg->ctrl_status); 4713 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4714 4715 if ((ctrl_status & CSR_SOFT_RESET) == 0) { 4716 status = QLA_SUCCESS; 4717 break; 4718 } 4719 4720 msleep(1000); 4721 } while ((--max_wait_time)); 4722 4723 /* 4724 * Also, make sure that the SCSI Reset Interrupt bit has been cleared 4725 * after the soft reset has taken place. 4726 */ 4727 spin_lock_irqsave(&ha->hardware_lock, flags); 4728 ctrl_status = readw(&ha->reg->ctrl_status); 4729 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { 4730 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); 4731 readl(&ha->reg->ctrl_status); 4732 } 4733 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4734 4735 /* If soft reset fails then most probably the bios on other 4736 * function is also enabled. 4737 * Since the initialization is sequential the other fn 4738 * wont be able to acknowledge the soft reset. 4739 * Issue a force soft reset to workaround this scenario. 4740 */ 4741 if (max_wait_time == 0) { 4742 /* Issue Force Soft Reset */ 4743 spin_lock_irqsave(&ha->hardware_lock, flags); 4744 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); 4745 readl(&ha->reg->ctrl_status); 4746 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4747 /* Wait until the firmware tells us the Soft Reset is done */ 4748 max_wait_time = SOFT_RESET_TOV; 4749 do { 4750 spin_lock_irqsave(&ha->hardware_lock, flags); 4751 ctrl_status = readw(&ha->reg->ctrl_status); 4752 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4753 4754 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { 4755 status = QLA_SUCCESS; 4756 break; 4757 } 4758 4759 msleep(1000); 4760 } while ((--max_wait_time)); 4761 } 4762 4763 return status; 4764 } 4765 4766 /** 4767 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. 4768 * @ha: Pointer to host adapter structure. 4769 * @res: returned scsi status 4770 * 4771 * This routine is called just prior to a HARD RESET to return all 4772 * outstanding commands back to the Operating System. 4773 * Caller should make sure that the following locks are released 4774 * before this calling routine: Hardware lock, and io_request_lock. 
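 *
 * Each srb found in the active array is completed with the supplied
 * result code and its reference is dropped.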
4775 **/ 4776 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) 4777 { 4778 struct srb *srb; 4779 int i; 4780 unsigned long flags; 4781 4782 spin_lock_irqsave(&ha->hardware_lock, flags); 4783 for (i = 0; i < ha->host->can_queue; i++) { 4784 srb = qla4xxx_del_from_active_array(ha, i); 4785 if (srb != NULL) { 4786 srb->cmd->result = res; 4787 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 4788 } 4789 } 4790 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4791 } 4792 4793 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) 4794 { 4795 clear_bit(AF_ONLINE, &ha->flags); 4796 4797 /* Disable the board */ 4798 ql4_printk(KERN_INFO, ha, "Disabling the board\n"); 4799 4800 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 4801 qla4xxx_mark_all_devices_missing(ha); 4802 clear_bit(AF_INIT_DONE, &ha->flags); 4803 } 4804 4805 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) 4806 { 4807 struct iscsi_session *sess; 4808 struct ddb_entry *ddb_entry; 4809 4810 sess = cls_session->dd_data; 4811 ddb_entry = sess->dd_data; 4812 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; 4813 4814 if (ddb_entry->ddb_type == FLASH_DDB) 4815 iscsi_block_session(ddb_entry->sess); 4816 else 4817 iscsi_session_failure(cls_session->dd_data, 4818 ISCSI_ERR_CONN_FAILED); 4819 } 4820 4821 /** 4822 * qla4xxx_recover_adapter - recovers adapter after a fatal error 4823 * @ha: Pointer to host adapter structure. 4824 **/ 4825 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) 4826 { 4827 int status = QLA_ERROR; 4828 uint8_t reset_chip = 0; 4829 uint32_t dev_state; 4830 unsigned long wait; 4831 4832 /* Stall incoming I/O until we are done */ 4833 scsi_block_requests(ha->host); 4834 clear_bit(AF_ONLINE, &ha->flags); 4835 clear_bit(AF_LINK_UP, &ha->flags); 4836 4837 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); 4838 4839 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 4840 4841 if ((is_qla8032(ha) || is_qla8042(ha)) && 4842 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4843 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 4844 __func__); 4845 /* disable pause frame for ISP83xx */ 4846 qla4_83xx_disable_pause(ha); 4847 } 4848 4849 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 4850 4851 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 4852 reset_chip = 1; 4853 4854 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) 4855 * do not reset adapter, jump to initialize_adapter */ 4856 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4857 status = QLA_SUCCESS; 4858 goto recover_ha_init_adapter; 4859 } 4860 4861 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked 4862 * from eh_host_reset or ioctl module */ 4863 if (is_qla80XX(ha) && !reset_chip && 4864 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 4865 4866 DEBUG2(ql4_printk(KERN_INFO, ha, 4867 "scsi%ld: %s - Performing stop_firmware...\n", 4868 ha->host_no, __func__)); 4869 status = ha->isp_ops->reset_firmware(ha); 4870 if (status == QLA_SUCCESS) { 4871 ha->isp_ops->disable_intrs(ha); 4872 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4873 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4874 } else { 4875 /* If the stop_firmware fails then 4876 * reset the entire chip */ 4877 reset_chip = 1; 4878 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 4879 set_bit(DPC_RESET_HA, &ha->dpc_flags); 4880 } 4881 } 4882 4883 /* Issue full chip reset if recovering from a catastrophic error, 4884 * or if stop_firmware fails for ISP-8xxx. 
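 * For ISP-8xxx, up to FW_ALIVE_WAIT_TOV seconds are first spent
 * watching the firmware heartbeat so pending mailbox commands can be
 * completed prematurely if the firmware is already dead.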
4885 * This is the default case for ISP-4xxx */ 4886 if (is_qla40XX(ha) || reset_chip) { 4887 if (is_qla40XX(ha)) 4888 goto chip_reset; 4889 4890 /* Check if 8XXX firmware is alive or not 4891 * We may have arrived here from NEED_RESET 4892 * detection only */ 4893 if (test_bit(AF_FW_RECOVERY, &ha->flags)) 4894 goto chip_reset; 4895 4896 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ); 4897 while (time_before(jiffies, wait)) { 4898 if (qla4_8xxx_check_fw_alive(ha)) { 4899 qla4xxx_mailbox_premature_completion(ha); 4900 break; 4901 } 4902 4903 set_current_state(TASK_UNINTERRUPTIBLE); 4904 schedule_timeout(HZ); 4905 } 4906 chip_reset: 4907 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 4908 qla4xxx_cmd_wait(ha); 4909 4910 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4911 DEBUG2(ql4_printk(KERN_INFO, ha, 4912 "scsi%ld: %s - Performing chip reset..\n", 4913 ha->host_no, __func__)); 4914 status = ha->isp_ops->reset_chip(ha); 4915 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 4916 } 4917 4918 /* Flush any pending ddb changed AENs */ 4919 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4920 4921 recover_ha_init_adapter: 4922 /* Upon successful firmware/chip reset, re-initialize the adapter */ 4923 if (status == QLA_SUCCESS) { 4924 /* For ISP-4xxx, force function 1 to always initialize 4925 * before function 3 to prevent both funcions from 4926 * stepping on top of the other */ 4927 if (is_qla40XX(ha) && (ha->mac_index == 3)) 4928 ssleep(6); 4929 4930 /* NOTE: AF_ONLINE flag set upon successful completion of 4931 * qla4xxx_initialize_adapter */ 4932 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 4933 if (is_qla80XX(ha) && (status == QLA_ERROR)) { 4934 status = qla4_8xxx_check_init_adapter_retry(ha); 4935 if (status == QLA_ERROR) { 4936 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", 4937 ha->host_no, __func__); 4938 qla4xxx_dead_adapter_cleanup(ha); 4939 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4940 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4941 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4942 &ha->dpc_flags); 4943 goto exit_recover; 4944 } 4945 } 4946 } 4947 4948 /* Retry failed adapter initialization, if necessary 4949 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific) 4950 * case to prevent ping-pong resets between functions */ 4951 if (!test_bit(AF_ONLINE, &ha->flags) && 4952 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 4953 /* Adapter initialization failed, see if we can retry 4954 * resetting the ha. 4955 * Since we don't want to block the DPC for too long 4956 * with multiple resets in the same thread, 4957 * utilize DPC to retry */ 4958 if (is_qla80XX(ha)) { 4959 ha->isp_ops->idc_lock(ha); 4960 dev_state = qla4_8xxx_rd_direct(ha, 4961 QLA8XXX_CRB_DEV_STATE); 4962 ha->isp_ops->idc_unlock(ha); 4963 if (dev_state == QLA8XXX_DEV_FAILED) { 4964 ql4_printk(KERN_INFO, ha, "%s: don't retry " 4965 "recover adapter. 
H/W is in Failed " 4966 "state\n", __func__); 4967 qla4xxx_dead_adapter_cleanup(ha); 4968 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4969 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 4970 clear_bit(DPC_RESET_HA_FW_CONTEXT, 4971 &ha->dpc_flags); 4972 status = QLA_ERROR; 4973 4974 goto exit_recover; 4975 } 4976 } 4977 4978 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { 4979 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; 4980 DEBUG2(printk("scsi%ld: recover adapter - retrying " 4981 "(%d) more times\n", ha->host_no, 4982 ha->retry_reset_ha_cnt)); 4983 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 4984 status = QLA_ERROR; 4985 } else { 4986 if (ha->retry_reset_ha_cnt > 0) { 4987 /* Schedule another Reset HA--DPC will retry */ 4988 ha->retry_reset_ha_cnt--; 4989 DEBUG2(printk("scsi%ld: recover adapter - " 4990 "retry remaining %d\n", 4991 ha->host_no, 4992 ha->retry_reset_ha_cnt)); 4993 status = QLA_ERROR; 4994 } 4995 4996 if (ha->retry_reset_ha_cnt == 0) { 4997 /* Recover adapter retries have been exhausted. 4998 * Adapter DEAD */ 4999 DEBUG2(printk("scsi%ld: recover adapter " 5000 "failed - board disabled\n", 5001 ha->host_no)); 5002 qla4xxx_dead_adapter_cleanup(ha); 5003 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5004 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5005 clear_bit(DPC_RESET_HA_FW_CONTEXT, 5006 &ha->dpc_flags); 5007 status = QLA_ERROR; 5008 } 5009 } 5010 } else { 5011 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5012 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5013 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); 5014 } 5015 5016 exit_recover: 5017 ha->adapter_error_count++; 5018 5019 if (test_bit(AF_ONLINE, &ha->flags)) 5020 ha->isp_ops->enable_intrs(ha); 5021 5022 scsi_unblock_requests(ha->host); 5023 5024 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 5025 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, 5026 status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED")); 5027 5028 return status; 5029 } 5030 5031 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) 5032 { 5033 struct iscsi_session *sess; 5034 struct ddb_entry *ddb_entry; 5035 struct scsi_qla_host *ha; 5036 5037 sess = cls_session->dd_data; 5038 ddb_entry = sess->dd_data; 5039 ha = ddb_entry->ha; 5040 if (!iscsi_is_session_online(cls_session)) { 5041 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 5042 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5043 " unblock session\n", ha->host_no, __func__, 5044 ddb_entry->fw_ddb_index); 5045 iscsi_unblock_session(ddb_entry->sess); 5046 } else { 5047 /* Trigger relogin */ 5048 if (ddb_entry->ddb_type == FLASH_DDB) { 5049 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || 5050 test_bit(DF_DISABLE_RELOGIN, 5051 &ddb_entry->flags))) 5052 qla4xxx_arm_relogin_timer(ddb_entry); 5053 } else 5054 iscsi_session_failure(cls_session->dd_data, 5055 ISCSI_ERR_CONN_FAILED); 5056 } 5057 } 5058 } 5059 5060 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) 5061 { 5062 struct iscsi_session *sess; 5063 struct ddb_entry *ddb_entry; 5064 struct scsi_qla_host *ha; 5065 5066 sess = cls_session->dd_data; 5067 ddb_entry = sess->dd_data; 5068 ha = ddb_entry->ha; 5069 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5070 " unblock session\n", ha->host_no, __func__, 5071 ddb_entry->fw_ddb_index); 5072 5073 iscsi_unblock_session(ddb_entry->sess); 5074 5075 /* Start scan target */ 5076 if (test_bit(AF_ONLINE, &ha->flags)) { 5077 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5078 " start scan\n", ha->host_no, __func__, 5079 ddb_entry->fw_ddb_index); 5080 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work); 5081 } 5082 return QLA_SUCCESS; 5083 } 5084 5085 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) 5086 { 5087 struct iscsi_session *sess; 5088 struct ddb_entry *ddb_entry; 5089 struct scsi_qla_host *ha; 5090 int status = QLA_SUCCESS; 5091 5092 sess = cls_session->dd_data; 5093 ddb_entry = sess->dd_data; 5094 ha = ddb_entry->ha; 5095 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 5096 " unblock user space session\n", ha->host_no, __func__, 5097 ddb_entry->fw_ddb_index); 5098 5099 if (!iscsi_is_session_online(cls_session)) { 5100 iscsi_conn_start(ddb_entry->conn); 5101 iscsi_conn_login_event(ddb_entry->conn, 5102 ISCSI_CONN_STATE_LOGGED_IN); 5103 } else { 5104 ql4_printk(KERN_INFO, ha, 5105 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", 5106 ha->host_no, __func__, ddb_entry->fw_ddb_index, 5107 cls_session->sid); 5108 status = QLA_ERROR; 5109 } 5110 5111 return status; 5112 } 5113 5114 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 5115 { 5116 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); 5117 } 5118 5119 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 5120 { 5121 uint16_t relogin_timer; 5122 struct iscsi_session *sess; 5123 struct ddb_entry *ddb_entry; 5124 struct scsi_qla_host *ha; 5125 5126 sess = cls_sess->dd_data; 5127 ddb_entry = sess->dd_data; 5128 ha = ddb_entry->ha; 5129 5130 relogin_timer = max(ddb_entry->default_relogin_timeout, 5131 (uint16_t)RELOGIN_TOV); 5132 atomic_set(&ddb_entry->relogin_timer, relogin_timer); 5133 5134 DEBUG2(ql4_printk(KERN_INFO, ha, 5135 "scsi%ld: Relogin index [%d]. 
TOV=%d\n", ha->host_no, 5136 ddb_entry->fw_ddb_index, relogin_timer)); 5137 5138 qla4xxx_login_flash_ddb(cls_sess); 5139 } 5140 5141 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) 5142 { 5143 struct iscsi_session *sess; 5144 struct ddb_entry *ddb_entry; 5145 struct scsi_qla_host *ha; 5146 5147 sess = cls_sess->dd_data; 5148 ddb_entry = sess->dd_data; 5149 ha = ddb_entry->ha; 5150 5151 if (!(ddb_entry->ddb_type == FLASH_DDB)) 5152 return; 5153 5154 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 5155 return; 5156 5157 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 5158 !iscsi_is_session_online(cls_sess)) { 5159 DEBUG2(ql4_printk(KERN_INFO, ha, 5160 "relogin issued\n")); 5161 qla4xxx_relogin_flash_ddb(cls_sess); 5162 } 5163 } 5164 5165 void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 5166 { 5167 if (ha->dpc_thread) 5168 queue_work(ha->dpc_thread, &ha->dpc_work); 5169 } 5170 5171 static struct qla4_work_evt * 5172 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, 5173 enum qla4_work_type type) 5174 { 5175 struct qla4_work_evt *e; 5176 uint32_t size = sizeof(struct qla4_work_evt) + data_size; 5177 5178 e = kzalloc(size, GFP_ATOMIC); 5179 if (!e) 5180 return NULL; 5181 5182 INIT_LIST_HEAD(&e->list); 5183 e->type = type; 5184 return e; 5185 } 5186 5187 static void qla4xxx_post_work(struct scsi_qla_host *ha, 5188 struct qla4_work_evt *e) 5189 { 5190 unsigned long flags; 5191 5192 spin_lock_irqsave(&ha->work_lock, flags); 5193 list_add_tail(&e->list, &ha->work_list); 5194 spin_unlock_irqrestore(&ha->work_lock, flags); 5195 qla4xxx_wake_dpc(ha); 5196 } 5197 5198 int qla4xxx_post_aen_work(struct scsi_qla_host *ha, 5199 enum iscsi_host_event_code aen_code, 5200 uint32_t data_size, uint8_t *data) 5201 { 5202 struct qla4_work_evt *e; 5203 5204 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); 5205 if (!e) 5206 return QLA_ERROR; 5207 5208 e->u.aen.code = aen_code; 5209 e->u.aen.data_size = data_size; 5210 memcpy(e->u.aen.data, data, data_size); 5211 5212 qla4xxx_post_work(ha, e); 5213 5214 return QLA_SUCCESS; 5215 } 5216 5217 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, 5218 uint32_t status, uint32_t pid, 5219 uint32_t data_size, uint8_t *data) 5220 { 5221 struct qla4_work_evt *e; 5222 5223 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); 5224 if (!e) 5225 return QLA_ERROR; 5226 5227 e->u.ping.status = status; 5228 e->u.ping.pid = pid; 5229 e->u.ping.data_size = data_size; 5230 memcpy(e->u.ping.data, data, data_size); 5231 5232 qla4xxx_post_work(ha, e); 5233 5234 return QLA_SUCCESS; 5235 } 5236 5237 static void qla4xxx_do_work(struct scsi_qla_host *ha) 5238 { 5239 struct qla4_work_evt *e, *tmp; 5240 unsigned long flags; 5241 LIST_HEAD(work); 5242 5243 spin_lock_irqsave(&ha->work_lock, flags); 5244 list_splice_init(&ha->work_list, &work); 5245 spin_unlock_irqrestore(&ha->work_lock, flags); 5246 5247 list_for_each_entry_safe(e, tmp, &work, list) { 5248 list_del_init(&e->list); 5249 5250 switch (e->type) { 5251 case QLA4_EVENT_AEN: 5252 iscsi_post_host_event(ha->host_no, 5253 &qla4xxx_iscsi_transport, 5254 e->u.aen.code, 5255 e->u.aen.data_size, 5256 e->u.aen.data); 5257 break; 5258 case QLA4_EVENT_PING_STATUS: 5259 iscsi_ping_comp_event(ha->host_no, 5260 &qla4xxx_iscsi_transport, 5261 e->u.ping.status, 5262 e->u.ping.pid, 5263 e->u.ping.data_size, 5264 e->u.ping.data); 5265 break; 5266 default: 5267 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " 5268 "supported", e->type); 5269 } 5270 kfree(e); 5271 } 5272 } 5273 5274 
/** 5275 * qla4xxx_do_dpc - dpc routine 5276 * @data: in our case pointer to adapter structure 5277 * 5278 * This routine is a task that is schedule by the interrupt handler 5279 * to perform the background processing for interrupts. We put it 5280 * on a task queue that is consumed whenever the scheduler runs; that's 5281 * so you can do anything (i.e. put the process to sleep etc). In fact, 5282 * the mid-level tries to sleep when it reaches the driver threshold 5283 * "host->can_queue". This can cause a panic if we were in our interrupt code. 5284 **/ 5285 static void qla4xxx_do_dpc(struct work_struct *work) 5286 { 5287 struct scsi_qla_host *ha = 5288 container_of(work, struct scsi_qla_host, dpc_work); 5289 int status = QLA_ERROR; 5290 5291 DEBUG2(ql4_printk(KERN_INFO, ha, 5292 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", 5293 ha->host_no, __func__, ha->flags, ha->dpc_flags)); 5294 5295 /* Initialization not yet finished. Don't do anything yet. */ 5296 if (!test_bit(AF_INIT_DONE, &ha->flags)) 5297 return; 5298 5299 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 5300 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 5301 ha->host_no, __func__, ha->flags)); 5302 return; 5303 } 5304 5305 /* post events to application */ 5306 qla4xxx_do_work(ha); 5307 5308 if (is_qla80XX(ha)) { 5309 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 5310 if (is_qla8032(ha) || is_qla8042(ha)) { 5311 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 5312 __func__); 5313 /* disable pause frame for ISP83xx */ 5314 qla4_83xx_disable_pause(ha); 5315 } 5316 5317 ha->isp_ops->idc_lock(ha); 5318 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 5319 QLA8XXX_DEV_FAILED); 5320 ha->isp_ops->idc_unlock(ha); 5321 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 5322 qla4_8xxx_device_state_handler(ha); 5323 } 5324 5325 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { 5326 if (is_qla8042(ha)) { 5327 if (ha->idc_info.info2 & 5328 ENABLE_INTERNAL_LOOPBACK) { 5329 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", 5330 __func__); 5331 status = qla4_84xx_config_acb(ha, 5332 ACB_CONFIG_DISABLE); 5333 if (status != QLA_SUCCESS) { 5334 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", 5335 __func__); 5336 } 5337 } 5338 } 5339 qla4_83xx_post_idc_ack(ha); 5340 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); 5341 } 5342 5343 if (is_qla8042(ha) && 5344 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { 5345 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", 5346 __func__); 5347 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != 5348 QLA_SUCCESS) { 5349 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", 5350 __func__); 5351 } 5352 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); 5353 } 5354 5355 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 5356 qla4_8xxx_need_qsnt_handler(ha); 5357 } 5358 } 5359 5360 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && 5361 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 5362 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 5363 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 5364 if ((is_qla8022(ha) && ql4xdontresethba) || 5365 ((is_qla8032(ha) || is_qla8042(ha)) && 5366 qla4_83xx_idc_dontreset(ha))) { 5367 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5368 ha->host_no, __func__)); 5369 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 5370 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5371 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5372 goto dpc_post_reset_ha; 5373 } 5374 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || 5375 
test_bit(DPC_RESET_HA, &ha->dpc_flags)) 5376 qla4xxx_recover_adapter(ha); 5377 5378 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 5379 uint8_t wait_time = RESET_INTR_TOV; 5380 5381 while ((readw(&ha->reg->ctrl_status) & 5382 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { 5383 if (--wait_time == 0) 5384 break; 5385 msleep(1000); 5386 } 5387 if (wait_time == 0) 5388 DEBUG2(printk("scsi%ld: %s: SR|FSR " 5389 "bit not cleared-- resetting\n", 5390 ha->host_no, __func__)); 5391 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 5392 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { 5393 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 5394 status = qla4xxx_recover_adapter(ha); 5395 } 5396 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 5397 if (status == QLA_SUCCESS) 5398 ha->isp_ops->enable_intrs(ha); 5399 } 5400 } 5401 5402 dpc_post_reset_ha: 5403 /* ---- process AEN? --- */ 5404 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) 5405 qla4xxx_process_aen(ha, PROCESS_ALL_AENS); 5406 5407 /* ---- Get DHCP IP Address? --- */ 5408 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 5409 qla4xxx_get_dhcp_ip_address(ha); 5410 5411 /* ---- relogin device? --- */ 5412 if (adapter_up(ha) && 5413 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { 5414 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); 5415 } 5416 5417 /* ---- link change? --- */ 5418 if (!test_bit(AF_LOOPBACK, &ha->flags) && 5419 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 5420 if (!test_bit(AF_LINK_UP, &ha->flags)) { 5421 /* ---- link down? --- */ 5422 qla4xxx_mark_all_devices_missing(ha); 5423 } else { 5424 /* ---- link up? --- * 5425 * F/W will auto login to all devices ONLY ONCE after 5426 * link up during driver initialization and runtime 5427 * fatal error recovery. Therefore, the driver must 5428 * manually relogin to devices when recovering from 5429 * connection failures, logouts, expired KATO, etc. */ 5430 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { 5431 qla4xxx_build_ddb_list(ha, ha->is_reset); 5432 iscsi_host_for_each_session(ha->host, 5433 qla4xxx_login_flash_ddb); 5434 } else 5435 qla4xxx_relogin_all_devices(ha); 5436 } 5437 } 5438 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { 5439 if (qla4xxx_sysfs_ddb_export(ha)) 5440 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", 5441 __func__); 5442 } 5443 } 5444 5445 /** 5446 * qla4xxx_free_adapter - release the adapter 5447 * @ha: pointer to adapter structure 5448 **/ 5449 static void qla4xxx_free_adapter(struct scsi_qla_host *ha) 5450 { 5451 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 5452 5453 /* Turn-off interrupts on the card. 
*/ 5454 ha->isp_ops->disable_intrs(ha); 5455 5456 if (is_qla40XX(ha)) { 5457 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 5458 &ha->reg->ctrl_status); 5459 readl(&ha->reg->ctrl_status); 5460 } else if (is_qla8022(ha)) { 5461 writel(0, &ha->qla4_82xx_reg->host_int); 5462 readl(&ha->qla4_82xx_reg->host_int); 5463 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5464 writel(0, &ha->qla4_83xx_reg->risc_intr); 5465 readl(&ha->qla4_83xx_reg->risc_intr); 5466 } 5467 5468 /* Remove timer thread, if present */ 5469 if (ha->timer_active) 5470 qla4xxx_stop_timer(ha); 5471 5472 /* Kill the kernel thread for this host */ 5473 if (ha->dpc_thread) 5474 destroy_workqueue(ha->dpc_thread); 5475 5476 /* Kill the kernel thread for this host */ 5477 if (ha->task_wq) 5478 destroy_workqueue(ha->task_wq); 5479 5480 /* Put firmware in known state */ 5481 ha->isp_ops->reset_firmware(ha); 5482 5483 if (is_qla80XX(ha)) { 5484 ha->isp_ops->idc_lock(ha); 5485 qla4_8xxx_clear_drv_active(ha); 5486 ha->isp_ops->idc_unlock(ha); 5487 } 5488 5489 /* Detach interrupts */ 5490 qla4xxx_free_irqs(ha); 5491 5492 /* free extra memory */ 5493 qla4xxx_mem_free(ha); 5494 } 5495 5496 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) 5497 { 5498 int status = 0; 5499 unsigned long mem_base, mem_len, db_base, db_len; 5500 struct pci_dev *pdev = ha->pdev; 5501 5502 status = pci_request_regions(pdev, DRIVER_NAME); 5503 if (status) { 5504 printk(KERN_WARNING 5505 "scsi(%ld) Failed to reserve PIO regions (%s) " 5506 "status=%d\n", ha->host_no, pci_name(pdev), status); 5507 goto iospace_error_exit; 5508 } 5509 5510 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", 5511 __func__, pdev->revision)); 5512 ha->revision_id = pdev->revision; 5513 5514 /* remap phys address */ 5515 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 5516 mem_len = pci_resource_len(pdev, 0); 5517 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", 5518 __func__, mem_base, mem_len)); 5519 5520 /* mapping of pcibase pointer */ 5521 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); 5522 if (!ha->nx_pcibase) { 5523 printk(KERN_ERR 5524 "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); 5525 pci_release_regions(ha->pdev); 5526 goto iospace_error_exit; 5527 } 5528 5529 /* Mapping of IO base pointer, door bell read and write pointer */ 5530 5531 /* mapping of IO base pointer */ 5532 if (is_qla8022(ha)) { 5533 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) 5534 ((uint8_t *)ha->nx_pcibase + 0xbc000 + 5535 (ha->pdev->devfn << 11)); 5536 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : 5537 QLA82XX_CAM_RAM_DB2); 5538 } else if (is_qla8032(ha) || is_qla8042(ha)) { 5539 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) 5540 ((uint8_t *)ha->nx_pcibase); 5541 } 5542 5543 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ 5544 db_len = pci_resource_len(pdev, 4); 5545 5546 return 0; 5547 iospace_error_exit: 5548 return -ENOMEM; 5549 } 5550 5551 /*** 5552 * qla4xxx_iospace_config - maps registers 5553 * @ha: pointer to adapter structure 5554 * 5555 * This routines maps HBA's registers from the pci address space 5556 * into the kernel virtual address space for memory mapped i/o. 
5557 **/ 5558 int qla4xxx_iospace_config(struct scsi_qla_host *ha) 5559 { 5560 unsigned long pio, pio_len, pio_flags; 5561 unsigned long mmio, mmio_len, mmio_flags; 5562 5563 pio = pci_resource_start(ha->pdev, 0); 5564 pio_len = pci_resource_len(ha->pdev, 0); 5565 pio_flags = pci_resource_flags(ha->pdev, 0); 5566 if (pio_flags & IORESOURCE_IO) { 5567 if (pio_len < MIN_IOBASE_LEN) { 5568 ql4_printk(KERN_WARNING, ha, 5569 "Invalid PCI I/O region size\n"); 5570 pio = 0; 5571 } 5572 } else { 5573 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); 5574 pio = 0; 5575 } 5576 5577 /* Use MMIO operations for all accesses. */ 5578 mmio = pci_resource_start(ha->pdev, 1); 5579 mmio_len = pci_resource_len(ha->pdev, 1); 5580 mmio_flags = pci_resource_flags(ha->pdev, 1); 5581 5582 if (!(mmio_flags & IORESOURCE_MEM)) { 5583 ql4_printk(KERN_ERR, ha, 5584 "region #0 not an MMIO resource, aborting\n"); 5585 5586 goto iospace_error_exit; 5587 } 5588 5589 if (mmio_len < MIN_IOBASE_LEN) { 5590 ql4_printk(KERN_ERR, ha, 5591 "Invalid PCI mem region size, aborting\n"); 5592 goto iospace_error_exit; 5593 } 5594 5595 if (pci_request_regions(ha->pdev, DRIVER_NAME)) { 5596 ql4_printk(KERN_WARNING, ha, 5597 "Failed to reserve PIO/MMIO regions\n"); 5598 5599 goto iospace_error_exit; 5600 } 5601 5602 ha->pio_address = pio; 5603 ha->pio_length = pio_len; 5604 ha->reg = ioremap(mmio, MIN_IOBASE_LEN); 5605 if (!ha->reg) { 5606 ql4_printk(KERN_ERR, ha, 5607 "cannot remap MMIO, aborting\n"); 5608 5609 goto iospace_error_exit; 5610 } 5611 5612 return 0; 5613 5614 iospace_error_exit: 5615 return -ENOMEM; 5616 } 5617 5618 static struct isp_operations qla4xxx_isp_ops = { 5619 .iospace_config = qla4xxx_iospace_config, 5620 .pci_config = qla4xxx_pci_config, 5621 .disable_intrs = qla4xxx_disable_intrs, 5622 .enable_intrs = qla4xxx_enable_intrs, 5623 .start_firmware = qla4xxx_start_firmware, 5624 .intr_handler = qla4xxx_intr_handler, 5625 .interrupt_service_routine = qla4xxx_interrupt_service_routine, 5626 .reset_chip = qla4xxx_soft_reset, 5627 .reset_firmware = qla4xxx_hw_reset, 5628 .queue_iocb = qla4xxx_queue_iocb, 5629 .complete_iocb = qla4xxx_complete_iocb, 5630 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5631 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5632 .get_sys_info = qla4xxx_get_sys_info, 5633 .queue_mailbox_command = qla4xxx_queue_mbox_cmd, 5634 .process_mailbox_interrupt = qla4xxx_process_mbox_intr, 5635 }; 5636 5637 static struct isp_operations qla4_82xx_isp_ops = { 5638 .iospace_config = qla4_8xxx_iospace_config, 5639 .pci_config = qla4_8xxx_pci_config, 5640 .disable_intrs = qla4_82xx_disable_intrs, 5641 .enable_intrs = qla4_82xx_enable_intrs, 5642 .start_firmware = qla4_8xxx_load_risc, 5643 .restart_firmware = qla4_82xx_try_start_fw, 5644 .intr_handler = qla4_82xx_intr_handler, 5645 .interrupt_service_routine = qla4_82xx_interrupt_service_routine, 5646 .need_reset = qla4_8xxx_need_reset, 5647 .reset_chip = qla4_82xx_isp_reset, 5648 .reset_firmware = qla4_8xxx_stop_firmware, 5649 .queue_iocb = qla4_82xx_queue_iocb, 5650 .complete_iocb = qla4_82xx_complete_iocb, 5651 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, 5652 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, 5653 .get_sys_info = qla4_8xxx_get_sys_info, 5654 .rd_reg_direct = qla4_82xx_rd_32, 5655 .wr_reg_direct = qla4_82xx_wr_32, 5656 .rd_reg_indirect = qla4_82xx_md_rd_32, 5657 .wr_reg_indirect = qla4_82xx_md_wr_32, 5658 .idc_lock = qla4_82xx_idc_lock, 5659 .idc_unlock = qla4_82xx_idc_unlock, 5660 .rom_lock_recovery = 
qla4_82xx_rom_lock_recovery, 5661 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, 5662 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr, 5663 }; 5664 5665 static struct isp_operations qla4_83xx_isp_ops = { 5666 .iospace_config = qla4_8xxx_iospace_config, 5667 .pci_config = qla4_8xxx_pci_config, 5668 .disable_intrs = qla4_83xx_disable_intrs, 5669 .enable_intrs = qla4_83xx_enable_intrs, 5670 .start_firmware = qla4_8xxx_load_risc, 5671 .restart_firmware = qla4_83xx_start_firmware, 5672 .intr_handler = qla4_83xx_intr_handler, 5673 .interrupt_service_routine = qla4_83xx_interrupt_service_routine, 5674 .need_reset = qla4_8xxx_need_reset, 5675 .reset_chip = qla4_83xx_isp_reset, 5676 .reset_firmware = qla4_8xxx_stop_firmware, 5677 .queue_iocb = qla4_83xx_queue_iocb, 5678 .complete_iocb = qla4_83xx_complete_iocb, 5679 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 5680 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 5681 .get_sys_info = qla4_8xxx_get_sys_info, 5682 .rd_reg_direct = qla4_83xx_rd_reg, 5683 .wr_reg_direct = qla4_83xx_wr_reg, 5684 .rd_reg_indirect = qla4_83xx_rd_reg_indirect, 5685 .wr_reg_indirect = qla4_83xx_wr_reg_indirect, 5686 .idc_lock = qla4_83xx_drv_lock, 5687 .idc_unlock = qla4_83xx_drv_unlock, 5688 .rom_lock_recovery = qla4_83xx_rom_lock_recovery, 5689 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, 5690 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, 5691 }; 5692 5693 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5694 { 5695 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 5696 } 5697 5698 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 5699 { 5700 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); 5701 } 5702 5703 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5704 { 5705 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 5706 } 5707 5708 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 5709 { 5710 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); 5711 } 5712 5713 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 5714 { 5715 struct scsi_qla_host *ha = data; 5716 char *str = buf; 5717 int rc; 5718 5719 switch (type) { 5720 case ISCSI_BOOT_ETH_FLAGS: 5721 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5722 break; 5723 case ISCSI_BOOT_ETH_INDEX: 5724 rc = sprintf(str, "0\n"); 5725 break; 5726 case ISCSI_BOOT_ETH_MAC: 5727 rc = sysfs_format_mac(str, ha->my_mac, 5728 MAC_ADDR_LEN); 5729 break; 5730 default: 5731 rc = -ENOSYS; 5732 break; 5733 } 5734 return rc; 5735 } 5736 5737 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) 5738 { 5739 int rc; 5740 5741 switch (type) { 5742 case ISCSI_BOOT_ETH_FLAGS: 5743 case ISCSI_BOOT_ETH_MAC: 5744 case ISCSI_BOOT_ETH_INDEX: 5745 rc = S_IRUGO; 5746 break; 5747 default: 5748 rc = 0; 5749 break; 5750 } 5751 return rc; 5752 } 5753 5754 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) 5755 { 5756 struct scsi_qla_host *ha = data; 5757 char *str = buf; 5758 int rc; 5759 5760 switch (type) { 5761 case ISCSI_BOOT_INI_INITIATOR_NAME: 5762 rc = sprintf(str, "%s\n", ha->name_string); 5763 break; 5764 default: 5765 rc = -ENOSYS; 5766 break; 5767 } 5768 return rc; 5769 } 5770 5771 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) 5772 { 5773 int rc; 5774 5775 switch (type) { 5776 case ISCSI_BOOT_INI_INITIATOR_NAME: 5777 rc = S_IRUGO; 5778 break; 5779 default: 5780 rc = 0; 5781 break; 5782 } 5783 return rc; 5784 } 5785 5786 static ssize_t 
5787 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, 5788 char *buf) 5789 { 5790 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 5791 char *str = buf; 5792 int rc; 5793 5794 switch (type) { 5795 case ISCSI_BOOT_TGT_NAME: 5796 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); 5797 break; 5798 case ISCSI_BOOT_TGT_IP_ADDR: 5799 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) 5800 rc = sprintf(buf, "%pI4\n", 5801 &boot_conn->dest_ipaddr.ip_address); 5802 else 5803 rc = sprintf(str, "%pI6\n", 5804 &boot_conn->dest_ipaddr.ip_address); 5805 break; 5806 case ISCSI_BOOT_TGT_PORT: 5807 rc = sprintf(str, "%d\n", boot_conn->dest_port); 5808 break; 5809 case ISCSI_BOOT_TGT_CHAP_NAME: 5810 rc = sprintf(str, "%.*s\n", 5811 boot_conn->chap.target_chap_name_length, 5812 (char *)&boot_conn->chap.target_chap_name); 5813 break; 5814 case ISCSI_BOOT_TGT_CHAP_SECRET: 5815 rc = sprintf(str, "%.*s\n", 5816 boot_conn->chap.target_secret_length, 5817 (char *)&boot_conn->chap.target_secret); 5818 break; 5819 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5820 rc = sprintf(str, "%.*s\n", 5821 boot_conn->chap.intr_chap_name_length, 5822 (char *)&boot_conn->chap.intr_chap_name); 5823 break; 5824 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5825 rc = sprintf(str, "%.*s\n", 5826 boot_conn->chap.intr_secret_length, 5827 (char *)&boot_conn->chap.intr_secret); 5828 break; 5829 case ISCSI_BOOT_TGT_FLAGS: 5830 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); 5831 break; 5832 case ISCSI_BOOT_TGT_NIC_ASSOC: 5833 rc = sprintf(str, "0\n"); 5834 break; 5835 default: 5836 rc = -ENOSYS; 5837 break; 5838 } 5839 return rc; 5840 } 5841 5842 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) 5843 { 5844 struct scsi_qla_host *ha = data; 5845 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); 5846 5847 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5848 } 5849 5850 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) 5851 { 5852 struct scsi_qla_host *ha = data; 5853 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); 5854 5855 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); 5856 } 5857 5858 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) 5859 { 5860 int rc; 5861 5862 switch (type) { 5863 case ISCSI_BOOT_TGT_NAME: 5864 case ISCSI_BOOT_TGT_IP_ADDR: 5865 case ISCSI_BOOT_TGT_PORT: 5866 case ISCSI_BOOT_TGT_CHAP_NAME: 5867 case ISCSI_BOOT_TGT_CHAP_SECRET: 5868 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5869 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5870 case ISCSI_BOOT_TGT_NIC_ASSOC: 5871 case ISCSI_BOOT_TGT_FLAGS: 5872 rc = S_IRUGO; 5873 break; 5874 default: 5875 rc = 0; 5876 break; 5877 } 5878 return rc; 5879 } 5880 5881 static void qla4xxx_boot_release(void *data) 5882 { 5883 struct scsi_qla_host *ha = data; 5884 5885 scsi_host_put(ha->host); 5886 } 5887 5888 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) 5889 { 5890 dma_addr_t buf_dma; 5891 uint32_t addr, pri_addr, sec_addr; 5892 uint32_t offset; 5893 uint16_t func_num; 5894 uint8_t val; 5895 uint8_t *buf = NULL; 5896 size_t size = 13 * sizeof(uint8_t); 5897 int ret = QLA_SUCCESS; 5898 5899 func_num = PCI_FUNC(ha->pdev->devfn); 5900 5901 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", 5902 __func__, ha->pdev->device, func_num); 5903 5904 if (is_qla40XX(ha)) { 5905 if (func_num == 1) { 5906 addr = NVRAM_PORT0_BOOT_MODE; 5907 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; 5908 sec_addr = 
NVRAM_PORT0_BOOT_SEC_TGT; 5909 } else if (func_num == 3) { 5910 addr = NVRAM_PORT1_BOOT_MODE; 5911 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; 5912 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; 5913 } else { 5914 ret = QLA_ERROR; 5915 goto exit_boot_info; 5916 } 5917 5918 /* Check Boot Mode */ 5919 val = rd_nvram_byte(ha, addr); 5920 if (!(val & 0x07)) { 5921 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " 5922 "options : 0x%x\n", __func__, val)); 5923 ret = QLA_ERROR; 5924 goto exit_boot_info; 5925 } 5926 5927 /* get primary valid target index */ 5928 val = rd_nvram_byte(ha, pri_addr); 5929 if (val & BIT_7) 5930 ddb_index[0] = (val & 0x7f); 5931 5932 /* get secondary valid target index */ 5933 val = rd_nvram_byte(ha, sec_addr); 5934 if (val & BIT_7) 5935 ddb_index[1] = (val & 0x7f); 5936 5937 } else if (is_qla80XX(ha)) { 5938 buf = dma_alloc_coherent(&ha->pdev->dev, size, 5939 &buf_dma, GFP_KERNEL); 5940 if (!buf) { 5941 DEBUG2(ql4_printk(KERN_ERR, ha, 5942 "%s: Unable to allocate dma buffer\n", 5943 __func__)); 5944 ret = QLA_ERROR; 5945 goto exit_boot_info; 5946 } 5947 5948 if (ha->port_num == 0) 5949 offset = BOOT_PARAM_OFFSET_PORT0; 5950 else if (ha->port_num == 1) 5951 offset = BOOT_PARAM_OFFSET_PORT1; 5952 else { 5953 ret = QLA_ERROR; 5954 goto exit_boot_info_free; 5955 } 5956 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + 5957 offset; 5958 if (qla4xxx_get_flash(ha, buf_dma, addr, 5959 13 * sizeof(uint8_t)) != QLA_SUCCESS) { 5960 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" 5961 " failed\n", ha->host_no, __func__)); 5962 ret = QLA_ERROR; 5963 goto exit_boot_info_free; 5964 } 5965 /* Check Boot Mode */ 5966 if (!(buf[1] & 0x07)) { 5967 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" 5968 " : 0x%x\n", buf[1])); 5969 ret = QLA_ERROR; 5970 goto exit_boot_info_free; 5971 } 5972 5973 /* get primary valid target index */ 5974 if (buf[2] & BIT_7) 5975 ddb_index[0] = buf[2] & 0x7f; 5976 5977 /* get secondary valid target index */ 5978 if (buf[11] & BIT_7) 5979 ddb_index[1] = buf[11] & 0x7f; 5980 } else { 5981 ret = QLA_ERROR; 5982 goto exit_boot_info; 5983 } 5984 5985 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" 5986 " target ID %d\n", __func__, ddb_index[0], 5987 ddb_index[1])); 5988 5989 exit_boot_info_free: 5990 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 5991 exit_boot_info: 5992 ha->pri_ddb_idx = ddb_index[0]; 5993 ha->sec_ddb_idx = ddb_index[1]; 5994 return ret; 5995 } 5996 5997 /** 5998 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password 5999 * @ha: pointer to adapter structure 6000 * @username: CHAP username to be returned 6001 * @password: CHAP password to be returned 6002 * 6003 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP 6004 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. 6005 * So from the CHAP cache find the first BIDI CHAP entry and set it 6006 * to the boot record in sysfs. 
6007 **/ 6008 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, 6009 char *password) 6010 { 6011 int i, ret = -EINVAL; 6012 int max_chap_entries = 0; 6013 struct ql4_chap_table *chap_table; 6014 6015 if (is_qla80XX(ha)) 6016 max_chap_entries = (ha->hw.flt_chap_size / 2) / 6017 sizeof(struct ql4_chap_table); 6018 else 6019 max_chap_entries = MAX_CHAP_ENTRIES_40XX; 6020 6021 if (!ha->chap_list) { 6022 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); 6023 return ret; 6024 } 6025 6026 mutex_lock(&ha->chap_sem); 6027 for (i = 0; i < max_chap_entries; i++) { 6028 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 6029 if (chap_table->cookie != 6030 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { 6031 continue; 6032 } 6033 6034 if (chap_table->flags & BIT_7) /* local */ 6035 continue; 6036 6037 if (!(chap_table->flags & BIT_6)) /* Not BIDI */ 6038 continue; 6039 6040 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); 6041 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); 6042 ret = 0; 6043 break; 6044 } 6045 mutex_unlock(&ha->chap_sem); 6046 6047 return ret; 6048 } 6049 6050 6051 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, 6052 struct ql4_boot_session_info *boot_sess, 6053 uint16_t ddb_index) 6054 { 6055 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; 6056 struct dev_db_entry *fw_ddb_entry; 6057 dma_addr_t fw_ddb_entry_dma; 6058 uint16_t idx; 6059 uint16_t options; 6060 int ret = QLA_SUCCESS; 6061 6062 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6063 &fw_ddb_entry_dma, GFP_KERNEL); 6064 if (!fw_ddb_entry) { 6065 DEBUG2(ql4_printk(KERN_ERR, ha, 6066 "%s: Unable to allocate dma buffer.\n", 6067 __func__)); 6068 ret = QLA_ERROR; 6069 return ret; 6070 } 6071 6072 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, 6073 fw_ddb_entry_dma, ddb_index)) { 6074 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " 6075 "index [%d]\n", __func__, ddb_index)); 6076 ret = QLA_ERROR; 6077 goto exit_boot_target; 6078 } 6079 6080 /* Update target name and IP from DDB */ 6081 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, 6082 min(sizeof(boot_sess->target_name), 6083 sizeof(fw_ddb_entry->iscsi_name))); 6084 6085 options = le16_to_cpu(fw_ddb_entry->options); 6086 if (options & DDB_OPT_IPV6_DEVICE) { 6087 memcpy(&boot_conn->dest_ipaddr.ip_address, 6088 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); 6089 } else { 6090 boot_conn->dest_ipaddr.ip_type = 0x1; 6091 memcpy(&boot_conn->dest_ipaddr.ip_address, 6092 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); 6093 } 6094 6095 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); 6096 6097 /* update chap information */ 6098 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 6099 6100 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6101 6102 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); 6103 6104 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. 
6105 target_chap_name, 6106 (char *)&boot_conn->chap.target_secret, 6107 idx); 6108 if (ret) { 6109 ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); 6110 ret = QLA_ERROR; 6111 goto exit_boot_target; 6112 } 6113 6114 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6115 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6116 } 6117 6118 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 6119 6120 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); 6121 6122 ret = qla4xxx_get_bidi_chap(ha, 6123 (char *)&boot_conn->chap.intr_chap_name, 6124 (char *)&boot_conn->chap.intr_secret); 6125 6126 if (ret) { 6127 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); 6128 ret = QLA_ERROR; 6129 goto exit_boot_target; 6130 } 6131 6132 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; 6133 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; 6134 } 6135 6136 exit_boot_target: 6137 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6138 fw_ddb_entry, fw_ddb_entry_dma); 6139 return ret; 6140 } 6141 6142 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) 6143 { 6144 uint16_t ddb_index[2]; 6145 int ret = QLA_ERROR; 6146 int rval; 6147 6148 memset(ddb_index, 0, sizeof(ddb_index)); 6149 ddb_index[0] = 0xffff; 6150 ddb_index[1] = 0xffff; 6151 ret = get_fw_boot_info(ha, ddb_index); 6152 if (ret != QLA_SUCCESS) { 6153 DEBUG2(ql4_printk(KERN_INFO, ha, 6154 "%s: No boot target configured.\n", __func__)); 6155 return ret; 6156 } 6157 6158 if (ql4xdisablesysfsboot) 6159 return QLA_SUCCESS; 6160 6161 if (ddb_index[0] == 0xffff) 6162 goto sec_target; 6163 6164 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 6165 ddb_index[0]); 6166 if (rval != QLA_SUCCESS) { 6167 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " 6168 "configured\n", __func__)); 6169 } else 6170 ret = QLA_SUCCESS; 6171 6172 sec_target: 6173 if (ddb_index[1] == 0xffff) 6174 goto exit_get_boot_info; 6175 6176 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), 6177 ddb_index[1]); 6178 if (rval != QLA_SUCCESS) { 6179 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" 6180 " configured\n", __func__)); 6181 } else 6182 ret = QLA_SUCCESS; 6183 6184 exit_get_boot_info: 6185 return ret; 6186 } 6187 6188 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) 6189 { 6190 struct iscsi_boot_kobj *boot_kobj; 6191 6192 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) 6193 return QLA_ERROR; 6194 6195 if (ql4xdisablesysfsboot) { 6196 ql4_printk(KERN_INFO, ha, 6197 "%s: syfsboot disabled - driver will trigger login " 6198 "and publish session for discovery .\n", __func__); 6199 return QLA_SUCCESS; 6200 } 6201 6202 6203 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); 6204 if (!ha->boot_kset) 6205 goto kset_free; 6206 6207 if (!scsi_host_get(ha->host)) 6208 goto kset_free; 6209 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, 6210 qla4xxx_show_boot_tgt_pri_info, 6211 qla4xxx_tgt_get_attr_visibility, 6212 qla4xxx_boot_release); 6213 if (!boot_kobj) 6214 goto put_host; 6215 6216 if (!scsi_host_get(ha->host)) 6217 goto kset_free; 6218 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, 6219 qla4xxx_show_boot_tgt_sec_info, 6220 qla4xxx_tgt_get_attr_visibility, 6221 qla4xxx_boot_release); 6222 if (!boot_kobj) 6223 goto put_host; 6224 6225 if (!scsi_host_get(ha->host)) 6226 goto kset_free; 6227 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, 6228 qla4xxx_show_boot_ini_info, 6229 
qla4xxx_ini_get_attr_visibility, 6230 qla4xxx_boot_release); 6231 if (!boot_kobj) 6232 goto put_host; 6233 6234 if (!scsi_host_get(ha->host)) 6235 goto kset_free; 6236 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, 6237 qla4xxx_show_boot_eth_info, 6238 qla4xxx_eth_get_attr_visibility, 6239 qla4xxx_boot_release); 6240 if (!boot_kobj) 6241 goto put_host; 6242 6243 return QLA_SUCCESS; 6244 6245 put_host: 6246 scsi_host_put(ha->host); 6247 kset_free: 6248 iscsi_boot_destroy_kset(ha->boot_kset); 6249 return -ENOMEM; 6250 } 6251 6252 6253 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, 6254 struct ql4_tuple_ddb *tddb) 6255 { 6256 struct scsi_qla_host *ha; 6257 struct iscsi_cls_session *cls_sess; 6258 struct iscsi_cls_conn *cls_conn; 6259 struct iscsi_session *sess; 6260 struct iscsi_conn *conn; 6261 6262 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 6263 ha = ddb_entry->ha; 6264 cls_sess = ddb_entry->sess; 6265 sess = cls_sess->dd_data; 6266 cls_conn = ddb_entry->conn; 6267 conn = cls_conn->dd_data; 6268 6269 tddb->tpgt = sess->tpgt; 6270 tddb->port = conn->persistent_port; 6271 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); 6272 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); 6273 } 6274 6275 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, 6276 struct ql4_tuple_ddb *tddb, 6277 uint8_t *flash_isid) 6278 { 6279 uint16_t options = 0; 6280 6281 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 6282 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], 6283 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); 6284 6285 options = le16_to_cpu(fw_ddb_entry->options); 6286 if (options & DDB_OPT_IPV6_DEVICE) 6287 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); 6288 else 6289 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); 6290 6291 tddb->port = le16_to_cpu(fw_ddb_entry->port); 6292 6293 if (flash_isid == NULL) 6294 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], 6295 sizeof(tddb->isid)); 6296 else 6297 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); 6298 } 6299 6300 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, 6301 struct ql4_tuple_ddb *old_tddb, 6302 struct ql4_tuple_ddb *new_tddb, 6303 uint8_t is_isid_compare) 6304 { 6305 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6306 return QLA_ERROR; 6307 6308 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) 6309 return QLA_ERROR; 6310 6311 if (old_tddb->port != new_tddb->port) 6312 return QLA_ERROR; 6313 6314 /* For multi sessions, driver generates the ISID, so do not compare 6315 * ISID in reset path since it would be a comparison between the 6316 * driver generated ISID and firmware generated ISID. This could 6317 * lead to adding duplicated DDBs in the list as driver generated 6318 * ISID would not match firmware generated ISID. 
6319 */ 6320 if (is_isid_compare) { 6321 DEBUG2(ql4_printk(KERN_INFO, ha, 6322 "%s: old ISID [%pmR] New ISID [%pmR]\n", 6323 __func__, old_tddb->isid, new_tddb->isid)); 6324 6325 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6326 sizeof(old_tddb->isid))) 6327 return QLA_ERROR; 6328 } 6329 6330 DEBUG2(ql4_printk(KERN_INFO, ha, 6331 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", 6332 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr, 6333 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt, 6334 new_tddb->ip_addr, new_tddb->iscsi_name)); 6335 6336 return QLA_SUCCESS; 6337 } 6338 6339 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, 6340 struct dev_db_entry *fw_ddb_entry, 6341 uint32_t *index) 6342 { 6343 struct ddb_entry *ddb_entry; 6344 struct ql4_tuple_ddb *fw_tddb = NULL; 6345 struct ql4_tuple_ddb *tmp_tddb = NULL; 6346 int idx; 6347 int ret = QLA_ERROR; 6348 6349 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6350 if (!fw_tddb) { 6351 DEBUG2(ql4_printk(KERN_WARNING, ha, 6352 "Memory Allocation failed.\n")); 6353 ret = QLA_SUCCESS; 6354 goto exit_check; 6355 } 6356 6357 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6358 if (!tmp_tddb) { 6359 DEBUG2(ql4_printk(KERN_WARNING, ha, 6360 "Memory Allocation failed.\n")); 6361 ret = QLA_SUCCESS; 6362 goto exit_check; 6363 } 6364 6365 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6366 6367 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 6368 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 6369 if (ddb_entry == NULL) 6370 continue; 6371 6372 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); 6373 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { 6374 ret = QLA_SUCCESS; /* found */ 6375 if (index != NULL) 6376 *index = idx; 6377 goto exit_check; 6378 } 6379 } 6380 6381 exit_check: 6382 if (fw_tddb) 6383 vfree(fw_tddb); 6384 if (tmp_tddb) 6385 vfree(tmp_tddb); 6386 return ret; 6387 } 6388 6389 /** 6390 * qla4xxx_check_existing_isid - check if target with same isid exist 6391 * in target list 6392 * @list_nt: list of target 6393 * @isid: isid to check 6394 * 6395 * This routine return QLA_SUCCESS if target with same isid exist 6396 **/ 6397 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid) 6398 { 6399 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6400 struct dev_db_entry *fw_ddb_entry; 6401 6402 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6403 fw_ddb_entry = &nt_ddb_idx->fw_ddb; 6404 6405 if (memcmp(&fw_ddb_entry->isid[0], &isid[0], 6406 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) { 6407 return QLA_SUCCESS; 6408 } 6409 } 6410 return QLA_ERROR; 6411 } 6412 6413 /** 6414 * qla4xxx_update_isid - compare ddbs and updated isid 6415 * @ha: Pointer to host adapter structure. 6416 * @list_nt: list of nt target 6417 * @fw_ddb_entry: firmware ddb entry 6418 * 6419 * This routine update isid if ddbs have same iqn, same isid and 6420 * different IP addr. 6421 * Return QLA_SUCCESS if isid is updated. 
6422 **/ 6423 static int qla4xxx_update_isid(struct scsi_qla_host *ha, 6424 struct list_head *list_nt, 6425 struct dev_db_entry *fw_ddb_entry) 6426 { 6427 uint8_t base_value, i; 6428 6429 base_value = fw_ddb_entry->isid[1] & 0x1f; 6430 for (i = 0; i < 8; i++) { 6431 fw_ddb_entry->isid[1] = (base_value | (i << 5)); 6432 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6433 break; 6434 } 6435 6436 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid)) 6437 return QLA_ERROR; 6438 6439 return QLA_SUCCESS; 6440 } 6441 6442 /** 6443 * qla4xxx_should_update_isid - check if the isid needs to be updated 6444 * @ha: Pointer to host adapter structure. 6445 * @old_tddb: ddb tuple 6446 * @new_tddb: ddb tuple 6447 * 6448 * Return QLA_SUCCESS if the IP or PORT differs while the iqn and 6449 * isid match 6450 **/ 6451 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, 6452 struct ql4_tuple_ddb *old_tddb, 6453 struct ql4_tuple_ddb *new_tddb) 6454 { 6455 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { 6456 /* Same ip */ 6457 if (old_tddb->port == new_tddb->port) 6458 return QLA_ERROR; 6459 } 6460 6461 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) 6462 /* different iqn */ 6463 return QLA_ERROR; 6464 6465 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], 6466 sizeof(old_tddb->isid))) 6467 /* different isid */ 6468 return QLA_ERROR; 6469 6470 return QLA_SUCCESS; 6471 } 6472 6473 /** 6474 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt 6475 * @ha: Pointer to host adapter structure. 6476 * @list_nt: list of nt targets. 6477 * @fw_ddb_entry: firmware ddb entry. 6478 * 6479 * This routine checks if fw_ddb_entry already exists in list_nt to avoid 6480 * a duplicate ddb in list_nt. 6481 * Return QLA_SUCCESS if a duplicate ddb exists in list_nt. 6482 * Note: This function also updates the isid of the DDB if required. 
6483 **/ 6484 6485 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, 6486 struct list_head *list_nt, 6487 struct dev_db_entry *fw_ddb_entry) 6488 { 6489 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 6490 struct ql4_tuple_ddb *fw_tddb = NULL; 6491 struct ql4_tuple_ddb *tmp_tddb = NULL; 6492 int rval, ret = QLA_ERROR; 6493 6494 fw_tddb = vzalloc(sizeof(*fw_tddb)); 6495 if (!fw_tddb) { 6496 DEBUG2(ql4_printk(KERN_WARNING, ha, 6497 "Memory Allocation failed.\n")); 6498 ret = QLA_SUCCESS; 6499 goto exit_check; 6500 } 6501 6502 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 6503 if (!tmp_tddb) { 6504 DEBUG2(ql4_printk(KERN_WARNING, ha, 6505 "Memory Allocation failed.\n")); 6506 ret = QLA_SUCCESS; 6507 goto exit_check; 6508 } 6509 6510 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); 6511 6512 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6513 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, 6514 nt_ddb_idx->flash_isid); 6515 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); 6516 /* found duplicate ddb */ 6517 if (ret == QLA_SUCCESS) 6518 goto exit_check; 6519 } 6520 6521 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 6522 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); 6523 6524 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); 6525 if (ret == QLA_SUCCESS) { 6526 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); 6527 if (rval == QLA_SUCCESS) 6528 ret = QLA_ERROR; 6529 else 6530 ret = QLA_SUCCESS; 6531 6532 goto exit_check; 6533 } 6534 } 6535 6536 exit_check: 6537 if (fw_tddb) 6538 vfree(fw_tddb); 6539 if (tmp_tddb) 6540 vfree(tmp_tddb); 6541 return ret; 6542 } 6543 6544 static void qla4xxx_free_ddb_list(struct list_head *list_ddb) 6545 { 6546 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6547 6548 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6549 list_del_init(&ddb_idx->list); 6550 vfree(ddb_idx); 6551 } 6552 } 6553 6554 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 6555 struct dev_db_entry *fw_ddb_entry) 6556 { 6557 struct iscsi_endpoint *ep; 6558 struct sockaddr_in *addr; 6559 struct sockaddr_in6 *addr6; 6560 struct sockaddr *t_addr; 6561 struct sockaddr_storage *dst_addr; 6562 char *ip; 6563 6564 /* TODO: need to destroy on unload iscsi_endpoint*/ 6565 dst_addr = vmalloc(sizeof(*dst_addr)); 6566 if (!dst_addr) 6567 return NULL; 6568 6569 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { 6570 t_addr = (struct sockaddr *)dst_addr; 6571 t_addr->sa_family = AF_INET6; 6572 addr6 = (struct sockaddr_in6 *)dst_addr; 6573 ip = (char *)&addr6->sin6_addr; 6574 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 6575 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6576 6577 } else { 6578 t_addr = (struct sockaddr *)dst_addr; 6579 t_addr->sa_family = AF_INET; 6580 addr = (struct sockaddr_in *)dst_addr; 6581 ip = (char *)&addr->sin_addr; 6582 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); 6583 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 6584 } 6585 6586 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); 6587 vfree(dst_addr); 6588 return ep; 6589 } 6590 6591 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) 6592 { 6593 if (ql4xdisablesysfsboot) 6594 return QLA_SUCCESS; 6595 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) 6596 return QLA_ERROR; 6597 return QLA_SUCCESS; 6598 } 6599 6600 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 6601 struct ddb_entry 
*ddb_entry, 6602 uint16_t idx) 6603 { 6604 uint16_t def_timeout; 6605 6606 ddb_entry->ddb_type = FLASH_DDB; 6607 ddb_entry->fw_ddb_index = INVALID_ENTRY; 6608 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 6609 ddb_entry->ha = ha; 6610 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; 6611 ddb_entry->ddb_change = qla4xxx_flash_ddb_change; 6612 ddb_entry->chap_tbl_idx = INVALID_ENTRY; 6613 6614 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 6615 atomic_set(&ddb_entry->relogin_timer, 0); 6616 atomic_set(&ddb_entry->relogin_retry_count, 0); 6617 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 6618 ddb_entry->default_relogin_timeout = 6619 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? 6620 def_timeout : LOGIN_TOV; 6621 ddb_entry->default_time2wait = 6622 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 6623 6624 if (ql4xdisablesysfsboot && 6625 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) 6626 set_bit(DF_BOOT_TGT, &ddb_entry->flags); 6627 } 6628 6629 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) 6630 { 6631 uint32_t idx = 0; 6632 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ 6633 uint32_t sts[MBOX_REG_COUNT]; 6634 uint32_t ip_state; 6635 unsigned long wtime; 6636 int ret; 6637 6638 wtime = jiffies + (HZ * IP_CONFIG_TOV); 6639 do { 6640 for (idx = 0; idx < IP_ADDR_COUNT; idx++) { 6641 if (ip_idx[idx] == -1) 6642 continue; 6643 6644 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); 6645 6646 if (ret == QLA_ERROR) { 6647 ip_idx[idx] = -1; 6648 continue; 6649 } 6650 6651 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; 6652 6653 DEBUG2(ql4_printk(KERN_INFO, ha, 6654 "Waiting for IP state for idx = %d, state = 0x%x\n", 6655 ip_idx[idx], ip_state)); 6656 if (ip_state == IP_ADDRSTATE_UNCONFIGURED || 6657 ip_state == IP_ADDRSTATE_INVALID || 6658 ip_state == IP_ADDRSTATE_PREFERRED || 6659 ip_state == IP_ADDRSTATE_DEPRICATED || 6660 ip_state == IP_ADDRSTATE_DISABLING) 6661 ip_idx[idx] = -1; 6662 } 6663 6664 /* Break if all IP states checked */ 6665 if ((ip_idx[0] == -1) && 6666 (ip_idx[1] == -1) && 6667 (ip_idx[2] == -1) && 6668 (ip_idx[3] == -1)) 6669 break; 6670 schedule_timeout_uninterruptible(HZ); 6671 } while (time_after(wtime, jiffies)); 6672 } 6673 6674 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, 6675 struct dev_db_entry *flash_ddb_entry) 6676 { 6677 uint16_t options = 0; 6678 size_t ip_len = IP_ADDR_LEN; 6679 6680 options = le16_to_cpu(fw_ddb_entry->options); 6681 if (options & DDB_OPT_IPV6_DEVICE) 6682 ip_len = IPv6_ADDR_LEN; 6683 6684 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) 6685 return QLA_ERROR; 6686 6687 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], 6688 sizeof(fw_ddb_entry->isid))) 6689 return QLA_ERROR; 6690 6691 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, 6692 sizeof(fw_ddb_entry->port))) 6693 return QLA_ERROR; 6694 6695 return QLA_SUCCESS; 6696 } 6697 6698 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, 6699 struct dev_db_entry *fw_ddb_entry, 6700 uint32_t fw_idx, uint32_t *flash_index) 6701 { 6702 struct dev_db_entry *flash_ddb_entry; 6703 dma_addr_t flash_ddb_entry_dma; 6704 uint32_t idx = 0; 6705 int max_ddbs; 6706 int ret = QLA_ERROR, status; 6707 6708 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6709 MAX_DEV_DB_ENTRIES; 6710 6711 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6712 &flash_ddb_entry_dma); 6713 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { 6714 ql4_printk(KERN_ERR, ha, "Out of memory\n"); 6715 goto exit_find_st_idx; 6716 } 6717 6718 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6719 flash_ddb_entry_dma, fw_idx); 6720 if (status == QLA_SUCCESS) { 6721 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6722 if (status == QLA_SUCCESS) { 6723 *flash_index = fw_idx; 6724 ret = QLA_SUCCESS; 6725 goto exit_find_st_idx; 6726 } 6727 } 6728 6729 for (idx = 0; idx < max_ddbs; idx++) { 6730 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, 6731 flash_ddb_entry_dma, idx); 6732 if (status == QLA_ERROR) 6733 continue; 6734 6735 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); 6736 if (status == QLA_SUCCESS) { 6737 *flash_index = idx; 6738 ret = QLA_SUCCESS; 6739 goto exit_find_st_idx; 6740 } 6741 } 6742 6743 if (idx == max_ddbs) 6744 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", 6745 fw_idx); 6746 6747 exit_find_st_idx: 6748 if (flash_ddb_entry) 6749 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, 6750 flash_ddb_entry_dma); 6751 6752 return ret; 6753 } 6754 6755 static void qla4xxx_build_st_list(struct scsi_qla_host *ha, 6756 struct list_head *list_st) 6757 { 6758 struct qla_ddb_index *st_ddb_idx; 6759 int max_ddbs; 6760 int fw_idx_size; 6761 struct dev_db_entry *fw_ddb_entry; 6762 dma_addr_t fw_ddb_dma; 6763 int ret; 6764 uint32_t idx = 0, next_idx = 0; 6765 uint32_t state = 0, conn_err = 0; 6766 uint32_t flash_index = -1; 6767 uint16_t conn_id = 0; 6768 6769 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6770 &fw_ddb_dma); 6771 if (fw_ddb_entry == NULL) { 6772 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 6773 goto exit_st_list; 6774 } 6775 6776 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6777 MAX_DEV_DB_ENTRIES; 6778 fw_idx_size = sizeof(struct qla_ddb_index); 6779 6780 for (idx = 0; idx < max_ddbs; idx = next_idx) { 6781 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 6782 NULL, &next_idx, &state, 6783 &conn_err, NULL, &conn_id); 6784 if (ret == QLA_ERROR) 6785 break; 6786 6787 /* Ignore DDB if invalid state (unassigned) */ 6788 if (state == DDB_DS_UNASSIGNED) 6789 goto continue_next_st; 6790 6791 /* Check if ST, add to the list_st */ 6792 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 6793 goto continue_next_st; 6794 6795 st_ddb_idx = vzalloc(fw_idx_size); 6796 if (!st_ddb_idx) 6797 break; 6798 6799 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, 6800 &flash_index); 6801 if (ret == QLA_ERROR) { 6802 ql4_printk(KERN_ERR, ha, 6803 "No flash entry for ST at idx [%d]\n", idx); 6804 st_ddb_idx->flash_ddb_idx = idx; 6805 } else { 6806 ql4_printk(KERN_INFO, ha, 6807 "ST at idx [%d] is stored at flash [%d]\n", 6808 idx, flash_index); 6809 st_ddb_idx->flash_ddb_idx = flash_index; 6810 } 6811 6812 st_ddb_idx->fw_ddb_idx = idx; 6813 6814 list_add_tail(&st_ddb_idx->list, list_st); 6815 continue_next_st: 6816 if (next_idx == 0) 6817 break; 6818 } 6819 6820 exit_st_list: 6821 if (fw_ddb_entry) 6822 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 6823 } 6824 6825 /** 6826 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list 6827 * @ha: pointer to adapter structure 6828 * @list_ddb: List from which failed ddb to be removed 6829 * 6830 * Iterate over the list of DDBs and find and remove DDBs that are either in 6831 * no connection active state or failed state 6832 **/ 6833 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, 6834 struct list_head *list_ddb) 6835 { 6836 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6837 uint32_t next_idx = 0; 6838 uint32_t state = 0, conn_err = 0; 6839 int ret; 6840 6841 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6842 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, 6843 NULL, 0, NULL, &next_idx, &state, 6844 &conn_err, NULL, NULL); 6845 if (ret == QLA_ERROR) 6846 continue; 6847 6848 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 6849 state == DDB_DS_SESSION_FAILED) { 6850 list_del_init(&ddb_idx->list); 6851 vfree(ddb_idx); 6852 } 6853 } 6854 } 6855 6856 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, 6857 struct ddb_entry *ddb_entry, 6858 struct dev_db_entry *fw_ddb_entry) 6859 { 6860 struct iscsi_cls_session *cls_sess; 6861 struct iscsi_session *sess; 6862 uint32_t max_ddbs = 0; 6863 uint16_t ddb_link = -1; 6864 6865 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 6866 MAX_DEV_DB_ENTRIES; 6867 6868 cls_sess = ddb_entry->sess; 6869 sess = cls_sess->dd_data; 6870 6871 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6872 if (ddb_link < max_ddbs) 6873 sess->discovery_parent_idx = ddb_link; 6874 else 6875 sess->discovery_parent_idx = DDB_NO_LINK; 6876 } 6877 6878 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, 6879 struct dev_db_entry *fw_ddb_entry, 6880 int is_reset, uint16_t idx) 6881 { 6882 struct iscsi_cls_session *cls_sess; 6883 struct iscsi_session *sess; 6884 struct iscsi_cls_conn *cls_conn; 6885 struct iscsi_endpoint *ep; 6886 uint16_t cmds_max = 32; 6887 uint16_t conn_id = 0; 6888 uint32_t initial_cmdsn = 0; 6889 int ret = QLA_SUCCESS; 6890 6891 struct ddb_entry *ddb_entry = NULL; 6892 6893 /* Create the session object with INVALID_ENTRY, 6894 * the target_id will get set when we issue the login 6895 */ 6896 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, 6897 cmds_max, sizeof(struct ddb_entry), 6898 sizeof(struct ql4_task_data), 6899 initial_cmdsn, INVALID_ENTRY); 6900 if (!cls_sess) { 6901 ret = QLA_ERROR; 6902 goto exit_setup; 6903 } 6904 6905 /* 6906 * iscsi_session_setup() takes a reference on the transport owner (this 6907 * driver), which would prevent module unload, so drop it with module_put(). 6908 **/ 6909 module_put(qla4xxx_iscsi_transport.owner); 6910 sess = cls_sess->dd_data; 6911 ddb_entry = sess->dd_data; 6912 ddb_entry->sess = cls_sess; 6913 6914 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 6915 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, 6916 sizeof(struct dev_db_entry)); 6917 6918 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); 6919 6920 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); 6921 6922 if (!cls_conn) { 6923 ret = QLA_ERROR; 6924 goto exit_setup; 6925 } 6926 6927 ddb_entry->conn = cls_conn; 6928 6929 /* Set up ep for displaying attributes in sysfs */ 6930 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); 6931 if (ep) { 6932 ep->conn = cls_conn; 6933 cls_conn->ep = ep; 6934 } else { 6935 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); 6936 ret = QLA_ERROR; 6937 goto exit_setup; 6938 } 6939 6940 /* Update sess/conn params */ 6941 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); 6942 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); 6943 6944 if (is_reset == RESET_ADAPTER) { 6945 iscsi_block_session(cls_sess); 6946 /* Use the relogin path to discover new devices 6947 * by short-circuiting the logic of setting 6948 * timer to relogin - instead set the flags 6949 * to initiate login right away. 
6950 */ 6951 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); 6952 set_bit(DF_RELOGIN, &ddb_entry->flags); 6953 } 6954 6955 exit_setup: 6956 return ret; 6957 } 6958 6959 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, 6960 struct list_head *list_ddb, 6961 struct dev_db_entry *fw_ddb_entry) 6962 { 6963 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 6964 uint16_t ddb_link; 6965 6966 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 6967 6968 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { 6969 if (ddb_idx->fw_ddb_idx == ddb_link) { 6970 DEBUG2(ql4_printk(KERN_INFO, ha, 6971 "Updating NT parent idx from [%d] to [%d]\n", 6972 ddb_link, ddb_idx->flash_ddb_idx)); 6973 fw_ddb_entry->ddb_link = 6974 cpu_to_le16(ddb_idx->flash_ddb_idx); 6975 return; 6976 } 6977 } 6978 } 6979 6980 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, 6981 struct list_head *list_nt, 6982 struct list_head *list_st, 6983 int is_reset) 6984 { 6985 struct dev_db_entry *fw_ddb_entry; 6986 struct ddb_entry *ddb_entry = NULL; 6987 dma_addr_t fw_ddb_dma; 6988 int max_ddbs; 6989 int fw_idx_size; 6990 int ret; 6991 uint32_t idx = 0, next_idx = 0; 6992 uint32_t state = 0, conn_err = 0; 6993 uint32_t ddb_idx = -1; 6994 uint16_t conn_id = 0; 6995 uint16_t ddb_link = -1; 6996 struct qla_ddb_index *nt_ddb_idx; 6997 6998 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 6999 &fw_ddb_dma); 7000 if (fw_ddb_entry == NULL) { 7001 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7002 goto exit_nt_list; 7003 } 7004 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 7005 MAX_DEV_DB_ENTRIES; 7006 fw_idx_size = sizeof(struct qla_ddb_index); 7007 7008 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7009 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7010 NULL, &next_idx, &state, 7011 &conn_err, NULL, &conn_id); 7012 if (ret == QLA_ERROR) 7013 break; 7014 7015 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) 7016 goto continue_next_nt; 7017 7018 /* Check if NT, then add to list it */ 7019 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 7020 goto continue_next_nt; 7021 7022 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); 7023 if (ddb_link < max_ddbs) 7024 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); 7025 7026 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || 7027 state == DDB_DS_SESSION_FAILED) && 7028 (is_reset == INIT_ADAPTER)) 7029 goto continue_next_nt; 7030 7031 DEBUG2(ql4_printk(KERN_INFO, ha, 7032 "Adding DDB to session = 0x%x\n", idx)); 7033 7034 if (is_reset == INIT_ADAPTER) { 7035 nt_ddb_idx = vmalloc(fw_idx_size); 7036 if (!nt_ddb_idx) 7037 break; 7038 7039 nt_ddb_idx->fw_ddb_idx = idx; 7040 7041 /* Copy original isid as it may get updated in function 7042 * qla4xxx_update_isid(). 
We need original isid in 7043 * function qla4xxx_compare_tuple_ddb to find duplicate 7044 * target */ 7045 memcpy(&nt_ddb_idx->flash_isid[0], 7046 &fw_ddb_entry->isid[0], 7047 sizeof(nt_ddb_idx->flash_isid)); 7048 7049 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, 7050 fw_ddb_entry); 7051 if (ret == QLA_SUCCESS) { 7052 /* free nt_ddb_idx and do not add to list_nt */ 7053 vfree(nt_ddb_idx); 7054 goto continue_next_nt; 7055 } 7056 7057 /* Copy updated isid */ 7058 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, 7059 sizeof(struct dev_db_entry)); 7060 7061 list_add_tail(&nt_ddb_idx->list, list_nt); 7062 } else if (is_reset == RESET_ADAPTER) { 7063 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 7064 &ddb_idx); 7065 if (ret == QLA_SUCCESS) { 7066 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, 7067 ddb_idx); 7068 if (ddb_entry != NULL) 7069 qla4xxx_update_sess_disc_idx(ha, 7070 ddb_entry, 7071 fw_ddb_entry); 7072 goto continue_next_nt; 7073 } 7074 } 7075 7076 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); 7077 if (ret == QLA_ERROR) 7078 goto exit_nt_list; 7079 7080 continue_next_nt: 7081 if (next_idx == 0) 7082 break; 7083 } 7084 7085 exit_nt_list: 7086 if (fw_ddb_entry) 7087 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7088 } 7089 7090 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, 7091 struct list_head *list_nt, 7092 uint16_t target_id) 7093 { 7094 struct dev_db_entry *fw_ddb_entry; 7095 dma_addr_t fw_ddb_dma; 7096 int max_ddbs; 7097 int fw_idx_size; 7098 int ret; 7099 uint32_t idx = 0, next_idx = 0; 7100 uint32_t state = 0, conn_err = 0; 7101 uint16_t conn_id = 0; 7102 struct qla_ddb_index *nt_ddb_idx; 7103 7104 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7105 &fw_ddb_dma); 7106 if (fw_ddb_entry == NULL) { 7107 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 7108 goto exit_new_nt_list; 7109 } 7110 max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : 7111 MAX_DEV_DB_ENTRIES; 7112 fw_idx_size = sizeof(struct qla_ddb_index); 7113 7114 for (idx = 0; idx < max_ddbs; idx = next_idx) { 7115 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, 7116 NULL, &next_idx, &state, 7117 &conn_err, NULL, &conn_id); 7118 if (ret == QLA_ERROR) 7119 break; 7120 7121 /* Check if NT, then add it to list */ 7122 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7123 goto continue_next_new_nt; 7124 7125 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) 7126 goto continue_next_new_nt; 7127 7128 DEBUG2(ql4_printk(KERN_INFO, ha, 7129 "Adding DDB to session = 0x%x\n", idx)); 7130 7131 nt_ddb_idx = vmalloc(fw_idx_size); 7132 if (!nt_ddb_idx) 7133 break; 7134 7135 nt_ddb_idx->fw_ddb_idx = idx; 7136 7137 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); 7138 if (ret == QLA_SUCCESS) { 7139 /* free nt_ddb_idx and do not add to list_nt */ 7140 vfree(nt_ddb_idx); 7141 goto continue_next_new_nt; 7142 } 7143 7144 if (target_id < max_ddbs) 7145 fw_ddb_entry->ddb_link = cpu_to_le16(target_id); 7146 7147 list_add_tail(&nt_ddb_idx->list, list_nt); 7148 7149 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7150 idx); 7151 if (ret == QLA_ERROR) 7152 goto exit_new_nt_list; 7153 7154 continue_next_new_nt: 7155 if (next_idx == 0) 7156 break; 7157 } 7158 7159 exit_new_nt_list: 7160 if (fw_ddb_entry) 7161 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7162 } 7163 7164 /** 7165 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry 7166 * @dev: dev associated with the sysfs entry 7167 * @data: pointer to flashnode session object 7168 * 7169 * Returns: 7170 * 1: if flashnode entry is non-persistent 7171 * 0: if flashnode entry is persistent 7172 **/ 7173 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) 7174 { 7175 struct iscsi_bus_flash_session *fnode_sess; 7176 7177 if (!iscsi_flashnode_bus_match(dev, NULL)) 7178 return 0; 7179 7180 fnode_sess = iscsi_dev_to_flash_session(dev); 7181 7182 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); 7183 } 7184 7185 /** 7186 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target 7187 * @ha: pointer to host 7188 * @fw_ddb_entry: flash ddb data 7189 * @idx: target index 7190 * @user: if set then this call is made from userland else from kernel 7191 * 7192 * Returns: 7193 * On success: QLA_SUCCESS 7194 * On failure: QLA_ERROR 7195 * 7196 * This creates separate sysfs entries for session and connection attributes of 7197 * the given fw ddb entry. 7198 * If this is invoked as a result of a userspace call then the entry is marked 7199 * as non-persistent using the flash_state field. 
7200 **/ 7201 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 7202 struct dev_db_entry *fw_ddb_entry, 7203 uint16_t *idx, int user) 7204 { 7205 struct iscsi_bus_flash_session *fnode_sess = NULL; 7206 struct iscsi_bus_flash_conn *fnode_conn = NULL; 7207 int rc = QLA_ERROR; 7208 7209 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, 7210 &qla4xxx_iscsi_transport, 0); 7211 if (!fnode_sess) { 7212 ql4_printk(KERN_ERR, ha, 7213 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", 7214 __func__, *idx, ha->host_no); 7215 goto exit_tgt_create; 7216 } 7217 7218 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, 7219 &qla4xxx_iscsi_transport, 0); 7220 if (!fnode_conn) { 7221 ql4_printk(KERN_ERR, ha, 7222 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", 7223 __func__, *idx, ha->host_no); 7224 goto free_sess; 7225 } 7226 7227 if (user) { 7228 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; 7229 } else { 7230 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7231 7232 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) 7233 fnode_sess->is_boot_target = 1; 7234 else 7235 fnode_sess->is_boot_target = 0; 7236 } 7237 7238 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7239 fw_ddb_entry); 7240 7241 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7242 __func__, fnode_sess->dev.kobj.name); 7243 7244 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7245 __func__, fnode_conn->dev.kobj.name); 7246 7247 return QLA_SUCCESS; 7248 7249 free_sess: 7250 iscsi_destroy_flashnode_sess(fnode_sess); 7251 7252 exit_tgt_create: 7253 return QLA_ERROR; 7254 } 7255 7256 /** 7257 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash 7258 * @shost: pointer to host 7259 * @buf: type of ddb entry (ipv4/ipv6) 7260 * @len: length of buf 7261 * 7262 * This creates new ddb entry in the flash by finding first free index and 7263 * storing default ddb there. And then create sysfs entry for the new ddb entry. 7264 **/ 7265 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, 7266 int len) 7267 { 7268 struct scsi_qla_host *ha = to_qla_host(shost); 7269 struct dev_db_entry *fw_ddb_entry = NULL; 7270 dma_addr_t fw_ddb_entry_dma; 7271 struct device *dev; 7272 uint16_t idx = 0; 7273 uint16_t max_ddbs = 0; 7274 uint32_t options = 0; 7275 uint32_t rval = QLA_ERROR; 7276 7277 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && 7278 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { 7279 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", 7280 __func__)); 7281 goto exit_ddb_add; 7282 } 7283 7284 max_ddbs = is_qla40XX(ha) ? 
MAX_PRST_DEV_DB_ENTRIES : 7285 MAX_DEV_DB_ENTRIES; 7286 7287 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7288 &fw_ddb_entry_dma, GFP_KERNEL); 7289 if (!fw_ddb_entry) { 7290 DEBUG2(ql4_printk(KERN_ERR, ha, 7291 "%s: Unable to allocate dma buffer\n", 7292 __func__)); 7293 goto exit_ddb_add; 7294 } 7295 7296 dev = iscsi_find_flashnode_sess(ha->host, NULL, 7297 qla4xxx_sysfs_ddb_is_non_persistent); 7298 if (dev) { 7299 ql4_printk(KERN_ERR, ha, 7300 "%s: A non-persistent entry %s found\n", 7301 __func__, dev->kobj.name); 7302 put_device(dev); 7303 goto exit_ddb_add; 7304 } 7305 7306 /* Index 0 and 1 are reserved for boot target entries */ 7307 for (idx = 2; idx < max_ddbs; idx++) { 7308 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, 7309 fw_ddb_entry_dma, idx)) 7310 break; 7311 } 7312 7313 if (idx == max_ddbs) 7314 goto exit_ddb_add; 7315 7316 if (!strncasecmp("ipv6", buf, 4)) 7317 options |= IPV6_DEFAULT_DDB_ENTRY; 7318 7319 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7320 if (rval == QLA_ERROR) 7321 goto exit_ddb_add; 7322 7323 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); 7324 7325 exit_ddb_add: 7326 if (fw_ddb_entry) 7327 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7328 fw_ddb_entry, fw_ddb_entry_dma); 7329 if (rval == QLA_SUCCESS) 7330 return idx; 7331 else 7332 return -EIO; 7333 } 7334 7335 /** 7336 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash 7337 * @fnode_sess: pointer to session attrs of flash ddb entry 7338 * @fnode_conn: pointer to connection attrs of flash ddb entry 7339 * 7340 * This writes the contents of target ddb buffer to Flash with a valid cookie 7341 * value in order to make the ddb entry persistent. 7342 **/ 7343 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, 7344 struct iscsi_bus_flash_conn *fnode_conn) 7345 { 7346 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7347 struct scsi_qla_host *ha = to_qla_host(shost); 7348 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; 7349 struct dev_db_entry *fw_ddb_entry = NULL; 7350 dma_addr_t fw_ddb_entry_dma; 7351 uint32_t options = 0; 7352 int rval = 0; 7353 7354 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7355 &fw_ddb_entry_dma, GFP_KERNEL); 7356 if (!fw_ddb_entry) { 7357 DEBUG2(ql4_printk(KERN_ERR, ha, 7358 "%s: Unable to allocate dma buffer\n", 7359 __func__)); 7360 rval = -ENOMEM; 7361 goto exit_ddb_apply; 7362 } 7363 7364 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7365 options |= IPV6_DEFAULT_DDB_ENTRY; 7366 7367 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7368 if (rval == QLA_ERROR) 7369 goto exit_ddb_apply; 7370 7371 dev_db_start_offset += (fnode_sess->target_id * 7372 sizeof(*fw_ddb_entry)); 7373 7374 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7375 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7376 7377 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 7378 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); 7379 7380 if (rval == QLA_SUCCESS) { 7381 fnode_sess->flash_state = DEV_DB_PERSISTENT; 7382 ql4_printk(KERN_INFO, ha, 7383 "%s: flash node %u of host %lu written to flash\n", 7384 __func__, fnode_sess->target_id, ha->host_no); 7385 } else { 7386 rval = -EIO; 7387 ql4_printk(KERN_ERR, ha, 7388 "%s: Error while writing flash node %u of host %lu to flash\n", 7389 __func__, fnode_sess->target_id, ha->host_no); 7390 } 7391 7392 exit_ddb_apply: 7393 if (fw_ddb_entry) 7394 
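		/* common exit: release the DDB DMA buffer, if one was allocated */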
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7395 fw_ddb_entry, fw_ddb_entry_dma); 7396 return rval; 7397 } 7398 7399 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, 7400 struct dev_db_entry *fw_ddb_entry, 7401 uint16_t idx) 7402 { 7403 struct dev_db_entry *ddb_entry = NULL; 7404 dma_addr_t ddb_entry_dma; 7405 unsigned long wtime; 7406 uint32_t mbx_sts = 0; 7407 uint32_t state = 0, conn_err = 0; 7408 uint16_t tmo = 0; 7409 int ret = 0; 7410 7411 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7412 &ddb_entry_dma, GFP_KERNEL); 7413 if (!ddb_entry) { 7414 DEBUG2(ql4_printk(KERN_ERR, ha, 7415 "%s: Unable to allocate dma buffer\n", 7416 __func__)); 7417 return QLA_ERROR; 7418 } 7419 7420 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); 7421 7422 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); 7423 if (ret != QLA_SUCCESS) { 7424 DEBUG2(ql4_printk(KERN_ERR, ha, 7425 "%s: Unable to set ddb entry for index %d\n", 7426 __func__, idx)); 7427 goto exit_ddb_conn_open; 7428 } 7429 7430 qla4xxx_conn_open(ha, idx); 7431 7432 /* To ensure that sendtargets is done, wait for at least 12 secs */ 7433 tmo = ((ha->def_timeout > LOGIN_TOV) && 7434 (ha->def_timeout < LOGIN_TOV * 10) ? 7435 ha->def_timeout : LOGIN_TOV); 7436 7437 DEBUG2(ql4_printk(KERN_INFO, ha, 7438 "Default time to wait for login to ddb %d\n", tmo)); 7439 7440 wtime = jiffies + (HZ * tmo); 7441 do { 7442 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, 7443 NULL, &state, &conn_err, NULL, 7444 NULL); 7445 if (ret == QLA_ERROR) 7446 continue; 7447 7448 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 7449 state == DDB_DS_SESSION_FAILED) 7450 break; 7451 7452 schedule_timeout_uninterruptible(HZ / 10); 7453 } while (time_after(wtime, jiffies)); 7454 7455 exit_ddb_conn_open: 7456 if (ddb_entry) 7457 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), 7458 ddb_entry, ddb_entry_dma); 7459 return ret; 7460 } 7461 7462 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, 7463 struct dev_db_entry *fw_ddb_entry, 7464 uint16_t target_id) 7465 { 7466 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; 7467 struct list_head list_nt; 7468 uint16_t ddb_index; 7469 int ret = 0; 7470 7471 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { 7472 ql4_printk(KERN_WARNING, ha, 7473 "%s: A discovery already in progress!\n", __func__); 7474 return QLA_ERROR; 7475 } 7476 7477 INIT_LIST_HEAD(&list_nt); 7478 7479 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7480 7481 ret = qla4xxx_get_ddb_index(ha, &ddb_index); 7482 if (ret == QLA_ERROR) 7483 goto exit_login_st_clr_bit; 7484 7485 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); 7486 if (ret == QLA_ERROR) 7487 goto exit_login_st; 7488 7489 qla4xxx_build_new_nt_list(ha, &list_nt, target_id); 7490 7491 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { 7492 list_del_init(&ddb_idx->list); 7493 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); 7494 vfree(ddb_idx); 7495 } 7496 7497 exit_login_st: 7498 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { 7499 ql4_printk(KERN_ERR, ha, 7500 "Unable to clear DDB index = 0x%x\n", ddb_index); 7501 } 7502 7503 clear_bit(ddb_index, ha->ddb_idx_map); 7504 7505 exit_login_st_clr_bit: 7506 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); 7507 return ret; 7508 } 7509 7510 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, 7511 struct dev_db_entry *fw_ddb_entry, 7512 uint16_t idx) 7513 { 7514 int ret = QLA_ERROR; 7515 7516 ret = qla4xxx_is_session_exists(ha, 
fw_ddb_entry, NULL); 7517 if (ret != QLA_SUCCESS) 7518 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, 7519 idx); 7520 else 7521 ret = -EPERM; 7522 7523 return ret; 7524 } 7525 7526 /** 7527 * qla4xxx_sysfs_ddb_login - Login to the specified target 7528 * @fnode_sess: pointer to session attrs of flash ddb entry 7529 * @fnode_conn: pointer to connection attrs of flash ddb entry 7530 * 7531 * This logs in to the specified target 7532 **/ 7533 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, 7534 struct iscsi_bus_flash_conn *fnode_conn) 7535 { 7536 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7537 struct scsi_qla_host *ha = to_qla_host(shost); 7538 struct dev_db_entry *fw_ddb_entry = NULL; 7539 dma_addr_t fw_ddb_entry_dma; 7540 uint32_t options = 0; 7541 int ret = 0; 7542 7543 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { 7544 ql4_printk(KERN_ERR, ha, 7545 "%s: Target info is not persistent\n", __func__); 7546 ret = -EIO; 7547 goto exit_ddb_login; 7548 } 7549 7550 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7551 &fw_ddb_entry_dma, GFP_KERNEL); 7552 if (!fw_ddb_entry) { 7553 DEBUG2(ql4_printk(KERN_ERR, ha, 7554 "%s: Unable to allocate dma buffer\n", 7555 __func__)); 7556 ret = -ENOMEM; 7557 goto exit_ddb_login; 7558 } 7559 7560 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7561 options |= IPV6_DEFAULT_DDB_ENTRY; 7562 7563 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); 7564 if (ret == QLA_ERROR) 7565 goto exit_ddb_login; 7566 7567 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); 7568 fw_ddb_entry->cookie = DDB_VALID_COOKIE; 7569 7570 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) 7571 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, 7572 fnode_sess->target_id); 7573 else 7574 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, 7575 fnode_sess->target_id); 7576 7577 if (ret > 0) 7578 ret = -EIO; 7579 7580 exit_ddb_login: 7581 if (fw_ddb_entry) 7582 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7583 fw_ddb_entry, fw_ddb_entry_dma); 7584 return ret; 7585 } 7586 7587 /** 7588 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target 7589 * @cls_sess: pointer to session to be logged out 7590 * 7591 * This performs session log out from the specified target 7592 **/ 7593 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) 7594 { 7595 struct iscsi_session *sess; 7596 struct ddb_entry *ddb_entry = NULL; 7597 struct scsi_qla_host *ha; 7598 struct dev_db_entry *fw_ddb_entry = NULL; 7599 dma_addr_t fw_ddb_entry_dma; 7600 unsigned long flags; 7601 unsigned long wtime; 7602 uint32_t ddb_state; 7603 int options; 7604 int ret = 0; 7605 7606 sess = cls_sess->dd_data; 7607 ddb_entry = sess->dd_data; 7608 ha = ddb_entry->ha; 7609 7610 if (ddb_entry->ddb_type != FLASH_DDB) { 7611 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", 7612 __func__); 7613 ret = -ENXIO; 7614 goto exit_ddb_logout; 7615 } 7616 7617 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 7618 ql4_printk(KERN_ERR, ha, 7619 "%s: Logout from boot target entry is not permitted.\n", 7620 __func__); 7621 ret = -EPERM; 7622 goto exit_ddb_logout; 7623 } 7624 7625 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7626 &fw_ddb_entry_dma, GFP_KERNEL); 7627 if (!fw_ddb_entry) { 7628 ql4_printk(KERN_ERR, ha, 7629 "%s: Unable to allocate dma buffer\n", __func__); 7630 ret = -ENOMEM; 7631 goto exit_ddb_logout; 7632 } 7633 7634 if 
(test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) 7635 goto ddb_logout_init; 7636 7637 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7638 fw_ddb_entry, fw_ddb_entry_dma, 7639 NULL, NULL, &ddb_state, NULL, 7640 NULL, NULL); 7641 if (ret == QLA_ERROR) 7642 goto ddb_logout_init; 7643 7644 if (ddb_state == DDB_DS_SESSION_ACTIVE) 7645 goto ddb_logout_init; 7646 7647 /* wait until next relogin is triggered using DF_RELOGIN and 7648 * clear DF_RELOGIN to avoid invocation of further relogin 7649 */ 7650 wtime = jiffies + (HZ * RELOGIN_TOV); 7651 do { 7652 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) 7653 goto ddb_logout_init; 7654 7655 schedule_timeout_uninterruptible(HZ); 7656 } while ((time_after(wtime, jiffies))); 7657 7658 ddb_logout_init: 7659 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 7660 atomic_set(&ddb_entry->relogin_timer, 0); 7661 7662 options = LOGOUT_OPTION_CLOSE_SESSION; 7663 qla4xxx_session_logout_ddb(ha, ddb_entry, options); 7664 7665 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); 7666 wtime = jiffies + (HZ * LOGOUT_TOV); 7667 do { 7668 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 7669 fw_ddb_entry, fw_ddb_entry_dma, 7670 NULL, NULL, &ddb_state, NULL, 7671 NULL, NULL); 7672 if (ret == QLA_ERROR) 7673 goto ddb_logout_clr_sess; 7674 7675 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 7676 (ddb_state == DDB_DS_SESSION_FAILED)) 7677 goto ddb_logout_clr_sess; 7678 7679 schedule_timeout_uninterruptible(HZ); 7680 } while ((time_after(wtime, jiffies))); 7681 7682 ddb_logout_clr_sess: 7683 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 7684 /* 7685 * we have decremented the reference count of the driver 7686 * when we setup the session to have the driver unload 7687 * to be seamless without actually destroying the 7688 * session 7689 **/ 7690 try_module_get(qla4xxx_iscsi_transport.owner); 7691 iscsi_destroy_endpoint(ddb_entry->conn->ep); 7692 7693 spin_lock_irqsave(&ha->hardware_lock, flags); 7694 qla4xxx_free_ddb(ha, ddb_entry); 7695 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); 7696 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7697 7698 iscsi_session_teardown(ddb_entry->sess); 7699 7700 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); 7701 ret = QLA_SUCCESS; 7702 7703 exit_ddb_logout: 7704 if (fw_ddb_entry) 7705 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 7706 fw_ddb_entry, fw_ddb_entry_dma); 7707 return ret; 7708 } 7709 7710 /** 7711 * qla4xxx_sysfs_ddb_logout - Logout from the specified target 7712 * @fnode_sess: pointer to session attrs of flash ddb entry 7713 * @fnode_conn: pointer to connection attrs of flash ddb entry 7714 * 7715 * This performs log out from the specified target 7716 **/ 7717 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, 7718 struct iscsi_bus_flash_conn *fnode_conn) 7719 { 7720 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7721 struct scsi_qla_host *ha = to_qla_host(shost); 7722 struct ql4_tuple_ddb *flash_tddb = NULL; 7723 struct ql4_tuple_ddb *tmp_tddb = NULL; 7724 struct dev_db_entry *fw_ddb_entry = NULL; 7725 struct ddb_entry *ddb_entry = NULL; 7726 dma_addr_t fw_ddb_dma; 7727 uint32_t next_idx = 0; 7728 uint32_t state = 0, conn_err = 0; 7729 uint16_t conn_id = 0; 7730 int idx, index; 7731 int status, ret = 0; 7732 7733 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 7734 &fw_ddb_dma); 7735 if (fw_ddb_entry == NULL) { 7736 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); 7737 ret = 
-ENOMEM; 7738 goto exit_ddb_logout; 7739 } 7740 7741 flash_tddb = vzalloc(sizeof(*flash_tddb)); 7742 if (!flash_tddb) { 7743 ql4_printk(KERN_WARNING, ha, 7744 "%s:Memory Allocation failed.\n", __func__); 7745 ret = -ENOMEM; 7746 goto exit_ddb_logout; 7747 } 7748 7749 tmp_tddb = vzalloc(sizeof(*tmp_tddb)); 7750 if (!tmp_tddb) { 7751 ql4_printk(KERN_WARNING, ha, 7752 "%s:Memory Allocation failed.\n", __func__); 7753 ret = -ENOMEM; 7754 goto exit_ddb_logout; 7755 } 7756 7757 if (!fnode_sess->targetname) { 7758 ql4_printk(KERN_ERR, ha, 7759 "%s:Cannot logout from SendTarget entry\n", 7760 __func__); 7761 ret = -EPERM; 7762 goto exit_ddb_logout; 7763 } 7764 7765 if (fnode_sess->is_boot_target) { 7766 ql4_printk(KERN_ERR, ha, 7767 "%s: Logout from boot target entry is not permitted.\n", 7768 __func__); 7769 ret = -EPERM; 7770 goto exit_ddb_logout; 7771 } 7772 7773 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, 7774 ISCSI_NAME_SIZE); 7775 7776 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7777 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); 7778 else 7779 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); 7780 7781 flash_tddb->tpgt = fnode_sess->tpgt; 7782 flash_tddb->port = fnode_conn->port; 7783 7784 COPY_ISID(flash_tddb->isid, fnode_sess->isid); 7785 7786 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 7787 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 7788 if (ddb_entry == NULL) 7789 continue; 7790 7791 if (ddb_entry->ddb_type != FLASH_DDB) 7792 continue; 7793 7794 index = ddb_entry->sess->target_id; 7795 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, 7796 fw_ddb_dma, NULL, &next_idx, 7797 &state, &conn_err, NULL, 7798 &conn_id); 7799 if (status == QLA_ERROR) { 7800 ret = -ENOMEM; 7801 break; 7802 } 7803 7804 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); 7805 7806 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, 7807 true); 7808 if (status == QLA_SUCCESS) { 7809 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); 7810 break; 7811 } 7812 } 7813 7814 if (idx == MAX_DDB_ENTRIES) 7815 ret = -ESRCH; 7816 7817 exit_ddb_logout: 7818 if (flash_tddb) 7819 vfree(flash_tddb); 7820 if (tmp_tddb) 7821 vfree(tmp_tddb); 7822 if (fw_ddb_entry) 7823 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 7824 7825 return ret; 7826 } 7827 7828 static int 7829 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, 7830 int param, char *buf) 7831 { 7832 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 7833 struct scsi_qla_host *ha = to_qla_host(shost); 7834 struct iscsi_bus_flash_conn *fnode_conn; 7835 struct ql4_chap_table chap_tbl; 7836 struct device *dev; 7837 int parent_type; 7838 int rc = 0; 7839 7840 dev = iscsi_find_flashnode_conn(fnode_sess); 7841 if (!dev) 7842 return -EIO; 7843 7844 fnode_conn = iscsi_dev_to_flash_conn(dev); 7845 7846 switch (param) { 7847 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 7848 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); 7849 break; 7850 case ISCSI_FLASHNODE_PORTAL_TYPE: 7851 rc = sprintf(buf, "%s\n", fnode_sess->portal_type); 7852 break; 7853 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 7854 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); 7855 break; 7856 case ISCSI_FLASHNODE_DISCOVERY_SESS: 7857 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); 7858 break; 7859 case ISCSI_FLASHNODE_ENTRY_EN: 7860 rc = sprintf(buf, "%u\n", fnode_sess->entry_state); 7861 break; 7862 case ISCSI_FLASHNODE_HDR_DGST_EN: 7863 rc = 
sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); 7864 break; 7865 case ISCSI_FLASHNODE_DATA_DGST_EN: 7866 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); 7867 break; 7868 case ISCSI_FLASHNODE_IMM_DATA_EN: 7869 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); 7870 break; 7871 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 7872 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); 7873 break; 7874 case ISCSI_FLASHNODE_DATASEQ_INORDER: 7875 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); 7876 break; 7877 case ISCSI_FLASHNODE_PDU_INORDER: 7878 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); 7879 break; 7880 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 7881 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); 7882 break; 7883 case ISCSI_FLASHNODE_SNACK_REQ_EN: 7884 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); 7885 break; 7886 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 7887 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); 7888 break; 7889 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 7890 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); 7891 break; 7892 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 7893 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); 7894 break; 7895 case ISCSI_FLASHNODE_ERL: 7896 rc = sprintf(buf, "%u\n", fnode_sess->erl); 7897 break; 7898 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 7899 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); 7900 break; 7901 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 7902 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); 7903 break; 7904 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 7905 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); 7906 break; 7907 case ISCSI_FLASHNODE_TCP_TIMER_SCALE: 7908 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); 7909 break; 7910 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 7911 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); 7912 break; 7913 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 7914 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); 7915 break; 7916 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 7917 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); 7918 break; 7919 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 7920 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); 7921 break; 7922 case ISCSI_FLASHNODE_FIRST_BURST: 7923 rc = sprintf(buf, "%u\n", fnode_sess->first_burst); 7924 break; 7925 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 7926 rc = sprintf(buf, "%u\n", fnode_sess->time2wait); 7927 break; 7928 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 7929 rc = sprintf(buf, "%u\n", fnode_sess->time2retain); 7930 break; 7931 case ISCSI_FLASHNODE_MAX_R2T: 7932 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); 7933 break; 7934 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 7935 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); 7936 break; 7937 case ISCSI_FLASHNODE_ISID: 7938 rc = sprintf(buf, "%pm\n", fnode_sess->isid); 7939 break; 7940 case ISCSI_FLASHNODE_TSID: 7941 rc = sprintf(buf, "%u\n", fnode_sess->tsid); 7942 break; 7943 case ISCSI_FLASHNODE_PORT: 7944 rc = sprintf(buf, "%d\n", fnode_conn->port); 7945 break; 7946 case ISCSI_FLASHNODE_MAX_BURST: 7947 rc = sprintf(buf, "%u\n", fnode_sess->max_burst); 7948 break; 7949 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 7950 rc = sprintf(buf, "%u\n", 7951 fnode_sess->default_taskmgmt_timeout); 7952 break; 7953 case ISCSI_FLASHNODE_IPADDR: 7954 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7955 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress); 7956 else 7957 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); 7958 break; 7959 case 
ISCSI_FLASHNODE_ALIAS: 7960 if (fnode_sess->targetalias) 7961 rc = sprintf(buf, "%s\n", fnode_sess->targetalias); 7962 else 7963 rc = sprintf(buf, "\n"); 7964 break; 7965 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 7966 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7967 rc = sprintf(buf, "%pI6\n", 7968 fnode_conn->redirect_ipaddr); 7969 else 7970 rc = sprintf(buf, "%pI4\n", 7971 fnode_conn->redirect_ipaddr); 7972 break; 7973 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 7974 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); 7975 break; 7976 case ISCSI_FLASHNODE_LOCAL_PORT: 7977 rc = sprintf(buf, "%u\n", fnode_conn->local_port); 7978 break; 7979 case ISCSI_FLASHNODE_IPV4_TOS: 7980 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); 7981 break; 7982 case ISCSI_FLASHNODE_IPV6_TC: 7983 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7984 rc = sprintf(buf, "%u\n", 7985 fnode_conn->ipv6_traffic_class); 7986 else 7987 rc = sprintf(buf, "\n"); 7988 break; 7989 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 7990 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); 7991 break; 7992 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 7993 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) 7994 rc = sprintf(buf, "%pI6\n", 7995 fnode_conn->link_local_ipv6_addr); 7996 else 7997 rc = sprintf(buf, "\n"); 7998 break; 7999 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8000 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); 8001 break; 8002 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 8003 if (fnode_sess->discovery_parent_type == DDB_ISNS) 8004 parent_type = ISCSI_DISC_PARENT_ISNS; 8005 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 8006 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8007 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) 8008 parent_type = ISCSI_DISC_PARENT_SENDTGT; 8009 else 8010 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 8011 8012 rc = sprintf(buf, "%s\n", 8013 iscsi_get_discovery_parent_name(parent_type)); 8014 break; 8015 case ISCSI_FLASHNODE_NAME: 8016 if (fnode_sess->targetname) 8017 rc = sprintf(buf, "%s\n", fnode_sess->targetname); 8018 else 8019 rc = sprintf(buf, "\n"); 8020 break; 8021 case ISCSI_FLASHNODE_TPGT: 8022 rc = sprintf(buf, "%u\n", fnode_sess->tpgt); 8023 break; 8024 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8025 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); 8026 break; 8027 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8028 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); 8029 break; 8030 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8031 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); 8032 break; 8033 case ISCSI_FLASHNODE_USERNAME: 8034 if (fnode_sess->chap_auth_en) { 8035 qla4xxx_get_uni_chap_at_index(ha, 8036 chap_tbl.name, 8037 chap_tbl.secret, 8038 fnode_sess->chap_out_idx); 8039 rc = sprintf(buf, "%s\n", chap_tbl.name); 8040 } else { 8041 rc = sprintf(buf, "\n"); 8042 } 8043 break; 8044 case ISCSI_FLASHNODE_PASSWORD: 8045 if (fnode_sess->chap_auth_en) { 8046 qla4xxx_get_uni_chap_at_index(ha, 8047 chap_tbl.name, 8048 chap_tbl.secret, 8049 fnode_sess->chap_out_idx); 8050 rc = sprintf(buf, "%s\n", chap_tbl.secret); 8051 } else { 8052 rc = sprintf(buf, "\n"); 8053 } 8054 break; 8055 case ISCSI_FLASHNODE_STATSN: 8056 rc = sprintf(buf, "%u\n", fnode_conn->statsn); 8057 break; 8058 case ISCSI_FLASHNODE_EXP_STATSN: 8059 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); 8060 break; 8061 case ISCSI_FLASHNODE_IS_BOOT_TGT: 8062 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); 8063 break; 8064 default: 8065 rc = -ENOSYS; 8066 break; 8067 } 8068 8069 
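	/* drop the reference taken by iscsi_find_flashnode_conn() above */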
put_device(dev); 8070 return rc; 8071 } 8072 8073 /** 8074 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry 8075 * @fnode_sess: pointer to session attrs of flash ddb entry 8076 * @fnode_conn: pointer to connection attrs of flash ddb entry 8077 * @data: Parameters and their values to update 8078 * @len: len of data 8079 * 8080 * This sets the parameter of flash ddb entry and writes them to flash 8081 **/ 8082 static int 8083 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, 8084 struct iscsi_bus_flash_conn *fnode_conn, 8085 void *data, int len) 8086 { 8087 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8088 struct scsi_qla_host *ha = to_qla_host(shost); 8089 struct iscsi_flashnode_param_info *fnode_param; 8090 struct ql4_chap_table chap_tbl; 8091 struct nlattr *attr; 8092 uint16_t chap_out_idx = INVALID_ENTRY; 8093 int rc = QLA_ERROR; 8094 uint32_t rem = len; 8095 8096 memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); 8097 nla_for_each_attr(attr, data, len, rem) { 8098 fnode_param = nla_data(attr); 8099 8100 switch (fnode_param->param) { 8101 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: 8102 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; 8103 break; 8104 case ISCSI_FLASHNODE_PORTAL_TYPE: 8105 memcpy(fnode_sess->portal_type, fnode_param->value, 8106 strlen(fnode_sess->portal_type)); 8107 break; 8108 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: 8109 fnode_sess->auto_snd_tgt_disable = 8110 fnode_param->value[0]; 8111 break; 8112 case ISCSI_FLASHNODE_DISCOVERY_SESS: 8113 fnode_sess->discovery_sess = fnode_param->value[0]; 8114 break; 8115 case ISCSI_FLASHNODE_ENTRY_EN: 8116 fnode_sess->entry_state = fnode_param->value[0]; 8117 break; 8118 case ISCSI_FLASHNODE_HDR_DGST_EN: 8119 fnode_conn->hdrdgst_en = fnode_param->value[0]; 8120 break; 8121 case ISCSI_FLASHNODE_DATA_DGST_EN: 8122 fnode_conn->datadgst_en = fnode_param->value[0]; 8123 break; 8124 case ISCSI_FLASHNODE_IMM_DATA_EN: 8125 fnode_sess->imm_data_en = fnode_param->value[0]; 8126 break; 8127 case ISCSI_FLASHNODE_INITIAL_R2T_EN: 8128 fnode_sess->initial_r2t_en = fnode_param->value[0]; 8129 break; 8130 case ISCSI_FLASHNODE_DATASEQ_INORDER: 8131 fnode_sess->dataseq_inorder_en = fnode_param->value[0]; 8132 break; 8133 case ISCSI_FLASHNODE_PDU_INORDER: 8134 fnode_sess->pdu_inorder_en = fnode_param->value[0]; 8135 break; 8136 case ISCSI_FLASHNODE_CHAP_AUTH_EN: 8137 fnode_sess->chap_auth_en = fnode_param->value[0]; 8138 /* Invalidate chap index if chap auth is disabled */ 8139 if (!fnode_sess->chap_auth_en) 8140 fnode_sess->chap_out_idx = INVALID_ENTRY; 8141 8142 break; 8143 case ISCSI_FLASHNODE_SNACK_REQ_EN: 8144 fnode_conn->snack_req_en = fnode_param->value[0]; 8145 break; 8146 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: 8147 fnode_sess->discovery_logout_en = fnode_param->value[0]; 8148 break; 8149 case ISCSI_FLASHNODE_BIDI_CHAP_EN: 8150 fnode_sess->bidi_chap_en = fnode_param->value[0]; 8151 break; 8152 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: 8153 fnode_sess->discovery_auth_optional = 8154 fnode_param->value[0]; 8155 break; 8156 case ISCSI_FLASHNODE_ERL: 8157 fnode_sess->erl = fnode_param->value[0]; 8158 break; 8159 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: 8160 fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; 8161 break; 8162 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: 8163 fnode_conn->tcp_nagle_disable = fnode_param->value[0]; 8164 break; 8165 case ISCSI_FLASHNODE_TCP_WSF_DISABLE: 8166 fnode_conn->tcp_wsf_disable = fnode_param->value[0]; 8167 break; 8168 case 
ISCSI_FLASHNODE_TCP_TIMER_SCALE: 8169 fnode_conn->tcp_timer_scale = fnode_param->value[0]; 8170 break; 8171 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: 8172 fnode_conn->tcp_timestamp_en = fnode_param->value[0]; 8173 break; 8174 case ISCSI_FLASHNODE_IP_FRAG_DISABLE: 8175 fnode_conn->fragment_disable = fnode_param->value[0]; 8176 break; 8177 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: 8178 fnode_conn->max_recv_dlength = 8179 *(unsigned *)fnode_param->value; 8180 break; 8181 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: 8182 fnode_conn->max_xmit_dlength = 8183 *(unsigned *)fnode_param->value; 8184 break; 8185 case ISCSI_FLASHNODE_FIRST_BURST: 8186 fnode_sess->first_burst = 8187 *(unsigned *)fnode_param->value; 8188 break; 8189 case ISCSI_FLASHNODE_DEF_TIME2WAIT: 8190 fnode_sess->time2wait = *(uint16_t *)fnode_param->value; 8191 break; 8192 case ISCSI_FLASHNODE_DEF_TIME2RETAIN: 8193 fnode_sess->time2retain = 8194 *(uint16_t *)fnode_param->value; 8195 break; 8196 case ISCSI_FLASHNODE_MAX_R2T: 8197 fnode_sess->max_r2t = 8198 *(uint16_t *)fnode_param->value; 8199 break; 8200 case ISCSI_FLASHNODE_KEEPALIVE_TMO: 8201 fnode_conn->keepalive_timeout = 8202 *(uint16_t *)fnode_param->value; 8203 break; 8204 case ISCSI_FLASHNODE_ISID: 8205 memcpy(fnode_sess->isid, fnode_param->value, 8206 sizeof(fnode_sess->isid)); 8207 break; 8208 case ISCSI_FLASHNODE_TSID: 8209 fnode_sess->tsid = *(uint16_t *)fnode_param->value; 8210 break; 8211 case ISCSI_FLASHNODE_PORT: 8212 fnode_conn->port = *(uint16_t *)fnode_param->value; 8213 break; 8214 case ISCSI_FLASHNODE_MAX_BURST: 8215 fnode_sess->max_burst = *(unsigned *)fnode_param->value; 8216 break; 8217 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: 8218 fnode_sess->default_taskmgmt_timeout = 8219 *(uint16_t *)fnode_param->value; 8220 break; 8221 case ISCSI_FLASHNODE_IPADDR: 8222 memcpy(fnode_conn->ipaddress, fnode_param->value, 8223 IPv6_ADDR_LEN); 8224 break; 8225 case ISCSI_FLASHNODE_ALIAS: 8226 rc = iscsi_switch_str_param(&fnode_sess->targetalias, 8227 (char *)fnode_param->value); 8228 break; 8229 case ISCSI_FLASHNODE_REDIRECT_IPADDR: 8230 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, 8231 IPv6_ADDR_LEN); 8232 break; 8233 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: 8234 fnode_conn->max_segment_size = 8235 *(unsigned *)fnode_param->value; 8236 break; 8237 case ISCSI_FLASHNODE_LOCAL_PORT: 8238 fnode_conn->local_port = 8239 *(uint16_t *)fnode_param->value; 8240 break; 8241 case ISCSI_FLASHNODE_IPV4_TOS: 8242 fnode_conn->ipv4_tos = fnode_param->value[0]; 8243 break; 8244 case ISCSI_FLASHNODE_IPV6_TC: 8245 fnode_conn->ipv6_traffic_class = fnode_param->value[0]; 8246 break; 8247 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: 8248 fnode_conn->ipv6_flow_label = fnode_param->value[0]; 8249 break; 8250 case ISCSI_FLASHNODE_NAME: 8251 rc = iscsi_switch_str_param(&fnode_sess->targetname, 8252 (char *)fnode_param->value); 8253 break; 8254 case ISCSI_FLASHNODE_TPGT: 8255 fnode_sess->tpgt = *(uint16_t *)fnode_param->value; 8256 break; 8257 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: 8258 memcpy(fnode_conn->link_local_ipv6_addr, 8259 fnode_param->value, IPv6_ADDR_LEN); 8260 break; 8261 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 8262 fnode_sess->discovery_parent_idx = 8263 *(uint16_t *)fnode_param->value; 8264 break; 8265 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 8266 fnode_conn->tcp_xmit_wsf = 8267 *(uint8_t *)fnode_param->value; 8268 break; 8269 case ISCSI_FLASHNODE_TCP_RECV_WSF: 8270 fnode_conn->tcp_recv_wsf = 8271 *(uint8_t *)fnode_param->value; 8272 break; 8273 case ISCSI_FLASHNODE_STATSN: 8274 fnode_conn->statsn = 
*(uint32_t *)fnode_param->value; 8275 break; 8276 case ISCSI_FLASHNODE_EXP_STATSN: 8277 fnode_conn->exp_statsn = 8278 *(uint32_t *)fnode_param->value; 8279 break; 8280 case ISCSI_FLASHNODE_CHAP_OUT_IDX: 8281 chap_out_idx = *(uint16_t *)fnode_param->value; 8282 if (!qla4xxx_get_uni_chap_at_index(ha, 8283 chap_tbl.name, 8284 chap_tbl.secret, 8285 chap_out_idx)) { 8286 fnode_sess->chap_out_idx = chap_out_idx; 8287 /* Enable chap auth if chap index is valid */ 8288 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; 8289 } 8290 break; 8291 default: 8292 ql4_printk(KERN_ERR, ha, 8293 "%s: No such sysfs attribute\n", __func__); 8294 rc = -ENOSYS; 8295 goto exit_set_param; 8296 } 8297 } 8298 8299 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); 8300 8301 exit_set_param: 8302 return rc; 8303 } 8304 8305 /** 8306 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry 8307 * @fnode_sess: pointer to session attrs of flash ddb entry 8308 * 8309 * This invalidates the flash ddb entry at the given index 8310 **/ 8311 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) 8312 { 8313 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 8314 struct scsi_qla_host *ha = to_qla_host(shost); 8315 uint32_t dev_db_start_offset; 8316 uint32_t dev_db_end_offset; 8317 struct dev_db_entry *fw_ddb_entry = NULL; 8318 dma_addr_t fw_ddb_entry_dma; 8319 uint16_t *ddb_cookie = NULL; 8320 size_t ddb_size = 0; 8321 void *pddb = NULL; 8322 int target_id; 8323 int rc = 0; 8324 8325 if (fnode_sess->is_boot_target) { 8326 rc = -EPERM; 8327 DEBUG2(ql4_printk(KERN_ERR, ha, 8328 "%s: Deletion of boot target entry is not permitted.\n", 8329 __func__)); 8330 goto exit_ddb_del; 8331 } 8332 8333 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) 8334 goto sysfs_ddb_del; 8335 8336 if (is_qla40XX(ha)) { 8337 dev_db_start_offset = FLASH_OFFSET_DB_INFO; 8338 dev_db_end_offset = FLASH_OFFSET_DB_END; 8339 dev_db_start_offset += (fnode_sess->target_id * 8340 sizeof(*fw_ddb_entry)); 8341 ddb_size = sizeof(*fw_ddb_entry); 8342 } else { 8343 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + 8344 (ha->hw.flt_region_ddb << 2); 8345 /* flt_ddb_size is DDB table size for both ports 8346 * so divide it by 2 to calculate the offset for second port 8347 */ 8348 if (ha->port_num == 1) 8349 dev_db_start_offset += (ha->hw.flt_ddb_size / 2); 8350 8351 dev_db_end_offset = dev_db_start_offset + 8352 (ha->hw.flt_ddb_size / 2); 8353 8354 dev_db_start_offset += (fnode_sess->target_id * 8355 sizeof(*fw_ddb_entry)); 8356 dev_db_start_offset += offsetof(struct dev_db_entry, cookie); 8357 8358 ddb_size = sizeof(*ddb_cookie); 8359 } 8360 8361 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", 8362 __func__, dev_db_start_offset, dev_db_end_offset)); 8363 8364 if (dev_db_start_offset > dev_db_end_offset) { 8365 rc = -EIO; 8366 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", 8367 __func__, fnode_sess->target_id)); 8368 goto exit_ddb_del; 8369 } 8370 8371 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, 8372 &fw_ddb_entry_dma, GFP_KERNEL); 8373 if (!pddb) { 8374 rc = -ENOMEM; 8375 DEBUG2(ql4_printk(KERN_ERR, ha, 8376 "%s: Unable to allocate dma buffer\n", 8377 __func__)); 8378 goto exit_ddb_del; 8379 } 8380 8381 if (is_qla40XX(ha)) { 8382 fw_ddb_entry = pddb; 8383 memset(fw_ddb_entry, 0, ddb_size); 8384 ddb_cookie = &fw_ddb_entry->cookie; 8385 } else { 8386 ddb_cookie = pddb; 8387 } 8388 8389 /* invalidate the cookie */ 8390 *ddb_cookie = 0xFFEE; 8391 qla4xxx_set_flash(ha, fw_ddb_entry_dma, 
dev_db_start_offset, 8392 ddb_size, FLASH_OPT_RMW_COMMIT); 8393 8394 sysfs_ddb_del: 8395 target_id = fnode_sess->target_id; 8396 iscsi_destroy_flashnode_sess(fnode_sess); 8397 ql4_printk(KERN_INFO, ha, 8398 "%s: session and conn entries for flashnode %u of host %lu deleted\n", 8399 __func__, target_id, ha->host_no); 8400 exit_ddb_del: 8401 if (pddb) 8402 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, 8403 fw_ddb_entry_dma); 8404 return rc; 8405 } 8406 8407 /** 8408 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs 8409 * @ha: pointer to adapter structure 8410 * 8411 * Export the firmware DDB for all send targets and normal targets to sysfs. 8412 **/ 8413 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) 8414 { 8415 struct dev_db_entry *fw_ddb_entry = NULL; 8416 dma_addr_t fw_ddb_entry_dma; 8417 uint16_t max_ddbs; 8418 uint16_t idx = 0; 8419 int ret = QLA_SUCCESS; 8420 8421 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 8422 sizeof(*fw_ddb_entry), 8423 &fw_ddb_entry_dma, GFP_KERNEL); 8424 if (!fw_ddb_entry) { 8425 DEBUG2(ql4_printk(KERN_ERR, ha, 8426 "%s: Unable to allocate dma buffer\n", 8427 __func__)); 8428 return -ENOMEM; 8429 } 8430 8431 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 8432 MAX_DEV_DB_ENTRIES; 8433 8434 for (idx = 0; idx < max_ddbs; idx++) { 8435 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, 8436 idx)) 8437 continue; 8438 8439 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); 8440 if (ret) { 8441 ret = -EIO; 8442 break; 8443 } 8444 } 8445 8446 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, 8447 fw_ddb_entry_dma); 8448 8449 return ret; 8450 } 8451 8452 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) 8453 { 8454 iscsi_destroy_all_flashnode(ha->host); 8455 } 8456 8457 /** 8458 * qla4xxx_build_ddb_list - Build ddb list and setup sessions 8459 * @ha: pointer to adapter structure 8460 * @is_reset: Is this init path or reset path 8461 * 8462 * Create a list of sendtargets (st) from firmware DDBs, issue send targets 8463 * using connection open, then create the list of normal targets (nt) 8464 * from firmware DDBs. Based on the list of nt setup session and connection 8465 * objects. 8466 **/ 8467 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 8468 { 8469 uint16_t tmo = 0; 8470 struct list_head list_st, list_nt; 8471 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; 8472 unsigned long wtime; 8473 8474 if (!test_bit(AF_LINK_UP, &ha->flags)) { 8475 set_bit(AF_BUILD_DDB_LIST, &ha->flags); 8476 ha->is_reset = is_reset; 8477 return; 8478 } 8479 8480 INIT_LIST_HEAD(&list_st); 8481 INIT_LIST_HEAD(&list_nt); 8482 8483 qla4xxx_build_st_list(ha, &list_st); 8484 8485 /* Before issuing conn open mbox, ensure all IPs states are configured 8486 * Note, conn open fails if IPs are not configured 8487 */ 8488 qla4xxx_wait_for_ip_configuration(ha); 8489 8490 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 8491 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 8492 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 8493 } 8494 8495 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 8496 tmo = ((ha->def_timeout > LOGIN_TOV) && 8497 (ha->def_timeout < LOGIN_TOV * 10) ? 
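	/* use def_timeout only when it lies between LOGIN_TOV and 10 * LOGIN_TOV,
	 * otherwise fall back to LOGIN_TOV */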
8498 ha->def_timeout : LOGIN_TOV); 8499 8500 DEBUG2(ql4_printk(KERN_INFO, ha, 8501 "Default time to wait for build ddb %d\n", tmo)); 8502 8503 wtime = jiffies + (HZ * tmo); 8504 do { 8505 if (list_empty(&list_st)) 8506 break; 8507 8508 qla4xxx_remove_failed_ddb(ha, &list_st); 8509 schedule_timeout_uninterruptible(HZ / 10); 8510 } while (time_after(wtime, jiffies)); 8511 8512 8513 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); 8514 8515 qla4xxx_free_ddb_list(&list_st); 8516 qla4xxx_free_ddb_list(&list_nt); 8517 8518 qla4xxx_free_ddb_index(ha); 8519 } 8520 8521 /** 8522 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login 8523 * response. 8524 * @ha: pointer to adapter structure 8525 * 8526 * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be 8527 * set in DDB and we will wait for login response of boot targets during 8528 * probe. 8529 **/ 8530 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha) 8531 { 8532 struct ddb_entry *ddb_entry; 8533 struct dev_db_entry *fw_ddb_entry = NULL; 8534 dma_addr_t fw_ddb_entry_dma; 8535 unsigned long wtime; 8536 uint32_t ddb_state; 8537 int max_ddbs, idx, ret; 8538 8539 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : 8540 MAX_DEV_DB_ENTRIES; 8541 8542 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8543 &fw_ddb_entry_dma, GFP_KERNEL); 8544 if (!fw_ddb_entry) { 8545 ql4_printk(KERN_ERR, ha, 8546 "%s: Unable to allocate dma buffer\n", __func__); 8547 goto exit_login_resp; 8548 } 8549 8550 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV); 8551 8552 for (idx = 0; idx < max_ddbs; idx++) { 8553 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8554 if (ddb_entry == NULL) 8555 continue; 8556 8557 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { 8558 DEBUG2(ql4_printk(KERN_INFO, ha, 8559 "%s: DDB index [%d]\n", __func__, 8560 ddb_entry->fw_ddb_index)); 8561 do { 8562 ret = qla4xxx_get_fwddb_entry(ha, 8563 ddb_entry->fw_ddb_index, 8564 fw_ddb_entry, fw_ddb_entry_dma, 8565 NULL, NULL, &ddb_state, NULL, 8566 NULL, NULL); 8567 if (ret == QLA_ERROR) 8568 goto exit_login_resp; 8569 8570 if ((ddb_state == DDB_DS_SESSION_ACTIVE) || 8571 (ddb_state == DDB_DS_SESSION_FAILED)) 8572 break; 8573 8574 schedule_timeout_uninterruptible(HZ); 8575 8576 } while ((time_after(wtime, jiffies))); 8577 8578 if (!time_after(wtime, jiffies)) { 8579 DEBUG2(ql4_printk(KERN_INFO, ha, 8580 "%s: Login response wait timer expired\n", 8581 __func__)); 8582 goto exit_login_resp; 8583 } 8584 } 8585 } 8586 8587 exit_login_resp: 8588 if (fw_ddb_entry) 8589 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8590 fw_ddb_entry, fw_ddb_entry_dma); 8591 } 8592 8593 /** 8594 * qla4xxx_probe_adapter - callback function to probe HBA 8595 * @pdev: pointer to pci_dev structure 8596 * @pci_device_id: pointer to pci_device entry 8597 * 8598 * This routine will probe for Qlogic 4xxx iSCSI host adapters. 8599 * It returns zero if successful. It also initializes all data necessary for 8600 * the driver. 
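 *
 * Adapter initialization is retried up to MAX_INIT_RETRIES times; if the
 * adapter still does not come online, the newly added SCSI host is removed
 * and -ENODEV is returned.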
8601 **/ 8602 static int qla4xxx_probe_adapter(struct pci_dev *pdev, 8603 const struct pci_device_id *ent) 8604 { 8605 int ret = -ENODEV, status; 8606 struct Scsi_Host *host; 8607 struct scsi_qla_host *ha; 8608 uint8_t init_retry_count = 0; 8609 char buf[34]; 8610 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; 8611 uint32_t dev_state; 8612 8613 if (pci_enable_device(pdev)) 8614 return -1; 8615 8616 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); 8617 if (host == NULL) { 8618 printk(KERN_WARNING 8619 "qla4xxx: Couldn't allocate host from scsi layer!\n"); 8620 goto probe_disable_device; 8621 } 8622 8623 /* Clear our data area */ 8624 ha = to_qla_host(host); 8625 memset(ha, 0, sizeof(*ha)); 8626 8627 /* Save the information from PCI BIOS. */ 8628 ha->pdev = pdev; 8629 ha->host = host; 8630 ha->host_no = host->host_no; 8631 ha->func_num = PCI_FUNC(ha->pdev->devfn); 8632 8633 pci_enable_pcie_error_reporting(pdev); 8634 8635 /* Setup Runtime configurable options */ 8636 if (is_qla8022(ha)) { 8637 ha->isp_ops = &qla4_82xx_isp_ops; 8638 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; 8639 ha->qdr_sn_window = -1; 8640 ha->ddr_mn_window = -1; 8641 ha->curr_window = 255; 8642 nx_legacy_intr = &legacy_intr[ha->func_num]; 8643 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 8644 ha->nx_legacy_intr.tgt_status_reg = 8645 nx_legacy_intr->tgt_status_reg; 8646 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 8647 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 8648 } else if (is_qla8032(ha) || is_qla8042(ha)) { 8649 ha->isp_ops = &qla4_83xx_isp_ops; 8650 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; 8651 } else { 8652 ha->isp_ops = &qla4xxx_isp_ops; 8653 } 8654 8655 if (is_qla80XX(ha)) { 8656 rwlock_init(&ha->hw_lock); 8657 ha->pf_bit = ha->func_num << 16; 8658 /* Set EEH reset type to fundamental if required by hba */ 8659 pdev->needs_freset = 1; 8660 } 8661 8662 /* Configure PCI I/O space. */ 8663 ret = ha->isp_ops->iospace_config(ha); 8664 if (ret) 8665 goto probe_failed_ioconfig; 8666 8667 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", 8668 pdev->device, pdev->irq, ha->reg); 8669 8670 qla4xxx_config_dma_addressing(ha); 8671 8672 /* Initialize lists and spinlocks. */ 8673 INIT_LIST_HEAD(&ha->free_srb_q); 8674 8675 mutex_init(&ha->mbox_sem); 8676 mutex_init(&ha->chap_sem); 8677 init_completion(&ha->mbx_intr_comp); 8678 init_completion(&ha->disable_acb_comp); 8679 init_completion(&ha->idc_comp); 8680 init_completion(&ha->link_up_comp); 8681 8682 spin_lock_init(&ha->hardware_lock); 8683 spin_lock_init(&ha->work_lock); 8684 8685 /* Initialize work list */ 8686 INIT_LIST_HEAD(&ha->work_list); 8687 8688 /* Allocate dma buffers */ 8689 if (qla4xxx_mem_alloc(ha)) { 8690 ql4_printk(KERN_WARNING, ha, 8691 "[ERROR] Failed to allocate memory for adapter\n"); 8692 8693 ret = -ENOMEM; 8694 goto probe_failed; 8695 } 8696 8697 host->cmd_per_lun = 3; 8698 host->max_channel = 0; 8699 host->max_lun = MAX_LUNS - 1; 8700 host->max_id = MAX_TARGETS; 8701 host->max_cmd_len = IOCB_MAX_CDB_LEN; 8702 host->can_queue = MAX_SRBS ; 8703 host->transportt = qla4xxx_scsi_transport; 8704 8705 pci_set_drvdata(pdev, ha); 8706 8707 ret = scsi_add_host(host, &pdev->dev); 8708 if (ret) 8709 goto probe_failed; 8710 8711 if (is_qla80XX(ha)) 8712 qla4_8xxx_get_flash_info(ha); 8713 8714 if (is_qla8032(ha) || is_qla8042(ha)) { 8715 qla4_83xx_read_reset_template(ha); 8716 /* 8717 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. 
8718 * If DONRESET_BIT0 is set, drivers should not set dev_state 8719 * to NEED_RESET. But if NEED_RESET is set, drivers should 8720 * should honor the reset. 8721 */ 8722 if (ql4xdontresethba == 1) 8723 qla4_83xx_set_idc_dontreset(ha); 8724 } 8725 8726 /* 8727 * Initialize the Host adapter request/response queues and 8728 * firmware 8729 * NOTE: interrupts enabled upon successful completion 8730 */ 8731 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 8732 8733 /* Dont retry adapter initialization if IRQ allocation failed */ 8734 if (is_qla80XX(ha) && (status == QLA_ERROR)) 8735 goto skip_retry_init; 8736 8737 while ((!test_bit(AF_ONLINE, &ha->flags)) && 8738 init_retry_count++ < MAX_INIT_RETRIES) { 8739 8740 if (is_qla80XX(ha)) { 8741 ha->isp_ops->idc_lock(ha); 8742 dev_state = qla4_8xxx_rd_direct(ha, 8743 QLA8XXX_CRB_DEV_STATE); 8744 ha->isp_ops->idc_unlock(ha); 8745 if (dev_state == QLA8XXX_DEV_FAILED) { 8746 ql4_printk(KERN_WARNING, ha, "%s: don't retry " 8747 "initialize adapter. H/W is in failed state\n", 8748 __func__); 8749 break; 8750 } 8751 } 8752 DEBUG2(printk("scsi: %s: retrying adapter initialization " 8753 "(%d)\n", __func__, init_retry_count)); 8754 8755 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) 8756 continue; 8757 8758 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 8759 if (is_qla80XX(ha) && (status == QLA_ERROR)) { 8760 if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR) 8761 goto skip_retry_init; 8762 } 8763 } 8764 8765 skip_retry_init: 8766 if (!test_bit(AF_ONLINE, &ha->flags)) { 8767 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); 8768 8769 if ((is_qla8022(ha) && ql4xdontresethba) || 8770 ((is_qla8032(ha) || is_qla8042(ha)) && 8771 qla4_83xx_idc_dontreset(ha))) { 8772 /* Put the device in failed state. */ 8773 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); 8774 ha->isp_ops->idc_lock(ha); 8775 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 8776 QLA8XXX_DEV_FAILED); 8777 ha->isp_ops->idc_unlock(ha); 8778 } 8779 ret = -ENODEV; 8780 goto remove_host; 8781 } 8782 8783 /* Startup the kernel thread for this host adapter. */ 8784 DEBUG2(printk("scsi: %s: Starting kernel thread for " 8785 "qla4xxx_dpc\n", __func__)); 8786 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); 8787 ha->dpc_thread = create_singlethread_workqueue(buf); 8788 if (!ha->dpc_thread) { 8789 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); 8790 ret = -ENODEV; 8791 goto remove_host; 8792 } 8793 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 8794 8795 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, 8796 ha->host_no); 8797 if (!ha->task_wq) { 8798 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); 8799 ret = -ENODEV; 8800 goto remove_host; 8801 } 8802 8803 /* 8804 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc 8805 * (which is called indirectly by qla4xxx_initialize_adapter), 8806 * so that irqs will be registered after crbinit but before 8807 * mbx_intr_enable. 8808 */ 8809 if (is_qla40XX(ha)) { 8810 ret = qla4xxx_request_irqs(ha); 8811 if (ret) { 8812 ql4_printk(KERN_WARNING, ha, "Failed to reserve " 8813 "interrupt %d already in use.\n", pdev->irq); 8814 goto remove_host; 8815 } 8816 } 8817 8818 pci_save_state(ha->pdev); 8819 ha->isp_ops->enable_intrs(ha); 8820 8821 /* Start timer thread. 
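	   This arms the periodic adapter timer (interval argument of 1) only
	   after interrupts have been enabled.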
*/ 8822 qla4xxx_start_timer(ha, 1); 8823 8824 set_bit(AF_INIT_DONE, &ha->flags); 8825 8826 qla4_8xxx_alloc_sysfs_attr(ha); 8827 8828 printk(KERN_INFO 8829 " QLogic iSCSI HBA Driver version: %s\n" 8830 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 8831 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 8832 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, 8833 ha->fw_info.fw_patch, ha->fw_info.fw_build); 8834 8835 /* Set the driver version */ 8836 if (is_qla80XX(ha)) 8837 qla4_8xxx_set_param(ha, SET_DRVR_VERSION); 8838 8839 if (qla4xxx_setup_boot_info(ha)) 8840 ql4_printk(KERN_ERR, ha, 8841 "%s: No iSCSI boot target configured\n", __func__); 8842 8843 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); 8844 /* Perform the build ddb list and login to each */ 8845 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 8846 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 8847 qla4xxx_wait_login_resp_boot_tgt(ha); 8848 8849 qla4xxx_create_chap_list(ha); 8850 8851 qla4xxx_create_ifaces(ha); 8852 return 0; 8853 8854 remove_host: 8855 scsi_remove_host(ha->host); 8856 8857 probe_failed: 8858 qla4xxx_free_adapter(ha); 8859 8860 probe_failed_ioconfig: 8861 pci_disable_pcie_error_reporting(pdev); 8862 scsi_host_put(ha->host); 8863 8864 probe_disable_device: 8865 pci_disable_device(pdev); 8866 8867 return ret; 8868 } 8869 8870 /** 8871 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize 8872 * @ha: pointer to adapter structure 8873 * 8874 * Mark the other ISP-4xxx port to indicate that the driver is being removed, 8875 * so that the other port will not re-initialize while in the process of 8876 * removing the ha due to driver unload or hba hotplug. 8877 **/ 8878 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) 8879 { 8880 struct scsi_qla_host *other_ha = NULL; 8881 struct pci_dev *other_pdev = NULL; 8882 int fn = ISP4XXX_PCI_FN_2; 8883 8884 /*iscsi function numbers for ISP4xxx is 1 and 3*/ 8885 if (PCI_FUNC(ha->pdev->devfn) & BIT_1) 8886 fn = ISP4XXX_PCI_FN_1; 8887 8888 other_pdev = 8889 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 8890 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 8891 fn)); 8892 8893 /* Get other_ha if other_pdev is valid and state is enable*/ 8894 if (other_pdev) { 8895 if (atomic_read(&other_pdev->enable_cnt)) { 8896 other_ha = pci_get_drvdata(other_pdev); 8897 if (other_ha) { 8898 set_bit(AF_HA_REMOVAL, &other_ha->flags); 8899 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " 8900 "Prevent %s reinit\n", __func__, 8901 dev_name(&other_ha->pdev->dev))); 8902 } 8903 } 8904 pci_dev_put(other_pdev); 8905 } 8906 } 8907 8908 static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha, 8909 struct ddb_entry *ddb_entry) 8910 { 8911 struct dev_db_entry *fw_ddb_entry = NULL; 8912 dma_addr_t fw_ddb_entry_dma; 8913 unsigned long wtime; 8914 uint32_t ddb_state; 8915 int options; 8916 int status; 8917 8918 options = LOGOUT_OPTION_CLOSE_SESSION; 8919 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { 8920 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); 8921 goto clear_ddb; 8922 } 8923 8924 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8925 &fw_ddb_entry_dma, GFP_KERNEL); 8926 if (!fw_ddb_entry) { 8927 ql4_printk(KERN_ERR, ha, 8928 "%s: Unable to allocate dma buffer\n", __func__); 8929 goto clear_ddb; 8930 } 8931 8932 wtime = jiffies + (HZ * LOGOUT_TOV); 8933 do { 8934 status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 8935 fw_ddb_entry, 
fw_ddb_entry_dma, 8936 NULL, NULL, &ddb_state, NULL, 8937 NULL, NULL); 8938 if (status == QLA_ERROR) 8939 goto free_ddb; 8940 8941 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || 8942 (ddb_state == DDB_DS_SESSION_FAILED)) 8943 goto free_ddb; 8944 8945 schedule_timeout_uninterruptible(HZ); 8946 } while ((time_after(wtime, jiffies))); 8947 8948 free_ddb: 8949 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 8950 fw_ddb_entry, fw_ddb_entry_dma); 8951 clear_ddb: 8952 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 8953 } 8954 8955 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) 8956 { 8957 struct ddb_entry *ddb_entry; 8958 int idx; 8959 8960 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { 8961 8962 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); 8963 if ((ddb_entry != NULL) && 8964 (ddb_entry->ddb_type == FLASH_DDB)) { 8965 8966 qla4xxx_destroy_ddb(ha, ddb_entry); 8967 /* 8968 * we have decremented the reference count of the driver 8969 * when we setup the session to have the driver unload 8970 * to be seamless without actually destroying the 8971 * session 8972 **/ 8973 try_module_get(qla4xxx_iscsi_transport.owner); 8974 iscsi_destroy_endpoint(ddb_entry->conn->ep); 8975 qla4xxx_free_ddb(ha, ddb_entry); 8976 iscsi_session_teardown(ddb_entry->sess); 8977 } 8978 } 8979 } 8980 /** 8981 * qla4xxx_remove_adapter - callback function to remove adapter. 8982 * @pci_dev: PCI device pointer 8983 **/ 8984 static void qla4xxx_remove_adapter(struct pci_dev *pdev) 8985 { 8986 struct scsi_qla_host *ha; 8987 8988 /* 8989 * If the PCI device is disabled then it means probe_adapter had 8990 * failed and resources already cleaned up on probe_adapter exit. 8991 */ 8992 if (!pci_is_enabled(pdev)) 8993 return; 8994 8995 ha = pci_get_drvdata(pdev); 8996 8997 if (is_qla40XX(ha)) 8998 qla4xxx_prevent_other_port_reinit(ha); 8999 9000 /* destroy iface from sysfs */ 9001 qla4xxx_destroy_ifaces(ha); 9002 9003 if ((!ql4xdisablesysfsboot) && ha->boot_kset) 9004 iscsi_boot_destroy_kset(ha->boot_kset); 9005 9006 qla4xxx_destroy_fw_ddb_session(ha); 9007 qla4_8xxx_free_sysfs_attr(ha); 9008 9009 qla4xxx_sysfs_ddb_remove(ha); 9010 scsi_remove_host(ha->host); 9011 9012 qla4xxx_free_adapter(ha); 9013 9014 scsi_host_put(ha->host); 9015 9016 pci_disable_pcie_error_reporting(pdev); 9017 pci_disable_device(pdev); 9018 } 9019 9020 /** 9021 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. 9022 * @ha: HA context 9023 * 9024 * At exit, the @ha's flags.enable_64bit_addressing set to indicated 9025 * supported addressing method. 
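 *
 * A 64-bit DMA mask is attempted first; if it cannot be set, the
 * corresponding 32-bit mask is used instead.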
9026 */ 9027 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 9028 { 9029 int retval; 9030 9031 /* Update our PCI device dma_mask for full 64 bit mask */ 9032 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) { 9033 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 9034 dev_dbg(&ha->pdev->dev, 9035 "Failed to set 64 bit PCI consistent mask; " 9036 "using 32 bit.\n"); 9037 retval = pci_set_consistent_dma_mask(ha->pdev, 9038 DMA_BIT_MASK(32)); 9039 } 9040 } else 9041 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32)); 9042 } 9043 9044 static int qla4xxx_slave_alloc(struct scsi_device *sdev) 9045 { 9046 struct iscsi_cls_session *cls_sess; 9047 struct iscsi_session *sess; 9048 struct ddb_entry *ddb; 9049 int queue_depth = QL4_DEF_QDEPTH; 9050 9051 cls_sess = starget_to_session(sdev->sdev_target); 9052 sess = cls_sess->dd_data; 9053 ddb = sess->dd_data; 9054 9055 sdev->hostdata = ddb; 9056 9057 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 9058 queue_depth = ql4xmaxqdepth; 9059 9060 scsi_change_queue_depth(sdev, queue_depth); 9061 return 0; 9062 } 9063 9064 /** 9065 * qla4xxx_del_from_active_array - returns an active srb 9066 * @ha: Pointer to host adapter structure. 9067 * @index: index into the active_array 9068 * 9069 * This routine removes and returns the srb at the specified index 9070 **/ 9071 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, 9072 uint32_t index) 9073 { 9074 struct srb *srb = NULL; 9075 struct scsi_cmnd *cmd = NULL; 9076 9077 cmd = scsi_host_find_tag(ha->host, index); 9078 if (!cmd) 9079 return srb; 9080 9081 srb = (struct srb *)CMD_SP(cmd); 9082 if (!srb) 9083 return srb; 9084 9085 /* update counters */ 9086 if (srb->flags & SRB_DMA_VALID) { 9087 ha->iocb_cnt -= srb->iocb_cnt; 9088 if (srb->cmd) 9089 srb->cmd->host_scribble = 9090 (unsigned char *)(unsigned long) MAX_SRBS; 9091 } 9092 return srb; 9093 } 9094 9095 /** 9096 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 9097 * @ha: Pointer to host adapter structure. 9098 * @cmd: Scsi Command to wait on. 9099 * 9100 * This routine waits for the command to be returned by the firmware 9101 * for a bounded maximum time. 9102 **/ 9103 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, 9104 struct scsi_cmnd *cmd) 9105 { 9106 int done = 0; 9107 struct srb *rp; 9108 uint32_t max_wait_time = EH_WAIT_CMD_TOV; 9109 int ret = SUCCESS; 9110 9111 /* Don't wait on the command if a PCI error is being handled 9112 * by the PCI AER driver 9113 */ 9114 if (unlikely(pci_channel_offline(ha->pdev)) || 9115 (test_bit(AF_EEH_BUSY, &ha->flags))) { 9116 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n", 9117 ha->host_no, __func__); 9118 return ret; 9119 } 9120 9121 do { 9122 /* Check to see if it's been returned to the OS */ 9123 rp = (struct srb *) CMD_SP(cmd); 9124 if (rp == NULL) { 9125 done++; 9126 break; 9127 } 9128 9129 msleep(2000); 9130 } while (max_wait_time--); 9131 9132 return done; 9133 } 9134 9135 /** 9136 * qla4xxx_wait_for_hba_online - waits for HBA to come online 9137 * @ha: Pointer to host adapter structure 9138 **/ 9139 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) 9140 { 9141 unsigned long wait_online; 9142 9143 wait_online = jiffies + (HBA_ONLINE_TOV * HZ); 9144 while (time_before(jiffies, wait_online)) { 9145 9146 if (adapter_up(ha)) 9147 return QLA_SUCCESS; 9148 9149 msleep(2000); 9150 } 9151 9152 return QLA_ERROR; 9153 } 9154 9155 /** 9156 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
9157 * @ha: pointer to HBA 9158 * @stgt: pointer to the SCSI target to wait on 9159 * @sdev: pointer to the SCSI device to wait on, or NULL for the whole target 9160 * 9161 * This function waits for all outstanding commands to the target (or to one 9162 * LUN when @sdev is set) to complete. It returns 0 if all pending commands 9163 * have been returned and 1 otherwise. 9164 **/ static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha, 9165 struct scsi_target *stgt, 9166 struct scsi_device *sdev) 9167 { 9168 int cnt; 9169 int status = 0; 9170 struct scsi_cmnd *cmd; 9171 9172 /* 9173 * Wait for all commands to the designated target or device 9174 * in the active array 9175 */ 9176 for (cnt = 0; cnt < ha->host->can_queue; cnt++) { 9177 cmd = scsi_host_find_tag(ha->host, cnt); 9178 if (cmd && stgt == scsi_target(cmd->device) && 9179 (!sdev || sdev == cmd->device)) { 9180 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9181 status++; 9182 break; 9183 } 9184 } 9185 } 9186 return status; 9187 } 9188 9189 /** 9190 * qla4xxx_eh_abort - callback for abort task. 9191 * @cmd: Pointer to Linux's SCSI command structure 9192 * 9193 * This routine is called by the Linux OS to abort the specified 9194 * command. 9195 **/ 9196 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) 9197 { 9198 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9199 unsigned int id = cmd->device->id; 9200 uint64_t lun = cmd->device->lun; 9201 unsigned long flags; 9202 struct srb *srb = NULL; 9203 int ret = SUCCESS; 9204 int wait = 0; 9205 int rval; 9206 9207 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", 9208 ha->host_no, id, lun, cmd, cmd->cmnd[0]); 9209 9210 rval = qla4xxx_isp_check_reg(ha); 9211 if (rval != QLA_SUCCESS) { 9212 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9213 return FAILED; 9214 } 9215 9216 spin_lock_irqsave(&ha->hardware_lock, flags); 9217 srb = (struct srb *) CMD_SP(cmd); 9218 if (!srb) { 9219 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9220 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n", 9221 ha->host_no, id, lun); 9222 return SUCCESS; 9223 } 9224 kref_get(&srb->srb_ref); 9225 spin_unlock_irqrestore(&ha->hardware_lock, flags); 9226 9227 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { 9228 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n", 9229 ha->host_no, id, lun)); 9230 ret = FAILED; 9231 } else { 9232 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n", 9233 ha->host_no, id, lun)); 9234 wait = 1; 9235 } 9236 9237 kref_put(&srb->srb_ref, qla4xxx_srb_compl); 9238 9239 /* Wait for command to complete */ 9240 if (wait) { 9241 if (!qla4xxx_eh_wait_on_command(ha, cmd)) { 9242 DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n", 9243 ha->host_no, id, lun)); 9244 ret = FAILED; 9245 } 9246 } 9247 9248 ql4_printk(KERN_INFO, ha, 9249 "scsi%ld:%d:%llu: Abort command - %s\n", 9250 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed"); 9251 9252 return ret; 9253 } 9254 9255 /** 9256 * qla4xxx_eh_device_reset - callback for device (LUN) reset. 9257 * @cmd: Pointer to Linux's SCSI command structure 9258 * 9259 * This routine is called by the Linux OS to reset the LUN addressed 9260 * by the specified command.
9261 **/ 9262 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) 9263 { 9264 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9265 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9266 int ret = FAILED, stat; 9267 int rval; 9268 9269 if (!ddb_entry) 9270 return ret; 9271 9272 ret = iscsi_block_scsi_eh(cmd); 9273 if (ret) 9274 return ret; 9275 ret = FAILED; 9276 9277 ql4_printk(KERN_INFO, ha, 9278 "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no, 9279 cmd->device->channel, cmd->device->id, cmd->device->lun); 9280 9281 DEBUG2(printk(KERN_INFO 9282 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 9283 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 9284 cmd, jiffies, cmd->request->timeout / HZ, 9285 ha->dpc_flags, cmd->result, cmd->allowed)); 9286 9287 rval = qla4xxx_isp_check_reg(ha); 9288 if (rval != QLA_SUCCESS) { 9289 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9290 return FAILED; 9291 } 9292 9293 /* FIXME: wait for hba to go online */ 9294 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); 9295 if (stat != QLA_SUCCESS) { 9296 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat); 9297 goto eh_dev_reset_done; 9298 } 9299 9300 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9301 cmd->device)) { 9302 ql4_printk(KERN_INFO, ha, 9303 "DEVICE RESET FAILED - waiting for " 9304 "commands.\n"); 9305 goto eh_dev_reset_done; 9306 } 9307 9308 /* Send marker. */ 9309 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9310 MM_LUN_RESET) != QLA_SUCCESS) 9311 goto eh_dev_reset_done; 9312 9313 ql4_printk(KERN_INFO, ha, 9314 "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", 9315 ha->host_no, cmd->device->channel, cmd->device->id, 9316 cmd->device->lun); 9317 9318 ret = SUCCESS; 9319 9320 eh_dev_reset_done: 9321 9322 return ret; 9323 } 9324 9325 /** 9326 * qla4xxx_eh_target_reset - callback for target reset. 9327 * @cmd: Pointer to Linux's SCSI command structure 9328 * 9329 * This routine is called by the Linux OS to reset the target. 9330 **/ 9331 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) 9332 { 9333 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9334 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9335 int stat, ret; 9336 int rval; 9337 9338 if (!ddb_entry) 9339 return FAILED; 9340 9341 ret = iscsi_block_scsi_eh(cmd); 9342 if (ret) 9343 return ret; 9344 9345 starget_printk(KERN_INFO, scsi_target(cmd->device), 9346 "WARM TARGET RESET ISSUED.\n"); 9347 9348 DEBUG2(printk(KERN_INFO 9349 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 9350 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 9351 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, 9352 ha->dpc_flags, cmd->result, cmd->allowed)); 9353 9354 rval = qla4xxx_isp_check_reg(ha); 9355 if (rval != QLA_SUCCESS) { 9356 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9357 return FAILED; 9358 } 9359 9360 stat = qla4xxx_reset_target(ha, ddb_entry); 9361 if (stat != QLA_SUCCESS) { 9362 starget_printk(KERN_INFO, scsi_target(cmd->device), 9363 "WARM TARGET RESET FAILED.\n"); 9364 return FAILED; 9365 } 9366 9367 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), 9368 NULL)) { 9369 starget_printk(KERN_INFO, scsi_target(cmd->device), 9370 "WARM TARGET DEVICE RESET FAILED - " 9371 "waiting for commands.\n"); 9372 return FAILED; 9373 } 9374 9375 /* Send marker. 
*/ 9376 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, 9377 MM_TGT_WARM_RESET) != QLA_SUCCESS) { 9378 starget_printk(KERN_INFO, scsi_target(cmd->device), 9379 "WARM TARGET DEVICE RESET FAILED - " 9380 "marker iocb failed.\n"); 9381 return FAILED; 9382 } 9383 9384 starget_printk(KERN_INFO, scsi_target(cmd->device), 9385 "WARM TARGET RESET SUCCEEDED.\n"); 9386 return SUCCESS; 9387 } 9388 9389 /** 9390 * qla4xxx_is_eh_active - check if error handler is running 9391 * @shost: Pointer to SCSI Host struct 9392 * 9393 * This routine determines whether the host reset was invoked from the 9394 * SCSI EH path or from an application such as sg_reset. 9395 **/ 9396 static int qla4xxx_is_eh_active(struct Scsi_Host *shost) 9397 { 9398 if (shost->shost_state == SHOST_RECOVERY) 9399 return 1; 9400 return 0; 9401 } 9402 9403 /** 9404 * qla4xxx_eh_host_reset - kernel callback for host reset 9405 * @cmd: Pointer to Linux's SCSI command structure 9406 * 9407 * This routine is invoked by the Linux kernel to perform fatal error 9408 * recovery on the specified adapter. 9409 **/ 9410 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) 9411 { 9412 int return_status = FAILED; 9413 struct scsi_qla_host *ha; 9414 int rval; 9415 9416 ha = to_qla_host(cmd->device->host); 9417 9418 rval = qla4xxx_isp_check_reg(ha); 9419 if (rval != QLA_SUCCESS) { 9420 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); 9421 return FAILED; 9422 } 9423 9424 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) 9425 qla4_83xx_set_idc_dontreset(ha); 9426 9427 /* 9428 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other 9429 * protocol drivers, we should not set device_state to NEED_RESET 9430 */ 9431 if (ql4xdontresethba || 9432 ((is_qla8032(ha) || is_qla8042(ha)) && 9433 qla4_83xx_idc_dontreset(ha))) { 9434 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 9435 ha->host_no, __func__)); 9436 9437 /* Clear outstanding srbs in queues */ 9438 if (qla4xxx_is_eh_active(cmd->device->host)) 9439 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16); 9440 9441 return FAILED; 9442 } 9443 9444 ql4_printk(KERN_INFO, ha, 9445 "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, 9446 cmd->device->channel, cmd->device->id, cmd->device->lun); 9447 9448 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { 9449 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter " 9450 "DEAD.\n", ha->host_no, cmd->device->channel, 9451 __func__)); 9452 9453 return FAILED; 9454 } 9455 9456 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9457 if (is_qla80XX(ha)) 9458 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 9459 else 9460 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9461 } 9462 9463 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS) 9464 return_status = SUCCESS; 9465 9466 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n", 9467 return_status == FAILED ?
"FAILED" : "SUCCEEDED"); 9468 9469 return return_status; 9470 } 9471 9472 static int qla4xxx_context_reset(struct scsi_qla_host *ha) 9473 { 9474 uint32_t mbox_cmd[MBOX_REG_COUNT]; 9475 uint32_t mbox_sts[MBOX_REG_COUNT]; 9476 struct addr_ctrl_blk_def *acb = NULL; 9477 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def); 9478 int rval = QLA_SUCCESS; 9479 dma_addr_t acb_dma; 9480 9481 acb = dma_alloc_coherent(&ha->pdev->dev, 9482 sizeof(struct addr_ctrl_blk_def), 9483 &acb_dma, GFP_KERNEL); 9484 if (!acb) { 9485 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", 9486 __func__); 9487 rval = -ENOMEM; 9488 goto exit_port_reset; 9489 } 9490 9491 memset(acb, 0, acb_len); 9492 9493 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len); 9494 if (rval != QLA_SUCCESS) { 9495 rval = -EIO; 9496 goto exit_free_acb; 9497 } 9498 9499 rval = qla4xxx_disable_acb(ha); 9500 if (rval != QLA_SUCCESS) { 9501 rval = -EIO; 9502 goto exit_free_acb; 9503 } 9504 9505 wait_for_completion_timeout(&ha->disable_acb_comp, 9506 DISABLE_ACB_TOV * HZ); 9507 9508 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); 9509 if (rval != QLA_SUCCESS) { 9510 rval = -EIO; 9511 goto exit_free_acb; 9512 } 9513 9514 exit_free_acb: 9515 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), 9516 acb, acb_dma); 9517 exit_port_reset: 9518 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, 9519 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED")); 9520 return rval; 9521 } 9522 9523 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) 9524 { 9525 struct scsi_qla_host *ha = to_qla_host(shost); 9526 int rval = QLA_SUCCESS; 9527 uint32_t idc_ctrl; 9528 9529 if (ql4xdontresethba) { 9530 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", 9531 __func__)); 9532 rval = -EPERM; 9533 goto exit_host_reset; 9534 } 9535 9536 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 9537 goto recover_adapter; 9538 9539 switch (reset_type) { 9540 case SCSI_ADAPTER_RESET: 9541 set_bit(DPC_RESET_HA, &ha->dpc_flags); 9542 break; 9543 case SCSI_FIRMWARE_RESET: 9544 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9545 if (is_qla80XX(ha)) 9546 /* set firmware context reset */ 9547 set_bit(DPC_RESET_HA_FW_CONTEXT, 9548 &ha->dpc_flags); 9549 else { 9550 rval = qla4xxx_context_reset(ha); 9551 goto exit_host_reset; 9552 } 9553 } 9554 break; 9555 } 9556 9557 recover_adapter: 9558 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if 9559 * reset is issued by application */ 9560 if ((is_qla8032(ha) || is_qla8042(ha)) && 9561 test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 9562 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); 9563 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, 9564 (idc_ctrl | GRACEFUL_RESET_BIT1)); 9565 } 9566 9567 rval = qla4xxx_recover_adapter(ha); 9568 if (rval != QLA_SUCCESS) { 9569 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", 9570 __func__)); 9571 rval = -EIO; 9572 } 9573 9574 exit_host_reset: 9575 return rval; 9576 } 9577 9578 /* PCI AER driver recovers from all correctable errors w/o 9579 * driver intervention. For uncorrectable errors PCI AER 9580 * driver calls the following device driver's callbacks 9581 * 9582 * - Fatal Errors - link_reset 9583 * - Non-Fatal Errors - driver's error_detected() which 9584 * returns CAN_RECOVER, NEED_RESET or DISCONNECT. 
9585 * 9586 * PCI AER driver calls 9587 * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled() 9588 * returns RECOVERED or NEED_RESET if fw_hung 9589 * NEED_RESET - driver's slot_reset() 9590 * DISCONNECT - device is dead & cannot recover 9591 * RECOVERED - driver's resume() 9592 */ 9593 static pci_ers_result_t 9594 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9595 { 9596 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9597 9598 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", 9599 ha->host_no, __func__, state); 9600 9601 if (!is_aer_supported(ha)) 9602 return PCI_ERS_RESULT_NONE; 9603 9604 switch (state) { 9605 case pci_channel_io_normal: 9606 clear_bit(AF_EEH_BUSY, &ha->flags); 9607 return PCI_ERS_RESULT_CAN_RECOVER; 9608 case pci_channel_io_frozen: 9609 set_bit(AF_EEH_BUSY, &ha->flags); 9610 qla4xxx_mailbox_premature_completion(ha); 9611 qla4xxx_free_irqs(ha); 9612 pci_disable_device(pdev); 9613 /* Return back all IOs */ 9614 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 9615 return PCI_ERS_RESULT_NEED_RESET; 9616 case pci_channel_io_perm_failure: 9617 set_bit(AF_EEH_BUSY, &ha->flags); 9618 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); 9619 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 9620 return PCI_ERS_RESULT_DISCONNECT; 9621 } 9622 return PCI_ERS_RESULT_NEED_RESET; 9623 } 9624 9625 /** 9626 * qla4xxx_pci_mmio_enabled() gets called if 9627 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER 9628 * and read/write to the device still works. 9629 **/ 9630 static pci_ers_result_t 9631 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) 9632 { 9633 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9634 9635 if (!is_aer_supported(ha)) 9636 return PCI_ERS_RESULT_NONE; 9637 9638 return PCI_ERS_RESULT_RECOVERED; 9639 } 9640 9641 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) 9642 { 9643 uint32_t rval = QLA_ERROR; 9644 int fn; 9645 struct pci_dev *other_pdev = NULL; 9646 9647 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__); 9648 9649 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9650 9651 if (test_bit(AF_ONLINE, &ha->flags)) { 9652 clear_bit(AF_ONLINE, &ha->flags); 9653 clear_bit(AF_LINK_UP, &ha->flags); 9654 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 9655 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 9656 } 9657 9658 fn = PCI_FUNC(ha->pdev->devfn); 9659 if (is_qla8022(ha)) { 9660 while (fn > 0) { 9661 fn--; 9662 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", 9663 ha->host_no, __func__, fn); 9664 /* Get the pci device given the domain, bus, 9665 * slot/function number */ 9666 other_pdev = pci_get_domain_bus_and_slot( 9667 pci_domain_nr(ha->pdev->bus), 9668 ha->pdev->bus->number, 9669 PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 9670 fn)); 9671 9672 if (!other_pdev) 9673 continue; 9674 9675 if (atomic_read(&other_pdev->enable_cnt)) { 9676 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", 9677 ha->host_no, __func__, fn); 9678 pci_dev_put(other_pdev); 9679 break; 9680 } 9681 pci_dev_put(other_pdev); 9682 } 9683 } else { 9684 /* this case is meant for ISP83xx/ISP84xx only */ 9685 if (qla4_83xx_can_perform_reset(ha)) { 9686 /* reset fn as iSCSI is going to perform the reset */ 9687 fn = 0; 9688 } 9689 } 9690 9691 /* The first function on the card, the reset owner will 9692 * start & initialize the firmware. 
The other functions 9693 * on the card will reset the firmware context 9694 */ 9695 if (!fn) { 9696 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " 9697 "0x%x is the owner\n", ha->host_no, __func__, 9698 ha->pdev->devfn); 9699 9700 ha->isp_ops->idc_lock(ha); 9701 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9702 QLA8XXX_DEV_COLD); 9703 ha->isp_ops->idc_unlock(ha); 9704 9705 rval = qla4_8xxx_update_idc_reg(ha); 9706 if (rval == QLA_ERROR) { 9707 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n", 9708 ha->host_no, __func__); 9709 ha->isp_ops->idc_lock(ha); 9710 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9711 QLA8XXX_DEV_FAILED); 9712 ha->isp_ops->idc_unlock(ha); 9713 goto exit_error_recovery; 9714 } 9715 9716 clear_bit(AF_FW_RECOVERY, &ha->flags); 9717 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9718 9719 if (rval != QLA_SUCCESS) { 9720 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9721 "FAILED\n", ha->host_no, __func__); 9722 qla4xxx_free_irqs(ha); 9723 ha->isp_ops->idc_lock(ha); 9724 qla4_8xxx_clear_drv_active(ha); 9725 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9726 QLA8XXX_DEV_FAILED); 9727 ha->isp_ops->idc_unlock(ha); 9728 } else { 9729 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 9730 "READY\n", ha->host_no, __func__); 9731 ha->isp_ops->idc_lock(ha); 9732 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 9733 QLA8XXX_DEV_READY); 9734 /* Clear driver state register */ 9735 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); 9736 qla4_8xxx_set_drv_active(ha); 9737 ha->isp_ops->idc_unlock(ha); 9738 ha->isp_ops->enable_intrs(ha); 9739 } 9740 } else { 9741 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 9742 "the reset owner\n", ha->host_no, __func__, 9743 ha->pdev->devfn); 9744 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) == 9745 QLA8XXX_DEV_READY)) { 9746 clear_bit(AF_FW_RECOVERY, &ha->flags); 9747 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 9748 if (rval == QLA_SUCCESS) 9749 ha->isp_ops->enable_intrs(ha); 9750 else 9751 qla4xxx_free_irqs(ha); 9752 9753 ha->isp_ops->idc_lock(ha); 9754 qla4_8xxx_set_drv_active(ha); 9755 ha->isp_ops->idc_unlock(ha); 9756 } 9757 } 9758 exit_error_recovery: 9759 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 9760 return rval; 9761 } 9762 9763 static pci_ers_result_t 9764 qla4xxx_pci_slot_reset(struct pci_dev *pdev) 9765 { 9766 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 9767 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9768 int rc; 9769 9770 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", 9771 ha->host_no, __func__); 9772 9773 if (!is_aer_supported(ha)) 9774 return PCI_ERS_RESULT_NONE; 9775 9776 /* Restore the saved state of PCIe device - 9777 * BAR registers, PCI Config space, PCIX, MSI, 9778 * IOV states 9779 */ 9780 pci_restore_state(pdev); 9781 9782 /* pci_restore_state() clears the saved_state flag of the device 9783 * save restored state which resets saved_state flag 9784 */ 9785 pci_save_state(pdev); 9786 9787 /* Initialize device or resume if in suspended state */ 9788 rc = pci_enable_device(pdev); 9789 if (rc) { 9790 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable " 9791 "device after reset\n", ha->host_no, __func__); 9792 goto exit_slot_reset; 9793 } 9794 9795 ha->isp_ops->disable_intrs(ha); 9796 9797 if (is_qla80XX(ha)) { 9798 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 9799 ret = PCI_ERS_RESULT_RECOVERED; 9800 goto exit_slot_reset; 9801 } else 9802 goto exit_slot_reset; 9803 } 9804 9805 exit_slot_reset: 9806 ql4_printk(KERN_WARNING, 
ha, "scsi%ld: %s: Return=%x\n" 9807 "device after reset\n", ha->host_no, __func__, ret); 9808 return ret; 9809 } 9810 9811 static void 9812 qla4xxx_pci_resume(struct pci_dev *pdev) 9813 { 9814 struct scsi_qla_host *ha = pci_get_drvdata(pdev); 9815 int ret; 9816 9817 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", 9818 ha->host_no, __func__); 9819 9820 ret = qla4xxx_wait_for_hba_online(ha); 9821 if (ret != QLA_SUCCESS) { 9822 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " 9823 "resume I/O from slot/link_reset\n", ha->host_no, 9824 __func__); 9825 } 9826 9827 pci_cleanup_aer_uncorrect_error_status(pdev); 9828 clear_bit(AF_EEH_BUSY, &ha->flags); 9829 } 9830 9831 static const struct pci_error_handlers qla4xxx_err_handler = { 9832 .error_detected = qla4xxx_pci_error_detected, 9833 .mmio_enabled = qla4xxx_pci_mmio_enabled, 9834 .slot_reset = qla4xxx_pci_slot_reset, 9835 .resume = qla4xxx_pci_resume, 9836 }; 9837 9838 static struct pci_device_id qla4xxx_pci_tbl[] = { 9839 { 9840 .vendor = PCI_VENDOR_ID_QLOGIC, 9841 .device = PCI_DEVICE_ID_QLOGIC_ISP4010, 9842 .subvendor = PCI_ANY_ID, 9843 .subdevice = PCI_ANY_ID, 9844 }, 9845 { 9846 .vendor = PCI_VENDOR_ID_QLOGIC, 9847 .device = PCI_DEVICE_ID_QLOGIC_ISP4022, 9848 .subvendor = PCI_ANY_ID, 9849 .subdevice = PCI_ANY_ID, 9850 }, 9851 { 9852 .vendor = PCI_VENDOR_ID_QLOGIC, 9853 .device = PCI_DEVICE_ID_QLOGIC_ISP4032, 9854 .subvendor = PCI_ANY_ID, 9855 .subdevice = PCI_ANY_ID, 9856 }, 9857 { 9858 .vendor = PCI_VENDOR_ID_QLOGIC, 9859 .device = PCI_DEVICE_ID_QLOGIC_ISP8022, 9860 .subvendor = PCI_ANY_ID, 9861 .subdevice = PCI_ANY_ID, 9862 }, 9863 { 9864 .vendor = PCI_VENDOR_ID_QLOGIC, 9865 .device = PCI_DEVICE_ID_QLOGIC_ISP8324, 9866 .subvendor = PCI_ANY_ID, 9867 .subdevice = PCI_ANY_ID, 9868 }, 9869 { 9870 .vendor = PCI_VENDOR_ID_QLOGIC, 9871 .device = PCI_DEVICE_ID_QLOGIC_ISP8042, 9872 .subvendor = PCI_ANY_ID, 9873 .subdevice = PCI_ANY_ID, 9874 }, 9875 {0, 0}, 9876 }; 9877 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 9878 9879 static struct pci_driver qla4xxx_pci_driver = { 9880 .name = DRIVER_NAME, 9881 .id_table = qla4xxx_pci_tbl, 9882 .probe = qla4xxx_probe_adapter, 9883 .remove = qla4xxx_remove_adapter, 9884 .err_handler = &qla4xxx_err_handler, 9885 }; 9886 9887 static int __init qla4xxx_module_init(void) 9888 { 9889 int ret; 9890 9891 if (ql4xqfulltracking) 9892 qla4xxx_driver_template.track_queue_depth = 1; 9893 9894 /* Allocate cache for SRBs. */ 9895 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 9896 SLAB_HWCACHE_ALIGN, NULL); 9897 if (srb_cachep == NULL) { 9898 printk(KERN_ERR 9899 "%s: Unable to allocate SRB cache..." 9900 "Failing load!\n", DRIVER_NAME); 9901 ret = -ENOMEM; 9902 goto no_srp_cache; 9903 } 9904 9905 /* Derive version string. 
*/ 9906 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION); 9907 if (ql4xextended_error_logging) 9908 strcat(qla4xxx_version_str, "-debug"); 9909 9910 qla4xxx_scsi_transport = 9911 iscsi_register_transport(&qla4xxx_iscsi_transport); 9912 if (!qla4xxx_scsi_transport){ 9913 ret = -ENODEV; 9914 goto release_srb_cache; 9915 } 9916 9917 ret = pci_register_driver(&qla4xxx_pci_driver); 9918 if (ret) 9919 goto unregister_transport; 9920 9921 printk(KERN_INFO "QLogic iSCSI HBA Driver\n"); 9922 return 0; 9923 9924 unregister_transport: 9925 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9926 release_srb_cache: 9927 kmem_cache_destroy(srb_cachep); 9928 no_srp_cache: 9929 return ret; 9930 } 9931 9932 static void __exit qla4xxx_module_exit(void) 9933 { 9934 pci_unregister_driver(&qla4xxx_pci_driver); 9935 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 9936 kmem_cache_destroy(srb_cachep); 9937 } 9938 9939 module_init(qla4xxx_module_init); 9940 module_exit(qla4xxx_module_exit); 9941 9942 MODULE_AUTHOR("QLogic Corporation"); 9943 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver"); 9944 MODULE_LICENSE("GPL"); 9945 MODULE_VERSION(QLA4XXX_DRIVER_VERSION); 9946