1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c)  2003-2013 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12 
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15 
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
22 
23 /*
24  * Driver version
25  */
26 static char qla4xxx_version_str[40];
27 
28 /*
29  * SRB allocation cache
30  */
31 static struct kmem_cache *srb_cachep;
32 
33 /*
34  * Module parameter information and variables
35  */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 		 " Set to disable exporting boot targets to sysfs.\n"
40 		 "\t\t  0 - Export boot targets\n"
41 		 "\t\t  1 - Do not export boot targets (Default)");
42 
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 		 " Don't reset the HBA for driver recovery.\n"
47 		 "\t\t  0 - It will reset HBA (Default)\n"
48 		 "\t\t  1 - It will NOT reset HBA");
49 
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 		 " Option to enable extended error logging.\n"
54 		 "\t\t  0 - no logging (Default)\n"
55 		 "\t\t  2 - debug logging");
56 
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 		 "\t\t  0 = enable INTx interrupt mechanism.\n"
62 		 "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
63 		 "\t\t  2 = enable MSI interrupt mechanism.");
64 
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 		 " Maximum queue depth to report for target devices.\n"
70 		 "\t\t  Default: 32.");
71 
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 		 " Enable or disable dynamic tracking and adjustment of\n"
76 		 "\t\t scsi device queue depth.\n"
77 		 "\t\t  0 - Disable.\n"
78 		 "\t\t  1 - Enable. (Default)");
79 
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 		" Target Session Recovery Timeout.\n"
84 		"\t\t  Default: 120 sec.");
85 
86 int ql4xmdcapmask = 0;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 		 " Set the Minidump driver capture mask level.\n"
90 		 "\t\t  Default is 0 (firmware default capture mask)\n"
91 		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
92 
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 		 " Set to enable minidump.\n"
97 		 "\t\t  0 - disable minidump\n"
98 		 "\t\t  1 - enable minidump (Default)");
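
/*
 * Illustrative usage note (editorial, not from the original source): the
 * options above are standard module parameters, so they can be set at load
 * time, e.g. "modprobe qla4xxx ql4xextended_error_logging=2 ql4xmaxqdepth=64",
 * or persistently via an "options qla4xxx ..." line in /etc/modprobe.d/.
 * Parameters declared with S_IWUSR can also be changed at runtime through
 * /sys/module/qla4xxx/parameters/<name>.
 */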
99 
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 /*
102  * Internal driver function prototypes
103  */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
105 
106 /*
107  * iSCSI template entry points
108  */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 				     enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 				  enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 				  enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 				   uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 				   enum iscsi_param_type param_type,
119 				   int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 						 struct sockaddr *dst_addr,
123 						 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 				enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 			     struct iscsi_cls_conn *cls_conn,
133 			     uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 			uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 				   struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 			     uint32_t iface_type, uint32_t payload_size,
148 			     uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 				 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void  *data,
153 				  int len);
154 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);
155 
156 /*
157  * SCSI host template entry points
158  */
159 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
161 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
162 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
163 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
164 static int qla4xxx_slave_alloc(struct scsi_device *device);
165 static umode_t qla4_attr_is_visible(int param_type, int param);
166 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
167 
168 /*
169  * iSCSI Flash DDB sysfs entry points
170  */
171 static int
172 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
173 			    struct iscsi_bus_flash_conn *fnode_conn,
174 			    void *data, int len);
175 static int
176 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
177 			    int param, char *buf);
178 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
179 				 int len);
180 static int
181 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
182 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
183 				   struct iscsi_bus_flash_conn *fnode_conn);
184 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
185 				    struct iscsi_bus_flash_conn *fnode_conn);
186 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
187 
188 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
189     QLA82XX_LEGACY_INTR_CONFIG;
190 
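/*
 * SCSI midlayer host template: error handling is routed to the driver's own
 * abort/device/target/host reset handlers declared above, and queue depth
 * changes go through the generic scsi_change_queue_depth().
 */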
191 static struct scsi_host_template qla4xxx_driver_template = {
192 	.module			= THIS_MODULE,
193 	.name			= DRIVER_NAME,
194 	.proc_name		= DRIVER_NAME,
195 	.queuecommand		= qla4xxx_queuecommand,
196 
197 	.eh_abort_handler	= qla4xxx_eh_abort,
198 	.eh_device_reset_handler = qla4xxx_eh_device_reset,
199 	.eh_target_reset_handler = qla4xxx_eh_target_reset,
200 	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
201 	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,
202 
203 	.slave_alloc		= qla4xxx_slave_alloc,
204 	.change_queue_depth	= scsi_change_queue_depth,
205 
206 	.this_id		= -1,
207 	.cmd_per_lun		= 3,
208 	.use_clustering		= ENABLE_CLUSTERING,
209 	.sg_tablesize		= SG_ALL,
210 
211 	.max_sectors		= 0xFFFF,
212 	.shost_attrs		= qla4xxx_host_attrs,
213 	.host_reset		= qla4xxx_host_reset,
214 	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
215 	.use_blk_tags		= 1,
216 };
217 
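/*
 * iSCSI transport template registered with the iSCSI transport class.  The
 * capability flags advertise full login and data-path offload, so generic
 * libiscsi helpers are used only for a few operations (stop_conn, set_param,
 * send_pdu); everything else is handled by driver-specific callbacks.
 */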
218 static struct iscsi_transport qla4xxx_iscsi_transport = {
219 	.owner			= THIS_MODULE,
220 	.name			= DRIVER_NAME,
221 	.caps			= CAP_TEXT_NEGO |
222 				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
223 				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
224 				  CAP_MULTI_R2T,
225 	.attr_is_visible	= qla4_attr_is_visible,
226 	.create_session         = qla4xxx_session_create,
227 	.destroy_session        = qla4xxx_session_destroy,
228 	.start_conn             = qla4xxx_conn_start,
229 	.create_conn            = qla4xxx_conn_create,
230 	.bind_conn              = qla4xxx_conn_bind,
231 	.stop_conn              = iscsi_conn_stop,
232 	.destroy_conn           = qla4xxx_conn_destroy,
233 	.set_param              = iscsi_set_param,
234 	.get_conn_param		= qla4xxx_conn_get_param,
235 	.get_session_param	= qla4xxx_session_get_param,
236 	.get_ep_param           = qla4xxx_get_ep_param,
237 	.ep_connect		= qla4xxx_ep_connect,
238 	.ep_poll		= qla4xxx_ep_poll,
239 	.ep_disconnect		= qla4xxx_ep_disconnect,
240 	.get_stats		= qla4xxx_conn_get_stats,
241 	.send_pdu		= iscsi_conn_send_pdu,
242 	.xmit_task		= qla4xxx_task_xmit,
243 	.cleanup_task		= qla4xxx_task_cleanup,
244 	.alloc_pdu		= qla4xxx_alloc_pdu,
245 
246 	.get_host_param		= qla4xxx_host_get_param,
247 	.set_iface_param	= qla4xxx_iface_set_param,
248 	.get_iface_param	= qla4xxx_get_iface_param,
249 	.bsg_request		= qla4xxx_bsg_request,
250 	.send_ping		= qla4xxx_send_ping,
251 	.get_chap		= qla4xxx_get_chap_list,
252 	.delete_chap		= qla4xxx_delete_chap,
253 	.set_chap		= qla4xxx_set_chap_entry,
254 	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,
255 	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,
256 	.new_flashnode		= qla4xxx_sysfs_ddb_add,
257 	.del_flashnode		= qla4xxx_sysfs_ddb_delete,
258 	.login_flashnode	= qla4xxx_sysfs_ddb_login,
259 	.logout_flashnode	= qla4xxx_sysfs_ddb_logout,
260 	.logout_flashnode_sid	= qla4xxx_sysfs_ddb_logout_sid,
261 	.get_host_stats		= qla4xxx_get_host_stats,
262 };
263 
264 static struct scsi_transport_template *qla4xxx_scsi_transport;
265 
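/**
 * qla4xxx_send_ping - issue a firmware ping IOCB to a destination address
 * @shost: scsi host pointer
 * @iface_num: interface number (0 or 1)
 * @iface_type: ISCSI_IFACE_TYPE_IPV4 or ISCSI_IFACE_TYPE_IPV6
 * @payload_size: ping payload size
 * @pid: ping request identifier
 * @dst_addr: destination address
 *
 * The address family of @dst_addr must match @iface_type.  For IPv6, the
 * link-local address is tried as the ping source first; if that attempt
 * fails, the routable ipv6_addr0/ipv6_addr1 address is used instead.
 */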
266 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
267 			     uint32_t iface_type, uint32_t payload_size,
268 			     uint32_t pid, struct sockaddr *dst_addr)
269 {
270 	struct scsi_qla_host *ha = to_qla_host(shost);
271 	struct sockaddr_in *addr;
272 	struct sockaddr_in6 *addr6;
273 	uint32_t options = 0;
274 	uint8_t ipaddr[IPv6_ADDR_LEN];
275 	int rval;
276 
277 	memset(ipaddr, 0, IPv6_ADDR_LEN);
278 	/* IPv4 to IPv4 */
279 	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
280 	    (dst_addr->sa_family == AF_INET)) {
281 		addr = (struct sockaddr_in *)dst_addr;
282 		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
283 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
284 				  "dest: %pI4\n", __func__,
285 				  &ha->ip_config.ip_address, ipaddr));
286 		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
287 					 ipaddr);
288 		if (rval)
289 			rval = -EINVAL;
290 	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
291 		   (dst_addr->sa_family == AF_INET6)) {
292 		/* IPv6 to IPv6 */
293 		addr6 = (struct sockaddr_in6 *)dst_addr;
294 		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
295 
296 		options |= PING_IPV6_PROTOCOL_ENABLE;
297 
298 		/* Ping using LinkLocal address */
299 		if ((iface_num == 0) || (iface_num == 1)) {
300 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
301 					  "src: %pI6 dest: %pI6\n", __func__,
302 					  &ha->ip_config.ipv6_link_local_addr,
303 					  ipaddr));
304 			options |= PING_IPV6_LINKLOCAL_ADDR;
305 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
306 						 pid, ipaddr);
307 		} else {
308 			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
309 				   "not supported\n", __func__, iface_num);
310 			rval = -ENOSYS;
311 			goto exit_send_ping;
312 		}
313 
314 		/*
315 		 * If ping using LinkLocal address fails, try ping using
316 		 * IPv6 address
317 		 */
318 		if (rval != QLA_SUCCESS) {
319 			options &= ~PING_IPV6_LINKLOCAL_ADDR;
320 			if (iface_num == 0) {
321 				options |= PING_IPV6_ADDR0;
322 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
323 						  "Ping src: %pI6 "
324 						  "dest: %pI6\n", __func__,
325 						  &ha->ip_config.ipv6_addr0,
326 						  ipaddr));
327 			} else if (iface_num == 1) {
328 				options |= PING_IPV6_ADDR1;
329 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
330 						  "Ping src: %pI6 "
331 						  "dest: %pI6\n", __func__,
332 						  &ha->ip_config.ipv6_addr1,
333 						  ipaddr));
334 			}
335 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
336 						 pid, ipaddr);
337 			if (rval)
338 				rval = -EINVAL;
339 		}
340 	} else
341 		rval = -ENOSYS;
342 exit_send_ping:
343 	return rval;
344 }
345 
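/*
 * Report which iSCSI sysfs attributes this driver supports: the parameters
 * listed below are exposed read-only (S_IRUGO), everything else is hidden.
 */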
346 static umode_t qla4_attr_is_visible(int param_type, int param)
347 {
348 	switch (param_type) {
349 	case ISCSI_HOST_PARAM:
350 		switch (param) {
351 		case ISCSI_HOST_PARAM_HWADDRESS:
352 		case ISCSI_HOST_PARAM_IPADDRESS:
353 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
354 		case ISCSI_HOST_PARAM_PORT_STATE:
355 		case ISCSI_HOST_PARAM_PORT_SPEED:
356 			return S_IRUGO;
357 		default:
358 			return 0;
359 		}
360 	case ISCSI_PARAM:
361 		switch (param) {
362 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
363 		case ISCSI_PARAM_PERSISTENT_PORT:
364 		case ISCSI_PARAM_CONN_ADDRESS:
365 		case ISCSI_PARAM_CONN_PORT:
366 		case ISCSI_PARAM_TARGET_NAME:
367 		case ISCSI_PARAM_TPGT:
368 		case ISCSI_PARAM_TARGET_ALIAS:
369 		case ISCSI_PARAM_MAX_BURST:
370 		case ISCSI_PARAM_MAX_R2T:
371 		case ISCSI_PARAM_FIRST_BURST:
372 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
373 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
374 		case ISCSI_PARAM_IFACE_NAME:
375 		case ISCSI_PARAM_CHAP_OUT_IDX:
376 		case ISCSI_PARAM_CHAP_IN_IDX:
377 		case ISCSI_PARAM_USERNAME:
378 		case ISCSI_PARAM_PASSWORD:
379 		case ISCSI_PARAM_USERNAME_IN:
380 		case ISCSI_PARAM_PASSWORD_IN:
381 		case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
382 		case ISCSI_PARAM_DISCOVERY_SESS:
383 		case ISCSI_PARAM_PORTAL_TYPE:
384 		case ISCSI_PARAM_CHAP_AUTH_EN:
385 		case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
386 		case ISCSI_PARAM_BIDI_CHAP_EN:
387 		case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
388 		case ISCSI_PARAM_DEF_TIME2WAIT:
389 		case ISCSI_PARAM_DEF_TIME2RETAIN:
390 		case ISCSI_PARAM_HDRDGST_EN:
391 		case ISCSI_PARAM_DATADGST_EN:
392 		case ISCSI_PARAM_INITIAL_R2T_EN:
393 		case ISCSI_PARAM_IMM_DATA_EN:
394 		case ISCSI_PARAM_PDU_INORDER_EN:
395 		case ISCSI_PARAM_DATASEQ_INORDER_EN:
396 		case ISCSI_PARAM_MAX_SEGMENT_SIZE:
397 		case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
398 		case ISCSI_PARAM_TCP_WSF_DISABLE:
399 		case ISCSI_PARAM_TCP_NAGLE_DISABLE:
400 		case ISCSI_PARAM_TCP_TIMER_SCALE:
401 		case ISCSI_PARAM_TCP_TIMESTAMP_EN:
402 		case ISCSI_PARAM_TCP_XMIT_WSF:
403 		case ISCSI_PARAM_TCP_RECV_WSF:
404 		case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
405 		case ISCSI_PARAM_IPV4_TOS:
406 		case ISCSI_PARAM_IPV6_TC:
407 		case ISCSI_PARAM_IPV6_FLOW_LABEL:
408 		case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
409 		case ISCSI_PARAM_KEEPALIVE_TMO:
410 		case ISCSI_PARAM_LOCAL_PORT:
411 		case ISCSI_PARAM_ISID:
412 		case ISCSI_PARAM_TSID:
413 		case ISCSI_PARAM_DEF_TASKMGMT_TMO:
414 		case ISCSI_PARAM_ERL:
415 		case ISCSI_PARAM_STATSN:
416 		case ISCSI_PARAM_EXP_STATSN:
417 		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
418 		case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
419 		case ISCSI_PARAM_LOCAL_IPADDR:
420 			return S_IRUGO;
421 		default:
422 			return 0;
423 		}
424 	case ISCSI_NET_PARAM:
425 		switch (param) {
426 		case ISCSI_NET_PARAM_IPV4_ADDR:
427 		case ISCSI_NET_PARAM_IPV4_SUBNET:
428 		case ISCSI_NET_PARAM_IPV4_GW:
429 		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
430 		case ISCSI_NET_PARAM_IFACE_ENABLE:
431 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
432 		case ISCSI_NET_PARAM_IPV6_ADDR:
433 		case ISCSI_NET_PARAM_IPV6_ROUTER:
434 		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
435 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
436 		case ISCSI_NET_PARAM_VLAN_ID:
437 		case ISCSI_NET_PARAM_VLAN_PRIORITY:
438 		case ISCSI_NET_PARAM_VLAN_ENABLED:
439 		case ISCSI_NET_PARAM_MTU:
440 		case ISCSI_NET_PARAM_PORT:
441 		case ISCSI_NET_PARAM_IPADDR_STATE:
442 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
443 		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
444 		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
445 		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
446 		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
447 		case ISCSI_NET_PARAM_TCP_WSF:
448 		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
449 		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
450 		case ISCSI_NET_PARAM_CACHE_ID:
451 		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
452 		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
453 		case ISCSI_NET_PARAM_IPV4_TOS_EN:
454 		case ISCSI_NET_PARAM_IPV4_TOS:
455 		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
456 		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
457 		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
458 		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
459 		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
460 		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
461 		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
462 		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
463 		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
464 		case ISCSI_NET_PARAM_REDIRECT_EN:
465 		case ISCSI_NET_PARAM_IPV4_TTL:
466 		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
467 		case ISCSI_NET_PARAM_IPV6_MLD_EN:
468 		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
469 		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
470 		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
471 		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
472 		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
473 		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
474 		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
475 		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
476 			return S_IRUGO;
477 		default:
478 			return 0;
479 		}
480 	case ISCSI_IFACE_PARAM:
481 		switch (param) {
482 		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
483 		case ISCSI_IFACE_PARAM_HDRDGST_EN:
484 		case ISCSI_IFACE_PARAM_DATADGST_EN:
485 		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
486 		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
487 		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
488 		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
489 		case ISCSI_IFACE_PARAM_ERL:
490 		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
491 		case ISCSI_IFACE_PARAM_FIRST_BURST:
492 		case ISCSI_IFACE_PARAM_MAX_R2T:
493 		case ISCSI_IFACE_PARAM_MAX_BURST:
494 		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
495 		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
496 		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
497 		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
498 		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
499 		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
500 			return S_IRUGO;
501 		default:
502 			return 0;
503 		}
504 	case ISCSI_FLASHNODE_PARAM:
505 		switch (param) {
506 		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
507 		case ISCSI_FLASHNODE_PORTAL_TYPE:
508 		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
509 		case ISCSI_FLASHNODE_DISCOVERY_SESS:
510 		case ISCSI_FLASHNODE_ENTRY_EN:
511 		case ISCSI_FLASHNODE_HDR_DGST_EN:
512 		case ISCSI_FLASHNODE_DATA_DGST_EN:
513 		case ISCSI_FLASHNODE_IMM_DATA_EN:
514 		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
515 		case ISCSI_FLASHNODE_DATASEQ_INORDER:
516 		case ISCSI_FLASHNODE_PDU_INORDER:
517 		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
518 		case ISCSI_FLASHNODE_SNACK_REQ_EN:
519 		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
520 		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
521 		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
522 		case ISCSI_FLASHNODE_ERL:
523 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
524 		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
525 		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
526 		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
527 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
528 		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
529 		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
530 		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
531 		case ISCSI_FLASHNODE_FIRST_BURST:
532 		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
533 		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
534 		case ISCSI_FLASHNODE_MAX_R2T:
535 		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
536 		case ISCSI_FLASHNODE_ISID:
537 		case ISCSI_FLASHNODE_TSID:
538 		case ISCSI_FLASHNODE_PORT:
539 		case ISCSI_FLASHNODE_MAX_BURST:
540 		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
541 		case ISCSI_FLASHNODE_IPADDR:
542 		case ISCSI_FLASHNODE_ALIAS:
543 		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
544 		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
545 		case ISCSI_FLASHNODE_LOCAL_PORT:
546 		case ISCSI_FLASHNODE_IPV4_TOS:
547 		case ISCSI_FLASHNODE_IPV6_TC:
548 		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
549 		case ISCSI_FLASHNODE_NAME:
550 		case ISCSI_FLASHNODE_TPGT:
551 		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
552 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
553 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
554 		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
555 		case ISCSI_FLASHNODE_TCP_RECV_WSF:
556 		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
557 		case ISCSI_FLASHNODE_USERNAME:
558 		case ISCSI_FLASHNODE_PASSWORD:
559 		case ISCSI_FLASHNODE_STATSN:
560 		case ISCSI_FLASHNODE_EXP_STATSN:
561 		case ISCSI_FLASHNODE_IS_BOOT_TGT:
562 			return S_IRUGO;
563 		default:
564 			return 0;
565 		}
566 	}
567 
568 	return 0;
569 }
570 
571 /**
572  * qla4xxx_create_chap_list - Create CHAP list from FLASH
573  * @ha: pointer to adapter structure
574  *
575  * Read flash and build a list of CHAP entries. During login, when a CHAP
576  * entry is received it is looked up in this list. If the entry exists,
577  * its index is set in the DDB. If the entry does not exist, a new entry
578  * is added to the CHAP table in FLASH and the index obtained there is
579  * used in the DDB.
580  **/
581 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
582 {
583 	int rval = 0;
584 	uint8_t *chap_flash_data = NULL;
585 	uint32_t offset;
586 	dma_addr_t chap_dma;
587 	uint32_t chap_size = 0;
588 
589 	if (is_qla40XX(ha))
590 		chap_size = MAX_CHAP_ENTRIES_40XX *
591 			    sizeof(struct ql4_chap_table);
592 	else	/* A single flash region holds the CHAP info for both
593 		 * ports; each port uses one half of it.
594 		 */
595 		chap_size = ha->hw.flt_chap_size / 2;
596 
597 	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
598 					     &chap_dma, GFP_KERNEL);
599 	if (!chap_flash_data) {
600 		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
601 		return;
602 	}
603 
604 	if (is_qla40XX(ha)) {
605 		offset = FLASH_CHAP_OFFSET;
606 	} else {
607 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
608 		if (ha->port_num == 1)
609 			offset += chap_size;
610 	}
611 
612 	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
613 	if (rval != QLA_SUCCESS)
614 		goto exit_chap_list;
615 
616 	if (ha->chap_list == NULL)
617 		ha->chap_list = vmalloc(chap_size);
618 	if (ha->chap_list == NULL) {
619 		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
620 		goto exit_chap_list;
621 	}
622 
623 	memset(ha->chap_list, 0, chap_size);
624 	memcpy(ha->chap_list, chap_flash_data, chap_size);
625 
626 exit_chap_list:
627 	dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
628 }
629 
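/*
 * Look up a CHAP entry in the cached chap_list by index.  Returns
 * QLA_SUCCESS and sets *chap_entry if the entry carries a valid cookie,
 * QLA_ERROR otherwise.
 */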
630 static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
631 				     int16_t chap_index,
632 				     struct ql4_chap_table **chap_entry)
633 {
634 	int rval = QLA_ERROR;
635 	int max_chap_entries;
636 
637 	if (!ha->chap_list) {
638 		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
639 		rval = QLA_ERROR;
640 		goto exit_get_chap;
641 	}
642 
643 	if (is_qla80XX(ha))
644 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
645 				   sizeof(struct ql4_chap_table);
646 	else
647 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
648 
649 	if (chap_index >= max_chap_entries) {
650 		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
651 		rval = QLA_ERROR;
652 		goto exit_get_chap;
653 	}
654 
655 	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
656 	if ((*chap_entry)->cookie !=
657 	     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
658 		rval = QLA_ERROR;
659 		*chap_entry = NULL;
660 	} else {
661 		rval = QLA_SUCCESS;
662 	}
663 
664 exit_get_chap:
665 	return rval;
666 }
667 
668 /**
669  * qla4xxx_find_free_chap_index - Find the first free chap index
670  * @ha: pointer to adapter structure
671  * @chap_index: CHAP index to be returned
672  *
673  * Find the first free chap index available in the chap table
674  *
675  * Note: Caller should acquire the chap lock before getting here.
676  **/
677 static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
678 					uint16_t *chap_index)
679 {
680 	int i, rval;
681 	int free_index = -1;
682 	int max_chap_entries = 0;
683 	struct ql4_chap_table *chap_table;
684 
685 	if (is_qla80XX(ha))
686 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
687 						sizeof(struct ql4_chap_table);
688 	else
689 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
690 
691 	if (!ha->chap_list) {
692 		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
693 		rval = QLA_ERROR;
694 		goto exit_find_chap;
695 	}
696 
697 	for (i = 0; i < max_chap_entries; i++) {
698 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
699 
700 		if ((chap_table->cookie !=
701 		     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
702 		    (i > MAX_RESRV_CHAP_IDX)) {
703 			free_index = i;
704 			break;
705 		}
706 	}
707 
708 	if (free_index != -1) {
709 		*chap_index = free_index;
710 		rval = QLA_SUCCESS;
711 	} else {
712 		rval = QLA_ERROR;
713 	}
714 
715 exit_find_chap:
716 	return rval;
717 }
718 
719 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
720 				  uint32_t *num_entries, char *buf)
721 {
722 	struct scsi_qla_host *ha = to_qla_host(shost);
723 	struct ql4_chap_table *chap_table;
724 	struct iscsi_chap_rec *chap_rec;
725 	int max_chap_entries = 0;
726 	int valid_chap_entries = 0;
727 	int ret = 0, i;
728 
729 	if (is_qla80XX(ha))
730 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
731 					sizeof(struct ql4_chap_table);
732 	else
733 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
734 
735 	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
736 			__func__, *num_entries, chap_tbl_idx);
737 
738 	if (!buf) {
739 		ret = -ENOMEM;
740 		goto exit_get_chap_list;
741 	}
742 
743 	qla4xxx_create_chap_list(ha);
744 
745 	chap_rec = (struct iscsi_chap_rec *) buf;
746 	mutex_lock(&ha->chap_sem);
747 	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
748 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
749 		if (chap_table->cookie !=
750 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
751 			continue;
752 
753 		chap_rec->chap_tbl_idx = i;
754 		strlcpy(chap_rec->username, chap_table->name,
755 			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
756 		strlcpy(chap_rec->password, chap_table->secret,
757 			QL4_CHAP_MAX_SECRET_LEN);
758 		chap_rec->password_length = chap_table->secret_len;
759 
760 		if (chap_table->flags & BIT_7) /* local */
761 			chap_rec->chap_type = CHAP_TYPE_OUT;
762 
763 		if (chap_table->flags & BIT_6) /* peer */
764 			chap_rec->chap_type = CHAP_TYPE_IN;
765 
766 		chap_rec++;
767 
768 		valid_chap_entries++;
769 		if (valid_chap_entries == *num_entries)
770 			break;
771 		else
772 			continue;
773 	}
774 	mutex_unlock(&ha->chap_sem);
775 
776 exit_get_chap_list:
777 	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
778 			__func__,  valid_chap_entries);
779 	*num_entries = valid_chap_entries;
780 	return ret;
781 }
782 
783 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
784 {
785 	int ret = 0;
786 	uint16_t *chap_tbl_idx = (uint16_t *) data;
787 	struct iscsi_cls_session *cls_session;
788 	struct iscsi_session *sess;
789 	struct ddb_entry *ddb_entry;
790 
791 	if (!iscsi_is_session_dev(dev))
792 		goto exit_is_chap_active;
793 
794 	cls_session = iscsi_dev_to_session(dev);
795 	sess = cls_session->dd_data;
796 	ddb_entry = sess->dd_data;
797 
798 	if (iscsi_session_chkready(cls_session))
799 		goto exit_is_chap_active;
800 
801 	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
802 		ret = 1;
803 
804 exit_is_chap_active:
805 	return ret;
806 }
807 
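/*
 * Walk the host's child session devices and return nonzero if any active
 * session is currently using the given CHAP table index.
 */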
808 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
809 				  uint16_t chap_tbl_idx)
810 {
811 	int ret = 0;
812 
813 	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
814 				    __qla4xxx_is_chap_active);
815 
816 	return ret;
817 }
818 
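/**
 * qla4xxx_delete_chap - delete a CHAP entry from flash
 * @shost: pointer to host
 * @chap_tbl_idx: index of the CHAP entry to delete
 *
 * Refuses to delete an entry that is in use by an active session.
 * Otherwise the entry's cookie is invalidated in flash and the cached
 * chap_list copy is updated to match.
 */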
819 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
820 {
821 	struct scsi_qla_host *ha = to_qla_host(shost);
822 	struct ql4_chap_table *chap_table;
823 	dma_addr_t chap_dma;
824 	int max_chap_entries = 0;
825 	uint32_t offset = 0;
826 	uint32_t chap_size;
827 	int ret = 0;
828 
829 	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
830 	if (chap_table == NULL)
831 		return -ENOMEM;
832 
833 	memset(chap_table, 0, sizeof(struct ql4_chap_table));
834 
835 	if (is_qla80XX(ha))
836 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
837 				   sizeof(struct ql4_chap_table);
838 	else
839 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
840 
841 	if (chap_tbl_idx >= max_chap_entries) {
842 		ret = -EINVAL;
843 		goto exit_delete_chap;
844 	}
845 
846 	/* Check if the CHAP index is in use.
847 	 * If it is in use, do not delete the entry. */
848 	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
849 	if (ret) {
850 		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
851 			   "delete from flash\n", chap_tbl_idx);
852 		ret = -EBUSY;
853 		goto exit_delete_chap;
854 	}
855 
856 	chap_size = sizeof(struct ql4_chap_table);
857 	if (is_qla40XX(ha))
858 		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
859 	else {
860 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
861 		/* flt_chap_size is CHAP table size for both ports
862 		 * so divide it by 2 to calculate the offset for second port
863 		 */
864 		if (ha->port_num == 1)
865 			offset += (ha->hw.flt_chap_size / 2);
866 		offset += (chap_tbl_idx * chap_size);
867 	}
868 
869 	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
870 	if (ret != QLA_SUCCESS) {
871 		ret = -EINVAL;
872 		goto exit_delete_chap;
873 	}
874 
875 	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
876 			  __le16_to_cpu(chap_table->cookie)));
877 
878 	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
879 		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		ret = -EINVAL;
880 		goto exit_delete_chap;
881 	}
882 
883 	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
884 
885 	/* Reuse the per-chip flash offset computed for the read above; do not
886 	 * recompute it with FLASH_CHAP_OFFSET (wrong for ISP8xxx parts). */
887 	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
888 				FLASH_OPT_RMW_COMMIT);
889 	if (ret == QLA_SUCCESS && ha->chap_list) {
890 		mutex_lock(&ha->chap_sem);
891 		/* Update ha chap_list cache */
892 		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
893 			chap_table, sizeof(struct ql4_chap_table));
894 		mutex_unlock(&ha->chap_sem);
895 	}
896 	if (ret != QLA_SUCCESS)
897 		ret =  -EINVAL;
898 
899 exit_delete_chap:
900 	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
901 	return ret;
902 }
903 
904 /**
905  * qla4xxx_set_chap_entry - Make chap entry with given information
906  * @shost: pointer to host
907  * @data: chap info - credentials, index and type to make chap entry
908  * @len: length of data
909  *
910  * Add or update chap entry with the given information
911  **/
912 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
913 {
914 	struct scsi_qla_host *ha = to_qla_host(shost);
915 	struct iscsi_chap_rec chap_rec;
916 	struct ql4_chap_table *chap_entry = NULL;
917 	struct iscsi_param_info *param_info;
918 	struct nlattr *attr;
919 	int max_chap_entries = 0;
920 	int type;
921 	int rem = len;
922 	int rc = 0;
923 	int size;
924 
925 	memset(&chap_rec, 0, sizeof(chap_rec));
926 
927 	nla_for_each_attr(attr, data, len, rem) {
928 		param_info = nla_data(attr);
929 
930 		switch (param_info->param) {
931 		case ISCSI_CHAP_PARAM_INDEX:
932 			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
933 			break;
934 		case ISCSI_CHAP_PARAM_CHAP_TYPE:
935 			chap_rec.chap_type = param_info->value[0];
936 			break;
937 		case ISCSI_CHAP_PARAM_USERNAME:
938 			size = min_t(size_t, sizeof(chap_rec.username),
939 				     param_info->len);
940 			memcpy(chap_rec.username, param_info->value, size);
941 			break;
942 		case ISCSI_CHAP_PARAM_PASSWORD:
943 			size = min_t(size_t, sizeof(chap_rec.password),
944 				     param_info->len);
945 			memcpy(chap_rec.password, param_info->value, size);
946 			break;
947 		case ISCSI_CHAP_PARAM_PASSWORD_LEN:
948 			chap_rec.password_length = param_info->value[0];
949 			break;
950 		default:
951 			ql4_printk(KERN_ERR, ha,
952 				   "%s: No such sysfs attribute\n", __func__);
953 			rc = -ENOSYS;
954 			goto exit_set_chap;
955 		}
956 	}
957 
958 	if (chap_rec.chap_type == CHAP_TYPE_IN)
959 		type = BIDI_CHAP;
960 	else
961 		type = LOCAL_CHAP;
962 
963 	if (is_qla80XX(ha))
964 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
965 				   sizeof(struct ql4_chap_table);
966 	else
967 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
968 
969 	mutex_lock(&ha->chap_sem);
970 	if (chap_rec.chap_tbl_idx < max_chap_entries) {
971 		rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
972 					       &chap_entry);
973 		if (!rc) {
974 			if (!(type == qla4xxx_get_chap_type(chap_entry))) {
975 				ql4_printk(KERN_INFO, ha,
976 					   "Type mismatch for CHAP entry %d\n",
977 					   chap_rec.chap_tbl_idx);
978 				rc = -EINVAL;
979 				goto exit_unlock_chap;
980 			}
981 
982 			/* If chap index is in use then don't modify it */
983 			rc = qla4xxx_is_chap_active(shost,
984 						    chap_rec.chap_tbl_idx);
985 			if (rc) {
986 				ql4_printk(KERN_INFO, ha,
987 					   "CHAP entry %d is in use\n",
988 					   chap_rec.chap_tbl_idx);
989 				rc = -EBUSY;
990 				goto exit_unlock_chap;
991 			}
992 		}
993 	} else {
994 		rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
995 		if (rc) {
996 			ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
997 			rc = -EBUSY;
998 			goto exit_unlock_chap;
999 		}
1000 	}
1001 
1002 	rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
1003 			      chap_rec.chap_tbl_idx, type);
1004 
1005 exit_unlock_chap:
1006 	mutex_unlock(&ha->chap_sem);
1007 
1008 exit_set_chap:
1009 	return rc;
1010 }
1011 
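/*
 * Fetch the firmware's MAC/IP/TCP/iSCSI statistics into a DMA buffer via
 * qla4xxx_get_mgmt_data() and translate them into the transport's
 * struct iscsi_offload_host_stats layout expected in @buf.
 */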
1013 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
1014 {
1015 	struct scsi_qla_host *ha = to_qla_host(shost);
1016 	struct iscsi_offload_host_stats *host_stats = NULL;
1017 	int host_stats_size;
1018 	int ret = 0;
1019 	int ddb_idx = 0;
1020 	struct ql_iscsi_stats *ql_iscsi_stats = NULL;
1021 	int stats_size;
1022 	dma_addr_t iscsi_stats_dma;
1023 
1024 	DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));
1025 
1026 	host_stats_size = sizeof(struct iscsi_offload_host_stats);
1027 
1028 	if (host_stats_size != len) {
1029 		ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
1030 			   __func__, host_stats_size, len);
1031 		ret = -EINVAL;
1032 		goto exit_host_stats;
1033 	}
1034 	if (!buf) {
1035 		ret = -ENOMEM;
1036 		goto exit_host_stats;
1037 	}
1038 
1039 	host_stats = (struct iscsi_offload_host_stats *)buf;
1040 
1041 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
1042 
1043 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
1044 					    &iscsi_stats_dma, GFP_KERNEL);
1045 	if (!ql_iscsi_stats) {
1046 		ql4_printk(KERN_ERR, ha,
1047 			   "Unable to allocate memory for iscsi stats\n");
1048 		ret = -ENOMEM;
1049 		goto exit_host_stats;
1050 	}
1051 
1052 	ret =  qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
1053 				     iscsi_stats_dma);
1054 	if (ret != QLA_SUCCESS) {
1055 		ql4_printk(KERN_ERR, ha,
1056 			   "Unable to retrieve iscsi stats\n");
1057 		ret = -EIO;
1058 		goto exit_host_stats;
1059 	}
1060 	host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
1061 	host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
1062 	host_stats->mactx_multicast_frames =
1063 			le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
1064 	host_stats->mactx_broadcast_frames =
1065 			le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
1066 	host_stats->mactx_pause_frames =
1067 			le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
1068 	host_stats->mactx_control_frames =
1069 			le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
1070 	host_stats->mactx_deferral =
1071 			le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
1072 	host_stats->mactx_excess_deferral =
1073 			le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
1074 	host_stats->mactx_late_collision =
1075 			le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
1076 	host_stats->mactx_abort	= le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
1077 	host_stats->mactx_single_collision =
1078 			le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
1079 	host_stats->mactx_multiple_collision =
1080 			le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
1081 	host_stats->mactx_collision =
1082 			le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
1083 	host_stats->mactx_frames_dropped =
1084 			le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
1085 	host_stats->mactx_jumbo_frames =
1086 			le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
1087 	host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
1088 	host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
1089 	host_stats->macrx_unknown_control_frames =
1090 		le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
1091 	host_stats->macrx_pause_frames =
1092 			le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
1093 	host_stats->macrx_control_frames =
1094 			le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
1095 	host_stats->macrx_dribble =
1096 			le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
1097 	host_stats->macrx_frame_length_error =
1098 			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
1099 	host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
1100 	host_stats->macrx_carrier_sense_error =
1101 		le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
1102 	host_stats->macrx_frame_discarded =
1103 			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
1104 	host_stats->macrx_frames_dropped =
1105 			le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
1106 	host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
1107 	host_stats->mac_encoding_error =
1108 			le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
1109 	host_stats->macrx_length_error_large =
1110 			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
1111 	host_stats->macrx_length_error_small =
1112 			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
1113 	host_stats->macrx_multicast_frames =
1114 			le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
1115 	host_stats->macrx_broadcast_frames =
1116 			le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
1117 	host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
1118 	host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
1119 	host_stats->iptx_fragments =
1120 			le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
1121 	host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
1122 	host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
1123 	host_stats->iprx_fragments =
1124 			le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
1125 	host_stats->ip_datagram_reassembly =
1126 			le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
1127 	host_stats->ip_invalid_address_error =
1128 			le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
1129 	host_stats->ip_error_packets =
1130 			le64_to_cpu(ql_iscsi_stats->ip_error_packets);
1131 	host_stats->ip_fragrx_overlap =
1132 			le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
1133 	host_stats->ip_fragrx_outoforder =
1134 			le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
1135 	host_stats->ip_datagram_reassembly_timeout =
1136 		le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
1137 	host_stats->ipv6tx_packets =
1138 			le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
1139 	host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
1140 	host_stats->ipv6tx_fragments =
1141 			le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
1142 	host_stats->ipv6rx_packets =
1143 			le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
1144 	host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
1145 	host_stats->ipv6rx_fragments =
1146 			le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
1147 	host_stats->ipv6_datagram_reassembly =
1148 			le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
1149 	host_stats->ipv6_invalid_address_error =
1150 		le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
1151 	host_stats->ipv6_error_packets =
1152 			le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
1153 	host_stats->ipv6_fragrx_overlap =
1154 			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
1155 	host_stats->ipv6_fragrx_outoforder =
1156 			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
1157 	host_stats->ipv6_datagram_reassembly_timeout =
1158 		le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
1159 	host_stats->tcptx_segments =
1160 			le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
1161 	host_stats->tcptx_bytes	= le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
1162 	host_stats->tcprx_segments =
1163 			le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
1164 	host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
1165 	host_stats->tcp_duplicate_ack_retx =
1166 			le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
1167 	host_stats->tcp_retx_timer_expired =
1168 			le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
1169 	host_stats->tcprx_duplicate_ack	=
1170 			le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
1171 	host_stats->tcprx_pure_ackr =
1172 			le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
1173 	host_stats->tcptx_delayed_ack =
1174 			le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
1175 	host_stats->tcptx_pure_ack =
1176 			le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
1177 	host_stats->tcprx_segment_error =
1178 			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
1179 	host_stats->tcprx_segment_outoforder =
1180 			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
1181 	host_stats->tcprx_window_probe =
1182 			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
1183 	host_stats->tcprx_window_update =
1184 			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
1185 	host_stats->tcptx_window_probe_persist =
1186 		le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
1187 	host_stats->ecc_error_correction =
1188 			le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
1189 	host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
1190 	host_stats->iscsi_data_bytes_tx =
1191 			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
1192 	host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
1193 	host_stats->iscsi_data_bytes_rx	=
1194 			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
1195 	host_stats->iscsi_io_completed =
1196 			le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
1197 	host_stats->iscsi_unexpected_io_rx =
1198 			le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
1199 	host_stats->iscsi_format_error =
1200 			le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
1201 	host_stats->iscsi_hdr_digest_error =
1202 			le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
1203 	host_stats->iscsi_data_digest_error =
1204 			le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
1205 	host_stats->iscsi_sequence_error =
1206 			le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
1207 exit_host_stats:
1208 	if (ql_iscsi_stats)
1209 		dma_free_coherent(&ha->pdev->dev, stats_size,
1210 				  ql_iscsi_stats, iscsi_stats_dma);
1211 
1212 	ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
1213 		   __func__);
1214 	return ret;
1215 }
1216 
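/*
 * Format a single network or iface parameter into @buf for the iSCSI
 * transport sysfs interface.  Returns the number of bytes written, or
 * -ENOSYS for parameters this driver does not report.
 */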
1217 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
1218 				   enum iscsi_param_type param_type,
1219 				   int param, char *buf)
1220 {
1221 	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
1222 	struct scsi_qla_host *ha = to_qla_host(shost);
1223 	int ival;
1224 	char *pval = NULL;
1225 	int len = -ENOSYS;
1226 
1227 	if (param_type == ISCSI_NET_PARAM) {
1228 		switch (param) {
1229 		case ISCSI_NET_PARAM_IPV4_ADDR:
1230 			len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1231 			break;
1232 		case ISCSI_NET_PARAM_IPV4_SUBNET:
1233 			len = sprintf(buf, "%pI4\n",
1234 				      &ha->ip_config.subnet_mask);
1235 			break;
1236 		case ISCSI_NET_PARAM_IPV4_GW:
1237 			len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
1238 			break;
1239 		case ISCSI_NET_PARAM_IFACE_ENABLE:
1240 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1241 				OP_STATE(ha->ip_config.ipv4_options,
1242 					 IPOPT_IPV4_PROTOCOL_ENABLE, pval);
1243 			} else {
1244 				OP_STATE(ha->ip_config.ipv6_options,
1245 					 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
1246 			}
1247 
1248 			len = sprintf(buf, "%s\n", pval);
1249 			break;
1250 		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1251 			len = sprintf(buf, "%s\n",
1252 				      (ha->ip_config.tcp_options &
1253 				       TCPOPT_DHCP_ENABLE) ?
1254 				      "dhcp" : "static");
1255 			break;
1256 		case ISCSI_NET_PARAM_IPV6_ADDR:
1257 			if (iface->iface_num == 0)
1258 				len = sprintf(buf, "%pI6\n",
1259 					      &ha->ip_config.ipv6_addr0);
1260 			if (iface->iface_num == 1)
1261 				len = sprintf(buf, "%pI6\n",
1262 					      &ha->ip_config.ipv6_addr1);
1263 			break;
1264 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1265 			len = sprintf(buf, "%pI6\n",
1266 				      &ha->ip_config.ipv6_link_local_addr);
1267 			break;
1268 		case ISCSI_NET_PARAM_IPV6_ROUTER:
1269 			len = sprintf(buf, "%pI6\n",
1270 				      &ha->ip_config.ipv6_default_router_addr);
1271 			break;
1272 		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1273 			pval = (ha->ip_config.ipv6_addl_options &
1274 				IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
1275 				"nd" : "static";
1276 
1277 			len = sprintf(buf, "%s\n", pval);
1278 			break;
1279 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1280 			pval = (ha->ip_config.ipv6_addl_options &
1281 				IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
1282 				"auto" : "static";
1283 
1284 			len = sprintf(buf, "%s\n", pval);
1285 			break;
1286 		case ISCSI_NET_PARAM_VLAN_ID:
1287 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1288 				ival = ha->ip_config.ipv4_vlan_tag &
1289 				       ISCSI_MAX_VLAN_ID;
1290 			else
1291 				ival = ha->ip_config.ipv6_vlan_tag &
1292 				       ISCSI_MAX_VLAN_ID;
1293 
1294 			len = sprintf(buf, "%d\n", ival);
1295 			break;
1296 		case ISCSI_NET_PARAM_VLAN_PRIORITY:
1297 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1298 				ival = (ha->ip_config.ipv4_vlan_tag >> 13) &
1299 				       ISCSI_MAX_VLAN_PRIORITY;
1300 			else
1301 				ival = (ha->ip_config.ipv6_vlan_tag >> 13) &
1302 				       ISCSI_MAX_VLAN_PRIORITY;
1303 
1304 			len = sprintf(buf, "%d\n", ival);
1305 			break;
1306 		case ISCSI_NET_PARAM_VLAN_ENABLED:
1307 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1308 				OP_STATE(ha->ip_config.ipv4_options,
1309 					 IPOPT_VLAN_TAGGING_ENABLE, pval);
1310 			} else {
1311 				OP_STATE(ha->ip_config.ipv6_options,
1312 					 IPV6_OPT_VLAN_TAGGING_ENABLE, pval);
1313 			}
1314 			len = sprintf(buf, "%s\n", pval);
1315 			break;
1316 		case ISCSI_NET_PARAM_MTU:
1317 			len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
1318 			break;
1319 		case ISCSI_NET_PARAM_PORT:
1320 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1321 				len = sprintf(buf, "%d\n",
1322 					      ha->ip_config.ipv4_port);
1323 			else
1324 				len = sprintf(buf, "%d\n",
1325 					      ha->ip_config.ipv6_port);
1326 			break;
1327 		case ISCSI_NET_PARAM_IPADDR_STATE:
1328 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1329 				pval = iscsi_get_ipaddress_state_name(
1330 						ha->ip_config.ipv4_addr_state);
1331 			} else {
1332 				if (iface->iface_num == 0)
1333 					pval = iscsi_get_ipaddress_state_name(
1334 						ha->ip_config.ipv6_addr0_state);
1335 				else if (iface->iface_num == 1)
1336 					pval = iscsi_get_ipaddress_state_name(
1337 						ha->ip_config.ipv6_addr1_state);
1338 			}
1339 
1340 			len = sprintf(buf, "%s\n", pval);
1341 			break;
1342 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
1343 			pval = iscsi_get_ipaddress_state_name(
1344 					ha->ip_config.ipv6_link_local_state);
1345 			len = sprintf(buf, "%s\n", pval);
1346 			break;
1347 		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
1348 			pval = iscsi_get_router_state_name(
1349 				      ha->ip_config.ipv6_default_router_state);
1350 			len = sprintf(buf, "%s\n", pval);
1351 			break;
1352 		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
1353 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1354 				OP_STATE(~ha->ip_config.tcp_options,
1355 					 TCPOPT_DELAYED_ACK_DISABLE, pval);
1356 			} else {
1357 				OP_STATE(~ha->ip_config.ipv6_tcp_options,
1358 					 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval);
1359 			}
1360 			len = sprintf(buf, "%s\n", pval);
1361 			break;
1362 		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
1363 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1364 				OP_STATE(~ha->ip_config.tcp_options,
1365 					 TCPOPT_NAGLE_ALGO_DISABLE, pval);
1366 			} else {
1367 				OP_STATE(~ha->ip_config.ipv6_tcp_options,
1368 					 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval);
1369 			}
1370 			len = sprintf(buf, "%s\n", pval);
1371 			break;
1372 		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
1373 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1374 				OP_STATE(~ha->ip_config.tcp_options,
1375 					 TCPOPT_WINDOW_SCALE_DISABLE, pval);
1376 			} else {
1377 				OP_STATE(~ha->ip_config.ipv6_tcp_options,
1378 					 IPV6_TCPOPT_WINDOW_SCALE_DISABLE,
1379 					 pval);
1380 			}
1381 			len = sprintf(buf, "%s\n", pval);
1382 			break;
1383 		case ISCSI_NET_PARAM_TCP_WSF:
1384 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1385 				len = sprintf(buf, "%d\n",
1386 					      ha->ip_config.tcp_wsf);
1387 			else
1388 				len = sprintf(buf, "%d\n",
1389 					      ha->ip_config.ipv6_tcp_wsf);
1390 			break;
1391 		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
1392 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1393 				ival = (ha->ip_config.tcp_options &
1394 					TCPOPT_TIMER_SCALE) >> 1;
1395 			else
1396 				ival = (ha->ip_config.ipv6_tcp_options &
1397 					IPV6_TCPOPT_TIMER_SCALE) >> 1;
1398 
1399 			len = sprintf(buf, "%d\n", ival);
1400 			break;
1401 		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
1402 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1403 				OP_STATE(ha->ip_config.tcp_options,
1404 					 TCPOPT_TIMESTAMP_ENABLE, pval);
1405 			} else {
1406 				OP_STATE(ha->ip_config.ipv6_tcp_options,
1407 					 IPV6_TCPOPT_TIMESTAMP_EN, pval);
1408 			}
1409 			len = sprintf(buf, "%s\n", pval);
1410 			break;
1411 		case ISCSI_NET_PARAM_CACHE_ID:
1412 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1413 				len = sprintf(buf, "%d\n",
1414 					      ha->ip_config.ipv4_cache_id);
1415 			else
1416 				len = sprintf(buf, "%d\n",
1417 					      ha->ip_config.ipv6_cache_id);
1418 			break;
1419 		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
1420 			OP_STATE(ha->ip_config.tcp_options,
1421 				 TCPOPT_DNS_SERVER_IP_EN, pval);
1422 
1423 			len = sprintf(buf, "%s\n", pval);
1424 			break;
1425 		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
1426 			OP_STATE(ha->ip_config.tcp_options,
1427 				 TCPOPT_SLP_DA_INFO_EN, pval);
1428 
1429 			len = sprintf(buf, "%s\n", pval);
1430 			break;
1431 		case ISCSI_NET_PARAM_IPV4_TOS_EN:
1432 			OP_STATE(ha->ip_config.ipv4_options,
1433 				 IPOPT_IPV4_TOS_EN, pval);
1434 
1435 			len = sprintf(buf, "%s\n", pval);
1436 			break;
1437 		case ISCSI_NET_PARAM_IPV4_TOS:
1438 			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
1439 			break;
1440 		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
1441 			OP_STATE(ha->ip_config.ipv4_options,
1442 				 IPOPT_GRAT_ARP_EN, pval);
1443 
1444 			len = sprintf(buf, "%s\n", pval);
1445 			break;
1446 		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
1447 			OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
1448 				 pval);
1449 
1450 			len = sprintf(buf, "%s\n", pval);
1451 			break;
1452 		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
1453 			pval = (ha->ip_config.ipv4_alt_cid_len) ?
1454 			       (char *)ha->ip_config.ipv4_alt_cid : "";
1455 
1456 			len = sprintf(buf, "%s\n", pval);
1457 			break;
1458 		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
1459 			OP_STATE(ha->ip_config.ipv4_options,
1460 				 IPOPT_REQ_VID_EN, pval);
1461 
1462 			len = sprintf(buf, "%s\n", pval);
1463 			break;
1464 		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
1465 			OP_STATE(ha->ip_config.ipv4_options,
1466 				 IPOPT_USE_VID_EN, pval);
1467 
1468 			len = sprintf(buf, "%s\n", pval);
1469 			break;
1470 		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
1471 			pval = (ha->ip_config.ipv4_vid_len) ?
1472 			       (char *)ha->ip_config.ipv4_vid : "";
1473 
1474 			len = sprintf(buf, "%s\n", pval);
1475 			break;
1476 		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
1477 			OP_STATE(ha->ip_config.ipv4_options,
1478 				 IPOPT_LEARN_IQN_EN, pval);
1479 
1480 			len = sprintf(buf, "%s\n", pval);
1481 			break;
1482 		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
1483 			OP_STATE(~ha->ip_config.ipv4_options,
1484 				 IPOPT_FRAGMENTATION_DISABLE, pval);
1485 
1486 			len = sprintf(buf, "%s\n", pval);
1487 			break;
1488 		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
1489 			OP_STATE(ha->ip_config.ipv4_options,
1490 				 IPOPT_IN_FORWARD_EN, pval);
1491 
1492 			len = sprintf(buf, "%s\n", pval);
1493 			break;
1494 		case ISCSI_NET_PARAM_REDIRECT_EN:
1495 			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1496 				OP_STATE(ha->ip_config.ipv4_options,
1497 					 IPOPT_ARP_REDIRECT_EN, pval);
1498 			} else {
1499 				OP_STATE(ha->ip_config.ipv6_options,
1500 					 IPV6_OPT_REDIRECT_EN, pval);
1501 			}
1502 			len = sprintf(buf, "%s\n", pval);
1503 			break;
1504 		case ISCSI_NET_PARAM_IPV4_TTL:
1505 			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
1506 			break;
1507 		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
1508 			OP_STATE(ha->ip_config.ipv6_options,
1509 				 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);
1510 
1511 			len = sprintf(buf, "%s\n", pval);
1512 			break;
1513 		case ISCSI_NET_PARAM_IPV6_MLD_EN:
1514 			OP_STATE(ha->ip_config.ipv6_addl_options,
1515 				 IPV6_ADDOPT_MLD_EN, pval);
1516 
1517 			len = sprintf(buf, "%s\n", pval);
1518 			break;
1519 		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
1520 			len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
1521 			break;
1522 		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
1523 			len = sprintf(buf, "%d\n",
1524 				      ha->ip_config.ipv6_traffic_class);
1525 			break;
1526 		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
1527 			len = sprintf(buf, "%d\n",
1528 				      ha->ip_config.ipv6_hop_limit);
1529 			break;
1530 		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
1531 			len = sprintf(buf, "%d\n",
1532 				      ha->ip_config.ipv6_nd_reach_time);
1533 			break;
1534 		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
1535 			len = sprintf(buf, "%d\n",
1536 				      ha->ip_config.ipv6_nd_rexmit_timer);
1537 			break;
1538 		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
1539 			len = sprintf(buf, "%d\n",
1540 				      ha->ip_config.ipv6_nd_stale_timeout);
1541 			break;
1542 		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
1543 			len = sprintf(buf, "%d\n",
1544 				      ha->ip_config.ipv6_dup_addr_detect_count);
1545 			break;
1546 		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
1547 			len = sprintf(buf, "%d\n",
1548 				      ha->ip_config.ipv6_gw_advrt_mtu);
1549 			break;
1550 		default:
1551 			len = -ENOSYS;
1552 		}
1553 	} else if (param_type == ISCSI_IFACE_PARAM) {
1554 		switch (param) {
1555 		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
1556 			len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
1557 			break;
1558 		case ISCSI_IFACE_PARAM_HDRDGST_EN:
1559 			OP_STATE(ha->ip_config.iscsi_options,
1560 				 ISCSIOPTS_HEADER_DIGEST_EN, pval);
1561 
1562 			len = sprintf(buf, "%s\n", pval);
1563 			break;
1564 		case ISCSI_IFACE_PARAM_DATADGST_EN:
1565 			OP_STATE(ha->ip_config.iscsi_options,
1566 				 ISCSIOPTS_DATA_DIGEST_EN, pval);
1567 
1568 			len = sprintf(buf, "%s\n", pval);
1569 			break;
1570 		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
1571 			OP_STATE(ha->ip_config.iscsi_options,
1572 				 ISCSIOPTS_IMMEDIATE_DATA_EN, pval);
1573 
1574 			len = sprintf(buf, "%s\n", pval);
1575 			break;
1576 		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
1577 			OP_STATE(ha->ip_config.iscsi_options,
1578 				 ISCSIOPTS_INITIAL_R2T_EN, pval);
1579 
1580 			len = sprintf(buf, "%s\n", pval);
1581 			break;
1582 		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
1583 			OP_STATE(ha->ip_config.iscsi_options,
1584 				 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);
1585 
1586 			len = sprintf(buf, "%s\n", pval);
1587 			break;
1588 		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
1589 			OP_STATE(ha->ip_config.iscsi_options,
1590 				 ISCSIOPTS_DATA_PDU_INORDER_EN, pval);
1591 
1592 			len = sprintf(buf, "%s\n", pval);
1593 			break;
1594 		case ISCSI_IFACE_PARAM_ERL:
1595 			len = sprintf(buf, "%d\n",
1596 				      (ha->ip_config.iscsi_options &
1597 				       ISCSIOPTS_ERL));
1598 			break;
1599 		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
1600 			len = sprintf(buf, "%u\n",
1601 				      ha->ip_config.iscsi_max_pdu_size *
1602 				      BYTE_UNITS);
1603 			break;
1604 		case ISCSI_IFACE_PARAM_FIRST_BURST:
1605 			len = sprintf(buf, "%u\n",
1606 				      ha->ip_config.iscsi_first_burst_len *
1607 				      BYTE_UNITS);
1608 			break;
1609 		case ISCSI_IFACE_PARAM_MAX_R2T:
1610 			len = sprintf(buf, "%d\n",
1611 				      ha->ip_config.iscsi_max_outstnd_r2t);
1612 			break;
1613 		case ISCSI_IFACE_PARAM_MAX_BURST:
1614 			len = sprintf(buf, "%u\n",
1615 				      ha->ip_config.iscsi_max_burst_len *
1616 				      BYTE_UNITS);
1617 			break;
1618 		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
1619 			OP_STATE(ha->ip_config.iscsi_options,
1620 				 ISCSIOPTS_CHAP_AUTH_EN, pval);
1621 
1622 			len = sprintf(buf, "%s\n", pval);
1623 			break;
1624 		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
1625 			OP_STATE(ha->ip_config.iscsi_options,
1626 				 ISCSIOPTS_BIDI_CHAP_EN, pval);
1627 
1628 			len = sprintf(buf, "%s\n", pval);
1629 			break;
1630 		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
1631 			OP_STATE(ha->ip_config.iscsi_options,
1632 				 ISCSIOPTS_DISCOVERY_AUTH_EN, pval);
1633 
1634 			len = sprintf(buf, "%s\n", pval);
1635 			break;
1636 		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
1637 			OP_STATE(ha->ip_config.iscsi_options,
1638 				 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);
1639 
1640 			len = sprintf(buf, "%s\n", pval);
1641 			break;
1642 		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
1643 			OP_STATE(ha->ip_config.iscsi_options,
1644 				 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);
1645 
1646 			len = sprintf(buf, "%s\n", pval);
1647 			break;
1648 		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
1649 			len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
1650 			break;
1651 		default:
1652 			len = -ENOSYS;
1653 		}
1654 	}
1655 
1656 	return len;
1657 }
1658 
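/**
 * qla4xxx_ep_connect - create a new endpoint for a connection
 * @shost: scsi host
 * @dst_addr: destination address, AF_INET or AF_INET6
 * @non_blocking: blocking or non-blocking call
 *
 * Allocates a qla_endpoint, copies in the destination address and
 * records the owning Scsi_Host.  Returns the new endpoint on success
 * or an ERR_PTR() on failure.
 **/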
1659 static struct iscsi_endpoint *
1660 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1661 		   int non_blocking)
1662 {
1663 	int ret;
1664 	struct iscsi_endpoint *ep;
1665 	struct qla_endpoint *qla_ep;
1666 	struct scsi_qla_host *ha;
1667 	struct sockaddr_in *addr;
1668 	struct sockaddr_in6 *addr6;
1669 
1670 	if (!shost) {
1671 		ret = -ENXIO;
1672 		pr_err("%s: shost is NULL\n", __func__);
1673 		return ERR_PTR(ret);
1674 	}
1675 
1676 	ha = iscsi_host_priv(shost);
1677 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
1678 	if (!ep) {
1679 		ret = -ENOMEM;
1680 		return ERR_PTR(ret);
1681 	}
1682 
1683 	qla_ep = ep->dd_data;
1684 	memset(qla_ep, 0, sizeof(struct qla_endpoint));
1685 	if (dst_addr->sa_family == AF_INET) {
1686 		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
1687 		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
1688 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
1689 				  (char *)&addr->sin_addr));
1690 	} else if (dst_addr->sa_family == AF_INET6) {
1691 		memcpy(&qla_ep->dst_addr, dst_addr,
1692 		       sizeof(struct sockaddr_in6));
1693 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
1694 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
1695 				  (char *)&addr6->sin6_addr));
1696 	} else {
1697 		ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
1698 			   __func__);
		iscsi_destroy_endpoint(ep);
		return ERR_PTR(-EINVAL);
1699 	}
1700 
1701 	qla_ep->host = shost;
1702 
1703 	return ep;
1704 }
1705 
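/**
 * qla4xxx_ep_poll - report whether an endpoint is ready for use
 *
 * Returns 1 when the adapter is up and the firmware DDB list is not
 * being rebuilt, 0 otherwise.  The timeout argument is not used.
 **/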
1706 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1707 {
1708 	struct qla_endpoint *qla_ep;
1709 	struct scsi_qla_host *ha;
1710 	int ret = 0;
1711 
1712 	qla_ep = ep->dd_data;
1713 	ha = to_qla_host(qla_ep->host);
1714 	DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
1715 
1716 	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
1717 		ret = 1;
1718 
1719 	return ret;
1720 }
1721 
1722 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
1723 {
1724 	struct qla_endpoint *qla_ep;
1725 	struct scsi_qla_host *ha;
1726 
1727 	qla_ep = ep->dd_data;
1728 	ha = to_qla_host(qla_ep->host);
1729 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1730 			  ha->host_no));
1731 	iscsi_destroy_endpoint(ep);
1732 }
1733 
1734 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
1735 				enum iscsi_param param,
1736 				char *buf)
1737 {
1738 	struct qla_endpoint *qla_ep = ep->dd_data;
1739 	struct sockaddr *dst_addr;
1740 	struct scsi_qla_host *ha;
1741 
1742 	if (!qla_ep)
1743 		return -ENOTCONN;
1744 
1745 	ha = to_qla_host(qla_ep->host);
1746 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1747 			  ha->host_no));
1748 
1749 	switch (param) {
1750 	case ISCSI_PARAM_CONN_PORT:
1751 	case ISCSI_PARAM_CONN_ADDRESS:
1752 		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1753 		if (!dst_addr)
1754 			return -ENOTCONN;
1755 
1756 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1757 						 &qla_ep->dst_addr, param, buf);
1758 	default:
1759 		return -ENOSYS;
1760 	}
1761 }
1762 
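/**
 * qla4xxx_conn_get_stats - fill in iSCSI statistics for a connection
 *
 * The per-DDB counters are read from the firmware into a DMA buffer
 * via qla4xxx_get_mgmt_data() and then converted from little-endian
 * to host order into the iscsi_stats structure.
 **/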
1763 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1764 				   struct iscsi_stats *stats)
1765 {
1766 	struct iscsi_session *sess;
1767 	struct iscsi_cls_session *cls_sess;
1768 	struct ddb_entry *ddb_entry;
1769 	struct scsi_qla_host *ha;
1770 	struct ql_iscsi_stats *ql_iscsi_stats;
1771 	int stats_size;
1772 	int ret;
1773 	dma_addr_t iscsi_stats_dma;
1774 
1775 	cls_sess = iscsi_conn_to_session(cls_conn);
1776 	sess = cls_sess->dd_data;
1777 	ddb_entry = sess->dd_data;
1778 	ha = ddb_entry->ha;
1779 
1780 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1781 			  ha->host_no));
1782 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
1783 	/* Allocate memory */
1784 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
1785 					    &iscsi_stats_dma, GFP_KERNEL);
1786 	if (!ql_iscsi_stats) {
1787 		ql4_printk(KERN_ERR, ha,
1788 			   "Unable to allocate memory for iscsi stats\n");
1789 		goto exit_get_stats;
1790 	}
1791 
1792 	ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
1793 				    iscsi_stats_dma);
1794 	if (ret != QLA_SUCCESS) {
1795 		ql4_printk(KERN_ERR, ha,
1796 			   "Unable to retrieve iscsi stats\n");
1797 		goto free_stats;
1798 	}
1799 
1800 	/* octets */
1801 	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
1802 	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
1803 	/* xmit pdus */
1804 	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
1805 	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
1806 	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
1807 	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
1808 	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
1809 	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
1810 	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
1811 	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
1812 	/* recv pdus */
1813 	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
1814 	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
1815 	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
1816 	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
1817 	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
1818 	stats->logoutrsp_pdus =
1819 			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
1820 	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
1821 	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
1822 	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
1823 
1824 free_stats:
1825 	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
1826 			  iscsi_stats_dma);
1827 exit_get_stats:
1828 	return;
1829 }
1830 
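/**
 * qla4xxx_eh_cmd_timed_out - SCSI command timeout handler
 *
 * While the session is in ISCSI_SESSION_FAILED state the block layer
 * timer is reset so that session recovery gets a chance to complete
 * before the command is escalated to the error handler.
 **/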
1831 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
1832 {
1833 	struct iscsi_cls_session *session;
1834 	struct iscsi_session *sess;
1835 	unsigned long flags;
1836 	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
1837 
1838 	session = starget_to_session(scsi_target(sc->device));
1839 	sess = session->dd_data;
1840 
1841 	spin_lock_irqsave(&session->lock, flags);
1842 	if (session->state == ISCSI_SESSION_FAILED)
1843 		ret = BLK_EH_RESET_TIMER;
1844 	spin_unlock_irqrestore(&session->lock, flags);
1845 
1846 	return ret;
1847 }
1848 
1849 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
1850 {
1851 	struct scsi_qla_host *ha = to_qla_host(shost);
1852 	struct iscsi_cls_host *ihost = shost->shost_data;
1853 	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
1854 
1855 	qla4xxx_get_firmware_state(ha);
1856 
1857 	switch (ha->addl_fw_state & 0x0F00) {
1858 	case FW_ADDSTATE_LINK_SPEED_10MBPS:
1859 		speed = ISCSI_PORT_SPEED_10MBPS;
1860 		break;
1861 	case FW_ADDSTATE_LINK_SPEED_100MBPS:
1862 		speed = ISCSI_PORT_SPEED_100MBPS;
1863 		break;
1864 	case FW_ADDSTATE_LINK_SPEED_1GBPS:
1865 		speed = ISCSI_PORT_SPEED_1GBPS;
1866 		break;
1867 	case FW_ADDSTATE_LINK_SPEED_10GBPS:
1868 		speed = ISCSI_PORT_SPEED_10GBPS;
1869 		break;
1870 	}
1871 	ihost->port_speed = speed;
1872 }
1873 
1874 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
1875 {
1876 	struct scsi_qla_host *ha = to_qla_host(shost);
1877 	struct iscsi_cls_host *ihost = shost->shost_data;
1878 	uint32_t state = ISCSI_PORT_STATE_DOWN;
1879 
1880 	if (test_bit(AF_LINK_UP, &ha->flags))
1881 		state = ISCSI_PORT_STATE_UP;
1882 
1883 	ihost->port_state = state;
1884 }
1885 
1886 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
1887 				  enum iscsi_host_param param, char *buf)
1888 {
1889 	struct scsi_qla_host *ha = to_qla_host(shost);
1890 	int len;
1891 
1892 	switch (param) {
1893 	case ISCSI_HOST_PARAM_HWADDRESS:
1894 		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
1895 		break;
1896 	case ISCSI_HOST_PARAM_IPADDRESS:
1897 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1898 		break;
1899 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
1900 		len = sprintf(buf, "%s\n", ha->name_string);
1901 		break;
1902 	case ISCSI_HOST_PARAM_PORT_STATE:
1903 		qla4xxx_set_port_state(shost);
1904 		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
1905 		break;
1906 	case ISCSI_HOST_PARAM_PORT_SPEED:
1907 		qla4xxx_set_port_speed(shost);
1908 		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
1909 		break;
1910 	default:
1911 		return -ENOSYS;
1912 	}
1913 
1914 	return len;
1915 }
1916 
1917 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1918 {
1919 	if (ha->iface_ipv4)
1920 		return;
1921 
1922 	/* IPv4 */
1923 	ha->iface_ipv4 = iscsi_create_iface(ha->host,
1924 					    &qla4xxx_iscsi_transport,
1925 					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
1926 	if (!ha->iface_ipv4)
1927 		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1928 			   "iface0.\n");
1929 }
1930 
1931 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1932 {
1933 	if (!ha->iface_ipv6_0)
1934 		/* IPv6 iface-0 */
1935 		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1936 						      &qla4xxx_iscsi_transport,
1937 						      ISCSI_IFACE_TYPE_IPV6, 0,
1938 						      0);
1939 	if (!ha->iface_ipv6_0)
1940 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1941 			   "iface0.\n");
1942 
1943 	if (!ha->iface_ipv6_1)
1944 		/* IPv6 iface-1 */
1945 		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1946 						      &qla4xxx_iscsi_transport,
1947 						      ISCSI_IFACE_TYPE_IPV6, 1,
1948 						      0);
1949 	if (!ha->iface_ipv6_1)
1950 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1951 			   "iface1.\n");
1952 }
1953 
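/**
 * qla4xxx_create_ifaces - create the enabled iSCSI transport ifaces
 *
 * One IPv4 iface and up to two IPv6 ifaces are created, depending on
 * which protocols are enabled in the firmware IP configuration.
 **/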
1954 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1955 {
1956 	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1957 		qla4xxx_create_ipv4_iface(ha);
1958 
1959 	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1960 		qla4xxx_create_ipv6_iface(ha);
1961 }
1962 
1963 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1964 {
1965 	if (ha->iface_ipv4) {
1966 		iscsi_destroy_iface(ha->iface_ipv4);
1967 		ha->iface_ipv4 = NULL;
1968 	}
1969 }
1970 
1971 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1972 {
1973 	if (ha->iface_ipv6_0) {
1974 		iscsi_destroy_iface(ha->iface_ipv6_0);
1975 		ha->iface_ipv6_0 = NULL;
1976 	}
1977 	if (ha->iface_ipv6_1) {
1978 		iscsi_destroy_iface(ha->iface_ipv6_1);
1979 		ha->iface_ipv6_1 = NULL;
1980 	}
1981 }
1982 
1983 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
1984 {
1985 	qla4xxx_destroy_ipv4_iface(ha);
1986 	qla4xxx_destroy_ipv6_iface(ha);
1987 }
1988 
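/**
 * qla4xxx_set_ipv6 - apply one IPv6 network parameter to the IFCB
 *
 * The low bit of iface_param->iface_num selects the address slot,
 * e.g. ISCSI_NET_PARAM_IPV6_ADDR with iface_num 1 updates ipv6_addr1;
 * most other parameters are only accepted for the even (0) interface.
 **/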
1989 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
1990 			     struct iscsi_iface_param_info *iface_param,
1991 			     struct addr_ctrl_blk *init_fw_cb)
1992 {
1993 	/*
1994 	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
1995 	 * iface_num 1 is valid only for IPv6 Addr.
1996 	 */
1997 	switch (iface_param->param) {
1998 	case ISCSI_NET_PARAM_IPV6_ADDR:
1999 		if (iface_param->iface_num & 0x1)
2000 			/* IPv6 Addr 1 */
2001 			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
2002 			       sizeof(init_fw_cb->ipv6_addr1));
2003 		else
2004 			/* IPv6 Addr 0 */
2005 			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
2006 			       sizeof(init_fw_cb->ipv6_addr0));
2007 		break;
2008 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
2009 		if (iface_param->iface_num & 0x1)
2010 			break;
2011 		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
2012 		       sizeof(init_fw_cb->ipv6_if_id));
2013 		break;
2014 	case ISCSI_NET_PARAM_IPV6_ROUTER:
2015 		if (iface_param->iface_num & 0x1)
2016 			break;
2017 		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
2018 		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
2019 		break;
2020 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
2021 		/* Autocfg applies to even interface */
2022 		if (iface_param->iface_num & 0x1)
2023 			break;
2024 
2025 		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
2026 			init_fw_cb->ipv6_addtl_opts &=
2027 				cpu_to_le16(
2028 				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
2029 		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
2030 			init_fw_cb->ipv6_addtl_opts |=
2031 				cpu_to_le16(
2032 				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
2033 		else
2034 			ql4_printk(KERN_ERR, ha,
2035 				   "Invalid autocfg setting for IPv6 addr\n");
2036 		break;
2037 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
2038 		/* Autocfg applies to even interface */
2039 		if (iface_param->iface_num & 0x1)
2040 			break;
2041 
2042 		if (iface_param->value[0] ==
2043 		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
2044 			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
2045 					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
2046 		else if (iface_param->value[0] ==
2047 			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
2048 			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
2049 				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
2050 		else
2051 			ql4_printk(KERN_ERR, ha,
2052 				   "Invalid autocfg setting for IPv6 linklocal addr\n");
2053 		break;
2054 	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
2055 		/* Autocfg applies to even interface */
2056 		if (iface_param->iface_num & 0x1)
2057 			break;
2058 
2059 		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
2060 			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
2061 			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
2062 		break;
2063 	case ISCSI_NET_PARAM_IFACE_ENABLE:
2064 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
2065 			init_fw_cb->ipv6_opts |=
2066 				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
2067 			qla4xxx_create_ipv6_iface(ha);
2068 		} else {
2069 			init_fw_cb->ipv6_opts &=
2070 				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
2071 					    0xFFFF);
2072 			qla4xxx_destroy_ipv6_iface(ha);
2073 		}
2074 		break;
2075 	case ISCSI_NET_PARAM_VLAN_TAG:
2076 		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
2077 			break;
2078 		init_fw_cb->ipv6_vlan_tag =
2079 				cpu_to_be16(*(uint16_t *)iface_param->value);
2080 		break;
2081 	case ISCSI_NET_PARAM_VLAN_ENABLED:
2082 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
2083 			init_fw_cb->ipv6_opts |=
2084 				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
2085 		else
2086 			init_fw_cb->ipv6_opts &=
2087 				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
2088 		break;
2089 	case ISCSI_NET_PARAM_MTU:
2090 		init_fw_cb->eth_mtu_size =
2091 				cpu_to_le16(*(uint16_t *)iface_param->value);
2092 		break;
2093 	case ISCSI_NET_PARAM_PORT:
2094 		/* Autocfg applies to even interface */
2095 		if (iface_param->iface_num & 0x1)
2096 			break;
2097 
2098 		init_fw_cb->ipv6_port =
2099 				cpu_to_le16(*(uint16_t *)iface_param->value);
2100 		break;
2101 	case ISCSI_NET_PARAM_DELAYED_ACK_EN:
2102 		if (iface_param->iface_num & 0x1)
2103 			break;
2104 		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2105 			init_fw_cb->ipv6_tcp_opts |=
2106 				cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
2107 		else
2108 			init_fw_cb->ipv6_tcp_opts &=
2109 				cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
2110 					    0xFFFF);
2111 		break;
2112 	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
2113 		if (iface_param->iface_num & 0x1)
2114 			break;
2115 		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2116 			init_fw_cb->ipv6_tcp_opts |=
2117 				cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
2118 		else
2119 			init_fw_cb->ipv6_tcp_opts &=
2120 				cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
2121 		break;
2122 	case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
2123 		if (iface_param->iface_num & 0x1)
2124 			break;
2125 		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2126 			init_fw_cb->ipv6_tcp_opts |=
2127 				cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
2128 		else
2129 			init_fw_cb->ipv6_tcp_opts &=
2130 				cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
2131 		break;
2132 	case ISCSI_NET_PARAM_TCP_WSF:
2133 		if (iface_param->iface_num & 0x1)
2134 			break;
2135 		init_fw_cb->ipv6_tcp_wsf = iface_param->value[0];
2136 		break;
2137 	case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
2138 		if (iface_param->iface_num & 0x1)
2139 			break;
2140 		init_fw_cb->ipv6_tcp_opts &=
2141 					cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE);
2142 		init_fw_cb->ipv6_tcp_opts |=
2143 				cpu_to_le16((iface_param->value[0] << 1) &
2144 					    IPV6_TCPOPT_TIMER_SCALE);
2145 		break;
2146 	case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
2147 		if (iface_param->iface_num & 0x1)
2148 			break;
2149 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2150 			init_fw_cb->ipv6_tcp_opts |=
2151 				cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN);
2152 		else
2153 			init_fw_cb->ipv6_tcp_opts &=
2154 				cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN);
2155 		break;
2156 	case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
2157 		if (iface_param->iface_num & 0x1)
2158 			break;
2159 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2160 			init_fw_cb->ipv6_opts |=
2161 				cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
2162 		else
2163 			init_fw_cb->ipv6_opts &=
2164 				cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
2165 		break;
2166 	case ISCSI_NET_PARAM_REDIRECT_EN:
2167 		if (iface_param->iface_num & 0x1)
2168 			break;
2169 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2170 			init_fw_cb->ipv6_opts |=
2171 				cpu_to_le16(IPV6_OPT_REDIRECT_EN);
2172 		else
2173 			init_fw_cb->ipv6_opts &=
2174 				cpu_to_le16(~IPV6_OPT_REDIRECT_EN);
2175 		break;
2176 	case ISCSI_NET_PARAM_IPV6_MLD_EN:
2177 		if (iface_param->iface_num & 0x1)
2178 			break;
2179 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2180 			init_fw_cb->ipv6_addtl_opts |=
2181 				cpu_to_le16(IPV6_ADDOPT_MLD_EN);
2182 		else
2183 			init_fw_cb->ipv6_addtl_opts &=
2184 				cpu_to_le16(~IPV6_ADDOPT_MLD_EN);
2185 		break;
2186 	case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
2187 		if (iface_param->iface_num & 0x1)
2188 			break;
2189 		init_fw_cb->ipv6_flow_lbl =
2190 				cpu_to_le16(*(uint16_t *)iface_param->value);
2191 		break;
2192 	case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
2193 		if (iface_param->iface_num & 0x1)
2194 			break;
2195 		init_fw_cb->ipv6_traffic_class = iface_param->value[0];
2196 		break;
2197 	case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
2198 		if (iface_param->iface_num & 0x1)
2199 			break;
2200 		init_fw_cb->ipv6_hop_limit = iface_param->value[0];
2201 		break;
2202 	case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
2203 		if (iface_param->iface_num & 0x1)
2204 			break;
2205 		init_fw_cb->ipv6_nd_reach_time =
2206 				cpu_to_le32(*(uint32_t *)iface_param->value);
2207 		break;
2208 	case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
2209 		if (iface_param->iface_num & 0x1)
2210 			break;
2211 		init_fw_cb->ipv6_nd_rexmit_timer =
2212 				cpu_to_le32(*(uint32_t *)iface_param->value);
2213 		break;
2214 	case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
2215 		if (iface_param->iface_num & 0x1)
2216 			break;
2217 		init_fw_cb->ipv6_nd_stale_timeout =
2218 				cpu_to_le32(*(uint32_t *)iface_param->value);
2219 		break;
2220 	case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
2221 		if (iface_param->iface_num & 0x1)
2222 			break;
2223 		init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0];
2224 		break;
2225 	case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
2226 		if (iface_param->iface_num & 0x1)
2227 			break;
2228 		init_fw_cb->ipv6_gw_advrt_mtu =
2229 				cpu_to_le32(*(uint32_t *)iface_param->value);
2230 		break;
2231 	default:
2232 		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
2233 			   iface_param->param);
2234 		break;
2235 	}
2236 }
2237 
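/**
 * qla4xxx_set_ipv4 - apply one IPv4 network parameter to the IFCB
 *
 * Addresses are copied verbatim, while enable/disable style parameters
 * are translated into the corresponding ipv4_ip_opts/ipv4_tcp_opts
 * bits of the address control block.
 **/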
2238 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
2239 			     struct iscsi_iface_param_info *iface_param,
2240 			     struct addr_ctrl_blk *init_fw_cb)
2241 {
2242 	switch (iface_param->param) {
2243 	case ISCSI_NET_PARAM_IPV4_ADDR:
2244 		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
2245 		       sizeof(init_fw_cb->ipv4_addr));
2246 		break;
2247 	case ISCSI_NET_PARAM_IPV4_SUBNET:
2248 		memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
2249 		       sizeof(init_fw_cb->ipv4_subnet));
2250 		break;
2251 	case ISCSI_NET_PARAM_IPV4_GW:
2252 		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
2253 		       sizeof(init_fw_cb->ipv4_gw_addr));
2254 		break;
2255 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
2256 		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
2257 			init_fw_cb->ipv4_tcp_opts |=
2258 					cpu_to_le16(TCPOPT_DHCP_ENABLE);
2259 		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
2260 			init_fw_cb->ipv4_tcp_opts &=
2261 					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
2262 		else
2263 			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
2264 		break;
2265 	case ISCSI_NET_PARAM_IFACE_ENABLE:
2266 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
2267 			init_fw_cb->ipv4_ip_opts |=
2268 				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
2269 			qla4xxx_create_ipv4_iface(ha);
2270 		} else {
2271 			init_fw_cb->ipv4_ip_opts &=
2272 				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
2273 					    0xFFFF);
2274 			qla4xxx_destroy_ipv4_iface(ha);
2275 		}
2276 		break;
2277 	case ISCSI_NET_PARAM_VLAN_TAG:
2278 		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
2279 			break;
2280 		init_fw_cb->ipv4_vlan_tag =
2281 				cpu_to_be16(*(uint16_t *)iface_param->value);
2282 		break;
2283 	case ISCSI_NET_PARAM_VLAN_ENABLED:
2284 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
2285 			init_fw_cb->ipv4_ip_opts |=
2286 					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
2287 		else
2288 			init_fw_cb->ipv4_ip_opts &=
2289 					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
2290 		break;
2291 	case ISCSI_NET_PARAM_MTU:
2292 		init_fw_cb->eth_mtu_size =
2293 				cpu_to_le16(*(uint16_t *)iface_param->value);
2294 		break;
2295 	case ISCSI_NET_PARAM_PORT:
2296 		init_fw_cb->ipv4_port =
2297 				cpu_to_le16(*(uint16_t *)iface_param->value);
2298 		break;
2299 	case ISCSI_NET_PARAM_DELAYED_ACK_EN:
2300 		if (iface_param->iface_num & 0x1)
2301 			break;
2302 		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2303 			init_fw_cb->ipv4_tcp_opts |=
2304 				cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
2305 		else
2306 			init_fw_cb->ipv4_tcp_opts &=
2307 				cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
2308 					    0xFFFF);
2309 		break;
2310 	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
2311 		if (iface_param->iface_num & 0x1)
2312 			break;
2313 		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2314 			init_fw_cb->ipv4_tcp_opts |=
2315 				cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE);
2316 		else
2317 			init_fw_cb->ipv4_tcp_opts &=
2318 				cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE);
2319 		break;
2320 	case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
2321 		if (iface_param->iface_num & 0x1)
2322 			break;
2323 		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2324 			init_fw_cb->ipv4_tcp_opts |=
2325 				cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE);
2326 		else
2327 			init_fw_cb->ipv4_tcp_opts &=
2328 				cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE);
2329 		break;
2330 	case ISCSI_NET_PARAM_TCP_WSF:
2331 		if (iface_param->iface_num & 0x1)
2332 			break;
2333 		init_fw_cb->ipv4_tcp_wsf = iface_param->value[0];
2334 		break;
2335 	case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
2336 		if (iface_param->iface_num & 0x1)
2337 			break;
2338 		init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE);
2339 		init_fw_cb->ipv4_tcp_opts |=
2340 				cpu_to_le16((iface_param->value[0] << 1) &
2341 					    TCPOPT_TIMER_SCALE);
2342 		break;
2343 	case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
2344 		if (iface_param->iface_num & 0x1)
2345 			break;
2346 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2347 			init_fw_cb->ipv4_tcp_opts |=
2348 				cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE);
2349 		else
2350 			init_fw_cb->ipv4_tcp_opts &=
2351 				cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE);
2352 		break;
2353 	case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
2354 		if (iface_param->iface_num & 0x1)
2355 			break;
2356 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2357 			init_fw_cb->ipv4_tcp_opts |=
2358 				cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN);
2359 		else
2360 			init_fw_cb->ipv4_tcp_opts &=
2361 				cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN);
2362 		break;
2363 	case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
2364 		if (iface_param->iface_num & 0x1)
2365 			break;
2366 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2367 			init_fw_cb->ipv4_tcp_opts |=
2368 				cpu_to_le16(TCPOPT_SLP_DA_INFO_EN);
2369 		else
2370 			init_fw_cb->ipv4_tcp_opts &=
2371 				cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN);
2372 		break;
2373 	case ISCSI_NET_PARAM_IPV4_TOS_EN:
2374 		if (iface_param->iface_num & 0x1)
2375 			break;
2376 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2377 			init_fw_cb->ipv4_ip_opts |=
2378 				cpu_to_le16(IPOPT_IPV4_TOS_EN);
2379 		else
2380 			init_fw_cb->ipv4_ip_opts &=
2381 				cpu_to_le16(~IPOPT_IPV4_TOS_EN);
2382 		break;
2383 	case ISCSI_NET_PARAM_IPV4_TOS:
2384 		if (iface_param->iface_num & 0x1)
2385 			break;
2386 		init_fw_cb->ipv4_tos = iface_param->value[0];
2387 		break;
2388 	case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
2389 		if (iface_param->iface_num & 0x1)
2390 			break;
2391 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2392 			init_fw_cb->ipv4_ip_opts |=
2393 					cpu_to_le16(IPOPT_GRAT_ARP_EN);
2394 		else
2395 			init_fw_cb->ipv4_ip_opts &=
2396 					cpu_to_le16(~IPOPT_GRAT_ARP_EN);
2397 		break;
2398 	case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
2399 		if (iface_param->iface_num & 0x1)
2400 			break;
2401 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2402 			init_fw_cb->ipv4_ip_opts |=
2403 				cpu_to_le16(IPOPT_ALT_CID_EN);
2404 		else
2405 			init_fw_cb->ipv4_ip_opts &=
2406 				cpu_to_le16(~IPOPT_ALT_CID_EN);
2407 		break;
2408 	case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
2409 		if (iface_param->iface_num & 0x1)
2410 			break;
2411 		memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value,
2412 		       (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1));
2413 		init_fw_cb->ipv4_dhcp_alt_cid_len =
2414 					strlen(init_fw_cb->ipv4_dhcp_alt_cid);
2415 		break;
2416 	case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
2417 		if (iface_param->iface_num & 0x1)
2418 			break;
2419 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2420 			init_fw_cb->ipv4_ip_opts |=
2421 					cpu_to_le16(IPOPT_REQ_VID_EN);
2422 		else
2423 			init_fw_cb->ipv4_ip_opts &=
2424 					cpu_to_le16(~IPOPT_REQ_VID_EN);
2425 		break;
2426 	case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
2427 		if (iface_param->iface_num & 0x1)
2428 			break;
2429 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2430 			init_fw_cb->ipv4_ip_opts |=
2431 					cpu_to_le16(IPOPT_USE_VID_EN);
2432 		else
2433 			init_fw_cb->ipv4_ip_opts &=
2434 					cpu_to_le16(~IPOPT_USE_VID_EN);
2435 		break;
2436 	case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
2437 		if (iface_param->iface_num & 0x1)
2438 			break;
2439 		memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value,
2440 		       (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1));
2441 		init_fw_cb->ipv4_dhcp_vid_len =
2442 					strlen(init_fw_cb->ipv4_dhcp_vid);
2443 		break;
2444 	case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
2445 		if (iface_param->iface_num & 0x1)
2446 			break;
2447 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2448 			init_fw_cb->ipv4_ip_opts |=
2449 					cpu_to_le16(IPOPT_LEARN_IQN_EN);
2450 		else
2451 			init_fw_cb->ipv4_ip_opts &=
2452 					cpu_to_le16(~IPOPT_LEARN_IQN_EN);
2453 		break;
2454 	case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
2455 		if (iface_param->iface_num & 0x1)
2456 			break;
2457 		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2458 			init_fw_cb->ipv4_ip_opts |=
2459 				cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE);
2460 		else
2461 			init_fw_cb->ipv4_ip_opts &=
2462 				cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE);
2463 		break;
2464 	case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
2465 		if (iface_param->iface_num & 0x1)
2466 			break;
2467 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2468 			init_fw_cb->ipv4_ip_opts |=
2469 				cpu_to_le16(IPOPT_IN_FORWARD_EN);
2470 		else
2471 			init_fw_cb->ipv4_ip_opts &=
2472 				cpu_to_le16(~IPOPT_IN_FORWARD_EN);
2473 		break;
2474 	case ISCSI_NET_PARAM_REDIRECT_EN:
2475 		if (iface_param->iface_num & 0x1)
2476 			break;
2477 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2478 			init_fw_cb->ipv4_ip_opts |=
2479 				cpu_to_le16(IPOPT_ARP_REDIRECT_EN);
2480 		else
2481 			init_fw_cb->ipv4_ip_opts &=
2482 				cpu_to_le16(~IPOPT_ARP_REDIRECT_EN);
2483 		break;
2484 	case ISCSI_NET_PARAM_IPV4_TTL:
2485 		if (iface_param->iface_num & 0x1)
2486 			break;
2487 		init_fw_cb->ipv4_ttl = iface_param->value[0];
2488 		break;
2489 	default:
2490 		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
2491 			   iface_param->param);
2492 		break;
2493 	}
2494 }
2495 
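/**
 * qla4xxx_set_iscsi_param - apply one iSCSI parameter to the IFCB
 *
 * Digest, R2T, burst-length and CHAP settings are folded into the
 * iscsi_opts bits and limit fields of the address control block;
 * odd iface numbers are ignored.
 **/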
2496 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha,
2497 				    struct iscsi_iface_param_info *iface_param,
2498 				    struct addr_ctrl_blk *init_fw_cb)
2499 {
2500 	switch (iface_param->param) {
2501 	case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
2502 		if (iface_param->iface_num & 0x1)
2503 			break;
2504 		init_fw_cb->def_timeout =
2505 				cpu_to_le16(*(uint16_t *)iface_param->value);
2506 		break;
2507 	case ISCSI_IFACE_PARAM_HDRDGST_EN:
2508 		if (iface_param->iface_num & 0x1)
2509 			break;
2510 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2511 			init_fw_cb->iscsi_opts |=
2512 				cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN);
2513 		else
2514 			init_fw_cb->iscsi_opts &=
2515 				cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN);
2516 		break;
2517 	case ISCSI_IFACE_PARAM_DATADGST_EN:
2518 		if (iface_param->iface_num & 0x1)
2519 			break;
2520 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2521 			init_fw_cb->iscsi_opts |=
2522 				cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN);
2523 		else
2524 			init_fw_cb->iscsi_opts &=
2525 				cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN);
2526 		break;
2527 	case ISCSI_IFACE_PARAM_IMM_DATA_EN:
2528 		if (iface_param->iface_num & 0x1)
2529 			break;
2530 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2531 			init_fw_cb->iscsi_opts |=
2532 				cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN);
2533 		else
2534 			init_fw_cb->iscsi_opts &=
2535 				cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN);
2536 		break;
2537 	case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
2538 		if (iface_param->iface_num & 0x1)
2539 			break;
2540 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2541 			init_fw_cb->iscsi_opts |=
2542 				cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN);
2543 		else
2544 			init_fw_cb->iscsi_opts &=
2545 				cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN);
2546 		break;
2547 	case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
2548 		if (iface_param->iface_num & 0x1)
2549 			break;
2550 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2551 			init_fw_cb->iscsi_opts |=
2552 				cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN);
2553 		else
2554 			init_fw_cb->iscsi_opts &=
2555 				cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN);
2556 		break;
2557 	case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
2558 		if (iface_param->iface_num & 0x1)
2559 			break;
2560 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2561 			init_fw_cb->iscsi_opts |=
2562 				cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN);
2563 		else
2564 			init_fw_cb->iscsi_opts &=
2565 				cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN);
2566 		break;
2567 	case ISCSI_IFACE_PARAM_ERL:
2568 		if (iface_param->iface_num & 0x1)
2569 			break;
2570 		init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL);
2571 		init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] &
2572 						      ISCSIOPTS_ERL);
2573 		break;
2574 	case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
2575 		if (iface_param->iface_num & 0x1)
2576 			break;
2577 		init_fw_cb->iscsi_max_pdu_size =
2578 				cpu_to_le32(*(uint32_t *)iface_param->value) /
2579 				BYTE_UNITS;
2580 		break;
2581 	case ISCSI_IFACE_PARAM_FIRST_BURST:
2582 		if (iface_param->iface_num & 0x1)
2583 			break;
2584 		init_fw_cb->iscsi_fburst_len =
2585 				cpu_to_le32(*(uint32_t *)iface_param->value) /
2586 				BYTE_UNITS;
2587 		break;
2588 	case ISCSI_IFACE_PARAM_MAX_R2T:
2589 		if (iface_param->iface_num & 0x1)
2590 			break;
2591 		init_fw_cb->iscsi_max_outstnd_r2t =
2592 				cpu_to_le16(*(uint16_t *)iface_param->value);
2593 		break;
2594 	case ISCSI_IFACE_PARAM_MAX_BURST:
2595 		if (iface_param->iface_num & 0x1)
2596 			break;
2597 		init_fw_cb->iscsi_max_burst_len =
2598 				cpu_to_le32(*(uint32_t *)iface_param->value) /
2599 				BYTE_UNITS;
2600 		break;
2601 	case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
2602 		if (iface_param->iface_num & 0x1)
2603 			break;
2604 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2605 			init_fw_cb->iscsi_opts |=
2606 				cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN);
2607 		else
2608 			init_fw_cb->iscsi_opts &=
2609 				cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN);
2610 		break;
2611 	case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
2612 		if (iface_param->iface_num & 0x1)
2613 			break;
2614 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2615 			init_fw_cb->iscsi_opts |=
2616 				cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN);
2617 		else
2618 			init_fw_cb->iscsi_opts &=
2619 				cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN);
2620 		break;
2621 	case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
2622 		if (iface_param->iface_num & 0x1)
2623 			break;
2624 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2625 			init_fw_cb->iscsi_opts |=
2626 				cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN);
2627 		else
2628 			init_fw_cb->iscsi_opts &=
2629 				cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN);
2630 		break;
2631 	case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
2632 		if (iface_param->iface_num & 0x1)
2633 			break;
2634 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2635 			init_fw_cb->iscsi_opts |=
2636 				cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN);
2637 		else
2638 			init_fw_cb->iscsi_opts &=
2639 				cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN);
2640 		break;
2641 	case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
2642 		if (iface_param->iface_num & 0x1)
2643 			break;
2644 		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2645 			init_fw_cb->iscsi_opts |=
2646 				cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN);
2647 		else
2648 			init_fw_cb->iscsi_opts &=
2649 				cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN);
2650 		break;
2651 	default:
2652 		ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n",
2653 			   iface_param->param);
2654 		break;
2655 	}
2656 }
2657 
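/**
 * qla4xxx_initcb_to_acb - prepare an init_fw_cb buffer for SET_ACB
 *
 * The reserved fields of the addr_ctrl_blk_def layout are cleared
 * before the buffer is handed to the set ACB mailbox command.
 **/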
2658 static void
2659 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
2660 {
2661 	struct addr_ctrl_blk_def *acb;
2662 	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
2663 	memset(acb->reserved1, 0, sizeof(acb->reserved1));
2664 	memset(acb->reserved2, 0, sizeof(acb->reserved2));
2665 	memset(acb->reserved3, 0, sizeof(acb->reserved3));
2666 	memset(acb->reserved4, 0, sizeof(acb->reserved4));
2667 	memset(acb->reserved5, 0, sizeof(acb->reserved5));
2668 	memset(acb->reserved6, 0, sizeof(acb->reserved6));
2669 	memset(acb->reserved7, 0, sizeof(acb->reserved7));
2670 	memset(acb->reserved8, 0, sizeof(acb->reserved8));
2671 	memset(acb->reserved9, 0, sizeof(acb->reserved9));
2672 	memset(acb->reserved10, 0, sizeof(acb->reserved10));
2673 	memset(acb->reserved11, 0, sizeof(acb->reserved11));
2674 	memset(acb->reserved12, 0, sizeof(acb->reserved12));
2675 	memset(acb->reserved13, 0, sizeof(acb->reserved13));
2676 	memset(acb->reserved14, 0, sizeof(acb->reserved14));
2677 	memset(acb->reserved15, 0, sizeof(acb->reserved15));
2678 }
2679 
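/**
 * qla4xxx_iface_set_param - apply iface parameters from user space
 *
 * The sequence is: read the current IFCB from the firmware, fold each
 * netlink attribute into it via qla4xxx_set_ipv4/ipv6/iscsi_param,
 * write the result to flash, disable the current ACB, convert the
 * IFCB to an ACB and set it, then refresh the driver's local copy.
 **/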
2680 static int
2681 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
2682 {
2683 	struct scsi_qla_host *ha = to_qla_host(shost);
2684 	int rval = 0;
2685 	struct iscsi_iface_param_info *iface_param = NULL;
2686 	struct addr_ctrl_blk *init_fw_cb = NULL;
2687 	dma_addr_t init_fw_cb_dma;
2688 	uint32_t mbox_cmd[MBOX_REG_COUNT];
2689 	uint32_t mbox_sts[MBOX_REG_COUNT];
2690 	uint32_t rem = len;
2691 	struct nlattr *attr;
2692 
2693 	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
2694 					sizeof(struct addr_ctrl_blk),
2695 					&init_fw_cb_dma, GFP_KERNEL);
2696 	if (!init_fw_cb) {
2697 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
2698 			   __func__);
2699 		return -ENOMEM;
2700 	}
2701 
2702 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
2703 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
2704 	memset(&mbox_sts, 0, sizeof(mbox_sts));
2705 
2706 	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
2707 		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
2708 		rval = -EIO;
2709 		goto exit_init_fw_cb;
2710 	}
2711 
2712 	nla_for_each_attr(attr, data, len, rem) {
2713 		iface_param = nla_data(attr);
2714 
2715 		if (iface_param->param_type == ISCSI_NET_PARAM) {
2716 			switch (iface_param->iface_type) {
2717 			case ISCSI_IFACE_TYPE_IPV4:
2718 				switch (iface_param->iface_num) {
2719 				case 0:
2720 					qla4xxx_set_ipv4(ha, iface_param,
2721 							 init_fw_cb);
2722 					break;
2723 				default:
2724 				/* Cannot have more than one IPv4 interface */
2725 					ql4_printk(KERN_ERR, ha,
2726 						   "Invalid IPv4 iface number = %d\n",
2727 						   iface_param->iface_num);
2728 					break;
2729 				}
2730 				break;
2731 			case ISCSI_IFACE_TYPE_IPV6:
2732 				switch (iface_param->iface_num) {
2733 				case 0:
2734 				case 1:
2735 					qla4xxx_set_ipv6(ha, iface_param,
2736 							 init_fw_cb);
2737 					break;
2738 				default:
2739 				/* Cannot have more than two IPv6 interfaces */
2740 					ql4_printk(KERN_ERR, ha,
2741 						   "Invalid IPv6 iface number = %d\n",
2742 						   iface_param->iface_num);
2743 					break;
2744 				}
2745 				break;
2746 			default:
2747 				ql4_printk(KERN_ERR, ha,
2748 					   "Invalid iface type\n");
2749 				break;
2750 			}
2751 		} else if (iface_param->param_type == ISCSI_IFACE_PARAM) {
2752 			qla4xxx_set_iscsi_param(ha, iface_param,
2753 						init_fw_cb);
2754 		} else {
2755 			continue;
2756 		}
2757 	}
2758 
2759 	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
2760 
2761 	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
2762 				 sizeof(struct addr_ctrl_blk),
2763 				 FLASH_OPT_RMW_COMMIT);
2764 	if (rval != QLA_SUCCESS) {
2765 		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
2766 			   __func__);
2767 		rval = -EIO;
2768 		goto exit_init_fw_cb;
2769 	}
2770 
2771 	rval = qla4xxx_disable_acb(ha);
2772 	if (rval != QLA_SUCCESS) {
2773 		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
2774 			   __func__);
2775 		rval = -EIO;
2776 		goto exit_init_fw_cb;
2777 	}
2778 
2779 	wait_for_completion_timeout(&ha->disable_acb_comp,
2780 				    DISABLE_ACB_TOV * HZ);
2781 
2782 	qla4xxx_initcb_to_acb(init_fw_cb);
2783 
2784 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
2785 	if (rval != QLA_SUCCESS) {
2786 		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
2787 			   __func__);
2788 		rval = -EIO;
2789 		goto exit_init_fw_cb;
2790 	}
2791 
2792 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
2793 	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
2794 				  init_fw_cb_dma);
2795 
2796 exit_init_fw_cb:
2797 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
2798 			  init_fw_cb, init_fw_cb_dma);
2799 
2800 	return rval;
2801 }
2802 
2803 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
2804 				     enum iscsi_param param, char *buf)
2805 {
2806 	struct iscsi_session *sess = cls_sess->dd_data;
2807 	struct ddb_entry *ddb_entry = sess->dd_data;
2808 	struct scsi_qla_host *ha = ddb_entry->ha;
2809 	struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
2810 	struct ql4_chap_table chap_tbl;
2811 	int rval, len;
2812 	uint16_t idx;
2813 
2814 	memset(&chap_tbl, 0, sizeof(chap_tbl));
2815 	switch (param) {
2816 	case ISCSI_PARAM_CHAP_IN_IDX:
2817 		rval = qla4xxx_get_chap_index(ha, sess->username_in,
2818 					      sess->password_in, BIDI_CHAP,
2819 					      &idx);
2820 		if (rval)
2821 			len = sprintf(buf, "\n");
2822 		else
2823 			len = sprintf(buf, "%hu\n", idx);
2824 		break;
2825 	case ISCSI_PARAM_CHAP_OUT_IDX:
2826 		if (ddb_entry->ddb_type == FLASH_DDB) {
2827 			if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
2828 				idx = ddb_entry->chap_tbl_idx;
2829 				rval = QLA_SUCCESS;
2830 			} else {
2831 				rval = QLA_ERROR;
2832 			}
2833 		} else {
2834 			rval = qla4xxx_get_chap_index(ha, sess->username,
2835 						      sess->password,
2836 						      LOCAL_CHAP, &idx);
2837 		}
2838 		if (rval)
2839 			len = sprintf(buf, "\n");
2840 		else
2841 			len = sprintf(buf, "%hu\n", idx);
2842 		break;
2843 	case ISCSI_PARAM_USERNAME:
2844 	case ISCSI_PARAM_PASSWORD:
2845 		/* First, populate session username and password for FLASH DDB,
2846 		 * if not already done. This happens when session login fails
2847 		 * for a FLASH DDB.
2848 		 */
2849 		if (ddb_entry->ddb_type == FLASH_DDB &&
2850 		    ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
2851 		    !sess->username && !sess->password) {
2852 			idx = ddb_entry->chap_tbl_idx;
2853 			rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
2854 							    chap_tbl.secret,
2855 							    idx);
2856 			if (!rval) {
2857 				iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
2858 						(char *)chap_tbl.name,
2859 						strlen((char *)chap_tbl.name));
2860 				iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
2861 						(char *)chap_tbl.secret,
2862 						chap_tbl.secret_len);
2863 			}
2864 		}
2865 		/* allow fall-through */
2866 	default:
2867 		return iscsi_session_get_param(cls_sess, param, buf);
2868 	}
2869 
2870 	return len;
2871 }
2872 
2873 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
2874 				  enum iscsi_param param, char *buf)
2875 {
2876 	struct iscsi_conn *conn;
2877 	struct qla_conn *qla_conn;
2878 	struct sockaddr *dst_addr;
2879 
2880 	conn = cls_conn->dd_data;
2881 	qla_conn = conn->dd_data;
2882 	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
2883 
2884 	switch (param) {
2885 	case ISCSI_PARAM_CONN_PORT:
2886 	case ISCSI_PARAM_CONN_ADDRESS:
2887 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
2888 						 dst_addr, param, buf);
2889 	default:
2890 		return iscsi_conn_get_param(cls_conn, param, buf);
2891 	}
2892 }
2893 
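/**
 * qla4xxx_get_ddb_index - reserve a free device database index
 *
 * A free bit is claimed in ha->ddb_idx_map and the index is requested
 * from the firmware; on MBOX_STS_COMMAND_ERROR the next free index is
 * tried.  On success the chosen index is returned in *ddb_index.
 **/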
2894 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
2895 {
2896 	uint32_t mbx_sts = 0;
2897 	uint16_t tmp_ddb_index;
2898 	int ret;
2899 
2900 get_ddb_index:
2901 	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
2902 
2903 	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
2904 		DEBUG2(ql4_printk(KERN_INFO, ha,
2905 				  "Free DDB index not available\n"));
2906 		ret = QLA_ERROR;
2907 		goto exit_get_ddb_index;
2908 	}
2909 
2910 	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
2911 		goto get_ddb_index;
2912 
2913 	DEBUG2(ql4_printk(KERN_INFO, ha,
2914 			  "Found a free DDB index at %d\n", tmp_ddb_index));
2915 	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
2916 	if (ret == QLA_ERROR) {
2917 		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
2918 			ql4_printk(KERN_INFO, ha,
2919 				   "DDB index = %d not available trying next\n",
2920 				   tmp_ddb_index);
2921 			goto get_ddb_index;
2922 		}
2923 		DEBUG2(ql4_printk(KERN_INFO, ha,
2924 				  "Free FW DDB not available\n"));
2925 	}
2926 
2927 	*ddb_index = tmp_ddb_index;
2928 
2929 exit_get_ddb_index:
2930 	return ret;
2931 }
2932 
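/**
 * qla4xxx_match_ipaddress - compare a DDB IP address with a user one
 *
 * The user-supplied string is parsed with in4_pton()/in6_pton() and
 * reformatted with %pI4/%pI6 so that the comparison does not depend
 * on the textual form of the address.
 **/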
2933 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
2934 				   struct ddb_entry *ddb_entry,
2935 				   char *existing_ipaddr,
2936 				   char *user_ipaddr)
2937 {
2938 	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
2939 	char formatted_ipaddr[DDB_IPADDR_LEN];
2940 	int status = QLA_SUCCESS, ret = 0;
2941 
2942 	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
2943 		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
2944 			       '\0', NULL);
2945 		if (ret == 0) {
2946 			status = QLA_ERROR;
2947 			goto out_match;
2948 		}
2949 		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
2950 	} else {
2951 		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
2952 			       '\0', NULL);
2953 		if (ret == 0) {
2954 			status = QLA_ERROR;
2955 			goto out_match;
2956 		}
2957 		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
2958 	}
2959 
2960 	if (strcmp(existing_ipaddr, formatted_ipaddr))
2961 		status = QLA_ERROR;
2962 
2963 out_match:
2964 	return status;
2965 }
2966 
2967 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
2968 				      struct iscsi_cls_conn *cls_conn)
2969 {
2970 	int idx = 0, max_ddbs, rval;
2971 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
2972 	struct iscsi_session *sess, *existing_sess;
2973 	struct iscsi_conn *conn, *existing_conn;
2974 	struct ddb_entry *ddb_entry;
2975 
2976 	sess = cls_sess->dd_data;
2977 	conn = cls_conn->dd_data;
2978 
2979 	if (sess->targetname == NULL ||
2980 	    conn->persistent_address == NULL ||
2981 	    conn->persistent_port == 0)
2982 		return QLA_ERROR;
2983 
2984 	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
2985 				    MAX_DEV_DB_ENTRIES;
2986 
2987 	for (idx = 0; idx < max_ddbs; idx++) {
2988 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
2989 		if (ddb_entry == NULL)
2990 			continue;
2991 
2992 		if (ddb_entry->ddb_type != FLASH_DDB)
2993 			continue;
2994 
2995 		existing_sess = ddb_entry->sess->dd_data;
2996 		existing_conn = ddb_entry->conn->dd_data;
2997 
2998 		if (existing_sess->targetname == NULL ||
2999 		    existing_conn->persistent_address == NULL ||
3000 		    existing_conn->persistent_port == 0)
3001 			continue;
3002 
3003 		DEBUG2(ql4_printk(KERN_INFO, ha,
3004 				  "IQN = %s User IQN = %s\n",
3005 				  existing_sess->targetname,
3006 				  sess->targetname));
3007 
3008 		DEBUG2(ql4_printk(KERN_INFO, ha,
3009 				  "IP = %s User IP = %s\n",
3010 				  existing_conn->persistent_address,
3011 				  conn->persistent_address));
3012 
3013 		DEBUG2(ql4_printk(KERN_INFO, ha,
3014 				  "Port = %d User Port = %d\n",
3015 				  existing_conn->persistent_port,
3016 				  conn->persistent_port));
3017 
3018 		if (strcmp(existing_sess->targetname, sess->targetname))
3019 			continue;
3020 		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
3021 					existing_conn->persistent_address,
3022 					conn->persistent_address);
3023 		if (rval == QLA_ERROR)
3024 			continue;
3025 		if (existing_conn->persistent_port != conn->persistent_port)
3026 			continue;
3027 		break;
3028 	}
3029 
3030 	if (idx == max_ddbs)
3031 		return QLA_ERROR;
3032 
3033 	DEBUG2(ql4_printk(KERN_INFO, ha,
3034 			  "Match found in fwdb sessions\n"));
3035 	return QLA_SUCCESS;
3036 }
3037 
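/**
 * qla4xxx_session_create - create an iSCSI session for an endpoint
 *
 * A free firmware DDB index is reserved, an iscsi session with
 * ddb_entry/ql4_task_data private areas is set up, and the new
 * ddb_entry is linked into ha->fw_ddb_index_map.
 **/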
3038 static struct iscsi_cls_session *
3039 qla4xxx_session_create(struct iscsi_endpoint *ep,
3040 			uint16_t cmds_max, uint16_t qdepth,
3041 			uint32_t initial_cmdsn)
3042 {
3043 	struct iscsi_cls_session *cls_sess;
3044 	struct scsi_qla_host *ha;
3045 	struct qla_endpoint *qla_ep;
3046 	struct ddb_entry *ddb_entry;
3047 	uint16_t ddb_index;
3048 	struct iscsi_session *sess;
3049 	struct sockaddr *dst_addr;
3050 	int ret;
3051 
3052 	if (!ep) {
3053 		printk(KERN_ERR "qla4xxx: missing ep.\n");
3054 		return NULL;
3055 	}
3056 
3057 	qla_ep = ep->dd_data;
3058 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
3059 	ha = to_qla_host(qla_ep->host);
3060 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
3061 			  ha->host_no));
3062 
3063 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
3064 	if (ret == QLA_ERROR)
3065 		return NULL;
3066 
3067 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
3068 				       cmds_max, sizeof(struct ddb_entry),
3069 				       sizeof(struct ql4_task_data),
3070 				       initial_cmdsn, ddb_index);
3071 	if (!cls_sess)
3072 		return NULL;
3073 
3074 	sess = cls_sess->dd_data;
3075 	ddb_entry = sess->dd_data;
3076 	ddb_entry->fw_ddb_index = ddb_index;
3077 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
3078 	ddb_entry->ha = ha;
3079 	ddb_entry->sess = cls_sess;
3080 	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
3081 	ddb_entry->ddb_change = qla4xxx_ddb_change;
3082 	clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
3083 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
3084 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
3085 	ha->tot_ddbs++;
3086 
3087 	return cls_sess;
3088 }
3089 
3090 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
3091 {
3092 	struct iscsi_session *sess;
3093 	struct ddb_entry *ddb_entry;
3094 	struct scsi_qla_host *ha;
3095 	unsigned long flags, wtime;
3096 	struct dev_db_entry *fw_ddb_entry = NULL;
3097 	dma_addr_t fw_ddb_entry_dma;
3098 	uint32_t ddb_state;
3099 	int ret;
3100 
3101 	sess = cls_sess->dd_data;
3102 	ddb_entry = sess->dd_data;
3103 	ha = ddb_entry->ha;
3104 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
3105 			  ha->host_no));
3106 
3107 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3108 					  &fw_ddb_entry_dma, GFP_KERNEL);
3109 	if (!fw_ddb_entry) {
3110 		ql4_printk(KERN_ERR, ha,
3111 			   "%s: Unable to allocate dma buffer\n", __func__);
3112 		goto destroy_session;
3113 	}
3114 
3115 	wtime = jiffies + (HZ * LOGOUT_TOV);
3116 	do {
3117 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
3118 					      fw_ddb_entry, fw_ddb_entry_dma,
3119 					      NULL, NULL, &ddb_state, NULL,
3120 					      NULL, NULL);
3121 		if (ret == QLA_ERROR)
3122 			goto destroy_session;
3123 
3124 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
3125 		    (ddb_state == DDB_DS_SESSION_FAILED))
3126 			goto destroy_session;
3127 
3128 		schedule_timeout_uninterruptible(HZ);
3129 	} while (time_after(wtime, jiffies));
3130 
3131 destroy_session:
3132 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
3133 	if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
3134 		clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
3135 	spin_lock_irqsave(&ha->hardware_lock, flags);
3136 	qla4xxx_free_ddb(ha, ddb_entry);
3137 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3138 
3139 	iscsi_session_teardown(cls_sess);
3140 
3141 	if (fw_ddb_entry)
3142 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3143 				  fw_ddb_entry, fw_ddb_entry_dma);
3144 }
3145 
3146 static struct iscsi_cls_conn *
3147 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
3148 {
3149 	struct iscsi_cls_conn *cls_conn;
3150 	struct iscsi_session *sess;
3151 	struct ddb_entry *ddb_entry;
3152 	struct scsi_qla_host *ha;
3153 
3154 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
3155 				    conn_idx);
3156 	if (!cls_conn) {
3157 		pr_info("%s: Can not create connection for conn_idx = %u\n",
3158 			__func__, conn_idx);
3159 		return NULL;
3160 	}
3161 
3162 	sess = cls_sess->dd_data;
3163 	ddb_entry = sess->dd_data;
3164 	ddb_entry->conn = cls_conn;
3165 
3166 	ha = ddb_entry->ha;
3167 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
3168 			  conn_idx));
3169 	return cls_conn;
3170 }
3171 
3172 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
3173 			     struct iscsi_cls_conn *cls_conn,
3174 			     uint64_t transport_fd, int is_leading)
3175 {
3176 	struct iscsi_conn *conn;
3177 	struct qla_conn *qla_conn;
3178 	struct iscsi_endpoint *ep;
3179 	struct ddb_entry *ddb_entry;
3180 	struct scsi_qla_host *ha;
3181 	struct iscsi_session *sess;
3182 
3183 	sess = cls_session->dd_data;
3184 	ddb_entry = sess->dd_data;
3185 	ha = ddb_entry->ha;
3186 
3187 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
3188 			  cls_session->sid, cls_conn->cid));
3189 
3190 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
3191 		return -EINVAL;
3192 	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;
3193 	conn = cls_conn->dd_data;
3194 	qla_conn = conn->dd_data;
3195 	qla_conn->qla_ep = ep->dd_data;
3196 	return 0;
3197 }
3198 
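/**
 * qla4xxx_conn_start - log the session in to the target
 *
 * If a matching flash/firmware DDB session already exists the login
 * is refused with -EEXIST; otherwise the DDB entry is populated from
 * the connection parameters and the firmware is asked to open the
 * connection.
 **/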
3199 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
3200 {
3201 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
3202 	struct iscsi_session *sess;
3203 	struct ddb_entry *ddb_entry;
3204 	struct scsi_qla_host *ha;
3205 	struct dev_db_entry *fw_ddb_entry = NULL;
3206 	dma_addr_t fw_ddb_entry_dma;
3207 	uint32_t mbx_sts = 0;
3208 	int ret = 0;
3209 	int status = QLA_SUCCESS;
3210 
3211 	sess = cls_sess->dd_data;
3212 	ddb_entry = sess->dd_data;
3213 	ha = ddb_entry->ha;
3214 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
3215 			  cls_sess->sid, cls_conn->cid));
3216 
3217 	/* Check if we have a matching FW DDB; if so, do not log in to this
3218 	 * target, since doing so could cause the target to log out the
3219 	 * previous connection.
3220 	 */
3221 	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
3222 	if (ret == QLA_SUCCESS) {
3223 		ql4_printk(KERN_INFO, ha,
3224 			   "Session already exist in FW.\n");
3225 		ret = -EEXIST;
3226 		goto exit_conn_start;
3227 	}
3228 
3229 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3230 					  &fw_ddb_entry_dma, GFP_KERNEL);
3231 	if (!fw_ddb_entry) {
3232 		ql4_printk(KERN_ERR, ha,
3233 			   "%s: Unable to allocate dma buffer\n", __func__);
3234 		ret = -ENOMEM;
3235 		goto exit_conn_start;
3236 	}
3237 
3238 	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
3239 	if (ret) {
3240 		/* If iscsid is stopped and started, there is no need to do
3241 		 * set param again since the DDB state will already be
3242 		 * active and the FW does not allow set ddb on an
3243 		 * active session.
3244 		 */
3245 		if (mbx_sts)
3246 			if (ddb_entry->fw_ddb_device_state ==
3247 						DDB_DS_SESSION_ACTIVE) {
3248 				ddb_entry->unblock_sess(ddb_entry->sess);
3249 				goto exit_set_param;
3250 			}
3251 
3252 		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
3253 			   __func__, ddb_entry->fw_ddb_index);
3254 		goto exit_conn_start;
3255 	}
3256 
3257 	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
3258 	if (status == QLA_ERROR) {
3259 		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
3260 			   sess->targetname);
3261 		ret = -EINVAL;
3262 		goto exit_conn_start;
3263 	}
3264 
3265 	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
3266 		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
3267 
3268 	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
3269 		      ddb_entry->fw_ddb_device_state));
3270 
3271 exit_set_param:
3272 	ret = 0;
3273 
3274 exit_conn_start:
3275 	if (fw_ddb_entry)
3276 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3277 				  fw_ddb_entry, fw_ddb_entry_dma);
3278 	return ret;
3279 }
3280 
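/**
 * qla4xxx_conn_destroy - tear down an iscsi connection
 * @cls_conn: Pointer to the iscsi class connection
 *
 * Logs the session out of the firmware with the close-session option.
 **/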
3281 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
3282 {
3283 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
3284 	struct iscsi_session *sess;
3285 	struct scsi_qla_host *ha;
3286 	struct ddb_entry *ddb_entry;
3287 	int options;
3288 
3289 	sess = cls_sess->dd_data;
3290 	ddb_entry = sess->dd_data;
3291 	ha = ddb_entry->ha;
3292 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
3293 			  cls_conn->cid));
3294 
3295 	options = LOGOUT_OPTION_CLOSE_SESSION;
3296 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
3297 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
3298 }
3299 
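/**
 * qla4xxx_task_work - complete a passthrough iscsi PDU
 * @wdata: Pointer to the work_struct embedded in ql4_task_data
 *
 * Examines the passthru status returned by the firmware and, on
 * success, hands the response PDU back to the iscsi layer.
 **/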
3300 static void qla4xxx_task_work(struct work_struct *wdata)
3301 {
3302 	struct ql4_task_data *task_data;
3303 	struct scsi_qla_host *ha;
3304 	struct passthru_status *sts;
3305 	struct iscsi_task *task;
3306 	struct iscsi_hdr *hdr;
3307 	uint8_t *data;
3308 	uint32_t data_len;
3309 	struct iscsi_conn *conn;
3310 	int hdr_len;
3311 	itt_t itt;
3312 
3313 	task_data = container_of(wdata, struct ql4_task_data, task_work);
3314 	ha = task_data->ha;
3315 	task = task_data->task;
3316 	sts = &task_data->sts;
3317 	hdr_len = sizeof(struct iscsi_hdr);
3318 
3319 	DEBUG3(printk(KERN_INFO "Status returned\n"));
3320 	DEBUG3(qla4xxx_dump_buffer(sts, 64));
3321 	DEBUG3(printk(KERN_INFO "Response buffer\n"));
3322 	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
3323 
3324 	conn = task->conn;
3325 
3326 	switch (sts->completionStatus) {
3327 	case PASSTHRU_STATUS_COMPLETE:
3328 		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
3329 		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
3330 		itt = sts->handle;
3331 		hdr->itt = itt;
3332 		data = task_data->resp_buffer + hdr_len;
3333 		data_len = task_data->resp_len - hdr_len;
3334 		iscsi_complete_pdu(conn, hdr, data, data_len);
3335 		break;
3336 	default:
3337 		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
3338 			   sts->completionStatus);
3339 		break;
3340 	}
3341 	return;
3342 }
3343 
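/**
 * qla4xxx_alloc_pdu - allocate buffers for a passthrough PDU
 * @task: Pointer to the iscsi task
 * @opcode: iscsi opcode (not used here)
 *
 * Maps the task data for DMA and allocates coherent request and
 * response buffers used by the passthrough path.
 **/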
3344 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3345 {
3346 	struct ql4_task_data *task_data;
3347 	struct iscsi_session *sess;
3348 	struct ddb_entry *ddb_entry;
3349 	struct scsi_qla_host *ha;
3350 	int hdr_len;
3351 
3352 	sess = task->conn->session;
3353 	ddb_entry = sess->dd_data;
3354 	ha = ddb_entry->ha;
3355 	task_data = task->dd_data;
3356 	memset(task_data, 0, sizeof(struct ql4_task_data));
3357 
3358 	if (task->sc) {
3359 		ql4_printk(KERN_INFO, ha,
3360 			   "%s: SCSI Commands not implemented\n", __func__);
3361 		return -EINVAL;
3362 	}
3363 
3364 	hdr_len = sizeof(struct iscsi_hdr);
3365 	task_data->ha = ha;
3366 	task_data->task = task;
3367 
3368 	if (task->data_count) {
3369 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
3370 						     task->data_count,
3371 						     DMA_TO_DEVICE);
3372 	}
3373 
3374 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
3375 		      __func__, task->conn->max_recv_dlength, hdr_len));
3376 
3377 	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
3378 	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
3379 						    task_data->resp_len,
3380 						    &task_data->resp_dma,
3381 						    GFP_ATOMIC);
3382 	if (!task_data->resp_buffer)
3383 		goto exit_alloc_pdu;
3384 
3385 	task_data->req_len = task->data_count + hdr_len;
3386 	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
3387 						   task_data->req_len,
3388 						   &task_data->req_dma,
3389 						   GFP_ATOMIC);
3390 	if (!task_data->req_buffer)
3391 		goto exit_alloc_pdu;
3392 
3393 	task->hdr = task_data->req_buffer;
3394 
3395 	INIT_WORK(&task_data->task_work, qla4xxx_task_work);
3396 
3397 	return 0;
3398 
3399 exit_alloc_pdu:
3400 	if (task_data->resp_buffer)
3401 		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
3402 				  task_data->resp_buffer, task_data->resp_dma);
3403 
3404 	if (task_data->req_buffer)
3405 		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
3406 				  task_data->req_buffer, task_data->req_dma);
3407 	return -ENOMEM;
3408 }
3409 
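/**
 * qla4xxx_task_cleanup - free the DMA resources of a PDU
 * @task: Pointer to the iscsi task
 *
 * Unmaps the task data and frees the request and response buffers
 * allocated by qla4xxx_alloc_pdu().
 **/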
3410 static void qla4xxx_task_cleanup(struct iscsi_task *task)
3411 {
3412 	struct ql4_task_data *task_data;
3413 	struct iscsi_session *sess;
3414 	struct ddb_entry *ddb_entry;
3415 	struct scsi_qla_host *ha;
3416 	int hdr_len;
3417 
3418 	hdr_len = sizeof(struct iscsi_hdr);
3419 	sess = task->conn->session;
3420 	ddb_entry = sess->dd_data;
3421 	ha = ddb_entry->ha;
3422 	task_data = task->dd_data;
3423 
3424 	if (task->data_count) {
3425 		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
3426 				 task->data_count, DMA_TO_DEVICE);
3427 	}
3428 
3429 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
3430 		      __func__, task->conn->max_recv_dlength, hdr_len));
3431 
3432 	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
3433 			  task_data->resp_buffer, task_data->resp_dma);
3434 	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
3435 			  task_data->req_buffer, task_data->req_dma);
3436 	return;
3437 }
3438 
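/**
 * qla4xxx_task_xmit - transmit an iscsi task
 * @task: Pointer to the iscsi task
 *
 * Sends non-SCSI tasks via passthrough; transmitting SCSI commands
 * through this path is not implemented.
 **/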
3439 static int qla4xxx_task_xmit(struct iscsi_task *task)
3440 {
3441 	struct scsi_cmnd *sc = task->sc;
3442 	struct iscsi_session *sess = task->conn->session;
3443 	struct ddb_entry *ddb_entry = sess->dd_data;
3444 	struct scsi_qla_host *ha = ddb_entry->ha;
3445 
3446 	if (!sc)
3447 		return qla4xxx_send_passthru0(task);
3448 
3449 	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
3450 		   __func__);
3451 	return -ENOSYS;
3452 }
3453 
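/**
 * qla4xxx_copy_from_fwddb_param - copy firmware DDB params to a flash session
 * @sess: Pointer to the flash session
 * @conn: Pointer to the flash connection
 * @fw_ddb_entry: Pointer to the firmware DDB entry
 *
 * Translates the option bits and iscsi/TCP/IP parameters of a firmware
 * DDB entry into the flash session and connection structures.
 **/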
3454 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
3455 					 struct iscsi_bus_flash_conn *conn,
3456 					 struct dev_db_entry *fw_ddb_entry)
3457 {
3458 	unsigned long options = 0;
3459 	int rc = 0;
3460 
3461 	options = le16_to_cpu(fw_ddb_entry->options);
3462 	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
3463 	if (test_bit(OPT_IPV6_DEVICE, &options)) {
3464 		rc = iscsi_switch_str_param(&sess->portal_type,
3465 					    PORTAL_TYPE_IPV6);
3466 		if (rc)
3467 			goto exit_copy;
3468 	} else {
3469 		rc = iscsi_switch_str_param(&sess->portal_type,
3470 					    PORTAL_TYPE_IPV4);
3471 		if (rc)
3472 			goto exit_copy;
3473 	}
3474 
3475 	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
3476 					      &options);
3477 	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
3478 	sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
3479 
3480 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3481 	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
3482 	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
3483 	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
3484 	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
3485 	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
3486 					    &options);
3487 	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
3488 	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
3489 	conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
3490 	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
3491 					     &options);
3492 	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
3493 	sess->discovery_auth_optional =
3494 			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
3495 	if (test_bit(ISCSIOPT_ERL1, &options))
3496 		sess->erl |= BIT_1;
3497 	if (test_bit(ISCSIOPT_ERL0, &options))
3498 		sess->erl |= BIT_0;
3499 
3500 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
3501 	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
3502 	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
3503 	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
3504 	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
3505 		conn->tcp_timer_scale |= BIT_3;
3506 	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
3507 		conn->tcp_timer_scale |= BIT_2;
3508 	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
3509 		conn->tcp_timer_scale |= BIT_1;
3510 
3511 	conn->tcp_timer_scale >>= 1;
3512 	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
3513 
3514 	options = le16_to_cpu(fw_ddb_entry->ip_options);
3515 	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
3516 
3517 	conn->max_recv_dlength = BYTE_UNITS *
3518 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
3519 	conn->max_xmit_dlength = BYTE_UNITS *
3520 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
3521 	sess->first_burst = BYTE_UNITS *
3522 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
3523 	sess->max_burst = BYTE_UNITS *
3524 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
3525 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
3526 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3527 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
3528 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
3529 	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
3530 	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
3531 	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
3532 	conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
3533 	conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
3534 	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
3535 	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
3536 	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
3537 	sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
3538 	sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
3539 	sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3540 	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
3541 
3542 	sess->default_taskmgmt_timeout =
3543 				le16_to_cpu(fw_ddb_entry->def_timeout);
3544 	conn->port = le16_to_cpu(fw_ddb_entry->port);
3545 
3546 	options = le16_to_cpu(fw_ddb_entry->options);
3547 	conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
3548 	if (!conn->ipaddress) {
3549 		rc = -ENOMEM;
3550 		goto exit_copy;
3551 	}
3552 
3553 	conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
3554 	if (!conn->redirect_ipaddr) {
3555 		rc = -ENOMEM;
3556 		goto exit_copy;
3557 	}
3558 
3559 	memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
3560 	memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
3561 
3562 	if (test_bit(OPT_IPV6_DEVICE, &options)) {
3563 		conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
3564 
3565 		conn->link_local_ipv6_addr = kmemdup(
3566 					fw_ddb_entry->link_local_ipv6_addr,
3567 					IPv6_ADDR_LEN, GFP_KERNEL);
3568 		if (!conn->link_local_ipv6_addr) {
3569 			rc = -ENOMEM;
3570 			goto exit_copy;
3571 		}
3572 	} else {
3573 		conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
3574 	}
3575 
3576 	if (fw_ddb_entry->iscsi_name[0]) {
3577 		rc = iscsi_switch_str_param(&sess->targetname,
3578 					    (char *)fw_ddb_entry->iscsi_name);
3579 		if (rc)
3580 			goto exit_copy;
3581 	}
3582 
3583 	if (fw_ddb_entry->iscsi_alias[0]) {
3584 		rc = iscsi_switch_str_param(&sess->targetalias,
3585 					    (char *)fw_ddb_entry->iscsi_alias);
3586 		if (rc)
3587 			goto exit_copy;
3588 	}
3589 
3590 	COPY_ISID(sess->isid, fw_ddb_entry->isid);
3591 
3592 exit_copy:
3593 	return rc;
3594 }
3595 
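/**
 * qla4xxx_copy_to_fwddb_param - copy flash session params to a firmware DDB
 * @sess: Pointer to the flash session
 * @conn: Pointer to the flash connection
 * @fw_ddb_entry: Pointer to the firmware DDB entry to be filled
 *
 * Inverse of qla4xxx_copy_from_fwddb_param(): encodes the session and
 * connection parameters into the firmware DDB entry format.
 **/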
3596 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
3597 				       struct iscsi_bus_flash_conn *conn,
3598 				       struct dev_db_entry *fw_ddb_entry)
3599 {
3600 	uint16_t options;
3601 	int rc = 0;
3602 
3603 	options = le16_to_cpu(fw_ddb_entry->options);
3604 	SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
3605 	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
3606 		options |= BIT_8;
3607 	else
3608 		options &= ~BIT_8;
3609 
3610 	SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
3611 	SET_BITVAL(sess->discovery_sess, options, BIT_4);
3612 	SET_BITVAL(sess->entry_state, options, BIT_3);
3613 	fw_ddb_entry->options = cpu_to_le16(options);
3614 
3615 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3616 	SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
3617 	SET_BITVAL(conn->datadgst_en, options, BIT_12);
3618 	SET_BITVAL(sess->imm_data_en, options, BIT_11);
3619 	SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
3620 	SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
3621 	SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
3622 	SET_BITVAL(sess->chap_auth_en, options, BIT_7);
3623 	SET_BITVAL(conn->snack_req_en, options, BIT_6);
3624 	SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
3625 	SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
3626 	SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
3627 	SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
3628 	SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
3629 	fw_ddb_entry->iscsi_options = cpu_to_le16(options);
3630 
3631 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
3632 	SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
3633 	SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
3634 	SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
3635 	SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
3636 	SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
3637 	SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
3638 	SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
3639 	fw_ddb_entry->tcp_options = cpu_to_le16(options);
3640 
3641 	options = le16_to_cpu(fw_ddb_entry->ip_options);
3642 	SET_BITVAL(conn->fragment_disable, options, BIT_4);
3643 	fw_ddb_entry->ip_options = cpu_to_le16(options);
3644 
3645 	fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
3646 	fw_ddb_entry->iscsi_max_rcv_data_seg_len =
3647 			       cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
3648 	fw_ddb_entry->iscsi_max_snd_data_seg_len =
3649 			       cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
3650 	fw_ddb_entry->iscsi_first_burst_len =
3651 				cpu_to_le16(sess->first_burst / BYTE_UNITS);
3652 	fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
3653 					    BYTE_UNITS);
3654 	fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
3655 	fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
3656 	fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
3657 	fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
3658 	fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
3659 	fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
3660 	fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
3661 	fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
3662 	fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
3663 	fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
3664 	fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
3665 	fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
3666 	fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
3667 	fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
3668 	fw_ddb_entry->port = cpu_to_le16(conn->port);
3669 	fw_ddb_entry->def_timeout =
3670 				cpu_to_le16(sess->default_taskmgmt_timeout);
3671 
3672 	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
3673 		fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
3674 	else
3675 		fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
3676 
3677 	if (conn->ipaddress)
3678 		memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
3679 		       sizeof(fw_ddb_entry->ip_addr));
3680 
3681 	if (conn->redirect_ipaddr)
3682 		memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
3683 		       sizeof(fw_ddb_entry->tgt_addr));
3684 
3685 	if (conn->link_local_ipv6_addr)
3686 		memcpy(fw_ddb_entry->link_local_ipv6_addr,
3687 		       conn->link_local_ipv6_addr,
3688 		       sizeof(fw_ddb_entry->link_local_ipv6_addr));
3689 
3690 	if (sess->targetname)
3691 		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
3692 		       sizeof(fw_ddb_entry->iscsi_name));
3693 
3694 	if (sess->targetalias)
3695 		memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
3696 		       sizeof(fw_ddb_entry->iscsi_alias));
3697 
3698 	COPY_ISID(fw_ddb_entry->isid, sess->isid);
3699 
3700 	return rc;
3701 }
3702 
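/**
 * qla4xxx_copy_to_sess_conn_params - copy firmware DDB params to iscsi objects
 * @conn: Pointer to the iscsi connection
 * @sess: Pointer to the iscsi session
 * @fw_ddb_entry: Pointer to the firmware DDB entry
 *
 * Copies the negotiated iscsi, TCP and IP parameters from the firmware
 * DDB entry into the iscsi session and connection structures.
 **/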
3703 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
3704 					     struct iscsi_session *sess,
3705 					     struct dev_db_entry *fw_ddb_entry)
3706 {
3707 	unsigned long options = 0;
3708 	uint16_t ddb_link;
3709 	uint16_t disc_parent;
3710 	char ip_addr[DDB_IPADDR_LEN];
3711 
3712 	options = le16_to_cpu(fw_ddb_entry->options);
3713 	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
3714 	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
3715 					      &options);
3716 	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
3717 
3718 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3719 	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
3720 	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
3721 	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
3722 	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
3723 	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
3724 					    &options);
3725 	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
3726 	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
3727 	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
3728 					     &options);
3729 	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
3730 	sess->discovery_auth_optional =
3731 			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
3732 	if (test_bit(ISCSIOPT_ERL1, &options))
3733 		sess->erl |= BIT_1;
3734 	if (test_bit(ISCSIOPT_ERL0, &options))
3735 		sess->erl |= BIT_0;
3736 
3737 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
3738 	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
3739 	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
3740 	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
3741 	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
3742 		conn->tcp_timer_scale |= BIT_3;
3743 	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
3744 		conn->tcp_timer_scale |= BIT_2;
3745 	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
3746 		conn->tcp_timer_scale |= BIT_1;
3747 
3748 	conn->tcp_timer_scale >>= 1;
3749 	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
3750 
3751 	options = le16_to_cpu(fw_ddb_entry->ip_options);
3752 	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
3753 
3754 	conn->max_recv_dlength = BYTE_UNITS *
3755 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
3756 	conn->max_xmit_dlength = BYTE_UNITS *
3757 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
3758 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
3759 	sess->first_burst = BYTE_UNITS *
3760 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
3761 	sess->max_burst = BYTE_UNITS *
3762 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
3763 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3764 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
3765 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
3766 	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
3767 	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
3768 	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
3769 	conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
3770 	conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
3771 	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
3772 	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
3773 	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
3774 	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
3775 	COPY_ISID(sess->isid, fw_ddb_entry->isid);
3776 
3777 	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
3778 	if (ddb_link == DDB_ISNS)
3779 		disc_parent = ISCSI_DISC_PARENT_ISNS;
3780 	else if (ddb_link == DDB_NO_LINK)
3781 		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
3782 	else if (ddb_link < MAX_DDB_ENTRIES)
3783 		disc_parent = ISCSI_DISC_PARENT_SENDTGT;
3784 	else
3785 		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
3786 
3787 	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
3788 			iscsi_get_discovery_parent_name(disc_parent), 0);
3789 
3790 	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
3791 			(char *)fw_ddb_entry->iscsi_alias, 0);
3792 
3793 	options = le16_to_cpu(fw_ddb_entry->options);
3794 	if (options & DDB_OPT_IPV6_DEVICE) {
3795 		memset(ip_addr, 0, sizeof(ip_addr));
3796 		sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr);
3797 		iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR,
3798 				(char *)ip_addr, 0);
3799 	}
3800 }
3801 
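/**
 * qla4xxx_copy_fwddb_param - update session/connection from a firmware DDB
 * @ha: Pointer to host adapter structure
 * @fw_ddb_entry: Pointer to the firmware DDB entry
 * @cls_sess: Pointer to the iscsi class session
 * @cls_conn: Pointer to the iscsi class connection
 *
 * Copies the DDB parameters into the session and connection and sets
 * the portal, target, initiator and CHAP parameters for the connection.
 **/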
3802 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
3803 				     struct dev_db_entry *fw_ddb_entry,
3804 				     struct iscsi_cls_session *cls_sess,
3805 				     struct iscsi_cls_conn *cls_conn)
3806 {
3807 	int buflen = 0;
3808 	struct iscsi_session *sess;
3809 	struct ddb_entry *ddb_entry;
3810 	struct ql4_chap_table chap_tbl;
3811 	struct iscsi_conn *conn;
3812 	char ip_addr[DDB_IPADDR_LEN];
3813 	uint16_t options = 0;
3814 
3815 	sess = cls_sess->dd_data;
3816 	ddb_entry = sess->dd_data;
3817 	conn = cls_conn->dd_data;
3818 	memset(&chap_tbl, 0, sizeof(chap_tbl));
3819 
3820 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3821 
3822 	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
3823 
3824 	sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
3825 	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
3826 
3827 	memset(ip_addr, 0, sizeof(ip_addr));
3828 	options = le16_to_cpu(fw_ddb_entry->options);
3829 	if (options & DDB_OPT_IPV6_DEVICE) {
3830 		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
3831 
3832 		memset(ip_addr, 0, sizeof(ip_addr));
3833 		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
3834 	} else {
3835 		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
3836 		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
3837 	}
3838 
3839 	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
3840 			(char *)ip_addr, buflen);
3841 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
3842 			(char *)fw_ddb_entry->iscsi_name, buflen);
3843 	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
3844 			(char *)ha->name_string, buflen);
3845 
3846 	if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
3847 		if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
3848 						   chap_tbl.secret,
3849 						   ddb_entry->chap_tbl_idx)) {
3850 			iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
3851 					(char *)chap_tbl.name,
3852 					strlen((char *)chap_tbl.name));
3853 			iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
3854 					(char *)chap_tbl.secret,
3855 					chap_tbl.secret_len);
3856 		}
3857 	}
3858 }
3859 
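/**
 * qla4xxx_update_session_conn_fwddb_param - refresh params from firmware DDB
 * @ha: Pointer to host adapter structure
 * @ddb_entry: Pointer to device database entry
 *
 * Reads the firmware DDB entry for @ddb_entry and copies its parameters
 * into the associated iscsi session and connection.
 **/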
3860 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
3861 					     struct ddb_entry *ddb_entry)
3862 {
3863 	struct iscsi_cls_session *cls_sess;
3864 	struct iscsi_cls_conn *cls_conn;
3865 	uint32_t ddb_state;
3866 	dma_addr_t fw_ddb_entry_dma;
3867 	struct dev_db_entry *fw_ddb_entry;
3868 
3869 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3870 					  &fw_ddb_entry_dma, GFP_KERNEL);
3871 	if (!fw_ddb_entry) {
3872 		ql4_printk(KERN_ERR, ha,
3873 			   "%s: Unable to allocate dma buffer\n", __func__);
3874 		goto exit_session_conn_fwddb_param;
3875 	}
3876 
3877 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
3878 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
3879 				    NULL, NULL, NULL) == QLA_ERROR) {
3880 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
3881 				  "get_ddb_entry for fw_ddb_index %d\n",
3882 				  ha->host_no, __func__,
3883 				  ddb_entry->fw_ddb_index));
3884 		goto exit_session_conn_fwddb_param;
3885 	}
3886 
3887 	cls_sess = ddb_entry->sess;
3888 
3889 	cls_conn = ddb_entry->conn;
3890 
3891 	/* Update params */
3892 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
3893 
3894 exit_session_conn_fwddb_param:
3895 	if (fw_ddb_entry)
3896 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3897 				  fw_ddb_entry, fw_ddb_entry_dma);
3898 }
3899 
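/**
 * qla4xxx_update_session_conn_param - refresh session params after login
 * @ha: Pointer to host adapter structure
 * @ddb_entry: Pointer to device database entry
 *
 * Reads the firmware DDB entry, updates the relogin timers and copies
 * the negotiated parameters into the iscsi session and connection.
 **/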
3900 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
3901 				       struct ddb_entry *ddb_entry)
3902 {
3903 	struct iscsi_cls_session *cls_sess;
3904 	struct iscsi_cls_conn *cls_conn;
3905 	struct iscsi_session *sess;
3906 	struct iscsi_conn *conn;
3907 	uint32_t ddb_state;
3908 	dma_addr_t fw_ddb_entry_dma;
3909 	struct dev_db_entry *fw_ddb_entry;
3910 
3911 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3912 					  &fw_ddb_entry_dma, GFP_KERNEL);
3913 	if (!fw_ddb_entry) {
3914 		ql4_printk(KERN_ERR, ha,
3915 			   "%s: Unable to allocate dma buffer\n", __func__);
3916 		goto exit_session_conn_param;
3917 	}
3918 
3919 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
3920 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
3921 				    NULL, NULL, NULL) == QLA_ERROR) {
3922 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
3923 				  "get_ddb_entry for fw_ddb_index %d\n",
3924 				  ha->host_no, __func__,
3925 				  ddb_entry->fw_ddb_index));
3926 		goto exit_session_conn_param;
3927 	}
3928 
3929 	cls_sess = ddb_entry->sess;
3930 	sess = cls_sess->dd_data;
3931 
3932 	cls_conn = ddb_entry->conn;
3933 	conn = cls_conn->dd_data;
3934 
3935 	/* Update timers after login */
3936 	ddb_entry->default_relogin_timeout =
3937 		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
3938 		 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
3939 		 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
3940 	ddb_entry->default_time2wait =
3941 				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3942 
3943 	/* Update params */
3944 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3945 	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
3946 
3947 	memcpy(sess->initiatorname, ha->name_string,
3948 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
3949 
3950 exit_session_conn_param:
3951 	if (fw_ddb_entry)
3952 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3953 				  fw_ddb_entry, fw_ddb_entry_dma);
3954 }
3955 
3956 /*
3957  * Timer routines
3958  */
3959 
3960 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
3961 				unsigned long interval)
3962 {
3963 	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
3964 		     __func__, ha->host->host_no));
3965 	init_timer(&ha->timer);
3966 	ha->timer.expires = jiffies + interval * HZ;
3967 	ha->timer.data = (unsigned long)ha;
3968 	ha->timer.function = (void (*)(unsigned long))func;
3969 	add_timer(&ha->timer);
3970 	ha->timer_active = 1;
3971 }
3972 
3973 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
3974 {
3975 	del_timer_sync(&ha->timer);
3976 	ha->timer_active = 0;
3977 }
3978 
3979 /**
3980  * qla4xxx_mark_device_missing - blocks the session
3981  * @cls_session: Pointer to the session to be marked missing
3982  *
3983  * This routine marks a device missing by blocking its iSCSI
3984  * session.
3985  **/
3986 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
3987 {
3988 	iscsi_block_session(cls_session);
3989 }
3990 
3991 /**
3992  * qla4xxx_mark_all_devices_missing - mark all devices as missing.
3993  * @ha: Pointer to host adapter structure.
3994  *
3995  * This routine marks every device missing by blocking its iSCSI session.
3996  **/
3997 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
3998 {
3999 	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
4000 }
4001 
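/**
 * qla4xxx_get_new_srb - allocate an srb from the mempool
 * @ha: Pointer to host adapter structure
 * @ddb_entry: Pointer to device database entry
 * @cmd: Pointer to the SCSI command the srb will track
 *
 * Returns a reference-counted srb tied to @cmd, or NULL if the
 * mempool allocation fails.
 **/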
4002 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
4003 				       struct ddb_entry *ddb_entry,
4004 				       struct scsi_cmnd *cmd)
4005 {
4006 	struct srb *srb;
4007 
4008 	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
4009 	if (!srb)
4010 		return srb;
4011 
4012 	kref_init(&srb->srb_ref);
4013 	srb->ha = ha;
4014 	srb->ddb = ddb_entry;
4015 	srb->cmd = cmd;
4016 	srb->flags = 0;
4017 	CMD_SP(cmd) = (void *)srb;
4018 
4019 	return srb;
4020 }
4021 
4022 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
4023 {
4024 	struct scsi_cmnd *cmd = srb->cmd;
4025 
4026 	if (srb->flags & SRB_DMA_VALID) {
4027 		scsi_dma_unmap(cmd);
4028 		srb->flags &= ~SRB_DMA_VALID;
4029 	}
4030 	CMD_SP(cmd) = NULL;
4031 }
4032 
4033 void qla4xxx_srb_compl(struct kref *ref)
4034 {
4035 	struct srb *srb = container_of(ref, struct srb, srb_ref);
4036 	struct scsi_cmnd *cmd = srb->cmd;
4037 	struct scsi_qla_host *ha = srb->ha;
4038 
4039 	qla4xxx_srb_free_dma(ha, srb);
4040 
4041 	mempool_free(srb, ha->srb_mempool);
4042 
4043 	cmd->scsi_done(cmd);
4044 }
4045 
4046 /**
4047  * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
4048  * @host: scsi host
4049  * @cmd: Pointer to Linux's SCSI command structure
4050  *
4051  * Remarks:
4052  * This routine is invoked by Linux to send a SCSI command to the driver.
4053  * The mid-level driver tries to ensure that queuecommand never gets
4054  * invoked concurrently with itself or the interrupt handler (although
4055  * the interrupt handler may call this routine as part of request-
4056  * completion handling). Unfortunately, it sometimes calls the scheduler
4057  * in interrupt context, which is a big NO! NO!
4058  **/
4059 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
4060 {
4061 	struct scsi_qla_host *ha = to_qla_host(host);
4062 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
4063 	struct iscsi_cls_session *sess = ddb_entry->sess;
4064 	struct srb *srb;
4065 	int rval;
4066 
4067 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
4068 		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
4069 			cmd->result = DID_NO_CONNECT << 16;
4070 		else
4071 			cmd->result = DID_REQUEUE << 16;
4072 		goto qc_fail_command;
4073 	}
4074 
4075 	if (!sess) {
4076 		cmd->result = DID_IMM_RETRY << 16;
4077 		goto qc_fail_command;
4078 	}
4079 
4080 	rval = iscsi_session_chkready(sess);
4081 	if (rval) {
4082 		cmd->result = rval;
4083 		goto qc_fail_command;
4084 	}
4085 
4086 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
4087 	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
4088 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4089 	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
4090 	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
4091 	    !test_bit(AF_ONLINE, &ha->flags) ||
4092 	    !test_bit(AF_LINK_UP, &ha->flags) ||
4093 	    test_bit(AF_LOOPBACK, &ha->flags) ||
4094 	    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
4095 	    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
4096 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
4097 		goto qc_host_busy;
4098 
4099 	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
4100 	if (!srb)
4101 		goto qc_host_busy;
4102 
4103 	rval = qla4xxx_send_command_to_isp(ha, srb);
4104 	if (rval != QLA_SUCCESS)
4105 		goto qc_host_busy_free_sp;
4106 
4107 	return 0;
4108 
4109 qc_host_busy_free_sp:
4110 	qla4xxx_srb_free_dma(ha, srb);
4111 	mempool_free(srb, ha->srb_mempool);
4112 
4113 qc_host_busy:
4114 	return SCSI_MLQUEUE_HOST_BUSY;
4115 
4116 qc_fail_command:
4117 	cmd->scsi_done(cmd);
4118 
4119 	return 0;
4120 }
4121 
4122 /**
4123  * qla4xxx_mem_free - frees memory allocated to adapter
4124  * @ha: Pointer to host adapter structure.
4125  *
4126  * Frees memory previously allocated by qla4xxx_mem_alloc
4127  **/
4128 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
4129 {
4130 	if (ha->queues)
4131 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
4132 				  ha->queues_dma);
4133 
4134 	if (ha->fw_dump)
4135 		vfree(ha->fw_dump);
4136 
4137 	ha->queues_len = 0;
4138 	ha->queues = NULL;
4139 	ha->queues_dma = 0;
4140 	ha->request_ring = NULL;
4141 	ha->request_dma = 0;
4142 	ha->response_ring = NULL;
4143 	ha->response_dma = 0;
4144 	ha->shadow_regs = NULL;
4145 	ha->shadow_regs_dma = 0;
4146 	ha->fw_dump = NULL;
4147 	ha->fw_dump_size = 0;
4148 
4149 	/* Free srb pool. */
4150 	if (ha->srb_mempool)
4151 		mempool_destroy(ha->srb_mempool);
4152 
4153 	ha->srb_mempool = NULL;
4154 
4155 	if (ha->chap_dma_pool)
4156 		dma_pool_destroy(ha->chap_dma_pool);
4157 
4158 	if (ha->chap_list)
4159 		vfree(ha->chap_list);
4160 	ha->chap_list = NULL;
4161 
4162 	if (ha->fw_ddb_dma_pool)
4163 		dma_pool_destroy(ha->fw_ddb_dma_pool);
4164 
4165 	/* release io space registers  */
4166 	if (is_qla8022(ha)) {
4167 		if (ha->nx_pcibase)
4168 			iounmap(
4169 			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
4170 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
4171 		if (ha->nx_pcibase)
4172 			iounmap(
4173 			    (struct device_reg_83xx __iomem *)ha->nx_pcibase);
4174 	} else if (ha->reg) {
4175 		iounmap(ha->reg);
4176 	}
4177 
4178 	if (ha->reset_tmplt.buff)
4179 		vfree(ha->reset_tmplt.buff);
4180 
4181 	pci_release_regions(ha->pdev);
4182 }
4183 
4184 /**
4185  * qla4xxx_mem_alloc - allocates memory for use by adapter.
4186  * @ha: Pointer to host adapter structure
4187  *
4188  * Allocates DMA memory for request and response queues. Also allocates memory
4189  * for srbs.
4190  **/
4191 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
4192 {
4193 	unsigned long align;
4194 
4195 	/* Allocate contiguous block of DMA memory for queues. */
4196 	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
4197 			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
4198 			  sizeof(struct shadow_regs) +
4199 			  MEM_ALIGN_VALUE +
4200 			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4201 	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
4202 					&ha->queues_dma, GFP_KERNEL);
4203 	if (ha->queues == NULL) {
4204 		ql4_printk(KERN_WARNING, ha,
4205 		    "Memory Allocation failed - queues.\n");
4206 
4207 		goto mem_alloc_error_exit;
4208 	}
4209 	memset(ha->queues, 0, ha->queues_len);
4210 
4211 	/*
4212 	 * As per RISC alignment requirements -- the bus-address must be a
4213 	 * multiple of the request-ring size (in bytes).
4214 	 */
4215 	align = 0;
4216 	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
4217 		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
4218 					   (MEM_ALIGN_VALUE - 1));
4219 
4220 	/* Update request and response queue pointers. */
4221 	ha->request_dma = ha->queues_dma + align;
4222 	ha->request_ring = (struct queue_entry *) (ha->queues + align);
4223 	ha->response_dma = ha->queues_dma + align +
4224 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
4225 	ha->response_ring = (struct queue_entry *) (ha->queues + align +
4226 						    (REQUEST_QUEUE_DEPTH *
4227 						     QUEUE_SIZE));
4228 	ha->shadow_regs_dma = ha->queues_dma + align +
4229 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
4230 		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
4231 	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
4232 						  (REQUEST_QUEUE_DEPTH *
4233 						   QUEUE_SIZE) +
4234 						  (RESPONSE_QUEUE_DEPTH *
4235 						   QUEUE_SIZE));
4236 
4237 	/* Allocate memory for srb pool. */
4238 	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
4239 					 mempool_free_slab, srb_cachep);
4240 	if (ha->srb_mempool == NULL) {
4241 		ql4_printk(KERN_WARNING, ha,
4242 		    "Memory Allocation failed - SRB Pool.\n");
4243 
4244 		goto mem_alloc_error_exit;
4245 	}
4246 
4247 	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
4248 					    CHAP_DMA_BLOCK_SIZE, 8, 0);
4249 
4250 	if (ha->chap_dma_pool == NULL) {
4251 		ql4_printk(KERN_WARNING, ha,
4252 		    "%s: chap_dma_pool allocation failed.\n", __func__);
4253 		goto mem_alloc_error_exit;
4254 	}
4255 
4256 	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
4257 					      DDB_DMA_BLOCK_SIZE, 8, 0);
4258 
4259 	if (ha->fw_ddb_dma_pool == NULL) {
4260 		ql4_printk(KERN_WARNING, ha,
4261 			   "%s: fw_ddb_dma_pool allocation failed.\n",
4262 			   __func__);
4263 		goto mem_alloc_error_exit;
4264 	}
4265 
4266 	return QLA_SUCCESS;
4267 
4268 mem_alloc_error_exit:
4269 	qla4xxx_mem_free(ha);
4270 	return QLA_ERROR;
4271 }
4272 
4273 /**
4274  * qla4_8xxx_check_temp - Check the ISP82XX temperature.
4275  * @ha: adapter block pointer.
4276  *
4277  * Note: The caller should not hold the idc lock.
4278  **/
4279 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
4280 {
4281 	uint32_t temp, temp_state, temp_val;
4282 	int status = QLA_SUCCESS;
4283 
4284 	temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
4285 
4286 	temp_state = qla82xx_get_temp_state(temp);
4287 	temp_val = qla82xx_get_temp_val(temp);
4288 
4289 	if (temp_state == QLA82XX_TEMP_PANIC) {
4290 		ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
4291 			   " exceeds maximum allowed. Hardware has been shut"
4292 			   " down.\n", temp_val);
4293 		status = QLA_ERROR;
4294 	} else if (temp_state == QLA82XX_TEMP_WARN) {
4295 		if (ha->temperature == QLA82XX_TEMP_NORMAL)
4296 			ql4_printk(KERN_WARNING, ha, "Device temperature %d"
4297 				   " degrees C exceeds operating range."
4298 				   " Immediate action needed.\n", temp_val);
4299 	} else {
4300 		if (ha->temperature == QLA82XX_TEMP_WARN)
4301 			ql4_printk(KERN_INFO, ha, "Device temperature is"
4302 				   " now %d degrees C in normal range.\n",
4303 				   temp_val);
4304 	}
4305 	ha->temperature = temp_state;
4306 	return status;
4307 }
4308 
4309 /**
4310  * qla4_8xxx_check_fw_alive  - Check firmware health
4311  * @ha: Pointer to host adapter structure.
4312  *
4313  * Context: Interrupt
4314  **/
4315 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
4316 {
4317 	uint32_t fw_heartbeat_counter;
4318 	int status = QLA_SUCCESS;
4319 
4320 	fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
4321 						   QLA8XXX_PEG_ALIVE_COUNTER);
4322 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
4323 	if (fw_heartbeat_counter == 0xffffffff) {
4324 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
4325 		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
4326 		    ha->host_no, __func__));
4327 		return status;
4328 	}
4329 
4330 	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
4331 		ha->seconds_since_last_heartbeat++;
4332 		/* FW not alive after 2 seconds */
4333 		if (ha->seconds_since_last_heartbeat == 2) {
4334 			ha->seconds_since_last_heartbeat = 0;
4335 			qla4_8xxx_dump_peg_reg(ha);
4336 			status = QLA_ERROR;
4337 		}
4338 	} else
4339 		ha->seconds_since_last_heartbeat = 0;
4340 
4341 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
4342 	return status;
4343 }
4344 
4345 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
4346 {
4347 	uint32_t halt_status;
4348 	int halt_status_unrecoverable = 0;
4349 
4350 	halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
4351 
4352 	if (is_qla8022(ha)) {
4353 		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
4354 			   __func__);
4355 		qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
4356 				CRB_NIU_XG_PAUSE_CTL_P0 |
4357 				CRB_NIU_XG_PAUSE_CTL_P1);
4358 
4359 		if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
4360 			ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
4361 				   __func__);
4362 		if (halt_status & HALT_STATUS_UNRECOVERABLE)
4363 			halt_status_unrecoverable = 1;
4364 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
4365 		if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
4366 			ql4_printk(KERN_ERR, ha, "%s: Firmware error detected, device is being reset\n",
4367 				   __func__);
4368 		else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
4369 			halt_status_unrecoverable = 1;
4370 	}
4371 
4372 	/*
4373 	 * Since we cannot change dev_state in interrupt context,
4374 	 * set appropriate DPC flag then wakeup DPC
4375 	 */
4376 	if (halt_status_unrecoverable) {
4377 		set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
4378 	} else {
4379 		ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
4380 			   __func__);
4381 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
4382 	}
4383 	qla4xxx_mailbox_premature_completion(ha);
4384 	qla4xxx_wake_dpc(ha);
4385 }
4386 
4387 /**
4388  * qla4_8xxx_watchdog - Poll dev state
4389  * @ha: Pointer to host adapter structure.
4390  *
4391  * Context: Interrupt
4392  **/
4393 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
4394 {
4395 	uint32_t dev_state;
4396 	uint32_t idc_ctrl;
4397 
4398 	if (is_qla8032(ha) &&
4399 	    (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
4400 		WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
4401 			  __func__, ha->func_num);
4402 
4403 	/* don't poll if reset is going on */
4404 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
4405 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4406 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
4407 		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
4408 
4409 		if (qla4_8xxx_check_temp(ha)) {
4410 			if (is_qla8022(ha)) {
4411 				ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
4412 				qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
4413 						CRB_NIU_XG_PAUSE_CTL_P0 |
4414 						CRB_NIU_XG_PAUSE_CTL_P1);
4415 			}
4416 			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
4417 			qla4xxx_wake_dpc(ha);
4418 		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
4419 			   !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
4420 
4421 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
4422 				   __func__);
4423 
4424 			if (is_qla8032(ha) || is_qla8042(ha)) {
4425 				idc_ctrl = qla4_83xx_rd_reg(ha,
4426 							QLA83XX_IDC_DRV_CTRL);
4427 				if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
4428 					ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
4429 						   __func__);
4430 					qla4xxx_mailbox_premature_completion(
4431 									    ha);
4432 				}
4433 			}
4434 
4435 			if ((is_qla8032(ha) || is_qla8042(ha)) ||
4436 			    (is_qla8022(ha) && !ql4xdontresethba)) {
4437 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
4438 				qla4xxx_wake_dpc(ha);
4439 			}
4440 		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
4441 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
4442 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
4443 			    __func__);
4444 			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
4445 			qla4xxx_wake_dpc(ha);
4446 		} else {
4447 			/* Check firmware health */
4448 			if (qla4_8xxx_check_fw_alive(ha))
4449 				qla4_8xxx_process_fw_error(ha);
4450 		}
4451 	}
4452 }
4453 
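/**
 * qla4xxx_check_relogin_flash_ddb - handle relogin timers for a flash DDB
 * @cls_sess: Pointer to the iscsi class session
 *
 * Called once per second from the timer routine; schedules a relogin
 * for offline flash DDB sessions when their retry timer expires.
 **/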
4454 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
4455 {
4456 	struct iscsi_session *sess;
4457 	struct ddb_entry *ddb_entry;
4458 	struct scsi_qla_host *ha;
4459 
4460 	sess = cls_sess->dd_data;
4461 	ddb_entry = sess->dd_data;
4462 	ha = ddb_entry->ha;
4463 
4464 	if (ddb_entry->ddb_type != FLASH_DDB)
4465 		return;
4466 
4467 	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
4468 	    !iscsi_is_session_online(cls_sess)) {
4469 		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
4470 		    INVALID_ENTRY) {
4471 			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
4472 					0) {
4473 				atomic_set(&ddb_entry->retry_relogin_timer,
4474 					   INVALID_ENTRY);
4475 				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4476 				set_bit(DF_RELOGIN, &ddb_entry->flags);
4477 				DEBUG2(ql4_printk(KERN_INFO, ha,
4478 				       "%s: index [%d] login device\n",
4479 					__func__, ddb_entry->fw_ddb_index));
4480 			} else
4481 				atomic_dec(&ddb_entry->retry_relogin_timer);
4482 		}
4483 	}
4484 
4485 	/* Wait for relogin to timeout */
4486 	if (atomic_read(&ddb_entry->relogin_timer) &&
4487 	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
4488 		/*
4489 		 * If the relogin times out and the device is
4490 		 * still NOT ONLINE then try and relogin again.
4491 		 */
4492 		if (!iscsi_is_session_online(cls_sess)) {
4493 			/* Reset retry relogin timer */
4494 			atomic_inc(&ddb_entry->relogin_retry_count);
4495 			DEBUG2(ql4_printk(KERN_INFO, ha,
4496 				"%s: index[%d] relogin timed out-retrying"
4497 				" relogin (%d), retry (%d)\n", __func__,
4498 				ddb_entry->fw_ddb_index,
4499 				atomic_read(&ddb_entry->relogin_retry_count),
4500 				ddb_entry->default_time2wait + 4));
4501 			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4502 			atomic_set(&ddb_entry->retry_relogin_timer,
4503 				   ddb_entry->default_time2wait + 4);
4504 		}
4505 	}
4506 }
4507 
4508 /**
4509  * qla4xxx_timer - checks every second for work to do.
4510  * @ha: Pointer to host adapter structure.
4511  **/
4512 static void qla4xxx_timer(struct scsi_qla_host *ha)
4513 {
4514 	int start_dpc = 0;
4515 	uint16_t w;
4516 
4517 	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
4518 
4519 	/* If we are in the middle of AER/EEH processing
4520 	 * skip any processing and reschedule the timer
4521 	 */
4522 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
4523 		mod_timer(&ha->timer, jiffies + HZ);
4524 		return;
4525 	}
4526 
4527 	/* Hardware read to trigger an EEH error during mailbox waits. */
4528 	if (!pci_channel_offline(ha->pdev))
4529 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
4530 
4531 	if (is_qla80XX(ha))
4532 		qla4_8xxx_watchdog(ha);
4533 
4534 	if (is_qla40XX(ha)) {
4535 		/* Check for heartbeat interval. */
4536 		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
4537 		    ha->heartbeat_interval != 0) {
4538 			ha->seconds_since_last_heartbeat++;
4539 			if (ha->seconds_since_last_heartbeat >
4540 			    ha->heartbeat_interval + 2)
4541 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
4542 		}
4543 	}
4544 
4545 	/* Process any deferred work. */
4546 	if (!list_empty(&ha->work_list))
4547 		start_dpc++;
4548 
4549 	/* Wakeup the dpc routine for this adapter, if needed. */
4550 	if (start_dpc ||
4551 	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4552 	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
4553 	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
4554 	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
4555 	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
4556 	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
4557 	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
4558 	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
4559 	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
4560 	     test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
4561 	     test_bit(DPC_AEN, &ha->dpc_flags)) {
4562 		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
4563 			      " - dpc flags = 0x%lx\n",
4564 			      ha->host_no, __func__, ha->dpc_flags));
4565 		qla4xxx_wake_dpc(ha);
4566 	}
4567 
4568 	/* Reschedule timer thread to call us back in one second */
4569 	mod_timer(&ha->timer, jiffies + HZ);
4570 
4571 	DEBUG2(ha->seconds_since_last_intr++);
4572 }
4573 
4574 /**
4575  * qla4xxx_cmd_wait - waits for all outstanding commands to complete
4576  * @ha: Pointer to host adapter structure.
4577  *
4578  * This routine stalls the driver until all outstanding commands are returned.
4579  * Caller must release the Hardware Lock prior to calling this routine.
4580  **/
4581 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
4582 {
4583 	uint32_t index = 0;
4584 	unsigned long flags;
4585 	struct scsi_cmnd *cmd;
4586 	unsigned long wtime;
4587 	uint32_t wtmo;
4588 
4589 	if (is_qla40XX(ha))
4590 		wtmo = WAIT_CMD_TOV;
4591 	else
4592 		wtmo = ha->nx_reset_timeout / 2;
4593 
4594 	wtime = jiffies + (wtmo * HZ);
4595 
4596 	DEBUG2(ql4_printk(KERN_INFO, ha,
4597 			  "Wait up to %u seconds for cmds to complete\n",
4598 			  wtmo));
4599 
4600 	while (!time_after_eq(jiffies, wtime)) {
4601 		spin_lock_irqsave(&ha->hardware_lock, flags);
4602 		/* Find a command that hasn't completed. */
4603 		for (index = 0; index < ha->host->can_queue; index++) {
4604 			cmd = scsi_host_find_tag(ha->host, index);
4605 			/*
4606 			 * We cannot just check if the index is valid,
4607 			 * because if we are run from the scsi eh, then
4608 			 * the scsi/block layer is going to prevent
4609 			 * the tag from being released.
4610 			 */
4611 			if (cmd != NULL && CMD_SP(cmd))
4612 				break;
4613 		}
4614 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
4615 
4616 		/* If No Commands are pending, wait is complete */
4617 		if (index == ha->host->can_queue)
4618 			return QLA_SUCCESS;
4619 
4620 		msleep(1000);
4621 	}
4622 	/* If we timed out on waiting for commands to come back
4623 	 * return ERROR. */
4624 	return QLA_ERROR;
4625 }
4626 
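/**
 * qla4xxx_hw_reset - issue a soft reset to the ISP4xxx hardware
 * @ha: Pointer to host adapter structure.
 *
 * Acquires the driver lock, clears any pending SCSI reset interrupt
 * and writes the soft reset bit in the control/status register.
 **/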
4627 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
4628 {
4629 	uint32_t ctrl_status;
4630 	unsigned long flags = 0;
4631 
4632 	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
4633 
4634 	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
4635 		return QLA_ERROR;
4636 
4637 	spin_lock_irqsave(&ha->hardware_lock, flags);
4638 
4639 	/*
4640 	 * If the SCSI Reset Interrupt bit is set, clear it.
4641 	 * Otherwise, the Soft Reset won't work.
4642 	 */
4643 	ctrl_status = readw(&ha->reg->ctrl_status);
4644 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
4645 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
4646 
4647 	/* Issue Soft Reset */
4648 	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
4649 	readl(&ha->reg->ctrl_status);
4650 
4651 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4652 	return QLA_SUCCESS;
4653 }
4654 
4655 /**
4656  * qla4xxx_soft_reset - performs soft reset.
4657  * @ha: Pointer to host adapter structure.
4658  **/
4659 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
4660 {
4661 	uint32_t max_wait_time;
4662 	unsigned long flags = 0;
4663 	int status;
4664 	uint32_t ctrl_status;
4665 
4666 	status = qla4xxx_hw_reset(ha);
4667 	if (status != QLA_SUCCESS)
4668 		return status;
4669 
4670 	status = QLA_ERROR;
4671 	/* Wait until the Network Reset Intr bit is cleared */
4672 	max_wait_time = RESET_INTR_TOV;
4673 	do {
4674 		spin_lock_irqsave(&ha->hardware_lock, flags);
4675 		ctrl_status = readw(&ha->reg->ctrl_status);
4676 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
4677 
4678 		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
4679 			break;
4680 
4681 		msleep(1000);
4682 	} while ((--max_wait_time));
4683 
4684 	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
4685 		DEBUG2(printk(KERN_WARNING
4686 			      "scsi%ld: Network Reset Intr not cleared by "
4687 			      "Network function, clearing it now!\n",
4688 			      ha->host_no));
4689 		spin_lock_irqsave(&ha->hardware_lock, flags);
4690 		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
4691 		readl(&ha->reg->ctrl_status);
4692 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
4693 	}
4694 
4695 	/* Wait until the firmware tells us the Soft Reset is done */
4696 	max_wait_time = SOFT_RESET_TOV;
4697 	do {
4698 		spin_lock_irqsave(&ha->hardware_lock, flags);
4699 		ctrl_status = readw(&ha->reg->ctrl_status);
4700 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
4701 
4702 		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
4703 			status = QLA_SUCCESS;
4704 			break;
4705 		}
4706 
4707 		msleep(1000);
4708 	} while ((--max_wait_time));
4709 
4710 	/*
4711 	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
4712 	 * after the soft reset has taken place.
4713 	 */
4714 	spin_lock_irqsave(&ha->hardware_lock, flags);
4715 	ctrl_status = readw(&ha->reg->ctrl_status);
4716 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
4717 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
4718 		readl(&ha->reg->ctrl_status);
4719 	}
4720 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4721 
4722 	/* If the soft reset fails, the BIOS on the other
4723 	 * function is most probably also enabled.
4724 	 * Since initialization is sequential, the other function
4725 	 * won't be able to acknowledge the soft reset.
4726 	 * Issue a force soft reset to work around this scenario.
4727 	 */
4728 	if (max_wait_time == 0) {
4729 		/* Issue Force Soft Reset */
4730 		spin_lock_irqsave(&ha->hardware_lock, flags);
4731 		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
4732 		readl(&ha->reg->ctrl_status);
4733 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
4734 		/* Wait until the firmware tells us the Soft Reset is done */
4735 		max_wait_time = SOFT_RESET_TOV;
4736 		do {
4737 			spin_lock_irqsave(&ha->hardware_lock, flags);
4738 			ctrl_status = readw(&ha->reg->ctrl_status);
4739 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
4740 
4741 			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
4742 				status = QLA_SUCCESS;
4743 				break;
4744 			}
4745 
4746 			msleep(1000);
4747 		} while ((--max_wait_time));
4748 	}
4749 
4750 	return status;
4751 }
4752 
4753 /**
4754  * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
4755  * @ha: Pointer to host adapter structure.
4756  * @res: returned scsi status
4757  *
4758  * This routine is called just prior to a HARD RESET to return all
4759  * outstanding commands back to the Operating System.
4760  * Caller should make sure that the following locks are released
4761  * before this calling routine: Hardware lock, and io_request_lock.
4762  **/
4763 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
4764 {
4765 	struct srb *srb;
4766 	int i;
4767 	unsigned long flags;
4768 
4769 	spin_lock_irqsave(&ha->hardware_lock, flags);
4770 	for (i = 0; i < ha->host->can_queue; i++) {
4771 		srb = qla4xxx_del_from_active_array(ha, i);
4772 		if (srb != NULL) {
4773 			srb->cmd->result = res;
4774 			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
4775 		}
4776 	}
4777 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4778 }
4779 
4780 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
4781 {
4782 	clear_bit(AF_ONLINE, &ha->flags);
4783 
4784 	/* Disable the board */
4785 	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
4786 
4787 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4788 	qla4xxx_mark_all_devices_missing(ha);
4789 	clear_bit(AF_INIT_DONE, &ha->flags);
4790 }
4791 
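/**
 * qla4xxx_fail_session - mark an iscsi session as failed
 * @cls_session: Pointer to the iscsi class session
 *
 * Sets the DDB state to DDB_DS_SESSION_FAILED, then blocks flash DDB
 * sessions or reports a connection failure for non-flash sessions.
 **/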
4792 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
4793 {
4794 	struct iscsi_session *sess;
4795 	struct ddb_entry *ddb_entry;
4796 
4797 	sess = cls_session->dd_data;
4798 	ddb_entry = sess->dd_data;
4799 	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
4800 
4801 	if (ddb_entry->ddb_type == FLASH_DDB)
4802 		iscsi_block_session(ddb_entry->sess);
4803 	else
4804 		iscsi_session_failure(cls_session->dd_data,
4805 				      ISCSI_ERR_CONN_FAILED);
4806 }
4807 
4808 /**
4809  * qla4xxx_recover_adapter - recovers adapter after a fatal error
4810  * @ha: Pointer to host adapter structure.
4811  **/
4812 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
4813 {
4814 	int status = QLA_ERROR;
4815 	uint8_t reset_chip = 0;
4816 	uint32_t dev_state;
4817 	unsigned long wait;
4818 
4819 	/* Stall incoming I/O until we are done */
4820 	scsi_block_requests(ha->host);
4821 	clear_bit(AF_ONLINE, &ha->flags);
4822 	clear_bit(AF_LINK_UP, &ha->flags);
4823 
4824 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
4825 
4826 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4827 
4828 	if ((is_qla8032(ha) || is_qla8042(ha)) &&
4829 	    !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
4830 		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
4831 			   __func__);
4832 		/* disable pause frame for ISP83xx */
4833 		qla4_83xx_disable_pause(ha);
4834 	}
4835 
4836 	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
4837 
4838 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
4839 		reset_chip = 1;
4840 
4841 	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific),
4842 	 * do not reset the adapter; jump to initialize_adapter */
4843 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
4844 		status = QLA_SUCCESS;
4845 		goto recover_ha_init_adapter;
4846 	}
4847 
4848 	/* For the ISP-8xxx adapter, issue a stop_firmware if invoked
4849 	 * from eh_host_reset or ioctl module */
4850 	if (is_qla80XX(ha) && !reset_chip &&
4851 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
4852 
4853 		DEBUG2(ql4_printk(KERN_INFO, ha,
4854 		    "scsi%ld: %s - Performing stop_firmware...\n",
4855 		    ha->host_no, __func__));
4856 		status = ha->isp_ops->reset_firmware(ha);
4857 		if (status == QLA_SUCCESS) {
4858 			ha->isp_ops->disable_intrs(ha);
4859 			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4860 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4861 		} else {
4862 			/* If the stop_firmware fails then
4863 			 * reset the entire chip */
4864 			reset_chip = 1;
4865 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
4866 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
4867 		}
4868 	}
4869 
4870 	/* Issue full chip reset if recovering from a catastrophic error,
4871 	 * or if stop_firmware fails for ISP-8xxx.
4872 	 * This is the default case for ISP-4xxx */
4873 	if (is_qla40XX(ha) || reset_chip) {
4874 		if (is_qla40XX(ha))
4875 			goto chip_reset;
4876 
4877 		/* Check if the 8XXX firmware is alive or not.
4878 		 * We may have arrived here from NEED_RESET
4879 		 * detection only */
4880 		if (test_bit(AF_FW_RECOVERY, &ha->flags))
4881 			goto chip_reset;
4882 
4883 		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
4884 		while (time_before(jiffies, wait)) {
4885 			if (qla4_8xxx_check_fw_alive(ha)) {
4886 				qla4xxx_mailbox_premature_completion(ha);
4887 				break;
4888 			}
4889 
4890 			set_current_state(TASK_UNINTERRUPTIBLE);
4891 			schedule_timeout(HZ);
4892 		}
4893 chip_reset:
4894 		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
4895 			qla4xxx_cmd_wait(ha);
4896 
4897 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4898 		DEBUG2(ql4_printk(KERN_INFO, ha,
4899 		    "scsi%ld: %s - Performing chip reset..\n",
4900 		    ha->host_no, __func__));
4901 		status = ha->isp_ops->reset_chip(ha);
4902 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4903 	}
4904 
4905 	/* Flush any pending ddb changed AENs */
4906 	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4907 
4908 recover_ha_init_adapter:
4909 	/* Upon successful firmware/chip reset, re-initialize the adapter */
4910 	if (status == QLA_SUCCESS) {
4911 		/* For ISP-4xxx, force function 1 to always initialize
4912 		 * before function 3 to prevent both functions from
4913 		 * stepping on top of each other */
4914 		if (is_qla40XX(ha) && (ha->mac_index == 3))
4915 			ssleep(6);
4916 
4917 		/* NOTE: AF_ONLINE flag set upon successful completion of
4918 		 * qla4xxx_initialize_adapter */
4919 		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
4920 		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
4921 			status = qla4_8xxx_check_init_adapter_retry(ha);
4922 			if (status == QLA_ERROR) {
4923 				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
4924 					   ha->host_no, __func__);
4925 				qla4xxx_dead_adapter_cleanup(ha);
4926 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4927 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4928 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
4929 					  &ha->dpc_flags);
4930 				goto exit_recover;
4931 			}
4932 		}
4933 	}
4934 
4935 	/* Retry failed adapter initialization, if necessary.
4936 	 * Do not retry initialize_adapter for the RESET_HA_INTR (ISP-4xxx
4937 	 * specific) case, to prevent ping-pong resets between functions */
4938 	if (!test_bit(AF_ONLINE, &ha->flags) &&
4939 	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
4940 		/* Adapter initialization failed, see if we can retry
4941 		 * resetting the ha.
4942 		 * Since we don't want to block the DPC for too long
4943 		 * with multiple resets in the same thread,
4944 		 * utilize DPC to retry */
4945 		if (is_qla80XX(ha)) {
4946 			ha->isp_ops->idc_lock(ha);
4947 			dev_state = qla4_8xxx_rd_direct(ha,
4948 							QLA8XXX_CRB_DEV_STATE);
4949 			ha->isp_ops->idc_unlock(ha);
4950 			if (dev_state == QLA8XXX_DEV_FAILED) {
4951 				ql4_printk(KERN_INFO, ha, "%s: don't retry "
4952 					   "recover adapter. H/W is in Failed "
4953 					   "state\n", __func__);
4954 				qla4xxx_dead_adapter_cleanup(ha);
4955 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4956 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4957 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
4958 						&ha->dpc_flags);
4959 				status = QLA_ERROR;
4960 
4961 				goto exit_recover;
4962 			}
4963 		}
4964 
4965 		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
4966 			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
4967 			DEBUG2(printk("scsi%ld: recover adapter - retrying "
4968 				      "(%d) more times\n", ha->host_no,
4969 				      ha->retry_reset_ha_cnt));
4970 			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4971 			status = QLA_ERROR;
4972 		} else {
4973 			if (ha->retry_reset_ha_cnt > 0) {
4974 				/* Schedule another Reset HA--DPC will retry */
4975 				ha->retry_reset_ha_cnt--;
4976 				DEBUG2(printk("scsi%ld: recover adapter - "
4977 					      "retry remaining %d\n",
4978 					      ha->host_no,
4979 					      ha->retry_reset_ha_cnt));
4980 				status = QLA_ERROR;
4981 			}
4982 
4983 			if (ha->retry_reset_ha_cnt == 0) {
4984 				/* Recover adapter retries have been exhausted.
4985 				 * Adapter DEAD */
4986 				DEBUG2(printk("scsi%ld: recover adapter "
4987 					      "failed - board disabled\n",
4988 					      ha->host_no));
4989 				qla4xxx_dead_adapter_cleanup(ha);
4990 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4991 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4992 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
4993 					  &ha->dpc_flags);
4994 				status = QLA_ERROR;
4995 			}
4996 		}
4997 	} else {
4998 		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4999 		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5000 		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
5001 	}
5002 
5003 exit_recover:
5004 	ha->adapter_error_count++;
5005 
5006 	if (test_bit(AF_ONLINE, &ha->flags))
5007 		ha->isp_ops->enable_intrs(ha);
5008 
5009 	scsi_unblock_requests(ha->host);
5010 
5011 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5012 	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
5013 	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
5014 
5015 	return status;
5016 }
5017 
5018 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
5019 {
5020 	struct iscsi_session *sess;
5021 	struct ddb_entry *ddb_entry;
5022 	struct scsi_qla_host *ha;
5023 
5024 	sess = cls_session->dd_data;
5025 	ddb_entry = sess->dd_data;
5026 	ha = ddb_entry->ha;
5027 	if (!iscsi_is_session_online(cls_session)) {
5028 		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
5029 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5030 				   " unblock session\n", ha->host_no, __func__,
5031 				   ddb_entry->fw_ddb_index);
5032 			iscsi_unblock_session(ddb_entry->sess);
5033 		} else {
5034 			/* Trigger relogin */
5035 			if (ddb_entry->ddb_type == FLASH_DDB) {
5036 				if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
5037 				      test_bit(DF_DISABLE_RELOGIN,
5038 					       &ddb_entry->flags)))
5039 					qla4xxx_arm_relogin_timer(ddb_entry);
5040 			} else
5041 				iscsi_session_failure(cls_session->dd_data,
5042 						      ISCSI_ERR_CONN_FAILED);
5043 		}
5044 	}
5045 }
5046 
5047 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
5048 {
5049 	struct iscsi_session *sess;
5050 	struct ddb_entry *ddb_entry;
5051 	struct scsi_qla_host *ha;
5052 
5053 	sess = cls_session->dd_data;
5054 	ddb_entry = sess->dd_data;
5055 	ha = ddb_entry->ha;
5056 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5057 		   " unblock session\n", ha->host_no, __func__,
5058 		   ddb_entry->fw_ddb_index);
5059 
5060 	iscsi_unblock_session(ddb_entry->sess);
5061 
5062 	/* Start scan target */
5063 	if (test_bit(AF_ONLINE, &ha->flags)) {
5064 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5065 			   " start scan\n", ha->host_no, __func__,
5066 			   ddb_entry->fw_ddb_index);
5067 		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
5068 	}
5069 	return QLA_SUCCESS;
5070 }
5071 
5072 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
5073 {
5074 	struct iscsi_session *sess;
5075 	struct ddb_entry *ddb_entry;
5076 	struct scsi_qla_host *ha;
5077 	int status = QLA_SUCCESS;
5078 
5079 	sess = cls_session->dd_data;
5080 	ddb_entry = sess->dd_data;
5081 	ha = ddb_entry->ha;
5082 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5083 		   " unblock user space session\n", ha->host_no, __func__,
5084 		   ddb_entry->fw_ddb_index);
5085 
5086 	if (!iscsi_is_session_online(cls_session)) {
5087 		iscsi_conn_start(ddb_entry->conn);
5088 		iscsi_conn_login_event(ddb_entry->conn,
5089 				       ISCSI_CONN_STATE_LOGGED_IN);
5090 	} else {
5091 		ql4_printk(KERN_INFO, ha,
5092 			   "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
5093 			   ha->host_no, __func__, ddb_entry->fw_ddb_index,
5094 			   cls_session->sid);
5095 		status = QLA_ERROR;
5096 	}
5097 
5098 	return status;
5099 }
5100 
5101 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
5102 {
5103 	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
5104 }
5105 
5106 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
5107 {
5108 	uint16_t relogin_timer;
5109 	struct iscsi_session *sess;
5110 	struct ddb_entry *ddb_entry;
5111 	struct scsi_qla_host *ha;
5112 
5113 	sess = cls_sess->dd_data;
5114 	ddb_entry = sess->dd_data;
5115 	ha = ddb_entry->ha;
5116 
5117 	relogin_timer = max(ddb_entry->default_relogin_timeout,
5118 			    (uint16_t)RELOGIN_TOV);
5119 	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
5120 
5121 	DEBUG2(ql4_printk(KERN_INFO, ha,
5122 			  "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
5123 			  ddb_entry->fw_ddb_index, relogin_timer));
5124 
5125 	qla4xxx_login_flash_ddb(cls_sess);
5126 }
5127 
5128 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
5129 {
5130 	struct iscsi_session *sess;
5131 	struct ddb_entry *ddb_entry;
5132 	struct scsi_qla_host *ha;
5133 
5134 	sess = cls_sess->dd_data;
5135 	ddb_entry = sess->dd_data;
5136 	ha = ddb_entry->ha;
5137 
5138 	if (!(ddb_entry->ddb_type == FLASH_DDB))
5139 		return;
5140 
5141 	if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
5142 		return;
5143 
5144 	if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
5145 	    !iscsi_is_session_online(cls_sess)) {
5146 		DEBUG2(ql4_printk(KERN_INFO, ha,
5147 				  "relogin issued\n"));
5148 		qla4xxx_relogin_flash_ddb(cls_sess);
5149 	}
5150 }
5151 
5152 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
5153 {
5154 	if (ha->dpc_thread)
5155 		queue_work(ha->dpc_thread, &ha->dpc_work);
5156 }
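
/* Hedged sketch (disabled with #if 0, illustration only): the pattern used
 * throughout this driver to request recovery work from the DPC thread is to
 * set a DPC flag and then wake the workqueue.
 */
#if 0
static void example_request_reset(struct scsi_qla_host *ha)
{
	set_bit(DPC_RESET_HA, &ha->dpc_flags);	/* ask the DPC to recover */
	qla4xxx_wake_dpc(ha);		/* queue_work() on ha->dpc_thread */
}
#endif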
5157 
5158 static struct qla4_work_evt *
5159 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
5160 		   enum qla4_work_type type)
5161 {
5162 	struct qla4_work_evt *e;
5163 	uint32_t size = sizeof(struct qla4_work_evt) + data_size;
5164 
5165 	e = kzalloc(size, GFP_ATOMIC);
5166 	if (!e)
5167 		return NULL;
5168 
5169 	INIT_LIST_HEAD(&e->list);
5170 	e->type = type;
5171 	return e;
5172 }
5173 
5174 static void qla4xxx_post_work(struct scsi_qla_host *ha,
5175 			     struct qla4_work_evt *e)
5176 {
5177 	unsigned long flags;
5178 
5179 	spin_lock_irqsave(&ha->work_lock, flags);
5180 	list_add_tail(&e->list, &ha->work_list);
5181 	spin_unlock_irqrestore(&ha->work_lock, flags);
5182 	qla4xxx_wake_dpc(ha);
5183 }
5184 
5185 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
5186 			  enum iscsi_host_event_code aen_code,
5187 			  uint32_t data_size, uint8_t *data)
5188 {
5189 	struct qla4_work_evt *e;
5190 
5191 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
5192 	if (!e)
5193 		return QLA_ERROR;
5194 
5195 	e->u.aen.code = aen_code;
5196 	e->u.aen.data_size = data_size;
5197 	memcpy(e->u.aen.data, data, data_size);
5198 
5199 	qla4xxx_post_work(ha, e);
5200 
5201 	return QLA_SUCCESS;
5202 }
5203 
5204 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
5205 			       uint32_t status, uint32_t pid,
5206 			       uint32_t data_size, uint8_t *data)
5207 {
5208 	struct qla4_work_evt *e;
5209 
5210 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
5211 	if (!e)
5212 		return QLA_ERROR;
5213 
5214 	e->u.ping.status = status;
5215 	e->u.ping.pid = pid;
5216 	e->u.ping.data_size = data_size;
5217 	memcpy(e->u.ping.data, data, data_size);
5218 
5219 	qla4xxx_post_work(ha, e);
5220 
5221 	return QLA_SUCCESS;
5222 }
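
/* Hedged, illustrative sketch (not compiled): how a completion path could
 * hand a ping result off to the DPC via the work list.  The status, pid and
 * payload values below are hypothetical.
 */
#if 0
static void example_post_ping_status(struct scsi_qla_host *ha)
{
	uint8_t data[4] = { 0 };

	/* Queues a QLA4_EVENT_PING_STATUS event; qla4xxx_do_work() later
	 * forwards it to user space via iscsi_ping_comp_event(). */
	if (qla4xxx_post_ping_evt_work(ha, QLA_SUCCESS, 1 /* pid */,
				       sizeof(data), data) != QLA_SUCCESS)
		ql4_printk(KERN_WARNING, ha, "ping event alloc failed\n");
}
#endif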
5223 
5224 static void qla4xxx_do_work(struct scsi_qla_host *ha)
5225 {
5226 	struct qla4_work_evt *e, *tmp;
5227 	unsigned long flags;
5228 	LIST_HEAD(work);
5229 
5230 	spin_lock_irqsave(&ha->work_lock, flags);
5231 	list_splice_init(&ha->work_list, &work);
5232 	spin_unlock_irqrestore(&ha->work_lock, flags);
5233 
5234 	list_for_each_entry_safe(e, tmp, &work, list) {
5235 		list_del_init(&e->list);
5236 
5237 		switch (e->type) {
5238 		case QLA4_EVENT_AEN:
5239 			iscsi_post_host_event(ha->host_no,
5240 					      &qla4xxx_iscsi_transport,
5241 					      e->u.aen.code,
5242 					      e->u.aen.data_size,
5243 					      e->u.aen.data);
5244 			break;
5245 		case QLA4_EVENT_PING_STATUS:
5246 			iscsi_ping_comp_event(ha->host_no,
5247 					      &qla4xxx_iscsi_transport,
5248 					      e->u.ping.status,
5249 					      e->u.ping.pid,
5250 					      e->u.ping.data_size,
5251 					      e->u.ping.data);
5252 			break;
5253 		default:
5254 			ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
5255 				   "supported", e->type);
5256 		}
5257 		kfree(e);
5258 	}
5259 }
5260 
5261 /**
5262  * qla4xxx_do_dpc - dpc routine
5263  * @work: Context from which to obtain a pointer to the adapter structure.
5264  *
5265  * This routine is a task that is scheduled by the interrupt handler
5266  * to perform the background processing for interrupts.  We put it
5267  * on a work queue that is consumed whenever the scheduler runs; that
5268  * way it may do anything a process context can (e.g. sleep).  In fact,
5269  * the mid-level tries to sleep when it reaches the driver threshold
5270  * "host->can_queue". This can cause a panic if we were in our interrupt code.
5271  **/
5272 static void qla4xxx_do_dpc(struct work_struct *work)
5273 {
5274 	struct scsi_qla_host *ha =
5275 		container_of(work, struct scsi_qla_host, dpc_work);
5276 	int status = QLA_ERROR;
5277 
5278 	DEBUG2(ql4_printk(KERN_INFO, ha,
5279 			  "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
5280 			  ha->host_no, __func__, ha->flags, ha->dpc_flags));
5281 
5282 	/* Initialization not yet finished. Don't do anything yet. */
5283 	if (!test_bit(AF_INIT_DONE, &ha->flags))
5284 		return;
5285 
5286 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
5287 		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
5288 		    ha->host_no, __func__, ha->flags));
5289 		return;
5290 	}
5291 
5292 	/* post events to application */
5293 	qla4xxx_do_work(ha);
5294 
5295 	if (is_qla80XX(ha)) {
5296 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
5297 			if (is_qla8032(ha) || is_qla8042(ha)) {
5298 				ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
5299 					   __func__);
5300 				/* disable pause frame for ISP83xx */
5301 				qla4_83xx_disable_pause(ha);
5302 			}
5303 
5304 			ha->isp_ops->idc_lock(ha);
5305 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
5306 					    QLA8XXX_DEV_FAILED);
5307 			ha->isp_ops->idc_unlock(ha);
5308 			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
5309 			qla4_8xxx_device_state_handler(ha);
5310 		}
5311 
5312 		if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
5313 			if (is_qla8042(ha)) {
5314 				if (ha->idc_info.info2 &
5315 				    ENABLE_INTERNAL_LOOPBACK) {
5316 					ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
5317 						   __func__);
5318 					status = qla4_84xx_config_acb(ha,
5319 							    ACB_CONFIG_DISABLE);
5320 					if (status != QLA_SUCCESS) {
5321 						ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
5322 							   __func__);
5323 					}
5324 				}
5325 			}
5326 			qla4_83xx_post_idc_ack(ha);
5327 			clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
5328 		}
5329 
5330 		if (is_qla8042(ha) &&
5331 		    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
5332 			ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
5333 				   __func__);
5334 			if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
5335 			    QLA_SUCCESS) {
5336 				ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
5337 					   __func__);
5338 			}
5339 			clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
5340 		}
5341 
5342 		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
5343 			qla4_8xxx_need_qsnt_handler(ha);
5344 		}
5345 	}
5346 
5347 	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
5348 	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
5349 	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
5350 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
5351 		if ((is_qla8022(ha) && ql4xdontresethba) ||
5352 		    ((is_qla8032(ha) || is_qla8042(ha)) &&
5353 		     qla4_83xx_idc_dontreset(ha))) {
5354 			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5355 			    ha->host_no, __func__));
5356 			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5357 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
5358 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5359 			goto dpc_post_reset_ha;
5360 		}
5361 		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
5362 		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
5363 			qla4xxx_recover_adapter(ha);
5364 
5365 		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
5366 			uint8_t wait_time = RESET_INTR_TOV;
5367 
5368 			while ((readw(&ha->reg->ctrl_status) &
5369 				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
5370 				if (--wait_time == 0)
5371 					break;
5372 				msleep(1000);
5373 			}
5374 			if (wait_time == 0)
5375 				DEBUG2(printk("scsi%ld: %s: SR|FSR "
5376 					      "bit not cleared-- resetting\n",
5377 					      ha->host_no, __func__));
5378 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
5379 			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
5380 				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
5381 				status = qla4xxx_recover_adapter(ha);
5382 			}
5383 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
5384 			if (status == QLA_SUCCESS)
5385 				ha->isp_ops->enable_intrs(ha);
5386 		}
5387 	}
5388 
5389 dpc_post_reset_ha:
5390 	/* ---- process AEN? --- */
5391 	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
5392 		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
5393 
5394 	/* ---- Get DHCP IP Address? --- */
5395 	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
5396 		qla4xxx_get_dhcp_ip_address(ha);
5397 
5398 	/* ---- relogin device? --- */
5399 	if (adapter_up(ha) &&
5400 	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
5401 		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
5402 	}
5403 
5404 	/* ---- link change? --- */
5405 	if (!test_bit(AF_LOOPBACK, &ha->flags) &&
5406 	    test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
5407 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
5408 			/* ---- link down? --- */
5409 			qla4xxx_mark_all_devices_missing(ha);
5410 		} else {
5411 			/* ---- link up? --- *
5412 			 * F/W will auto login to all devices ONLY ONCE after
5413 			 * link up during driver initialization and runtime
5414 			 * fatal error recovery.  Therefore, the driver must
5415 			 * manually relogin to devices when recovering from
5416 			 * connection failures, logouts, expired KATO, etc. */
5417 			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
5418 				qla4xxx_build_ddb_list(ha, ha->is_reset);
5419 				iscsi_host_for_each_session(ha->host,
5420 						qla4xxx_login_flash_ddb);
5421 			} else
5422 				qla4xxx_relogin_all_devices(ha);
5423 		}
5424 	}
5425 	if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
5426 		if (qla4xxx_sysfs_ddb_export(ha))
5427 			ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
5428 				   __func__);
5429 	}
5430 }
5431 
5432 /**
5433  * qla4xxx_free_adapter - release the adapter
5434  * @ha: pointer to adapter structure
5435  **/
5436 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
5437 {
5438 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5439 
5440 	/* Turn-off interrupts on the card. */
5441 	ha->isp_ops->disable_intrs(ha);
5442 
5443 	if (is_qla40XX(ha)) {
5444 		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
5445 		       &ha->reg->ctrl_status);
5446 		readl(&ha->reg->ctrl_status);
5447 	} else if (is_qla8022(ha)) {
5448 		writel(0, &ha->qla4_82xx_reg->host_int);
5449 		readl(&ha->qla4_82xx_reg->host_int);
5450 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
5451 		writel(0, &ha->qla4_83xx_reg->risc_intr);
5452 		readl(&ha->qla4_83xx_reg->risc_intr);
5453 	}
5454 
5455 	/* Remove timer thread, if present */
5456 	if (ha->timer_active)
5457 		qla4xxx_stop_timer(ha);
5458 
5459 	/* Kill the kernel thread for this host */
5460 	if (ha->dpc_thread)
5461 		destroy_workqueue(ha->dpc_thread);
5462 
5463 	/* Kill the task work queue for this host */
5464 	if (ha->task_wq)
5465 		destroy_workqueue(ha->task_wq);
5466 
5467 	/* Put firmware in known state */
5468 	ha->isp_ops->reset_firmware(ha);
5469 
5470 	if (is_qla80XX(ha)) {
5471 		ha->isp_ops->idc_lock(ha);
5472 		qla4_8xxx_clear_drv_active(ha);
5473 		ha->isp_ops->idc_unlock(ha);
5474 	}
5475 
5476 	/* Detach interrupts */
5477 	qla4xxx_free_irqs(ha);
5478 
5479 	/* free extra memory */
5480 	qla4xxx_mem_free(ha);
5481 }
5482 
5483 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
5484 {
5485 	int status = 0;
5486 	unsigned long mem_base, mem_len, db_base, db_len;
5487 	struct pci_dev *pdev = ha->pdev;
5488 
5489 	status = pci_request_regions(pdev, DRIVER_NAME);
5490 	if (status) {
5491 		printk(KERN_WARNING
5492 		    "scsi(%ld) Failed to reserve PIO regions (%s) "
5493 		    "status=%d\n", ha->host_no, pci_name(pdev), status);
5494 		goto iospace_error_exit;
5495 	}
5496 
5497 	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
5498 	    __func__, pdev->revision));
5499 	ha->revision_id = pdev->revision;
5500 
5501 	/* remap phys address */
5502 	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
5503 	mem_len = pci_resource_len(pdev, 0);
5504 	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
5505 	    __func__, mem_base, mem_len));
5506 
5507 	/* mapping of pcibase pointer */
5508 	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
5509 	if (!ha->nx_pcibase) {
5510 		printk(KERN_ERR
5511 		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
5512 		pci_release_regions(ha->pdev);
5513 		goto iospace_error_exit;
5514 	}
5515 
5516 	/* Mapping of IO base pointer and doorbell read/write pointers */
5517 
5518 	/* mapping of IO base pointer */
5519 	if (is_qla8022(ha)) {
5520 		ha->qla4_82xx_reg = (struct device_reg_82xx  __iomem *)
5521 				    ((uint8_t *)ha->nx_pcibase + 0xbc000 +
5522 				     (ha->pdev->devfn << 11));
5523 		ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
5524 				    QLA82XX_CAM_RAM_DB2);
5525 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
5526 		ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
5527 				    ((uint8_t *)ha->nx_pcibase);
5528 	}
5529 
5530 	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
5531 	db_len = pci_resource_len(pdev, 4);
5532 
5533 	return 0;
5534 iospace_error_exit:
5535 	return -ENOMEM;
5536 }
5537 
5538 /**
5539  * qla4xxx_iospace_config - maps registers
5540  * @ha: pointer to adapter structure
5541  *
5542  * This routine maps the HBA's registers from the PCI address space
5543  * into the kernel virtual address space for memory-mapped I/O.
5544  **/
5545 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
5546 {
5547 	unsigned long pio, pio_len, pio_flags;
5548 	unsigned long mmio, mmio_len, mmio_flags;
5549 
5550 	pio = pci_resource_start(ha->pdev, 0);
5551 	pio_len = pci_resource_len(ha->pdev, 0);
5552 	pio_flags = pci_resource_flags(ha->pdev, 0);
5553 	if (pio_flags & IORESOURCE_IO) {
5554 		if (pio_len < MIN_IOBASE_LEN) {
5555 			ql4_printk(KERN_WARNING, ha,
5556 				"Invalid PCI I/O region size\n");
5557 			pio = 0;
5558 		}
5559 	} else {
5560 		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
5561 		pio = 0;
5562 	}
5563 
5564 	/* Use MMIO operations for all accesses. */
5565 	mmio = pci_resource_start(ha->pdev, 1);
5566 	mmio_len = pci_resource_len(ha->pdev, 1);
5567 	mmio_flags = pci_resource_flags(ha->pdev, 1);
5568 
5569 	if (!(mmio_flags & IORESOURCE_MEM)) {
5570 		ql4_printk(KERN_ERR, ha,
5571 		    "region #0 not an MMIO resource, aborting\n");
5572 
5573 		goto iospace_error_exit;
5574 	}
5575 
5576 	if (mmio_len < MIN_IOBASE_LEN) {
5577 		ql4_printk(KERN_ERR, ha,
5578 		    "Invalid PCI mem region size, aborting\n");
5579 		goto iospace_error_exit;
5580 	}
5581 
5582 	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
5583 		ql4_printk(KERN_WARNING, ha,
5584 		    "Failed to reserve PIO/MMIO regions\n");
5585 
5586 		goto iospace_error_exit;
5587 	}
5588 
5589 	ha->pio_address = pio;
5590 	ha->pio_length = pio_len;
5591 	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
5592 	if (!ha->reg) {
5593 		ql4_printk(KERN_ERR, ha,
5594 		    "cannot remap MMIO, aborting\n");
5595 
5596 		goto iospace_error_exit;
5597 	}
5598 
5599 	return 0;
5600 
5601 iospace_error_exit:
5602 	return -ENOMEM;
5603 }
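
/* Hedged sketch (disabled, illustrative only): once ha->reg is mapped by
 * qla4xxx_iospace_config(), ISP-4xxx register access goes through
 * readw()/writel() under ha->hardware_lock, as in the reset paths earlier
 * in this file.
 */
#if 0
static uint16_t example_read_ctrl_status(struct scsi_qla_host *ha)
{
	unsigned long flags;
	uint16_t ctrl_status;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ctrl_status = readw(&ha->reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return ctrl_status;
}
#endif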
5604 
5605 static struct isp_operations qla4xxx_isp_ops = {
5606 	.iospace_config         = qla4xxx_iospace_config,
5607 	.pci_config             = qla4xxx_pci_config,
5608 	.disable_intrs          = qla4xxx_disable_intrs,
5609 	.enable_intrs           = qla4xxx_enable_intrs,
5610 	.start_firmware         = qla4xxx_start_firmware,
5611 	.intr_handler           = qla4xxx_intr_handler,
5612 	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
5613 	.reset_chip             = qla4xxx_soft_reset,
5614 	.reset_firmware         = qla4xxx_hw_reset,
5615 	.queue_iocb             = qla4xxx_queue_iocb,
5616 	.complete_iocb          = qla4xxx_complete_iocb,
5617 	.rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
5618 	.rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
5619 	.get_sys_info           = qla4xxx_get_sys_info,
5620 	.queue_mailbox_command	= qla4xxx_queue_mbox_cmd,
5621 	.process_mailbox_interrupt = qla4xxx_process_mbox_intr,
5622 };
5623 
5624 static struct isp_operations qla4_82xx_isp_ops = {
5625 	.iospace_config         = qla4_8xxx_iospace_config,
5626 	.pci_config             = qla4_8xxx_pci_config,
5627 	.disable_intrs          = qla4_82xx_disable_intrs,
5628 	.enable_intrs           = qla4_82xx_enable_intrs,
5629 	.start_firmware         = qla4_8xxx_load_risc,
5630 	.restart_firmware	= qla4_82xx_try_start_fw,
5631 	.intr_handler           = qla4_82xx_intr_handler,
5632 	.interrupt_service_routine = qla4_82xx_interrupt_service_routine,
5633 	.need_reset		= qla4_8xxx_need_reset,
5634 	.reset_chip             = qla4_82xx_isp_reset,
5635 	.reset_firmware         = qla4_8xxx_stop_firmware,
5636 	.queue_iocb             = qla4_82xx_queue_iocb,
5637 	.complete_iocb          = qla4_82xx_complete_iocb,
5638 	.rd_shdw_req_q_out      = qla4_82xx_rd_shdw_req_q_out,
5639 	.rd_shdw_rsp_q_in       = qla4_82xx_rd_shdw_rsp_q_in,
5640 	.get_sys_info           = qla4_8xxx_get_sys_info,
5641 	.rd_reg_direct		= qla4_82xx_rd_32,
5642 	.wr_reg_direct		= qla4_82xx_wr_32,
5643 	.rd_reg_indirect	= qla4_82xx_md_rd_32,
5644 	.wr_reg_indirect	= qla4_82xx_md_wr_32,
5645 	.idc_lock		= qla4_82xx_idc_lock,
5646 	.idc_unlock		= qla4_82xx_idc_unlock,
5647 	.rom_lock_recovery	= qla4_82xx_rom_lock_recovery,
5648 	.queue_mailbox_command	= qla4_82xx_queue_mbox_cmd,
5649 	.process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
5650 };
5651 
5652 static struct isp_operations qla4_83xx_isp_ops = {
5653 	.iospace_config		= qla4_8xxx_iospace_config,
5654 	.pci_config		= qla4_8xxx_pci_config,
5655 	.disable_intrs		= qla4_83xx_disable_intrs,
5656 	.enable_intrs		= qla4_83xx_enable_intrs,
5657 	.start_firmware		= qla4_8xxx_load_risc,
5658 	.restart_firmware	= qla4_83xx_start_firmware,
5659 	.intr_handler		= qla4_83xx_intr_handler,
5660 	.interrupt_service_routine = qla4_83xx_interrupt_service_routine,
5661 	.need_reset		= qla4_8xxx_need_reset,
5662 	.reset_chip		= qla4_83xx_isp_reset,
5663 	.reset_firmware		= qla4_8xxx_stop_firmware,
5664 	.queue_iocb		= qla4_83xx_queue_iocb,
5665 	.complete_iocb		= qla4_83xx_complete_iocb,
5666 	.rd_shdw_req_q_out	= qla4xxx_rd_shdw_req_q_out,
5667 	.rd_shdw_rsp_q_in	= qla4xxx_rd_shdw_rsp_q_in,
5668 	.get_sys_info		= qla4_8xxx_get_sys_info,
5669 	.rd_reg_direct		= qla4_83xx_rd_reg,
5670 	.wr_reg_direct		= qla4_83xx_wr_reg,
5671 	.rd_reg_indirect	= qla4_83xx_rd_reg_indirect,
5672 	.wr_reg_indirect	= qla4_83xx_wr_reg_indirect,
5673 	.idc_lock		= qla4_83xx_drv_lock,
5674 	.idc_unlock		= qla4_83xx_drv_unlock,
5675 	.rom_lock_recovery	= qla4_83xx_rom_lock_recovery,
5676 	.queue_mailbox_command	= qla4_83xx_queue_mbox_cmd,
5677 	.process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
5678 };
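
/* Hedged, illustrative sketch (not compiled): chip-specific behaviour is
 * reached only through the per-ISP ops table selected at probe time, never
 * by calling the qla4xxx_/qla4_82xx_/qla4_83xx_ helpers directly.
 */
#if 0
static void example_reset_via_ops(struct scsi_qla_host *ha)
{
	ha->isp_ops->disable_intrs(ha);
	if (ha->isp_ops->reset_chip(ha) == QLA_SUCCESS)
		ha->isp_ops->enable_intrs(ha);
}
#endif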
5679 
5680 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
5681 {
5682 	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
5683 }
5684 
5685 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
5686 {
5687 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
5688 }
5689 
5690 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
5691 {
5692 	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
5693 }
5694 
5695 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
5696 {
5697 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
5698 }
5699 
5700 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
5701 {
5702 	struct scsi_qla_host *ha = data;
5703 	char *str = buf;
5704 	int rc;
5705 
5706 	switch (type) {
5707 	case ISCSI_BOOT_ETH_FLAGS:
5708 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
5709 		break;
5710 	case ISCSI_BOOT_ETH_INDEX:
5711 		rc = sprintf(str, "0\n");
5712 		break;
5713 	case ISCSI_BOOT_ETH_MAC:
5714 		rc = sysfs_format_mac(str, ha->my_mac,
5715 				      MAC_ADDR_LEN);
5716 		break;
5717 	default:
5718 		rc = -ENOSYS;
5719 		break;
5720 	}
5721 	return rc;
5722 }
5723 
5724 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
5725 {
5726 	int rc;
5727 
5728 	switch (type) {
5729 	case ISCSI_BOOT_ETH_FLAGS:
5730 	case ISCSI_BOOT_ETH_MAC:
5731 	case ISCSI_BOOT_ETH_INDEX:
5732 		rc = S_IRUGO;
5733 		break;
5734 	default:
5735 		rc = 0;
5736 		break;
5737 	}
5738 	return rc;
5739 }
5740 
5741 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
5742 {
5743 	struct scsi_qla_host *ha = data;
5744 	char *str = buf;
5745 	int rc;
5746 
5747 	switch (type) {
5748 	case ISCSI_BOOT_INI_INITIATOR_NAME:
5749 		rc = sprintf(str, "%s\n", ha->name_string);
5750 		break;
5751 	default:
5752 		rc = -ENOSYS;
5753 		break;
5754 	}
5755 	return rc;
5756 }
5757 
5758 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
5759 {
5760 	int rc;
5761 
5762 	switch (type) {
5763 	case ISCSI_BOOT_INI_INITIATOR_NAME:
5764 		rc = S_IRUGO;
5765 		break;
5766 	default:
5767 		rc = 0;
5768 		break;
5769 	}
5770 	return rc;
5771 }
5772 
5773 static ssize_t
5774 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
5775 			   char *buf)
5776 {
5777 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
5778 	char *str = buf;
5779 	int rc;
5780 
5781 	switch (type) {
5782 	case ISCSI_BOOT_TGT_NAME:
5783 		rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
5784 		break;
5785 	case ISCSI_BOOT_TGT_IP_ADDR:
5786 		if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
5787 			rc = sprintf(buf, "%pI4\n",
5788 				     &boot_conn->dest_ipaddr.ip_address);
5789 		else
5790 			rc = sprintf(str, "%pI6\n",
5791 				     &boot_conn->dest_ipaddr.ip_address);
5792 		break;
5793 	case ISCSI_BOOT_TGT_PORT:
5794 			rc = sprintf(str, "%d\n", boot_conn->dest_port);
5795 		break;
5796 	case ISCSI_BOOT_TGT_CHAP_NAME:
5797 		rc = sprintf(str,  "%.*s\n",
5798 			     boot_conn->chap.target_chap_name_length,
5799 			     (char *)&boot_conn->chap.target_chap_name);
5800 		break;
5801 	case ISCSI_BOOT_TGT_CHAP_SECRET:
5802 		rc = sprintf(str,  "%.*s\n",
5803 			     boot_conn->chap.target_secret_length,
5804 			     (char *)&boot_conn->chap.target_secret);
5805 		break;
5806 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5807 		rc = sprintf(str,  "%.*s\n",
5808 			     boot_conn->chap.intr_chap_name_length,
5809 			     (char *)&boot_conn->chap.intr_chap_name);
5810 		break;
5811 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5812 		rc = sprintf(str,  "%.*s\n",
5813 			     boot_conn->chap.intr_secret_length,
5814 			     (char *)&boot_conn->chap.intr_secret);
5815 		break;
5816 	case ISCSI_BOOT_TGT_FLAGS:
5817 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
5818 		break;
5819 	case ISCSI_BOOT_TGT_NIC_ASSOC:
5820 		rc = sprintf(str, "0\n");
5821 		break;
5822 	default:
5823 		rc = -ENOSYS;
5824 		break;
5825 	}
5826 	return rc;
5827 }
5828 
5829 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
5830 {
5831 	struct scsi_qla_host *ha = data;
5832 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
5833 
5834 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
5835 }
5836 
5837 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
5838 {
5839 	struct scsi_qla_host *ha = data;
5840 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
5841 
5842 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
5843 }
5844 
5845 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
5846 {
5847 	int rc;
5848 
5849 	switch (type) {
5850 	case ISCSI_BOOT_TGT_NAME:
5851 	case ISCSI_BOOT_TGT_IP_ADDR:
5852 	case ISCSI_BOOT_TGT_PORT:
5853 	case ISCSI_BOOT_TGT_CHAP_NAME:
5854 	case ISCSI_BOOT_TGT_CHAP_SECRET:
5855 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5856 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5857 	case ISCSI_BOOT_TGT_NIC_ASSOC:
5858 	case ISCSI_BOOT_TGT_FLAGS:
5859 		rc = S_IRUGO;
5860 		break;
5861 	default:
5862 		rc = 0;
5863 		break;
5864 	}
5865 	return rc;
5866 }
5867 
5868 static void qla4xxx_boot_release(void *data)
5869 {
5870 	struct scsi_qla_host *ha = data;
5871 
5872 	scsi_host_put(ha->host);
5873 }
5874 
5875 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
5876 {
5877 	dma_addr_t buf_dma;
5878 	uint32_t addr, pri_addr, sec_addr;
5879 	uint32_t offset;
5880 	uint16_t func_num;
5881 	uint8_t val;
5882 	uint8_t *buf = NULL;
5883 	size_t size = 13 * sizeof(uint8_t);
5884 	int ret = QLA_SUCCESS;
5885 
5886 	func_num = PCI_FUNC(ha->pdev->devfn);
5887 
5888 	ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
5889 		   __func__, ha->pdev->device, func_num);
5890 
5891 	if (is_qla40XX(ha)) {
5892 		if (func_num == 1) {
5893 			addr = NVRAM_PORT0_BOOT_MODE;
5894 			pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
5895 			sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
5896 		} else if (func_num == 3) {
5897 			addr = NVRAM_PORT1_BOOT_MODE;
5898 			pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
5899 			sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
5900 		} else {
5901 			ret = QLA_ERROR;
5902 			goto exit_boot_info;
5903 		}
5904 
5905 		/* Check Boot Mode */
5906 		val = rd_nvram_byte(ha, addr);
5907 		if (!(val & 0x07)) {
5908 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
5909 					  "options : 0x%x\n", __func__, val));
5910 			ret = QLA_ERROR;
5911 			goto exit_boot_info;
5912 		}
5913 
5914 		/* get primary valid target index */
5915 		val = rd_nvram_byte(ha, pri_addr);
5916 		if (val & BIT_7)
5917 			ddb_index[0] = (val & 0x7f);
5918 
5919 		/* get secondary valid target index */
5920 		val = rd_nvram_byte(ha, sec_addr);
5921 		if (val & BIT_7)
5922 			ddb_index[1] = (val & 0x7f);
5923 
5924 	} else if (is_qla80XX(ha)) {
5925 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
5926 					 &buf_dma, GFP_KERNEL);
5927 		if (!buf) {
5928 			DEBUG2(ql4_printk(KERN_ERR, ha,
5929 					  "%s: Unable to allocate dma buffer\n",
5930 					   __func__));
5931 			ret = QLA_ERROR;
5932 			goto exit_boot_info;
5933 		}
5934 
5935 		if (ha->port_num == 0)
5936 			offset = BOOT_PARAM_OFFSET_PORT0;
5937 		else if (ha->port_num == 1)
5938 			offset = BOOT_PARAM_OFFSET_PORT1;
5939 		else {
5940 			ret = QLA_ERROR;
5941 			goto exit_boot_info_free;
5942 		}
5943 		addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
5944 		       offset;
5945 		if (qla4xxx_get_flash(ha, buf_dma, addr,
5946 				      13 * sizeof(uint8_t)) != QLA_SUCCESS) {
5947 			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
5948 					  " failed\n", ha->host_no, __func__));
5949 			ret = QLA_ERROR;
5950 			goto exit_boot_info_free;
5951 		}
5952 		/* Check Boot Mode */
5953 		if (!(buf[1] & 0x07)) {
5954 			DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
5955 					  " : 0x%x\n", buf[1]));
5956 			ret = QLA_ERROR;
5957 			goto exit_boot_info_free;
5958 		}
5959 
5960 		/* get primary valid target index */
5961 		if (buf[2] & BIT_7)
5962 			ddb_index[0] = buf[2] & 0x7f;
5963 
5964 		/* get secondary valid target index */
5965 		if (buf[11] & BIT_7)
5966 			ddb_index[1] = buf[11] & 0x7f;
5967 	} else {
5968 		ret = QLA_ERROR;
5969 		goto exit_boot_info;
5970 	}
5971 
5972 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
5973 			  " target ID %d\n", __func__, ddb_index[0],
5974 			  ddb_index[1]));
5975 
5976 exit_boot_info_free:
5977 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
5978 exit_boot_info:
5979 	ha->pri_ddb_idx = ddb_index[0];
5980 	ha->sec_ddb_idx = ddb_index[1];
5981 	return ret;
5982 }
5983 
5984 /**
5985  * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
5986  * @ha: pointer to adapter structure
5987  * @username: CHAP username to be returned
5988  * @password: CHAP password to be returned
5989  *
5990  * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
5991  * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
5992  * So from the CHAP cache find the first BIDI CHAP entry and set it
5993  * to the boot record in sysfs.
5994  **/
5995 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
5996 			    char *password)
5997 {
5998 	int i, ret = -EINVAL;
5999 	int max_chap_entries = 0;
6000 	struct ql4_chap_table *chap_table;
6001 
6002 	if (is_qla80XX(ha))
6003 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
6004 						sizeof(struct ql4_chap_table);
6005 	else
6006 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
6007 
6008 	if (!ha->chap_list) {
6009 		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
6010 		return ret;
6011 	}
6012 
6013 	mutex_lock(&ha->chap_sem);
6014 	for (i = 0; i < max_chap_entries; i++) {
6015 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
6016 		if (chap_table->cookie !=
6017 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
6018 			continue;
6019 		}
6020 
6021 		if (chap_table->flags & BIT_7) /* local */
6022 			continue;
6023 
6024 		if (!(chap_table->flags & BIT_6)) /* Not BIDI */
6025 			continue;
6026 
6027 		strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
6028 		strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
6029 		ret = 0;
6030 		break;
6031 	}
6032 	mutex_unlock(&ha->chap_sem);
6033 
6034 	return ret;
6035 }
6036 
6037 
6038 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
6039 				   struct ql4_boot_session_info *boot_sess,
6040 				   uint16_t ddb_index)
6041 {
6042 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
6043 	struct dev_db_entry *fw_ddb_entry;
6044 	dma_addr_t fw_ddb_entry_dma;
6045 	uint16_t idx;
6046 	uint16_t options;
6047 	int ret = QLA_SUCCESS;
6048 
6049 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6050 					  &fw_ddb_entry_dma, GFP_KERNEL);
6051 	if (!fw_ddb_entry) {
6052 		DEBUG2(ql4_printk(KERN_ERR, ha,
6053 				  "%s: Unable to allocate dma buffer.\n",
6054 				  __func__));
6055 		ret = QLA_ERROR;
6056 		return ret;
6057 	}
6058 
6059 	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
6060 				   fw_ddb_entry_dma, ddb_index)) {
6061 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
6062 				  "index [%d]\n", __func__, ddb_index));
6063 		ret = QLA_ERROR;
6064 		goto exit_boot_target;
6065 	}
6066 
6067 	/* Update target name and IP from DDB */
6068 	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
6069 	       min(sizeof(boot_sess->target_name),
6070 		   sizeof(fw_ddb_entry->iscsi_name)));
6071 
6072 	options = le16_to_cpu(fw_ddb_entry->options);
6073 	if (options & DDB_OPT_IPV6_DEVICE) {
6074 		memcpy(&boot_conn->dest_ipaddr.ip_address,
6075 		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
6076 	} else {
6077 		boot_conn->dest_ipaddr.ip_type = 0x1;
6078 		memcpy(&boot_conn->dest_ipaddr.ip_address,
6079 		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
6080 	}
6081 
6082 	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
6083 
6084 	/* update chap information */
6085 	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
6086 
6087 	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{
6088 
6089 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
6090 
6091 		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
6092 				       target_chap_name,
6093 				       (char *)&boot_conn->chap.target_secret,
6094 				       idx);
6095 		if (ret) {
6096 			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
6097 			ret = QLA_ERROR;
6098 			goto exit_boot_target;
6099 		}
6100 
6101 		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
6102 		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
6103 	}
6104 
6105 	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
6106 
6107 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
6108 
6109 		ret = qla4xxx_get_bidi_chap(ha,
6110 				    (char *)&boot_conn->chap.intr_chap_name,
6111 				    (char *)&boot_conn->chap.intr_secret);
6112 
6113 		if (ret) {
6114 			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
6115 			ret = QLA_ERROR;
6116 			goto exit_boot_target;
6117 		}
6118 
6119 		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
6120 		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
6121 	}
6122 
6123 exit_boot_target:
6124 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6125 			  fw_ddb_entry, fw_ddb_entry_dma);
6126 	return ret;
6127 }
6128 
6129 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
6130 {
6131 	uint16_t ddb_index[2];
6132 	int ret = QLA_ERROR;
6133 	int rval;
6134 
6135 	memset(ddb_index, 0, sizeof(ddb_index));
6136 	ddb_index[0] = 0xffff;
6137 	ddb_index[1] = 0xffff;
6138 	ret = get_fw_boot_info(ha, ddb_index);
6139 	if (ret != QLA_SUCCESS) {
6140 		DEBUG2(ql4_printk(KERN_INFO, ha,
6141 				"%s: No boot target configured.\n", __func__));
6142 		return ret;
6143 	}
6144 
6145 	if (ql4xdisablesysfsboot)
6146 		return QLA_SUCCESS;
6147 
6148 	if (ddb_index[0] == 0xffff)
6149 		goto sec_target;
6150 
6151 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
6152 				      ddb_index[0]);
6153 	if (rval != QLA_SUCCESS) {
6154 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
6155 				  "configured\n", __func__));
6156 	} else
6157 		ret = QLA_SUCCESS;
6158 
6159 sec_target:
6160 	if (ddb_index[1] == 0xffff)
6161 		goto exit_get_boot_info;
6162 
6163 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
6164 				      ddb_index[1]);
6165 	if (rval != QLA_SUCCESS) {
6166 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
6167 				  " configured\n", __func__));
6168 	} else
6169 		ret = QLA_SUCCESS;
6170 
6171 exit_get_boot_info:
6172 	return ret;
6173 }
6174 
6175 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
6176 {
6177 	struct iscsi_boot_kobj *boot_kobj;
6178 
6179 	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
6180 		return QLA_ERROR;
6181 
6182 	if (ql4xdisablesysfsboot) {
6183 		ql4_printk(KERN_INFO, ha,
6184 			   "%s: syfsboot disabled - driver will trigger login "
6185 			   "and publish session for discovery .\n", __func__);
6186 		return QLA_SUCCESS;
6187 	}
6188 
6189 
6190 	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
6191 	if (!ha->boot_kset)
6192 		goto kset_free;
6193 
6194 	if (!scsi_host_get(ha->host))
6195 		goto kset_free;
6196 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
6197 					     qla4xxx_show_boot_tgt_pri_info,
6198 					     qla4xxx_tgt_get_attr_visibility,
6199 					     qla4xxx_boot_release);
6200 	if (!boot_kobj)
6201 		goto put_host;
6202 
6203 	if (!scsi_host_get(ha->host))
6204 		goto kset_free;
6205 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
6206 					     qla4xxx_show_boot_tgt_sec_info,
6207 					     qla4xxx_tgt_get_attr_visibility,
6208 					     qla4xxx_boot_release);
6209 	if (!boot_kobj)
6210 		goto put_host;
6211 
6212 	if (!scsi_host_get(ha->host))
6213 		goto kset_free;
6214 	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
6215 					       qla4xxx_show_boot_ini_info,
6216 					       qla4xxx_ini_get_attr_visibility,
6217 					       qla4xxx_boot_release);
6218 	if (!boot_kobj)
6219 		goto put_host;
6220 
6221 	if (!scsi_host_get(ha->host))
6222 		goto kset_free;
6223 	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
6224 					       qla4xxx_show_boot_eth_info,
6225 					       qla4xxx_eth_get_attr_visibility,
6226 					       qla4xxx_boot_release);
6227 	if (!boot_kobj)
6228 		goto put_host;
6229 
6230 	return QLA_SUCCESS;
6231 
6232 put_host:
6233 	scsi_host_put(ha->host);
6234 kset_free:
6235 	iscsi_boot_destroy_kset(ha->boot_kset);
6236 	return -ENOMEM;
6237 }
6238 
6239 
6240 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
6241 				  struct ql4_tuple_ddb *tddb)
6242 {
6243 	struct scsi_qla_host *ha;
6244 	struct iscsi_cls_session *cls_sess;
6245 	struct iscsi_cls_conn *cls_conn;
6246 	struct iscsi_session *sess;
6247 	struct iscsi_conn *conn;
6248 
6249 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
6250 	ha = ddb_entry->ha;
6251 	cls_sess = ddb_entry->sess;
6252 	sess = cls_sess->dd_data;
6253 	cls_conn = ddb_entry->conn;
6254 	conn = cls_conn->dd_data;
6255 
6256 	tddb->tpgt = sess->tpgt;
6257 	tddb->port = conn->persistent_port;
6258 	strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
6259 	strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
6260 }
6261 
6262 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
6263 				      struct ql4_tuple_ddb *tddb,
6264 				      uint8_t *flash_isid)
6265 {
6266 	uint16_t options = 0;
6267 
6268 	tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
6269 	memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
6270 	       min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
6271 
6272 	options = le16_to_cpu(fw_ddb_entry->options);
6273 	if (options & DDB_OPT_IPV6_DEVICE)
6274 		sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
6275 	else
6276 		sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
6277 
6278 	tddb->port = le16_to_cpu(fw_ddb_entry->port);
6279 
6280 	if (flash_isid == NULL)
6281 		memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
6282 		       sizeof(tddb->isid));
6283 	else
6284 		memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
6285 }
6286 
6287 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
6288 				     struct ql4_tuple_ddb *old_tddb,
6289 				     struct ql4_tuple_ddb *new_tddb,
6290 				     uint8_t is_isid_compare)
6291 {
6292 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
6293 		return QLA_ERROR;
6294 
6295 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
6296 		return QLA_ERROR;
6297 
6298 	if (old_tddb->port != new_tddb->port)
6299 		return QLA_ERROR;
6300 
6301 	/* For multi-session targets the driver generates the ISID, so do not
6302 	 * compare ISIDs in the reset path, since that would be a comparison
6303 	 * between a driver-generated ISID and a firmware-generated ISID. This
6304 	 * could lead to adding duplicated DDBs to the list, as a
6305 	 * driver-generated ISID would not match the firmware-generated ISID.
6306 	 */
6307 	if (is_isid_compare) {
6308 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
6309 			"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
6310 			__func__, old_tddb->isid[5], old_tddb->isid[4],
6311 			old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
6312 			old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
6313 			new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
6314 			new_tddb->isid[0]));
6315 
6316 		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
6317 			   sizeof(old_tddb->isid)))
6318 			return QLA_ERROR;
6319 	}
6320 
6321 	DEBUG2(ql4_printk(KERN_INFO, ha,
6322 			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
6323 			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
6324 			  old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
6325 			  new_tddb->ip_addr, new_tddb->iscsi_name));
6326 
6327 	return QLA_SUCCESS;
6328 }
6329 
6330 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
6331 				     struct dev_db_entry *fw_ddb_entry,
6332 				     uint32_t *index)
6333 {
6334 	struct ddb_entry *ddb_entry;
6335 	struct ql4_tuple_ddb *fw_tddb = NULL;
6336 	struct ql4_tuple_ddb *tmp_tddb = NULL;
6337 	int idx;
6338 	int ret = QLA_ERROR;
6339 
6340 	fw_tddb = vzalloc(sizeof(*fw_tddb));
6341 	if (!fw_tddb) {
6342 		DEBUG2(ql4_printk(KERN_WARNING, ha,
6343 				  "Memory Allocation failed.\n"));
6344 		ret = QLA_SUCCESS;
6345 		goto exit_check;
6346 	}
6347 
6348 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6349 	if (!tmp_tddb) {
6350 		DEBUG2(ql4_printk(KERN_WARNING, ha,
6351 				  "Memory Allocation failed.\n"));
6352 		ret = QLA_SUCCESS;
6353 		goto exit_check;
6354 	}
6355 
6356 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
6357 
6358 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6359 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6360 		if (ddb_entry == NULL)
6361 			continue;
6362 
6363 		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
6364 		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
6365 			ret = QLA_SUCCESS; /* found */
6366 			if (index != NULL)
6367 				*index = idx;
6368 			goto exit_check;
6369 		}
6370 	}
6371 
6372 exit_check:
6373 	if (fw_tddb)
6374 		vfree(fw_tddb);
6375 	if (tmp_tddb)
6376 		vfree(tmp_tddb);
6377 	return ret;
6378 }
6379 
6380 /**
6381  * qla4xxx_check_existing_isid - check if a target with the same isid exists
6382  *				 in the target list
6383  * @list_nt: list of targets
6384  * @isid: isid to check
6385  *
6386  * This routine returns QLA_SUCCESS if a target with the same isid exists.
6387  **/
6388 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
6389 {
6390 	struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
6391 	struct dev_db_entry *fw_ddb_entry;
6392 
6393 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6394 		fw_ddb_entry = &nt_ddb_idx->fw_ddb;
6395 
6396 		if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
6397 			   sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
6398 			return QLA_SUCCESS;
6399 		}
6400 	}
6401 	return QLA_ERROR;
6402 }
6403 
6404 /**
6405  * qla4xxx_update_isid - compare ddbs and update the isid
6406  * @ha: Pointer to host adapter structure.
6407  * @list_nt: list of nt targets
6408  * @fw_ddb_entry: firmware ddb entry
6409  *
6410  * This routine updates the isid if ddbs have the same iqn, the same isid
6411  * and different IP addresses.
6412  * Returns QLA_SUCCESS if the isid is updated.
6413  **/
6414 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
6415 			       struct list_head *list_nt,
6416 			       struct dev_db_entry *fw_ddb_entry)
6417 {
6418 	uint8_t base_value, i;
6419 
6420 	base_value = fw_ddb_entry->isid[1] & 0x1f;
6421 	for (i = 0; i < 8; i++) {
6422 		fw_ddb_entry->isid[1] = (base_value | (i << 5));
6423 		if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
6424 			break;
6425 	}
6426 
6427 	if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
6428 		return QLA_ERROR;
6429 
6430 	return QLA_SUCCESS;
6431 }
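
/* Worked example (illustrative, the value is hypothetical): if the flash
 * ISID's byte[1] is 0x2a, then base_value = 0x2a & 0x1f = 0x0a, and the loop
 * above probes candidates 0x0a, 0x2a, 0x4a, 0x6a, 0x8a, 0xaa, 0xca and 0xea
 * (base_value | i << 5) until one is not already present in list_nt.
 */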
6432 
6433 /**
6434  * qla4xxx_should_update_isid - check if the isid needs to be updated
6435  * @ha: Pointer to host adapter structure.
6436  * @old_tddb: ddb tuple
6437  * @new_tddb: ddb tuple
6438  *
6439  * Returns QLA_SUCCESS if the IP address or port differs while the iqn and
6440  * isid are the same.
6441  **/
6442 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
6443 				      struct ql4_tuple_ddb *old_tddb,
6444 				      struct ql4_tuple_ddb *new_tddb)
6445 {
6446 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
6447 		/* Same ip */
6448 		if (old_tddb->port == new_tddb->port)
6449 			return QLA_ERROR;
6450 	}
6451 
6452 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
6453 		/* different iqn */
6454 		return QLA_ERROR;
6455 
6456 	if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
6457 		   sizeof(old_tddb->isid)))
6458 		/* different isid */
6459 		return QLA_ERROR;
6460 
6461 	return QLA_SUCCESS;
6462 }
6463 
6464 /**
6465  * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
6466  * @ha: Pointer to host adapter structure.
6467  * @list_nt: list of nt targets.
6468  * @fw_ddb_entry: firmware ddb entry.
6469  *
6470  * This routine checks if fw_ddb_entry already exists in list_nt, to avoid
6471  * adding a duplicate ddb to list_nt.
6472  * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
6473  * Note: This function also updates the isid of the DDB if required.
6474  **/
6475 
6476 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
6477 				       struct list_head *list_nt,
6478 				       struct dev_db_entry *fw_ddb_entry)
6479 {
6480 	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
6481 	struct ql4_tuple_ddb *fw_tddb = NULL;
6482 	struct ql4_tuple_ddb *tmp_tddb = NULL;
6483 	int rval, ret = QLA_ERROR;
6484 
6485 	fw_tddb = vzalloc(sizeof(*fw_tddb));
6486 	if (!fw_tddb) {
6487 		DEBUG2(ql4_printk(KERN_WARNING, ha,
6488 				  "Memory Allocation failed.\n"));
6489 		ret = QLA_SUCCESS;
6490 		goto exit_check;
6491 	}
6492 
6493 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6494 	if (!tmp_tddb) {
6495 		DEBUG2(ql4_printk(KERN_WARNING, ha,
6496 				  "Memory Allocation failed.\n"));
6497 		ret = QLA_SUCCESS;
6498 		goto exit_check;
6499 	}
6500 
6501 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
6502 
6503 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6504 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
6505 					  nt_ddb_idx->flash_isid);
6506 		ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
6507 		/* found duplicate ddb */
6508 		if (ret == QLA_SUCCESS)
6509 			goto exit_check;
6510 	}
6511 
6512 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6513 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
6514 
6515 		ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
6516 		if (ret == QLA_SUCCESS) {
6517 			rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
6518 			if (rval == QLA_SUCCESS)
6519 				ret = QLA_ERROR;
6520 			else
6521 				ret = QLA_SUCCESS;
6522 
6523 			goto exit_check;
6524 		}
6525 	}
6526 
6527 exit_check:
6528 	if (fw_tddb)
6529 		vfree(fw_tddb);
6530 	if (tmp_tddb)
6531 		vfree(tmp_tddb);
6532 	return ret;
6533 }
6534 
6535 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
6536 {
6537 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
6538 
6539 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6540 		list_del_init(&ddb_idx->list);
6541 		vfree(ddb_idx);
6542 	}
6543 }
6544 
6545 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
6546 					struct dev_db_entry *fw_ddb_entry)
6547 {
6548 	struct iscsi_endpoint *ep;
6549 	struct sockaddr_in *addr;
6550 	struct sockaddr_in6 *addr6;
6551 	struct sockaddr *t_addr;
6552 	struct sockaddr_storage *dst_addr;
6553 	char *ip;
6554 
6555 	/* TODO: the iscsi_endpoint needs to be destroyed on unload */
6556 	dst_addr = vmalloc(sizeof(*dst_addr));
6557 	if (!dst_addr)
6558 		return NULL;
6559 
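	/* Build the destination sockaddr (IPv4 or IPv6) from the firmware
	 * DDB entry, then open an iSCSI endpoint to that portal.
	 */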
6560 	if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
6561 		t_addr = (struct sockaddr *)dst_addr;
6562 		t_addr->sa_family = AF_INET6;
6563 		addr6 = (struct sockaddr_in6 *)dst_addr;
6564 		ip = (char *)&addr6->sin6_addr;
6565 		memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
6566 		addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
6567 
6568 	} else {
6569 		t_addr = (struct sockaddr *)dst_addr;
6570 		t_addr->sa_family = AF_INET;
6571 		addr = (struct sockaddr_in *)dst_addr;
6572 		ip = (char *)&addr->sin_addr;
6573 		memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
6574 		addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
6575 	}
6576 
6577 	ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
6578 	vfree(dst_addr);
6579 	return ep;
6580 }
6581 
6582 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
6583 {
6584 	if (ql4xdisablesysfsboot)
6585 		return QLA_SUCCESS;
6586 	if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
6587 		return QLA_ERROR;
6588 	return QLA_SUCCESS;
6589 }
6590 
6591 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
6592 					  struct ddb_entry *ddb_entry,
6593 					  uint16_t idx)
6594 {
6595 	uint16_t def_timeout;
6596 
6597 	ddb_entry->ddb_type = FLASH_DDB;
6598 	ddb_entry->fw_ddb_index = INVALID_ENTRY;
6599 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
6600 	ddb_entry->ha = ha;
6601 	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
6602 	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
6603 	ddb_entry->chap_tbl_idx = INVALID_ENTRY;
6604 
6605 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
6606 	atomic_set(&ddb_entry->relogin_timer, 0);
6607 	atomic_set(&ddb_entry->relogin_retry_count, 0);
6608 	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
6609 	ddb_entry->default_relogin_timeout =
6610 		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
6611 		def_timeout : LOGIN_TOV;
6612 	ddb_entry->default_time2wait =
6613 		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
6614 
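	/* If boot targets are not exported through sysfs, flag the primary/
	 * secondary boot DDB indexes so the driver still treats them as
	 * boot targets (e.g. logout from them is refused).
	 */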
6615 	if (ql4xdisablesysfsboot &&
6616 	    (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
6617 		set_bit(DF_BOOT_TGT, &ddb_entry->flags);
6618 }
6619 
6620 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
6621 {
6622 	uint32_t idx = 0;
6623 	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
6624 	uint32_t sts[MBOX_REG_COUNT];
6625 	uint32_t ip_state;
6626 	unsigned long wtime;
6627 	int ret;
6628 
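	/* Poll each IP interface until it reaches a settled address state
	 * (or the state query fails) or the IP_CONFIG_TOV window expires;
	 * interfaces that are done are marked with -1 and skipped.
	 */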
6629 	wtime = jiffies + (HZ * IP_CONFIG_TOV);
6630 	do {
6631 		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
6632 			if (ip_idx[idx] == -1)
6633 				continue;
6634 
6635 			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
6636 
6637 			if (ret == QLA_ERROR) {
6638 				ip_idx[idx] = -1;
6639 				continue;
6640 			}
6641 
6642 			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
6643 
6644 			DEBUG2(ql4_printk(KERN_INFO, ha,
6645 					  "Waiting for IP state for idx = %d, state = 0x%x\n",
6646 					  ip_idx[idx], ip_state));
6647 			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
6648 			    ip_state == IP_ADDRSTATE_INVALID ||
6649 			    ip_state == IP_ADDRSTATE_PREFERRED ||
6650 			    ip_state == IP_ADDRSTATE_DEPRICATED ||
6651 			    ip_state == IP_ADDRSTATE_DISABLING)
6652 				ip_idx[idx] = -1;
6653 		}
6654 
6655 		/* Break if all IP states checked */
6656 		if ((ip_idx[0] == -1) &&
6657 		    (ip_idx[1] == -1) &&
6658 		    (ip_idx[2] == -1) &&
6659 		    (ip_idx[3] == -1))
6660 			break;
6661 		schedule_timeout_uninterruptible(HZ);
6662 	} while (time_after(wtime, jiffies));
6663 }
6664 
6665 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
6666 				  struct dev_db_entry *flash_ddb_entry)
6667 {
6668 	uint16_t options = 0;
6669 	size_t ip_len = IP_ADDR_LEN;
6670 
6671 	options = le16_to_cpu(fw_ddb_entry->options);
6672 	if (options & DDB_OPT_IPV6_DEVICE)
6673 		ip_len = IPv6_ADDR_LEN;
6674 
6675 	if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
6676 		return QLA_ERROR;
6677 
6678 	if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
6679 		   sizeof(fw_ddb_entry->isid)))
6680 		return QLA_ERROR;
6681 
6682 	if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
6683 		   sizeof(fw_ddb_entry->port)))
6684 		return QLA_ERROR;
6685 
6686 	return QLA_SUCCESS;
6687 }
6688 
6689 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
6690 				     struct dev_db_entry *fw_ddb_entry,
6691 				     uint32_t fw_idx, uint32_t *flash_index)
6692 {
6693 	struct dev_db_entry *flash_ddb_entry;
6694 	dma_addr_t flash_ddb_entry_dma;
6695 	uint32_t idx = 0;
6696 	int max_ddbs;
6697 	int ret = QLA_ERROR, status;
6698 
6699 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6700 				     MAX_DEV_DB_ENTRIES;
6701 
6702 	flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6703 					 &flash_ddb_entry_dma);
6704 	if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
6705 		ql4_printk(KERN_ERR, ha, "Out of memory\n");
6706 		goto exit_find_st_idx;
6707 	}
6708 
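	/* Try the firmware index first; fall back to scanning the whole
	 * flash DDB table below only if it does not match.
	 */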
6709 	status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
6710 					  flash_ddb_entry_dma, fw_idx);
6711 	if (status == QLA_SUCCESS) {
6712 		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
6713 		if (status == QLA_SUCCESS) {
6714 			*flash_index = fw_idx;
6715 			ret = QLA_SUCCESS;
6716 			goto exit_find_st_idx;
6717 		}
6718 	}
6719 
6720 	for (idx = 0; idx < max_ddbs; idx++) {
6721 		status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
6722 						  flash_ddb_entry_dma, idx);
6723 		if (status == QLA_ERROR)
6724 			continue;
6725 
6726 		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
6727 		if (status == QLA_SUCCESS) {
6728 			*flash_index = idx;
6729 			ret = QLA_SUCCESS;
6730 			goto exit_find_st_idx;
6731 		}
6732 	}
6733 
6734 	if (idx == max_ddbs)
6735 		ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
6736 			   fw_idx);
6737 
6738 exit_find_st_idx:
6739 	if (flash_ddb_entry)
6740 		dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
6741 			      flash_ddb_entry_dma);
6742 
6743 	return ret;
6744 }
6745 
6746 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
6747 				  struct list_head *list_st)
6748 {
6749 	struct qla_ddb_index  *st_ddb_idx;
6750 	int max_ddbs;
6751 	int fw_idx_size;
6752 	struct dev_db_entry *fw_ddb_entry;
6753 	dma_addr_t fw_ddb_dma;
6754 	int ret;
6755 	uint32_t idx = 0, next_idx = 0;
6756 	uint32_t state = 0, conn_err = 0;
6757 	uint32_t flash_index = -1;
6758 	uint16_t conn_id = 0;
6759 
6760 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6761 				      &fw_ddb_dma);
6762 	if (fw_ddb_entry == NULL) {
6763 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
6764 		goto exit_st_list;
6765 	}
6766 
6767 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6768 				     MAX_DEV_DB_ENTRIES;
6769 	fw_idx_size = sizeof(struct qla_ddb_index);
6770 
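	/* Walk the firmware DDB table using the next_idx returned by
	 * qla4xxx_get_fwddb_entry(); a next_idx of 0 ends the walk.
	 */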
6771 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
6772 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
6773 					      NULL, &next_idx, &state,
6774 					      &conn_err, NULL, &conn_id);
6775 		if (ret == QLA_ERROR)
6776 			break;
6777 
6778 		/* Ignore DDB if invalid state (unassigned) */
6779 		if (state == DDB_DS_UNASSIGNED)
6780 			goto continue_next_st;
6781 
6782 		/* Check if ST, add to the list_st */
6783 		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
6784 			goto continue_next_st;
6785 
6786 		st_ddb_idx = vzalloc(fw_idx_size);
6787 		if (!st_ddb_idx)
6788 			break;
6789 
6790 		ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
6791 						&flash_index);
6792 		if (ret == QLA_ERROR) {
6793 			ql4_printk(KERN_ERR, ha,
6794 				   "No flash entry for ST at idx [%d]\n", idx);
6795 			st_ddb_idx->flash_ddb_idx = idx;
6796 		} else {
6797 			ql4_printk(KERN_INFO, ha,
6798 				   "ST at idx [%d] is stored at flash [%d]\n",
6799 				   idx, flash_index);
6800 			st_ddb_idx->flash_ddb_idx = flash_index;
6801 		}
6802 
6803 		st_ddb_idx->fw_ddb_idx = idx;
6804 
6805 		list_add_tail(&st_ddb_idx->list, list_st);
6806 continue_next_st:
6807 		if (next_idx == 0)
6808 			break;
6809 	}
6810 
6811 exit_st_list:
6812 	if (fw_ddb_entry)
6813 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6814 }
6815 
6816 /**
6817  * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
6818  * @ha: pointer to adapter structure
6819  * @list_ddb: List from which failed ddb to be removed
6820  *
6821  * Iterate over the list of DDBs and find and remove DDBs that are either in
6822  * no connection active state or failed state
6823  **/
6824 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
6825 				      struct list_head *list_ddb)
6826 {
6827 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
6828 	uint32_t next_idx = 0;
6829 	uint32_t state = 0, conn_err = 0;
6830 	int ret;
6831 
6832 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6833 		ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
6834 					      NULL, 0, NULL, &next_idx, &state,
6835 					      &conn_err, NULL, NULL);
6836 		if (ret == QLA_ERROR)
6837 			continue;
6838 
6839 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
6840 		    state == DDB_DS_SESSION_FAILED) {
6841 			list_del_init(&ddb_idx->list);
6842 			vfree(ddb_idx);
6843 		}
6844 	}
6845 }
6846 
6847 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
6848 					 struct ddb_entry *ddb_entry,
6849 					 struct dev_db_entry *fw_ddb_entry)
6850 {
6851 	struct iscsi_cls_session *cls_sess;
6852 	struct iscsi_session *sess;
6853 	uint32_t max_ddbs = 0;
6854 	uint16_t ddb_link = -1;
6855 
6856 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6857 				     MAX_DEV_DB_ENTRIES;
6858 
6859 	cls_sess = ddb_entry->sess;
6860 	sess = cls_sess->dd_data;
6861 
6862 	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
6863 	if (ddb_link < max_ddbs)
6864 		sess->discovery_parent_idx = ddb_link;
6865 	else
6866 		sess->discovery_parent_idx = DDB_NO_LINK;
6867 }
6868 
6869 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
6870 				   struct dev_db_entry *fw_ddb_entry,
6871 				   int is_reset, uint16_t idx)
6872 {
6873 	struct iscsi_cls_session *cls_sess;
6874 	struct iscsi_session *sess;
6875 	struct iscsi_cls_conn *cls_conn;
6876 	struct iscsi_endpoint *ep;
6877 	uint16_t cmds_max = 32;
6878 	uint16_t conn_id = 0;
6879 	uint32_t initial_cmdsn = 0;
6880 	int ret = QLA_SUCCESS;
6881 
6882 	struct ddb_entry *ddb_entry = NULL;
6883 
6884 	/* Create the session object with INVALID_ENTRY;
6885 	 * the target_id gets set when the login is issued.
6886 	 */
6887 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
6888 				       cmds_max, sizeof(struct ddb_entry),
6889 				       sizeof(struct ql4_task_data),
6890 				       initial_cmdsn, INVALID_ENTRY);
6891 	if (!cls_sess) {
6892 		ret = QLA_ERROR;
6893 		goto exit_setup;
6894 	}
6895 
6896 	/* A module reference was taken during session setup above; drop it
6897 	 * here so internally created sessions do not prevent driver unload
6898 	 * (the reference is re-taken before the session is destroyed).
6899 	 */
6900 	module_put(qla4xxx_iscsi_transport.owner);
6901 	sess = cls_sess->dd_data;
6902 	ddb_entry = sess->dd_data;
6903 	ddb_entry->sess = cls_sess;
6904 
6905 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
6906 	memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
6907 	       sizeof(struct dev_db_entry));
6908 
6909 	qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
6910 
6911 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
6912 
6913 	if (!cls_conn) {
6914 		ret = QLA_ERROR;
6915 		goto exit_setup;
6916 	}
6917 
6918 	ddb_entry->conn = cls_conn;
6919 
6920 	/* Set up the ep for displaying attributes in sysfs */
6921 	ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
6922 	if (ep) {
6923 		ep->conn = cls_conn;
6924 		cls_conn->ep = ep;
6925 	} else {
6926 		DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
6927 		ret = QLA_ERROR;
6928 		goto exit_setup;
6929 	}
6930 
6931 	/* Update sess/conn params */
6932 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
6933 	qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
6934 
6935 	if (is_reset == RESET_ADAPTER) {
6936 		iscsi_block_session(cls_sess);
6937 		/* Use the relogin path to discover new devices by
6938 		 * short-circuiting the relogin timer logic: instead,
6939 		 * set the flags so that login is initiated right away.
6940 		 */
6942 		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
6943 		set_bit(DF_RELOGIN, &ddb_entry->flags);
6944 	}
6945 
6946 exit_setup:
6947 	return ret;
6948 }
6949 
6950 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
6951 				       struct list_head *list_ddb,
6952 				       struct dev_db_entry *fw_ddb_entry)
6953 {
6954 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
6955 	uint16_t ddb_link;
6956 
6957 	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
6958 
6959 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6960 		if (ddb_idx->fw_ddb_idx == ddb_link) {
6961 			DEBUG2(ql4_printk(KERN_INFO, ha,
6962 					  "Updating NT parent idx from [%d] to [%d]\n",
6963 					  ddb_link, ddb_idx->flash_ddb_idx));
6964 			fw_ddb_entry->ddb_link =
6965 					    cpu_to_le16(ddb_idx->flash_ddb_idx);
6966 			return;
6967 		}
6968 	}
6969 }
6970 
6971 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
6972 				  struct list_head *list_nt,
6973 				  struct list_head *list_st,
6974 				  int is_reset)
6975 {
6976 	struct dev_db_entry *fw_ddb_entry;
6977 	struct ddb_entry *ddb_entry = NULL;
6978 	dma_addr_t fw_ddb_dma;
6979 	int max_ddbs;
6980 	int fw_idx_size;
6981 	int ret;
6982 	uint32_t idx = 0, next_idx = 0;
6983 	uint32_t state = 0, conn_err = 0;
6984 	uint32_t ddb_idx = -1;
6985 	uint16_t conn_id = 0;
6986 	uint16_t ddb_link = -1;
6987 	struct qla_ddb_index  *nt_ddb_idx;
6988 
6989 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6990 				      &fw_ddb_dma);
6991 	if (fw_ddb_entry == NULL) {
6992 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
6993 		goto exit_nt_list;
6994 	}
6995 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6996 				     MAX_DEV_DB_ENTRIES;
6997 	fw_idx_size = sizeof(struct qla_ddb_index);
6998 
6999 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
7000 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
7001 					      NULL, &next_idx, &state,
7002 					      &conn_err, NULL, &conn_id);
7003 		if (ret == QLA_ERROR)
7004 			break;
7005 
7006 		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
7007 			goto continue_next_nt;
7008 
7009 		/* Check if NT, then add it to the list */
7010 		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
7011 			goto continue_next_nt;
7012 
7013 		ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
7014 		if (ddb_link < max_ddbs)
7015 			qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
7016 
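		/* On INIT_ADAPTER only set up DDBs that are not already logged
		 * in, i.e. those in no-connection-active or failed state.
		 */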
7017 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
7018 		    state == DDB_DS_SESSION_FAILED) &&
7019 		    (is_reset == INIT_ADAPTER))
7020 			goto continue_next_nt;
7021 
7022 		DEBUG2(ql4_printk(KERN_INFO, ha,
7023 				  "Adding  DDB to session = 0x%x\n", idx));
7024 
7025 		if (is_reset == INIT_ADAPTER) {
7026 			nt_ddb_idx = vmalloc(fw_idx_size);
7027 			if (!nt_ddb_idx)
7028 				break;
7029 
7030 			nt_ddb_idx->fw_ddb_idx = idx;
7031 
7032 			/* Save the original isid, as it may get updated in
7033 			 * qla4xxx_update_isid(); the original isid is needed by
7034 			 * qla4xxx_compare_tuple_ddb() to detect a duplicate
7035 			 * target */
7036 			memcpy(&nt_ddb_idx->flash_isid[0],
7037 			       &fw_ddb_entry->isid[0],
7038 			       sizeof(nt_ddb_idx->flash_isid));
7039 
7040 			ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
7041 							  fw_ddb_entry);
7042 			if (ret == QLA_SUCCESS) {
7043 				/* free nt_ddb_idx and do not add to list_nt */
7044 				vfree(nt_ddb_idx);
7045 				goto continue_next_nt;
7046 			}
7047 
7048 			/* Copy the firmware DDB entry, including any updated isid */
7049 			memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
7050 			       sizeof(struct dev_db_entry));
7051 
7052 			list_add_tail(&nt_ddb_idx->list, list_nt);
7053 		} else if (is_reset == RESET_ADAPTER) {
7054 			ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
7055 							&ddb_idx);
7056 			if (ret == QLA_SUCCESS) {
7057 				ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
7058 								       ddb_idx);
7059 				if (ddb_entry != NULL)
7060 					qla4xxx_update_sess_disc_idx(ha,
7061 								     ddb_entry,
7062 								  fw_ddb_entry);
7063 				goto continue_next_nt;
7064 			}
7065 		}
7066 
7067 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
7068 		if (ret == QLA_ERROR)
7069 			goto exit_nt_list;
7070 
7071 continue_next_nt:
7072 		if (next_idx == 0)
7073 			break;
7074 	}
7075 
7076 exit_nt_list:
7077 	if (fw_ddb_entry)
7078 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7079 }
7080 
7081 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
7082 				      struct list_head *list_nt,
7083 				      uint16_t target_id)
7084 {
7085 	struct dev_db_entry *fw_ddb_entry;
7086 	dma_addr_t fw_ddb_dma;
7087 	int max_ddbs;
7088 	int fw_idx_size;
7089 	int ret;
7090 	uint32_t idx = 0, next_idx = 0;
7091 	uint32_t state = 0, conn_err = 0;
7092 	uint16_t conn_id = 0;
7093 	struct qla_ddb_index  *nt_ddb_idx;
7094 
7095 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
7096 				      &fw_ddb_dma);
7097 	if (fw_ddb_entry == NULL) {
7098 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
7099 		goto exit_new_nt_list;
7100 	}
7101 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7102 				     MAX_DEV_DB_ENTRIES;
7103 	fw_idx_size = sizeof(struct qla_ddb_index);
7104 
7105 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
7106 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
7107 					      NULL, &next_idx, &state,
7108 					      &conn_err, NULL, &conn_id);
7109 		if (ret == QLA_ERROR)
7110 			break;
7111 
7112 		/* Check if NT, then add it to the list */
7113 		if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
7114 			goto continue_next_new_nt;
7115 
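		/* Only pick up entries that do not have an active connection yet. */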
7116 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
7117 			goto continue_next_new_nt;
7118 
7119 		DEBUG2(ql4_printk(KERN_INFO, ha,
7120 				  "Adding  DDB to session = 0x%x\n", idx));
7121 
7122 		nt_ddb_idx = vmalloc(fw_idx_size);
7123 		if (!nt_ddb_idx)
7124 			break;
7125 
7126 		nt_ddb_idx->fw_ddb_idx = idx;
7127 
7128 		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
7129 		if (ret == QLA_SUCCESS) {
7130 			/* free nt_ddb_idx and do not add to list_nt */
7131 			vfree(nt_ddb_idx);
7132 			goto continue_next_new_nt;
7133 		}
7134 
7135 		if (target_id < max_ddbs)
7136 			fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
7137 
7138 		list_add_tail(&nt_ddb_idx->list, list_nt);
7139 
7140 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
7141 					      idx);
7142 		if (ret == QLA_ERROR)
7143 			goto exit_new_nt_list;
7144 
7145 continue_next_new_nt:
7146 		if (next_idx == 0)
7147 			break;
7148 	}
7149 
7150 exit_new_nt_list:
7151 	if (fw_ddb_entry)
7152 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7153 }
7154 
7155 /**
7156  * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
7157  * @dev: dev associated with the sysfs entry
7158  * @data: pointer to flashnode session object
7159  *
7160  * Returns:
7161  *	1: if flashnode entry is non-persistent
7162  *	0: if flashnode entry is persistent
7163  **/
7164 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
7165 {
7166 	struct iscsi_bus_flash_session *fnode_sess;
7167 
7168 	if (!iscsi_flashnode_bus_match(dev, NULL))
7169 		return 0;
7170 
7171 	fnode_sess = iscsi_dev_to_flash_session(dev);
7172 
7173 	return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
7174 }
7175 
7176 /**
7177  * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
7178  * @ha: pointer to host
7179  * @fw_ddb_entry: flash ddb data
7180  * @idx: target index
7181  * @user: if set then this call is made from userland else from kernel
7182  *
7183  * Returns:
7184  * On success: QLA_SUCCESS
7185  * On failure: QLA_ERROR
7186  *
7187  * This creates separate sysfs entries for the session and connection
7188  * attributes of the given fw ddb entry.
7189  * If this is invoked as a result of a userspace call then the entry is marked
7190  * as nonpersistent using flash_state field.
7191  **/
7192 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
7193 					struct dev_db_entry *fw_ddb_entry,
7194 					uint16_t *idx, int user)
7195 {
7196 	struct iscsi_bus_flash_session *fnode_sess = NULL;
7197 	struct iscsi_bus_flash_conn *fnode_conn = NULL;
7198 	int rc = QLA_ERROR;
7199 
7200 	fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
7201 						 &qla4xxx_iscsi_transport, 0);
7202 	if (!fnode_sess) {
7203 		ql4_printk(KERN_ERR, ha,
7204 			   "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
7205 			   __func__, *idx, ha->host_no);
7206 		goto exit_tgt_create;
7207 	}
7208 
7209 	fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
7210 						 &qla4xxx_iscsi_transport, 0);
7211 	if (!fnode_conn) {
7212 		ql4_printk(KERN_ERR, ha,
7213 			   "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
7214 			   __func__, *idx, ha->host_no);
7215 		goto free_sess;
7216 	}
7217 
7218 	if (user) {
7219 		fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
7220 	} else {
7221 		fnode_sess->flash_state = DEV_DB_PERSISTENT;
7222 
7223 		if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
7224 			fnode_sess->is_boot_target = 1;
7225 		else
7226 			fnode_sess->is_boot_target = 0;
7227 	}
7228 
7229 	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
7230 					   fw_ddb_entry);
7231 
7232 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7233 		   __func__, fnode_sess->dev.kobj.name);
7234 
7235 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7236 		   __func__, fnode_conn->dev.kobj.name);
7237 
7238 	return QLA_SUCCESS;
7239 
7240 free_sess:
7241 	iscsi_destroy_flashnode_sess(fnode_sess);
7242 
7243 exit_tgt_create:
7244 	return QLA_ERROR;
7245 }
7246 
7247 /**
7248  * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
7249  * @shost: pointer to host
7250  * @buf: type of ddb entry (ipv4/ipv6)
7251  * @len: length of buf
7252  *
7253  * This creates a new ddb entry in flash by finding the first free index and
7254  * storing a default ddb there, and then creates a sysfs entry for the new ddb.
7255  **/
7256 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
7257 				 int len)
7258 {
7259 	struct scsi_qla_host *ha = to_qla_host(shost);
7260 	struct dev_db_entry *fw_ddb_entry = NULL;
7261 	dma_addr_t fw_ddb_entry_dma;
7262 	struct device *dev;
7263 	uint16_t idx = 0;
7264 	uint16_t max_ddbs = 0;
7265 	uint32_t options = 0;
7266 	uint32_t rval = QLA_ERROR;
7267 
7268 	if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
7269 	    strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
7270 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
7271 				  __func__));
7272 		goto exit_ddb_add;
7273 	}
7274 
7275 	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
7276 				     MAX_DEV_DB_ENTRIES;
7277 
7278 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7279 					  &fw_ddb_entry_dma, GFP_KERNEL);
7280 	if (!fw_ddb_entry) {
7281 		DEBUG2(ql4_printk(KERN_ERR, ha,
7282 				  "%s: Unable to allocate dma buffer\n",
7283 				  __func__));
7284 		goto exit_ddb_add;
7285 	}
7286 
7287 	dev = iscsi_find_flashnode_sess(ha->host, NULL,
7288 					qla4xxx_sysfs_ddb_is_non_persistent);
7289 	if (dev) {
7290 		ql4_printk(KERN_ERR, ha,
7291 			   "%s: A non-persistent entry %s found\n",
7292 			   __func__, dev->kobj.name);
7293 		put_device(dev);
7294 		goto exit_ddb_add;
7295 	}
7296 
7297 	/* Index 0 and 1 are reserved for boot target entries */
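	/* The first index for which no valid flash DDB entry can be read is
	 * treated as the first free slot for the new entry.
	 */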
7298 	for (idx = 2; idx < max_ddbs; idx++) {
7299 		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
7300 					     fw_ddb_entry_dma, idx))
7301 			break;
7302 	}
7303 
7304 	if (idx == max_ddbs)
7305 		goto exit_ddb_add;
7306 
7307 	if (!strncasecmp("ipv6", buf, 4))
7308 		options |= IPV6_DEFAULT_DDB_ENTRY;
7309 
7310 	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7311 	if (rval == QLA_ERROR)
7312 		goto exit_ddb_add;
7313 
7314 	rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
7315 
7316 exit_ddb_add:
7317 	if (fw_ddb_entry)
7318 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7319 				  fw_ddb_entry, fw_ddb_entry_dma);
7320 	if (rval == QLA_SUCCESS)
7321 		return idx;
7322 	else
7323 		return -EIO;
7324 }
7325 
7326 /**
7327  * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
7328  * @fnode_sess: pointer to session attrs of flash ddb entry
7329  * @fnode_conn: pointer to connection attrs of flash ddb entry
7330  *
7331  * This writes the contents of target ddb buffer to Flash with a valid cookie
7332  * value in order to make the ddb entry persistent.
7333  **/
7334 static int  qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
7335 				    struct iscsi_bus_flash_conn *fnode_conn)
7336 {
7337 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7338 	struct scsi_qla_host *ha = to_qla_host(shost);
7339 	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
7340 	struct dev_db_entry *fw_ddb_entry = NULL;
7341 	dma_addr_t fw_ddb_entry_dma;
7342 	uint32_t options = 0;
7343 	int rval = 0;
7344 
7345 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7346 					  &fw_ddb_entry_dma, GFP_KERNEL);
7347 	if (!fw_ddb_entry) {
7348 		DEBUG2(ql4_printk(KERN_ERR, ha,
7349 				  "%s: Unable to allocate dma buffer\n",
7350 				  __func__));
7351 		rval = -ENOMEM;
7352 		goto exit_ddb_apply;
7353 	}
7354 
7355 	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7356 		options |= IPV6_DEFAULT_DDB_ENTRY;
7357 
7358 	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7359 	if (rval == QLA_ERROR)
7360 		goto exit_ddb_apply;
7361 
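	/* Flash DDB entries are stored back to back starting at
	 * FLASH_OFFSET_DB_INFO; index by target_id to locate this entry.
	 */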
7362 	dev_db_start_offset += (fnode_sess->target_id *
7363 				sizeof(*fw_ddb_entry));
7364 
7365 	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
7366 	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
7367 
7368 	rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
7369 				 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
7370 
7371 	if (rval == QLA_SUCCESS) {
7372 		fnode_sess->flash_state = DEV_DB_PERSISTENT;
7373 		ql4_printk(KERN_INFO, ha,
7374 			   "%s: flash node %u of host %lu written to flash\n",
7375 			   __func__, fnode_sess->target_id, ha->host_no);
7376 	} else {
7377 		rval = -EIO;
7378 		ql4_printk(KERN_ERR, ha,
7379 			   "%s: Error while writing flash node %u of host %lu to flash\n",
7380 			   __func__, fnode_sess->target_id, ha->host_no);
7381 	}
7382 
7383 exit_ddb_apply:
7384 	if (fw_ddb_entry)
7385 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7386 				  fw_ddb_entry, fw_ddb_entry_dma);
7387 	return rval;
7388 }
7389 
7390 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
7391 					   struct dev_db_entry *fw_ddb_entry,
7392 					   uint16_t idx)
7393 {
7394 	struct dev_db_entry *ddb_entry = NULL;
7395 	dma_addr_t ddb_entry_dma;
7396 	unsigned long wtime;
7397 	uint32_t mbx_sts = 0;
7398 	uint32_t state = 0, conn_err = 0;
7399 	uint16_t tmo = 0;
7400 	int ret = 0;
7401 
7402 	ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
7403 				       &ddb_entry_dma, GFP_KERNEL);
7404 	if (!ddb_entry) {
7405 		DEBUG2(ql4_printk(KERN_ERR, ha,
7406 				  "%s: Unable to allocate dma buffer\n",
7407 				  __func__));
7408 		return QLA_ERROR;
7409 	}
7410 
7411 	memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
7412 
7413 	ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
7414 	if (ret != QLA_SUCCESS) {
7415 		DEBUG2(ql4_printk(KERN_ERR, ha,
7416 				  "%s: Unable to set ddb entry for index %d\n",
7417 				  __func__, idx));
7418 		goto exit_ddb_conn_open;
7419 	}
7420 
7421 	qla4xxx_conn_open(ha, idx);
7422 
7423 	/* To ensure that sendtargets is done, wait for at least 12 secs */
7424 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
7425 	       (ha->def_timeout < LOGIN_TOV * 10) ?
7426 	       ha->def_timeout : LOGIN_TOV);
7427 
7428 	DEBUG2(ql4_printk(KERN_INFO, ha,
7429 			  "Default time to wait for login to ddb %d\n", tmo));
7430 
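	/* Poll until the DDB reaches no-connection-active or failed state,
	 * or the timeout expires.
	 */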
7431 	wtime = jiffies + (HZ * tmo);
7432 	do {
7433 		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
7434 					      NULL, &state, &conn_err, NULL,
7435 					      NULL);
7436 		if (ret == QLA_ERROR)
7437 			continue;
7438 
7439 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
7440 		    state == DDB_DS_SESSION_FAILED)
7441 			break;
7442 
7443 		schedule_timeout_uninterruptible(HZ / 10);
7444 	} while (time_after(wtime, jiffies));
7445 
7446 exit_ddb_conn_open:
7447 	if (ddb_entry)
7448 		dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
7449 				  ddb_entry, ddb_entry_dma);
7450 	return ret;
7451 }
7452 
7453 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
7454 				struct dev_db_entry *fw_ddb_entry,
7455 				uint16_t target_id)
7456 {
7457 	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
7458 	struct list_head list_nt;
7459 	uint16_t ddb_index;
7460 	int ret = 0;
7461 
7462 	if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
7463 		ql4_printk(KERN_WARNING, ha,
7464 			   "%s: A discovery already in progress!\n", __func__);
7465 		return QLA_ERROR;
7466 	}
7467 
7468 	INIT_LIST_HEAD(&list_nt);
7469 
7470 	set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
7471 
7472 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
7473 	if (ret == QLA_ERROR)
7474 		goto exit_login_st_clr_bit;
7475 
7476 	ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
7477 	if (ret == QLA_ERROR)
7478 		goto exit_login_st;
7479 
7480 	qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
7481 
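	/* Discovery is done: free the temporary NT index list and clear the
	 * firmware DDB entries that were used for it.
	 */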
7482 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
7483 		list_del_init(&ddb_idx->list);
7484 		qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
7485 		vfree(ddb_idx);
7486 	}
7487 
7488 exit_login_st:
7489 	if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
7490 		ql4_printk(KERN_ERR, ha,
7491 			   "Unable to clear DDB index = 0x%x\n", ddb_index);
7492 	}
7493 
7494 	clear_bit(ddb_index, ha->ddb_idx_map);
7495 
7496 exit_login_st_clr_bit:
7497 	clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
7498 	return ret;
7499 }
7500 
7501 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
7502 				struct dev_db_entry *fw_ddb_entry,
7503 				uint16_t idx)
7504 {
7505 	int ret = QLA_ERROR;
7506 
7507 	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
7508 	if (ret != QLA_SUCCESS)
7509 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
7510 					      idx);
7511 	else
7512 		ret = -EPERM;
7513 
7514 	return ret;
7515 }
7516 
7517 /**
7518  * qla4xxx_sysfs_ddb_login - Login to the specified target
7519  * @fnode_sess: pointer to session attrs of flash ddb entry
7520  * @fnode_conn: pointer to connection attrs of flash ddb entry
7521  *
7522  * This logs in to the specified target
7523  **/
7524 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
7525 				   struct iscsi_bus_flash_conn *fnode_conn)
7526 {
7527 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7528 	struct scsi_qla_host *ha = to_qla_host(shost);
7529 	struct dev_db_entry *fw_ddb_entry = NULL;
7530 	dma_addr_t fw_ddb_entry_dma;
7531 	uint32_t options = 0;
7532 	int ret = 0;
7533 
7534 	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
7535 		ql4_printk(KERN_ERR, ha,
7536 			   "%s: Target info is not persistent\n", __func__);
7537 		ret = -EIO;
7538 		goto exit_ddb_login;
7539 	}
7540 
7541 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7542 					  &fw_ddb_entry_dma, GFP_KERNEL);
7543 	if (!fw_ddb_entry) {
7544 		DEBUG2(ql4_printk(KERN_ERR, ha,
7545 				  "%s: Unable to allocate dma buffer\n",
7546 				  __func__));
7547 		ret = -ENOMEM;
7548 		goto exit_ddb_login;
7549 	}
7550 
7551 	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7552 		options |= IPV6_DEFAULT_DDB_ENTRY;
7553 
7554 	ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7555 	if (ret == QLA_ERROR)
7556 		goto exit_ddb_login;
7557 
7558 	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
7559 	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
7560 
7561 	if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
7562 		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
7563 					   fnode_sess->target_id);
7564 	else
7565 		ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
7566 					   fnode_sess->target_id);
7567 
7568 	if (ret > 0)
7569 		ret = -EIO;
7570 
7571 exit_ddb_login:
7572 	if (fw_ddb_entry)
7573 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7574 				  fw_ddb_entry, fw_ddb_entry_dma);
7575 	return ret;
7576 }
7577 
7578 /**
7579  * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
7580  * @cls_sess: pointer to session to be logged out
7581  *
7582  * This performs session log out from the specified target
7583  **/
7584 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
7585 {
7586 	struct iscsi_session *sess;
7587 	struct ddb_entry *ddb_entry = NULL;
7588 	struct scsi_qla_host *ha;
7589 	struct dev_db_entry *fw_ddb_entry = NULL;
7590 	dma_addr_t fw_ddb_entry_dma;
7591 	unsigned long flags;
7592 	unsigned long wtime;
7593 	uint32_t ddb_state;
7594 	int options;
7595 	int ret = 0;
7596 
7597 	sess = cls_sess->dd_data;
7598 	ddb_entry = sess->dd_data;
7599 	ha = ddb_entry->ha;
7600 
7601 	if (ddb_entry->ddb_type != FLASH_DDB) {
7602 		ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
7603 			   __func__);
7604 		ret = -ENXIO;
7605 		goto exit_ddb_logout;
7606 	}
7607 
7608 	if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
7609 		ql4_printk(KERN_ERR, ha,
7610 			   "%s: Logout from boot target entry is not permitted.\n",
7611 			   __func__);
7612 		ret = -EPERM;
7613 		goto exit_ddb_logout;
7614 	}
7615 
7616 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7617 					  &fw_ddb_entry_dma, GFP_KERNEL);
7618 	if (!fw_ddb_entry) {
7619 		ql4_printk(KERN_ERR, ha,
7620 			   "%s: Unable to allocate dma buffer\n", __func__);
7621 		ret = -ENOMEM;
7622 		goto exit_ddb_logout;
7623 	}
7624 
7625 	if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
7626 		goto ddb_logout_init;
7627 
7628 	ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
7629 				      fw_ddb_entry, fw_ddb_entry_dma,
7630 				      NULL, NULL, &ddb_state, NULL,
7631 				      NULL, NULL);
7632 	if (ret == QLA_ERROR)
7633 		goto ddb_logout_init;
7634 
7635 	if (ddb_state == DDB_DS_SESSION_ACTIVE)
7636 		goto ddb_logout_init;
7637 
7638 	/* Wait until the next relogin is triggered via DF_RELOGIN and
7639 	 * clear DF_RELOGIN to avoid further relogin attempts.
7640 	 */
7641 	wtime = jiffies + (HZ * RELOGIN_TOV);
7642 	do {
7643 		if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
7644 			goto ddb_logout_init;
7645 
7646 		schedule_timeout_uninterruptible(HZ);
7647 	} while ((time_after(wtime, jiffies)));
7648 
7649 ddb_logout_init:
7650 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
7651 	atomic_set(&ddb_entry->relogin_timer, 0);
7652 
7653 	options = LOGOUT_OPTION_CLOSE_SESSION;
7654 	qla4xxx_session_logout_ddb(ha, ddb_entry, options);
7655 
7656 	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
7657 	wtime = jiffies + (HZ * LOGOUT_TOV);
7658 	do {
7659 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
7660 					      fw_ddb_entry, fw_ddb_entry_dma,
7661 					      NULL, NULL, &ddb_state, NULL,
7662 					      NULL, NULL);
7663 		if (ret == QLA_ERROR)
7664 			goto ddb_logout_clr_sess;
7665 
7666 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
7667 		    (ddb_state == DDB_DS_SESSION_FAILED))
7668 			goto ddb_logout_clr_sess;
7669 
7670 		schedule_timeout_uninterruptible(HZ);
7671 	} while ((time_after(wtime, jiffies)));
7672 
7673 ddb_logout_clr_sess:
7674 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
7675 	/*
7676 	 * The module reference count was decremented when the session was
7677 	 * set up so that driver unload stays seamless without actually
7678 	 * destroying the session; re-acquire the reference here before
7679 	 * tearing the session down.
7680 	 */
7681 	try_module_get(qla4xxx_iscsi_transport.owner);
7682 	iscsi_destroy_endpoint(ddb_entry->conn->ep);
7683 
7684 	spin_lock_irqsave(&ha->hardware_lock, flags);
7685 	qla4xxx_free_ddb(ha, ddb_entry);
7686 	clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
7687 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
7688 
7689 	iscsi_session_teardown(ddb_entry->sess);
7690 
7691 	clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
7692 	ret = QLA_SUCCESS;
7693 
7694 exit_ddb_logout:
7695 	if (fw_ddb_entry)
7696 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7697 				  fw_ddb_entry, fw_ddb_entry_dma);
7698 	return ret;
7699 }
7700 
7701 /**
7702  * qla4xxx_sysfs_ddb_logout - Logout from the specified target
7703  * @fnode_sess: pointer to session attrs of flash ddb entry
7704  * @fnode_conn: pointer to connection attrs of flash ddb entry
7705  *
7706  * This performs log out from the specified target
7707  **/
7708 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
7709 				    struct iscsi_bus_flash_conn *fnode_conn)
7710 {
7711 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7712 	struct scsi_qla_host *ha = to_qla_host(shost);
7713 	struct ql4_tuple_ddb *flash_tddb = NULL;
7714 	struct ql4_tuple_ddb *tmp_tddb = NULL;
7715 	struct dev_db_entry *fw_ddb_entry = NULL;
7716 	struct ddb_entry *ddb_entry = NULL;
7717 	dma_addr_t fw_ddb_dma;
7718 	uint32_t next_idx = 0;
7719 	uint32_t state = 0, conn_err = 0;
7720 	uint16_t conn_id = 0;
7721 	int idx, index;
7722 	int status, ret = 0;
7723 
7724 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
7725 				      &fw_ddb_dma);
7726 	if (fw_ddb_entry == NULL) {
7727 		ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
7728 		ret = -ENOMEM;
7729 		goto exit_ddb_logout;
7730 	}
7731 
7732 	flash_tddb = vzalloc(sizeof(*flash_tddb));
7733 	if (!flash_tddb) {
7734 		ql4_printk(KERN_WARNING, ha,
7735 			   "%s:Memory Allocation failed.\n", __func__);
7736 		ret = -ENOMEM;
7737 		goto exit_ddb_logout;
7738 	}
7739 
7740 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
7741 	if (!tmp_tddb) {
7742 		ql4_printk(KERN_WARNING, ha,
7743 			   "%s:Memory Allocation failed.\n", __func__);
7744 		ret = -ENOMEM;
7745 		goto exit_ddb_logout;
7746 	}
7747 
7748 	if (!fnode_sess->targetname) {
7749 		ql4_printk(KERN_ERR, ha,
7750 			   "%s:Cannot logout from SendTarget entry\n",
7751 			   __func__);
7752 		ret = -EPERM;
7753 		goto exit_ddb_logout;
7754 	}
7755 
7756 	if (fnode_sess->is_boot_target) {
7757 		ql4_printk(KERN_ERR, ha,
7758 			   "%s: Logout from boot target entry is not permitted.\n",
7759 			   __func__);
7760 		ret = -EPERM;
7761 		goto exit_ddb_logout;
7762 	}
7763 
7764 	strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname,
7765 		ISCSI_NAME_SIZE);
7766 
7767 	if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7768 		sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
7769 	else
7770 		sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
7771 
7772 	flash_tddb->tpgt = fnode_sess->tpgt;
7773 	flash_tddb->port = fnode_conn->port;
7774 
7775 	COPY_ISID(flash_tddb->isid, fnode_sess->isid);
7776 
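	/* Find the active flash DDB session whose tuple (iqn, ip, port,
	 * isid) matches this flash node and log that session out.
	 */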
7777 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
7778 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7779 		if (ddb_entry == NULL)
7780 			continue;
7781 
7782 		if (ddb_entry->ddb_type != FLASH_DDB)
7783 			continue;
7784 
7785 		index = ddb_entry->sess->target_id;
7786 		status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
7787 						 fw_ddb_dma, NULL, &next_idx,
7788 						 &state, &conn_err, NULL,
7789 						 &conn_id);
7790 		if (status == QLA_ERROR) {
7791 			ret = -ENOMEM;
7792 			break;
7793 		}
7794 
7795 		qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
7796 
7797 		status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
7798 						   true);
7799 		if (status == QLA_SUCCESS) {
7800 			ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
7801 			break;
7802 		}
7803 	}
7804 
7805 	if (idx == MAX_DDB_ENTRIES)
7806 		ret = -ESRCH;
7807 
7808 exit_ddb_logout:
7809 	if (flash_tddb)
7810 		vfree(flash_tddb);
7811 	if (tmp_tddb)
7812 		vfree(tmp_tddb);
7813 	if (fw_ddb_entry)
7814 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7815 
7816 	return ret;
7817 }
7818 
7819 static int
7820 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
7821 			    int param, char *buf)
7822 {
7823 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7824 	struct scsi_qla_host *ha = to_qla_host(shost);
7825 	struct iscsi_bus_flash_conn *fnode_conn;
7826 	struct ql4_chap_table chap_tbl;
7827 	struct device *dev;
7828 	int parent_type;
7829 	int rc = 0;
7830 
7831 	dev = iscsi_find_flashnode_conn(fnode_sess);
7832 	if (!dev)
7833 		return -EIO;
7834 
7835 	fnode_conn = iscsi_dev_to_flash_conn(dev);
7836 
7837 	switch (param) {
7838 	case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
7839 		rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
7840 		break;
7841 	case ISCSI_FLASHNODE_PORTAL_TYPE:
7842 		rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
7843 		break;
7844 	case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
7845 		rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
7846 		break;
7847 	case ISCSI_FLASHNODE_DISCOVERY_SESS:
7848 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
7849 		break;
7850 	case ISCSI_FLASHNODE_ENTRY_EN:
7851 		rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
7852 		break;
7853 	case ISCSI_FLASHNODE_HDR_DGST_EN:
7854 		rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
7855 		break;
7856 	case ISCSI_FLASHNODE_DATA_DGST_EN:
7857 		rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
7858 		break;
7859 	case ISCSI_FLASHNODE_IMM_DATA_EN:
7860 		rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
7861 		break;
7862 	case ISCSI_FLASHNODE_INITIAL_R2T_EN:
7863 		rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
7864 		break;
7865 	case ISCSI_FLASHNODE_DATASEQ_INORDER:
7866 		rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
7867 		break;
7868 	case ISCSI_FLASHNODE_PDU_INORDER:
7869 		rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
7870 		break;
7871 	case ISCSI_FLASHNODE_CHAP_AUTH_EN:
7872 		rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
7873 		break;
7874 	case ISCSI_FLASHNODE_SNACK_REQ_EN:
7875 		rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
7876 		break;
7877 	case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
7878 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
7879 		break;
7880 	case ISCSI_FLASHNODE_BIDI_CHAP_EN:
7881 		rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
7882 		break;
7883 	case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
7884 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
7885 		break;
7886 	case ISCSI_FLASHNODE_ERL:
7887 		rc = sprintf(buf, "%u\n", fnode_sess->erl);
7888 		break;
7889 	case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
7890 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
7891 		break;
7892 	case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
7893 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
7894 		break;
7895 	case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
7896 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
7897 		break;
7898 	case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
7899 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
7900 		break;
7901 	case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
7902 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
7903 		break;
7904 	case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
7905 		rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
7906 		break;
7907 	case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
7908 		rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
7909 		break;
7910 	case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
7911 		rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
7912 		break;
7913 	case ISCSI_FLASHNODE_FIRST_BURST:
7914 		rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
7915 		break;
7916 	case ISCSI_FLASHNODE_DEF_TIME2WAIT:
7917 		rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
7918 		break;
7919 	case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
7920 		rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
7921 		break;
7922 	case ISCSI_FLASHNODE_MAX_R2T:
7923 		rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
7924 		break;
7925 	case ISCSI_FLASHNODE_KEEPALIVE_TMO:
7926 		rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
7927 		break;
7928 	case ISCSI_FLASHNODE_ISID:
7929 		rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
7930 			     fnode_sess->isid[0], fnode_sess->isid[1],
7931 			     fnode_sess->isid[2], fnode_sess->isid[3],
7932 			     fnode_sess->isid[4], fnode_sess->isid[5]);
7933 		break;
7934 	case ISCSI_FLASHNODE_TSID:
7935 		rc = sprintf(buf, "%u\n", fnode_sess->tsid);
7936 		break;
7937 	case ISCSI_FLASHNODE_PORT:
7938 		rc = sprintf(buf, "%d\n", fnode_conn->port);
7939 		break;
7940 	case ISCSI_FLASHNODE_MAX_BURST:
7941 		rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
7942 		break;
7943 	case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
7944 		rc = sprintf(buf, "%u\n",
7945 			     fnode_sess->default_taskmgmt_timeout);
7946 		break;
7947 	case ISCSI_FLASHNODE_IPADDR:
7948 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7949 			rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
7950 		else
7951 			rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
7952 		break;
7953 	case ISCSI_FLASHNODE_ALIAS:
7954 		if (fnode_sess->targetalias)
7955 			rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
7956 		else
7957 			rc = sprintf(buf, "\n");
7958 		break;
7959 	case ISCSI_FLASHNODE_REDIRECT_IPADDR:
7960 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7961 			rc = sprintf(buf, "%pI6\n",
7962 				     fnode_conn->redirect_ipaddr);
7963 		else
7964 			rc = sprintf(buf, "%pI4\n",
7965 				     fnode_conn->redirect_ipaddr);
7966 		break;
7967 	case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
7968 		rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
7969 		break;
7970 	case ISCSI_FLASHNODE_LOCAL_PORT:
7971 		rc = sprintf(buf, "%u\n", fnode_conn->local_port);
7972 		break;
7973 	case ISCSI_FLASHNODE_IPV4_TOS:
7974 		rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
7975 		break;
7976 	case ISCSI_FLASHNODE_IPV6_TC:
7977 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7978 			rc = sprintf(buf, "%u\n",
7979 				     fnode_conn->ipv6_traffic_class);
7980 		else
7981 			rc = sprintf(buf, "\n");
7982 		break;
7983 	case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
7984 		rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
7985 		break;
7986 	case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
7987 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7988 			rc = sprintf(buf, "%pI6\n",
7989 				     fnode_conn->link_local_ipv6_addr);
7990 		else
7991 			rc = sprintf(buf, "\n");
7992 		break;
7993 	case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
7994 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
7995 		break;
7996 	case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
7997 		if (fnode_sess->discovery_parent_type == DDB_ISNS)
7998 			parent_type = ISCSI_DISC_PARENT_ISNS;
7999 		else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
8000 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
8001 		else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
8002 			parent_type = ISCSI_DISC_PARENT_SENDTGT;
8003 		else
8004 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
8005 
8006 		rc = sprintf(buf, "%s\n",
8007 			     iscsi_get_discovery_parent_name(parent_type));
8008 		break;
8009 	case ISCSI_FLASHNODE_NAME:
8010 		if (fnode_sess->targetname)
8011 			rc = sprintf(buf, "%s\n", fnode_sess->targetname);
8012 		else
8013 			rc = sprintf(buf, "\n");
8014 		break;
8015 	case ISCSI_FLASHNODE_TPGT:
8016 		rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
8017 		break;
8018 	case ISCSI_FLASHNODE_TCP_XMIT_WSF:
8019 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
8020 		break;
8021 	case ISCSI_FLASHNODE_TCP_RECV_WSF:
8022 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
8023 		break;
8024 	case ISCSI_FLASHNODE_CHAP_OUT_IDX:
8025 		rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
8026 		break;
8027 	case ISCSI_FLASHNODE_USERNAME:
8028 		if (fnode_sess->chap_auth_en) {
8029 			qla4xxx_get_uni_chap_at_index(ha,
8030 						      chap_tbl.name,
8031 						      chap_tbl.secret,
8032 						      fnode_sess->chap_out_idx);
8033 			rc = sprintf(buf, "%s\n", chap_tbl.name);
8034 		} else {
8035 			rc = sprintf(buf, "\n");
8036 		}
8037 		break;
8038 	case ISCSI_FLASHNODE_PASSWORD:
8039 		if (fnode_sess->chap_auth_en) {
8040 			qla4xxx_get_uni_chap_at_index(ha,
8041 						      chap_tbl.name,
8042 						      chap_tbl.secret,
8043 						      fnode_sess->chap_out_idx);
8044 			rc = sprintf(buf, "%s\n", chap_tbl.secret);
8045 		} else {
8046 			rc = sprintf(buf, "\n");
8047 		}
8048 		break;
8049 	case ISCSI_FLASHNODE_STATSN:
8050 		rc = sprintf(buf, "%u\n", fnode_conn->statsn);
8051 		break;
8052 	case ISCSI_FLASHNODE_EXP_STATSN:
8053 		rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
8054 		break;
8055 	case ISCSI_FLASHNODE_IS_BOOT_TGT:
8056 		rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
8057 		break;
8058 	default:
8059 		rc = -ENOSYS;
8060 		break;
8061 	}
8062 
8063 	put_device(dev);
8064 	return rc;
8065 }
8066 
8067 /**
8068  * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
8069  * @fnode_sess: pointer to session attrs of flash ddb entry
8070  * @fnode_conn: pointer to connection attrs of flash ddb entry
8071  * @data: Parameters and their values to update
8072  * @len: length of data
8073  *
8074  * This sets the parameters of the flash ddb entry and writes them to flash.
8075  **/
8076 static int
8077 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
8078 			    struct iscsi_bus_flash_conn *fnode_conn,
8079 			    void *data, int len)
8080 {
8081 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
8082 	struct scsi_qla_host *ha = to_qla_host(shost);
8083 	struct iscsi_flashnode_param_info *fnode_param;
8084 	struct ql4_chap_table chap_tbl;
8085 	struct nlattr *attr;
8086 	uint16_t chap_out_idx = INVALID_ENTRY;
8087 	int rc = QLA_ERROR;
8088 	uint32_t rem = len;
8089 
8090 	memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
8091 	nla_for_each_attr(attr, data, len, rem) {
8092 		fnode_param = nla_data(attr);
8093 
8094 		switch (fnode_param->param) {
8095 		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
8096 			fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
8097 			break;
8098 		case ISCSI_FLASHNODE_PORTAL_TYPE:
8099 			memcpy(fnode_sess->portal_type, fnode_param->value,
8100 			       strlen(fnode_sess->portal_type));
8101 			break;
8102 		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
8103 			fnode_sess->auto_snd_tgt_disable =
8104 							fnode_param->value[0];
8105 			break;
8106 		case ISCSI_FLASHNODE_DISCOVERY_SESS:
8107 			fnode_sess->discovery_sess = fnode_param->value[0];
8108 			break;
8109 		case ISCSI_FLASHNODE_ENTRY_EN:
8110 			fnode_sess->entry_state = fnode_param->value[0];
8111 			break;
8112 		case ISCSI_FLASHNODE_HDR_DGST_EN:
8113 			fnode_conn->hdrdgst_en = fnode_param->value[0];
8114 			break;
8115 		case ISCSI_FLASHNODE_DATA_DGST_EN:
8116 			fnode_conn->datadgst_en = fnode_param->value[0];
8117 			break;
8118 		case ISCSI_FLASHNODE_IMM_DATA_EN:
8119 			fnode_sess->imm_data_en = fnode_param->value[0];
8120 			break;
8121 		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
8122 			fnode_sess->initial_r2t_en = fnode_param->value[0];
8123 			break;
8124 		case ISCSI_FLASHNODE_DATASEQ_INORDER:
8125 			fnode_sess->dataseq_inorder_en = fnode_param->value[0];
8126 			break;
8127 		case ISCSI_FLASHNODE_PDU_INORDER:
8128 			fnode_sess->pdu_inorder_en = fnode_param->value[0];
8129 			break;
8130 		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
8131 			fnode_sess->chap_auth_en = fnode_param->value[0];
8132 			/* Invalidate chap index if chap auth is disabled */
8133 			if (!fnode_sess->chap_auth_en)
8134 				fnode_sess->chap_out_idx = INVALID_ENTRY;
8135 
8136 			break;
8137 		case ISCSI_FLASHNODE_SNACK_REQ_EN:
8138 			fnode_conn->snack_req_en = fnode_param->value[0];
8139 			break;
8140 		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
8141 			fnode_sess->discovery_logout_en = fnode_param->value[0];
8142 			break;
8143 		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
8144 			fnode_sess->bidi_chap_en = fnode_param->value[0];
8145 			break;
8146 		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
8147 			fnode_sess->discovery_auth_optional =
8148 							fnode_param->value[0];
8149 			break;
8150 		case ISCSI_FLASHNODE_ERL:
8151 			fnode_sess->erl = fnode_param->value[0];
8152 			break;
8153 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
8154 			fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
8155 			break;
8156 		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
8157 			fnode_conn->tcp_nagle_disable = fnode_param->value[0];
8158 			break;
8159 		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
8160 			fnode_conn->tcp_wsf_disable = fnode_param->value[0];
8161 			break;
8162 		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
8163 			fnode_conn->tcp_timer_scale = fnode_param->value[0];
8164 			break;
8165 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
8166 			fnode_conn->tcp_timestamp_en = fnode_param->value[0];
8167 			break;
8168 		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
8169 			fnode_conn->fragment_disable = fnode_param->value[0];
8170 			break;
8171 		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
8172 			fnode_conn->max_recv_dlength =
8173 					*(unsigned *)fnode_param->value;
8174 			break;
8175 		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
8176 			fnode_conn->max_xmit_dlength =
8177 					*(unsigned *)fnode_param->value;
8178 			break;
8179 		case ISCSI_FLASHNODE_FIRST_BURST:
8180 			fnode_sess->first_burst =
8181 					*(unsigned *)fnode_param->value;
8182 			break;
8183 		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
8184 			fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
8185 			break;
8186 		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
8187 			fnode_sess->time2retain =
8188 						*(uint16_t *)fnode_param->value;
8189 			break;
8190 		case ISCSI_FLASHNODE_MAX_R2T:
8191 			fnode_sess->max_r2t =
8192 					*(uint16_t *)fnode_param->value;
8193 			break;
8194 		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
8195 			fnode_conn->keepalive_timeout =
8196 				*(uint16_t *)fnode_param->value;
8197 			break;
8198 		case ISCSI_FLASHNODE_ISID:
8199 			memcpy(fnode_sess->isid, fnode_param->value,
8200 			       sizeof(fnode_sess->isid));
8201 			break;
8202 		case ISCSI_FLASHNODE_TSID:
8203 			fnode_sess->tsid = *(uint16_t *)fnode_param->value;
8204 			break;
8205 		case ISCSI_FLASHNODE_PORT:
8206 			fnode_conn->port = *(uint16_t *)fnode_param->value;
8207 			break;
8208 		case ISCSI_FLASHNODE_MAX_BURST:
8209 			fnode_sess->max_burst = *(unsigned *)fnode_param->value;
8210 			break;
8211 		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
8212 			fnode_sess->default_taskmgmt_timeout =
8213 						*(uint16_t *)fnode_param->value;
8214 			break;
8215 		case ISCSI_FLASHNODE_IPADDR:
8216 			memcpy(fnode_conn->ipaddress, fnode_param->value,
8217 			       IPv6_ADDR_LEN);
8218 			break;
8219 		case ISCSI_FLASHNODE_ALIAS:
8220 			rc = iscsi_switch_str_param(&fnode_sess->targetalias,
8221 						    (char *)fnode_param->value);
8222 			break;
8223 		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
8224 			memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
8225 			       IPv6_ADDR_LEN);
8226 			break;
8227 		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
8228 			fnode_conn->max_segment_size =
8229 					*(unsigned *)fnode_param->value;
8230 			break;
8231 		case ISCSI_FLASHNODE_LOCAL_PORT:
8232 			fnode_conn->local_port =
8233 						*(uint16_t *)fnode_param->value;
8234 			break;
8235 		case ISCSI_FLASHNODE_IPV4_TOS:
8236 			fnode_conn->ipv4_tos = fnode_param->value[0];
8237 			break;
8238 		case ISCSI_FLASHNODE_IPV6_TC:
8239 			fnode_conn->ipv6_traffic_class = fnode_param->value[0];
8240 			break;
8241 		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
8242 			fnode_conn->ipv6_flow_label = fnode_param->value[0];
8243 			break;
8244 		case ISCSI_FLASHNODE_NAME:
8245 			rc = iscsi_switch_str_param(&fnode_sess->targetname,
8246 						    (char *)fnode_param->value);
8247 			break;
8248 		case ISCSI_FLASHNODE_TPGT:
8249 			fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
8250 			break;
8251 		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
8252 			memcpy(fnode_conn->link_local_ipv6_addr,
8253 			       fnode_param->value, IPv6_ADDR_LEN);
8254 			break;
8255 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
8256 			fnode_sess->discovery_parent_idx =
8257 						*(uint16_t *)fnode_param->value;
8258 			break;
8259 		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
8260 			fnode_conn->tcp_xmit_wsf =
8261 						*(uint8_t *)fnode_param->value;
8262 			break;
8263 		case ISCSI_FLASHNODE_TCP_RECV_WSF:
8264 			fnode_conn->tcp_recv_wsf =
8265 						*(uint8_t *)fnode_param->value;
8266 			break;
8267 		case ISCSI_FLASHNODE_STATSN:
8268 			fnode_conn->statsn = *(uint32_t *)fnode_param->value;
8269 			break;
8270 		case ISCSI_FLASHNODE_EXP_STATSN:
8271 			fnode_conn->exp_statsn =
8272 						*(uint32_t *)fnode_param->value;
8273 			break;
8274 		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
8275 			chap_out_idx = *(uint16_t *)fnode_param->value;
8276 			if (!qla4xxx_get_uni_chap_at_index(ha,
8277 							   chap_tbl.name,
8278 							   chap_tbl.secret,
8279 							   chap_out_idx)) {
8280 				fnode_sess->chap_out_idx = chap_out_idx;
8281 				/* Enable chap auth if chap index is valid */
8282 				fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
8283 			}
8284 			break;
8285 		default:
8286 			ql4_printk(KERN_ERR, ha,
8287 				   "%s: No such sysfs attribute\n", __func__);
8288 			rc = -ENOSYS;
8289 			goto exit_set_param;
8290 		}
8291 	}
8292 
8293 	rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
8294 
8295 exit_set_param:
8296 	return rc;
8297 }
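
/*
 * Example (sketch only, not part of the driver): these flashnode parameters
 * are normally written from user space through the iSCSI flashnode sysfs
 * interface, e.g. with something like
 *
 *	iscsiadm -m host -H <host_no> -C flashnode -x <flashnode_idx> \
 *		-o update -n flashnode.conn[0].port -v 3260
 *
 * The exact option names depend on the open-iscsi version; treat this purely
 * as an illustration of how qla4xxx_sysfs_ddb_set_param() gets invoked.
 */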
8298 
8299 /**
8300  * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
8301  * @fnode_sess: pointer to session attrs of flash ddb entry
8302  *
8303  * This invalidates the flash ddb entry at the given index
8304  **/
8305 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
8306 {
8307 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
8308 	struct scsi_qla_host *ha = to_qla_host(shost);
8309 	uint32_t dev_db_start_offset;
8310 	uint32_t dev_db_end_offset;
8311 	struct dev_db_entry *fw_ddb_entry = NULL;
8312 	dma_addr_t fw_ddb_entry_dma;
8313 	uint16_t *ddb_cookie = NULL;
8314 	size_t ddb_size = 0;
8315 	void *pddb = NULL;
8316 	int target_id;
8317 	int rc = 0;
8318 
8319 	if (fnode_sess->is_boot_target) {
8320 		rc = -EPERM;
8321 		DEBUG2(ql4_printk(KERN_ERR, ha,
8322 				  "%s: Deletion of boot target entry is not permitted.\n",
8323 				  __func__));
8324 		goto exit_ddb_del;
8325 	}
8326 
8327 	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
8328 		goto sysfs_ddb_del;
8329 
8330 	if (is_qla40XX(ha)) {
8331 		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
8332 		dev_db_end_offset = FLASH_OFFSET_DB_END;
8333 		dev_db_start_offset += (fnode_sess->target_id *
8334 				       sizeof(*fw_ddb_entry));
8335 		ddb_size = sizeof(*fw_ddb_entry);
8336 	} else {
8337 		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
8338 				      (ha->hw.flt_region_ddb << 2);
8339 		/* flt_ddb_size is the DDB table size for both ports,
8340 		 * so divide it by 2 to calculate the offset of the second port.
8341 		 */
8342 		if (ha->port_num == 1)
8343 			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
8344 
8345 		dev_db_end_offset = dev_db_start_offset +
8346 				    (ha->hw.flt_ddb_size / 2);
8347 
8348 		dev_db_start_offset += (fnode_sess->target_id *
8349 				       sizeof(*fw_ddb_entry));
8350 		dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
8351 
8352 		ddb_size = sizeof(*ddb_cookie);
8353 	}
8354 
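	/* At this point dev_db_start_offset points at the flash area that will
	 * be rewritten: the whole DDB entry on ISP4xxx, or only the 2-byte
	 * cookie field of the entry on ISP8xxx.
	 */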
8355 	DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
8356 			  __func__, dev_db_start_offset, dev_db_end_offset));
8357 
8358 	if (dev_db_start_offset > dev_db_end_offset) {
8359 		rc = -EIO;
8360 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
8361 				  __func__, fnode_sess->target_id));
8362 		goto exit_ddb_del;
8363 	}
8364 
8365 	pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
8366 				  &fw_ddb_entry_dma, GFP_KERNEL);
8367 	if (!pddb) {
8368 		rc = -ENOMEM;
8369 		DEBUG2(ql4_printk(KERN_ERR, ha,
8370 				  "%s: Unable to allocate dma buffer\n",
8371 				  __func__));
8372 		goto exit_ddb_del;
8373 	}
8374 
8375 	if (is_qla40XX(ha)) {
8376 		fw_ddb_entry = pddb;
8377 		memset(fw_ddb_entry, 0, ddb_size);
8378 		ddb_cookie = &fw_ddb_entry->cookie;
8379 	} else {
8380 		ddb_cookie = pddb;
8381 	}
8382 
8383 	/* invalidate the cookie */
8384 	*ddb_cookie = 0xFFEE;
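	/* FLASH_OPT_RMW_COMMIT presumably performs a read-modify-write of just
	 * ddb_size bytes at dev_db_start_offset, i.e. on ISP8xxx only the
	 * cookie word of the flash entry is rewritten.
	 */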
8385 	qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
8386 			  ddb_size, FLASH_OPT_RMW_COMMIT);
8387 
8388 sysfs_ddb_del:
8389 	target_id = fnode_sess->target_id;
8390 	iscsi_destroy_flashnode_sess(fnode_sess);
8391 	ql4_printk(KERN_INFO, ha,
8392 		   "%s: session and conn entries for flashnode %u of host %lu deleted\n",
8393 		   __func__, target_id, ha->host_no);
8394 exit_ddb_del:
8395 	if (pddb)
8396 		dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
8397 				  fw_ddb_entry_dma);
8398 	return rc;
8399 }
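
/* User-space side note (assumption, open-iscsi syntax varies by version):
 * this handler is reached through the iSCSI transport class, e.g. via
 * "iscsiadm -m host -H <host_no> -C flashnode -x <index> -o delete".
 */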
8400 
8401 /**
8402  * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
8403  * @ha: pointer to adapter structure
8404  *
8405  * Export the firmware DDB for all send targets and normal targets to sysfs.
8406  **/
8407 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
8408 {
8409 	struct dev_db_entry *fw_ddb_entry = NULL;
8410 	dma_addr_t fw_ddb_entry_dma;
8411 	uint16_t max_ddbs;
8412 	uint16_t idx = 0;
8413 	int ret = QLA_SUCCESS;
8414 
8415 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
8416 					  sizeof(*fw_ddb_entry),
8417 					  &fw_ddb_entry_dma, GFP_KERNEL);
8418 	if (!fw_ddb_entry) {
8419 		DEBUG2(ql4_printk(KERN_ERR, ha,
8420 				  "%s: Unable to allocate dma buffer\n",
8421 				  __func__));
8422 		return -ENOMEM;
8423 	}
8424 
8425 	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
8426 				     MAX_DEV_DB_ENTRIES;
8427 
8428 	for (idx = 0; idx < max_ddbs; idx++) {
8429 		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
8430 					     idx))
8431 			continue;
8432 
8433 		ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
8434 		if (ret) {
8435 			ret = -EIO;
8436 			break;
8437 		}
8438 	}
8439 
8440 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
8441 			  fw_ddb_entry_dma);
8442 
8443 	return ret;
8444 }
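
/* Each exported entry shows up under the iSCSI flashnode bus in sysfs,
 * typically as flashnode_sess-<host_no>:<index> with a matching
 * flashnode_conn-<host_no>:<index>:0 device. The naming is handled by the
 * transport class and is noted here only for orientation.
 */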
8445 
8446 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
8447 {
8448 	iscsi_destroy_all_flashnode(ha->host);
8449 }
8450 
8451 /**
8452  * qla4xxx_build_ddb_list - Build ddb list and setup sessions
8453  * @ha: pointer to adapter structure
8454  * @is_reset: Is this init path or reset path
8455  *
8456  * Create a list of sendtargets (st) from firmware DDBs, issue send targets
8457  * using connection open, then create the list of normal targets (nt)
8458  * from firmware DDBs. Based on the nt list, set up the session and
8459  * connection objects.
8460  **/
8461 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
8462 {
8463 	uint16_t tmo = 0;
8464 	struct list_head list_st, list_nt;
8465 	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
8466 	unsigned long wtime;
8467 
8468 	if (!test_bit(AF_LINK_UP, &ha->flags)) {
8469 		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
8470 		ha->is_reset = is_reset;
8471 		return;
8472 	}
8473 
8474 	INIT_LIST_HEAD(&list_st);
8475 	INIT_LIST_HEAD(&list_nt);
8476 
8477 	qla4xxx_build_st_list(ha, &list_st);
8478 
8479 	/* Before issuing conn open mbox, ensure all IP states are configured.
8480 	 * Note: conn open fails if the IPs are not configured.
8481 	 */
8482 	qla4xxx_wait_for_ip_configuration(ha);
8483 
8484 	/* Go through the STs and fire the sendtargets by issuing conn open mbx */
8485 	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
8486 		qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
8487 	}
8488 
8489 	/* Wait for the sendtargets to finish, for a minimum of 12 sec (LOGIN_TOV) */
8490 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
8491 	       (ha->def_timeout < LOGIN_TOV * 10) ?
8492 	       ha->def_timeout : LOGIN_TOV);
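	/* i.e. honor the configured default timeout only when it lies between
	 * LOGIN_TOV and LOGIN_TOV * 10; otherwise fall back to LOGIN_TOV.
	 */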
8493 
8494 	DEBUG2(ql4_printk(KERN_INFO, ha,
8495 			  "Default time to wait for build ddb %d\n", tmo));
8496 
8497 	wtime = jiffies + (HZ * tmo);
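	/* Wait (polling every 100 ms, for at most tmo seconds) for the
	 * sendtargets to settle; failed entries are dropped from list_st as
	 * they are detected, and the loop exits early once the list is empty.
	 */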
8498 	do {
8499 		if (list_empty(&list_st))
8500 			break;
8501 
8502 		qla4xxx_remove_failed_ddb(ha, &list_st);
8503 		schedule_timeout_uninterruptible(HZ / 10);
8504 	} while (time_after(wtime, jiffies));
8505 
8506 
8507 	qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
8508 
8509 	qla4xxx_free_ddb_list(&list_st);
8510 	qla4xxx_free_ddb_list(&list_nt);
8511 
8512 	qla4xxx_free_ddb_index(ha);
8513 }
8514 
8515 /**
8516  * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
8517  * response.
8518  * @ha: pointer to adapter structure
8519  *
8520  * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is set
8521  * in the DDB and we wait for the login response of the boot targets during
8522  * probe.
8523  **/
8524 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
8525 {
8526 	struct ddb_entry *ddb_entry;
8527 	struct dev_db_entry *fw_ddb_entry = NULL;
8528 	dma_addr_t fw_ddb_entry_dma;
8529 	unsigned long wtime;
8530 	uint32_t ddb_state;
8531 	int max_ddbs, idx, ret;
8532 
8533 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
8534 				     MAX_DEV_DB_ENTRIES;
8535 
8536 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8537 					  &fw_ddb_entry_dma, GFP_KERNEL);
8538 	if (!fw_ddb_entry) {
8539 		ql4_printk(KERN_ERR, ha,
8540 			   "%s: Unable to allocate dma buffer\n", __func__);
8541 		goto exit_login_resp;
8542 	}
8543 
8544 	wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
8545 
8546 	for (idx = 0; idx < max_ddbs; idx++) {
8547 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
8548 		if (ddb_entry == NULL)
8549 			continue;
8550 
8551 		if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
8552 			DEBUG2(ql4_printk(KERN_INFO, ha,
8553 					  "%s: DDB index [%d]\n", __func__,
8554 					  ddb_entry->fw_ddb_index));
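			/* Poll the firmware DDB state once per second until
			 * the boot session is active or has failed, or until
			 * BOOT_LOGIN_RESP_TOV expires.
			 */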
8555 			do {
8556 				ret = qla4xxx_get_fwddb_entry(ha,
8557 						ddb_entry->fw_ddb_index,
8558 						fw_ddb_entry, fw_ddb_entry_dma,
8559 						NULL, NULL, &ddb_state, NULL,
8560 						NULL, NULL);
8561 				if (ret == QLA_ERROR)
8562 					goto exit_login_resp;
8563 
8564 				if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
8565 				    (ddb_state == DDB_DS_SESSION_FAILED))
8566 					break;
8567 
8568 				schedule_timeout_uninterruptible(HZ);
8569 
8570 			} while ((time_after(wtime, jiffies)));
8571 
8572 			if (!time_after(wtime, jiffies)) {
8573 				DEBUG2(ql4_printk(KERN_INFO, ha,
8574 						  "%s: Login response wait timer expired\n",
8575 						  __func__));
8576 				goto exit_login_resp;
8577 			}
8578 		}
8579 	}
8580 
8581 exit_login_resp:
8582 	if (fw_ddb_entry)
8583 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8584 				  fw_ddb_entry, fw_ddb_entry_dma);
8585 }
8586 
8587 /**
8588  * qla4xxx_probe_adapter - callback function to probe HBA
8589  * @pdev: pointer to pci_dev structure
8590  * @ent: pointer to pci_device_id entry
8591  *
8592  * This routine will probe for QLogic 4xxx iSCSI host adapters.
8593  * It returns zero if successful. It also initializes all data necessary for
8594  * the driver.
8595  **/
8596 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
8597 				 const struct pci_device_id *ent)
8598 {
8599 	int ret = -ENODEV, status;
8600 	struct Scsi_Host *host;
8601 	struct scsi_qla_host *ha;
8602 	uint8_t init_retry_count = 0;
8603 	char buf[34];
8604 	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
8605 	uint32_t dev_state;
8606 
8607 	if (pci_enable_device(pdev))
8608 		return -1;
8609 
8610 	host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
8611 	if (host == NULL) {
8612 		printk(KERN_WARNING
8613 		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
8614 		goto probe_disable_device;
8615 	}
8616 
8617 	/* Clear our data area */
8618 	ha = to_qla_host(host);
8619 	memset(ha, 0, sizeof(*ha));
8620 
8621 	/* Save the information from PCI BIOS.	*/
8622 	ha->pdev = pdev;
8623 	ha->host = host;
8624 	ha->host_no = host->host_no;
8625 	ha->func_num = PCI_FUNC(ha->pdev->devfn);
8626 
8627 	pci_enable_pcie_error_reporting(pdev);
8628 
8629 	/* Setup Runtime configurable options */
8630 	if (is_qla8022(ha)) {
8631 		ha->isp_ops = &qla4_82xx_isp_ops;
8632 		ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
8633 		ha->qdr_sn_window = -1;
8634 		ha->ddr_mn_window = -1;
8635 		ha->curr_window = 255;
8636 		nx_legacy_intr = &legacy_intr[ha->func_num];
8637 		ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
8638 		ha->nx_legacy_intr.tgt_status_reg =
8639 			nx_legacy_intr->tgt_status_reg;
8640 		ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
8641 		ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
8642 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
8643 		ha->isp_ops = &qla4_83xx_isp_ops;
8644 		ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
8645 	} else {
8646 		ha->isp_ops = &qla4xxx_isp_ops;
8647 	}
8648 
8649 	if (is_qla80XX(ha)) {
8650 		rwlock_init(&ha->hw_lock);
8651 		ha->pf_bit = ha->func_num << 16;
8652 		/* Set EEH reset type to fundamental if required by hba */
8653 		pdev->needs_freset = 1;
8654 	}
8655 
8656 	/* Configure PCI I/O space. */
8657 	ret = ha->isp_ops->iospace_config(ha);
8658 	if (ret)
8659 		goto probe_failed_ioconfig;
8660 
8661 	ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
8662 		   pdev->device, pdev->irq, ha->reg);
8663 
8664 	qla4xxx_config_dma_addressing(ha);
8665 
8666 	/* Initialize lists and spinlocks. */
8667 	INIT_LIST_HEAD(&ha->free_srb_q);
8668 
8669 	mutex_init(&ha->mbox_sem);
8670 	mutex_init(&ha->chap_sem);
8671 	init_completion(&ha->mbx_intr_comp);
8672 	init_completion(&ha->disable_acb_comp);
8673 	init_completion(&ha->idc_comp);
8674 	init_completion(&ha->link_up_comp);
8676 
8677 	spin_lock_init(&ha->hardware_lock);
8678 	spin_lock_init(&ha->work_lock);
8679 
8680 	/* Initialize work list */
8681 	INIT_LIST_HEAD(&ha->work_list);
8682 
8683 	/* Allocate dma buffers */
8684 	if (qla4xxx_mem_alloc(ha)) {
8685 		ql4_printk(KERN_WARNING, ha,
8686 		    "[ERROR] Failed to allocate memory for adapter\n");
8687 
8688 		ret = -ENOMEM;
8689 		goto probe_failed;
8690 	}
8691 
8692 	host->cmd_per_lun = 3;
8693 	host->max_channel = 0;
8694 	host->max_lun = MAX_LUNS - 1;
8695 	host->max_id = MAX_TARGETS;
8696 	host->max_cmd_len = IOCB_MAX_CDB_LEN;
8697 	host->can_queue = MAX_SRBS;
8698 	host->transportt = qla4xxx_scsi_transport;
8699 
8700 	ret = scsi_init_shared_tag_map(host, MAX_SRBS);
8701 	if (ret) {
8702 		ql4_printk(KERN_WARNING, ha,
8703 			   "%s: scsi_init_shared_tag_map failed\n", __func__);
8704 		goto probe_failed;
8705 	}
8706 
8707 	pci_set_drvdata(pdev, ha);
8708 
8709 	ret = scsi_add_host(host, &pdev->dev);
8710 	if (ret)
8711 		goto probe_failed;
8712 
8713 	if (is_qla80XX(ha))
8714 		qla4_8xxx_get_flash_info(ha);
8715 
8716 	if (is_qla8032(ha) || is_qla8042(ha)) {
8717 		qla4_83xx_read_reset_template(ha);
8718 		/*
8719 		 * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
8720 		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
8721 		 * to NEED_RESET, but if NEED_RESET is set they should still
8722 		 * honor the reset.
8723 		 */
8724 		if (ql4xdontresethba == 1)
8725 			qla4_83xx_set_idc_dontreset(ha);
8726 	}
8727 
8728 	/*
8729 	 * Initialize the Host adapter request/response queues and
8730 	 * firmware
8731 	 * NOTE: interrupts enabled upon successful completion
8732 	 */
8733 	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
8734 
8735 	/* Don't retry adapter initialization if IRQ allocation failed */
8736 	if (is_qla80XX(ha) && (status == QLA_ERROR))
8737 		goto skip_retry_init;
8738 
8739 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
8740 	    init_retry_count++ < MAX_INIT_RETRIES) {
8741 
8742 		if (is_qla80XX(ha)) {
8743 			ha->isp_ops->idc_lock(ha);
8744 			dev_state = qla4_8xxx_rd_direct(ha,
8745 							QLA8XXX_CRB_DEV_STATE);
8746 			ha->isp_ops->idc_unlock(ha);
8747 			if (dev_state == QLA8XXX_DEV_FAILED) {
8748 				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
8749 				    "initialize adapter. H/W is in failed state\n",
8750 				    __func__);
8751 				break;
8752 			}
8753 		}
8754 		DEBUG2(printk("scsi: %s: retrying adapter initialization "
8755 			      "(%d)\n", __func__, init_retry_count));
8756 
8757 		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
8758 			continue;
8759 
8760 		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
8761 		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
8762 			if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
8763 				goto skip_retry_init;
8764 		}
8765 	}
8766 
8767 skip_retry_init:
8768 	if (!test_bit(AF_ONLINE, &ha->flags)) {
8769 		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
8770 
8771 		if ((is_qla8022(ha) && ql4xdontresethba) ||
8772 		    ((is_qla8032(ha) || is_qla8042(ha)) &&
8773 		     qla4_83xx_idc_dontreset(ha))) {
8774 			/* Put the device in failed state. */
8775 			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
8776 			ha->isp_ops->idc_lock(ha);
8777 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8778 					    QLA8XXX_DEV_FAILED);
8779 			ha->isp_ops->idc_unlock(ha);
8780 		}
8781 		ret = -ENODEV;
8782 		goto remove_host;
8783 	}
8784 
8785 	/* Startup the kernel thread for this host adapter. */
8786 	DEBUG2(printk("scsi: %s: Starting kernel thread for "
8787 		      "qla4xxx_dpc\n", __func__));
8788 	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
8789 	ha->dpc_thread = create_singlethread_workqueue(buf);
8790 	if (!ha->dpc_thread) {
8791 		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
8792 		ret = -ENODEV;
8793 		goto remove_host;
8794 	}
8795 	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
8796 
8797 	ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
8798 				      ha->host_no);
8799 	if (!ha->task_wq) {
8800 		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
8801 		ret = -ENODEV;
8802 		goto remove_host;
8803 	}
8804 
8805 	/*
8806 	 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
8807 	 * (which is called indirectly by qla4xxx_initialize_adapter),
8808 	 * so that irqs will be registered after crbinit but before
8809 	 * mbx_intr_enable.
8810 	 */
8811 	if (is_qla40XX(ha)) {
8812 		ret = qla4xxx_request_irqs(ha);
8813 		if (ret) {
8814 			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
8815 			    "interrupt %d already in use.\n", pdev->irq);
8816 			goto remove_host;
8817 		}
8818 	}
8819 
8820 	pci_save_state(ha->pdev);
8821 	ha->isp_ops->enable_intrs(ha);
8822 
8823 	/* Start timer thread. */
8824 	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
8825 
8826 	set_bit(AF_INIT_DONE, &ha->flags);
8827 
8828 	qla4_8xxx_alloc_sysfs_attr(ha);
8829 
8830 	printk(KERN_INFO
8831 	       " QLogic iSCSI HBA Driver version: %s\n"
8832 	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
8833 	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
8834 	       ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
8835 	       ha->fw_info.fw_patch, ha->fw_info.fw_build);
8836 
8837 	/* Set the driver version */
8838 	if (is_qla80XX(ha))
8839 		qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
8840 
8841 	if (qla4xxx_setup_boot_info(ha))
8842 		ql4_printk(KERN_ERR, ha,
8843 			   "%s: No iSCSI boot target configured\n", __func__);
8844 
8845 	set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
8846 	/* Perform the build ddb list and login to each */
8847 	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
8848 	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
8849 	qla4xxx_wait_login_resp_boot_tgt(ha);
8850 
8851 	qla4xxx_create_chap_list(ha);
8852 
8853 	qla4xxx_create_ifaces(ha);
8854 	return 0;
8855 
8856 remove_host:
8857 	scsi_remove_host(ha->host);
8858 
8859 probe_failed:
8860 	qla4xxx_free_adapter(ha);
8861 
8862 probe_failed_ioconfig:
8863 	pci_disable_pcie_error_reporting(pdev);
8864 	scsi_host_put(ha->host);
8865 
8866 probe_disable_device:
8867 	pci_disable_device(pdev);
8868 
8869 	return ret;
8870 }
8871 
8872 /**
8873  * qla4xxx_prevent_other_port_reinit - prevent other port from re-initializing
8874  * @ha: pointer to adapter structure
8875  *
8876  * Mark the other ISP-4xxx port to indicate that the driver is being removed,
8877  * so that the other port will not re-initialize while in the process of
8878  * removing the ha due to driver unload or hba hotplug.
8879  **/
8880 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
8881 {
8882 	struct scsi_qla_host *other_ha = NULL;
8883 	struct pci_dev *other_pdev = NULL;
8884 	int fn = ISP4XXX_PCI_FN_2;
8885 
8886 	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
8887 	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
8888 		fn = ISP4XXX_PCI_FN_1;
8889 
8890 	other_pdev =
8891 		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
8892 		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
8893 		fn));
8894 
8895 	/* Get other_ha if other_pdev is valid and its state is enabled */
8896 	if (other_pdev) {
8897 		if (atomic_read(&other_pdev->enable_cnt)) {
8898 			other_ha = pci_get_drvdata(other_pdev);
8899 			if (other_ha) {
8900 				set_bit(AF_HA_REMOVAL, &other_ha->flags);
8901 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
8902 				    "Prevent %s reinit\n", __func__,
8903 				    dev_name(&other_ha->pdev->dev)));
8904 			}
8905 		}
8906 		pci_dev_put(other_pdev);
8907 	}
8908 }
8909 
8910 static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
8911 		struct ddb_entry *ddb_entry)
8912 {
8913 	struct dev_db_entry *fw_ddb_entry = NULL;
8914 	dma_addr_t fw_ddb_entry_dma;
8915 	unsigned long wtime;
8916 	uint32_t ddb_state;
8917 	int options;
8918 	int status;
8919 
8920 	options = LOGOUT_OPTION_CLOSE_SESSION;
8921 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
8922 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
8923 		goto clear_ddb;
8924 	}
8925 
8926 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8927 					  &fw_ddb_entry_dma, GFP_KERNEL);
8928 	if (!fw_ddb_entry) {
8929 		ql4_printk(KERN_ERR, ha,
8930 			   "%s: Unable to allocate dma buffer\n", __func__);
8931 		goto clear_ddb;
8932 	}
8933 
8934 	wtime = jiffies + (HZ * LOGOUT_TOV);
8935 	do {
8936 		status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
8937 						 fw_ddb_entry, fw_ddb_entry_dma,
8938 						 NULL, NULL, &ddb_state, NULL,
8939 						 NULL, NULL);
8940 		if (status == QLA_ERROR)
8941 			goto free_ddb;
8942 
8943 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
8944 		    (ddb_state == DDB_DS_SESSION_FAILED))
8945 			goto free_ddb;
8946 
8947 		schedule_timeout_uninterruptible(HZ);
8948 	} while ((time_after(wtime, jiffies)));
8949 
8950 free_ddb:
8951 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8952 			  fw_ddb_entry, fw_ddb_entry_dma);
8953 clear_ddb:
8954 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
8955 }
8956 
8957 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
8958 {
8959 	struct ddb_entry *ddb_entry;
8960 	int idx;
8961 
8962 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
8963 
8964 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
8965 		if ((ddb_entry != NULL) &&
8966 		    (ddb_entry->ddb_type == FLASH_DDB)) {
8967 
8968 			qla4xxx_destroy_ddb(ha, ddb_entry);
8969 			/*
8970 			 * We decremented the driver's reference count when we
8971 			 * set up the session, so that driver unload is
8972 			 * seamless without actually destroying the
8973 			 * session.
8974 			 */
8975 			try_module_get(qla4xxx_iscsi_transport.owner);
8976 			iscsi_destroy_endpoint(ddb_entry->conn->ep);
8977 			qla4xxx_free_ddb(ha, ddb_entry);
8978 			iscsi_session_teardown(ddb_entry->sess);
8979 		}
8980 	}
8981 }
8982 /**
8983  * qla4xxx_remove_adapter - callback function to remove adapter.
8984  * @pdev: PCI device pointer
8985  **/
8986 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
8987 {
8988 	struct scsi_qla_host *ha;
8989 
8990 	/*
8991 	 * If the PCI device is disabled then it means probe_adapter had
8992 	 * failed and resources already cleaned up on probe_adapter exit.
8993 	 */
8994 	if (!pci_is_enabled(pdev))
8995 		return;
8996 
8997 	ha = pci_get_drvdata(pdev);
8998 
8999 	if (is_qla40XX(ha))
9000 		qla4xxx_prevent_other_port_reinit(ha);
9001 
9002 	/* destroy iface from sysfs */
9003 	qla4xxx_destroy_ifaces(ha);
9004 
9005 	if ((!ql4xdisablesysfsboot) && ha->boot_kset)
9006 		iscsi_boot_destroy_kset(ha->boot_kset);
9007 
9008 	qla4xxx_destroy_fw_ddb_session(ha);
9009 	qla4_8xxx_free_sysfs_attr(ha);
9010 
9011 	qla4xxx_sysfs_ddb_remove(ha);
9012 	scsi_remove_host(ha->host);
9013 
9014 	qla4xxx_free_adapter(ha);
9015 
9016 	scsi_host_put(ha->host);
9017 
9018 	pci_disable_pcie_error_reporting(pdev);
9019 	pci_disable_device(pdev);
9020 }
9021 
9022 /**
9023  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
9024  * @ha: HA context
9025  *
9026  * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
9027  * supported addressing method.
9028  */
9029 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
9030 {
9031 	int retval;
9032 
9033 	/* Update our PCI device dma_mask for full 64 bit mask */
9034 	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
9035 		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
9036 			dev_dbg(&ha->pdev->dev,
9037 				  "Failed to set 64 bit PCI consistent mask; "
9038 				   "using 32 bit.\n");
9039 			retval = pci_set_consistent_dma_mask(ha->pdev,
9040 							     DMA_BIT_MASK(32));
9041 		}
9042 	} else
9043 		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
9044 }
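
/*
 * Note: a simpler variant using the combined DMA mask helper would be
 * (sketch only, not byte-for-byte equivalent to the fallback logic above,
 * and not what this driver does):
 *
 *	if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64)))
 *		dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
 */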
9045 
9046 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
9047 {
9048 	struct iscsi_cls_session *cls_sess;
9049 	struct iscsi_session *sess;
9050 	struct ddb_entry *ddb;
9051 	int queue_depth = QL4_DEF_QDEPTH;
9052 
9053 	cls_sess = starget_to_session(sdev->sdev_target);
9054 	sess = cls_sess->dd_data;
9055 	ddb = sess->dd_data;
9056 
9057 	sdev->hostdata = ddb;
9058 
9059 	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
9060 		queue_depth = ql4xmaxqdepth;
9061 
9062 	scsi_change_queue_depth(sdev, queue_depth);
9063 	return 0;
9064 }
9065 
9066 /**
9067  * qla4xxx_del_from_active_array - returns an active srb
9068  * @ha: Pointer to host adapter structure.
9069  * @index: index into the active_array
9070  *
9071  * This routine removes and returns the srb at the specified index
9072  **/
9073 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
9074     uint32_t index)
9075 {
9076 	struct srb *srb = NULL;
9077 	struct scsi_cmnd *cmd = NULL;
9078 
9079 	cmd = scsi_host_find_tag(ha->host, index);
9080 	if (!cmd)
9081 		return srb;
9082 
9083 	srb = (struct srb *)CMD_SP(cmd);
9084 	if (!srb)
9085 		return srb;
9086 
9087 	/* update counters */
9088 	if (srb->flags & SRB_DMA_VALID) {
9089 		ha->iocb_cnt -= srb->iocb_cnt;
9090 		if (srb->cmd)
9091 			srb->cmd->host_scribble =
9092 				(unsigned char *)(unsigned long) MAX_SRBS;
9093 	}
9094 	return srb;
9095 }
9096 
9097 /**
9098  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
9099  * @ha: Pointer to host adapter structure.
9100  * @cmd: Scsi Command to wait on.
9101  *
9102  * This routine waits for the command to be returned by the Firmware
9103  * for some max time.
9104  **/
9105 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
9106 				      struct scsi_cmnd *cmd)
9107 {
9108 	int done = 0;
9109 	struct srb *rp;
9110 	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
9111 	int ret = SUCCESS;
9112 
9113 	/* Don't wait on the command if a PCI error is being handled
9114 	 * by the PCI AER driver
9115 	 */
9116 	if (unlikely(pci_channel_offline(ha->pdev)) ||
9117 	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
9118 		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
9119 		    ha->host_no, __func__);
9120 		return ret;
9121 	}
9122 
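	/* Poll every 2 seconds, for up to EH_WAIT_CMD_TOV iterations, until the
	 * firmware hands the command back (CMD_SP(cmd) becomes NULL).
	 */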
9123 	do {
9124 		/* Check whether the command has been returned to the OS */
9125 		rp = (struct srb *) CMD_SP(cmd);
9126 		if (rp == NULL) {
9127 			done++;
9128 			break;
9129 		}
9130 
9131 		msleep(2000);
9132 	} while (max_wait_time--);
9133 
9134 	return done;
9135 }
9136 
9137 /**
9138  * qla4xxx_wait_for_hba_online - waits for HBA to come online
9139  * @ha: Pointer to host adapter structure
9140  **/
9141 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
9142 {
9143 	unsigned long wait_online;
9144 
9145 	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
9146 	while (time_before(jiffies, wait_online)) {
9147 
9148 		if (adapter_up(ha))
9149 			return QLA_SUCCESS;
9150 
9151 		msleep(2000);
9152 	}
9153 
9154 	return QLA_ERROR;
9155 }
9156 
9157 /**
9158  * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
9159  * @ha: pointer to HBA
9160  * @stgt: pointer to the SCSI target to wait on
9161  * @sdev: pointer to the SCSI device to wait on (NULL means all luns on stgt)
9162  *
9163  * This function waits for all outstanding commands on a target or lun to
9164  * complete. It returns 0 if all pending commands are returned and 1 otherwise.
9165  **/
9166 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
9167 					struct scsi_target *stgt,
9168 					struct scsi_device *sdev)
9169 {
9170 	int cnt;
9171 	int status = 0;
9172 	struct scsi_cmnd *cmd;
9173 
9174 	/*
9175 	 * Waiting for all commands for the designated target or dev
9176 	 * in the active array
9177 	 */
9178 	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
9179 		cmd = scsi_host_find_tag(ha->host, cnt);
9180 		if (cmd && stgt == scsi_target(cmd->device) &&
9181 		    (!sdev || sdev == cmd->device)) {
9182 			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
9183 				status++;
9184 				break;
9185 			}
9186 		}
9187 	}
9188 	return status;
9189 }
9190 
9191 /**
9192  * qla4xxx_eh_abort - callback for abort task.
9193  * @cmd: Pointer to Linux's SCSI command structure
9194  *
9195  * This routine is called by the Linux OS to abort the specified
9196  * command.
9197  **/
9198 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
9199 {
9200 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9201 	unsigned int id = cmd->device->id;
9202 	uint64_t lun = cmd->device->lun;
9203 	unsigned long flags;
9204 	struct srb *srb = NULL;
9205 	int ret = SUCCESS;
9206 	int wait = 0;
9207 
9208 	ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
9209 		   ha->host_no, id, lun, cmd, cmd->cmnd[0]);
9210 
9211 	spin_lock_irqsave(&ha->hardware_lock, flags);
9212 	srb = (struct srb *) CMD_SP(cmd);
9213 	if (!srb) {
9214 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
9215 		ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
9216 			   ha->host_no, id, lun);
9217 		return SUCCESS;
9218 	}
9219 	kref_get(&srb->srb_ref);
9220 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
9221 
9222 	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
9223 		DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n",
9224 		    ha->host_no, id, lun));
9225 		ret = FAILED;
9226 	} else {
9227 		DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n",
9228 		    ha->host_no, id, lun));
9229 		wait = 1;
9230 	}
9231 
9232 	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
9233 
9234 	/* Wait for command to complete */
9235 	if (wait) {
9236 		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
9237 			DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n",
9238 			    ha->host_no, id, lun));
9239 			ret = FAILED;
9240 		}
9241 	}
9242 
9243 	ql4_printk(KERN_INFO, ha,
9244 	    "scsi%ld:%d:%llu: Abort command - %s\n",
9245 	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
9246 
9247 	return ret;
9248 }
9249 
9250 /**
9251  * qla4xxx_eh_device_reset - callback for device (lun) reset.
9252  * @cmd: Pointer to Linux's SCSI command structure
9253  *
9254  * This routine is called by the Linux OS to reset the lun addressed by the
9255  * specified command.
9256  **/
9257 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
9258 {
9259 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9260 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
9261 	int ret = FAILED, stat;
9262 
9263 	if (!ddb_entry)
9264 		return ret;
9265 
9266 	ret = iscsi_block_scsi_eh(cmd);
9267 	if (ret)
9268 		return ret;
9269 	ret = FAILED;
9270 
9271 	ql4_printk(KERN_INFO, ha,
9272 		   "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no,
9273 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
9274 
9275 	DEBUG2(printk(KERN_INFO
9276 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
9277 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
9278 		      cmd, jiffies, cmd->request->timeout / HZ,
9279 		      ha->dpc_flags, cmd->result, cmd->allowed));
9280 
9281 	/* FIXME: wait for hba to go online */
9282 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
9283 	if (stat != QLA_SUCCESS) {
9284 		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
9285 		goto eh_dev_reset_done;
9286 	}
9287 
9288 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
9289 					 cmd->device)) {
9290 		ql4_printk(KERN_INFO, ha,
9291 			   "DEVICE RESET FAILED - waiting for "
9292 			   "commands.\n");
9293 		goto eh_dev_reset_done;
9294 	}
9295 
9296 	/* Send marker. */
9297 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
9298 		MM_LUN_RESET) != QLA_SUCCESS)
9299 		goto eh_dev_reset_done;
9300 
9301 	ql4_printk(KERN_INFO, ha,
9302 		   "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n",
9303 		   ha->host_no, cmd->device->channel, cmd->device->id,
9304 		   cmd->device->lun);
9305 
9306 	ret = SUCCESS;
9307 
9308 eh_dev_reset_done:
9309 
9310 	return ret;
9311 }
9312 
9313 /**
9314  * qla4xxx_eh_target_reset - callback for target reset.
9315  * @cmd: Pointer to Linux's SCSI command structure
9316  *
9317  * This routine is called by the Linux OS to reset the target.
9318  **/
9319 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
9320 {
9321 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9322 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
9323 	int stat, ret;
9324 
9325 	if (!ddb_entry)
9326 		return FAILED;
9327 
9328 	ret = iscsi_block_scsi_eh(cmd);
9329 	if (ret)
9330 		return ret;
9331 
9332 	starget_printk(KERN_INFO, scsi_target(cmd->device),
9333 		       "WARM TARGET RESET ISSUED.\n");
9334 
9335 	DEBUG2(printk(KERN_INFO
9336 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
9337 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
9338 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
9339 		      ha->dpc_flags, cmd->result, cmd->allowed));
9340 
9341 	stat = qla4xxx_reset_target(ha, ddb_entry);
9342 	if (stat != QLA_SUCCESS) {
9343 		starget_printk(KERN_INFO, scsi_target(cmd->device),
9344 			       "WARM TARGET RESET FAILED.\n");
9345 		return FAILED;
9346 	}
9347 
9348 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
9349 					 NULL)) {
9350 		starget_printk(KERN_INFO, scsi_target(cmd->device),
9351 			       "WARM TARGET DEVICE RESET FAILED - "
9352 			       "waiting for commands.\n");
9353 		return FAILED;
9354 	}
9355 
9356 	/* Send marker. */
9357 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
9358 		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
9359 		starget_printk(KERN_INFO, scsi_target(cmd->device),
9360 			       "WARM TARGET DEVICE RESET FAILED - "
9361 			       "marker iocb failed.\n");
9362 		return FAILED;
9363 	}
9364 
9365 	starget_printk(KERN_INFO, scsi_target(cmd->device),
9366 		       "WARM TARGET RESET SUCCEEDED.\n");
9367 	return SUCCESS;
9368 }
9369 
9370 /**
9371  * qla4xxx_is_eh_active - check if error handler is running
9372  * @shost: Pointer to SCSI Host struct
9373  *
9374  * This routine determines whether the host reset was invoked from the EH
9375  * path or from an application such as sg_reset.
9376  **/
9377 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
9378 {
9379 	if (shost->shost_state == SHOST_RECOVERY)
9380 		return 1;
9381 	return 0;
9382 }
9383 
9384 /**
9385  * qla4xxx_eh_host_reset - kernel callback
9386  * @cmd: Pointer to Linux's SCSI command structure
9387  *
9388  * This routine is invoked by the Linux kernel to perform fatal error
9389  * recovery on the specified adapter.
9390  **/
9391 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
9392 {
9393 	int return_status = FAILED;
9394 	struct scsi_qla_host *ha;
9395 
9396 	ha = to_qla_host(cmd->device->host);
9397 
9398 	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
9399 		qla4_83xx_set_idc_dontreset(ha);
9400 
9401 	/*
9402 	 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
9403 	 * protocol drivers, we should not set device_state to NEED_RESET
9404 	 */
9405 	if (ql4xdontresethba ||
9406 	    ((is_qla8032(ha) || is_qla8042(ha)) &&
9407 	     qla4_83xx_idc_dontreset(ha))) {
9408 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
9409 		     ha->host_no, __func__));
9410 
9411 		/* Clear outstanding srb in queues */
9412 		if (qla4xxx_is_eh_active(cmd->device->host))
9413 			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
9414 
9415 		return FAILED;
9416 	}
9417 
9418 	ql4_printk(KERN_INFO, ha,
9419 		   "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no,
9420 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
9421 
9422 	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
9423 		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
9424 			      "DEAD.\n", ha->host_no, cmd->device->channel,
9425 			      __func__));
9426 
9427 		return FAILED;
9428 	}
9429 
9430 	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
9431 		if (is_qla80XX(ha))
9432 			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
9433 		else
9434 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
9435 	}
9436 
9437 	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
9438 		return_status = SUCCESS;
9439 
9440 	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
9441 		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
9442 
9443 	return return_status;
9444 }
9445 
9446 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
9447 {
9448 	uint32_t mbox_cmd[MBOX_REG_COUNT];
9449 	uint32_t mbox_sts[MBOX_REG_COUNT];
9450 	struct addr_ctrl_blk_def *acb = NULL;
9451 	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
9452 	int rval = QLA_SUCCESS;
9453 	dma_addr_t acb_dma;
9454 
9455 	acb = dma_alloc_coherent(&ha->pdev->dev,
9456 				 sizeof(struct addr_ctrl_blk_def),
9457 				 &acb_dma, GFP_KERNEL);
9458 	if (!acb) {
9459 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
9460 			   __func__);
9461 		rval = -ENOMEM;
9462 		goto exit_port_reset;
9463 	}
9464 
9465 	memset(acb, 0, acb_len);
9466 
9467 	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
9468 	if (rval != QLA_SUCCESS) {
9469 		rval = -EIO;
9470 		goto exit_free_acb;
9471 	}
9472 
9473 	rval = qla4xxx_disable_acb(ha);
9474 	if (rval != QLA_SUCCESS) {
9475 		rval = -EIO;
9476 		goto exit_free_acb;
9477 	}
9478 
9479 	wait_for_completion_timeout(&ha->disable_acb_comp,
9480 				    DISABLE_ACB_TOV * HZ);
9481 
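	/* Re-apply the ACB that was read back above so the port comes back up
	 * with its previous address configuration.
	 */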
9482 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
9483 	if (rval != QLA_SUCCESS) {
9484 		rval = -EIO;
9485 		goto exit_free_acb;
9486 	}
9487 
9488 exit_free_acb:
9489 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
9490 			  acb, acb_dma);
9491 exit_port_reset:
9492 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
9493 			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
9494 	return rval;
9495 }
9496 
9497 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
9498 {
9499 	struct scsi_qla_host *ha = to_qla_host(shost);
9500 	int rval = QLA_SUCCESS;
9501 	uint32_t idc_ctrl;
9502 
9503 	if (ql4xdontresethba) {
9504 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
9505 				  __func__));
9506 		rval = -EPERM;
9507 		goto exit_host_reset;
9508 	}
9509 
9510 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
9511 		goto recover_adapter;
9512 
9513 	switch (reset_type) {
9514 	case SCSI_ADAPTER_RESET:
9515 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
9516 		break;
9517 	case SCSI_FIRMWARE_RESET:
9518 		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
9519 			if (is_qla80XX(ha))
9520 				/* set firmware context reset */
9521 				set_bit(DPC_RESET_HA_FW_CONTEXT,
9522 					&ha->dpc_flags);
9523 			else {
9524 				rval = qla4xxx_context_reset(ha);
9525 				goto exit_host_reset;
9526 			}
9527 		}
9528 		break;
9529 	}
9530 
9531 recover_adapter:
9532 	/* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
9533 	 * reset is issued by application */
9534 	if ((is_qla8032(ha) || is_qla8042(ha)) &&
9535 	    test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
9536 		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
9537 		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
9538 				 (idc_ctrl | GRACEFUL_RESET_BIT1));
9539 	}
9540 
9541 	rval = qla4xxx_recover_adapter(ha);
9542 	if (rval != QLA_SUCCESS) {
9543 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
9544 				  __func__));
9545 		rval = -EIO;
9546 	}
9547 
9548 exit_host_reset:
9549 	return rval;
9550 }
9551 
9552 /* PCI AER driver recovers from all correctable errors w/o
9553  * driver intervention. For uncorrectable errors PCI AER
9554  * driver calls the following device driver's callbacks
9555  *
9556  * - Fatal Errors - link_reset
9557  * - Non-Fatal Errors - driver's pci_error_detected() which
9558  * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
9559  *
9560  * PCI AER driver calls
9561  * CAN_RECOVER - driver's pci_mmio_enabled(), which
9562  *               returns RECOVERED or NEED_RESET if fw_hung
9563  * NEED_RESET - driver's slot_reset()
9564  * DISCONNECT - device is dead & cannot recover
9565  * RECOVERED - driver's pci_resume()
9566  */
9567 static pci_ers_result_t
9568 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9569 {
9570 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9571 
9572 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
9573 	    ha->host_no, __func__, state);
9574 
9575 	if (!is_aer_supported(ha))
9576 		return PCI_ERS_RESULT_NONE;
9577 
9578 	switch (state) {
9579 	case pci_channel_io_normal:
9580 		clear_bit(AF_EEH_BUSY, &ha->flags);
9581 		return PCI_ERS_RESULT_CAN_RECOVER;
9582 	case pci_channel_io_frozen:
9583 		set_bit(AF_EEH_BUSY, &ha->flags);
9584 		qla4xxx_mailbox_premature_completion(ha);
9585 		qla4xxx_free_irqs(ha);
9586 		pci_disable_device(pdev);
9587 		/* Return back all IOs */
9588 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
9589 		return PCI_ERS_RESULT_NEED_RESET;
9590 	case pci_channel_io_perm_failure:
9591 		set_bit(AF_EEH_BUSY, &ha->flags);
9592 		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
9593 		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
9594 		return PCI_ERS_RESULT_DISCONNECT;
9595 	}
9596 	return PCI_ERS_RESULT_NEED_RESET;
9597 }
9598 
9599 /**
9600  * qla4xxx_pci_mmio_enabled() gets called if
9601  * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
9602  * and read/write to the device still works.
9603  **/
9604 static pci_ers_result_t
9605 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
9606 {
9607 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9608 
9609 	if (!is_aer_supported(ha))
9610 		return PCI_ERS_RESULT_NONE;
9611 
9612 	return PCI_ERS_RESULT_RECOVERED;
9613 }
9614 
9615 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
9616 {
9617 	uint32_t rval = QLA_ERROR;
9618 	int fn;
9619 	struct pci_dev *other_pdev = NULL;
9620 
9621 	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
9622 
9623 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
9624 
9625 	if (test_bit(AF_ONLINE, &ha->flags)) {
9626 		clear_bit(AF_ONLINE, &ha->flags);
9627 		clear_bit(AF_LINK_UP, &ha->flags);
9628 		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
9629 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
9630 	}
9631 
9632 	fn = PCI_FUNC(ha->pdev->devfn);
9633 	if (is_qla8022(ha)) {
9634 		while (fn > 0) {
9635 			fn--;
9636 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
9637 				   ha->host_no, __func__, fn);
9638 			/* Get the pci device given the domain, bus,
9639 			 * slot/function number */
9640 			other_pdev = pci_get_domain_bus_and_slot(
9641 					   pci_domain_nr(ha->pdev->bus),
9642 					   ha->pdev->bus->number,
9643 					   PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
9644 					   fn));
9645 
9646 			if (!other_pdev)
9647 				continue;
9648 
9649 			if (atomic_read(&other_pdev->enable_cnt)) {
9650 				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
9651 					   ha->host_no, __func__, fn);
9652 				pci_dev_put(other_pdev);
9653 				break;
9654 			}
9655 			pci_dev_put(other_pdev);
9656 		}
9657 	} else {
9658 		/* this case is meant for ISP83xx/ISP84xx only */
9659 		if (qla4_83xx_can_perform_reset(ha)) {
9660 			/* reset fn as iSCSI is going to perform the reset */
9661 			fn = 0;
9662 		}
9663 	}
9664 
9665 	/* The first function on the card (the reset owner) will start and
9666 	 * initialize the firmware. The other functions on the card will only
9667 	 * reset the firmware context.
9668 	 */
9669 	if (!fn) {
9670 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
9671 		    "0x%x is the owner\n", ha->host_no, __func__,
9672 		    ha->pdev->devfn);
9673 
9674 		ha->isp_ops->idc_lock(ha);
9675 		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9676 				    QLA8XXX_DEV_COLD);
9677 		ha->isp_ops->idc_unlock(ha);
9678 
9679 		rval = qla4_8xxx_update_idc_reg(ha);
9680 		if (rval == QLA_ERROR) {
9681 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
9682 				   ha->host_no, __func__);
9683 			ha->isp_ops->idc_lock(ha);
9684 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9685 					    QLA8XXX_DEV_FAILED);
9686 			ha->isp_ops->idc_unlock(ha);
9687 			goto exit_error_recovery;
9688 		}
9689 
9690 		clear_bit(AF_FW_RECOVERY, &ha->flags);
9691 		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
9692 
9693 		if (rval != QLA_SUCCESS) {
9694 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
9695 			    "FAILED\n", ha->host_no, __func__);
9696 			qla4xxx_free_irqs(ha);
9697 			ha->isp_ops->idc_lock(ha);
9698 			qla4_8xxx_clear_drv_active(ha);
9699 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9700 					    QLA8XXX_DEV_FAILED);
9701 			ha->isp_ops->idc_unlock(ha);
9702 		} else {
9703 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
9704 			    "READY\n", ha->host_no, __func__);
9705 			ha->isp_ops->idc_lock(ha);
9706 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9707 					    QLA8XXX_DEV_READY);
9708 			/* Clear driver state register */
9709 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
9710 			qla4_8xxx_set_drv_active(ha);
9711 			ha->isp_ops->idc_unlock(ha);
9712 			ha->isp_ops->enable_intrs(ha);
9713 		}
9714 	} else {
9715 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
9716 		    "the reset owner\n", ha->host_no, __func__,
9717 		    ha->pdev->devfn);
9718 		if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
9719 		     QLA8XXX_DEV_READY)) {
9720 			clear_bit(AF_FW_RECOVERY, &ha->flags);
9721 			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
9722 			if (rval == QLA_SUCCESS)
9723 				ha->isp_ops->enable_intrs(ha);
9724 			else
9725 				qla4xxx_free_irqs(ha);
9726 
9727 			ha->isp_ops->idc_lock(ha);
9728 			qla4_8xxx_set_drv_active(ha);
9729 			ha->isp_ops->idc_unlock(ha);
9730 		}
9731 	}
9732 exit_error_recovery:
9733 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
9734 	return rval;
9735 }
9736 
9737 static pci_ers_result_t
9738 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
9739 {
9740 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
9741 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9742 	int rc;
9743 
9744 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
9745 	    ha->host_no, __func__);
9746 
9747 	if (!is_aer_supported(ha))
9748 		return PCI_ERS_RESULT_NONE;
9749 
9750 	/* Restore the saved state of PCIe device -
9751 	 * BAR registers, PCI Config space, PCIX, MSI,
9752 	 * IOV states
9753 	 */
9754 	pci_restore_state(pdev);
9755 
9756 	/* pci_restore_state() clears the saved_state flag of the device,
9757 	 * so save the state again here to set saved_state back.
9758 	 */
9759 	pci_save_state(pdev);
9760 
9761 	/* Initialize device or resume if in suspended state */
9762 	rc = pci_enable_device(pdev);
9763 	if (rc) {
9764 		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
9765 		    "device after reset\n", ha->host_no, __func__);
9766 		goto exit_slot_reset;
9767 	}
9768 
9769 	ha->isp_ops->disable_intrs(ha);
9770 
9771 	if (is_qla80XX(ha)) {
9772 		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS)
9773 			ret = PCI_ERS_RESULT_RECOVERED;
9774 
9775 		goto exit_slot_reset;
9776 	}
9778 
9779 exit_slot_reset:
9780 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
9781 		   ha->host_no, __func__, ret);
9782 	return ret;
9783 }
9784 
9785 static void
9786 qla4xxx_pci_resume(struct pci_dev *pdev)
9787 {
9788 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9789 	int ret;
9790 
9791 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
9792 	    ha->host_no, __func__);
9793 
9794 	ret = qla4xxx_wait_for_hba_online(ha);
9795 	if (ret != QLA_SUCCESS) {
9796 		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
9797 		    "resume I/O from slot/link_reset\n", ha->host_no,
9798 		     __func__);
9799 	}
9800 
9801 	pci_cleanup_aer_uncorrect_error_status(pdev);
9802 	clear_bit(AF_EEH_BUSY, &ha->flags);
9803 }
9804 
9805 static const struct pci_error_handlers qla4xxx_err_handler = {
9806 	.error_detected = qla4xxx_pci_error_detected,
9807 	.mmio_enabled = qla4xxx_pci_mmio_enabled,
9808 	.slot_reset = qla4xxx_pci_slot_reset,
9809 	.resume = qla4xxx_pci_resume,
9810 };
9811 
9812 static struct pci_device_id qla4xxx_pci_tbl[] = {
9813 	{
9814 		.vendor		= PCI_VENDOR_ID_QLOGIC,
9815 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
9816 		.subvendor	= PCI_ANY_ID,
9817 		.subdevice	= PCI_ANY_ID,
9818 	},
9819 	{
9820 		.vendor		= PCI_VENDOR_ID_QLOGIC,
9821 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
9822 		.subvendor	= PCI_ANY_ID,
9823 		.subdevice	= PCI_ANY_ID,
9824 	},
9825 	{
9826 		.vendor		= PCI_VENDOR_ID_QLOGIC,
9827 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
9828 		.subvendor	= PCI_ANY_ID,
9829 		.subdevice	= PCI_ANY_ID,
9830 	},
9831 	{
9832 		.vendor         = PCI_VENDOR_ID_QLOGIC,
9833 		.device         = PCI_DEVICE_ID_QLOGIC_ISP8022,
9834 		.subvendor      = PCI_ANY_ID,
9835 		.subdevice      = PCI_ANY_ID,
9836 	},
9837 	{
9838 		.vendor		= PCI_VENDOR_ID_QLOGIC,
9839 		.device		= PCI_DEVICE_ID_QLOGIC_ISP8324,
9840 		.subvendor	= PCI_ANY_ID,
9841 		.subdevice	= PCI_ANY_ID,
9842 	},
9843 	{
9844 		.vendor		= PCI_VENDOR_ID_QLOGIC,
9845 		.device		= PCI_DEVICE_ID_QLOGIC_ISP8042,
9846 		.subvendor	= PCI_ANY_ID,
9847 		.subdevice	= PCI_ANY_ID,
9848 	},
9849 	{0, 0},
9850 };
9851 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
9852 
9853 static struct pci_driver qla4xxx_pci_driver = {
9854 	.name		= DRIVER_NAME,
9855 	.id_table	= qla4xxx_pci_tbl,
9856 	.probe		= qla4xxx_probe_adapter,
9857 	.remove		= qla4xxx_remove_adapter,
9858 	.err_handler = &qla4xxx_err_handler,
9859 };
9860 
9861 static int __init qla4xxx_module_init(void)
9862 {
9863 	int ret;
9864 
9865 	if (ql4xqfulltracking)
9866 		qla4xxx_driver_template.track_queue_depth = 1;
9867 
9868 	/* Allocate cache for SRBs. */
9869 	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
9870 				       SLAB_HWCACHE_ALIGN, NULL);
9871 	if (srb_cachep == NULL) {
9872 		printk(KERN_ERR
9873 		       "%s: Unable to allocate SRB cache..."
9874 		       "Failing load!\n", DRIVER_NAME);
9875 		ret = -ENOMEM;
9876 		goto no_srb_cache;
9877 	}
9878 
9879 	/* Derive version string. */
9880 	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
9881 	if (ql4xextended_error_logging)
9882 		strcat(qla4xxx_version_str, "-debug");
9883 
9884 	qla4xxx_scsi_transport =
9885 		iscsi_register_transport(&qla4xxx_iscsi_transport);
9886 	if (!qla4xxx_scsi_transport) {
9887 		ret = -ENODEV;
9888 		goto release_srb_cache;
9889 	}
9890 
9891 	ret = pci_register_driver(&qla4xxx_pci_driver);
9892 	if (ret)
9893 		goto unregister_transport;
9894 
9895 	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
9896 	return 0;
9897 
9898 unregister_transport:
9899 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
9900 release_srb_cache:
9901 	kmem_cache_destroy(srb_cachep);
9902 no_srb_cache:
9903 	return ret;
9904 }
9905 
9906 static void __exit qla4xxx_module_exit(void)
9907 {
9908 	pci_unregister_driver(&qla4xxx_pci_driver);
9909 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
9910 	kmem_cache_destroy(srb_cachep);
9911 }
9912 
9913 module_init(qla4xxx_module_init);
9914 module_exit(qla4xxx_module_exit);
9915 
9916 MODULE_AUTHOR("QLogic Corporation");
9917 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
9918 MODULE_LICENSE("GPL");
9919 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
9920