xref: /linux/drivers/scsi/qla2xxx/qla_init.c (revision 507e190946297c34a27d9366b0661d5e506fdd03)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9 
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13 
14 #include "qla_devtbl.h"
15 
16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h>
18 #endif
19 
20 #include <target/target_core_base.h>
21 #include "qla_target.h"
22 
23 /*
24 *  QLogic ISP2x00 Hardware Support Function Prototypes.
25 */
26 static int qla2x00_isp_firmware(scsi_qla_host_t *);
27 static int qla2x00_setup_chip(scsi_qla_host_t *);
28 static int qla2x00_fw_ready(scsi_qla_host_t *);
29 static int qla2x00_configure_hba(scsi_qla_host_t *);
30 static int qla2x00_configure_loop(scsi_qla_host_t *);
31 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
32 static int qla2x00_configure_fabric(scsi_qla_host_t *);
33 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
34 static int qla2x00_restart_isp(scsi_qla_host_t *);
35 
36 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37 static int qla84xx_init_chip(scsi_qla_host_t *);
38 static int qla25xx_init_queues(struct qla_hw_data *);
39 static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
40 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
41 static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
42     struct event_arg *);
43 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
44     struct event_arg *);
45 
46 /* SRB Extensions ---------------------------------------------------------- */
47 
/*
 * Generic SRB timer expiry handler (runs in timer callback context).
 *
 * The timer payload is the SRB itself.  Under the hardware lock the
 * command is detached from the base request queue's outstanding array,
 * the iocb-specific timeout handler is invoked, and the SRB is freed.
 *
 * NOTE(review): sp->free(sp) is called here while the per-command
 * timeout handler may also drive completion — confirm there is no
 * double-free race against the normal completion path.
 */
void
qla2x00_sp_timeout(unsigned long __data)
{
	srb_t *sp = (srb_t *)__data;
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req;
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	/* Base (default) request queue owns this handle. */
	req = vha->hw->req_q_map[0];
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	/* Per-command timeout handling, then release the SRB. */
	iocb->timeout(sp);
	sp->free(sp);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
65 
66 void
67 qla2x00_sp_free(void *ptr)
68 {
69 	srb_t *sp = ptr;
70 	struct srb_iocb *iocb = &sp->u.iocb_cmd;
71 
72 	del_timer(&iocb->timer);
73 	qla2x00_rel_sp(sp);
74 }
75 
76 /* Asynchronous Login/Logout Routines -------------------------------------- */
77 
78 unsigned long
79 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
80 {
81 	unsigned long tmo;
82 	struct qla_hw_data *ha = vha->hw;
83 
84 	/* Firmware should use switch negotiated r_a_tov for timeout. */
85 	tmo = ha->r_a_tov / 10 * 2;
86 	if (IS_QLAFX00(ha)) {
87 		tmo = FX00_DEF_RATOV * 2;
88 	} else if (!IS_FWI2_CAPABLE(ha)) {
89 		/*
90 		 * Except for earlier ISPs where the timeout is seeded from the
91 		 * initialization control block.
92 		 */
93 		tmo = ha->login_timeout;
94 	}
95 	return tmo;
96 }
97 
/*
 * Timeout handler shared by the asynchronous logio/CT/mailbox SRBs;
 * invoked from the SRB timer path (see qla2x00_sp_timeout).
 *
 * Translates expiry into the per-command completion flow:
 *  - PLOGI: synthesize an FCME_PLOGI_DONE event carrying
 *    MBS_COMMAND_ERROR so the login state machine can retry.
 *  - LOGO: complete the logout with QLA_FUNCTION_TIMEOUT.
 *  - CT passthru / mailbox / NACK types: call the generic done
 *    callback with QLA_FUNCTION_TIMEOUT.
 * SRB types not listed fall through the switch with no completion here.
 */
void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
	    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
	    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

	/* No async command outstanding for this port any more. */
	fcport->flags &= ~FCF_ASYNC_SENT;

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		/* Retry as needed. */
		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
			QLA_LOGIO_LOGIN_RETRIED : 0;
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.sp = sp;
		qla24xx_handle_plogi_done_event(fcport->vha, &ea);
		break;
	case SRB_LOGOUT_CMD:
		qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
		break;
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		sp->done(sp, QLA_FUNCTION_TIMEOUT);
		break;
	}
}
138 
139 static void
140 qla2x00_async_login_sp_done(void *ptr, int res)
141 {
142 	srb_t *sp = ptr;
143 	struct scsi_qla_host *vha = sp->vha;
144 	struct srb_iocb *lio = &sp->u.iocb_cmd;
145 	struct event_arg ea;
146 
147 	ql_dbg(ql_dbg_disc, vha, 0x20dd,
148 	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
149 
150 	sp->fcport->flags &= ~FCF_ASYNC_SENT;
151 	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
152 		memset(&ea, 0, sizeof(ea));
153 		ea.event = FCME_PLOGI_DONE;
154 		ea.fcport = sp->fcport;
155 		ea.data[0] = lio->u.logio.data[0];
156 		ea.data[1] = lio->u.logio.data[1];
157 		ea.iop[0] = lio->u.logio.iop[0];
158 		ea.iop[1] = lio->u.logio.iop[1];
159 		ea.sp = sp;
160 		qla2x00_fcport_event_handler(vha, &ea);
161 	}
162 
163 	sp->free(sp);
164 }
165 
/*
 * Start an asynchronous PLOGI to @fcport.
 *
 * @data: data[1] carries QLA_LOGIO_LOGIN_RETRIED when this is a retry.
 *
 * Returns QLA_SUCCESS when the login IOCB was queued.  On failure
 * FCF_ASYNC_SENT is cleared; when qla2x00_start_sp() itself fails, a
 * relogin is additionally scheduled via RELOGIN_NEEDED.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		goto done;

	/* Skip if firmware already has a login/PRLI pending or complete. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		goto done;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Async timeout plus slack for the IOCB round trip. */
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_login_sp_done;
	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

	/* For NVMe ports, PRLI is issued separately (see qla24xx_async_prli). */
	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags &= ~FCF_ASYNC_SENT;
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
		"retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
224 
225 static void
226 qla2x00_async_logout_sp_done(void *ptr, int res)
227 {
228 	srb_t *sp = ptr;
229 	struct srb_iocb *lio = &sp->u.iocb_cmd;
230 
231 	sp->fcport->flags &= ~FCF_ASYNC_SENT;
232 	if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
233 		qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
234 		    lio->u.logio.data);
235 	sp->free(sp);
236 }
237 
238 int
239 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
240 {
241 	srb_t *sp;
242 	struct srb_iocb *lio;
243 	int rval;
244 
245 	rval = QLA_FUNCTION_FAILED;
246 	fcport->flags |= FCF_ASYNC_SENT;
247 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
248 	if (!sp)
249 		goto done;
250 
251 	sp->type = SRB_LOGOUT_CMD;
252 	sp->name = "logout";
253 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
254 
255 	lio = &sp->u.iocb_cmd;
256 	lio->timeout = qla2x00_async_iocb_timeout;
257 	sp->done = qla2x00_async_logout_sp_done;
258 	rval = qla2x00_start_sp(sp);
259 	if (rval != QLA_SUCCESS)
260 		goto done_free_sp;
261 
262 	ql_dbg(ql_dbg_disc, vha, 0x2070,
263 	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
264 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
265 		fcport->d_id.b.area, fcport->d_id.b.al_pa,
266 		fcport->port_name);
267 	return rval;
268 
269 done_free_sp:
270 	sp->free(sp);
271 done:
272 	fcport->flags &= ~FCF_ASYNC_SENT;
273 	return rval;
274 }
275 
276 static void
277 qla2x00_async_adisc_sp_done(void *ptr, int res)
278 {
279 	srb_t *sp = ptr;
280 	struct scsi_qla_host *vha = sp->vha;
281 	struct srb_iocb *lio = &sp->u.iocb_cmd;
282 
283 	if (!test_bit(UNLOADING, &vha->dpc_flags))
284 		qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
285 		    lio->u.logio.data);
286 	sp->free(sp);
287 }
288 
/*
 * Issue an asynchronous ADISC (address discovery) to @fcport.
 *
 * @data: data[1] carries QLA_LOGIO_LOGIN_RETRIED when this is a retry.
 *
 * Returns QLA_SUCCESS when the IOCB was queued; otherwise an error
 * code, with FCF_ASYNC_SENT cleared again.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";
	/* Async timeout plus slack for the IOCB round trip. */
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
328 
329 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
330 	struct event_arg *ea)
331 {
332 	fc_port_t *fcport, *conflict_fcport;
333 	struct get_name_list_extended *e;
334 	u16 i, n, found = 0, loop_id;
335 	port_id_t id;
336 	u64 wwn;
337 	u8 opt = 0, current_login_state;
338 
339 	fcport = ea->fcport;
340 
341 	if (ea->rc) { /* rval */
342 		if (fcport->login_retry == 0) {
343 			fcport->login_retry = vha->hw->login_retry_count;
344 			ql_dbg(ql_dbg_disc, vha, 0x20de,
345 			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
346 			    fcport->port_name, fcport->login_retry);
347 		}
348 		return;
349 	}
350 
351 	if (fcport->last_rscn_gen != fcport->rscn_gen) {
352 		ql_dbg(ql_dbg_disc, vha, 0x20df,
353 		    "%s %8phC rscn gen changed rscn %d|%d \n",
354 		    __func__, fcport->port_name,
355 		    fcport->last_rscn_gen, fcport->rscn_gen);
356 		qla24xx_post_gidpn_work(vha, fcport);
357 		return;
358 	} else if (fcport->last_login_gen != fcport->login_gen) {
359 		ql_dbg(ql_dbg_disc, vha, 0x20e0,
360 		    "%s %8phC login gen changed login %d|%d\n",
361 		    __func__, fcport->port_name,
362 		    fcport->last_login_gen, fcport->login_gen);
363 		return;
364 	}
365 
366 	n = ea->data[0] / sizeof(struct get_name_list_extended);
367 
368 	ql_dbg(ql_dbg_disc, vha, 0x20e1,
369 	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
370 	    __func__, __LINE__, fcport->port_name, n,
371 	    fcport->d_id.b.domain, fcport->d_id.b.area,
372 	    fcport->d_id.b.al_pa, fcport->loop_id);
373 
374 	for (i = 0; i < n; i++) {
375 		e = &vha->gnl.l[i];
376 		wwn = wwn_to_u64(e->port_name);
377 
378 		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
379 			continue;
380 
381 		found = 1;
382 		id.b.domain = e->port_id[2];
383 		id.b.area = e->port_id[1];
384 		id.b.al_pa = e->port_id[0];
385 		id.b.rsvd_1 = 0;
386 
387 		loop_id = le16_to_cpu(e->nport_handle);
388 		loop_id = (loop_id & 0x7fff);
389 
390 		ql_dbg(ql_dbg_disc, vha, 0x20e2,
391 		    "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
392 		    __func__, fcport->port_name,
393 		    e->current_login_state, fcport->fw_login_state,
394 		    id.b.domain, id.b.area, id.b.al_pa,
395 		    fcport->d_id.b.domain, fcport->d_id.b.area,
396 		    fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
397 
398 		if ((id.b24 != fcport->d_id.b24) ||
399 		    ((fcport->loop_id != FC_NO_LOOP_ID) &&
400 			(fcport->loop_id != loop_id))) {
401 			ql_dbg(ql_dbg_disc, vha, 0x20e3,
402 			    "%s %d %8phC post del sess\n",
403 			    __func__, __LINE__, fcport->port_name);
404 			qlt_schedule_sess_for_deletion(fcport, 1);
405 			return;
406 		}
407 
408 		fcport->loop_id = loop_id;
409 
410 		wwn = wwn_to_u64(fcport->port_name);
411 		qlt_find_sess_invalidate_other(vha, wwn,
412 			id, loop_id, &conflict_fcport);
413 
414 		if (conflict_fcport) {
415 			/*
416 			 * Another share fcport share the same loop_id &
417 			 * nport id. Conflict fcport needs to finish
418 			 * cleanup before this fcport can proceed to login.
419 			 */
420 			conflict_fcport->conflict = fcport;
421 			fcport->login_pause = 1;
422 		}
423 
424 		if  (fcport->fc4f_nvme)
425 			current_login_state = e->current_login_state >> 4;
426 		else
427 			current_login_state = e->current_login_state & 0xf;
428 
429 		switch (current_login_state) {
430 		case DSC_LS_PRLI_COMP:
431 			ql_dbg(ql_dbg_disc, vha, 0x20e4,
432 			    "%s %d %8phC post gpdb\n",
433 			    __func__, __LINE__, fcport->port_name);
434 			opt = PDO_FORCE_ADISC;
435 			qla24xx_post_gpdb_work(vha, fcport, opt);
436 			break;
437 		case DSC_LS_PORT_UNAVAIL:
438 		default:
439 			if (fcport->loop_id == FC_NO_LOOP_ID) {
440 				qla2x00_find_new_loop_id(vha, fcport);
441 				fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
442 			}
443 			ql_dbg(ql_dbg_disc, vha, 0x20e5,
444 			    "%s %d %8phC\n",
445 			    __func__, __LINE__, fcport->port_name);
446 			qla24xx_fcport_handle_login(vha, fcport);
447 			break;
448 		}
449 	}
450 
451 	if (!found) {
452 		/* fw has no record of this port */
453 		if (fcport->loop_id == FC_NO_LOOP_ID) {
454 			qla2x00_find_new_loop_id(vha, fcport);
455 			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
456 		} else {
457 			for (i = 0; i < n; i++) {
458 				e = &vha->gnl.l[i];
459 				id.b.domain = e->port_id[0];
460 				id.b.area = e->port_id[1];
461 				id.b.al_pa = e->port_id[2];
462 				id.b.rsvd_1 = 0;
463 				loop_id = le16_to_cpu(e->nport_handle);
464 
465 				if (fcport->d_id.b24 == id.b24) {
466 					conflict_fcport =
467 					    qla2x00_find_fcport_by_wwpn(vha,
468 						e->port_name, 0);
469 
470 					ql_dbg(ql_dbg_disc, vha, 0x20e6,
471 					    "%s %d %8phC post del sess\n",
472 					    __func__, __LINE__,
473 					    conflict_fcport->port_name);
474 					qlt_schedule_sess_for_deletion
475 						(conflict_fcport, 1);
476 				}
477 
478 				if (fcport->loop_id == loop_id) {
479 					/* FW already picked this loop id for another fcport */
480 					qla2x00_find_new_loop_id(vha, fcport);
481 				}
482 			}
483 		}
484 		qla24xx_fcport_handle_login(vha, fcport);
485 	}
486 } /* gnl_event */
487 
/*
 * Completion callback for the GNL mailbox SRB.
 *
 * Records every loop id firmware reported into loop_id_map, then — under
 * the session lock — clears gnl.sent, detaches the whole list of fcports
 * that were queued waiting for this GNL (coalesced requests), and fires
 * an FCME_GNL_DONE event for each of them.  Finally frees the SRB.
 */
static void
qla24xx_async_gnl_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;
	ea.event = FCME_GNL_DONE;

	/* mb[1] is the number of bytes transferred into the gnl buffer. */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	/* Mark every reported loop id as in use. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;

	/*
	 * Detach all waiters onto a private list so the event handler can
	 * run on each without repeatedly holding up additions to the
	 * shared gnl list.
	 */
	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		fcport->flags &= ~FCF_ASYNC_SENT;
		ea.fcport = fcport;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}
553 
554 int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
555 {
556 	srb_t *sp;
557 	struct srb_iocb *mbx;
558 	int rval = QLA_FUNCTION_FAILED;
559 	unsigned long flags;
560 	u16 *mb;
561 
562 	if (!vha->flags.online)
563 		goto done;
564 
565 	ql_dbg(ql_dbg_disc, vha, 0x20d9,
566 	    "Async-gnlist WWPN %8phC \n", fcport->port_name);
567 
568 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
569 	fcport->flags |= FCF_ASYNC_SENT;
570 	fcport->disc_state = DSC_GNL;
571 	fcport->last_rscn_gen = fcport->rscn_gen;
572 	fcport->last_login_gen = fcport->login_gen;
573 
574 	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
575 	if (vha->gnl.sent) {
576 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
577 		rval = QLA_SUCCESS;
578 		goto done;
579 	}
580 	vha->gnl.sent = 1;
581 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
582 
583 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
584 	if (!sp)
585 		goto done;
586 	sp->type = SRB_MB_IOCB;
587 	sp->name = "gnlist";
588 	sp->gen1 = fcport->rscn_gen;
589 	sp->gen2 = fcport->login_gen;
590 
591 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
592 
593 	mb = sp->u.iocb_cmd.u.mbx.out_mb;
594 	mb[0] = MBC_PORT_NODE_NAME_LIST;
595 	mb[1] = BIT_2 | BIT_3;
596 	mb[2] = MSW(vha->gnl.ldma);
597 	mb[3] = LSW(vha->gnl.ldma);
598 	mb[6] = MSW(MSD(vha->gnl.ldma));
599 	mb[7] = LSW(MSD(vha->gnl.ldma));
600 	mb[8] = vha->gnl.size;
601 	mb[9] = vha->vp_idx;
602 
603 	mbx = &sp->u.iocb_cmd;
604 	mbx->timeout = qla2x00_async_iocb_timeout;
605 
606 	sp->done = qla24xx_async_gnl_sp_done;
607 
608 	rval = qla2x00_start_sp(sp);
609 	if (rval != QLA_SUCCESS)
610 		goto done_free_sp;
611 
612 	ql_dbg(ql_dbg_disc, vha, 0x20da,
613 	    "Async-%s - OUT WWPN %8phC hndl %x\n",
614 	    sp->name, fcport->port_name, sp->handle);
615 
616 	return rval;
617 
618 done_free_sp:
619 	sp->free(sp);
620 done:
621 	fcport->flags &= ~FCF_ASYNC_SENT;
622 	return rval;
623 }
624 
625 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
626 {
627 	struct qla_work_evt *e;
628 
629 	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
630 	if (!e)
631 		return QLA_FUNCTION_FAILED;
632 
633 	e->u.fcport.fcport = fcport;
634 	return qla2x00_post_work(vha, e);
635 }
636 
/*
 * Completion callback for the Get Port Database mailbox SRB.
 *
 * Parses the returned port database on success, then fires an
 * FCME_GPDB_DONE event carrying the parse/mailbox result.  The DMA
 * buffer allocated by qla24xx_async_gpdb() is owned (and freed) here.
 */
static
void qla24xx_async_gpdb_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct port_database_24xx *pd;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	int rval = QLA_SUCCESS;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~FCF_ASYNC_SENT;

	if (res) {
		/* Mailbox failed; report the error without parsing. */
		rval = res;
		goto gpd_error_out;
	}

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

gpd_error_out:
	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_GPDB_DONE;
	ea.rc = rval;
	ea.fcport = fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	/* Return the port database DMA buffer allocated by the sender. */
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
		sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}
678 
679 static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
680 {
681 	struct qla_work_evt *e;
682 
683 	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
684 	if (!e)
685 		return QLA_FUNCTION_FAILED;
686 
687 	e->u.fcport.fcport = fcport;
688 
689 	return qla2x00_post_work(vha, e);
690 }
691 
/*
 * Completion callback for an asynchronous PRLI SRB.  Forwards the
 * PRLI status to the fcport event handler (unless the driver is
 * unloading) and frees the SRB.
 */
static void
qla2x00_async_prli_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PRLI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}
721 
/*
 * Issue an asynchronous PRLI (process login) to @fcport.
 *
 * For NVMe-capable ports the NVMe PRLI flag is set so firmware
 * negotiates the NVMe service parameters.  Returns QLA_SUCCESS when
 * the IOCB was queued; on failure FCF_ASYNC_SENT is cleared and, for
 * start failures, a relogin is scheduled via RELOGIN_NEEDED.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		return rval;

	/* Skip if firmware already has a login/PRLI in progress. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";
	/* Async timeout plus slack for the IOCB round trip. */
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	if  (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags &= ~FCF_ASYNC_SENT;
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
	    fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b24, fcport->login_retry);

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
776 
777 static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
778     u8 opt)
779 {
780 	struct qla_work_evt *e;
781 
782 	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
783 	if (!e)
784 		return QLA_FUNCTION_FAILED;
785 
786 	e->u.fcport.fcport = fcport;
787 	e->u.fcport.opt = opt;
788 	return qla2x00_post_work(vha, e);
789 }
790 
/*
 * Issue an asynchronous Get Port Database mailbox command for @fcport.
 *
 * A DMA buffer for the port database is allocated here; ownership
 * passes to qla24xx_async_gpdb_sp_done() on success, which frees it.
 *
 * NOTE(review): the "done" failure path unconditionally requeues
 * qla24xx_post_gpdb_work() — including when the host is offline or
 * SRB allocation keeps failing — confirm this cannot loop forever.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}
	/* Clear the larger of the two possible port database layouts. */
	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));

	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	/* Hand the DMA buffer to the completion handler for parsing/free. */
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	return rval;

done_free_sp:
	/* pd is NULL when the dma_pool_alloc above failed. */
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}
862 
/*
 * Handle an FCME_GPDB_DONE event.
 *
 * Discards stale results (generation counters moved while the GPDB was
 * in flight), deletes the session on GPDB failure, and on success marks
 * the port logged in — registering it (upd_fcport) or querying port
 * speed (gpsc) depending on iIDMA/GPSC support.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	int rval = ea->rc;
	fc_port_t *fcport = ea->fcport;
	unsigned long flags;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
	    fcport->disc_state, fcport->fw_login_state, rval);

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed rscn %d|%d login %d|%d \n",
		    __func__, fcport->port_name, fcport->last_rscn_gen,
		    fcport->rscn_gen, fcport->last_login_gen,
		    fcport->login_gen);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* RSCN arrived meanwhile; re-resolve the port id first. */
		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion_lock(fcport);
		return;
	}

	/* Login confirmed: publish the state under the session lock. */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		if (!IS_IIDMA_CAPABLE(vha->hw) ||
		    !vha->hw->flags.gpsc_supported) {
			ql_dbg(ql_dbg_disc, vha, 0x20d6,
			    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);

			qla24xx_post_upd_fcport_work(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);

			qla24xx_post_gpsc_work(vha, fcport);
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
} /* gpdb event */
926 
/*
 * Drive the next step of the login state machine for @fcport.
 *
 * Consumes one login retry per invocation.  Bails out (returning 0 in
 * every case) when retries are exhausted, the port was not found by the
 * last scan, firmware already has a login in progress, the host is in
 * pure target mode, or an async command is still outstanding.
 * Otherwise dispatches on disc_state: GNL or PLOGI from DSC_DELETED,
 * GPDB or PLOGI from DSC_GNL, GIDPN after a failed login, and a GPDB
 * recheck after a completed one.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	if (fcport->login_retry == 0)
		return 0;

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
	    fcport->loop_id);

	fcport->login_retry--;

	/* Firmware already busy logging this port in. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return 0;

	/* Hold off until any PLOGI NACK window has expired. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
			return 0;
	}

	/* for pure Target Mode. Login will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			/* No handle yet: consult firmware's name list first. */
			ql_dbg(ql_dbg_disc, vha, 0x20bd,
			    "%s %d %8phC post gnl\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_async_gnl(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20bf,
			    "%s %d %8phC post login\n",
			    __func__, __LINE__, fcport->port_name);
			fcport->disc_state = DSC_LOGIN_PEND;
			qla2x00_post_async_login_work(vha, fcport, NULL);
		}
		break;

	case DSC_GNL:
		if (fcport->login_pause) {
			/* Wait for the conflicting session's cleanup. */
			fcport->last_rscn_gen = fcport->rscn_gen;
			fcport->last_login_gen = fcport->login_gen;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			break;
		}

		if (fcport->flags & FCF_FCP2_DEVICE) {
			u8 opt = PDO_FORCE_ADISC;

			ql_dbg(ql_dbg_disc, vha, 0x20c9,
			    "%s %d %8phC post gpdb\n",
			    __func__, __LINE__, fcport->port_name);

			fcport->disc_state = DSC_GPDB;
			qla24xx_post_gpdb_work(vha, fcport, opt);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20cf,
			    "%s %d %8phC post login\n",
			    __func__, __LINE__, fcport->port_name);
			fcport->disc_state = DSC_LOGIN_PEND;
			qla2x00_post_async_login_work(vha, fcport, NULL);
		}

		break;

	case DSC_LOGIN_FAILED:
		ql_dbg(ql_dbg_disc, vha, 0x20d0,
		    "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gidpn_work(vha, fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		ql_dbg(ql_dbg_disc, vha, 0x20d1,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
		break;

	default:
		break;
	}

	return 0;
}
1029 
1030 static
1031 void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
1032 {
1033 	fcport->rscn_gen++;
1034 
1035 	ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
1036 	    "%s %8phC DS %d LS %d\n",
1037 	    __func__, fcport->port_name, fcport->disc_state,
1038 	    fcport->fw_login_state);
1039 
1040 	if (fcport->flags & FCF_ASYNC_SENT)
1041 		return;
1042 
1043 	switch (fcport->disc_state) {
1044 	case DSC_DELETED:
1045 	case DSC_LOGIN_COMPLETE:
1046 		qla24xx_post_gidpn_work(fcport->vha, fcport);
1047 		break;
1048 
1049 	default:
1050 		break;
1051 	}
1052 }
1053 
1054 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1055 	u8 *port_name, void *pla)
1056 {
1057 	struct qla_work_evt *e;
1058 	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1059 	if (!e)
1060 		return QLA_FUNCTION_FAILED;
1061 
1062 	e->u.new_sess.id = *id;
1063 	e->u.new_sess.pla = pla;
1064 	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1065 
1066 	return qla2x00_post_work(vha, e);
1067 }
1068 
1069 static
1070 int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
1071 	struct event_arg *ea)
1072 {
1073 	fc_port_t *fcport = ea->fcport;
1074 
1075 	if (test_bit(UNLOADING, &vha->dpc_flags))
1076 		return 0;
1077 
1078 	switch (vha->host->active_mode) {
1079 	case MODE_INITIATOR:
1080 	case MODE_DUAL:
1081 		if (fcport->scan_state == QLA_FCPORT_FOUND)
1082 			qla24xx_fcport_handle_login(vha, fcport);
1083 		break;
1084 
1085 	case MODE_TARGET:
1086 	default:
1087 		/* no-op */
1088 		break;
1089 	}
1090 
1091 	return 0;
1092 }
1093 
/*
 * Handle an FCME_RELOGIN event for @ea->fcport.
 *
 * A cascade of guard clauses defers the relogin (refunding the retry
 * that the caller context consumed, where applicable) when the port is
 * gone, firmware is mid-login, an async command is outstanding, or a
 * delete is pending.  A moved RSCN generation triggers port-id
 * re-resolution instead; otherwise the login state machine is driven.
 */
static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (fcport->scan_state != QLA_FCPORT_FOUND) {
		fcport->login_retry++;
		return;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	/* Firmware is already logging this port in. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return;

	/* Hold off until any PLOGI NACK window has expired. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
			return;
	}

	if (fcport->flags & FCF_ASYNC_SENT) {
		fcport->login_retry++;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		fcport->login_retry++;
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_async_gidpn(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}
1144 
/*
 * Central dispatcher for fcport discovery events (FCME_*).  Called from
 * interrupt completion paths and the DPC thread to advance the per-port
 * login/discovery state machine.
 */
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport, *f, *tf;
	uint32_t id = 0, mask, rid;
	int rc;

	/*
	 * Discovery-related events are pointless while a full loop
	 * resync is pending or running - it will rescan everything.
	 */
	switch (ea->event) {
	case FCME_RELOGIN:
	case FCME_RSCN:
	case FCME_GIDPN_DONE:
	case FCME_GPSC_DONE:
	case FCME_GPNID_DONE:
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
		    test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
			return;
		break;
	default:
		break;
	}

	switch (ea->event) {
	case FCME_RELOGIN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;

		qla24xx_handle_relogin_event(vha, ea);
		break;
	case FCME_RSCN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;
		/* ea->id.b.rsvd_1 carries the RSCN address format. */
		switch (ea->id.b.rsvd_1) {
		case RSCN_PORT_ADDR:
			fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
			if (!fcport) {
				/* cable moved */
				rc = qla24xx_post_gpnid_work(vha, &ea->id);
				if (rc) {
					ql_log(ql_log_warn, vha, 0xd044,
					    "RSCN GPNID work failed %02x%02x%02x\n",
					    ea->id.b.domain, ea->id.b.area,
					    ea->id.b.al_pa);
				}
			} else {
				ea->fcport = fcport;
				qla24xx_handle_rscn_event(fcport, ea);
			}
			break;
		case RSCN_AREA_ADDR:
		case RSCN_DOM_ADDR:
			/*
			 * Area/domain RSCN: notify every known port whose
			 * 24-bit address falls inside the affected range.
			 */
			if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
				mask = 0xffff00;
				ql_dbg(ql_dbg_async, vha, 0x5044,
				    "RSCN: Area 0x%06x was affected\n",
				    ea->id.b24);
			} else {
				mask = 0xff0000;
				ql_dbg(ql_dbg_async, vha, 0x507a,
				    "RSCN: Domain 0x%06x was affected\n",
				    ea->id.b24);
			}

			rid = ea->id.b24 & mask;
			list_for_each_entry_safe(f, tf, &vha->vp_fcports,
			    list) {
				id = f->d_id.b24 & mask;
				if (rid == id) {
					ea->fcport = f;
					qla24xx_handle_rscn_event(f, ea);
				}
			}
			break;
		case RSCN_FAB_ADDR:
		default:
			/* Whole fabric affected - force a full rescan. */
			ql_log(ql_log_warn, vha, 0xd045,
			    "RSCN: Fabric was affected. Addr format %d\n",
			    ea->id.b.rsvd_1);
			qla2x00_mark_all_devices_lost(vha, 1);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		}
		break;
	case FCME_GIDPN_DONE:
		qla24xx_handle_gidpn_event(vha, ea);
		break;
	case FCME_GNL_DONE:
		qla24xx_handle_gnl_done_event(vha, ea);
		break;
	case FCME_GPSC_DONE:
		qla24xx_post_upd_fcport_work(vha, ea->fcport);
		break;
	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
		qla24xx_handle_plogi_done_event(vha, ea);
		break;
	case FCME_PRLI_DONE:
		qla24xx_handle_prli_done_event(vha, ea);
		break;
	case FCME_GPDB_DONE:
		qla24xx_handle_gpdb_event(vha, ea);
		break;
	case FCME_GPNID_DONE:
		qla24xx_handle_gpnid_event(vha, ea);
		break;
	case FCME_GFFID_DONE:
		qla24xx_handle_gffid_event(vha, ea);
		break;
	case FCME_DELETE_DONE:
		qla24xx_handle_delete_done_event(vha, ea);
		break;
	default:
		/* An unknown event here is a driver programming error. */
		BUG_ON(1);
		break;
	}
}
1258 
1259 static void
1260 qla2x00_tmf_iocb_timeout(void *data)
1261 {
1262 	srb_t *sp = data;
1263 	struct srb_iocb *tmf = &sp->u.iocb_cmd;
1264 
1265 	tmf->u.tmf.comp_status = CS_TIMEOUT;
1266 	complete(&tmf->u.tmf.comp);
1267 }
1268 
1269 static void
1270 qla2x00_tmf_sp_done(void *ptr, int res)
1271 {
1272 	srb_t *sp = ptr;
1273 	struct srb_iocb *tmf = &sp->u.iocb_cmd;
1274 
1275 	complete(&tmf->u.tmf.comp);
1276 }
1277 
/*
 * Issue a task-management IOCB (e.g. LUN/target reset) to @fcport and
 * wait synchronously for it to complete.
 *
 * @flags: TCF_* task-management flags.
 * @lun:   LUN the TM applies to.
 * @tag:   opaque tag stored in the TM IOCB's data field.
 *
 * Returns QLA_SUCCESS on a CS_COMPLETE completion, otherwise
 * QLA_FUNCTION_FAILED.  Must be called from sleepable context
 * (GFP_KERNEL allocation plus wait_for_completion()).
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;
	/* On timeout the handler fakes CS_TIMEOUT and completes us. */
	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	/* Block until qla2x00_tmf_sp_done() or the timeout fires. */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
		ql_dbg(ql_dbg_taskm, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->req_q_map[0],
		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
1336 
1337 static void
1338 qla24xx_abort_iocb_timeout(void *data)
1339 {
1340 	srb_t *sp = data;
1341 	struct srb_iocb *abt = &sp->u.iocb_cmd;
1342 
1343 	abt->u.abt.comp_status = CS_TIMEOUT;
1344 	complete(&abt->u.abt.comp);
1345 }
1346 
1347 static void
1348 qla24xx_abort_sp_done(void *ptr, int res)
1349 {
1350 	srb_t *sp = ptr;
1351 	struct srb_iocb *abt = &sp->u.iocb_cmd;
1352 
1353 	complete(&abt->u.abt.comp);
1354 }
1355 
/*
 * Issue an ABORT IOCB for the outstanding command @cmd_sp and wait
 * synchronously for the abort to complete.
 *
 * Returns QLA_SUCCESS if the abort completed with CS_COMPLETE,
 * QLA_FUNCTION_FAILED otherwise (including SRB allocation failure).
 * Must be called from sleepable context.
 */
int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	/* Firmware identifies the command to abort by its handle. */
	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	sp->done = qla24xx_abort_sp_done;
	/* On timeout the handler fakes CS_TIMEOUT and completes us. */
	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
1396 
/*
 * Abort the outstanding command @sp, dispatching to the FX00 ioctl
 * path for SRB_FXIOCB_DCMD or to the generic async abort otherwise.
 *
 * Returns QLA_FUNCTION_FAILED if @sp is not found in the request
 * queue's outstanding-command table.
 */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;

	uint32_t	handle;
	fc_port_t	*fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	/* Locate @sp's handle in the outstanding-command table. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/*
	 * NOTE(review): the lock is dropped before the abort is issued,
	 * so @sp could in principle complete between the scan and the
	 * abort — presumably tolerated by the firmware abort path;
	 * confirm against the completion handling.
	 */
	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp);
}
1424 
1425 static void
1426 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1427 {
1428 	switch (ea->data[0]) {
1429 	case MBS_COMMAND_COMPLETE:
1430 		ql_dbg(ql_dbg_disc, vha, 0x2118,
1431 		    "%s %d %8phC post gpdb\n",
1432 		    __func__, __LINE__, ea->fcport->port_name);
1433 
1434 		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1435 		ea->fcport->logout_on_delete = 1;
1436 		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1437 		break;
1438 	default:
1439 		ql_dbg(ql_dbg_disc, vha, 0x2119,
1440 		    "%s %d %8phC unhandle event of %x\n",
1441 		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
1442 		break;
1443 	}
1444 }
1445 
/*
 * Advance the login state machine after a PLOGI IOCB completes.
 * ea->data[0] holds the mailbox completion status; ea->data[1] and
 * ea->iop[] carry status-specific details.
 */
static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (ea->fcport->fc4f_nvme) {
			/* NVMe-capable port: PRLI must be sent explicitly. */
			ql_dbg(ql_dbg_disc, vha, 0x2117,
				"%s %d %8phC post prli\n",
				__func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
				"%s %d %8phC post gpdb\n",
				__func__, __LINE__, ea->fcport->port_name);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		/* Retry via DPC if firmware asked for it, else drop port. */
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC LoopID 0x%x in use post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id);

		/*
		 * Give up the conflicting loop ID, then re-fetch the
		 * node list (GNL) to pick up a fresh one.
		 */
		if (IS_SW_RESV_ADDR(cid)) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			ea->fcport->loop_id = FC_NO_LOOP_ID;
		} else {
			qla2x00_clear_loop_id(ea->fcport);
		}
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		/* N_Port ID conflict: revalidate the address via GID_PN. */
		ql_dbg(ql_dbg_disc, vha, 0x20ed,
		    "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
		    ea->fcport->d_id.b.al_pa);

		qla2x00_clear_loop_id(ea->fcport);
		qla24xx_post_gidpn_work(vha, ea->fcport);
		break;
	}
	return;
}
1516 
1517 void
1518 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1519     uint16_t *data)
1520 {
1521 	qla2x00_mark_device_lost(vha, fcport, 1, 0);
1522 	qlt_logo_completion_handler(fcport, data[0]);
1523 	fcport->login_gen++;
1524 	return;
1525 }
1526 
1527 void
1528 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1529     uint16_t *data)
1530 {
1531 	if (data[0] == MBS_COMMAND_COMPLETE) {
1532 		qla2x00_update_fcport(vha, fcport);
1533 
1534 		return;
1535 	}
1536 
1537 	/* Retry login. */
1538 	fcport->flags &= ~FCF_ASYNC_SENT;
1539 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
1540 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1541 	else
1542 		qla2x00_mark_device_lost(vha, fcport, 1, 0);
1543 
1544 	return;
1545 }
1546 
1547 /****************************************************************************/
1548 /*                QLogic ISP2x00 Hardware Support Functions.                */
1549 /****************************************************************************/
1550 
/*
 * ISP8031: participate in the Inter-Driver Communication (IDC)
 * protocol to load the shared NIC core firmware.  The whole sequence
 * runs under the IDC lock: announce our presence, negotiate reset
 * ownership, check IDC major/minor version compatibility, and run the
 * IDC state handler until the device is ready.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	/* Two bits per function (ha->portnum) in the minor-version reg. */
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		/* Owner with a valid port config declares the device ready. */
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
1623 
1624 /*
1625 * qla2x00_initialize_adapter
1626 *      Initialize board.
1627 *
1628 * Input:
1629 *      ha = adapter block pointer.
1630 *
1631 * Returns:
1632 *      0 = success
1633 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Start from clean statistics. */
	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	/* Queue 0 (the default req/rsp pair) is always in use. */
	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * should honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* Load/verify firmware only if no usable image is running. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/*
	 * NOTE(review): if neither initiator nor dual mode is enabled,
	 * rval keeps the status of the previous step here — confirm
	 * init_rings is genuinely unneeded for pure target mode.
	 */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}
1759 
1760 /**
1761  * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
1762  * @ha: HA context
1763  *
1764  * Returns 0 on success.
1765  */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity and SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
1790 
1791 /**
1792  * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
1793  * @ha: HA context
1794  *
1795  * Returns 0 on success.
1796  */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t	w;
	unsigned long   flags = 0;
	uint32_t	cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity and SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	/* Leave legacy INTx enabled on 2322/6322. */
	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		/* True 2300 (FPM_2300): disable MWI to work around the bug. */
		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
1872 
1873 /**
1874  * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
1875  * @ha: HA context
1876  *
1877  * Returns 0 on success.
1878  */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting and keep legacy INTx enabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
1916 
1917 /**
1918  * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
1919  * @ha: HA context
1920  *
1921  * Returns 0 on success.
1922  */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting and keep legacy INTx enabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
1947 
1948 /**
1949  * qla2x00_isp_firmware() - Choose firmware image.
1950  * @ha: HA context
1951  *
1952  * Returns 0 on success.
1953  */
1954 static int
1955 qla2x00_isp_firmware(scsi_qla_host_t *vha)
1956 {
1957 	int  rval;
1958 	uint16_t loop_id, topo, sw_cap;
1959 	uint8_t domain, area, al_pa;
1960 	struct qla_hw_data *ha = vha->hw;
1961 
1962 	/* Assume loading risc code */
1963 	rval = QLA_FUNCTION_FAILED;
1964 
1965 	if (ha->flags.disable_risc_code_load) {
1966 		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
1967 
1968 		/* Verify checksum of loaded RISC code. */
1969 		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
1970 		if (rval == QLA_SUCCESS) {
1971 			/* And, verify we are not in ROM code. */
1972 			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
1973 			    &area, &domain, &topo, &sw_cap);
1974 		}
1975 	}
1976 
1977 	if (rval)
1978 		ql_dbg(ql_dbg_init, vha, 0x007a,
1979 		    "**** Load RISC code ****.\n");
1980 
1981 	return (rval);
1982 }
1983 
1984 /**
1985  * qla2x00_reset_chip() - Reset ISP chip.
1986  * @ha: HA context
1987  *
1988  * Returns 0 on success.
1989  */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long   flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t	cnt;
	uint16_t	cmd;

	/* Device is gone from the bus - nothing to reset. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	/* Quiesce interrupts before touching the chip. */
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	/* Clear any latched RISC/host interrupts. */
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to for a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */

	/* Older chips: poll mailbox 0 until the RISC is no longer busy. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
2127 
2128 /**
2129  * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2130  *
2131  * Returns 0 on success.
2132  */
2133 static int
2134 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2135 {
2136 	uint16_t mb[4] = {0x1010, 0, 1, 0};
2137 
2138 	if (!IS_QLA81XX(vha->hw))
2139 		return QLA_SUCCESS;
2140 
2141 	return qla81xx_write_mpi_register(vha, mb);
2142 }
2143 
/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Shuts down DMA, soft-resets the ISP, optionally resets the MPI
 * firmware, then pulses the RISC reset and waits for the firmware to
 * come ready. Each completed stage is recorded in
 * ha->fw_dump_cap_flags for later firmware-dump analysis.
 *
 * Returns 0 on success.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Poll (up to 30000 * 10us) for the DMA engines to go idle. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	/* Record a clean DMA shutdown for the fw-dump capture flags. */
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status),
	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Config-space read flushes the posted soft-reset write. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			/* Schedule another ISP abort + MPI reset attempt. */
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	/* Wait for the RISC to come ready (mailbox0 returns to 0). */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	     RD_REG_WORD(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
2278 
2279 static void
2280 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2281 {
2282 	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2283 
2284 	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2285 	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2286 
2287 }
2288 
2289 static void
2290 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2291 {
2292 	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2293 
2294 	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2295 	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2296 }
2297 
/*
 * Acquire the RISC semaphore before reset on affected boards
 * (PCI subsystem IDs 0x0175 and 0x0240). Pauses the RISC, then
 * repeatedly tries to take the semaphore; if another function holds
 * it via the FORCE bit, waits for release, and after the total
 * timeout takes it by force.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	/* Only the two affected subsystem IDs need this workaround. */
	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	/* Pause the RISC before touching the semaphore register. */
	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	/* Keep setting the semaphore until it reads back as held. */
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* FORCE is set: clear our grab and wait for it to drop. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	/* FORCE released; retry the normal acquisition path. */
	goto attempt;

force:
	/* Give up waiting and take the semaphore by force. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
2358 
/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @vha: HA context
 *
 * Disables interrupts, grabs the RISC semaphore on affected ISP25xx
 * boards, and performs a full RISC reset. Does nothing when the PCI
 * channel has permanently failed.
 */
void
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Don't touch the hardware once the PCI channel is gone for good. */
	if (pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure) {
		return;
	}

	ha->isp_ops->disable_intrs(ha);

	qla25xx_manipulate_risc_semaphore(vha);

	/* Perform RISC reset. */
	qla24xx_reset_risc(vha);
}
2382 
/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Soft-resets the ISP, verifies the RISC product ID, sizes the
 * firmware transfer buffer, and runs the mailbox register wrap test.
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int		rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long	flags = 0;
	uint16_t	data;
	uint32_t	cnt;
	uint16_t	mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b,
	    "Testing device at %lx.\n", (u_long)&reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	/* Poll (up to 6000000 * 5us) for the reset bit to self-clear. */
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state. */
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	/* The mailbox test issues a command, so drop the hardware lock. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	/* Reacquire: the failure label below unlocks unconditionally. */
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
2506 
2507 /**
2508  * qla24xx_chip_diag() - Test ISP24xx for proper operation.
2509  * @ha: HA context
2510  *
2511  * Returns 0 on success.
2512  */
2513 int
2514 qla24xx_chip_diag(scsi_qla_host_t *vha)
2515 {
2516 	int rval;
2517 	struct qla_hw_data *ha = vha->hw;
2518 	struct req_que *req = ha->req_q_map[0];
2519 
2520 	if (IS_P3P_TYPE(ha))
2521 		return QLA_SUCCESS;
2522 
2523 	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
2524 
2525 	rval = qla2x00_mbx_reg_test(vha);
2526 	if (rval) {
2527 		ql_log(ql_log_warn, vha, 0x0082,
2528 		    "Failed mailbox send register test.\n");
2529 	} else {
2530 		/* Flag a successful rval */
2531 		rval = QLA_SUCCESS;
2532 	}
2533 
2534 	return rval;
2535 }
2536 
2537 void
2538 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
2539 {
2540 	int rval;
2541 	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
2542 	    eft_size, fce_size, mq_size;
2543 	dma_addr_t tc_dma;
2544 	void *tc;
2545 	struct qla_hw_data *ha = vha->hw;
2546 	struct req_que *req = ha->req_q_map[0];
2547 	struct rsp_que *rsp = ha->rsp_q_map[0];
2548 
2549 	if (ha->fw_dump) {
2550 		ql_dbg(ql_dbg_init, vha, 0x00bd,
2551 		    "Firmware dump already allocated.\n");
2552 		return;
2553 	}
2554 
2555 	ha->fw_dumped = 0;
2556 	ha->fw_dump_cap_flags = 0;
2557 	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
2558 	req_q_size = rsp_q_size = 0;
2559 
2560 	if (IS_QLA27XX(ha))
2561 		goto try_fce;
2562 
2563 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2564 		fixed_size = sizeof(struct qla2100_fw_dump);
2565 	} else if (IS_QLA23XX(ha)) {
2566 		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
2567 		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
2568 		    sizeof(uint16_t);
2569 	} else if (IS_FWI2_CAPABLE(ha)) {
2570 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
2571 			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
2572 		else if (IS_QLA81XX(ha))
2573 			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
2574 		else if (IS_QLA25XX(ha))
2575 			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
2576 		else
2577 			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
2578 
2579 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
2580 		    sizeof(uint32_t);
2581 		if (ha->mqenable) {
2582 			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
2583 				mq_size = sizeof(struct qla2xxx_mq_chain);
2584 			/*
2585 			 * Allocate maximum buffer size for all queues.
2586 			 * Resizing must be done at end-of-dump processing.
2587 			 */
2588 			mq_size += ha->max_req_queues *
2589 			    (req->length * sizeof(request_t));
2590 			mq_size += ha->max_rsp_queues *
2591 			    (rsp->length * sizeof(response_t));
2592 		}
2593 		if (ha->tgt.atio_ring)
2594 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
2595 		/* Allocate memory for Fibre Channel Event Buffer. */
2596 		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
2597 		    !IS_QLA27XX(ha))
2598 			goto try_eft;
2599 
2600 try_fce:
2601 		if (ha->fce)
2602 			dma_free_coherent(&ha->pdev->dev,
2603 			    FCE_SIZE, ha->fce, ha->fce_dma);
2604 
2605 		/* Allocate memory for Fibre Channel Event Buffer. */
2606 		tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
2607 					 GFP_KERNEL);
2608 		if (!tc) {
2609 			ql_log(ql_log_warn, vha, 0x00be,
2610 			    "Unable to allocate (%d KB) for FCE.\n",
2611 			    FCE_SIZE / 1024);
2612 			goto try_eft;
2613 		}
2614 
2615 		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
2616 		    ha->fce_mb, &ha->fce_bufs);
2617 		if (rval) {
2618 			ql_log(ql_log_warn, vha, 0x00bf,
2619 			    "Unable to initialize FCE (%d).\n", rval);
2620 			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
2621 			    tc_dma);
2622 			ha->flags.fce_enabled = 0;
2623 			goto try_eft;
2624 		}
2625 		ql_dbg(ql_dbg_init, vha, 0x00c0,
2626 		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
2627 
2628 		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
2629 		ha->flags.fce_enabled = 1;
2630 		ha->fce_dma = tc_dma;
2631 		ha->fce = tc;
2632 
2633 try_eft:
2634 		if (ha->eft)
2635 			dma_free_coherent(&ha->pdev->dev,
2636 			    EFT_SIZE, ha->eft, ha->eft_dma);
2637 
2638 		/* Allocate memory for Extended Trace Buffer. */
2639 		tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
2640 					 GFP_KERNEL);
2641 		if (!tc) {
2642 			ql_log(ql_log_warn, vha, 0x00c1,
2643 			    "Unable to allocate (%d KB) for EFT.\n",
2644 			    EFT_SIZE / 1024);
2645 			goto cont_alloc;
2646 		}
2647 
2648 		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
2649 		if (rval) {
2650 			ql_log(ql_log_warn, vha, 0x00c2,
2651 			    "Unable to initialize EFT (%d).\n", rval);
2652 			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
2653 			    tc_dma);
2654 			goto cont_alloc;
2655 		}
2656 		ql_dbg(ql_dbg_init, vha, 0x00c3,
2657 		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
2658 
2659 		eft_size = EFT_SIZE;
2660 		ha->eft_dma = tc_dma;
2661 		ha->eft = tc;
2662 	}
2663 
2664 cont_alloc:
2665 	if (IS_QLA27XX(ha)) {
2666 		if (!ha->fw_dump_template) {
2667 			ql_log(ql_log_warn, vha, 0x00ba,
2668 			    "Failed missing fwdump template\n");
2669 			return;
2670 		}
2671 		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
2672 		ql_dbg(ql_dbg_init, vha, 0x00fa,
2673 		    "-> allocating fwdump (%x bytes)...\n", dump_size);
2674 		goto allocate;
2675 	}
2676 
2677 	req_q_size = req->length * sizeof(request_t);
2678 	rsp_q_size = rsp->length * sizeof(response_t);
2679 	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
2680 	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
2681 	ha->chain_offset = dump_size;
2682 	dump_size += mq_size + fce_size;
2683 
2684 	if (ha->exchoffld_buf)
2685 		dump_size += sizeof(struct qla2xxx_offld_chain) +
2686 			ha->exchoffld_size;
2687 	if (ha->exlogin_buf)
2688 		dump_size += sizeof(struct qla2xxx_offld_chain) +
2689 			ha->exlogin_size;
2690 
2691 allocate:
2692 	ha->fw_dump = vmalloc(dump_size);
2693 	if (!ha->fw_dump) {
2694 		ql_log(ql_log_warn, vha, 0x00c4,
2695 		    "Unable to allocate (%d KB) for firmware dump.\n",
2696 		    dump_size / 1024);
2697 
2698 		if (ha->fce) {
2699 			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2700 			    ha->fce_dma);
2701 			ha->fce = NULL;
2702 			ha->fce_dma = 0;
2703 		}
2704 
2705 		if (ha->eft) {
2706 			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
2707 			    ha->eft_dma);
2708 			ha->eft = NULL;
2709 			ha->eft_dma = 0;
2710 		}
2711 		return;
2712 	}
2713 	ha->fw_dump_len = dump_size;
2714 	ql_dbg(ql_dbg_init, vha, 0x00c5,
2715 	    "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
2716 
2717 	if (IS_QLA27XX(ha))
2718 		return;
2719 
2720 	ha->fw_dump->signature[0] = 'Q';
2721 	ha->fw_dump->signature[1] = 'L';
2722 	ha->fw_dump->signature[2] = 'G';
2723 	ha->fw_dump->signature[3] = 'C';
2724 	ha->fw_dump->version = htonl(1);
2725 
2726 	ha->fw_dump->fixed_size = htonl(fixed_size);
2727 	ha->fw_dump->mem_size = htonl(mem_size);
2728 	ha->fw_dump->req_q_size = htonl(req_q_size);
2729 	ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
2730 
2731 	ha->fw_dump->eft_size = htonl(eft_size);
2732 	ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
2733 	ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
2734 
2735 	ha->fw_dump->header_size =
2736 	    htonl(offsetof(struct qla2xxx_fw_dump, isp));
2737 }
2738 
/*
 * Synchronize the MPS bits (mask 0xe0) between PCI config-space offset
 * 0x54 and RISC RAM word 0x7a15 on ISP81xx, under the semaphore held
 * via RAM word 0x7c00. No-op on other adapter types.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK	0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Take the semaphore: write 1 to RAM word 0x7c00. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	/* Already in sync? Just release the semaphore. */
	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Copy the config-space MPS bits into the RAM word. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	/* Release the semaphore: write 0 to RAM word 0x7c00. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}
2785 
2786 int
2787 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
2788 {
2789 	/* Don't try to reallocate the array */
2790 	if (req->outstanding_cmds)
2791 		return QLA_SUCCESS;
2792 
2793 	if (!IS_FWI2_CAPABLE(ha))
2794 		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
2795 	else {
2796 		if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
2797 			req->num_outstanding_cmds = ha->cur_fw_xcb_count;
2798 		else
2799 			req->num_outstanding_cmds = ha->cur_fw_iocb_count;
2800 	}
2801 
2802 	req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2803 	    req->num_outstanding_cmds, GFP_KERNEL);
2804 
2805 	if (!req->outstanding_cmds) {
2806 		/*
2807 		 * Try to allocate a minimal size just so we can get through
2808 		 * initialization.
2809 		 */
2810 		req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
2811 		req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2812 		    req->num_outstanding_cmds, GFP_KERNEL);
2813 
2814 		if (!req->outstanding_cmds) {
2815 			ql_log(ql_log_fatal, NULL, 0x0126,
2816 			    "Failed to allocate memory for "
2817 			    "outstanding_cmds for req_que %p.\n", req);
2818 			req->num_outstanding_cmds = 0;
2819 			return QLA_FUNCTION_FAILED;
2820 		}
2821 	}
2822 
2823 	return QLA_SUCCESS;
2824 }
2825 
/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Loads the RISC firmware image, verifies its checksum, starts it
 * executing, retrieves firmware version/resource information, and
 * sizes the outstanding-command array and (on first start) the
 * firmware-dump buffer.
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (IS_P3P_TYPE(ha)) {
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity.  */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Clamp to a MIN_MULTI_ID_FABRIC-aligned vport count. */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				/* Only size the fw dump on a first (cold) start. */
				if (!fw_major_version && ql2xallocfwdump
				    && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);
			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		/* Probe FAC support by querying the flash sector size. */
		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			/* 83xx/27xx can run without FAC; don't fail setup. */
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}
2975 
2976 /**
2977  * qla2x00_init_response_q_entries() - Initializes response queue entries.
2978  * @ha: HA context
2979  *
2980  * Beginning of request ring has initialization control block already built
2981  * by nvram config routine.
2982  *
2983  * Returns 0 on success.
2984  */
2985 void
2986 qla2x00_init_response_q_entries(struct rsp_que *rsp)
2987 {
2988 	uint16_t cnt;
2989 	response_t *pkt;
2990 
2991 	rsp->ring_ptr = rsp->ring;
2992 	rsp->ring_index    = 0;
2993 	rsp->status_srb = NULL;
2994 	pkt = rsp->ring_ptr;
2995 	for (cnt = 0; cnt < rsp->length; cnt++) {
2996 		pkt->signature = RESPONSE_PROCESSED;
2997 		pkt++;
2998 	}
2999 }
3000 
/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Fetches the current firmware options, folds in NVRAM serial-link
 * tuning (transmitter swing/emphasis, receiver sensitivity), FCP2/ABTS
 * behavior, LED scheme and P2P FLOGI-retry bits, then writes the
 * options back to the firmware. ISP2100/2200 only fetch the options.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    (uint8_t *)&ha->fw_seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	/* Apply custom link tuning when enabled in seriallink options. */
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/*  1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* A zero rx sensitivity is remapped to 0x3. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/*  2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/*  Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
3090 
/**
 * qla24xx_update_fw_options() - Process firmware options (FWI2 adapters).
 * @vha: HA context
 *
 * Tunes fw_options bits for ABTS/status-IOCB handling, P2P FLOGI
 * retry, ATIO-queue redirection and exchange tracking, pushes the
 * options to the firmware, then updates serial link parameters when
 * NVRAM enables them. No-op on P3P-type adapters.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/*  Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
		/* Only meaningful when target or dual mode is active. */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only push options when at least one word is non-default. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}
3155 
/**
 * qla2x00_config_rings() - Program ring parameters into the ISP.
 * @vha: HA context
 *
 * Writes the request/response ring lengths and DMA addresses into the
 * initialization control block and zeroes the hardware in/out pointer
 * registers.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Reset the hardware ring in/out indices to zero. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}
3180 
/*
 * qla24xx_config_rings() - Program ring parameters for ISP24xx and newer.
 * @vha: host adapter context
 *
 * Loads request/response/ATIO queue lengths and DMA base addresses of
 * queue 0 into the 24xx initialization control block, configures the
 * multiqueue/MSI-X related firmware option bits, then zeroes the hardware
 * queue pointer registers and flushes the writes with a posting read.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));

	/* Request shadow-register support where the chip provides it. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		/* (rid is fixed at 0 here, so neither alternate bit fires.) */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
	} else {
		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
	}
	/* Let target-mode code apply its own ring configuration. */
	qlt_24xx_config_rings(vha);

	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}
3257 
3258 /**
3259  * qla2x00_init_rings() - Initializes firmware.
3260  * @ha: HA context
3261  *
3262  * Beginning of request ring has initialization control block already built
3263  * by nvram config routine.
3264  *
3265  * Returns 0 on success.
3266  */
3267 int
3268 qla2x00_init_rings(scsi_qla_host_t *vha)
3269 {
3270 	int	rval;
3271 	unsigned long flags = 0;
3272 	int cnt, que;
3273 	struct qla_hw_data *ha = vha->hw;
3274 	struct req_que *req;
3275 	struct rsp_que *rsp;
3276 	struct mid_init_cb_24xx *mid_init_cb =
3277 	    (struct mid_init_cb_24xx *) ha->init_cb;
3278 
3279 	spin_lock_irqsave(&ha->hardware_lock, flags);
3280 
3281 	/* Clear outstanding commands array. */
3282 	for (que = 0; que < ha->max_req_queues; que++) {
3283 		req = ha->req_q_map[que];
3284 		if (!req || !test_bit(que, ha->req_qid_map))
3285 			continue;
3286 		req->out_ptr = (void *)(req->ring + req->length);
3287 		*req->out_ptr = 0;
3288 		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
3289 			req->outstanding_cmds[cnt] = NULL;
3290 
3291 		req->current_outstanding_cmd = 1;
3292 
3293 		/* Initialize firmware. */
3294 		req->ring_ptr  = req->ring;
3295 		req->ring_index    = 0;
3296 		req->cnt      = req->length;
3297 	}
3298 
3299 	for (que = 0; que < ha->max_rsp_queues; que++) {
3300 		rsp = ha->rsp_q_map[que];
3301 		if (!rsp || !test_bit(que, ha->rsp_qid_map))
3302 			continue;
3303 		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
3304 		*rsp->in_ptr = 0;
3305 		/* Initialize response queue entries */
3306 		if (IS_QLAFX00(ha))
3307 			qlafx00_init_response_q_entries(rsp);
3308 		else
3309 			qla2x00_init_response_q_entries(rsp);
3310 	}
3311 
3312 	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
3313 	ha->tgt.atio_ring_index = 0;
3314 	/* Initialize ATIO queue entries */
3315 	qlt_init_atio_q_entries(vha);
3316 
3317 	ha->isp_ops->config_rings(vha);
3318 
3319 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3320 
3321 	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
3322 
3323 	if (IS_QLAFX00(ha)) {
3324 		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
3325 		goto next_check;
3326 	}
3327 
3328 	/* Update any ISP specific firmware options before initialization. */
3329 	ha->isp_ops->update_fw_options(vha);
3330 
3331 	if (ha->flags.npiv_supported) {
3332 		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
3333 			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
3334 		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
3335 	}
3336 
3337 	if (IS_FWI2_CAPABLE(ha)) {
3338 		mid_init_cb->options = cpu_to_le16(BIT_1);
3339 		mid_init_cb->init_cb.execution_throttle =
3340 		    cpu_to_le16(ha->cur_fw_xcb_count);
3341 		ha->flags.dport_enabled =
3342 		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
3343 		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
3344 		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
3345 		/* FA-WWPN Status */
3346 		ha->flags.fawwpn_enabled =
3347 		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
3348 		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
3349 		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
3350 	}
3351 
3352 	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
3353 next_check:
3354 	if (rval) {
3355 		ql_log(ql_log_fatal, vha, 0x00d2,
3356 		    "Init Firmware **** FAILED ****.\n");
3357 	} else {
3358 		ql_dbg(ql_dbg_init, vha, 0x00d3,
3359 		    "Init Firmware -- success.\n");
3360 		QLA_FW_STARTED(ha);
3361 	}
3362 
3363 	return (rval);
3364 }
3365 
3366 /**
3367  * qla2x00_fw_ready() - Waits for firmware ready.
3368  * @ha: HA context
3369  *
3370  * Returns 0 on success.
3371  */
3372 static int
3373 qla2x00_fw_ready(scsi_qla_host_t *vha)
3374 {
3375 	int		rval;
3376 	unsigned long	wtime, mtime, cs84xx_time;
3377 	uint16_t	min_wait;	/* Minimum wait time if loop is down */
3378 	uint16_t	wait_time;	/* Wait time if loop is coming ready */
3379 	uint16_t	state[6];
3380 	struct qla_hw_data *ha = vha->hw;
3381 
3382 	if (IS_QLAFX00(vha->hw))
3383 		return qlafx00_fw_ready(vha);
3384 
3385 	rval = QLA_SUCCESS;
3386 
3387 	/* Time to wait for loop down */
3388 	if (IS_P3P_TYPE(ha))
3389 		min_wait = 30;
3390 	else
3391 		min_wait = 20;
3392 
3393 	/*
3394 	 * Firmware should take at most one RATOV to login, plus 5 seconds for
3395 	 * our own processing.
3396 	 */
3397 	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
3398 		wait_time = min_wait;
3399 	}
3400 
3401 	/* Min wait time if loop down */
3402 	mtime = jiffies + (min_wait * HZ);
3403 
3404 	/* wait time before firmware ready */
3405 	wtime = jiffies + (wait_time * HZ);
3406 
3407 	/* Wait for ISP to finish LIP */
3408 	if (!vha->flags.init_done)
3409 		ql_log(ql_log_info, vha, 0x801e,
3410 		    "Waiting for LIP to complete.\n");
3411 
3412 	do {
3413 		memset(state, -1, sizeof(state));
3414 		rval = qla2x00_get_firmware_state(vha, state);
3415 		if (rval == QLA_SUCCESS) {
3416 			if (state[0] < FSTATE_LOSS_OF_SYNC) {
3417 				vha->device_flags &= ~DFLG_NO_CABLE;
3418 			}
3419 			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
3420 				ql_dbg(ql_dbg_taskm, vha, 0x801f,
3421 				    "fw_state=%x 84xx=%x.\n", state[0],
3422 				    state[2]);
3423 				if ((state[2] & FSTATE_LOGGED_IN) &&
3424 				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
3425 					ql_dbg(ql_dbg_taskm, vha, 0x8028,
3426 					    "Sending verify iocb.\n");
3427 
3428 					cs84xx_time = jiffies;
3429 					rval = qla84xx_init_chip(vha);
3430 					if (rval != QLA_SUCCESS) {
3431 						ql_log(ql_log_warn,
3432 						    vha, 0x8007,
3433 						    "Init chip failed.\n");
3434 						break;
3435 					}
3436 
3437 					/* Add time taken to initialize. */
3438 					cs84xx_time = jiffies - cs84xx_time;
3439 					wtime += cs84xx_time;
3440 					mtime += cs84xx_time;
3441 					ql_dbg(ql_dbg_taskm, vha, 0x8008,
3442 					    "Increasing wait time by %ld. "
3443 					    "New time %ld.\n", cs84xx_time,
3444 					    wtime);
3445 				}
3446 			} else if (state[0] == FSTATE_READY) {
3447 				ql_dbg(ql_dbg_taskm, vha, 0x8037,
3448 				    "F/W Ready - OK.\n");
3449 
3450 				qla2x00_get_retry_cnt(vha, &ha->retry_count,
3451 				    &ha->login_timeout, &ha->r_a_tov);
3452 
3453 				rval = QLA_SUCCESS;
3454 				break;
3455 			}
3456 
3457 			rval = QLA_FUNCTION_FAILED;
3458 
3459 			if (atomic_read(&vha->loop_down_timer) &&
3460 			    state[0] != FSTATE_READY) {
3461 				/* Loop down. Timeout on min_wait for states
3462 				 * other than Wait for Login.
3463 				 */
3464 				if (time_after_eq(jiffies, mtime)) {
3465 					ql_log(ql_log_info, vha, 0x8038,
3466 					    "Cable is unplugged...\n");
3467 
3468 					vha->device_flags |= DFLG_NO_CABLE;
3469 					break;
3470 				}
3471 			}
3472 		} else {
3473 			/* Mailbox cmd failed. Timeout on min_wait. */
3474 			if (time_after_eq(jiffies, mtime) ||
3475 				ha->flags.isp82xx_fw_hung)
3476 				break;
3477 		}
3478 
3479 		if (time_after_eq(jiffies, wtime))
3480 			break;
3481 
3482 		/* Delay for a while */
3483 		msleep(500);
3484 	} while (1);
3485 
3486 	ql_dbg(ql_dbg_taskm, vha, 0x803a,
3487 	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
3488 	    state[1], state[2], state[3], state[4], state[5], jiffies);
3489 
3490 	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
3491 		ql_log(ql_log_warn, vha, 0x803b,
3492 		    "Firmware ready **** FAILED ****.\n");
3493 	}
3494 
3495 	return (rval);
3496 }
3497 
3498 /*
3499 *  qla2x00_configure_hba
3500 *      Setup adapter context.
3501 *
3502 * Input:
3503 *      ha = adapter state pointer.
3504 *
3505 * Returns:
3506 *      0 = success
3507 *
3508 * Context:
3509 *      Kernel context.
3510 */
3511 static int
3512 qla2x00_configure_hba(scsi_qla_host_t *vha)
3513 {
3514 	int       rval;
3515 	uint16_t      loop_id;
3516 	uint16_t      topo;
3517 	uint16_t      sw_cap;
3518 	uint8_t       al_pa;
3519 	uint8_t       area;
3520 	uint8_t       domain;
3521 	char		connect_type[22];
3522 	struct qla_hw_data *ha = vha->hw;
3523 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3524 	port_id_t id;
3525 
3526 	/* Get host addresses. */
3527 	rval = qla2x00_get_adapter_id(vha,
3528 	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
3529 	if (rval != QLA_SUCCESS) {
3530 		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
3531 		    IS_CNA_CAPABLE(ha) ||
3532 		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
3533 			ql_dbg(ql_dbg_disc, vha, 0x2008,
3534 			    "Loop is in a transition state.\n");
3535 		} else {
3536 			ql_log(ql_log_warn, vha, 0x2009,
3537 			    "Unable to get host loop ID.\n");
3538 			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
3539 			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
3540 				ql_log(ql_log_warn, vha, 0x1151,
3541 				    "Doing link init.\n");
3542 				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
3543 					return rval;
3544 			}
3545 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3546 		}
3547 		return (rval);
3548 	}
3549 
3550 	if (topo == 4) {
3551 		ql_log(ql_log_info, vha, 0x200a,
3552 		    "Cannot get topology - retrying.\n");
3553 		return (QLA_FUNCTION_FAILED);
3554 	}
3555 
3556 	vha->loop_id = loop_id;
3557 
3558 	/* initialize */
3559 	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
3560 	ha->operating_mode = LOOP;
3561 	ha->switch_cap = 0;
3562 
3563 	switch (topo) {
3564 	case 0:
3565 		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
3566 		ha->current_topology = ISP_CFG_NL;
3567 		strcpy(connect_type, "(Loop)");
3568 		break;
3569 
3570 	case 1:
3571 		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
3572 		ha->switch_cap = sw_cap;
3573 		ha->current_topology = ISP_CFG_FL;
3574 		strcpy(connect_type, "(FL_Port)");
3575 		break;
3576 
3577 	case 2:
3578 		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
3579 		ha->operating_mode = P2P;
3580 		ha->current_topology = ISP_CFG_N;
3581 		strcpy(connect_type, "(N_Port-to-N_Port)");
3582 		break;
3583 
3584 	case 3:
3585 		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
3586 		ha->switch_cap = sw_cap;
3587 		ha->operating_mode = P2P;
3588 		ha->current_topology = ISP_CFG_F;
3589 		strcpy(connect_type, "(F_Port)");
3590 		break;
3591 
3592 	default:
3593 		ql_dbg(ql_dbg_disc, vha, 0x200f,
3594 		    "HBA in unknown topology %x, using NL.\n", topo);
3595 		ha->current_topology = ISP_CFG_NL;
3596 		strcpy(connect_type, "(Loop)");
3597 		break;
3598 	}
3599 
3600 	/* Save Host port and loop ID. */
3601 	/* byte order - Big Endian */
3602 	id.b.domain = domain;
3603 	id.b.area = area;
3604 	id.b.al_pa = al_pa;
3605 	id.b.rsvd_1 = 0;
3606 	qlt_update_host_map(vha, id);
3607 
3608 	if (!vha->flags.init_done)
3609 		ql_log(ql_log_info, vha, 0x2010,
3610 		    "Topology - %s, Host Loop address 0x%x.\n",
3611 		    connect_type, vha->loop_id);
3612 
3613 	return(rval);
3614 }
3615 
/*
 * qla2x00_set_model_info() - Populate ha->model_number/ha->model_desc.
 * @vha: host adapter context
 * @model: raw model-number bytes (may be blank or unterminated)
 * @len: number of bytes in @model
 * @def: default model-number string used when @model is blank and no
 *	 lookup-table entry applies
 *
 * NOTE(review): strncpy() below does not NUL-terminate when @model fills
 * all @len bytes; this relies on ha->model_number having at least len + 1
 * pre-zeroed bytes -- confirm against the buffer size in qla_def.h.
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
	char *def)
{
	char *st, *en;
	uint16_t index;
	struct qla_hw_data *ha = vha->hw;
	/* The static model-name table only applies to older, non-CNA parts. */
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	if (memcmp(model, BINZERO, len) != 0) {
		strncpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		/* Trim trailing spaces and NULs from the copied string. */
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
	} else {
		/* Blank model data: fall back to the table or @def. */
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strcpy(ha->model_number,
			    qla2x00_model_name[index * 2]);
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
		} else {
			strcpy(ha->model_number, def);
		}
	}
	/* FWI2 parts may carry a nicer description in VPD tag 0x82. */
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}
3661 
3662 /* On sparc systems, obtain port and node WWN from firmware
3663  * properties.
3664  */
3665 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
3666 {
3667 #ifdef CONFIG_SPARC
3668 	struct qla_hw_data *ha = vha->hw;
3669 	struct pci_dev *pdev = ha->pdev;
3670 	struct device_node *dp = pci_device_to_OF_node(pdev);
3671 	const u8 *val;
3672 	int len;
3673 
3674 	val = of_get_property(dp, "port-wwn", &len);
3675 	if (val && len >= WWN_SIZE)
3676 		memcpy(nv->port_name, val, WWN_SIZE);
3677 
3678 	val = of_get_property(dp, "node-wwn", &len);
3679 	if (val && len >= WWN_SIZE)
3680 		memcpy(nv->node_name, val, WWN_SIZE);
3681 #endif
3682 }
3683 
3684 /*
3685 * NVRAM configuration for ISP 2xxx
3686 *
3687 * Input:
3688 *      ha                = adapter block pointer.
3689 *
3690 * Output:
3691 *      initialization control block in response_ring
3692 *      host adapters parameters in host adapter block
3693 *
3694 * Returns:
3695 *      0 = success.
3696 */
3697 int
3698 qla2x00_nvram_config(scsi_qla_host_t *vha)
3699 {
3700 	int             rval;
3701 	uint8_t         chksum = 0;
3702 	uint16_t        cnt;
3703 	uint8_t         *dptr1, *dptr2;
3704 	struct qla_hw_data *ha = vha->hw;
3705 	init_cb_t       *icb = ha->init_cb;
3706 	nvram_t         *nv = ha->nvram;
3707 	uint8_t         *ptr = ha->nvram;
3708 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3709 
3710 	rval = QLA_SUCCESS;
3711 
3712 	/* Determine NVRAM starting address. */
3713 	ha->nvram_size = sizeof(nvram_t);
3714 	ha->nvram_base = 0;
3715 	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
3716 		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
3717 			ha->nvram_base = 0x80;
3718 
3719 	/* Get NVRAM data and calculate checksum. */
3720 	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
3721 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
3722 		chksum += *ptr++;
3723 
3724 	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
3725 	    "Contents of NVRAM.\n");
3726 	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
3727 	    (uint8_t *)nv, ha->nvram_size);
3728 
3729 	/* Bad NVRAM data, set defaults parameters. */
3730 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
3731 	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
3732 		/* Reset NVRAM data. */
3733 		ql_log(ql_log_warn, vha, 0x0064,
3734 		    "Inconsistent NVRAM "
3735 		    "detected: checksum=0x%x id=%c version=0x%x.\n",
3736 		    chksum, nv->id[0], nv->nvram_version);
3737 		ql_log(ql_log_warn, vha, 0x0065,
3738 		    "Falling back to "
3739 		    "functioning (yet invalid -- WWPN) defaults.\n");
3740 
3741 		/*
3742 		 * Set default initialization control block.
3743 		 */
3744 		memset(nv, 0, ha->nvram_size);
3745 		nv->parameter_block_version = ICB_VERSION;
3746 
3747 		if (IS_QLA23XX(ha)) {
3748 			nv->firmware_options[0] = BIT_2 | BIT_1;
3749 			nv->firmware_options[1] = BIT_7 | BIT_5;
3750 			nv->add_firmware_options[0] = BIT_5;
3751 			nv->add_firmware_options[1] = BIT_5 | BIT_4;
3752 			nv->frame_payload_size = 2048;
3753 			nv->special_options[1] = BIT_7;
3754 		} else if (IS_QLA2200(ha)) {
3755 			nv->firmware_options[0] = BIT_2 | BIT_1;
3756 			nv->firmware_options[1] = BIT_7 | BIT_5;
3757 			nv->add_firmware_options[0] = BIT_5;
3758 			nv->add_firmware_options[1] = BIT_5 | BIT_4;
3759 			nv->frame_payload_size = 1024;
3760 		} else if (IS_QLA2100(ha)) {
3761 			nv->firmware_options[0] = BIT_3 | BIT_1;
3762 			nv->firmware_options[1] = BIT_5;
3763 			nv->frame_payload_size = 1024;
3764 		}
3765 
3766 		nv->max_iocb_allocation = cpu_to_le16(256);
3767 		nv->execution_throttle = cpu_to_le16(16);
3768 		nv->retry_count = 8;
3769 		nv->retry_delay = 1;
3770 
3771 		nv->port_name[0] = 33;
3772 		nv->port_name[3] = 224;
3773 		nv->port_name[4] = 139;
3774 
3775 		qla2xxx_nvram_wwn_from_ofw(vha, nv);
3776 
3777 		nv->login_timeout = 4;
3778 
3779 		/*
3780 		 * Set default host adapter parameters
3781 		 */
3782 		nv->host_p[1] = BIT_2;
3783 		nv->reset_delay = 5;
3784 		nv->port_down_retry_count = 8;
3785 		nv->max_luns_per_target = cpu_to_le16(8);
3786 		nv->link_down_timeout = 60;
3787 
3788 		rval = 1;
3789 	}
3790 
3791 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
3792 	/*
3793 	 * The SN2 does not provide BIOS emulation which means you can't change
3794 	 * potentially bogus BIOS settings. Force the use of default settings
3795 	 * for link rate and frame size.  Hope that the rest of the settings
3796 	 * are valid.
3797 	 */
3798 	if (ia64_platform_is("sn2")) {
3799 		nv->frame_payload_size = 2048;
3800 		if (IS_QLA23XX(ha))
3801 			nv->special_options[1] = BIT_7;
3802 	}
3803 #endif
3804 
3805 	/* Reset Initialization control block */
3806 	memset(icb, 0, ha->init_cb_size);
3807 
3808 	/*
3809 	 * Setup driver NVRAM options.
3810 	 */
3811 	nv->firmware_options[0] |= (BIT_6 | BIT_1);
3812 	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
3813 	nv->firmware_options[1] |= (BIT_5 | BIT_0);
3814 	nv->firmware_options[1] &= ~BIT_4;
3815 
3816 	if (IS_QLA23XX(ha)) {
3817 		nv->firmware_options[0] |= BIT_2;
3818 		nv->firmware_options[0] &= ~BIT_3;
3819 		nv->special_options[0] &= ~BIT_6;
3820 		nv->add_firmware_options[1] |= BIT_5 | BIT_4;
3821 
3822 		if (IS_QLA2300(ha)) {
3823 			if (ha->fb_rev == FPM_2310) {
3824 				strcpy(ha->model_number, "QLA2310");
3825 			} else {
3826 				strcpy(ha->model_number, "QLA2300");
3827 			}
3828 		} else {
3829 			qla2x00_set_model_info(vha, nv->model_number,
3830 			    sizeof(nv->model_number), "QLA23xx");
3831 		}
3832 	} else if (IS_QLA2200(ha)) {
3833 		nv->firmware_options[0] |= BIT_2;
3834 		/*
3835 		 * 'Point-to-point preferred, else loop' is not a safe
3836 		 * connection mode setting.
3837 		 */
3838 		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
3839 		    (BIT_5 | BIT_4)) {
3840 			/* Force 'loop preferred, else point-to-point'. */
3841 			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
3842 			nv->add_firmware_options[0] |= BIT_5;
3843 		}
3844 		strcpy(ha->model_number, "QLA22xx");
3845 	} else /*if (IS_QLA2100(ha))*/ {
3846 		strcpy(ha->model_number, "QLA2100");
3847 	}
3848 
3849 	/*
3850 	 * Copy over NVRAM RISC parameter block to initialization control block.
3851 	 */
3852 	dptr1 = (uint8_t *)icb;
3853 	dptr2 = (uint8_t *)&nv->parameter_block_version;
3854 	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
3855 	while (cnt--)
3856 		*dptr1++ = *dptr2++;
3857 
3858 	/* Copy 2nd half. */
3859 	dptr1 = (uint8_t *)icb->add_firmware_options;
3860 	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
3861 	while (cnt--)
3862 		*dptr1++ = *dptr2++;
3863 
3864 	/* Use alternate WWN? */
3865 	if (nv->host_p[1] & BIT_7) {
3866 		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
3867 		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
3868 	}
3869 
3870 	/* Prepare nodename */
3871 	if ((icb->firmware_options[1] & BIT_6) == 0) {
3872 		/*
3873 		 * Firmware will apply the following mask if the nodename was
3874 		 * not provided.
3875 		 */
3876 		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
3877 		icb->node_name[0] &= 0xF0;
3878 	}
3879 
3880 	/*
3881 	 * Set host adapter parameters.
3882 	 */
3883 
3884 	/*
3885 	 * BIT_7 in the host-parameters section allows for modification to
3886 	 * internal driver logging.
3887 	 */
3888 	if (nv->host_p[0] & BIT_7)
3889 		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
3890 	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
3891 	/* Always load RISC code on non ISP2[12]00 chips. */
3892 	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
3893 		ha->flags.disable_risc_code_load = 0;
3894 	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
3895 	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
3896 	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
3897 	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
3898 	ha->flags.disable_serdes = 0;
3899 
3900 	ha->operating_mode =
3901 	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
3902 
3903 	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
3904 	    sizeof(ha->fw_seriallink_options));
3905 
3906 	/* save HBA serial number */
3907 	ha->serial0 = icb->port_name[5];
3908 	ha->serial1 = icb->port_name[6];
3909 	ha->serial2 = icb->port_name[7];
3910 	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
3911 	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
3912 
3913 	icb->execution_throttle = cpu_to_le16(0xFFFF);
3914 
3915 	ha->retry_count = nv->retry_count;
3916 
3917 	/* Set minimum login_timeout to 4 seconds. */
3918 	if (nv->login_timeout != ql2xlogintimeout)
3919 		nv->login_timeout = ql2xlogintimeout;
3920 	if (nv->login_timeout < 4)
3921 		nv->login_timeout = 4;
3922 	ha->login_timeout = nv->login_timeout;
3923 
3924 	/* Set minimum RATOV to 100 tenths of a second. */
3925 	ha->r_a_tov = 100;
3926 
3927 	ha->loop_reset_delay = nv->reset_delay;
3928 
3929 	/* Link Down Timeout = 0:
3930 	 *
3931 	 * 	When Port Down timer expires we will start returning
3932 	 *	I/O's to OS with "DID_NO_CONNECT".
3933 	 *
3934 	 * Link Down Timeout != 0:
3935 	 *
3936 	 *	 The driver waits for the link to come up after link down
3937 	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
3938 	 */
3939 	if (nv->link_down_timeout == 0) {
3940 		ha->loop_down_abort_time =
3941 		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
3942 	} else {
3943 		ha->link_down_timeout =	 nv->link_down_timeout;
3944 		ha->loop_down_abort_time =
3945 		    (LOOP_DOWN_TIME - ha->link_down_timeout);
3946 	}
3947 
3948 	/*
3949 	 * Need enough time to try and get the port back.
3950 	 */
3951 	ha->port_down_retry_count = nv->port_down_retry_count;
3952 	if (qlport_down_retry)
3953 		ha->port_down_retry_count = qlport_down_retry;
3954 	/* Set login_retry_count */
3955 	ha->login_retry_count  = nv->retry_count;
3956 	if (ha->port_down_retry_count == nv->port_down_retry_count &&
3957 	    ha->port_down_retry_count > 3)
3958 		ha->login_retry_count = ha->port_down_retry_count;
3959 	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
3960 		ha->login_retry_count = ha->port_down_retry_count;
3961 	if (ql2xloginretrycount)
3962 		ha->login_retry_count = ql2xloginretrycount;
3963 
3964 	icb->lun_enables = cpu_to_le16(0);
3965 	icb->command_resource_count = 0;
3966 	icb->immediate_notify_resource_count = 0;
3967 	icb->timeout = cpu_to_le16(0);
3968 
3969 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3970 		/* Enable RIO */
3971 		icb->firmware_options[0] &= ~BIT_3;
3972 		icb->add_firmware_options[0] &=
3973 		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
3974 		icb->add_firmware_options[0] |= BIT_2;
3975 		icb->response_accumulation_timer = 3;
3976 		icb->interrupt_delay_timer = 5;
3977 
3978 		vha->flags.process_response_queue = 1;
3979 	} else {
3980 		/* Enable ZIO. */
3981 		if (!vha->flags.init_done) {
3982 			ha->zio_mode = icb->add_firmware_options[0] &
3983 			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3984 			ha->zio_timer = icb->interrupt_delay_timer ?
3985 			    icb->interrupt_delay_timer: 2;
3986 		}
3987 		icb->add_firmware_options[0] &=
3988 		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
3989 		vha->flags.process_response_queue = 0;
3990 		if (ha->zio_mode != QLA_ZIO_DISABLED) {
3991 			ha->zio_mode = QLA_ZIO_MODE_6;
3992 
3993 			ql_log(ql_log_info, vha, 0x0068,
3994 			    "ZIO mode %d enabled; timer delay (%d us).\n",
3995 			    ha->zio_mode, ha->zio_timer * 100);
3996 
3997 			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
3998 			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
3999 			vha->flags.process_response_queue = 1;
4000 		}
4001 	}
4002 
4003 	if (rval) {
4004 		ql_log(ql_log_warn, vha, 0x0069,
4005 		    "NVRAM configuration failed.\n");
4006 	}
4007 	return (rval);
4008 }
4009 
4010 static void
4011 qla2x00_rport_del(void *data)
4012 {
4013 	fc_port_t *fcport = data;
4014 	struct fc_rport *rport;
4015 	unsigned long flags;
4016 
4017 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
4018 	rport = fcport->drport ? fcport->drport: fcport->rport;
4019 	fcport->drport = NULL;
4020 	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
4021 	if (rport) {
4022 		ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4023 		    "%s %8phN. rport %p roles %x\n",
4024 		    __func__, fcport->port_name, rport,
4025 		    rport->roles);
4026 
4027 		fc_remote_port_delete(rport);
4028 	}
4029 }
4030 
4031 /**
4032  * qla2x00_alloc_fcport() - Allocate a generic fcport.
4033  * @ha: HA context
4034  * @flags: allocation flags
4035  *
4036  * Returns a pointer to the allocated fcport, or NULL, if none available.
4037  */
4038 fc_port_t *
4039 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
4040 {
4041 	fc_port_t *fcport;
4042 
4043 	fcport = kzalloc(sizeof(fc_port_t), flags);
4044 	if (!fcport)
4045 		return NULL;
4046 
4047 	/* Setup fcport template structure. */
4048 	fcport->vha = vha;
4049 	fcport->port_type = FCT_UNKNOWN;
4050 	fcport->loop_id = FC_NO_LOOP_ID;
4051 	qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
4052 	fcport->supported_classes = FC_COS_UNSPECIFIED;
4053 
4054 	fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4055 		sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
4056 		flags);
4057 	fcport->disc_state = DSC_DELETED;
4058 	fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4059 	fcport->deleted = QLA_SESS_DELETED;
4060 	fcport->login_retry = vha->hw->login_retry_count;
4061 	fcport->login_retry = 5;
4062 	fcport->logout_on_delete = 1;
4063 
4064 	if (!fcport->ct_desc.ct_sns) {
4065 		ql_log(ql_log_warn, vha, 0xd049,
4066 		    "Failed to allocate ct_sns request.\n");
4067 		kfree(fcport);
4068 		fcport = NULL;
4069 	}
4070 	INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
4071 	INIT_LIST_HEAD(&fcport->gnl_entry);
4072 	INIT_LIST_HEAD(&fcport->list);
4073 
4074 	return fcport;
4075 }
4076 
4077 void
4078 qla2x00_free_fcport(fc_port_t *fcport)
4079 {
4080 	if (fcport->ct_desc.ct_sns) {
4081 		dma_free_coherent(&fcport->vha->hw->pdev->dev,
4082 			sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
4083 			fcport->ct_desc.ct_sns_dma);
4084 
4085 		fcport->ct_desc.ct_sns = NULL;
4086 	}
4087 	kfree(fcport);
4088 }
4089 
4090 /*
4091  * qla2x00_configure_loop
4092  *      Updates Fibre Channel Device Database with what is actually on loop.
4093  *
4094  * Input:
4095  *      ha                = adapter block pointer.
4096  *
4097  * Returns:
4098  *      0 = success.
4099  *      1 = error.
4100  *      2 = database was full and device was not configured.
4101  */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int  rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;
	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/*
	 * Snapshot dpc_flags.  The copy in 'flags' drives the topology
	 * decisions below; 'save_flags' is kept to re-arm the update bits
	 * at the end if a resync event fires while we are processing.
	 * NOTE: 'flags' is reused later for spin_lock_irqsave() -- safe
	 * because every test_bit() on the snapshot happens before then.
	 */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		/* Point-to-point fabric: no local loop to scan. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
	} else if (ha->current_topology == ISP_CFG_NL) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Offline or mid ISP-abort: rescan everything. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				/* 27xx/83xx have a dedicated ATIO lock;
				 * older ISPs serialize on hardware_lock. */
				if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
					spin_lock_irqsave(&ha->tgt.atio_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 0);
					spin_unlock_irqrestore(
					    &ha->tgt.atio_lock, flags);
				} else {
					spin_lock_irqsave(&ha->hardware_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 1);
					spin_unlock_irqrestore(
					    &ha->hardware_lock, flags);
				}
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
4227 
4228 
4229 
4230 /*
4231  * qla2x00_configure_local_loop
4232  *	Updates Fibre Channel Device Database with local loop devices.
4233  *
4234  * Input:
4235  *	ha = adapter block pointer.
4236  *
4237  * Returns:
4238  *	0 = success.
4239  */
4240 static int
4241 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4242 {
4243 	int		rval, rval2;
4244 	int		found_devs;
4245 	int		found;
4246 	fc_port_t	*fcport, *new_fcport;
4247 
4248 	uint16_t	index;
4249 	uint16_t	entries;
4250 	char		*id_iter;
4251 	uint16_t	loop_id;
4252 	uint8_t		domain, area, al_pa;
4253 	struct qla_hw_data *ha = vha->hw;
4254 	unsigned long flags;
4255 
4256 	found_devs = 0;
4257 	new_fcport = NULL;
4258 	entries = MAX_FIBRE_DEVICES_LOOP;
4259 
4260 	/* Get list of logged in devices. */
4261 	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
4262 	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
4263 	    &entries);
4264 	if (rval != QLA_SUCCESS)
4265 		goto cleanup_allocation;
4266 
4267 	ql_dbg(ql_dbg_disc, vha, 0x2011,
4268 	    "Entries in ID list (%d).\n", entries);
4269 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
4270 	    (uint8_t *)ha->gid_list,
4271 	    entries * sizeof(struct gid_list_info));
4272 
4273 	/* Allocate temporary fcport for any new fcports discovered. */
4274 	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
4275 	if (new_fcport == NULL) {
4276 		ql_log(ql_log_warn, vha, 0x2012,
4277 		    "Memory allocation failed for fcport.\n");
4278 		rval = QLA_MEMORY_ALLOC_FAILED;
4279 		goto cleanup_allocation;
4280 	}
4281 	new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4282 
4283 	/*
4284 	 * Mark local devices that were present with FCF_DEVICE_LOST for now.
4285 	 */
4286 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
4287 		if (atomic_read(&fcport->state) == FCS_ONLINE &&
4288 		    fcport->port_type != FCT_BROADCAST &&
4289 		    (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
4290 
4291 			ql_dbg(ql_dbg_disc, vha, 0x2096,
4292 			    "Marking port lost loop_id=0x%04x.\n",
4293 			    fcport->loop_id);
4294 
4295 			qla2x00_mark_device_lost(vha, fcport, 0, 0);
4296 		}
4297 	}
4298 
4299 	/* Add devices to port list. */
4300 	id_iter = (char *)ha->gid_list;
4301 	for (index = 0; index < entries; index++) {
4302 		domain = ((struct gid_list_info *)id_iter)->domain;
4303 		area = ((struct gid_list_info *)id_iter)->area;
4304 		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
4305 		if (IS_QLA2100(ha) || IS_QLA2200(ha))
4306 			loop_id = (uint16_t)
4307 			    ((struct gid_list_info *)id_iter)->loop_id_2100;
4308 		else
4309 			loop_id = le16_to_cpu(
4310 			    ((struct gid_list_info *)id_iter)->loop_id);
4311 		id_iter += ha->gid_list_info_size;
4312 
4313 		/* Bypass reserved domain fields. */
4314 		if ((domain & 0xf0) == 0xf0)
4315 			continue;
4316 
4317 		/* Bypass if not same domain and area of adapter. */
4318 		if (area && domain &&
4319 		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
4320 			continue;
4321 
4322 		/* Bypass invalid local loop ID. */
4323 		if (loop_id > LAST_LOCAL_LOOP_ID)
4324 			continue;
4325 
4326 		memset(new_fcport->port_name, 0, WWN_SIZE);
4327 
4328 		/* Fill in member data. */
4329 		new_fcport->d_id.b.domain = domain;
4330 		new_fcport->d_id.b.area = area;
4331 		new_fcport->d_id.b.al_pa = al_pa;
4332 		new_fcport->loop_id = loop_id;
4333 
4334 		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
4335 		if (rval2 != QLA_SUCCESS) {
4336 			ql_dbg(ql_dbg_disc, vha, 0x2097,
4337 			    "Failed to retrieve fcport information "
4338 			    "-- get_port_database=%x, loop_id=0x%04x.\n",
4339 			    rval2, new_fcport->loop_id);
4340 			ql_dbg(ql_dbg_disc, vha, 0x2105,
4341 			    "Scheduling resync.\n");
4342 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4343 			continue;
4344 		}
4345 
4346 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4347 		/* Check for matching device in port list. */
4348 		found = 0;
4349 		fcport = NULL;
4350 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
4351 			if (memcmp(new_fcport->port_name, fcport->port_name,
4352 			    WWN_SIZE))
4353 				continue;
4354 
4355 			fcport->flags &= ~FCF_FABRIC_DEVICE;
4356 			fcport->loop_id = new_fcport->loop_id;
4357 			fcport->port_type = new_fcport->port_type;
4358 			fcport->d_id.b24 = new_fcport->d_id.b24;
4359 			memcpy(fcport->node_name, new_fcport->node_name,
4360 			    WWN_SIZE);
4361 
4362 			if (!fcport->login_succ) {
4363 				vha->fcport_count++;
4364 				fcport->login_succ = 1;
4365 				fcport->disc_state = DSC_LOGIN_COMPLETE;
4366 			}
4367 
4368 			found++;
4369 			break;
4370 		}
4371 
4372 		if (!found) {
4373 			/* New device, add to fcports list. */
4374 			list_add_tail(&new_fcport->list, &vha->vp_fcports);
4375 
4376 			/* Allocate a new replacement fcport. */
4377 			fcport = new_fcport;
4378 			if (!fcport->login_succ) {
4379 				vha->fcport_count++;
4380 				fcport->login_succ = 1;
4381 				fcport->disc_state = DSC_LOGIN_COMPLETE;
4382 			}
4383 
4384 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4385 
4386 			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
4387 
4388 			if (new_fcport == NULL) {
4389 				ql_log(ql_log_warn, vha, 0xd031,
4390 				    "Failed to allocate memory for fcport.\n");
4391 				rval = QLA_MEMORY_ALLOC_FAILED;
4392 				goto cleanup_allocation;
4393 			}
4394 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4395 			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4396 		}
4397 
4398 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4399 
4400 		/* Base iIDMA settings on HBA port speed. */
4401 		fcport->fp_speed = ha->link_data_rate;
4402 
4403 		qla2x00_update_fcport(vha, fcport);
4404 
4405 		found_devs++;
4406 	}
4407 
4408 cleanup_allocation:
4409 	kfree(new_fcport);
4410 
4411 	if (rval != QLA_SUCCESS) {
4412 		ql_dbg(ql_dbg_disc, vha, 0x2098,
4413 		    "Configure local loop error exit: rval=%x.\n", rval);
4414 	}
4415 
4416 	return (rval);
4417 }
4418 
4419 static void
4420 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
4421 {
4422 	int rval;
4423 	uint16_t mb[MAILBOX_REGISTER_COUNT];
4424 	struct qla_hw_data *ha = vha->hw;
4425 
4426 	if (!IS_IIDMA_CAPABLE(ha))
4427 		return;
4428 
4429 	if (atomic_read(&fcport->state) != FCS_ONLINE)
4430 		return;
4431 
4432 	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
4433 	    fcport->fp_speed > ha->link_data_rate)
4434 		return;
4435 
4436 	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
4437 	    mb);
4438 	if (rval != QLA_SUCCESS) {
4439 		ql_dbg(ql_dbg_disc, vha, 0x2004,
4440 		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
4441 		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
4442 	} else {
4443 		ql_dbg(ql_dbg_disc, vha, 0x2005,
4444 		    "iIDMA adjusted to %s GB/s on %8phN.\n",
4445 		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
4446 		    fcport->port_name);
4447 	}
4448 }
4449 
4450 /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
4451 static void
4452 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
4453 {
4454 	struct fc_rport_identifiers rport_ids;
4455 	struct fc_rport *rport;
4456 	unsigned long flags;
4457 
4458 	rport_ids.node_name = wwn_to_u64(fcport->node_name);
4459 	rport_ids.port_name = wwn_to_u64(fcport->port_name);
4460 	rport_ids.port_id = fcport->d_id.b.domain << 16 |
4461 	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
4462 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4463 	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
4464 	if (!rport) {
4465 		ql_log(ql_log_warn, vha, 0x2006,
4466 		    "Unable to allocate fc remote port.\n");
4467 		return;
4468 	}
4469 
4470 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
4471 	*((fc_port_t **)rport->dd_data) = fcport;
4472 	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
4473 
4474 	rport->supported_classes = fcport->supported_classes;
4475 
4476 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4477 	if (fcport->port_type == FCT_INITIATOR)
4478 		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
4479 	if (fcport->port_type == FCT_TARGET)
4480 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
4481 
4482 	ql_dbg(ql_dbg_disc, vha, 0x20ee,
4483 	    "%s %8phN. rport %p is %s mode\n",
4484 	    __func__, fcport->port_name, rport,
4485 	    (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
4486 
4487 	fc_remote_port_rolechg(rport, rport_ids.roles);
4488 }
4489 
4490 /*
4491  * qla2x00_update_fcport
4492  *	Updates device on list.
4493  *
4494  * Input:
4495  *	ha = adapter block pointer.
4496  *	fcport = port structure pointer.
4497  *
 * Return:
 *	None (the function returns void; registration failures are logged).
4501  *
4502  * Context:
4503  *	Kernel context.
4504  */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	fcport->vha = vha;

	/* Reserved/well-known addresses are not real remote ports. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	/* ISPFx00: mark online and go straight to transport registration. */
	if (IS_QLAFX00(vha->hw)) {
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		goto reg_port;
	}
	/* Login is complete: reset retry/login bookkeeping. */
	fcport->login_retry = 0;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->disc_state = DSC_LOGIN_COMPLETE;
	fcport->deleted = 0;
	fcport->logout_on_delete = 1;

	/* FC-NVMe ports register with the NVMe transport instead. */
	if (fcport->fc4f_nvme) {
		qla_nvme_register_remote(vha, fcport);
		return;
	}

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);

reg_port:
	/* Register with the SCSI FC transport and/or the target core,
	 * depending on the host's active mode. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}
}
4555 
4556 /*
4557  * qla2x00_configure_fabric
4558  *      Setup SNS devices with loop ID's.
4559  *
4560  * Input:
4561  *      ha = adapter block pointer.
4562  *
4563  * Returns:
4564  *      0 = success.
4565  *      BIT_0 = error
4566  */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int	rval;
	fc_port_t	*fcport;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];
	uint16_t	loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int		discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* No fabric attached -- not an error for this routine. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;


	/* In target/dual mode, ask the fabric to forward RSCNs to us. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
				"Failed to enable receiving of RSCN requests: 0x%x.\n",
				rval);
	}


	/* do { } while (0) so registration failures can 'break' straight
	 * to the common exit path below. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Ensure we are logged into the SNS. */
		if (IS_FWI2_CAPABLE(ha))
			loop_id = NPH_SNS;
		else
			loop_id = SIMPLE_NAME_SERVER;
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS) {
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}
		if (mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
			    "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
			    mb[2], mb[6], mb[7]);
			return (QLA_SUCCESS);
		}

		/* Register FC-4 types/features and names with the name
		 * server; bail out early if a loop resync was requested. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}

		/* Assume every known port is gone until the scan finds it. */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			fcport->scan_state = QLA_FCPORT_SCAN;
		}

		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen. */
		qlt_do_generation_tick(vha, &discovery_gen);

		rval = qla2x00_find_all_fabric_devs(vha);
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}
4695 
4696 /*
4697  * qla2x00_find_all_fabric_devs
4698  *
4699  * Input:
4700  *	ha = adapter block pointer.
4701  *	dev = database device entry pointer.
4702  *
4703  * Returns:
4704  *	0 = success.
4705  *
4706  * Context:
4707  *	Kernel context.
4708  */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int		rval;
	uint16_t	loop_id;
	fc_port_t	*fcport, *new_fcport;
	int		found;

	sw_info_t	*swl;
	int		swl_idx;
	int		first_dev, last_dev;
	port_id_t	wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN. */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		/* Any name-server bulk query failing drops us back to
		 * GA_NXT mode (swl = NULL), unless a resync is pending. */
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		/* FL topology going down or transitioning: abandon the
		 * scan and schedule a full resync. */
		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			if (last_dev) {
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				/* Consume the next cached name-server entry
				 * into the scratch fcport. */
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4f_nvme) {
					new_fcport->fc4f_nvme =
					    swl[swl_idx].fc4f_nvme;
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			    continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
			continue;

		/* sess_lock protects the fcport list against the target
		 * mode session machinery. */
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
					 "port changed FC ID, %8phC"
					 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
					 fcport->port_name,
					 fcport->d_id.b.domain,
					 fcport->d_id.b.area,
					 fcport->d_id.b.al_pa,
					 fcport->loop_id,
					 new_fcport->d_id.b.domain,
					 new_fcport->d_id.b.area,
					 new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	/* Release the unused scratch fcport (frees its CT SNS buffer too). */
	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
		    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
			continue;

		/* Still marked QLA_FCPORT_SCAN: the scan did not see this
		 * port, so it has disappeared from the fabric. */
		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion_lock
						(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}
4997 }
4998 
4999 /*
5000  * qla2x00_find_new_loop_id
5001  *	Scan through our port list and find a new usable loop ID.
5002  *
5003  * Input:
5004  *	ha:	adapter state pointer.
5005  *	dev:	port structure pointer.
5006  *
5007  * Returns:
5008  *	qla2x00 local function return status code.
5009  *
5010  * Context:
5011  *	Kernel context.
5012  */
5013 int
5014 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
5015 {
5016 	int	rval;
5017 	struct qla_hw_data *ha = vha->hw;
5018 	unsigned long flags = 0;
5019 
5020 	rval = QLA_SUCCESS;
5021 
5022 	spin_lock_irqsave(&ha->vport_slock, flags);
5023 
5024 	dev->loop_id = find_first_zero_bit(ha->loop_id_map,
5025 	    LOOPID_MAP_SIZE);
5026 	if (dev->loop_id >= LOOPID_MAP_SIZE ||
5027 	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
5028 		dev->loop_id = FC_NO_LOOP_ID;
5029 		rval = QLA_FUNCTION_FAILED;
5030 	} else
5031 		set_bit(dev->loop_id, ha->loop_id_map);
5032 
5033 	spin_unlock_irqrestore(&ha->vport_slock, flags);
5034 
5035 	if (rval == QLA_SUCCESS)
5036 		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
5037 		    "Assigning new loopid=%x, portid=%x.\n",
5038 		    dev->loop_id, dev->d_id.b24);
5039 	else
5040 		ql_log(ql_log_warn, dev->vha, 0x2087,
5041 		    "No loop_id's available, portid=%x.\n",
5042 		    dev->d_id.b24);
5043 
5044 	return (rval);
5045 }
5046 
5047 
5048 /*
5049  * qla2x00_fabric_login
5050  *	Issue fabric login command.
5051  *
5052  * Input:
5053  *	ha = adapter block pointer.
5054  *	device = pointer to FC device type structure.
5055  *
5056  * Returns:
5057  *      0 - Login successfully
5058  *      1 - Login failed
5059  *      2 - Initiator device
5060  *      3 - Fatal error
5061  */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Retry the login until the firmware reports a terminal mailbox
	 * status; each iteration may switch to a different loop ID. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is save
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] BIT_0: remote is an initiator;
			 * otherwise a target (BIT_1 flags FCP-2 support). */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10]: supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1, 0);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}
5192 
5193 /*
5194  * qla2x00_local_device_login
5195  *	Issue local device login command.
5196  *
5197  * Input:
5198  *	ha = adapter block pointer.
5199  *	loop_id = loop id of device to login to.
5200  *
5201  * Returns (Where's the #define!!!!):
5202  *      0 - Login successfully
5203  *      1 - Login failed
5204  *      3 - Fatal error
5205  */
5206 int
5207 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
5208 {
5209 	int		rval;
5210 	uint16_t	mb[MAILBOX_REGISTER_COUNT];
5211 
5212 	memset(mb, 0, sizeof(mb));
5213 	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
5214 	if (rval == QLA_SUCCESS) {
5215 		/* Interrogate mailbox registers for any errors */
5216 		if (mb[0] == MBS_COMMAND_ERROR)
5217 			rval = 1;
5218 		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
5219 			/* device not in PCB table */
5220 			rval = 3;
5221 	}
5222 
5223 	return (rval);
5224 }
5225 
5226 /*
5227  *  qla2x00_loop_resync
5228  *      Resync with fibre channel devices.
5229  *
5230  * Input:
5231  *      ha = adapter block pointer.
5232  *
5233  * Returns:
5234  *      0 = success
5235  */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;
	struct req_que *req;
	struct rsp_que *rsp;

	req = vha->req;
	rsp = req->rsp;

	/* This resync supersedes any pending ISP-abort retry request. */
	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, req, rsp, 0, 0,
						MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				/*
				 * Keep re-running discovery while RSCNs keep
				 * re-setting LOOP_RESYNC_NEEDED, until the
				 * loop goes down, an ISP abort is requested,
				 * or the retry budget above is exhausted.
				 */
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	/* A pending ISP abort trumps whatever discovery reported. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}
5288 
5289 /*
5290 * qla2x00_perform_loop_resync
5291 * Description: This function will set the appropriate flags and call
5292 *              qla2x00_loop_resync. If successful loop will be resynced
5293 * Arguments : scsi_qla_host_t pointer
* return    : Success or Failure
5295 */
5296 
5297 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
5298 {
5299 	int32_t rval = 0;
5300 
5301 	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
5302 		/*Configure the flags so that resync happens properly*/
5303 		atomic_set(&ha->loop_down_timer, 0);
5304 		if (!(ha->device_flags & DFLG_NO_CABLE)) {
5305 			atomic_set(&ha->loop_state, LOOP_UP);
5306 			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
5307 			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
5308 			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
5309 
5310 			rval = qla2x00_loop_resync(ha);
5311 		} else
5312 			atomic_set(&ha->loop_state, LOOP_DEAD);
5313 
5314 		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
5315 	}
5316 
5317 	return rval;
5318 }
5319 
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Pin the vport so it cannot vanish while the lock drops. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				/*
				 * vport_slock is dropped around the rport
				 * removal (it may block); the vref_count
				 * taken above keeps this vha valid meanwhile.
				 */
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
5346 
5347 /* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}
	/*
	 * Each function has a 4-bit class-type field; PARTINFO1 covers
	 * functions 0-7.  Look for the first *other* FCoE function.
	 */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	/* None found in 0-7: scan PARTINFO2, which covers functions 8-15. */
	if (fcoe_other_function == 0xffff) {
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
	 * However consider only valid physical fcoe function numbers (0-15).
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 *    - No other protocol drivers present.
	 *    - This is the lowest among fcoe functions. */
	if (!(drv_presence & drv_presence_mask) &&
			(ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}
5406 
5407 static int
5408 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
5409 {
5410 	int rval = QLA_SUCCESS;
5411 	struct qla_hw_data *ha = vha->hw;
5412 	uint32_t drv_ack;
5413 
5414 	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5415 	if (rval == QLA_SUCCESS) {
5416 		drv_ack |= (1 << ha->portnum);
5417 		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5418 	}
5419 
5420 	return rval;
5421 }
5422 
5423 static int
5424 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
5425 {
5426 	int rval = QLA_SUCCESS;
5427 	struct qla_hw_data *ha = vha->hw;
5428 	uint32_t drv_ack;
5429 
5430 	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5431 	if (rval == QLA_SUCCESS) {
5432 		drv_ack &= ~(1 << ha->portnum);
5433 		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5434 	}
5435 
5436 	return rval;
5437 }
5438 
5439 static const char *
5440 qla83xx_dev_state_to_string(uint32_t dev_state)
5441 {
5442 	switch (dev_state) {
5443 	case QLA8XXX_DEV_COLD:
5444 		return "COLD/RE-INIT";
5445 	case QLA8XXX_DEV_INITIALIZING:
5446 		return "INITIALIZING";
5447 	case QLA8XXX_DEV_READY:
5448 		return "READY";
5449 	case QLA8XXX_DEV_NEED_RESET:
5450 		return "NEED RESET";
5451 	case QLA8XXX_DEV_NEED_QUIESCENT:
5452 		return "NEED QUIESCENT";
5453 	case QLA8XXX_DEV_FAILED:
5454 		return "FAILED";
5455 	case QLA8XXX_DEV_QUIESCENT:
5456 		return "QUIESCENT";
5457 	default:
5458 		return "Unknown";
5459 	}
5460 }
5461 
5462 /* Assumes idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_audit_reg = 0, duration_secs = 0;

	/*
	 * The audit register is packed as:
	 *     portnum | (audit_type << 7) | (payload << 8)
	 * where the payload is a start timestamp (seconds) or the
	 * completion duration (seconds).
	 */
	switch (audit_type) {
	case IDC_AUDIT_TIMESTAMP:
		/* Record when this function started the reset sequence. */
		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	case IDC_AUDIT_COMPLETION:
		/*
		 * NOTE(review): idc_audit_ts was stored in seconds above but
		 * is passed through jiffies_to_msecs() here — units look
		 * inconsistent; confirm the intended encoding.
		 */
		duration_secs = ((jiffies_to_msecs(jiffies) -
		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	default:
		ql_log(ql_log_warn, vha, 0xb078,
		    "Invalid audit type specified.\n");
		break;
	}
}
5491 
5492 /* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t  idc_control, dev_state;

	/* Respect the administrative "reset disabled" control bit. */
	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		/*
		 * Not the owner: another function drives the reset.  Poll
		 * until the device state leaves READY, dropping the IDC
		 * lock while sleeping so the owner can make progress.
		 */
		const char *state = qla83xx_dev_state_to_string(dev_state);
		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/* SV: XXX: Is timeout required here? */
		/* Wait for IDC state change READY -> NEED_RESET */
		while (dev_state == QLA8XXX_DEV_READY) {
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);
			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}
5533 
int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
	/* Write the shared IDC control word.  Double-underscore variant:
	 * callers are expected to hold the IDC lock. */
	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
5539 
int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
	/* Read the shared IDC control word.  Double-underscore variant:
	 * callers are expected to hold the IDC lock. */
	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
5545 
5546 static int
5547 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
5548 {
5549 	uint32_t drv_presence = 0;
5550 	struct qla_hw_data *ha = vha->hw;
5551 
5552 	qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5553 	if (drv_presence & (1 << ha->portnum))
5554 		return QLA_SUCCESS;
5555 	else
5556 		return QLA_TEST_FAILED;
5557 }
5558 
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058,
	    "Entered  %s().\n", __func__);

	/* A board already marked FAILED cannot be recovered here. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla83xx_idc_lock(vha, 0);

	/* Bail out if this function was removed from IDC participation. */
	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide whether this function owns the reset. */
	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Perform reset if we are the reset-owner,
	 * else wait till IDC state changes to READY/FAILED.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}
5607 
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	/* Lazily allocate the DMA dump buffer; reused on later dumps. */
	if (!ha->mctp_dump) {
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR	0x00000000
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/* Only function 0 restarts the NIC firmware, and only when no
	 * other NIC-core reset handler is already active. */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}
5661 
5662 /*
5663 * qla2x00_quiesce_io
5664 * Description: This function will block the new I/Os
*              It's not aborting any I/Os as context
5666 *              is not destroyed during quiescence
5667 * Arguments: scsi_qla_host_t
5668 * return   : void
5669 */
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		/* Take the loop down so no new I/O is issued, and mark
		 * devices lost on the base port and every vport. */
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
		list_for_each_entry(vp, &ha->vp_list, list)
			qla2x00_mark_all_devices_lost(vp, 0);
	} else {
		/* Loop already down: just rearm the timer if it expired. */
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
					LOOP_DOWN_TIME);
	}
	/* Wait for pending cmds to complete */
	qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
}
5693 
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	ha->flags.fw_started = 0;
	ha->flags.fw_init_done = 0;
	/* Bump the per-chip reset generation and propagate it to every
	 * queue pair. */
	ha->base_qpair->chip_reset++;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
				ha->base_qpair->chip_reset;
	}

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			/* Pin the vport while vport_slock is dropped. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp, 0);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure for ISP 82XX IO DMA is complete */
		if (IS_P3P_TYPE(ha)) {
			qla82xx_chip_reset_cleanup(vha);
			ql_log(ql_log_info, vha, 0x00b4,
			    "Done chip reset cleanup.\n");

			/* Done waiting for pending commands.
			 * Reset the online flag.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}
	/* memory barrier */
	wmb();
}
5791 
5792 /*
5793 *  qla2x00_abort_isp
5794 *      Resets ISP and aborts all outstanding commands.
5795 *
5796 * Input:
5797 *      ha           = adapter block pointer.
5798 *
5799 * Returns:
5800 *      0 = success
5801 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t        status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* PCI channel permanently gone: nothing more can be done. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			/* Re-arm FCE tracing if a buffer was allocated. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			/* Likewise re-arm the extended firmware trace. */
			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				/* Retry budget exhausted: disable the board. */
				if (ha->isp_abort_cnt == 0) {
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					ha->isp_ops->reset_adapter(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: start the retry budget. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);

		/* Propagate the abort to every virtual port. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		       __func__);
	}

	return(status);
}
5945 
5946 /*
5947 *  qla2x00_restart_isp
5948 *      restarts the ISP after a reset
5949 *
5950 * Input:
5951 *      ha = adapter block pointer.
5952 *
5953 * Returns:
5954 *      0 = success
5955 */
5956 static int
5957 qla2x00_restart_isp(scsi_qla_host_t *vha)
5958 {
5959 	int status = 0;
5960 	struct qla_hw_data *ha = vha->hw;
5961 	struct req_que *req = ha->req_q_map[0];
5962 	struct rsp_que *rsp = ha->rsp_q_map[0];
5963 
5964 	/* If firmware needs to be loaded */
5965 	if (qla2x00_isp_firmware(vha)) {
5966 		vha->flags.online = 0;
5967 		status = ha->isp_ops->chip_diag(vha);
5968 		if (!status)
5969 			status = qla2x00_setup_chip(vha);
5970 	}
5971 
5972 	if (!status && !(status = qla2x00_init_rings(vha))) {
5973 		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5974 		ha->flags.chip_reset_done = 1;
5975 
5976 		/* Initialize the queues in use */
5977 		qla25xx_init_queues(ha);
5978 
5979 		status = qla2x00_fw_ready(vha);
5980 		if (!status) {
5981 			/* Issue a marker after FW becomes ready. */
5982 			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5983 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5984 		}
5985 
5986 		/* if no cable then assume it's good */
5987 		if ((vha->device_flags & DFLG_NO_CABLE))
5988 			status = 0;
5989 	}
5990 	return (status);
5991 }
5992 
static int
qla25xx_init_queues(struct qla_hw_data *ha)
{
	struct rsp_que *rsp = NULL;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	/* Stays -1 if no additional queues are configured; otherwise the
	 * status of the last queue (re)initialized is returned. */
	int ret = -1;
	int i;

	/* Queue 0 is the base queue, handled elsewhere; start at 1. */
	for (i = 1; i < ha->max_rsp_queues; i++) {
		rsp = ha->rsp_q_map[i];
		if (rsp && test_bit(i, ha->rsp_qid_map)) {
			rsp->options &= ~BIT_0;
			ret = qla25xx_init_rsp_que(base_vha, rsp);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x00ff,
				    "%s Rsp que: %d init failed.\n",
				    __func__, rsp->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0100,
				    "%s Rsp que: %d inited.\n",
				    __func__, rsp->id);
		}
	}
	for (i = 1; i < ha->max_req_queues; i++) {
		req = ha->req_q_map[i];
		if (req && test_bit(i, ha->req_qid_map)) {
			/* Clear outstanding commands array. */
			req->options &= ~BIT_0;
			ret = qla25xx_init_req_que(base_vha, req);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x0101,
				    "%s Req que: %d init failed.\n",
				    __func__, req->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0102,
				    "%s Req que: %d inited.\n",
				    __func__, req->id);
		}
	}
	return ret;
}
6035 
6036 /*
6037 * qla2x00_reset_adapter
6038 *      Reset adapter.
6039 *
6040 * Input:
6041 *      ha = adapter block pointer.
6042 */
void
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Reset the RISC processor, then release it from reset; each
	 * write is followed by a read to flush PCI posting. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
6060 
void
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* P3P (ISP82xx-style) parts skip this register-level reset. */
	if (IS_P3P_TYPE(ha))
		return;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Reset the RISC, then release it from pause; the reads after
	 * each write flush PCI posting. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);
	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}
6084 
6085 /* On sparc systems, obtain port and node WWN from firmware
6086  * properties.
6087  */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
	struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	/* Override the NVRAM WWNs with OpenFirmware properties when they
	 * exist and are long enough; otherwise nv is left untouched.
	 * On non-SPARC builds this function is a no-op. */
	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}
6107 
6108 int
6109 qla24xx_nvram_config(scsi_qla_host_t *vha)
6110 {
6111 	int   rval;
6112 	struct init_cb_24xx *icb;
6113 	struct nvram_24xx *nv;
6114 	uint32_t *dptr;
6115 	uint8_t  *dptr1, *dptr2;
6116 	uint32_t chksum;
6117 	uint16_t cnt;
6118 	struct qla_hw_data *ha = vha->hw;
6119 
6120 	rval = QLA_SUCCESS;
6121 	icb = (struct init_cb_24xx *)ha->init_cb;
6122 	nv = ha->nvram;
6123 
6124 	/* Determine NVRAM starting address. */
6125 	if (ha->port_no == 0) {
6126 		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
6127 		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
6128 	} else {
6129 		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6130 		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
6131 	}
6132 
6133 	ha->nvram_size = sizeof(struct nvram_24xx);
6134 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
6135 
6136 	/* Get VPD data into cache */
6137 	ha->vpd = ha->nvram + VPD_OFFSET;
6138 	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
6139 	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
6140 
6141 	/* Get NVRAM data into cache and calculate checksum. */
6142 	dptr = (uint32_t *)nv;
6143 	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
6144 	    ha->nvram_size);
6145 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
6146 		chksum += le32_to_cpu(*dptr);
6147 
6148 	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
6149 	    "Contents of NVRAM\n");
6150 	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
6151 	    (uint8_t *)nv, ha->nvram_size);
6152 
6153 	/* Bad NVRAM data, set defaults parameters. */
6154 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
6155 	    || nv->id[3] != ' ' ||
6156 	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
6157 		/* Reset NVRAM data. */
6158 		ql_log(ql_log_warn, vha, 0x006b,
6159 		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
6160 		    "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
6161 		ql_log(ql_log_warn, vha, 0x006c,
6162 		    "Falling back to functioning (yet invalid -- WWPN) "
6163 		    "defaults.\n");
6164 
6165 		/*
6166 		 * Set default initialization control block.
6167 		 */
6168 		memset(nv, 0, ha->nvram_size);
6169 		nv->nvram_version = cpu_to_le16(ICB_VERSION);
6170 		nv->version = cpu_to_le16(ICB_VERSION);
6171 		nv->frame_payload_size = 2048;
6172 		nv->execution_throttle = cpu_to_le16(0xFFFF);
6173 		nv->exchange_count = cpu_to_le16(0);
6174 		nv->hard_address = cpu_to_le16(124);
6175 		nv->port_name[0] = 0x21;
6176 		nv->port_name[1] = 0x00 + ha->port_no + 1;
6177 		nv->port_name[2] = 0x00;
6178 		nv->port_name[3] = 0xe0;
6179 		nv->port_name[4] = 0x8b;
6180 		nv->port_name[5] = 0x1c;
6181 		nv->port_name[6] = 0x55;
6182 		nv->port_name[7] = 0x86;
6183 		nv->node_name[0] = 0x20;
6184 		nv->node_name[1] = 0x00;
6185 		nv->node_name[2] = 0x00;
6186 		nv->node_name[3] = 0xe0;
6187 		nv->node_name[4] = 0x8b;
6188 		nv->node_name[5] = 0x1c;
6189 		nv->node_name[6] = 0x55;
6190 		nv->node_name[7] = 0x86;
6191 		qla24xx_nvram_wwn_from_ofw(vha, nv);
6192 		nv->login_retry_count = cpu_to_le16(8);
6193 		nv->interrupt_delay_timer = cpu_to_le16(0);
6194 		nv->login_timeout = cpu_to_le16(0);
6195 		nv->firmware_options_1 =
6196 		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
6197 		nv->firmware_options_2 = cpu_to_le32(2 << 4);
6198 		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6199 		nv->firmware_options_3 = cpu_to_le32(2 << 13);
6200 		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
6201 		nv->efi_parameters = cpu_to_le32(0);
6202 		nv->reset_delay = 5;
6203 		nv->max_luns_per_target = cpu_to_le16(128);
6204 		nv->port_down_retry_count = cpu_to_le16(30);
6205 		nv->link_down_timeout = cpu_to_le16(30);
6206 
6207 		rval = 1;
6208 	}
6209 
6210 	if (qla_tgt_mode_enabled(vha)) {
6211 		/* Don't enable full login after initial LIP */
6212 		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6213 		/* Don't enable LIP full login for initiator */
6214 		nv->host_p &= cpu_to_le32(~BIT_10);
6215 	}
6216 
6217 	qlt_24xx_config_nvram_stage1(vha, nv);
6218 
6219 	/* Reset Initialization control block */
6220 	memset(icb, 0, ha->init_cb_size);
6221 
6222 	/* Copy 1st segment. */
6223 	dptr1 = (uint8_t *)icb;
6224 	dptr2 = (uint8_t *)&nv->version;
6225 	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
6226 	while (cnt--)
6227 		*dptr1++ = *dptr2++;
6228 
6229 	icb->login_retry_count = nv->login_retry_count;
6230 	icb->link_down_on_nos = nv->link_down_on_nos;
6231 
6232 	/* Copy 2nd segment. */
6233 	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
6234 	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
6235 	cnt = (uint8_t *)&icb->reserved_3 -
6236 	    (uint8_t *)&icb->interrupt_delay_timer;
6237 	while (cnt--)
6238 		*dptr1++ = *dptr2++;
6239 
6240 	/*
6241 	 * Setup driver NVRAM options.
6242 	 */
6243 	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
6244 	    "QLA2462");
6245 
6246 	qlt_24xx_config_nvram_stage2(vha, icb);
6247 
6248 	if (nv->host_p & cpu_to_le32(BIT_15)) {
6249 		/* Use alternate WWN? */
6250 		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
6251 		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
6252 	}
6253 
6254 	/* Prepare nodename */
6255 	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
6256 		/*
6257 		 * Firmware will apply the following mask if the nodename was
6258 		 * not provided.
6259 		 */
6260 		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
6261 		icb->node_name[0] &= 0xF0;
6262 	}
6263 
6264 	/* Set host adapter parameters. */
6265 	ha->flags.disable_risc_code_load = 0;
6266 	ha->flags.enable_lip_reset = 0;
6267 	ha->flags.enable_lip_full_login =
6268 	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
6269 	ha->flags.enable_target_reset =
6270 	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
6271 	ha->flags.enable_led_scheme = 0;
6272 	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
6273 
6274 	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
6275 	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
6276 
6277 	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
6278 	    sizeof(ha->fw_seriallink_options24));
6279 
6280 	/* save HBA serial number */
6281 	ha->serial0 = icb->port_name[5];
6282 	ha->serial1 = icb->port_name[6];
6283 	ha->serial2 = icb->port_name[7];
6284 	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6285 	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
6286 
6287 	icb->execution_throttle = cpu_to_le16(0xFFFF);
6288 
6289 	ha->retry_count = le16_to_cpu(nv->login_retry_count);
6290 
6291 	/* Set minimum login_timeout to 4 seconds. */
6292 	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6293 		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6294 	if (le16_to_cpu(nv->login_timeout) < 4)
6295 		nv->login_timeout = cpu_to_le16(4);
6296 	ha->login_timeout = le16_to_cpu(nv->login_timeout);
6297 
6298 	/* Set minimum RATOV to 100 tenths of a second. */
6299 	ha->r_a_tov = 100;
6300 
6301 	ha->loop_reset_delay = nv->reset_delay;
6302 
6303 	/* Link Down Timeout = 0:
6304 	 *
6305 	 * 	When Port Down timer expires we will start returning
6306 	 *	I/O's to OS with "DID_NO_CONNECT".
6307 	 *
6308 	 * Link Down Timeout != 0:
6309 	 *
6310 	 *	 The driver waits for the link to come up after link down
6311 	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
6312 	 */
6313 	if (le16_to_cpu(nv->link_down_timeout) == 0) {
6314 		ha->loop_down_abort_time =
6315 		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
6316 	} else {
6317 		ha->link_down_timeout =	le16_to_cpu(nv->link_down_timeout);
6318 		ha->loop_down_abort_time =
6319 		    (LOOP_DOWN_TIME - ha->link_down_timeout);
6320 	}
6321 
6322 	/* Need enough time to try and get the port back. */
6323 	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
6324 	if (qlport_down_retry)
6325 		ha->port_down_retry_count = qlport_down_retry;
6326 
6327 	/* Set login_retry_count */
6328 	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
6329 	if (ha->port_down_retry_count ==
6330 	    le16_to_cpu(nv->port_down_retry_count) &&
6331 	    ha->port_down_retry_count > 3)
6332 		ha->login_retry_count = ha->port_down_retry_count;
6333 	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
6334 		ha->login_retry_count = ha->port_down_retry_count;
6335 	if (ql2xloginretrycount)
6336 		ha->login_retry_count = ql2xloginretrycount;
6337 
6338 	/* Enable ZIO. */
6339 	if (!vha->flags.init_done) {
6340 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
6341 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
6342 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6343 		    le16_to_cpu(icb->interrupt_delay_timer): 2;
6344 	}
6345 	icb->firmware_options_2 &= cpu_to_le32(
6346 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
6347 	vha->flags.process_response_queue = 0;
6348 	if (ha->zio_mode != QLA_ZIO_DISABLED) {
6349 		ha->zio_mode = QLA_ZIO_MODE_6;
6350 
6351 		ql_log(ql_log_info, vha, 0x006f,
6352 		    "ZIO mode %d enabled; timer delay (%d us).\n",
6353 		    ha->zio_mode, ha->zio_timer * 100);
6354 
6355 		icb->firmware_options_2 |= cpu_to_le32(
6356 		    (uint32_t)ha->zio_mode);
6357 		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
6358 		vha->flags.process_response_queue = 1;
6359 	}
6360 
6361 	if (rval) {
6362 		ql_log(ql_log_warn, vha, 0x0070,
6363 		    "NVRAM configuration failed.\n");
6364 	}
6365 	return (rval);
6366 }
6367 
/*
 * qla27xx_find_valid_image() - Select which ISP27xx flash image to use.
 * @vha: adapter state
 *
 * Reads the primary and secondary image-status records from flash and
 * validates each one by signature and checksum (the little-endian
 * dwords of a valid record sum to zero).  The selection is stored in
 * ha->active_image: 0 (default boot-loader/firmware),
 * QLA27XX_PRIMARY_IMAGE or QLA27XX_SECONDARY_IMAGE.  Only records with
 * bit 0 of image_status_mask set are eligible; when both are eligible,
 * the secondary image wins only if its generation number is newer.
 *
 * Returns the value stored in ha->active_image.
 */
uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
{
	struct qla27xx_image_status pri_image_status, sec_image_status;
	uint8_t valid_pri_image, valid_sec_image;
	uint32_t *wptr;
	uint32_t cnt, chksum, size;
	struct qla_hw_data *ha = vha->hw;

	/* Assume both images valid until a check below says otherwise. */
	valid_pri_image = valid_sec_image = 1;
	ha->active_image = 0;
	/* Record size in dwords, used for both the read and the checksum. */
	size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);

	if (!ha->flt_region_img_status_pri) {
		valid_pri_image = 0;
		goto check_sec_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
	    ha->flt_region_img_status_pri, size);

	if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary image signature (0x%x) not valid\n",
		    pri_image_status.signature);
		valid_pri_image = 0;
		goto check_sec_image;
	}

	/* Sum every dword of the record; a valid record sums to zero. */
	wptr = (uint32_t *)(&pri_image_status);
	cnt = size;

	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);

	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Checksum validation failed for primary image (0x%x)\n",
		    chksum);
		valid_pri_image = 0;
	}

check_sec_image:
	if (!ha->flt_region_img_status_sec) {
		valid_sec_image = 0;
		goto check_valid_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
	    ha->flt_region_img_status_sec, size);

	if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary image signature(0x%x) not valid\n",
		    sec_image_status.signature);
		valid_sec_image = 0;
		goto check_valid_image;
	}

	/* Same zero-sum checksum rule as the primary record. */
	wptr = (uint32_t *)(&sec_image_status);
	cnt = size;
	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);
	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018e,
		    "Checksum validation failed for secondary image (0x%x)\n",
		    chksum);
		valid_sec_image = 0;
	}

check_valid_image:
	/* Prefer primary; secondary overrides only with a newer generation. */
	if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
		ha->active_image = QLA27XX_PRIMARY_IMAGE;
	if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
		if (!ha->active_image ||
		    pri_image_status.generation_number <
		    sec_image_status.generation_number)
			ha->active_image = QLA27XX_SECONDARY_IMAGE;
	}

	ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
	    ha->active_image == 0 ? "default bootld and fw" :
	    ha->active_image == 1 ? "primary" :
	    ha->active_image == 2 ? "secondary" :
	    "Invalid");

	return ha->active_image;
}
6455 
6456 static int
6457 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
6458     uint32_t faddr)
6459 {
6460 	int	rval = QLA_SUCCESS;
6461 	int	segments, fragment;
6462 	uint32_t *dcode, dlen;
6463 	uint32_t risc_addr;
6464 	uint32_t risc_size;
6465 	uint32_t i;
6466 	struct qla_hw_data *ha = vha->hw;
6467 	struct req_que *req = ha->req_q_map[0];
6468 
6469 	ql_dbg(ql_dbg_init, vha, 0x008b,
6470 	    "FW: Loading firmware from flash (%x).\n", faddr);
6471 
6472 	rval = QLA_SUCCESS;
6473 
6474 	segments = FA_RISC_CODE_SEGMENTS;
6475 	dcode = (uint32_t *)req->ring;
6476 	*srisc_addr = 0;
6477 
6478 	if (IS_QLA27XX(ha) &&
6479 	    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
6480 		faddr = ha->flt_region_fw_sec;
6481 
6482 	/* Validate firmware image by checking version. */
6483 	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
6484 	for (i = 0; i < 4; i++)
6485 		dcode[i] = be32_to_cpu(dcode[i]);
6486 	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
6487 	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
6488 	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
6489 		dcode[3] == 0)) {
6490 		ql_log(ql_log_fatal, vha, 0x008c,
6491 		    "Unable to verify the integrity of flash firmware "
6492 		    "image.\n");
6493 		ql_log(ql_log_fatal, vha, 0x008d,
6494 		    "Firmware data: %08x %08x %08x %08x.\n",
6495 		    dcode[0], dcode[1], dcode[2], dcode[3]);
6496 
6497 		return QLA_FUNCTION_FAILED;
6498 	}
6499 
6500 	while (segments && rval == QLA_SUCCESS) {
6501 		/* Read segment's load information. */
6502 		qla24xx_read_flash_data(vha, dcode, faddr, 4);
6503 
6504 		risc_addr = be32_to_cpu(dcode[2]);
6505 		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
6506 		risc_size = be32_to_cpu(dcode[3]);
6507 
6508 		fragment = 0;
6509 		while (risc_size > 0 && rval == QLA_SUCCESS) {
6510 			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
6511 			if (dlen > risc_size)
6512 				dlen = risc_size;
6513 
6514 			ql_dbg(ql_dbg_init, vha, 0x008e,
6515 			    "Loading risc segment@ risc addr %x "
6516 			    "number of dwords 0x%x offset 0x%x.\n",
6517 			    risc_addr, dlen, faddr);
6518 
6519 			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
6520 			for (i = 0; i < dlen; i++)
6521 				dcode[i] = swab32(dcode[i]);
6522 
6523 			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
6524 			    dlen);
6525 			if (rval) {
6526 				ql_log(ql_log_fatal, vha, 0x008f,
6527 				    "Failed to load segment %d of firmware.\n",
6528 				    fragment);
6529 				return QLA_FUNCTION_FAILED;
6530 			}
6531 
6532 			faddr += dlen;
6533 			risc_addr += dlen;
6534 			risc_size -= dlen;
6535 			fragment++;
6536 		}
6537 
6538 		/* Next segment. */
6539 		segments--;
6540 	}
6541 
6542 	if (!IS_QLA27XX(ha))
6543 		return rval;
6544 
6545 	if (ha->fw_dump_template)
6546 		vfree(ha->fw_dump_template);
6547 	ha->fw_dump_template = NULL;
6548 	ha->fw_dump_template_len = 0;
6549 
6550 	ql_dbg(ql_dbg_init, vha, 0x0161,
6551 	    "Loading fwdump template from %x\n", faddr);
6552 	qla24xx_read_flash_data(vha, dcode, faddr, 7);
6553 	risc_size = be32_to_cpu(dcode[2]);
6554 	ql_dbg(ql_dbg_init, vha, 0x0162,
6555 	    "-> array size %x dwords\n", risc_size);
6556 	if (risc_size == 0 || risc_size == ~0)
6557 		goto default_template;
6558 
6559 	dlen = (risc_size - 8) * sizeof(*dcode);
6560 	ql_dbg(ql_dbg_init, vha, 0x0163,
6561 	    "-> template allocating %x bytes...\n", dlen);
6562 	ha->fw_dump_template = vmalloc(dlen);
6563 	if (!ha->fw_dump_template) {
6564 		ql_log(ql_log_warn, vha, 0x0164,
6565 		    "Failed fwdump template allocate %x bytes.\n", risc_size);
6566 		goto default_template;
6567 	}
6568 
6569 	faddr += 7;
6570 	risc_size -= 8;
6571 	dcode = ha->fw_dump_template;
6572 	qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
6573 	for (i = 0; i < risc_size; i++)
6574 		dcode[i] = le32_to_cpu(dcode[i]);
6575 
6576 	if (!qla27xx_fwdt_template_valid(dcode)) {
6577 		ql_log(ql_log_warn, vha, 0x0165,
6578 		    "Failed fwdump template validate\n");
6579 		goto default_template;
6580 	}
6581 
6582 	dlen = qla27xx_fwdt_template_size(dcode);
6583 	ql_dbg(ql_dbg_init, vha, 0x0166,
6584 	    "-> template size %x bytes\n", dlen);
6585 	if (dlen > risc_size * sizeof(*dcode)) {
6586 		ql_log(ql_log_warn, vha, 0x0167,
6587 		    "Failed fwdump template exceeds array by %zx bytes\n",
6588 		    (size_t)(dlen - risc_size * sizeof(*dcode)));
6589 		goto default_template;
6590 	}
6591 	ha->fw_dump_template_len = dlen;
6592 	return rval;
6593 
6594 default_template:
6595 	ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
6596 	if (ha->fw_dump_template)
6597 		vfree(ha->fw_dump_template);
6598 	ha->fw_dump_template = NULL;
6599 	ha->fw_dump_template_len = 0;
6600 
6601 	dlen = qla27xx_fwdt_template_default_size();
6602 	ql_dbg(ql_dbg_init, vha, 0x0169,
6603 	    "-> template allocating %x bytes...\n", dlen);
6604 	ha->fw_dump_template = vmalloc(dlen);
6605 	if (!ha->fw_dump_template) {
6606 		ql_log(ql_log_warn, vha, 0x016a,
6607 		    "Failed fwdump template allocate %x bytes.\n", risc_size);
6608 		goto failed_template;
6609 	}
6610 
6611 	dcode = ha->fw_dump_template;
6612 	risc_size = dlen / sizeof(*dcode);
6613 	memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
6614 	for (i = 0; i < risc_size; i++)
6615 		dcode[i] = be32_to_cpu(dcode[i]);
6616 
6617 	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
6618 		ql_log(ql_log_warn, vha, 0x016b,
6619 		    "Failed fwdump template validate\n");
6620 		goto failed_template;
6621 	}
6622 
6623 	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
6624 	ql_dbg(ql_dbg_init, vha, 0x016c,
6625 	    "-> template size %x bytes\n", dlen);
6626 	ha->fw_dump_template_len = dlen;
6627 	return rval;
6628 
6629 failed_template:
6630 	ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
6631 	if (ha->fw_dump_template)
6632 		vfree(ha->fw_dump_template);
6633 	ha->fw_dump_template = NULL;
6634 	ha->fw_dump_template_len = 0;
6635 	return rval;
6636 }
6637 
6638 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
6639 
/*
 * qla2x00_load_risc() - Load ISP2x00-series RISC firmware from a blob.
 * @vha: adapter state
 * @srisc_addr: returns the first segment's RISC load address
 *
 * Obtains the firmware blob for this board via request-firmware,
 * sanity-checks the version words in the image header, then copies
 * each segment (as byte-swapped 16-bit words) into the request ring's
 * DMA buffer and transfers it to the RISC with load-ram mailbox
 * commands, fw_transfer_size/2 words at a time.  Segment start
 * addresses come from blob->segs.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int	rval;
	int	i, fragment;
	uint16_t *wcode, *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	/* Stage segments through the request ring's DMA-able buffer. */
	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint16_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	/* Words 4..7 hold the version; all-ones/all-zeros means corrupt. */
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
		wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		/* Remember the very first segment's load address. */
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Transfer at most fw_transfer_size bytes per pass. */
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			for (i = 0; i < wlen; i++)
				wcode[i] = swab16(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
6738 
6739 static int
6740 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6741 {
6742 	int	rval;
6743 	int	segments, fragment;
6744 	uint32_t *dcode, dlen;
6745 	uint32_t risc_addr;
6746 	uint32_t risc_size;
6747 	uint32_t i;
6748 	struct fw_blob *blob;
6749 	const uint32_t *fwcode;
6750 	uint32_t fwclen;
6751 	struct qla_hw_data *ha = vha->hw;
6752 	struct req_que *req = ha->req_q_map[0];
6753 
6754 	/* Load firmware blob. */
6755 	blob = qla2x00_request_firmware(vha);
6756 	if (!blob) {
6757 		ql_log(ql_log_warn, vha, 0x0090,
6758 		    "Firmware image unavailable.\n");
6759 		ql_log(ql_log_warn, vha, 0x0091,
6760 		    "Firmware images can be retrieved from: "
6761 		    QLA_FW_URL ".\n");
6762 
6763 		return QLA_FUNCTION_FAILED;
6764 	}
6765 
6766 	ql_dbg(ql_dbg_init, vha, 0x0092,
6767 	    "FW: Loading via request-firmware.\n");
6768 
6769 	rval = QLA_SUCCESS;
6770 
6771 	segments = FA_RISC_CODE_SEGMENTS;
6772 	dcode = (uint32_t *)req->ring;
6773 	*srisc_addr = 0;
6774 	fwcode = (uint32_t *)blob->fw->data;
6775 	fwclen = 0;
6776 
6777 	/* Validate firmware image by checking version. */
6778 	if (blob->fw->size < 8 * sizeof(uint32_t)) {
6779 		ql_log(ql_log_fatal, vha, 0x0093,
6780 		    "Unable to verify integrity of firmware image (%zd).\n",
6781 		    blob->fw->size);
6782 		return QLA_FUNCTION_FAILED;
6783 	}
6784 	for (i = 0; i < 4; i++)
6785 		dcode[i] = be32_to_cpu(fwcode[i + 4]);
6786 	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
6787 	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
6788 	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
6789 		dcode[3] == 0)) {
6790 		ql_log(ql_log_fatal, vha, 0x0094,
6791 		    "Unable to verify integrity of firmware image (%zd).\n",
6792 		    blob->fw->size);
6793 		ql_log(ql_log_fatal, vha, 0x0095,
6794 		    "Firmware data: %08x %08x %08x %08x.\n",
6795 		    dcode[0], dcode[1], dcode[2], dcode[3]);
6796 		return QLA_FUNCTION_FAILED;
6797 	}
6798 
6799 	while (segments && rval == QLA_SUCCESS) {
6800 		risc_addr = be32_to_cpu(fwcode[2]);
6801 		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
6802 		risc_size = be32_to_cpu(fwcode[3]);
6803 
6804 		/* Validate firmware image size. */
6805 		fwclen += risc_size * sizeof(uint32_t);
6806 		if (blob->fw->size < fwclen) {
6807 			ql_log(ql_log_fatal, vha, 0x0096,
6808 			    "Unable to verify integrity of firmware image "
6809 			    "(%zd).\n", blob->fw->size);
6810 			return QLA_FUNCTION_FAILED;
6811 		}
6812 
6813 		fragment = 0;
6814 		while (risc_size > 0 && rval == QLA_SUCCESS) {
6815 			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
6816 			if (dlen > risc_size)
6817 				dlen = risc_size;
6818 
6819 			ql_dbg(ql_dbg_init, vha, 0x0097,
6820 			    "Loading risc segment@ risc addr %x "
6821 			    "number of dwords 0x%x.\n", risc_addr, dlen);
6822 
6823 			for (i = 0; i < dlen; i++)
6824 				dcode[i] = swab32(fwcode[i]);
6825 
6826 			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
6827 			    dlen);
6828 			if (rval) {
6829 				ql_log(ql_log_fatal, vha, 0x0098,
6830 				    "Failed to load segment %d of firmware.\n",
6831 				    fragment);
6832 				return QLA_FUNCTION_FAILED;
6833 			}
6834 
6835 			fwcode += dlen;
6836 			risc_addr += dlen;
6837 			risc_size -= dlen;
6838 			fragment++;
6839 		}
6840 
6841 		/* Next segment. */
6842 		segments--;
6843 	}
6844 
6845 	if (!IS_QLA27XX(ha))
6846 		return rval;
6847 
6848 	if (ha->fw_dump_template)
6849 		vfree(ha->fw_dump_template);
6850 	ha->fw_dump_template = NULL;
6851 	ha->fw_dump_template_len = 0;
6852 
6853 	ql_dbg(ql_dbg_init, vha, 0x171,
6854 	    "Loading fwdump template from %x\n",
6855 	    (uint32_t)((void *)fwcode - (void *)blob->fw->data));
6856 	risc_size = be32_to_cpu(fwcode[2]);
6857 	ql_dbg(ql_dbg_init, vha, 0x172,
6858 	    "-> array size %x dwords\n", risc_size);
6859 	if (risc_size == 0 || risc_size == ~0)
6860 		goto default_template;
6861 
6862 	dlen = (risc_size - 8) * sizeof(*fwcode);
6863 	ql_dbg(ql_dbg_init, vha, 0x0173,
6864 	    "-> template allocating %x bytes...\n", dlen);
6865 	ha->fw_dump_template = vmalloc(dlen);
6866 	if (!ha->fw_dump_template) {
6867 		ql_log(ql_log_warn, vha, 0x0174,
6868 		    "Failed fwdump template allocate %x bytes.\n", risc_size);
6869 		goto default_template;
6870 	}
6871 
6872 	fwcode += 7;
6873 	risc_size -= 8;
6874 	dcode = ha->fw_dump_template;
6875 	for (i = 0; i < risc_size; i++)
6876 		dcode[i] = le32_to_cpu(fwcode[i]);
6877 
6878 	if (!qla27xx_fwdt_template_valid(dcode)) {
6879 		ql_log(ql_log_warn, vha, 0x0175,
6880 		    "Failed fwdump template validate\n");
6881 		goto default_template;
6882 	}
6883 
6884 	dlen = qla27xx_fwdt_template_size(dcode);
6885 	ql_dbg(ql_dbg_init, vha, 0x0176,
6886 	    "-> template size %x bytes\n", dlen);
6887 	if (dlen > risc_size * sizeof(*fwcode)) {
6888 		ql_log(ql_log_warn, vha, 0x0177,
6889 		    "Failed fwdump template exceeds array by %zx bytes\n",
6890 		    (size_t)(dlen - risc_size * sizeof(*fwcode)));
6891 		goto default_template;
6892 	}
6893 	ha->fw_dump_template_len = dlen;
6894 	return rval;
6895 
6896 default_template:
6897 	ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
6898 	if (ha->fw_dump_template)
6899 		vfree(ha->fw_dump_template);
6900 	ha->fw_dump_template = NULL;
6901 	ha->fw_dump_template_len = 0;
6902 
6903 	dlen = qla27xx_fwdt_template_default_size();
6904 	ql_dbg(ql_dbg_init, vha, 0x0179,
6905 	    "-> template allocating %x bytes...\n", dlen);
6906 	ha->fw_dump_template = vmalloc(dlen);
6907 	if (!ha->fw_dump_template) {
6908 		ql_log(ql_log_warn, vha, 0x017a,
6909 		    "Failed fwdump template allocate %x bytes.\n", risc_size);
6910 		goto failed_template;
6911 	}
6912 
6913 	dcode = ha->fw_dump_template;
6914 	risc_size = dlen / sizeof(*fwcode);
6915 	fwcode = qla27xx_fwdt_template_default();
6916 	for (i = 0; i < risc_size; i++)
6917 		dcode[i] = be32_to_cpu(fwcode[i]);
6918 
6919 	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
6920 		ql_log(ql_log_warn, vha, 0x017b,
6921 		    "Failed fwdump template validate\n");
6922 		goto failed_template;
6923 	}
6924 
6925 	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
6926 	ql_dbg(ql_dbg_init, vha, 0x017c,
6927 	    "-> template size %x bytes\n", dlen);
6928 	ha->fw_dump_template_len = dlen;
6929 	return rval;
6930 
6931 failed_template:
6932 	ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
6933 	if (ha->fw_dump_template)
6934 		vfree(ha->fw_dump_template);
6935 	ha->fw_dump_template = NULL;
6936 	ha->fw_dump_template_len = 0;
6937 	return rval;
6938 }
6939 
6940 int
6941 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6942 {
6943 	int rval;
6944 
6945 	if (ql2xfwloadbin == 1)
6946 		return qla81xx_load_risc(vha, srisc_addr);
6947 
6948 	/*
6949 	 * FW Load priority:
6950 	 * 1) Firmware via request-firmware interface (.bin file).
6951 	 * 2) Firmware residing in flash.
6952 	 */
6953 	rval = qla24xx_load_risc_blob(vha, srisc_addr);
6954 	if (rval == QLA_SUCCESS)
6955 		return rval;
6956 
6957 	return qla24xx_load_risc_flash(vha, srisc_addr,
6958 	    vha->hw->flt_region_fw);
6959 }
6960 
6961 int
6962 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6963 {
6964 	int rval;
6965 	struct qla_hw_data *ha = vha->hw;
6966 
6967 	if (ql2xfwloadbin == 2)
6968 		goto try_blob_fw;
6969 
6970 	/*
6971 	 * FW Load priority:
6972 	 * 1) Firmware residing in flash.
6973 	 * 2) Firmware via request-firmware interface (.bin file).
6974 	 * 3) Golden-Firmware residing in flash -- limited operation.
6975 	 */
6976 	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
6977 	if (rval == QLA_SUCCESS)
6978 		return rval;
6979 
6980 try_blob_fw:
6981 	rval = qla24xx_load_risc_blob(vha, srisc_addr);
6982 	if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
6983 		return rval;
6984 
6985 	ql_log(ql_log_info, vha, 0x0099,
6986 	    "Attempting to fallback to golden firmware.\n");
6987 	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
6988 	if (rval != QLA_SUCCESS)
6989 		return rval;
6990 
6991 	ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
6992 	ha->flags.running_gold_fw = 1;
6993 	return rval;
6994 }
6995 
/*
 * qla2x00_try_to_stop_firmware() - Ask the ISP firmware to stop.
 * @vha: adapter state
 *
 * Issues the stop-firmware mailbox command on FWI2-capable adapters
 * whose firmware has been loaded and started.  If the command fails
 * with a status other than timeout or invalid-command, the chip is
 * reset and re-initialized and the stop is retried, up to five times.
 * Skipped entirely when the PCI channel is permanently dead or the
 * firmware never ran.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	/* Nothing to do if the device is gone or firmware never started. */
	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		/* Reset and bring the chip back up before retrying. */
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}
7027 
/*
 * qla24xx_configure_vhba() - Bring an NPIV virtual port online.
 * @vha: virtual-port state; must not be the physical (vp_idx == 0) port
 *
 * Waits for firmware readiness on the base adapter, issues a sync
 * marker, logs the port into the fabric name server (SNS, port id
 * 0xFFFFFC) and schedules a loop resync on the base port.
 *
 * Returns -EINVAL when called on the physical port, a QLA_* status
 * otherwise.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct req_que *req;
	struct rsp_que *rsp;

	/* Only meaningful for virtual ports. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);
	/* Use the vport's queue pair when one is assigned. */
	if (vha->qpair)
		req = vha->qpair->req;
	else
		req = ha->req_q_map[0];
	rsp = req->rsp;

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	/* SNS login succeeded; mark the loop up and request a resync. */
	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
7080 
7081 /* 84XX Support **************************************************************/
7082 
/*
 * Global list of 84xx chip-state objects, shared by adapters on the
 * same PCI bus; all list traversal/mutation is guarded by
 * qla_cs84xx_mutex.
 */
static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);
7085 
7086 static struct qla_chip_state_84xx *
7087 qla84xx_get_chip(struct scsi_qla_host *vha)
7088 {
7089 	struct qla_chip_state_84xx *cs84xx;
7090 	struct qla_hw_data *ha = vha->hw;
7091 
7092 	mutex_lock(&qla_cs84xx_mutex);
7093 
7094 	/* Find any shared 84xx chip. */
7095 	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
7096 		if (cs84xx->bus == ha->pdev->bus) {
7097 			kref_get(&cs84xx->kref);
7098 			goto done;
7099 		}
7100 	}
7101 
7102 	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
7103 	if (!cs84xx)
7104 		goto done;
7105 
7106 	kref_init(&cs84xx->kref);
7107 	spin_lock_init(&cs84xx->access_lock);
7108 	mutex_init(&cs84xx->fw_update_mutex);
7109 	cs84xx->bus = ha->pdev->bus;
7110 
7111 	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
7112 done:
7113 	mutex_unlock(&qla_cs84xx_mutex);
7114 	return cs84xx;
7115 }
7116 
7117 static void
7118 __qla84xx_chip_release(struct kref *kref)
7119 {
7120 	struct qla_chip_state_84xx *cs84xx =
7121 	    container_of(kref, struct qla_chip_state_84xx, kref);
7122 
7123 	mutex_lock(&qla_cs84xx_mutex);
7124 	list_del(&cs84xx->list);
7125 	mutex_unlock(&qla_cs84xx_mutex);
7126 	kfree(cs84xx);
7127 }
7128 
7129 void
7130 qla84xx_put_chip(struct scsi_qla_host *vha)
7131 {
7132 	struct qla_hw_data *ha = vha->hw;
7133 	if (ha->cs84xx)
7134 		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
7135 }
7136 
7137 static int
7138 qla84xx_init_chip(scsi_qla_host_t *vha)
7139 {
7140 	int rval;
7141 	uint16_t status[2];
7142 	struct qla_hw_data *ha = vha->hw;
7143 
7144 	mutex_lock(&ha->cs84xx->fw_update_mutex);
7145 
7146 	rval = qla84xx_verify_chip(vha, status);
7147 
7148 	mutex_unlock(&ha->cs84xx->fw_update_mutex);
7149 
7150 	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
7151 	    QLA_SUCCESS;
7152 }
7153 
7154 /* 81XX Support **************************************************************/
7155 
7156 int
7157 qla81xx_nvram_config(scsi_qla_host_t *vha)
7158 {
7159 	int   rval;
7160 	struct init_cb_81xx *icb;
7161 	struct nvram_81xx *nv;
7162 	uint32_t *dptr;
7163 	uint8_t  *dptr1, *dptr2;
7164 	uint32_t chksum;
7165 	uint16_t cnt;
7166 	struct qla_hw_data *ha = vha->hw;
7167 
7168 	rval = QLA_SUCCESS;
7169 	icb = (struct init_cb_81xx *)ha->init_cb;
7170 	nv = ha->nvram;
7171 
7172 	/* Determine NVRAM starting address. */
7173 	ha->nvram_size = sizeof(struct nvram_81xx);
7174 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
7175 	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
7176 		ha->vpd_size = FA_VPD_SIZE_82XX;
7177 
7178 	/* Get VPD data into cache */
7179 	ha->vpd = ha->nvram + VPD_OFFSET;
7180 	ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
7181 	    ha->vpd_size);
7182 
7183 	/* Get NVRAM data into cache and calculate checksum. */
7184 	ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
7185 	    ha->nvram_size);
7186 	dptr = (uint32_t *)nv;
7187 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7188 		chksum += le32_to_cpu(*dptr);
7189 
7190 	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
7191 	    "Contents of NVRAM:\n");
7192 	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
7193 	    (uint8_t *)nv, ha->nvram_size);
7194 
7195 	/* Bad NVRAM data, set defaults parameters. */
7196 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
7197 	    || nv->id[3] != ' ' ||
7198 	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
7199 		/* Reset NVRAM data. */
7200 		ql_log(ql_log_info, vha, 0x0073,
7201 		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7202 		    "version=0x%x.\n", chksum, nv->id[0],
7203 		    le16_to_cpu(nv->nvram_version));
7204 		ql_log(ql_log_info, vha, 0x0074,
7205 		    "Falling back to functioning (yet invalid -- WWPN) "
7206 		    "defaults.\n");
7207 
7208 		/*
7209 		 * Set default initialization control block.
7210 		 */
7211 		memset(nv, 0, ha->nvram_size);
7212 		nv->nvram_version = cpu_to_le16(ICB_VERSION);
7213 		nv->version = cpu_to_le16(ICB_VERSION);
7214 		nv->frame_payload_size = 2048;
7215 		nv->execution_throttle = cpu_to_le16(0xFFFF);
7216 		nv->exchange_count = cpu_to_le16(0);
7217 		nv->port_name[0] = 0x21;
7218 		nv->port_name[1] = 0x00 + ha->port_no + 1;
7219 		nv->port_name[2] = 0x00;
7220 		nv->port_name[3] = 0xe0;
7221 		nv->port_name[4] = 0x8b;
7222 		nv->port_name[5] = 0x1c;
7223 		nv->port_name[6] = 0x55;
7224 		nv->port_name[7] = 0x86;
7225 		nv->node_name[0] = 0x20;
7226 		nv->node_name[1] = 0x00;
7227 		nv->node_name[2] = 0x00;
7228 		nv->node_name[3] = 0xe0;
7229 		nv->node_name[4] = 0x8b;
7230 		nv->node_name[5] = 0x1c;
7231 		nv->node_name[6] = 0x55;
7232 		nv->node_name[7] = 0x86;
7233 		nv->login_retry_count = cpu_to_le16(8);
7234 		nv->interrupt_delay_timer = cpu_to_le16(0);
7235 		nv->login_timeout = cpu_to_le16(0);
7236 		nv->firmware_options_1 =
7237 		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7238 		nv->firmware_options_2 = cpu_to_le32(2 << 4);
7239 		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7240 		nv->firmware_options_3 = cpu_to_le32(2 << 13);
7241 		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7242 		nv->efi_parameters = cpu_to_le32(0);
7243 		nv->reset_delay = 5;
7244 		nv->max_luns_per_target = cpu_to_le16(128);
7245 		nv->port_down_retry_count = cpu_to_le16(30);
7246 		nv->link_down_timeout = cpu_to_le16(180);
7247 		nv->enode_mac[0] = 0x00;
7248 		nv->enode_mac[1] = 0xC0;
7249 		nv->enode_mac[2] = 0xDD;
7250 		nv->enode_mac[3] = 0x04;
7251 		nv->enode_mac[4] = 0x05;
7252 		nv->enode_mac[5] = 0x06 + ha->port_no + 1;
7253 
7254 		rval = 1;
7255 	}
7256 
7257 	if (IS_T10_PI_CAPABLE(ha))
7258 		nv->frame_payload_size &= ~7;
7259 
7260 	qlt_81xx_config_nvram_stage1(vha, nv);
7261 
7262 	/* Reset Initialization control block */
7263 	memset(icb, 0, ha->init_cb_size);
7264 
7265 	/* Copy 1st segment. */
7266 	dptr1 = (uint8_t *)icb;
7267 	dptr2 = (uint8_t *)&nv->version;
7268 	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7269 	while (cnt--)
7270 		*dptr1++ = *dptr2++;
7271 
7272 	icb->login_retry_count = nv->login_retry_count;
7273 
7274 	/* Copy 2nd segment. */
7275 	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7276 	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7277 	cnt = (uint8_t *)&icb->reserved_5 -
7278 	    (uint8_t *)&icb->interrupt_delay_timer;
7279 	while (cnt--)
7280 		*dptr1++ = *dptr2++;
7281 
7282 	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
7283 	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
7284 	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
7285 		icb->enode_mac[0] = 0x00;
7286 		icb->enode_mac[1] = 0xC0;
7287 		icb->enode_mac[2] = 0xDD;
7288 		icb->enode_mac[3] = 0x04;
7289 		icb->enode_mac[4] = 0x05;
7290 		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
7291 	}
7292 
7293 	/* Use extended-initialization control block. */
7294 	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
7295 
7296 	/*
7297 	 * Setup driver NVRAM options.
7298 	 */
7299 	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7300 	    "QLE8XXX");
7301 
7302 	qlt_81xx_config_nvram_stage2(vha, icb);
7303 
7304 	/* Use alternate WWN? */
7305 	if (nv->host_p & cpu_to_le32(BIT_15)) {
7306 		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7307 		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7308 	}
7309 
7310 	/* Prepare nodename */
7311 	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
7312 		/*
7313 		 * Firmware will apply the following mask if the nodename was
7314 		 * not provided.
7315 		 */
7316 		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7317 		icb->node_name[0] &= 0xF0;
7318 	}
7319 
7320 	/* Set host adapter parameters. */
7321 	ha->flags.disable_risc_code_load = 0;
7322 	ha->flags.enable_lip_reset = 0;
7323 	ha->flags.enable_lip_full_login =
7324 	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
7325 	ha->flags.enable_target_reset =
7326 	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
7327 	ha->flags.enable_led_scheme = 0;
7328 	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
7329 
7330 	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7331 	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
7332 
7333 	/* save HBA serial number */
7334 	ha->serial0 = icb->port_name[5];
7335 	ha->serial1 = icb->port_name[6];
7336 	ha->serial2 = icb->port_name[7];
7337 	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7338 	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7339 
7340 	icb->execution_throttle = cpu_to_le16(0xFFFF);
7341 
7342 	ha->retry_count = le16_to_cpu(nv->login_retry_count);
7343 
7344 	/* Set minimum login_timeout to 4 seconds. */
7345 	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7346 		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7347 	if (le16_to_cpu(nv->login_timeout) < 4)
7348 		nv->login_timeout = cpu_to_le16(4);
7349 	ha->login_timeout = le16_to_cpu(nv->login_timeout);
7350 
7351 	/* Set minimum RATOV to 100 tenths of a second. */
7352 	ha->r_a_tov = 100;
7353 
7354 	ha->loop_reset_delay = nv->reset_delay;
7355 
7356 	/* Link Down Timeout = 0:
7357 	 *
7358 	 *	When Port Down timer expires we will start returning
7359 	 *	I/O's to OS with "DID_NO_CONNECT".
7360 	 *
7361 	 * Link Down Timeout != 0:
7362 	 *
7363 	 *	 The driver waits for the link to come up after link down
7364 	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
7365 	 */
7366 	if (le16_to_cpu(nv->link_down_timeout) == 0) {
7367 		ha->loop_down_abort_time =
7368 		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7369 	} else {
7370 		ha->link_down_timeout =	le16_to_cpu(nv->link_down_timeout);
7371 		ha->loop_down_abort_time =
7372 		    (LOOP_DOWN_TIME - ha->link_down_timeout);
7373 	}
7374 
7375 	/* Need enough time to try and get the port back. */
7376 	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7377 	if (qlport_down_retry)
7378 		ha->port_down_retry_count = qlport_down_retry;
7379 
7380 	/* Set login_retry_count */
7381 	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
7382 	if (ha->port_down_retry_count ==
7383 	    le16_to_cpu(nv->port_down_retry_count) &&
7384 	    ha->port_down_retry_count > 3)
7385 		ha->login_retry_count = ha->port_down_retry_count;
7386 	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7387 		ha->login_retry_count = ha->port_down_retry_count;
7388 	if (ql2xloginretrycount)
7389 		ha->login_retry_count = ql2xloginretrycount;
7390 
7391 	/* if not running MSI-X we need handshaking on interrupts */
7392 	if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
7393 		icb->firmware_options_2 |= cpu_to_le32(BIT_22);
7394 
7395 	/* Enable ZIO. */
7396 	if (!vha->flags.init_done) {
7397 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7398 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7399 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7400 		    le16_to_cpu(icb->interrupt_delay_timer): 2;
7401 	}
7402 	icb->firmware_options_2 &= cpu_to_le32(
7403 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7404 	vha->flags.process_response_queue = 0;
7405 	if (ha->zio_mode != QLA_ZIO_DISABLED) {
7406 		ha->zio_mode = QLA_ZIO_MODE_6;
7407 
7408 		ql_log(ql_log_info, vha, 0x0075,
7409 		    "ZIO mode %d enabled; timer delay (%d us).\n",
7410 		    ha->zio_mode,
7411 		    ha->zio_timer * 100);
7412 
7413 		icb->firmware_options_2 |= cpu_to_le32(
7414 		    (uint32_t)ha->zio_mode);
7415 		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7416 		vha->flags.process_response_queue = 1;
7417 	}
7418 
7419 	 /* enable RIDA Format2 */
7420 	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7421 		icb->firmware_options_3 |= BIT_0;
7422 
7423 	if (rval) {
7424 		ql_log(ql_log_warn, vha, 0x0076,
7425 		    "NVRAM configuration failed.\n");
7426 	}
7427 	return (rval);
7428 }
7429 
/*
 * qla82xx_restart_isp
 *	Restart the ISP82xx after a reset: re-init rings, wait for
 *	firmware, re-enable interrupts and the FCE/EFT trace buffers,
 *	and propagate the abort/restart to all virtual ports.
 *
 * Input:
 *	vha = scsi host structure pointer (physical port).
 *
 * Returns:
 *	0 = success, non-zero = restart failed.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm the Fibre Channel Event trace buffer; a failure
		 * here only disables tracing, it does not fail the restart. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm the Extended Firmware Trace buffer; likewise
		 * best-effort only. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		/* Walk the vport list under vport_slock; vref_count pins
		 * each vport while the lock is dropped around the abort. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
7529 
/*
 * qla81xx_update_fw_options
 *	Adjust the cached firmware option words for 81xx-class adapters
 *	according to module parameters and the current initiator/target
 *	mode, then push them to firmware via Set Firmware Options.
 *
 * Input:
 *	vha = scsi host structure pointer.
 */
void
qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/*  Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2103,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio) {
		/* Only route to the ATIO queue when a target-capable
		 * mode is active; otherwise make sure the bit is clear. */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (qla_tgt_mode_enabled(vha) ||
	    qla_dual_mode_enabled(vha)) {
		/* FW auto send SCSI status during */
		ha->fw_options[1] |= BIT_8;
		/* High byte of fw_options[10] carries the status firmware
		 * reports for unhandled exchanges (SAM BUSY). */
		ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;

		/* FW perform Exchange validation */
		ha->fw_options[2] |= BIT_4;
	} else {
		ha->fw_options[1]  &= ~BIT_8;
		ha->fw_options[10] &= 0x00ff;

		ha->fw_options[2] &= ~BIT_4;
	}

	if (ql2xetsenable) {
		/* Enable ETS Burst. */
		/* NOTE(review): this memset discards every option set
		 * above when ETS is enabled — looks intentional (ETS-only
		 * operation) but confirm against firmware docs. */
		memset(ha->fw_options, 0, sizeof(ha->fw_options));
		ha->fw_options[2] |= BIT_9;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e9,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	qla2x00_set_fw_options(vha, ha->fw_options);
}
7584 
7585 /*
7586  * qla24xx_get_fcp_prio
7587  *	Gets the fcp cmd priority value for the logged in port.
7588  *	Looks for a match of the port descriptors within
7589  *	each of the fcp prio config entries. If a match is found,
7590  *	the tag (priority) value is returned.
7591  *
7592  * Input:
7593  *	vha = scsi host structure pointer.
7594  *	fcport = port structure pointer.
7595  *
7596  * Return:
7597  *	non-zero (if found)
7598  *	-1 (if not found)
7599  *
7600  * Context:
7601  * 	Kernel context
7602  */
7603 static int
7604 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
7605 {
7606 	int i, entries;
7607 	uint8_t pid_match, wwn_match;
7608 	int priority;
7609 	uint32_t pid1, pid2;
7610 	uint64_t wwn1, wwn2;
7611 	struct qla_fcp_prio_entry *pri_entry;
7612 	struct qla_hw_data *ha = vha->hw;
7613 
7614 	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
7615 		return -1;
7616 
7617 	priority = -1;
7618 	entries = ha->fcp_prio_cfg->num_entries;
7619 	pri_entry = &ha->fcp_prio_cfg->entry[0];
7620 
7621 	for (i = 0; i < entries; i++) {
7622 		pid_match = wwn_match = 0;
7623 
7624 		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
7625 			pri_entry++;
7626 			continue;
7627 		}
7628 
7629 		/* check source pid for a match */
7630 		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
7631 			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
7632 			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
7633 			if (pid1 == INVALID_PORT_ID)
7634 				pid_match++;
7635 			else if (pid1 == pid2)
7636 				pid_match++;
7637 		}
7638 
7639 		/* check destination pid for a match */
7640 		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
7641 			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
7642 			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
7643 			if (pid1 == INVALID_PORT_ID)
7644 				pid_match++;
7645 			else if (pid1 == pid2)
7646 				pid_match++;
7647 		}
7648 
7649 		/* check source WWN for a match */
7650 		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
7651 			wwn1 = wwn_to_u64(vha->port_name);
7652 			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
7653 			if (wwn2 == (uint64_t)-1)
7654 				wwn_match++;
7655 			else if (wwn1 == wwn2)
7656 				wwn_match++;
7657 		}
7658 
7659 		/* check destination WWN for a match */
7660 		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
7661 			wwn1 = wwn_to_u64(fcport->port_name);
7662 			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
7663 			if (wwn2 == (uint64_t)-1)
7664 				wwn_match++;
7665 			else if (wwn1 == wwn2)
7666 				wwn_match++;
7667 		}
7668 
7669 		if (pid_match == 2 || wwn_match == 2) {
7670 			/* Found a matching entry */
7671 			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
7672 				priority = pri_entry->tag;
7673 			break;
7674 		}
7675 
7676 		pri_entry++;
7677 	}
7678 
7679 	return priority;
7680 }
7681 
7682 /*
7683  * qla24xx_update_fcport_fcp_prio
7684  *	Activates fcp priority for the logged in fc port
7685  *
7686  * Input:
7687  *	vha = scsi host structure pointer.
 *	fcport = port structure pointer.
7689  *
7690  * Return:
7691  *	QLA_SUCCESS or QLA_FUNCTION_FAILED
7692  *
7693  * Context:
7694  *	Kernel context.
7695  */
7696 int
7697 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
7698 {
7699 	int ret;
7700 	int priority;
7701 	uint16_t mb[5];
7702 
7703 	if (fcport->port_type != FCT_TARGET ||
7704 	    fcport->loop_id == FC_NO_LOOP_ID)
7705 		return QLA_FUNCTION_FAILED;
7706 
7707 	priority = qla24xx_get_fcp_prio(vha, fcport);
7708 	if (priority < 0)
7709 		return QLA_FUNCTION_FAILED;
7710 
7711 	if (IS_P3P_TYPE(vha->hw)) {
7712 		fcport->fcp_prio = priority & 0xf;
7713 		return QLA_SUCCESS;
7714 	}
7715 
7716 	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
7717 	if (ret == QLA_SUCCESS) {
7718 		if (fcport->fcp_prio != priority)
7719 			ql_dbg(ql_dbg_user, vha, 0x709e,
7720 			    "Updated FCP_CMND priority - value=%d loop_id=%d "
7721 			    "port_id=%02x%02x%02x.\n", priority,
7722 			    fcport->loop_id, fcport->d_id.b.domain,
7723 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
7724 		fcport->fcp_prio = priority & 0xf;
7725 	} else
7726 		ql_dbg(ql_dbg_user, vha, 0x704f,
7727 		    "Unable to update FCP_CMND priority - ret=0x%x for "
7728 		    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
7729 		    fcport->d_id.b.domain, fcport->d_id.b.area,
7730 		    fcport->d_id.b.al_pa);
7731 	return  ret;
7732 }
7733 
7734 /*
7735  * qla24xx_update_all_fcp_prio
7736  *	Activates fcp priority for all the logged in ports
7737  *
7738  * Input:
7739  *	ha = adapter block pointer.
7740  *
7741  * Return:
7742  *	QLA_SUCCESS or QLA_FUNCTION_FAILED
7743  *
7744  * Context:
7745  *	Kernel context.
7746  */
7747 int
7748 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
7749 {
7750 	int ret;
7751 	fc_port_t *fcport;
7752 
7753 	ret = QLA_FUNCTION_FAILED;
7754 	/* We need to set priority for all logged in ports */
7755 	list_for_each_entry(fcport, &vha->vp_fcports, list)
7756 		ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
7757 
7758 	return ret;
7759 }
7760 
7761 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
7762 	int vp_idx, bool startqp)
7763 {
7764 	int rsp_id = 0;
7765 	int  req_id = 0;
7766 	int i;
7767 	struct qla_hw_data *ha = vha->hw;
7768 	uint16_t qpair_id = 0;
7769 	struct qla_qpair *qpair = NULL;
7770 	struct qla_msix_entry *msix;
7771 
7772 	if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
7773 		ql_log(ql_log_warn, vha, 0x00181,
7774 		    "FW/Driver is not multi-queue capable.\n");
7775 		return NULL;
7776 	}
7777 
7778 	if (ql2xmqsupport) {
7779 		qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
7780 		if (qpair == NULL) {
7781 			ql_log(ql_log_warn, vha, 0x0182,
7782 			    "Failed to allocate memory for queue pair.\n");
7783 			return NULL;
7784 		}
7785 		memset(qpair, 0, sizeof(struct qla_qpair));
7786 
7787 		qpair->hw = vha->hw;
7788 		qpair->vha = vha;
7789 		qpair->qp_lock_ptr = &qpair->qp_lock;
7790 		spin_lock_init(&qpair->qp_lock);
7791 		qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
7792 
7793 		/* Assign available que pair id */
7794 		mutex_lock(&ha->mq_lock);
7795 		qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
7796 		if (ha->num_qpairs >= ha->max_qpairs) {
7797 			mutex_unlock(&ha->mq_lock);
7798 			ql_log(ql_log_warn, vha, 0x0183,
7799 			    "No resources to create additional q pair.\n");
7800 			goto fail_qid_map;
7801 		}
7802 		ha->num_qpairs++;
7803 		set_bit(qpair_id, ha->qpair_qid_map);
7804 		ha->queue_pair_map[qpair_id] = qpair;
7805 		qpair->id = qpair_id;
7806 		qpair->vp_idx = vp_idx;
7807 		INIT_LIST_HEAD(&qpair->hints_list);
7808 		qpair->chip_reset = ha->base_qpair->chip_reset;
7809 		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
7810 		qpair->enable_explicit_conf =
7811 		    ha->base_qpair->enable_explicit_conf;
7812 
7813 		for (i = 0; i < ha->msix_count; i++) {
7814 			msix = &ha->msix_entries[i];
7815 			if (msix->in_use)
7816 				continue;
7817 			qpair->msix = msix;
7818 			ql_dbg(ql_dbg_multiq, vha, 0xc00f,
7819 			    "Vector %x selected for qpair\n", msix->vector);
7820 			break;
7821 		}
7822 		if (!qpair->msix) {
7823 			ql_log(ql_log_warn, vha, 0x0184,
7824 			    "Out of MSI-X vectors!.\n");
7825 			goto fail_msix;
7826 		}
7827 
7828 		qpair->msix->in_use = 1;
7829 		list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
7830 		qpair->pdev = ha->pdev;
7831 		if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
7832 			qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
7833 
7834 		mutex_unlock(&ha->mq_lock);
7835 
7836 		/* Create response queue first */
7837 		rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
7838 		if (!rsp_id) {
7839 			ql_log(ql_log_warn, vha, 0x0185,
7840 			    "Failed to create response queue.\n");
7841 			goto fail_rsp;
7842 		}
7843 
7844 		qpair->rsp = ha->rsp_q_map[rsp_id];
7845 
7846 		/* Create request queue */
7847 		req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
7848 		    startqp);
7849 		if (!req_id) {
7850 			ql_log(ql_log_warn, vha, 0x0186,
7851 			    "Failed to create request queue.\n");
7852 			goto fail_req;
7853 		}
7854 
7855 		qpair->req = ha->req_q_map[req_id];
7856 		qpair->rsp->req = qpair->req;
7857 		qpair->rsp->qpair = qpair;
7858 		/* init qpair to this cpu. Will adjust at run time. */
7859 		qla_cpu_update(qpair, smp_processor_id());
7860 
7861 		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
7862 			if (ha->fw_attributes & BIT_4)
7863 				qpair->difdix_supported = 1;
7864 		}
7865 
7866 		qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
7867 		if (!qpair->srb_mempool) {
7868 			ql_log(ql_log_warn, vha, 0xd036,
7869 			    "Failed to create srb mempool for qpair %d\n",
7870 			    qpair->id);
7871 			goto fail_mempool;
7872 		}
7873 
7874 		/* Mark as online */
7875 		qpair->online = 1;
7876 
7877 		if (!vha->flags.qpairs_available)
7878 			vha->flags.qpairs_available = 1;
7879 
7880 		ql_dbg(ql_dbg_multiq, vha, 0xc00d,
7881 		    "Request/Response queue pair created, id %d\n",
7882 		    qpair->id);
7883 		ql_dbg(ql_dbg_init, vha, 0x0187,
7884 		    "Request/Response queue pair created, id %d\n",
7885 		    qpair->id);
7886 	}
7887 	return qpair;
7888 
7889 fail_mempool:
7890 fail_req:
7891 	qla25xx_delete_rsp_que(vha, qpair->rsp);
7892 fail_rsp:
7893 	mutex_lock(&ha->mq_lock);
7894 	qpair->msix->in_use = 0;
7895 	list_del(&qpair->qp_list_elem);
7896 	if (list_empty(&vha->qp_list))
7897 		vha->flags.qpairs_available = 0;
7898 fail_msix:
7899 	ha->queue_pair_map[qpair_id] = NULL;
7900 	clear_bit(qpair_id, ha->qpair_qid_map);
7901 	ha->num_qpairs--;
7902 	mutex_unlock(&ha->mq_lock);
7903 fail_qid_map:
7904 	kfree(qpair);
7905 	return NULL;
7906 }
7907 
7908 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
7909 {
7910 	int ret = QLA_FUNCTION_FAILED;
7911 	struct qla_hw_data *ha = qpair->hw;
7912 
7913 	if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
7914 		goto fail;
7915 
7916 	qpair->delete_in_progress = 1;
7917 	while (atomic_read(&qpair->ref_count))
7918 		msleep(500);
7919 
7920 	ret = qla25xx_delete_req_que(vha, qpair->req);
7921 	if (ret != QLA_SUCCESS)
7922 		goto fail;
7923 	ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
7924 	if (ret != QLA_SUCCESS)
7925 		goto fail;
7926 
7927 	mutex_lock(&ha->mq_lock);
7928 	ha->queue_pair_map[qpair->id] = NULL;
7929 	clear_bit(qpair->id, ha->qpair_qid_map);
7930 	ha->num_qpairs--;
7931 	list_del(&qpair->qp_list_elem);
7932 	if (list_empty(&vha->qp_list)) {
7933 		vha->flags.qpairs_available = 0;
7934 		vha->flags.qpairs_req_created = 0;
7935 		vha->flags.qpairs_rsp_created = 0;
7936 	}
7937 	mempool_destroy(qpair->srb_mempool);
7938 	kfree(qpair);
7939 	mutex_unlock(&ha->mq_lock);
7940 
7941 	return QLA_SUCCESS;
7942 fail:
7943 	return ret;
7944 }
7945