xref: /linux/drivers/scsi/qla2xxx/qla_attr.c (revision ebf68996de0ab250c5d520eb2291ab65643e9a1e)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/kthread.h>
11 #include <linux/vmalloc.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14 
15 static int qla24xx_vport_disable(struct fc_vport *, bool);
16 
17 /* SYSFS attributes --------------------------------------------------------- */
18 
19 static ssize_t
20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 			   struct bin_attribute *bin_attr,
22 			   char *buf, loff_t off, size_t count)
23 {
24 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25 	    struct device, kobj)));
26 	struct qla_hw_data *ha = vha->hw;
27 	int rval = 0;
28 
29 	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 		return 0;
31 
32 	mutex_lock(&ha->optrom_mutex);
33 	if (IS_P3P_TYPE(ha)) {
34 		if (off < ha->md_template_size) {
35 			rval = memory_read_from_buffer(buf, count,
36 			    &off, ha->md_tmplt_hdr, ha->md_template_size);
37 		} else {
38 			off -= ha->md_template_size;
39 			rval = memory_read_from_buffer(buf, count,
40 			    &off, ha->md_dump, ha->md_dump_size);
41 		}
42 	} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
43 		rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 		    MCTP_DUMP_SIZE);
45 	} else if (ha->fw_dump_reading) {
46 		rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
47 					ha->fw_dump_len);
48 	} else {
49 		rval = 0;
50 	}
51 	mutex_unlock(&ha->optrom_mutex);
52 	return rval;
53 }
54 
55 static ssize_t
56 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
57 			    struct bin_attribute *bin_attr,
58 			    char *buf, loff_t off, size_t count)
59 {
60 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
61 	    struct device, kobj)));
62 	struct qla_hw_data *ha = vha->hw;
63 	int reading;
64 
65 	if (off != 0)
66 		return (0);
67 
68 	reading = simple_strtol(buf, NULL, 10);
69 	switch (reading) {
70 	case 0:
71 		if (!ha->fw_dump_reading)
72 			break;
73 
74 		ql_log(ql_log_info, vha, 0x705d,
75 		    "Firmware dump cleared on (%ld).\n", vha->host_no);
76 
77 		if (IS_P3P_TYPE(ha)) {
78 			qla82xx_md_free(vha);
79 			qla82xx_md_prep(vha);
80 		}
81 		ha->fw_dump_reading = 0;
82 		ha->fw_dumped = 0;
83 		break;
84 	case 1:
85 		if (ha->fw_dumped && !ha->fw_dump_reading) {
86 			ha->fw_dump_reading = 1;
87 
88 			ql_log(ql_log_info, vha, 0x705e,
89 			    "Raw firmware dump ready for read on (%ld).\n",
90 			    vha->host_no);
91 		}
92 		break;
93 	case 2:
94 		qla2x00_alloc_fw_dump(vha);
95 		break;
96 	case 3:
97 		if (IS_QLA82XX(ha)) {
98 			qla82xx_idc_lock(ha);
99 			qla82xx_set_reset_owner(vha);
100 			qla82xx_idc_unlock(ha);
101 		} else if (IS_QLA8044(ha)) {
102 			qla8044_idc_lock(ha);
103 			qla82xx_set_reset_owner(vha);
104 			qla8044_idc_unlock(ha);
105 		} else
106 			qla2x00_system_error(vha);
107 		break;
108 	case 4:
109 		if (IS_P3P_TYPE(ha)) {
110 			if (ha->md_tmplt_hdr)
111 				ql_dbg(ql_dbg_user, vha, 0x705b,
112 				    "MiniDump supported with this firmware.\n");
113 			else
114 				ql_dbg(ql_dbg_user, vha, 0x709d,
115 				    "MiniDump not supported with this firmware.\n");
116 		}
117 		break;
118 	case 5:
119 		if (IS_P3P_TYPE(ha))
120 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
121 		break;
122 	case 6:
123 		if (!ha->mctp_dump_reading)
124 			break;
125 		ql_log(ql_log_info, vha, 0x70c1,
126 		    "MCTP dump cleared on (%ld).\n", vha->host_no);
127 		ha->mctp_dump_reading = 0;
128 		ha->mctp_dumped = 0;
129 		break;
130 	case 7:
131 		if (ha->mctp_dumped && !ha->mctp_dump_reading) {
132 			ha->mctp_dump_reading = 1;
133 			ql_log(ql_log_info, vha, 0x70c2,
134 			    "Raw mctp dump ready for read on (%ld).\n",
135 			    vha->host_no);
136 		}
137 		break;
138 	}
139 	return count;
140 }
141 
142 static struct bin_attribute sysfs_fw_dump_attr = {
143 	.attr = {
144 		.name = "fw_dump",
145 		.mode = S_IRUSR | S_IWUSR,
146 	},
147 	.size = 0,
148 	.read = qla2x00_sysfs_read_fw_dump,
149 	.write = qla2x00_sysfs_write_fw_dump,
150 };
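
/*
 * Control values accepted by qla2x00_sysfs_write_fw_dump() above
 * (written as a decimal string at offset 0):
 *
 *	0 -- finish/clear a firmware dump that was opened for reading
 *	1 -- mark a captured raw firmware dump ready for reading
 *	2 -- (re)allocate the firmware dump buffer
 *	3 -- ISP82xx/ISP8044: take IDC reset ownership; other ISPs:
 *	     trigger a firmware system error
 *	4 -- report (via the debug log) whether MiniDump is supported
 *	     (P3P parts only)
 *	5 -- request an ISP abort (P3P parts only)
 *	6 -- finish/clear an MCTP dump that was opened for reading
 *	7 -- mark a captured MCTP dump ready for reading
 *
 * The attribute is created on the SCSI host's gendev kobject (see
 * qla2x00_alloc_sysfs_attr() below), so it typically appears as
 * "fw_dump" in the host<N> directory under the controller's PCI device
 * in sysfs; the exact path depends on the platform's sysfs layout.
 */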
151 
152 static ssize_t
153 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
154 			 struct bin_attribute *bin_attr,
155 			 char *buf, loff_t off, size_t count)
156 {
157 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
158 	    struct device, kobj)));
159 	struct qla_hw_data *ha = vha->hw;
160 	uint32_t faddr;
161 	struct active_regions active_regions = { };
162 
163 	if (!capable(CAP_SYS_ADMIN))
164 		return 0;
165 
166 	mutex_lock(&ha->optrom_mutex);
167 	if (qla2x00_chip_is_down(vha)) {
168 		mutex_unlock(&ha->optrom_mutex);
169 		return -EAGAIN;
170 	}
171 
172 	if (!IS_NOCACHE_VPD_TYPE(ha)) {
173 		mutex_unlock(&ha->optrom_mutex);
174 		goto skip;
175 	}
176 
177 	faddr = ha->flt_region_nvram;
	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_nvram_sec;
	}
182 	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
183 
184 	mutex_unlock(&ha->optrom_mutex);
185 
186 skip:
187 	return memory_read_from_buffer(buf, count, &off, ha->nvram,
188 					ha->nvram_size);
189 }
190 
191 static ssize_t
192 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
193 			  struct bin_attribute *bin_attr,
194 			  char *buf, loff_t off, size_t count)
195 {
196 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
197 	    struct device, kobj)));
198 	struct qla_hw_data *ha = vha->hw;
199 	uint16_t	cnt;
200 
201 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
202 	    !ha->isp_ops->write_nvram)
203 		return -EINVAL;
204 
205 	/* Checksum NVRAM. */
206 	if (IS_FWI2_CAPABLE(ha)) {
207 		uint32_t *iter;
208 		uint32_t chksum;
209 
210 		iter = (uint32_t *)buf;
211 		chksum = 0;
212 		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
213 			chksum += le32_to_cpu(*iter);
214 		chksum = ~chksum + 1;
215 		*iter = cpu_to_le32(chksum);
216 	} else {
217 		uint8_t *iter;
218 		uint8_t chksum;
219 
220 		iter = (uint8_t *)buf;
221 		chksum = 0;
222 		for (cnt = 0; cnt < count - 1; cnt++)
223 			chksum += *iter++;
224 		chksum = ~chksum + 1;
225 		*iter = chksum;
226 	}
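
	/*
	 * In both cases the last 32-bit word (FWI2-capable parts) or the
	 * last byte (earlier parts) of the caller's buffer is replaced
	 * with the two's complement of the running sum, so that the
	 * complete NVRAM image checksums to zero.  For example, bytes
	 * 0x10 0x20 0x30 sum to 0x60, the stored checksum byte becomes
	 * 0xa0, and 0x60 + 0xa0 == 0x100 == 0 (mod 256).
	 */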
227 
228 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
229 		ql_log(ql_log_warn, vha, 0x705f,
230 		    "HBA not online, failing NVRAM update.\n");
231 		return -EAGAIN;
232 	}
233 
234 	mutex_lock(&ha->optrom_mutex);
235 	if (qla2x00_chip_is_down(vha)) {
236 		mutex_unlock(&ha->optrom_mutex);
237 		return -EAGAIN;
238 	}
239 
240 	/* Write NVRAM. */
241 	ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
242 	ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
243 	    count);
244 	mutex_unlock(&ha->optrom_mutex);
245 
246 	ql_dbg(ql_dbg_user, vha, 0x7060,
247 	    "Setting ISP_ABORT_NEEDED\n");
248 	/* NVRAM settings take effect immediately. */
249 	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
250 	qla2xxx_wake_dpc(vha);
251 	qla2x00_wait_for_chip_reset(vha);
252 
253 	return count;
254 }
255 
256 static struct bin_attribute sysfs_nvram_attr = {
257 	.attr = {
258 		.name = "nvram",
259 		.mode = S_IRUSR | S_IWUSR,
260 	},
261 	.size = 512,
262 	.read = qla2x00_sysfs_read_nvram,
263 	.write = qla2x00_sysfs_write_nvram,
264 };
265 
266 static ssize_t
267 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
268 			  struct bin_attribute *bin_attr,
269 			  char *buf, loff_t off, size_t count)
270 {
271 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
272 	    struct device, kobj)));
273 	struct qla_hw_data *ha = vha->hw;
274 	ssize_t rval = 0;
275 
276 	mutex_lock(&ha->optrom_mutex);
277 
278 	if (ha->optrom_state != QLA_SREADING)
279 		goto out;
280 
281 	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
282 	    ha->optrom_region_size);
283 
284 out:
285 	mutex_unlock(&ha->optrom_mutex);
286 
287 	return rval;
288 }
289 
290 static ssize_t
291 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
292 			   struct bin_attribute *bin_attr,
293 			   char *buf, loff_t off, size_t count)
294 {
295 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
296 	    struct device, kobj)));
297 	struct qla_hw_data *ha = vha->hw;
298 
299 	mutex_lock(&ha->optrom_mutex);
300 
301 	if (ha->optrom_state != QLA_SWRITING) {
302 		mutex_unlock(&ha->optrom_mutex);
303 		return -EINVAL;
304 	}
305 	if (off > ha->optrom_region_size) {
306 		mutex_unlock(&ha->optrom_mutex);
307 		return -ERANGE;
308 	}
309 	if (off + count > ha->optrom_region_size)
310 		count = ha->optrom_region_size - off;
311 
312 	memcpy(&ha->optrom_buffer[off], buf, count);
313 	mutex_unlock(&ha->optrom_mutex);
314 
315 	return count;
316 }
317 
318 static struct bin_attribute sysfs_optrom_attr = {
319 	.attr = {
320 		.name = "optrom",
321 		.mode = S_IRUSR | S_IWUSR,
322 	},
323 	.size = 0,
324 	.read = qla2x00_sysfs_read_optrom,
325 	.write = qla2x00_sysfs_write_optrom,
326 };
327 
328 static ssize_t
329 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
330 			       struct bin_attribute *bin_attr,
331 			       char *buf, loff_t off, size_t count)
332 {
333 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
334 	    struct device, kobj)));
335 	struct qla_hw_data *ha = vha->hw;
336 	uint32_t start = 0;
337 	uint32_t size = ha->optrom_size;
338 	int val, valid;
339 	ssize_t rval = count;
340 
341 	if (off)
342 		return -EINVAL;
343 
344 	if (unlikely(pci_channel_offline(ha->pdev)))
345 		return -EAGAIN;
346 
347 	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
348 		return -EINVAL;
349 	if (start > ha->optrom_size)
350 		return -EINVAL;
351 	if (size > ha->optrom_size - start)
352 		size = ha->optrom_size - start;
353 
354 	mutex_lock(&ha->optrom_mutex);
355 	if (qla2x00_chip_is_down(vha)) {
356 		mutex_unlock(&ha->optrom_mutex);
357 		return -EAGAIN;
358 	}
359 	switch (val) {
360 	case 0:
361 		if (ha->optrom_state != QLA_SREADING &&
362 		    ha->optrom_state != QLA_SWRITING) {
363 			rval =  -EINVAL;
364 			goto out;
365 		}
366 		ha->optrom_state = QLA_SWAITING;
367 
368 		ql_dbg(ql_dbg_user, vha, 0x7061,
369 		    "Freeing flash region allocation -- 0x%x bytes.\n",
370 		    ha->optrom_region_size);
371 
372 		vfree(ha->optrom_buffer);
373 		ha->optrom_buffer = NULL;
374 		break;
375 	case 1:
376 		if (ha->optrom_state != QLA_SWAITING) {
377 			rval = -EINVAL;
378 			goto out;
379 		}
380 
381 		ha->optrom_region_start = start;
382 		ha->optrom_region_size = size;
383 
384 		ha->optrom_state = QLA_SREADING;
385 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
386 		if (ha->optrom_buffer == NULL) {
387 			ql_log(ql_log_warn, vha, 0x7062,
388 			    "Unable to allocate memory for optrom retrieval "
389 			    "(%x).\n", ha->optrom_region_size);
390 
391 			ha->optrom_state = QLA_SWAITING;
392 			rval = -ENOMEM;
393 			goto out;
394 		}
395 
396 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
397 			ql_log(ql_log_warn, vha, 0x7063,
			    "HBA not online, failing flash read.\n");
399 			rval = -EAGAIN;
400 			goto out;
401 		}
402 
403 		ql_dbg(ql_dbg_user, vha, 0x7064,
404 		    "Reading flash region -- 0x%x/0x%x.\n",
405 		    ha->optrom_region_start, ha->optrom_region_size);
406 
407 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
408 		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
409 		    ha->optrom_region_start, ha->optrom_region_size);
410 		break;
411 	case 2:
412 		if (ha->optrom_state != QLA_SWAITING) {
413 			rval = -EINVAL;
414 			goto out;
415 		}
416 
417 		/*
418 		 * We need to be more restrictive on which FLASH regions are
419 		 * allowed to be updated via user-space.  Regions accessible
420 		 * via this method include:
421 		 *
422 		 * ISP21xx/ISP22xx/ISP23xx type boards:
423 		 *
424 		 * 	0x000000 -> 0x020000 -- Boot code.
425 		 *
426 		 * ISP2322/ISP24xx type boards:
427 		 *
428 		 * 	0x000000 -> 0x07ffff -- Boot code.
429 		 * 	0x080000 -> 0x0fffff -- Firmware.
430 		 *
431 		 * ISP25xx type boards:
432 		 *
433 		 * 	0x000000 -> 0x07ffff -- Boot code.
434 		 * 	0x080000 -> 0x0fffff -- Firmware.
435 		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
436 		 *
437 		 * > ISP25xx type boards:
438 		 *
439 		 *      None -- should go through BSG.
440 		 */
441 		valid = 0;
442 		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
443 			valid = 1;
444 		else if (start == (ha->flt_region_boot * 4) ||
445 		    start == (ha->flt_region_fw * 4))
446 			valid = 1;
447 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
448 			valid = 1;
449 		if (!valid) {
450 			ql_log(ql_log_warn, vha, 0x7065,
451 			    "Invalid start region 0x%x/0x%x.\n", start, size);
452 			rval = -EINVAL;
453 			goto out;
454 		}
455 
456 		ha->optrom_region_start = start;
457 		ha->optrom_region_size = size;
458 
459 		ha->optrom_state = QLA_SWRITING;
460 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
461 		if (ha->optrom_buffer == NULL) {
462 			ql_log(ql_log_warn, vha, 0x7066,
463 			    "Unable to allocate memory for optrom update "
464 			    "(%x)\n", ha->optrom_region_size);
465 
466 			ha->optrom_state = QLA_SWAITING;
467 			rval = -ENOMEM;
468 			goto out;
469 		}
470 
471 		ql_dbg(ql_dbg_user, vha, 0x7067,
472 		    "Staging flash region write -- 0x%x/0x%x.\n",
473 		    ha->optrom_region_start, ha->optrom_region_size);
474 
475 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
476 		break;
477 	case 3:
478 		if (ha->optrom_state != QLA_SWRITING) {
479 			rval = -EINVAL;
480 			goto out;
481 		}
482 
483 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
484 			ql_log(ql_log_warn, vha, 0x7068,
485 			    "HBA not online, failing flash update.\n");
486 			rval = -EAGAIN;
487 			goto out;
488 		}
489 
490 		ql_dbg(ql_dbg_user, vha, 0x7069,
491 		    "Writing flash region -- 0x%x/0x%x.\n",
492 		    ha->optrom_region_start, ha->optrom_region_size);
493 
494 		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
495 		    ha->optrom_region_start, ha->optrom_region_size);
496 		break;
497 	default:
498 		rval = -EINVAL;
499 	}
500 
501 out:
502 	mutex_unlock(&ha->optrom_mutex);
503 	return rval;
504 }
505 
506 static struct bin_attribute sysfs_optrom_ctl_attr = {
507 	.attr = {
508 		.name = "optrom_ctl",
509 		.mode = S_IWUSR,
510 	},
511 	.size = 0,
512 	.write = qla2x00_sysfs_write_optrom_ctl,
513 };
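
/*
 * "optrom_ctl" drives a simple staging protocol for the "optrom"
 * attribute above.  The written string is parsed as "<op>:<start>:<size>"
 * (decimal op, hexadecimal start/size):
 *
 *	0 -- release the staging buffer after a read or a write
 *	1 -- stage a flash read: the region is read into the buffer and
 *	     can then be fetched through "optrom"
 *	2 -- stage a flash write: data is subsequently copied into the
 *	     buffer by writing to "optrom"
 *	3 -- commit the staged buffer to flash
 *
 * An illustrative read sequence (values are examples only): write
 * "1:0:20000" to optrom_ctl, read the data from optrom, then write "0"
 * to optrom_ctl to free the staging buffer.
 */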
514 
515 static ssize_t
516 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
517 		       struct bin_attribute *bin_attr,
518 		       char *buf, loff_t off, size_t count)
519 {
520 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
521 	    struct device, kobj)));
522 	struct qla_hw_data *ha = vha->hw;
523 	uint32_t faddr;
524 	struct active_regions active_regions = { };
525 
526 	if (unlikely(pci_channel_offline(ha->pdev)))
527 		return -EAGAIN;
528 
529 	if (!capable(CAP_SYS_ADMIN))
530 		return -EINVAL;
531 
	if (!IS_NOCACHE_VPD_TYPE(ha))
533 		goto skip;
534 
535 	faddr = ha->flt_region_vpd << 2;
536 
537 	if (IS_QLA28XX(ha)) {
538 		qla28xx_get_aux_images(vha, &active_regions);
539 		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
540 			faddr = ha->flt_region_vpd_sec << 2;
541 
542 		ql_dbg(ql_dbg_init, vha, 0x7070,
543 		    "Loading %s nvram image.\n",
544 		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
545 		    "primary" : "secondary");
546 	}
547 
548 	mutex_lock(&ha->optrom_mutex);
549 	if (qla2x00_chip_is_down(vha)) {
550 		mutex_unlock(&ha->optrom_mutex);
551 		return -EAGAIN;
552 	}
553 
554 	ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
555 	mutex_unlock(&ha->optrom_mutex);
556 
558 skip:
559 	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
560 }
561 
562 static ssize_t
563 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
564 			struct bin_attribute *bin_attr,
565 			char *buf, loff_t off, size_t count)
566 {
567 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
568 	    struct device, kobj)));
569 	struct qla_hw_data *ha = vha->hw;
570 	uint8_t *tmp_data;
571 
572 	if (unlikely(pci_channel_offline(ha->pdev)))
573 		return 0;
574 
575 	if (qla2x00_chip_is_down(vha))
576 		return 0;
577 
578 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
579 	    !ha->isp_ops->write_nvram)
580 		return 0;
581 
582 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
583 		ql_log(ql_log_warn, vha, 0x706a,
584 		    "HBA not online, failing VPD update.\n");
585 		return -EAGAIN;
586 	}
587 
588 	mutex_lock(&ha->optrom_mutex);
589 	if (qla2x00_chip_is_down(vha)) {
590 		mutex_unlock(&ha->optrom_mutex);
591 		return -EAGAIN;
592 	}
593 
594 	/* Write NVRAM. */
595 	ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
596 	ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
597 
598 	/* Update flash version information for 4Gb & above. */
599 	if (!IS_FWI2_CAPABLE(ha)) {
600 		mutex_unlock(&ha->optrom_mutex);
601 		return -EINVAL;
602 	}
603 
604 	tmp_data = vmalloc(256);
605 	if (!tmp_data) {
606 		mutex_unlock(&ha->optrom_mutex);
607 		ql_log(ql_log_warn, vha, 0x706b,
608 		    "Unable to allocate memory for VPD information update.\n");
609 		return -ENOMEM;
610 	}
611 	ha->isp_ops->get_flash_version(vha, tmp_data);
612 	vfree(tmp_data);
613 
614 	mutex_unlock(&ha->optrom_mutex);
615 
616 	return count;
617 }
618 
619 static struct bin_attribute sysfs_vpd_attr = {
620 	.attr = {
621 		.name = "vpd",
622 		.mode = S_IRUSR | S_IWUSR,
623 	},
624 	.size = 0,
625 	.read = qla2x00_sysfs_read_vpd,
626 	.write = qla2x00_sysfs_write_vpd,
627 };
628 
629 static ssize_t
630 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
631 		       struct bin_attribute *bin_attr,
632 		       char *buf, loff_t off, size_t count)
633 {
634 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
635 	    struct device, kobj)));
636 	int rval;
637 
638 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
639 		return 0;
640 
641 	mutex_lock(&vha->hw->optrom_mutex);
642 	if (qla2x00_chip_is_down(vha)) {
643 		mutex_unlock(&vha->hw->optrom_mutex);
644 		return 0;
645 	}
646 
647 	rval = qla2x00_read_sfp_dev(vha, buf, count);
648 	mutex_unlock(&vha->hw->optrom_mutex);
649 
650 	if (rval)
651 		return -EIO;
652 
653 	return count;
654 }
655 
656 static struct bin_attribute sysfs_sfp_attr = {
657 	.attr = {
658 		.name = "sfp",
659 		.mode = S_IRUSR | S_IWUSR,
660 	},
661 	.size = SFP_DEV_SIZE,
662 	.read = qla2x00_sysfs_read_sfp,
663 };
664 
665 static ssize_t
666 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
667 			struct bin_attribute *bin_attr,
668 			char *buf, loff_t off, size_t count)
669 {
670 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
671 	    struct device, kobj)));
672 	struct qla_hw_data *ha = vha->hw;
673 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
674 	int type;
675 	uint32_t idc_control;
676 	uint8_t *tmp_data = NULL;
677 
678 	if (off != 0)
679 		return -EINVAL;
680 
681 	type = simple_strtol(buf, NULL, 10);
682 	switch (type) {
683 	case 0x2025c:
684 		ql_log(ql_log_info, vha, 0x706e,
685 		    "Issuing ISP reset.\n");
686 
687 		scsi_block_requests(vha->host);
688 		if (IS_QLA82XX(ha)) {
689 			ha->flags.isp82xx_no_md_cap = 1;
690 			qla82xx_idc_lock(ha);
691 			qla82xx_set_reset_owner(vha);
692 			qla82xx_idc_unlock(ha);
693 		} else if (IS_QLA8044(ha)) {
694 			qla8044_idc_lock(ha);
695 			idc_control = qla8044_rd_reg(ha,
696 			    QLA8044_IDC_DRV_CTRL);
697 			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
698 			    (idc_control | GRACEFUL_RESET_BIT1));
699 			qla82xx_set_reset_owner(vha);
700 			qla8044_idc_unlock(ha);
701 		} else {
702 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
703 			qla2xxx_wake_dpc(vha);
704 		}
705 		qla2x00_wait_for_chip_reset(vha);
706 		scsi_unblock_requests(vha->host);
707 		break;
708 	case 0x2025d:
709 		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
710 			return -EPERM;
711 
712 		ql_log(ql_log_info, vha, 0x706f,
713 		    "Issuing MPI reset.\n");
714 
715 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
716 			uint32_t idc_control;
717 
718 			qla83xx_idc_lock(vha, 0);
719 			__qla83xx_get_idc_control(vha, &idc_control);
720 			idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
721 			__qla83xx_set_idc_control(vha, idc_control);
722 			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
723 			    QLA8XXX_DEV_NEED_RESET);
724 			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
725 			qla83xx_idc_unlock(vha, 0);
726 			break;
727 		} else {
728 			/* Make sure FC side is not in reset */
729 			qla2x00_wait_for_hba_online(vha);
730 
731 			/* Issue MPI reset */
732 			scsi_block_requests(vha->host);
733 			if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
734 				ql_log(ql_log_warn, vha, 0x7070,
735 				    "MPI reset failed.\n");
736 			scsi_unblock_requests(vha->host);
737 			break;
738 		}
739 	case 0x2025e:
740 		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
741 			ql_log(ql_log_info, vha, 0x7071,
742 			    "FCoE ctx reset not supported.\n");
743 			return -EPERM;
744 		}
745 
746 		ql_log(ql_log_info, vha, 0x7072,
747 		    "Issuing FCoE ctx reset.\n");
748 		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
749 		qla2xxx_wake_dpc(vha);
750 		qla2x00_wait_for_fcoe_ctx_reset(vha);
751 		break;
752 	case 0x2025f:
753 		if (!IS_QLA8031(ha))
754 			return -EPERM;
755 		ql_log(ql_log_info, vha, 0x70bc,
756 		    "Disabling Reset by IDC control\n");
757 		qla83xx_idc_lock(vha, 0);
758 		__qla83xx_get_idc_control(vha, &idc_control);
759 		idc_control |= QLA83XX_IDC_RESET_DISABLED;
760 		__qla83xx_set_idc_control(vha, idc_control);
761 		qla83xx_idc_unlock(vha, 0);
762 		break;
763 	case 0x20260:
764 		if (!IS_QLA8031(ha))
765 			return -EPERM;
766 		ql_log(ql_log_info, vha, 0x70bd,
767 		    "Enabling Reset by IDC control\n");
768 		qla83xx_idc_lock(vha, 0);
769 		__qla83xx_get_idc_control(vha, &idc_control);
770 		idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
771 		__qla83xx_set_idc_control(vha, idc_control);
772 		qla83xx_idc_unlock(vha, 0);
773 		break;
774 	case 0x20261:
775 		ql_dbg(ql_dbg_user, vha, 0x70e0,
		    "Updating cache versions without reset\n");
777 
778 		tmp_data = vmalloc(256);
779 		if (!tmp_data) {
780 			ql_log(ql_log_warn, vha, 0x70e1,
781 			    "Unable to allocate memory for VPD information update.\n");
782 			return -ENOMEM;
783 		}
784 		ha->isp_ops->get_flash_version(vha, tmp_data);
785 		vfree(tmp_data);
786 		break;
787 	}
788 	return count;
789 }
790 
791 static struct bin_attribute sysfs_reset_attr = {
792 	.attr = {
793 		.name = "reset",
794 		.mode = S_IWUSR,
795 	},
796 	.size = 0,
797 	.write = qla2x00_sysfs_write_reset,
798 };
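
/*
 * Reset codes accepted by qla2x00_sysfs_write_reset() above.  The value
 * is parsed in base 10, so user space must write the decimal form shown
 * in parentheses:
 *
 *	0x2025c (131676) -- full ISP reset
 *	0x2025d (131677) -- MPI reset (ISP81xx/ISP83xx only)
 *	0x2025e (131678) -- FCoE context reset (P3P base port only)
 *	0x2025f (131679) -- disable reset via IDC control (ISP8031 only)
 *	0x20260 (131680) -- enable reset via IDC control (ISP8031 only)
 *	0x20261 (131681) -- refresh cached flash versions without a reset
 */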
799 
800 static ssize_t
801 qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
802 			struct bin_attribute *bin_attr,
803 			char *buf, loff_t off, size_t count)
804 {
805 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
806 	    struct device, kobj)));
807 	int type;
808 	port_id_t did;
809 
810 	if (!capable(CAP_SYS_ADMIN))
811 		return 0;
812 
813 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
814 		return 0;
815 
816 	if (qla2x00_chip_is_down(vha))
817 		return 0;
818 
819 	type = simple_strtol(buf, NULL, 10);
820 
821 	did.b.domain = (type & 0x00ff0000) >> 16;
822 	did.b.area = (type & 0x0000ff00) >> 8;
823 	did.b.al_pa = (type & 0x000000ff);
824 
825 	ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
826 	    did.b.domain, did.b.area, did.b.al_pa);
827 
828 	ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
829 
830 	qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
831 	return count;
832 }
833 
834 static struct bin_attribute sysfs_issue_logo_attr = {
835 	.attr = {
836 		.name = "issue_logo",
837 		.mode = S_IWUSR,
838 	},
839 	.size = 0,
840 	.write = qla2x00_issue_logo,
841 };
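
/*
 * The value written to "issue_logo" is parsed as a base-10 integer whose
 * low 24 bits encode the destination port ID: bits 23-16 the domain,
 * bits 15-8 the area and bits 7-0 the AL_PA.  For example, writing the
 * decimal value 65793 (0x010101) sends an explicit LOGO to port ID
 * 01:01:01.
 */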
842 
843 static ssize_t
844 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
845 		       struct bin_attribute *bin_attr,
846 		       char *buf, loff_t off, size_t count)
847 {
848 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
849 	    struct device, kobj)));
850 	struct qla_hw_data *ha = vha->hw;
851 	int rval;
852 	uint16_t actual_size;
853 
854 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
855 		return 0;
856 
857 	if (unlikely(pci_channel_offline(ha->pdev)))
858 		return 0;
859 	mutex_lock(&vha->hw->optrom_mutex);
860 	if (qla2x00_chip_is_down(vha)) {
861 		mutex_unlock(&vha->hw->optrom_mutex);
862 		return 0;
863 	}
864 
865 	if (ha->xgmac_data)
866 		goto do_read;
867 
868 	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
869 	    &ha->xgmac_data_dma, GFP_KERNEL);
870 	if (!ha->xgmac_data) {
871 		mutex_unlock(&vha->hw->optrom_mutex);
872 		ql_log(ql_log_warn, vha, 0x7076,
873 		    "Unable to allocate memory for XGMAC read-data.\n");
874 		return 0;
875 	}
876 
877 do_read:
878 	actual_size = 0;
879 	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
880 
881 	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
882 	    XGMAC_DATA_SIZE, &actual_size);
883 
884 	mutex_unlock(&vha->hw->optrom_mutex);
885 	if (rval != QLA_SUCCESS) {
886 		ql_log(ql_log_warn, vha, 0x7077,
887 		    "Unable to read XGMAC data (%x).\n", rval);
888 		count = 0;
889 	}
890 
891 	count = actual_size > count ? count : actual_size;
892 	memcpy(buf, ha->xgmac_data, count);
893 
894 	return count;
895 }
896 
897 static struct bin_attribute sysfs_xgmac_stats_attr = {
898 	.attr = {
899 		.name = "xgmac_stats",
900 		.mode = S_IRUSR,
901 	},
902 	.size = 0,
903 	.read = qla2x00_sysfs_read_xgmac_stats,
904 };
905 
906 static ssize_t
907 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
908 		       struct bin_attribute *bin_attr,
909 		       char *buf, loff_t off, size_t count)
910 {
911 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
912 	    struct device, kobj)));
913 	struct qla_hw_data *ha = vha->hw;
914 	int rval;
915 
916 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
917 		return 0;
918 
	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	if (ha->dcbx_tlv)
		goto do_read;
926 
927 	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
928 	    &ha->dcbx_tlv_dma, GFP_KERNEL);
929 	if (!ha->dcbx_tlv) {
930 		mutex_unlock(&vha->hw->optrom_mutex);
931 		ql_log(ql_log_warn, vha, 0x7078,
932 		    "Unable to allocate memory for DCBX TLV read-data.\n");
933 		return -ENOMEM;
934 	}
935 
936 do_read:
937 	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
938 
939 	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
940 	    DCBX_TLV_DATA_SIZE);
941 
942 	mutex_unlock(&vha->hw->optrom_mutex);
943 
944 	if (rval != QLA_SUCCESS) {
945 		ql_log(ql_log_warn, vha, 0x7079,
946 		    "Unable to read DCBX TLV (%x).\n", rval);
947 		return -EIO;
948 	}
949 
950 	memcpy(buf, ha->dcbx_tlv, count);
951 
952 	return count;
953 }
954 
955 static struct bin_attribute sysfs_dcbx_tlv_attr = {
956 	.attr = {
957 		.name = "dcbx_tlv",
958 		.mode = S_IRUSR,
959 	},
960 	.size = 0,
961 	.read = qla2x00_sysfs_read_dcbx_tlv,
962 };
963 
964 static struct sysfs_entry {
965 	char *name;
966 	struct bin_attribute *attr;
967 	int type;
968 } bin_file_entries[] = {
969 	{ "fw_dump", &sysfs_fw_dump_attr, },
970 	{ "nvram", &sysfs_nvram_attr, },
971 	{ "optrom", &sysfs_optrom_attr, },
972 	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
973 	{ "vpd", &sysfs_vpd_attr, 1 },
974 	{ "sfp", &sysfs_sfp_attr, 1 },
975 	{ "reset", &sysfs_reset_attr, },
976 	{ "issue_logo", &sysfs_issue_logo_attr, },
977 	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
978 	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
979 	{ NULL },
980 };
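
/*
 * The "type" field gates creation and removal of the entries above (see
 * qla2x00_alloc_sysfs_attr() and qla2x00_free_sysfs_attr() below):
 * 0 (unset) applies to every ISP, any non-zero value requires an
 * FWI2-capable part, 2 additionally requires an ISP25xx and 3
 * additionally requires a CNA-capable part.
 */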
981 
982 void
983 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
984 {
985 	struct Scsi_Host *host = vha->host;
986 	struct sysfs_entry *iter;
987 	int ret;
988 
989 	for (iter = bin_file_entries; iter->name; iter++) {
990 		if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
991 			continue;
992 		if (iter->type == 2 && !IS_QLA25XX(vha->hw))
993 			continue;
994 		if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
995 			continue;
996 
997 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
998 		    iter->attr);
999 		if (ret)
1000 			ql_log(ql_log_warn, vha, 0x00f3,
1001 			    "Unable to create sysfs %s binary attribute (%d).\n",
1002 			    iter->name, ret);
1003 		else
1004 			ql_dbg(ql_dbg_init, vha, 0x00f4,
1005 			    "Successfully created sysfs %s binary attribute.\n",
1006 			    iter->name);
1007 	}
1008 }
1009 
1010 void
1011 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
1012 {
1013 	struct Scsi_Host *host = vha->host;
1014 	struct sysfs_entry *iter;
1015 	struct qla_hw_data *ha = vha->hw;
1016 
1017 	for (iter = bin_file_entries; iter->name; iter++) {
1018 		if (iter->type && !IS_FWI2_CAPABLE(ha))
1019 			continue;
1020 		if (iter->type == 2 && !IS_QLA25XX(ha))
1021 			continue;
1022 		if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
1023 			continue;
1024 		if (iter->type == 0x27 &&
		    (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
1026 			continue;
1027 
1028 		sysfs_remove_bin_file(&host->shost_gendev.kobj,
1029 		    iter->attr);
1030 	}
1031 
1032 	if (stop_beacon && ha->beacon_blink_led == 1)
1033 		ha->isp_ops->beacon_off(vha);
1034 }
1035 
1036 /* Scsi_Host attributes. */
1037 
1038 static ssize_t
1039 qla2x00_driver_version_show(struct device *dev,
1040 			  struct device_attribute *attr, char *buf)
1041 {
1042 	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
1043 }
1044 
1045 static ssize_t
1046 qla2x00_fw_version_show(struct device *dev,
1047 			struct device_attribute *attr, char *buf)
1048 {
1049 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1050 	struct qla_hw_data *ha = vha->hw;
1051 	char fw_str[128];
1052 
1053 	return scnprintf(buf, PAGE_SIZE, "%s\n",
1054 	    ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1055 }
1056 
1057 static ssize_t
1058 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1059 			char *buf)
1060 {
1061 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1062 	struct qla_hw_data *ha = vha->hw;
1063 	uint32_t sn;
1064 
1065 	if (IS_QLAFX00(vha->hw)) {
1066 		return scnprintf(buf, PAGE_SIZE, "%s\n",
1067 		    vha->hw->mr.serial_num);
1068 	} else if (IS_FWI2_CAPABLE(ha)) {
1069 		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1070 		return strlen(strcat(buf, "\n"));
1071 	}
1072 
1073 	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1074 	return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1075 	    sn % 100000);
1076 }
1077 
1078 static ssize_t
1079 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1080 		      char *buf)
1081 {
1082 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1083 
1084 	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1085 }
1086 
1087 static ssize_t
1088 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1089 		    char *buf)
1090 {
1091 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1092 	struct qla_hw_data *ha = vha->hw;
1093 
1094 	if (IS_QLAFX00(vha->hw))
1095 		return scnprintf(buf, PAGE_SIZE, "%s\n",
1096 		    vha->hw->mr.hw_version);
1097 
1098 	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1099 	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
1100 	    ha->product_id[3]);
1101 }
1102 
1103 static ssize_t
1104 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1105 			char *buf)
1106 {
1107 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1108 
1109 	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1110 }
1111 
1112 static ssize_t
1113 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1114 			char *buf)
1115 {
1116 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1117 
1118 	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1119 }
1120 
1121 static ssize_t
1122 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1123 		      char *buf)
1124 {
1125 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1126 	char pci_info[30];
1127 
1128 	return scnprintf(buf, PAGE_SIZE, "%s\n",
1129 	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
1130 }
1131 
1132 static ssize_t
1133 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1134 			char *buf)
1135 {
1136 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1137 	struct qla_hw_data *ha = vha->hw;
1138 	int len = 0;
1139 
1140 	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1141 	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
1142 	    vha->device_flags & DFLG_NO_CABLE)
1143 		len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1144 	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1145 	    qla2x00_chip_is_down(vha))
1146 		len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1147 	else {
1148 		len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1149 
1150 		switch (ha->current_topology) {
1151 		case ISP_CFG_NL:
1152 			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1153 			break;
1154 		case ISP_CFG_FL:
1155 			len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1156 			break;
1157 		case ISP_CFG_N:
1158 			len += scnprintf(buf + len, PAGE_SIZE-len,
1159 			    "N_Port to N_Port\n");
1160 			break;
1161 		case ISP_CFG_F:
1162 			len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1163 			break;
1164 		default:
1165 			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1166 			break;
1167 		}
1168 	}
1169 	return len;
1170 }
1171 
1172 static ssize_t
1173 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1174 		 char *buf)
1175 {
1176 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1177 	int len = 0;
1178 
1179 	switch (vha->hw->zio_mode) {
1180 	case QLA_ZIO_MODE_6:
1181 		len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1182 		break;
1183 	case QLA_ZIO_DISABLED:
1184 		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1185 		break;
1186 	}
1187 	return len;
1188 }
1189 
1190 static ssize_t
1191 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1192 		  const char *buf, size_t count)
1193 {
1194 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1195 	struct qla_hw_data *ha = vha->hw;
1196 	int val = 0;
1197 	uint16_t zio_mode;
1198 
1199 	if (!IS_ZIO_SUPPORTED(ha))
1200 		return -ENOTSUPP;
1201 
1202 	if (sscanf(buf, "%d", &val) != 1)
1203 		return -EINVAL;
1204 
1205 	if (val)
1206 		zio_mode = QLA_ZIO_MODE_6;
1207 	else
1208 		zio_mode = QLA_ZIO_DISABLED;
1209 
1210 	/* Update per-hba values and queue a reset. */
1211 	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1212 		ha->zio_mode = zio_mode;
1213 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1214 	}
1215 	return strlen(buf);
1216 }
1217 
1218 static ssize_t
1219 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1220 		       char *buf)
1221 {
1222 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1223 
1224 	return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1225 }
1226 
1227 static ssize_t
1228 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1229 			const char *buf, size_t count)
1230 {
1231 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1232 	int val = 0;
1233 	uint16_t zio_timer;
1234 
1235 	if (sscanf(buf, "%d", &val) != 1)
1236 		return -EINVAL;
1237 	if (val > 25500 || val < 100)
1238 		return -ERANGE;
1239 
1240 	zio_timer = (uint16_t)(val / 100);
1241 	vha->hw->zio_timer = zio_timer;
1242 
1243 	return strlen(buf);
1244 }
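
/*
 * The ZIO delay timer is stored in 100 microsecond units: the show
 * routine above multiplies the stored value by 100, and the store
 * routine accepts 100-25500 microseconds and divides by 100 before
 * saving the value.
 */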
1245 
1246 static ssize_t
1247 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1248 		       char *buf)
1249 {
1250 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1251 
1252 	return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1253 	    vha->hw->last_zio_threshold);
1254 }
1255 
1256 static ssize_t
1257 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1258     const char *buf, size_t count)
1259 {
1260 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1261 	int val = 0;
1262 
1263 	if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1264 		return -EINVAL;
1265 	if (sscanf(buf, "%d", &val) != 1)
1266 		return -EINVAL;
1267 	if (val < 0 || val > 256)
1268 		return -ERANGE;
1269 
1270 	atomic_set(&vha->hw->zio_threshold, val);
1271 	return strlen(buf);
1272 }
1273 
1274 static ssize_t
1275 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1276 		    char *buf)
1277 {
1278 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1279 	int len = 0;
1280 
1281 	if (vha->hw->beacon_blink_led)
1282 		len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1283 	else
1284 		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1285 	return len;
1286 }
1287 
1288 static ssize_t
1289 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1290 		     const char *buf, size_t count)
1291 {
1292 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1293 	struct qla_hw_data *ha = vha->hw;
1294 	int val = 0;
1295 	int rval;
1296 
1297 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1298 		return -EPERM;
1299 
1300 	if (sscanf(buf, "%d", &val) != 1)
1301 		return -EINVAL;
1302 
1303 	mutex_lock(&vha->hw->optrom_mutex);
1304 	if (qla2x00_chip_is_down(vha)) {
1305 		mutex_unlock(&vha->hw->optrom_mutex);
1306 		ql_log(ql_log_warn, vha, 0x707a,
1307 		    "Abort ISP active -- ignoring beacon request.\n");
1308 		return -EBUSY;
1309 	}
1310 
1311 	if (val)
1312 		rval = ha->isp_ops->beacon_on(vha);
1313 	else
1314 		rval = ha->isp_ops->beacon_off(vha);
1315 
1316 	if (rval != QLA_SUCCESS)
1317 		count = 0;
1318 
1319 	mutex_unlock(&vha->hw->optrom_mutex);
1320 
1321 	return count;
1322 }
1323 
1324 static ssize_t
1325 qla2x00_optrom_bios_version_show(struct device *dev,
1326 				 struct device_attribute *attr, char *buf)
1327 {
1328 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1329 	struct qla_hw_data *ha = vha->hw;
1330 
1331 	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1332 	    ha->bios_revision[0]);
1333 }
1334 
1335 static ssize_t
1336 qla2x00_optrom_efi_version_show(struct device *dev,
1337 				struct device_attribute *attr, char *buf)
1338 {
1339 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1340 	struct qla_hw_data *ha = vha->hw;
1341 
1342 	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1343 	    ha->efi_revision[0]);
1344 }
1345 
1346 static ssize_t
1347 qla2x00_optrom_fcode_version_show(struct device *dev,
1348 				  struct device_attribute *attr, char *buf)
1349 {
1350 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1351 	struct qla_hw_data *ha = vha->hw;
1352 
1353 	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1354 	    ha->fcode_revision[0]);
1355 }
1356 
1357 static ssize_t
1358 qla2x00_optrom_fw_version_show(struct device *dev,
1359 			       struct device_attribute *attr, char *buf)
1360 {
1361 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1362 	struct qla_hw_data *ha = vha->hw;
1363 
1364 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1365 	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1366 	    ha->fw_revision[3]);
1367 }
1368 
1369 static ssize_t
1370 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1371     struct device_attribute *attr, char *buf)
1372 {
1373 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1374 	struct qla_hw_data *ha = vha->hw;
1375 
1376 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1377 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1378 		return scnprintf(buf, PAGE_SIZE, "\n");
1379 
1380 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1381 	    ha->gold_fw_version[0], ha->gold_fw_version[1],
1382 	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
1383 }
1384 
1385 static ssize_t
1386 qla2x00_total_isp_aborts_show(struct device *dev,
1387 			      struct device_attribute *attr, char *buf)
1388 {
1389 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1390 
1391 	return scnprintf(buf, PAGE_SIZE, "%d\n",
1392 	    vha->qla_stats.total_isp_aborts);
1393 }
1394 
1395 static ssize_t
1396 qla24xx_84xx_fw_version_show(struct device *dev,
1397 	struct device_attribute *attr, char *buf)
1398 {
1399 	int rval = QLA_SUCCESS;
1400 	uint16_t status[2] = { 0 };
1401 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1402 	struct qla_hw_data *ha = vha->hw;
1403 
1404 	if (!IS_QLA84XX(ha))
1405 		return scnprintf(buf, PAGE_SIZE, "\n");
1406 
1407 	if (!ha->cs84xx->op_fw_version) {
1408 		rval = qla84xx_verify_chip(vha, status);
1409 
1410 		if (!rval && !status[0])
1411 			return scnprintf(buf, PAGE_SIZE, "%u\n",
1412 			    (uint32_t)ha->cs84xx->op_fw_version);
1413 	}
1414 
1415 	return scnprintf(buf, PAGE_SIZE, "\n");
1416 }
1417 
1418 static ssize_t
1419 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
1420     char *buf)
1421 {
1422 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1423 	struct qla_hw_data *ha = vha->hw;
1424 
1425 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1426 		return scnprintf(buf, PAGE_SIZE, "\n");
1427 
1428 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1429 	    ha->serdes_version[0], ha->serdes_version[1],
1430 	    ha->serdes_version[2]);
1431 }
1432 
1433 static ssize_t
1434 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1435     char *buf)
1436 {
1437 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1438 	struct qla_hw_data *ha = vha->hw;
1439 
1440 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1441 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1442 		return scnprintf(buf, PAGE_SIZE, "\n");
1443 
1444 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1445 	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1446 	    ha->mpi_capabilities);
1447 }
1448 
1449 static ssize_t
1450 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1451     char *buf)
1452 {
1453 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1454 	struct qla_hw_data *ha = vha->hw;
1455 
1456 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1457 		return scnprintf(buf, PAGE_SIZE, "\n");
1458 
1459 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1460 	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1461 }
1462 
1463 static ssize_t
1464 qla2x00_flash_block_size_show(struct device *dev,
1465 			      struct device_attribute *attr, char *buf)
1466 {
1467 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1468 	struct qla_hw_data *ha = vha->hw;
1469 
1470 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1471 }
1472 
1473 static ssize_t
1474 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1475     char *buf)
1476 {
1477 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1478 
1479 	if (!IS_CNA_CAPABLE(vha->hw))
1480 		return scnprintf(buf, PAGE_SIZE, "\n");
1481 
1482 	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1483 }
1484 
1485 static ssize_t
1486 qla2x00_vn_port_mac_address_show(struct device *dev,
1487     struct device_attribute *attr, char *buf)
1488 {
1489 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1490 
1491 	if (!IS_CNA_CAPABLE(vha->hw))
1492 		return scnprintf(buf, PAGE_SIZE, "\n");
1493 
1494 	return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1495 }
1496 
1497 static ssize_t
1498 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1499     char *buf)
1500 {
1501 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1502 
1503 	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1504 }
1505 
1506 static ssize_t
1507 qla2x00_thermal_temp_show(struct device *dev,
1508 	struct device_attribute *attr, char *buf)
1509 {
1510 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1511 	uint16_t temp = 0;
1512 	int rc;
1513 
1514 	mutex_lock(&vha->hw->optrom_mutex);
1515 	if (qla2x00_chip_is_down(vha)) {
1516 		mutex_unlock(&vha->hw->optrom_mutex);
1517 		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1518 		goto done;
1519 	}
1520 
1521 	if (vha->hw->flags.eeh_busy) {
1522 		mutex_unlock(&vha->hw->optrom_mutex);
1523 		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1524 		goto done;
1525 	}
1526 
1527 	rc = qla2x00_get_thermal_temp(vha, &temp);
1528 	mutex_unlock(&vha->hw->optrom_mutex);
1529 	if (rc == QLA_SUCCESS)
1530 		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1531 
1532 done:
1533 	return scnprintf(buf, PAGE_SIZE, "\n");
1534 }
1535 
1536 static ssize_t
1537 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1538     char *buf)
1539 {
1540 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1541 	int rval = QLA_FUNCTION_FAILED;
1542 	uint16_t state[6];
1543 	uint32_t pstate;
1544 
1545 	if (IS_QLAFX00(vha->hw)) {
1546 		pstate = qlafx00_fw_state_show(dev, attr, buf);
1547 		return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1548 	}
1549 
1550 	mutex_lock(&vha->hw->optrom_mutex);
1551 	if (qla2x00_chip_is_down(vha)) {
1552 		mutex_unlock(&vha->hw->optrom_mutex);
1553 		ql_log(ql_log_warn, vha, 0x707c,
1554 		    "ISP reset active.\n");
1555 		goto out;
1556 	} else if (vha->hw->flags.eeh_busy) {
1557 		mutex_unlock(&vha->hw->optrom_mutex);
1558 		goto out;
1559 	}
1560 
1561 	rval = qla2x00_get_firmware_state(vha, state);
1562 	mutex_unlock(&vha->hw->optrom_mutex);
1563 out:
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));
1568 
1569 	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1570 	    state[0], state[1], state[2], state[3], state[4], state[5]);
1571 }
1572 
1573 static ssize_t
1574 qla2x00_diag_requests_show(struct device *dev,
1575 	struct device_attribute *attr, char *buf)
1576 {
1577 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1578 
1579 	if (!IS_BIDI_CAPABLE(vha->hw))
1580 		return scnprintf(buf, PAGE_SIZE, "\n");
1581 
1582 	return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1583 }
1584 
1585 static ssize_t
1586 qla2x00_diag_megabytes_show(struct device *dev,
1587 	struct device_attribute *attr, char *buf)
1588 {
1589 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1590 
1591 	if (!IS_BIDI_CAPABLE(vha->hw))
1592 		return scnprintf(buf, PAGE_SIZE, "\n");
1593 
1594 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
1595 	    vha->bidi_stats.transfer_bytes >> 20);
1596 }
1597 
1598 static ssize_t
1599 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1600 	char *buf)
1601 {
1602 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1603 	struct qla_hw_data *ha = vha->hw;
1604 	uint32_t size;
1605 
1606 	if (!ha->fw_dumped)
1607 		size = 0;
1608 	else if (IS_P3P_TYPE(ha))
1609 		size = ha->md_template_size + ha->md_dump_size;
1610 	else
1611 		size = ha->fw_dump_len;
1612 
1613 	return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1614 }
1615 
1616 static ssize_t
1617 qla2x00_allow_cna_fw_dump_show(struct device *dev,
1618 	struct device_attribute *attr, char *buf)
1619 {
1620 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1621 
1622 	if (!IS_P3P_TYPE(vha->hw))
1623 		return scnprintf(buf, PAGE_SIZE, "\n");
1624 	else
1625 		return scnprintf(buf, PAGE_SIZE, "%s\n",
1626 		    vha->hw->allow_cna_fw_dump ? "true" : "false");
1627 }
1628 
1629 static ssize_t
1630 qla2x00_allow_cna_fw_dump_store(struct device *dev,
1631 	struct device_attribute *attr, const char *buf, size_t count)
1632 {
1633 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1634 	int val = 0;
1635 
1636 	if (!IS_P3P_TYPE(vha->hw))
1637 		return -EINVAL;
1638 
1639 	if (sscanf(buf, "%d", &val) != 1)
1640 		return -EINVAL;
1641 
1642 	vha->hw->allow_cna_fw_dump = val != 0;
1643 
1644 	return strlen(buf);
1645 }
1646 
1647 static ssize_t
1648 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1649 	char *buf)
1650 {
1651 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1652 	struct qla_hw_data *ha = vha->hw;
1653 
1654 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1655 		return scnprintf(buf, PAGE_SIZE, "\n");
1656 
1657 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1658 	    ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
1659 }
1660 
1661 static ssize_t
1662 qla2x00_min_supported_speed_show(struct device *dev,
1663     struct device_attribute *attr, char *buf)
1664 {
1665 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1666 	struct qla_hw_data *ha = vha->hw;
1667 
1668 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1669 		return scnprintf(buf, PAGE_SIZE, "\n");
1670 
1671 	return scnprintf(buf, PAGE_SIZE, "%s\n",
	    ha->min_supported_speed == 6 ? "64Gbps" :
	    ha->min_supported_speed == 5 ? "32Gbps" :
	    ha->min_supported_speed == 4 ? "16Gbps" :
	    ha->min_supported_speed == 3 ? "8Gbps" :
	    ha->min_supported_speed == 2 ? "4Gbps" :
	    ha->min_supported_speed != 0 ? "unknown" : "");
1678 }
1679 
1680 static ssize_t
1681 qla2x00_max_supported_speed_show(struct device *dev,
1682     struct device_attribute *attr, char *buf)
1683 {
1684 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1685 	struct qla_hw_data *ha = vha->hw;
1686 
1687 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1688 		return scnprintf(buf, PAGE_SIZE, "\n");
1689 
1690 	return scnprintf(buf, PAGE_SIZE, "%s\n",
	    ha->max_supported_speed == 2 ? "64Gbps" :
	    ha->max_supported_speed == 1 ? "32Gbps" :
	    ha->max_supported_speed == 0 ? "16Gbps" : "unknown");
1694 }
1695 
1696 static ssize_t
1697 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1698     const char *buf, size_t count)
1699 {
1700 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1701 	ulong type, speed;
1702 	int oldspeed, rval;
1703 	int mode = QLA_SET_DATA_RATE_LR;
1704 	struct qla_hw_data *ha = vha->hw;
1705 
1706 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
1707 		ql_log(ql_log_warn, vha, 0x70d8,
		    "Speed setting not supported\n");
1709 		return -EINVAL;
1710 	}
1711 
1712 	rval = kstrtol(buf, 10, &type);
1713 	if (rval)
1714 		return rval;
1715 	speed = type;
1716 	if (type == 40 || type == 80 || type == 160 ||
1717 	    type == 320) {
1718 		ql_dbg(ql_dbg_user, vha, 0x70d9,
1719 		    "Setting will be affected after a loss of sync\n");
1720 		type = type/10;
1721 		mode = QLA_SET_DATA_RATE_NOLR;
1722 	}
1723 
1724 	oldspeed = ha->set_data_rate;
1725 
1726 	switch (type) {
1727 	case 0:
1728 		ha->set_data_rate = PORT_SPEED_AUTO;
1729 		break;
1730 	case 4:
1731 		ha->set_data_rate = PORT_SPEED_4GB;
1732 		break;
1733 	case 8:
1734 		ha->set_data_rate = PORT_SPEED_8GB;
1735 		break;
1736 	case 16:
1737 		ha->set_data_rate = PORT_SPEED_16GB;
1738 		break;
1739 	case 32:
1740 		ha->set_data_rate = PORT_SPEED_32GB;
1741 		break;
1742 	default:
1743 		ql_log(ql_log_warn, vha, 0x1199,
1744 		    "Unrecognized speed setting:%lx. Setting Autoneg\n",
1745 		    speed);
1746 		ha->set_data_rate = PORT_SPEED_AUTO;
1747 	}
1748 
1749 	if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1750 		return -EINVAL;
1751 
1752 	ql_log(ql_log_info, vha, 0x70da,
	    "Setting speed to %lx Gbps\n", type);
1754 
1755 	rval = qla2x00_set_data_rate(vha, mode);
1756 	if (rval != QLA_SUCCESS)
1757 		return -EIO;
1758 
1759 	return strlen(buf);
1760 }
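
/*
 * Accepted values for the "port_speed" store routine above: 0 selects
 * auto-negotiation and 4, 8, 16 or 32 select a fixed rate in Gbps.
 * Writing the value multiplied by ten (40, 80, 160 or 320) selects the
 * same rate in QLA_SET_DATA_RATE_NOLR mode, where the new setting only
 * takes effect after a loss of sync.  Unrecognized values fall back to
 * auto-negotiation.
 */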
1761 
1762 static ssize_t
1763 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1764     char *buf)
1765 {
1766 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1767 	struct qla_hw_data *ha = vha->hw;
1768 	ssize_t rval;
1769 	char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
1770 
1771 	rval = qla2x00_get_data_rate(vha);
1772 	if (rval != QLA_SUCCESS) {
1773 		ql_log(ql_log_warn, vha, 0x70db,
1774 		    "Unable to get port speed rval:%zd\n", rval);
1775 		return -EINVAL;
1776 	}
1777 
1778 	ql_log(ql_log_info, vha, 0x70d6,
1779 	    "port speed:%d\n", ha->link_data_rate);
1780 
1781 	return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
1782 }
1783 
1784 /* ----- */
1785 
1786 static ssize_t
1787 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1788 {
1789 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1790 	int len = 0;
1791 
1792 	len += scnprintf(buf + len, PAGE_SIZE-len,
1793 	    "Supported options: enabled | disabled | dual | exclusive\n");
1794 
1795 	/* --- */
1796 	len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1797 
1798 	switch (vha->qlini_mode) {
1799 	case QLA2XXX_INI_MODE_EXCLUSIVE:
1800 		len += scnprintf(buf + len, PAGE_SIZE-len,
1801 		    QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1802 		break;
1803 	case QLA2XXX_INI_MODE_DISABLED:
1804 		len += scnprintf(buf + len, PAGE_SIZE-len,
1805 		    QLA2XXX_INI_MODE_STR_DISABLED);
1806 		break;
1807 	case QLA2XXX_INI_MODE_ENABLED:
1808 		len += scnprintf(buf + len, PAGE_SIZE-len,
1809 		    QLA2XXX_INI_MODE_STR_ENABLED);
1810 		break;
1811 	case QLA2XXX_INI_MODE_DUAL:
1812 		len += scnprintf(buf + len, PAGE_SIZE-len,
1813 		    QLA2XXX_INI_MODE_STR_DUAL);
1814 		break;
1815 	}
1816 	len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1817 
1818 	return len;
1819 }
1820 
1821 static char *mode_to_str[] = {
1822 	"exclusive",
1823 	"disabled",
1824 	"enabled",
1825 	"dual",
1826 };
1827 
1828 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
1829 static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1830 {
1831 	int rc = 0;
1832 	enum {
1833 		NO_ACTION,
1834 		MODE_CHANGE_ACCEPT,
1835 		MODE_CHANGE_NO_ACTION,
1836 		TARGET_STILL_ACTIVE,
1837 	};
1838 	int action = NO_ACTION;
1839 	int set_mode = 0;
1840 	u8  eo_toggle = 0;	/* exchange offload flipped */
1841 
1842 	switch (vha->qlini_mode) {
1843 	case QLA2XXX_INI_MODE_DISABLED:
1844 		switch (op) {
1845 		case QLA2XXX_INI_MODE_DISABLED:
1846 			if (qla_tgt_mode_enabled(vha)) {
1847 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1848 				    vha->hw->flags.exchoffld_enabled)
1849 					eo_toggle = 1;
1850 				if (((vha->ql2xexchoffld !=
1851 				    vha->u_ql2xexchoffld) &&
1852 				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1853 				    eo_toggle) {
1854 					/*
1855 					 * The number of exchange to be offload
1856 					 * was tweaked or offload option was
1857 					 * flipped
1858 					 */
1859 					action = MODE_CHANGE_ACCEPT;
1860 				} else {
1861 					action = MODE_CHANGE_NO_ACTION;
1862 				}
1863 			} else {
1864 				action = MODE_CHANGE_NO_ACTION;
1865 			}
1866 			break;
1867 		case QLA2XXX_INI_MODE_EXCLUSIVE:
1868 			if (qla_tgt_mode_enabled(vha)) {
1869 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1870 				    vha->hw->flags.exchoffld_enabled)
1871 					eo_toggle = 1;
1872 				if (((vha->ql2xexchoffld !=
1873 				    vha->u_ql2xexchoffld) &&
1874 				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1875 				    eo_toggle) {
1876 					/*
1877 					 * The number of exchange to be offload
1878 					 * was tweaked or offload option was
1879 					 * flipped
1880 					 */
1881 					action = MODE_CHANGE_ACCEPT;
1882 				} else {
1883 					action = MODE_CHANGE_NO_ACTION;
1884 				}
1885 			} else {
1886 				action = MODE_CHANGE_ACCEPT;
1887 			}
1888 			break;
1889 		case QLA2XXX_INI_MODE_DUAL:
1890 			action = MODE_CHANGE_ACCEPT;
1891 			/* active_mode is target only, reset it to dual */
1892 			if (qla_tgt_mode_enabled(vha)) {
1893 				set_mode = 1;
1894 				action = MODE_CHANGE_ACCEPT;
1895 			} else {
1896 				action = MODE_CHANGE_NO_ACTION;
1897 			}
1898 			break;
1899 
1900 		case QLA2XXX_INI_MODE_ENABLED:
1901 			if (qla_tgt_mode_enabled(vha))
1902 				action = TARGET_STILL_ACTIVE;
1903 			else {
1904 				action = MODE_CHANGE_ACCEPT;
1905 				set_mode = 1;
1906 			}
1907 			break;
1908 		}
1909 		break;
1910 
1911 	case QLA2XXX_INI_MODE_EXCLUSIVE:
1912 		switch (op) {
1913 		case QLA2XXX_INI_MODE_EXCLUSIVE:
1914 			if (qla_tgt_mode_enabled(vha)) {
1915 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1916 				    vha->hw->flags.exchoffld_enabled)
1917 					eo_toggle = 1;
1918 				if (((vha->ql2xexchoffld !=
1919 				    vha->u_ql2xexchoffld) &&
1920 				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1921 				    eo_toggle)
1922 					/*
1923 					 * The number of exchanges to be
1924 					 * offloaded was tweaked or the offload
1925 					 * option was flipped
1926 					 */
1927 					action = MODE_CHANGE_ACCEPT;
1928 				else
1929 					action = NO_ACTION;
1930 			} else
1931 				action = NO_ACTION;
1932 
1933 			break;
1934 
1935 		case QLA2XXX_INI_MODE_DISABLED:
1936 			if (qla_tgt_mode_enabled(vha)) {
1937 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1938 				    vha->hw->flags.exchoffld_enabled)
1939 					eo_toggle = 1;
1940 				if (((vha->ql2xexchoffld !=
1941 				      vha->u_ql2xexchoffld) &&
1942 				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1943 				    eo_toggle)
1944 					action = MODE_CHANGE_ACCEPT;
1945 				else
1946 					action = MODE_CHANGE_NO_ACTION;
1947 			} else
1948 				action = MODE_CHANGE_NO_ACTION;
1949 			break;
1950 
1951 		case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
1952 			if (qla_tgt_mode_enabled(vha)) {
1953 				action = MODE_CHANGE_ACCEPT;
1954 				set_mode = 1;
1955 			} else
1956 				action = MODE_CHANGE_ACCEPT;
1957 			break;
1958 
1959 		case QLA2XXX_INI_MODE_ENABLED:
1960 			if (qla_tgt_mode_enabled(vha))
1961 				action = TARGET_STILL_ACTIVE;
1962 			else {
1963 				if (vha->hw->flags.fw_started)
1964 					action = MODE_CHANGE_NO_ACTION;
1965 				else
1966 					action = MODE_CHANGE_ACCEPT;
1967 			}
1968 			break;
1969 		}
1970 		break;
1971 
1972 	case QLA2XXX_INI_MODE_ENABLED:
1973 		switch (op) {
1974 		case QLA2XXX_INI_MODE_ENABLED:
1975 			if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
1976 			    vha->hw->flags.exchoffld_enabled)
1977 				eo_toggle = 1;
1978 			if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
1979 				NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
1980 			    eo_toggle)
1981 				action = MODE_CHANGE_ACCEPT;
1982 			else
1983 				action = NO_ACTION;
1984 			break;
1985 		case QLA2XXX_INI_MODE_DUAL:
1986 		case QLA2XXX_INI_MODE_DISABLED:
1987 			action = MODE_CHANGE_ACCEPT;
1988 			break;
1989 		default:
1990 			action = MODE_CHANGE_NO_ACTION;
1991 			break;
1992 		}
1993 		break;
1994 
1995 	case QLA2XXX_INI_MODE_DUAL:
1996 		switch (op) {
1997 		case QLA2XXX_INI_MODE_DUAL:
1998 			if (qla_tgt_mode_enabled(vha) ||
1999 			    qla_dual_mode_enabled(vha)) {
2000 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2001 					vha->u_ql2xiniexchg) !=
2002 				    vha->hw->flags.exchoffld_enabled)
2003 					eo_toggle = 1;
2004 
2005 				if ((((vha->ql2xexchoffld +
2006 				       vha->ql2xiniexchg) !=
2007 				    (vha->u_ql2xiniexchg +
2008 				     vha->u_ql2xexchoffld)) &&
2009 				    NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2010 					vha->u_ql2xexchoffld)) || eo_toggle)
2011 					action = MODE_CHANGE_ACCEPT;
2012 				else
2013 					action = NO_ACTION;
2014 			} else {
2015 				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2016 					vha->u_ql2xiniexchg) !=
2017 				    vha->hw->flags.exchoffld_enabled)
2018 					eo_toggle = 1;
2019 
2020 				if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
2021 				    != (vha->u_ql2xiniexchg +
2022 					vha->u_ql2xexchoffld)) &&
2023 				    NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2024 					vha->u_ql2xexchoffld)) || eo_toggle)
2025 					action = MODE_CHANGE_NO_ACTION;
2026 				else
2027 					action = NO_ACTION;
2028 			}
2029 			break;
2030 
2031 		case QLA2XXX_INI_MODE_DISABLED:
2032 			if (qla_tgt_mode_enabled(vha) ||
2033 			    qla_dual_mode_enabled(vha)) {
2034 				/* turning off initiator mode */
2035 				set_mode = 1;
2036 				action = MODE_CHANGE_ACCEPT;
2037 			} else {
2038 				action = MODE_CHANGE_NO_ACTION;
2039 			}
2040 			break;
2041 
2042 		case QLA2XXX_INI_MODE_EXCLUSIVE:
2043 			if (qla_tgt_mode_enabled(vha) ||
2044 			    qla_dual_mode_enabled(vha)) {
2045 				set_mode = 1;
2046 				action = MODE_CHANGE_ACCEPT;
2047 			} else {
2048 				action = MODE_CHANGE_ACCEPT;
2049 			}
2050 			break;
2051 
2052 		case QLA2XXX_INI_MODE_ENABLED:
2053 			if (qla_tgt_mode_enabled(vha) ||
2054 			    qla_dual_mode_enabled(vha)) {
2055 				action = TARGET_STILL_ACTIVE;
2056 			} else {
2057 				action = MODE_CHANGE_ACCEPT;
2058 			}
2059 		}
2060 		break;
2061 	}
2062 
2063 	switch (action) {
2064 	case MODE_CHANGE_ACCEPT:
2065 		ql_log(ql_log_warn, vha, 0xffff,
2066 		    "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2067 		    mode_to_str[vha->qlini_mode], mode_to_str[op],
2068 		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2069 		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2070 
2071 		vha->qlini_mode = op;
2072 		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2073 		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2074 		if (set_mode)
2075 			qlt_set_mode(vha);
2076 		vha->flags.online = 1;
2077 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2078 		break;
2079 
2080 	case MODE_CHANGE_NO_ACTION:
2081 		ql_log(ql_log_warn, vha, 0xffff,
2082 		    "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2083 		    mode_to_str[vha->qlini_mode], mode_to_str[op],
2084 		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2085 		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2086 		vha->qlini_mode = op;
2087 		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2088 		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2089 		break;
2090 
2091 	case TARGET_STILL_ACTIVE:
2092 		ql_log(ql_log_warn, vha, 0xffff,
2093 		    "Target Mode is active. Unable to change Mode.\n");
2094 		break;
2095 
2096 	case NO_ACTION:
2097 	default:
2098 		ql_log(ql_log_warn, vha, 0xffff,
2099 		    "Mode unchanged. No action taken. %d|%d pct %d|%d.\n",
2100 		    vha->qlini_mode, op,
2101 		    vha->ql2xexchoffld, vha->u_ql2xexchoffld);
2102 		break;
2103 	}
2104 
2105 	return rc;
2106 }
2107 
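/*
 * sysfs store for qlini_mode: match one of the QLA2XXX_INI_MODE_STR_*
 * keywords (case-insensitive prefix) and hand the selection to
 * qla_set_ini_mode().
 */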
2108 static ssize_t
2109 qlini_mode_store(struct device *dev, struct device_attribute *attr,
2110     const char *buf, size_t count)
2111 {
2112 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2113 	int ini;
2114 
2115 	if (!buf)
2116 		return -EINVAL;
2117 
2118 	if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2119 		strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2120 		ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2121 	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2122 		strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2123 		ini = QLA2XXX_INI_MODE_DISABLED;
2124 	else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2125 		  strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2126 		ini = QLA2XXX_INI_MODE_ENABLED;
2127 	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2128 		strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2129 		ini = QLA2XXX_INI_MODE_DUAL;
2130 	else
2131 		return -EINVAL;
2132 
2133 	qla_set_ini_mode(vha, ini);
2134 	return strlen(buf);
2135 }
2136 
2137 static ssize_t
2138 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2139     char *buf)
2140 {
2141 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2142 	int len = 0;
2143 
2144 	len += scnprintf(buf + len, PAGE_SIZE-len,
2145 		"target exchange: new %d : current: %d\n\n",
2146 		vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2147 
2148 	len += scnprintf(buf + len, PAGE_SIZE-len,
2149 	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2150 	    vha->host_no);
2151 
2152 	return len;
2153 }
2154 
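/*
 * Stage a new target exchange count in u_ql2xexchoffld, clamped to
 * [0, FW_MAX_EXCHANGES_CNT].  As the _show output above notes, it only takes
 * effect once qlini_mode is (re)written.
 */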
2155 static ssize_t
2156 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2157     const char *buf, size_t count)
2158 {
2159 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2160 	int val = 0;
2161 
2162 	if (sscanf(buf, "%d", &val) != 1)
2163 		return -EINVAL;
2164 
2165 	if (val > FW_MAX_EXCHANGES_CNT)
2166 		val = FW_MAX_EXCHANGES_CNT;
2167 	else if (val < 0)
2168 		val = 0;
2169 
2170 	vha->u_ql2xexchoffld = val;
2171 	return strlen(buf);
2172 }
2173 
2174 static ssize_t
2175 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2176     char *buf)
2177 {
2178 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2179 	int len = 0;
2180 
2181 	len += scnprintf(buf + len, PAGE_SIZE-len,
2182 		"initiator exchange: new %d : current: %d\n\n",
2183 		vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2184 
2185 	len += scnprintf(buf + len, PAGE_SIZE-len,
2186 	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2187 	    vha->host_no);
2188 
2189 	return len;
2190 }
2191 
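/*
 * Same as ql2xexchoffld_store, but stages the initiator exchange count in
 * u_ql2xiniexchg.
 */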
2192 static ssize_t
2193 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2194     const char *buf, size_t count)
2195 {
2196 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2197 	int val = 0;
2198 
2199 	if (sscanf(buf, "%d", &val) != 1)
2200 		return -EINVAL;
2201 
2202 	if (val > FW_MAX_EXCHANGES_CNT)
2203 		val = FW_MAX_EXCHANGES_CNT;
2204 	else if (val < 0)
2205 		val = 0;
2206 
2207 	vha->u_ql2xiniexchg = val;
2208 	return strlen(buf);
2209 }
2210 
2211 static ssize_t
2212 qla2x00_dif_bundle_statistics_show(struct device *dev,
2213     struct device_attribute *attr, char *buf)
2214 {
2215 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2216 	struct qla_hw_data *ha = vha->hw;
2217 
2218 	return scnprintf(buf, PAGE_SIZE,
2219 	    "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2220 	    ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2221 	    ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2222 	    ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
2223 }
2224 
2225 static ssize_t
2226 qla2x00_fw_attr_show(struct device *dev,
2227     struct device_attribute *attr, char *buf)
2228 {
2229 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2230 	struct qla_hw_data *ha = vha->hw;
2231 
2232 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2233 		return scnprintf(buf, PAGE_SIZE, "\n");
2234 
2235 	return scnprintf(buf, PAGE_SIZE, "%llx\n",
2236 	    (uint64_t)ha->fw_attributes_ext[1] << 48 |
2237 	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
2238 	    (uint64_t)ha->fw_attributes_h << 16 |
2239 	    (uint64_t)ha->fw_attributes);
2240 }
2241 
2242 static ssize_t
2243 qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
2244     char *buf)
2245 {
2246 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2247 
2248 	return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
2249 }
2250 
2251 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
2252 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2253 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2254 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2255 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2256 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2257 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2258 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2259 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2260 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2261 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2262 		   qla2x00_zio_timer_store);
2263 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2264 		   qla2x00_beacon_store);
2265 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2266 		   qla2x00_optrom_bios_version_show, NULL);
2267 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2268 		   qla2x00_optrom_efi_version_show, NULL);
2269 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2270 		   qla2x00_optrom_fcode_version_show, NULL);
2271 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2272 		   NULL);
2273 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2274     qla2x00_optrom_gold_fw_version_show, NULL);
2275 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2276 		   NULL);
2277 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2278 		   NULL);
2279 static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
2280 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2281 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2282 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2283 		   NULL);
2284 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2285 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2286 		   qla2x00_vn_port_mac_address_show, NULL);
2287 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2288 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2289 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2290 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2291 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2292 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2293 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2294 		   qla2x00_allow_cna_fw_dump_show,
2295 		   qla2x00_allow_cna_fw_dump_store);
2296 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2297 static DEVICE_ATTR(min_supported_speed, 0444,
2298 		   qla2x00_min_supported_speed_show, NULL);
2299 static DEVICE_ATTR(max_supported_speed, 0444,
2300 		   qla2x00_max_supported_speed_show, NULL);
2301 static DEVICE_ATTR(zio_threshold, 0644,
2302     qla_zio_threshold_show,
2303     qla_zio_threshold_store);
2304 static DEVICE_ATTR_RW(qlini_mode);
2305 static DEVICE_ATTR_RW(ql2xexchoffld);
2306 static DEVICE_ATTR_RW(ql2xiniexchg);
2307 static DEVICE_ATTR(dif_bundle_statistics, 0444,
2308     qla2x00_dif_bundle_statistics_show, NULL);
2309 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2310     qla2x00_port_speed_store);
2311 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
2312 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
2313 
2314 
2315 struct device_attribute *qla2x00_host_attrs[] = {
2316 	&dev_attr_driver_version,
2317 	&dev_attr_fw_version,
2318 	&dev_attr_serial_num,
2319 	&dev_attr_isp_name,
2320 	&dev_attr_isp_id,
2321 	&dev_attr_model_name,
2322 	&dev_attr_model_desc,
2323 	&dev_attr_pci_info,
2324 	&dev_attr_link_state,
2325 	&dev_attr_zio,
2326 	&dev_attr_zio_timer,
2327 	&dev_attr_beacon,
2328 	&dev_attr_optrom_bios_version,
2329 	&dev_attr_optrom_efi_version,
2330 	&dev_attr_optrom_fcode_version,
2331 	&dev_attr_optrom_fw_version,
2332 	&dev_attr_84xx_fw_version,
2333 	&dev_attr_total_isp_aborts,
2334 	&dev_attr_serdes_version,
2335 	&dev_attr_mpi_version,
2336 	&dev_attr_phy_version,
2337 	&dev_attr_flash_block_size,
2338 	&dev_attr_vlan_id,
2339 	&dev_attr_vn_port_mac_address,
2340 	&dev_attr_fabric_param,
2341 	&dev_attr_fw_state,
2342 	&dev_attr_optrom_gold_fw_version,
2343 	&dev_attr_thermal_temp,
2344 	&dev_attr_diag_requests,
2345 	&dev_attr_diag_megabytes,
2346 	&dev_attr_fw_dump_size,
2347 	&dev_attr_allow_cna_fw_dump,
2348 	&dev_attr_pep_version,
2349 	&dev_attr_min_supported_speed,
2350 	&dev_attr_max_supported_speed,
2351 	&dev_attr_zio_threshold,
2352 	&dev_attr_dif_bundle_statistics,
2353 	&dev_attr_port_speed,
2354 	&dev_attr_port_no,
2355 	&dev_attr_fw_attr,
2356 	NULL, /* reserved for qlini_mode */
2357 	NULL, /* reserved for ql2xiniexchg */
2358 	NULL, /* reserved for ql2xexchoffld */
2359 	NULL,
2360 };
2361 
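/*
 * Append the target-mode attributes (qlini_mode, ql2xiniexchg, ql2xexchoffld)
 * into the NULL placeholder slots reserved at the end of qla2x00_host_attrs[].
 */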
2362 void qla_insert_tgt_attrs(void)
2363 {
2364 	struct device_attribute **attr;
2365 
2366 	/* advance to empty slot */
2367 	for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2368 		continue;
2369 
2370 	*attr = &dev_attr_qlini_mode;
2371 	attr++;
2372 	*attr = &dev_attr_ql2xiniexchg;
2373 	attr++;
2374 	*attr = &dev_attr_ql2xexchoffld;
2375 }
2376 
2377 /* Host attributes. */
2378 
2379 static void
2380 qla2x00_get_host_port_id(struct Scsi_Host *shost)
2381 {
2382 	scsi_qla_host_t *vha = shost_priv(shost);
2383 
2384 	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2385 	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
2386 }
2387 
2388 static void
2389 qla2x00_get_host_speed(struct Scsi_Host *shost)
2390 {
2391 	scsi_qla_host_t *vha = shost_priv(shost);
2392 	u32 speed;
2393 
2394 	if (IS_QLAFX00(vha->hw)) {
2395 		qlafx00_get_host_speed(shost);
2396 		return;
2397 	}
2398 
2399 	switch (vha->hw->link_data_rate) {
2400 	case PORT_SPEED_1GB:
2401 		speed = FC_PORTSPEED_1GBIT;
2402 		break;
2403 	case PORT_SPEED_2GB:
2404 		speed = FC_PORTSPEED_2GBIT;
2405 		break;
2406 	case PORT_SPEED_4GB:
2407 		speed = FC_PORTSPEED_4GBIT;
2408 		break;
2409 	case PORT_SPEED_8GB:
2410 		speed = FC_PORTSPEED_8GBIT;
2411 		break;
2412 	case PORT_SPEED_10GB:
2413 		speed = FC_PORTSPEED_10GBIT;
2414 		break;
2415 	case PORT_SPEED_16GB:
2416 		speed = FC_PORTSPEED_16GBIT;
2417 		break;
2418 	case PORT_SPEED_32GB:
2419 		speed = FC_PORTSPEED_32GBIT;
2420 		break;
2421 	case PORT_SPEED_64GB:
2422 		speed = FC_PORTSPEED_64GBIT;
2423 		break;
2424 	default:
2425 		speed = FC_PORTSPEED_UNKNOWN;
2426 		break;
2427 	}
2428 
2429 	fc_host_speed(shost) = speed;
2430 }
2431 
2432 static void
2433 qla2x00_get_host_port_type(struct Scsi_Host *shost)
2434 {
2435 	scsi_qla_host_t *vha = shost_priv(shost);
2436 	uint32_t port_type;
2437 
2438 	if (vha->vp_idx) {
2439 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2440 		return;
2441 	}
2442 	switch (vha->hw->current_topology) {
2443 	case ISP_CFG_NL:
2444 		port_type = FC_PORTTYPE_LPORT;
2445 		break;
2446 	case ISP_CFG_FL:
2447 		port_type = FC_PORTTYPE_NLPORT;
2448 		break;
2449 	case ISP_CFG_N:
2450 		port_type = FC_PORTTYPE_PTP;
2451 		break;
2452 	case ISP_CFG_F:
2453 		port_type = FC_PORTTYPE_NPORT;
2454 		break;
2455 	default:
2456 		port_type = FC_PORTTYPE_UNKNOWN;
2457 		break;
2458 	}
2459 
2460 	fc_host_port_type(shost) = port_type;
2461 }
2462 
2463 static void
2464 qla2x00_get_starget_node_name(struct scsi_target *starget)
2465 {
2466 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2467 	scsi_qla_host_t *vha = shost_priv(host);
2468 	fc_port_t *fcport;
2469 	u64 node_name = 0;
2470 
2471 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2472 		if (fcport->rport &&
2473 		    starget->id == fcport->rport->scsi_target_id) {
2474 			node_name = wwn_to_u64(fcport->node_name);
2475 			break;
2476 		}
2477 	}
2478 
2479 	fc_starget_node_name(starget) = node_name;
2480 }
2481 
2482 static void
2483 qla2x00_get_starget_port_name(struct scsi_target *starget)
2484 {
2485 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2486 	scsi_qla_host_t *vha = shost_priv(host);
2487 	fc_port_t *fcport;
2488 	u64 port_name = 0;
2489 
2490 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2491 		if (fcport->rport &&
2492 		    starget->id == fcport->rport->scsi_target_id) {
2493 			port_name = wwn_to_u64(fcport->port_name);
2494 			break;
2495 		}
2496 	}
2497 
2498 	fc_starget_port_name(starget) = port_name;
2499 }
2500 
2501 static void
2502 qla2x00_get_starget_port_id(struct scsi_target *starget)
2503 {
2504 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2505 	scsi_qla_host_t *vha = shost_priv(host);
2506 	fc_port_t *fcport;
2507 	uint32_t port_id = ~0U;
2508 
2509 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2510 		if (fcport->rport &&
2511 		    starget->id == fcport->rport->scsi_target_id) {
2512 			port_id = fcport->d_id.b.domain << 16 |
2513 			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2514 			break;
2515 		}
2516 	}
2517 
2518 	fc_starget_port_id(starget) = port_id;
2519 }
2520 
2521 static inline void
2522 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2523 {
2524 	rport->dev_loss_tmo = timeout ? timeout : 1;
2525 }
2526 
2527 static void
2528 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2529 {
2530 	struct Scsi_Host *host = rport_to_shost(rport);
2531 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2532 	unsigned long flags;
2533 
2534 	if (!fcport)
2535 		return;
2536 
2537 	/* Now that the rport has been deleted, set the fcport state to
2538 	   FCS_DEVICE_DEAD */
2539 	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2540 
2541 	/*
2542 	 * Transport has effectively 'deleted' the rport, clear
2543 	 * all local references.
2544 	 */
2545 	spin_lock_irqsave(host->host_lock, flags);
2546 	fcport->rport = fcport->drport = NULL;
2547 	*((fc_port_t **)rport->dd_data) = NULL;
2548 	spin_unlock_irqrestore(host->host_lock, flags);
2549 
2550 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2551 		return;
2552 
2553 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2554 		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2555 		return;
2556 	}
2557 }
2558 
2559 static void
2560 qla2x00_terminate_rport_io(struct fc_rport *rport)
2561 {
2562 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2563 
2564 	if (!fcport)
2565 		return;
2566 
2567 	if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2568 		return;
2569 
2570 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2571 		return;
2572 
2573 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2574 		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2575 		return;
2576 	}
2577 	/*
2578 	 * At this point all of the fcport's software state is cleared.  Perform any
2579 	 * final cleanup of firmware resources (PCBs and XCBs).
2580 	 */
2581 	if (fcport->loop_id != FC_NO_LOOP_ID) {
2582 		if (IS_FWI2_CAPABLE(fcport->vha->hw))
2583 			fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
2584 			    fcport->loop_id, fcport->d_id.b.domain,
2585 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2586 		else
2587 			qla2x00_port_logout(fcport->vha, fcport);
2588 	}
2589 }
2590 
2591 static int
2592 qla2x00_issue_lip(struct Scsi_Host *shost)
2593 {
2594 	scsi_qla_host_t *vha = shost_priv(shost);
2595 
2596 	if (IS_QLAFX00(vha->hw))
2597 		return 0;
2598 
2599 	qla2x00_loop_reset(vha);
2600 	return 0;
2601 }
2602 
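/*
 * FC transport .get_fc_host_stats hook.  vha->fc_host_stat is pre-filled with
 * -1; when the adapter is usable a link_statistics buffer is DMA-allocated,
 * the ISP statistics are fetched (qla24xx_get_isp_stats() on FWI2-capable
 * parts, qla2x00_get_link_status() otherwise) and translated into the
 * fc_host_statistics fields.
 */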
2603 static struct fc_host_statistics *
2604 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2605 {
2606 	scsi_qla_host_t *vha = shost_priv(shost);
2607 	struct qla_hw_data *ha = vha->hw;
2608 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2609 	int rval;
2610 	struct link_statistics *stats;
2611 	dma_addr_t stats_dma;
2612 	struct fc_host_statistics *p = &vha->fc_host_stat;
2613 
2614 	memset(p, -1, sizeof(*p));
2615 
2616 	if (IS_QLAFX00(vha->hw))
2617 		goto done;
2618 
2619 	if (test_bit(UNLOADING, &vha->dpc_flags))
2620 		goto done;
2621 
2622 	if (unlikely(pci_channel_offline(ha->pdev)))
2623 		goto done;
2624 
2625 	if (qla2x00_chip_is_down(vha))
2626 		goto done;
2627 
2628 	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2629 				   GFP_KERNEL);
2630 	if (!stats) {
2631 		ql_log(ql_log_warn, vha, 0x707d,
2632 		    "Failed to allocate memory for stats.\n");
2633 		goto done;
2634 	}
2635 
2636 	rval = QLA_FUNCTION_FAILED;
2637 	if (IS_FWI2_CAPABLE(ha)) {
2638 		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2639 	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2640 	    !ha->dpc_active) {
2641 		/* Must be in a 'READY' state for statistics retrieval. */
2642 		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2643 						stats, stats_dma);
2644 	}
2645 
2646 	if (rval != QLA_SUCCESS)
2647 		goto done_free;
2648 
2649 	p->link_failure_count = stats->link_fail_cnt;
2650 	p->loss_of_sync_count = stats->loss_sync_cnt;
2651 	p->loss_of_signal_count = stats->loss_sig_cnt;
2652 	p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
2653 	p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
2654 	p->invalid_crc_count = stats->inval_crc_cnt;
2655 	if (IS_FWI2_CAPABLE(ha)) {
2656 		p->lip_count = stats->lip_cnt;
2657 		p->tx_frames = stats->tx_frames;
2658 		p->rx_frames = stats->rx_frames;
2659 		p->dumped_frames = stats->discarded_frames;
2660 		p->nos_count = stats->nos_rcvd;
2661 		p->error_frames =
2662 			stats->dropped_frames + stats->discarded_frames;
2663 		p->rx_words = vha->qla_stats.input_bytes;
2664 		p->tx_words = vha->qla_stats.output_bytes;
2665 	}
2666 	p->fcp_control_requests = vha->qla_stats.control_requests;
2667 	p->fcp_input_requests = vha->qla_stats.input_requests;
2668 	p->fcp_output_requests = vha->qla_stats.output_requests;
2669 	p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
2670 	p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
2671 	p->seconds_since_last_reset =
2672 		get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2673 	do_div(p->seconds_since_last_reset, HZ);
2674 
2675 done_free:
2676 	dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
2677 	    stats, stats_dma);
2678 done:
2679 	return p;
2680 }
2681 
2682 static void
2683 qla2x00_reset_host_stats(struct Scsi_Host *shost)
2684 {
2685 	scsi_qla_host_t *vha = shost_priv(shost);
2686 	struct qla_hw_data *ha = vha->hw;
2687 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2688 	struct link_statistics *stats;
2689 	dma_addr_t stats_dma;
2690 
2691 	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2692 	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2693 
2694 	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2695 
2696 	if (IS_FWI2_CAPABLE(ha)) {
2697 		stats = dma_alloc_coherent(&ha->pdev->dev,
2698 		    sizeof(*stats), &stats_dma, GFP_KERNEL);
2699 		if (!stats) {
2700 			ql_log(ql_log_warn, vha, 0x70d7,
2701 			    "Failed to allocate memory for stats.\n");
2702 			return;
2703 		}
2704 
2705 		/* reset firmware statistics */
2706 		qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2707 
2708 		dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2709 		    stats, stats_dma);
2710 	}
2711 }
2712 
2713 static void
2714 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2715 {
2716 	scsi_qla_host_t *vha = shost_priv(shost);
2717 
2718 	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2719 	    sizeof(fc_host_symbolic_name(shost)));
2720 }
2721 
2722 static void
2723 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2724 {
2725 	scsi_qla_host_t *vha = shost_priv(shost);
2726 
2727 	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
2728 }
2729 
2730 static void
2731 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2732 {
2733 	scsi_qla_host_t *vha = shost_priv(shost);
2734 	static const uint8_t node_name[WWN_SIZE] = {
2735 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
2736 	};
2737 	u64 fabric_name = wwn_to_u64(node_name);
2738 
2739 	if (vha->device_flags & SWITCH_FOUND)
2740 		fabric_name = wwn_to_u64(vha->fabric_node_name);
2741 
2742 	fc_host_fabric_name(shost) = fabric_name;
2743 }
2744 
2745 static void
2746 qla2x00_get_host_port_state(struct Scsi_Host *shost)
2747 {
2748 	scsi_qla_host_t *vha = shost_priv(shost);
2749 	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2750 
2751 	if (!base_vha->flags.online) {
2752 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2753 		return;
2754 	}
2755 
2756 	switch (atomic_read(&base_vha->loop_state)) {
2757 	case LOOP_UPDATE:
2758 		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2759 		break;
2760 	case LOOP_DOWN:
2761 		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
2762 			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2763 		else
2764 			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2765 		break;
2766 	case LOOP_DEAD:
2767 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2768 		break;
2769 	case LOOP_READY:
2770 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2771 		break;
2772 	default:
2773 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
2774 		break;
2775 	}
2776 }
2777 
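/*
 * FC transport .vport_create hook: sanity-check the request, create and
 * register the virtual host, enable DIF/DIX protection when the HBA supports
 * it, copy the supported classes/speeds from the physical port, and attach a
 * dedicated QoS queue pair when matching NVRAM NPIV info is found.
 */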
2778 static int
2779 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
2780 {
2781 	int	ret = 0;
2782 	uint8_t	qos = 0;
2783 	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
2784 	scsi_qla_host_t *vha = NULL;
2785 	struct qla_hw_data *ha = base_vha->hw;
2786 	int	cnt;
2787 	struct req_que *req = ha->req_q_map[0];
2788 	struct qla_qpair *qpair;
2789 
2790 	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2791 	if (ret) {
2792 		ql_log(ql_log_warn, vha, 0x707e,
2793 		    "Vport sanity check failed, status %x\n", ret);
2794 		return (ret);
2795 	}
2796 
2797 	vha = qla24xx_create_vhost(fc_vport);
2798 	if (vha == NULL) {
2799 		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2800 		return FC_VPORT_FAILED;
2801 	}
2802 	if (disable) {
2803 		atomic_set(&vha->vp_state, VP_OFFLINE);
2804 		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2805 	} else
2806 		atomic_set(&vha->vp_state, VP_FAILED);
2807 
2808 	/* ready to create vport */
2809 	ql_log(ql_log_info, vha, 0x7080,
2810 	    "VP entry id %d assigned.\n", vha->vp_idx);
2811 
2812 	/* initialize vport states */
2813 	atomic_set(&vha->loop_state, LOOP_DOWN);
2814 	vha->vp_err_state = VP_ERR_PORTDWN;
2815 	vha->vp_prev_err_state = VP_ERR_UNKWN;
2816 	/* Check if physical ha port is Up */
2817 	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
2818 	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2819 		/* Don't retry or attempt login of this virtual port */
2820 		ql_dbg(ql_dbg_user, vha, 0x7081,
2821 		    "Vport loop state is not UP.\n");
2822 		atomic_set(&vha->loop_state, LOOP_DEAD);
2823 		if (!disable)
2824 			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
2825 	}
2826 
2827 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2828 		if (ha->fw_attributes & BIT_4) {
2829 			int prot = 0, guard;
2830 
2831 			vha->flags.difdix_supported = 1;
2832 			ql_dbg(ql_dbg_user, vha, 0x7082,
2833 			    "Registered for DIF/DIX type 1 and 3 protection.\n");
2834 			if (ql2xenabledif == 1)
2835 				prot = SHOST_DIX_TYPE0_PROTECTION;
2836 			scsi_host_set_prot(vha->host,
2837 			    prot | SHOST_DIF_TYPE1_PROTECTION
2838 			    | SHOST_DIF_TYPE2_PROTECTION
2839 			    | SHOST_DIF_TYPE3_PROTECTION
2840 			    | SHOST_DIX_TYPE1_PROTECTION
2841 			    | SHOST_DIX_TYPE2_PROTECTION
2842 			    | SHOST_DIX_TYPE3_PROTECTION);
2843 
2844 			guard = SHOST_DIX_GUARD_CRC;
2845 
2846 			if (IS_PI_IPGUARD_CAPABLE(ha) &&
2847 			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2848 				guard |= SHOST_DIX_GUARD_IP;
2849 
2850 			scsi_host_set_guard(vha->host, guard);
2851 		} else
2852 			vha->flags.difdix_supported = 0;
2853 	}
2854 
2855 	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
2856 				   &ha->pdev->dev)) {
2857 		ql_dbg(ql_dbg_user, vha, 0x7083,
2858 		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
2859 		goto vport_create_failed_2;
2860 	}
2861 
2862 	/* initialize attributes */
2863 	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2864 	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2865 	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2866 	fc_host_supported_classes(vha->host) =
2867 		fc_host_supported_classes(base_vha->host);
2868 	fc_host_supported_speeds(vha->host) =
2869 		fc_host_supported_speeds(base_vha->host);
2870 
2871 	qlt_vport_create(vha, ha);
2872 	qla24xx_vport_disable(fc_vport, disable);
2873 
2874 	if (!ql2xmqsupport || !ha->npiv_info)
2875 		goto vport_queue;
2876 
2877 	/* Create a request queue in QoS mode for the vport */
2878 	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
2879 		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
2880 			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
2881 					8) == 0) {
2882 			qos = ha->npiv_info[cnt].q_qos;
2883 			break;
2884 		}
2885 	}
2886 
2887 	if (qos) {
2888 		qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
2889 		if (!qpair)
2890 			ql_log(ql_log_warn, vha, 0x7084,
2891 			    "Can't create qpair for VP[%d]\n",
2892 			    vha->vp_idx);
2893 		else {
2894 			ql_dbg(ql_dbg_multiq, vha, 0xc001,
2895 			    "Queue pair: %d (QoS: %d) created for VP[%d]\n",
2896 			    qpair->id, qos, vha->vp_idx);
2897 			ql_dbg(ql_dbg_user, vha, 0x7085,
2898 			    "Queue Pair: %d (QoS: %d) created for VP[%d]\n",
2899 			    qpair->id, qos, vha->vp_idx);
2900 			req = qpair->req;
2901 			vha->qpair = qpair;
2902 		}
2903 	}
2904 
2905 vport_queue:
2906 	vha->req = req;
2907 	return 0;
2908 
2909 vport_create_failed_2:
2910 	qla24xx_disable_vp(vha);
2911 	qla24xx_deallocate_vp_id(vha);
2912 	scsi_host_put(vha->host);
2913 	return FC_VPORT_FAILED;
2914 }
2915 
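/*
 * FC transport .vport_delete hook: quiesce NVMe and FC sessions, detach the
 * target, unregister from the FC transport and SCSI midlayer, then release
 * the vport ID, timer, fcports, GNL DMA buffer, scan list and any dedicated
 * queue pair.
 */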
2916 static int
2917 qla24xx_vport_delete(struct fc_vport *fc_vport)
2918 {
2919 	scsi_qla_host_t *vha = fc_vport->dd_data;
2920 	struct qla_hw_data *ha = vha->hw;
2921 	uint16_t id = vha->vp_idx;
2922 
2923 	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
2924 	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
2925 		msleep(1000);
2926 
2927 	qla_nvme_delete(vha);
2928 
2929 	qla24xx_disable_vp(vha);
2930 	qla2x00_wait_for_sess_deletion(vha);
2931 
2932 	vha->flags.delete_progress = 1;
2933 
2934 	qlt_remove_target(ha, vha);
2935 
2936 	fc_remove_host(vha->host);
2937 
2938 	scsi_remove_host(vha->host);
2939 
2940 	/* Allow the timer to run to drain queued items when removing the vp */
2941 	qla24xx_deallocate_vp_id(vha);
2942 
2943 	if (vha->timer_active) {
2944 		qla2x00_vp_stop_timer(vha);
2945 		ql_dbg(ql_dbg_user, vha, 0x7086,
2946 		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
2947 	}
2948 
2949 	qla2x00_free_fcports(vha);
2950 
2951 	mutex_lock(&ha->vport_lock);
2952 	ha->cur_vport_count--;
2953 	clear_bit(vha->vp_idx, ha->vp_idx_map);
2954 	mutex_unlock(&ha->vport_lock);
2955 
2956 	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
2957 	    vha->gnl.ldma);
2958 
2959 	vfree(vha->scan.l);
2960 
2961 	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
2962 		if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
2963 			ql_log(ql_log_warn, vha, 0x7087,
2964 			    "Queue Pair delete failed.\n");
2965 	}
2966 
2967 	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
2968 	scsi_host_put(vha->host);
2969 	return 0;
2970 }
2971 
2972 static int
2973 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
2974 {
2975 	scsi_qla_host_t *vha = fc_vport->dd_data;
2976 
2977 	if (disable)
2978 		qla24xx_disable_vp(vha);
2979 	else
2980 		qla24xx_enable_vp(vha);
2981 
2982 	return 0;
2983 }
2984 
2985 struct fc_function_template qla2xxx_transport_functions = {
2986 
2987 	.show_host_node_name = 1,
2988 	.show_host_port_name = 1,
2989 	.show_host_supported_classes = 1,
2990 	.show_host_supported_speeds = 1,
2991 
2992 	.get_host_port_id = qla2x00_get_host_port_id,
2993 	.show_host_port_id = 1,
2994 	.get_host_speed = qla2x00_get_host_speed,
2995 	.show_host_speed = 1,
2996 	.get_host_port_type = qla2x00_get_host_port_type,
2997 	.show_host_port_type = 1,
2998 	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2999 	.show_host_symbolic_name = 1,
3000 	.set_host_system_hostname = qla2x00_set_host_system_hostname,
3001 	.show_host_system_hostname = 1,
3002 	.get_host_fabric_name = qla2x00_get_host_fabric_name,
3003 	.show_host_fabric_name = 1,
3004 	.get_host_port_state = qla2x00_get_host_port_state,
3005 	.show_host_port_state = 1,
3006 
3007 	.dd_fcrport_size = sizeof(struct fc_port *),
3008 	.show_rport_supported_classes = 1,
3009 
3010 	.get_starget_node_name = qla2x00_get_starget_node_name,
3011 	.show_starget_node_name = 1,
3012 	.get_starget_port_name = qla2x00_get_starget_port_name,
3013 	.show_starget_port_name = 1,
3014 	.get_starget_port_id  = qla2x00_get_starget_port_id,
3015 	.show_starget_port_id = 1,
3016 
3017 	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3018 	.show_rport_dev_loss_tmo = 1,
3019 
3020 	.issue_fc_host_lip = qla2x00_issue_lip,
3021 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3022 	.terminate_rport_io = qla2x00_terminate_rport_io,
3023 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
3024 	.reset_fc_host_stats = qla2x00_reset_host_stats,
3025 
3026 	.vport_create = qla24xx_vport_create,
3027 	.vport_disable = qla24xx_vport_disable,
3028 	.vport_delete = qla24xx_vport_delete,
3029 	.bsg_request = qla24xx_bsg_request,
3030 	.bsg_timeout = qla24xx_bsg_timeout,
3031 };
3032 
3033 struct fc_function_template qla2xxx_transport_vport_functions = {
3034 
3035 	.show_host_node_name = 1,
3036 	.show_host_port_name = 1,
3037 	.show_host_supported_classes = 1,
3038 
3039 	.get_host_port_id = qla2x00_get_host_port_id,
3040 	.show_host_port_id = 1,
3041 	.get_host_speed = qla2x00_get_host_speed,
3042 	.show_host_speed = 1,
3043 	.get_host_port_type = qla2x00_get_host_port_type,
3044 	.show_host_port_type = 1,
3045 	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3046 	.show_host_symbolic_name = 1,
3047 	.set_host_system_hostname = qla2x00_set_host_system_hostname,
3048 	.show_host_system_hostname = 1,
3049 	.get_host_fabric_name = qla2x00_get_host_fabric_name,
3050 	.show_host_fabric_name = 1,
3051 	.get_host_port_state = qla2x00_get_host_port_state,
3052 	.show_host_port_state = 1,
3053 
3054 	.dd_fcrport_size = sizeof(struct fc_port *),
3055 	.show_rport_supported_classes = 1,
3056 
3057 	.get_starget_node_name = qla2x00_get_starget_node_name,
3058 	.show_starget_node_name = 1,
3059 	.get_starget_port_name = qla2x00_get_starget_port_name,
3060 	.show_starget_port_name = 1,
3061 	.get_starget_port_id  = qla2x00_get_starget_port_id,
3062 	.show_starget_port_id = 1,
3063 
3064 	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3065 	.show_rport_dev_loss_tmo = 1,
3066 
3067 	.issue_fc_host_lip = qla2x00_issue_lip,
3068 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3069 	.terminate_rport_io = qla2x00_terminate_rport_io,
3070 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
3071 	.reset_fc_host_stats = qla2x00_reset_host_stats,
3072 
3073 	.bsg_request = qla24xx_bsg_request,
3074 	.bsg_timeout = qla24xx_bsg_timeout,
3075 };
3076 
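/*
 * Populate the fc_host attributes for a newly initialized host.  CNA parts
 * report a fixed 10Gb mask; on ISP27xx/28xx the supported-speed mask is
 * derived from the adapter's max/min supported-speed codes (2 = up to 64Gb,
 * 1 = up to 32Gb, 0 = up to 16Gb); older ISP families get fixed masks.
 */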
3077 void
3078 qla2x00_init_host_attr(scsi_qla_host_t *vha)
3079 {
3080 	struct qla_hw_data *ha = vha->hw;
3081 	u32 speeds = FC_PORTSPEED_UNKNOWN;
3082 
3083 	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3084 	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3085 	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3086 	fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
3087 			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
3088 	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
3089 	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
3090 
3091 	if (IS_CNA_CAPABLE(ha))
3092 		speeds = FC_PORTSPEED_10GBIT;
3093 	else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
3094 		if (ha->max_supported_speed == 2) {
3095 			if (ha->min_supported_speed <= 6)
3096 				speeds |= FC_PORTSPEED_64GBIT;
3097 		}
3098 		if (ha->max_supported_speed == 2 ||
3099 		    ha->max_supported_speed == 1) {
3100 			if (ha->min_supported_speed <= 5)
3101 				speeds |= FC_PORTSPEED_32GBIT;
3102 		}
3103 		if (ha->max_supported_speed == 2 ||
3104 		    ha->max_supported_speed == 1 ||
3105 		    ha->max_supported_speed == 0) {
3106 			if (ha->min_supported_speed <= 4)
3107 				speeds |= FC_PORTSPEED_16GBIT;
3108 		}
3109 		if (ha->max_supported_speed == 1 ||
3110 		    ha->max_supported_speed == 0) {
3111 			if (ha->min_supported_speed <= 3)
3112 				speeds |= FC_PORTSPEED_8GBIT;
3113 		}
3114 		if (ha->max_supported_speed == 0) {
3115 			if (ha->min_supported_speed <= 2)
3116 				speeds |= FC_PORTSPEED_4GBIT;
3117 		}
3118 	} else if (IS_QLA2031(ha))
3119 		speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
3120 			FC_PORTSPEED_4GBIT;
3121 	else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
3122 		speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
3123 			FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3124 	else if (IS_QLA24XX_TYPE(ha))
3125 		speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
3126 			FC_PORTSPEED_1GBIT;
3127 	else if (IS_QLA23XX(ha))
3128 		speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3129 	else
3130 		speeds = FC_PORTSPEED_1GBIT;
3131 
3132 	fc_host_supported_speeds(vha->host) = speeds;
3133 }
3134