xref: /linux/drivers/scsi/qla2xxx/qla_attr.c (revision 95e9fd10f06cb5642028b6b851e32b8c8afb4571)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/kthread.h>
11 #include <linux/vmalloc.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14 
15 static int qla24xx_vport_disable(struct fc_vport *, bool);
16 
17 /* SYSFS attributes --------------------------------------------------------- */
18 
19 static ssize_t
20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 			   struct bin_attribute *bin_attr,
22 			   char *buf, loff_t off, size_t count)
23 {
24 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25 	    struct device, kobj)));
26 	struct qla_hw_data *ha = vha->hw;
27 	int rval = 0;
28 
29 	if (ha->fw_dump_reading == 0)
30 		return 0;
31 
32 	if (IS_QLA82XX(ha)) {
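		/*
		 * ISP82xx exposes the MiniDump template first, followed by the
		 * dump data; offsets beyond the template map into the dump.
		 */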
33 		if (off < ha->md_template_size) {
34 			rval = memory_read_from_buffer(buf, count,
35 			    &off, ha->md_tmplt_hdr, ha->md_template_size);
36 			return rval;
37 		}
38 		off -= ha->md_template_size;
39 		rval = memory_read_from_buffer(buf, count,
40 		    &off, ha->md_dump, ha->md_dump_size);
41 		return rval;
42 	} else
43 		return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
44 					ha->fw_dump_len);
45 }
46 
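/*
 * Control values accepted by the fw_dump attribute (see the switch below):
 * 0 = clear a captured dump, 1 = mark a captured dump ready for reading,
 * 2 = (re)allocate the dump buffer, 3 = force a system error (ISP82xx:
 * take reset ownership), 4 = report MiniDump support (ISP82xx),
 * 5 = request an ISP abort (ISP82xx).
 */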
47 static ssize_t
48 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
49 			    struct bin_attribute *bin_attr,
50 			    char *buf, loff_t off, size_t count)
51 {
52 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
53 	    struct device, kobj)));
54 	struct qla_hw_data *ha = vha->hw;
55 	int reading;
56 
57 	if (off != 0)
58 		return (0);
59 
60 	reading = simple_strtol(buf, NULL, 10);
61 	switch (reading) {
62 	case 0:
63 		if (!ha->fw_dump_reading)
64 			break;
65 
66 		ql_log(ql_log_info, vha, 0x705d,
67 		    "Firmware dump cleared on (%ld).\n", vha->host_no);
68 
69 		if (IS_QLA82XX(vha->hw)) {
70 			qla82xx_md_free(vha);
71 			qla82xx_md_prep(vha);
72 		}
73 		ha->fw_dump_reading = 0;
74 		ha->fw_dumped = 0;
75 		break;
76 	case 1:
77 		if (ha->fw_dumped && !ha->fw_dump_reading) {
78 			ha->fw_dump_reading = 1;
79 
80 			ql_log(ql_log_info, vha, 0x705e,
81 			    "Raw firmware dump ready for read on (%ld).\n",
82 			    vha->host_no);
83 		}
84 		break;
85 	case 2:
86 		qla2x00_alloc_fw_dump(vha);
87 		break;
88 	case 3:
89 		if (IS_QLA82XX(ha)) {
90 			qla82xx_idc_lock(ha);
91 			qla82xx_set_reset_owner(vha);
92 			qla82xx_idc_unlock(ha);
93 		} else
94 			qla2x00_system_error(vha);
95 		break;
96 	case 4:
97 		if (IS_QLA82XX(ha)) {
98 			if (ha->md_tmplt_hdr)
99 				ql_dbg(ql_dbg_user, vha, 0x705b,
100 				    "MiniDump supported with this firmware.\n");
101 			else
102 				ql_dbg(ql_dbg_user, vha, 0x709d,
103 				    "MiniDump not supported with this firmware.\n");
104 		}
105 		break;
106 	case 5:
107 		if (IS_QLA82XX(ha))
108 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
109 		break;
110 	}
111 	return count;
112 }
113 
114 static struct bin_attribute sysfs_fw_dump_attr = {
115 	.attr = {
116 		.name = "fw_dump",
117 		.mode = S_IRUSR | S_IWUSR,
118 	},
119 	.size = 0,
120 	.read = qla2x00_sysfs_read_fw_dump,
121 	.write = qla2x00_sysfs_write_fw_dump,
122 };
123 
124 static ssize_t
125 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
126 			 struct bin_attribute *bin_attr,
127 			 char *buf, loff_t off, size_t count)
128 {
129 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
130 	    struct device, kobj)));
131 	struct qla_hw_data *ha = vha->hw;
132 
133 	if (!capable(CAP_SYS_ADMIN))
134 		return 0;
135 
136 	if (IS_NOCACHE_VPD_TYPE(ha))
137 		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
138 		    ha->nvram_size);
139 	return memory_read_from_buffer(buf, count, &off, ha->nvram,
140 					ha->nvram_size);
141 }
142 
143 static ssize_t
144 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
145 			  struct bin_attribute *bin_attr,
146 			  char *buf, loff_t off, size_t count)
147 {
148 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
149 	    struct device, kobj)));
150 	struct qla_hw_data *ha = vha->hw;
151 	uint16_t	cnt;
152 
153 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
154 	    !ha->isp_ops->write_nvram)
155 		return -EINVAL;
156 
157 	/* Checksum NVRAM. */
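	/*
	 * The final word (FWI2) or byte is overwritten with the two's
	 * complement of the running sum so the whole image sums to zero.
	 */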
158 	if (IS_FWI2_CAPABLE(ha)) {
159 		uint32_t *iter;
160 		uint32_t chksum;
161 
162 		iter = (uint32_t *)buf;
163 		chksum = 0;
164 		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
165 			chksum += le32_to_cpu(*iter++);
166 		chksum = ~chksum + 1;
167 		*iter = cpu_to_le32(chksum);
168 	} else {
169 		uint8_t *iter;
170 		uint8_t chksum;
171 
172 		iter = (uint8_t *)buf;
173 		chksum = 0;
174 		for (cnt = 0; cnt < count - 1; cnt++)
175 			chksum += *iter++;
176 		chksum = ~chksum + 1;
177 		*iter = chksum;
178 	}
179 
180 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
181 		ql_log(ql_log_warn, vha, 0x705f,
182 		    "HBA not online, failing NVRAM update.\n");
183 		return -EAGAIN;
184 	}
185 
186 	/* Write NVRAM. */
187 	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
188 	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
189 	    count);
190 
191 	ql_dbg(ql_dbg_user, vha, 0x7060,
192 	    "Setting ISP_ABORT_NEEDED\n");
193 	/* NVRAM settings take effect immediately. */
194 	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
195 	qla2xxx_wake_dpc(vha);
196 	qla2x00_wait_for_chip_reset(vha);
197 
198 	return count;
199 }
200 
201 static struct bin_attribute sysfs_nvram_attr = {
202 	.attr = {
203 		.name = "nvram",
204 		.mode = S_IRUSR | S_IWUSR,
205 	},
206 	.size = 512,
207 	.read = qla2x00_sysfs_read_nvram,
208 	.write = qla2x00_sysfs_write_nvram,
209 };
210 
211 static ssize_t
212 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
213 			  struct bin_attribute *bin_attr,
214 			  char *buf, loff_t off, size_t count)
215 {
216 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
217 	    struct device, kobj)));
218 	struct qla_hw_data *ha = vha->hw;
219 
220 	if (ha->optrom_state != QLA_SREADING)
221 		return 0;
222 
223 	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
224 					ha->optrom_region_size);
225 }
226 
227 static ssize_t
228 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
229 			   struct bin_attribute *bin_attr,
230 			   char *buf, loff_t off, size_t count)
231 {
232 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
233 	    struct device, kobj)));
234 	struct qla_hw_data *ha = vha->hw;
235 
236 	if (ha->optrom_state != QLA_SWRITING)
237 		return -EINVAL;
238 	if (off > ha->optrom_region_size)
239 		return -ERANGE;
240 	if (off + count > ha->optrom_region_size)
241 		count = ha->optrom_region_size - off;
242 
243 	memcpy(&ha->optrom_buffer[off], buf, count);
244 
245 	return count;
246 }
247 
248 static struct bin_attribute sysfs_optrom_attr = {
249 	.attr = {
250 		.name = "optrom",
251 		.mode = S_IRUSR | S_IWUSR,
252 	},
253 	.size = 0,
254 	.read = qla2x00_sysfs_read_optrom,
255 	.write = qla2x00_sysfs_write_optrom,
256 };
257 
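/*
 * optrom_ctl accepts "<val>:<start>:<size>" (start and size in hex):
 * 0 = free the staging buffer, 1 = read a flash region into the buffer,
 * 2 = allocate a buffer to stage a flash write, 3 = commit the staged
 * data to flash.
 */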
258 static ssize_t
259 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
260 			       struct bin_attribute *bin_attr,
261 			       char *buf, loff_t off, size_t count)
262 {
263 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
264 	    struct device, kobj)));
265 	struct qla_hw_data *ha = vha->hw;
266 
267 	uint32_t start = 0;
268 	uint32_t size = ha->optrom_size;
269 	int val, valid;
270 
271 	if (off)
272 		return -EINVAL;
273 
274 	if (unlikely(pci_channel_offline(ha->pdev)))
275 		return -EAGAIN;
276 
277 	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
278 		return -EINVAL;
279 	if (start > ha->optrom_size)
280 		return -EINVAL;
281 
282 	switch (val) {
283 	case 0:
284 		if (ha->optrom_state != QLA_SREADING &&
285 		    ha->optrom_state != QLA_SWRITING)
286 			return -EINVAL;
287 
288 		ha->optrom_state = QLA_SWAITING;
289 
290 		ql_dbg(ql_dbg_user, vha, 0x7061,
291 		    "Freeing flash region allocation -- 0x%x bytes.\n",
292 		    ha->optrom_region_size);
293 
294 		vfree(ha->optrom_buffer);
295 		ha->optrom_buffer = NULL;
296 		break;
297 	case 1:
298 		if (ha->optrom_state != QLA_SWAITING)
299 			return -EINVAL;
300 
301 		ha->optrom_region_start = start;
302 		ha->optrom_region_size = start + size > ha->optrom_size ?
303 		    ha->optrom_size - start : size;
304 
305 		ha->optrom_state = QLA_SREADING;
306 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
307 		if (ha->optrom_buffer == NULL) {
308 			ql_log(ql_log_warn, vha, 0x7062,
309 			    "Unable to allocate memory for optrom retrieval "
310 			    "(%x).\n", ha->optrom_region_size);
311 
312 			ha->optrom_state = QLA_SWAITING;
313 			return -ENOMEM;
314 		}
315 
316 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
317 			ql_log(ql_log_warn, vha, 0x7063,
318 			    "HBA not online, failing NVRAM update.\n");
319 			return -EAGAIN;
320 		}
321 
322 		ql_dbg(ql_dbg_user, vha, 0x7064,
323 		    "Reading flash region -- 0x%x/0x%x.\n",
324 		    ha->optrom_region_start, ha->optrom_region_size);
325 
326 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
327 		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
328 		    ha->optrom_region_start, ha->optrom_region_size);
329 		break;
330 	case 2:
331 		if (ha->optrom_state != QLA_SWAITING)
332 			return -EINVAL;
333 
334 		/*
335 		 * We need to be more restrictive on which FLASH regions are
336 		 * allowed to be updated via user-space.  Regions accessible
337 		 * via this method include:
338 		 *
339 		 * ISP21xx/ISP22xx/ISP23xx type boards:
340 		 *
341 		 * 	0x000000 -> 0x020000 -- Boot code.
342 		 *
343 		 * ISP2322/ISP24xx type boards:
344 		 *
345 		 * 	0x000000 -> 0x07ffff -- Boot code.
346 		 * 	0x080000 -> 0x0fffff -- Firmware.
347 		 *
348 		 * ISP25xx type boards:
349 		 *
350 		 * 	0x000000 -> 0x07ffff -- Boot code.
351 		 * 	0x080000 -> 0x0fffff -- Firmware.
352 		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
353 		 */
354 		valid = 0;
355 		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
356 			valid = 1;
357 		else if (start == (ha->flt_region_boot * 4) ||
358 		    start == (ha->flt_region_fw * 4))
359 			valid = 1;
360 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
361 			|| IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
362 			valid = 1;
363 		if (!valid) {
364 			ql_log(ql_log_warn, vha, 0x7065,
365 			    "Invalid start region 0x%x/0x%x.\n", start, size);
366 			return -EINVAL;
367 		}
368 
369 		ha->optrom_region_start = start;
370 		ha->optrom_region_size = start + size > ha->optrom_size ?
371 		    ha->optrom_size - start : size;
372 
373 		ha->optrom_state = QLA_SWRITING;
374 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
375 		if (ha->optrom_buffer == NULL) {
376 			ql_log(ql_log_warn, vha, 0x7066,
377 			    "Unable to allocate memory for optrom update "
378 			    "(%x)\n", ha->optrom_region_size);
379 
380 			ha->optrom_state = QLA_SWAITING;
381 			return -ENOMEM;
382 		}
383 
384 		ql_dbg(ql_dbg_user, vha, 0x7067,
385 		    "Staging flash region write -- 0x%x/0x%x.\n",
386 		    ha->optrom_region_start, ha->optrom_region_size);
387 
388 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
389 		break;
390 	case 3:
391 		if (ha->optrom_state != QLA_SWRITING)
392 			return -EINVAL;
393 
394 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
395 			ql_log(ql_log_warn, vha, 0x7068,
396 			    "HBA not online, failing flash update.\n");
397 			return -EAGAIN;
398 		}
399 
400 		ql_dbg(ql_dbg_user, vha, 0x7069,
401 		    "Writing flash region -- 0x%x/0x%x.\n",
402 		    ha->optrom_region_start, ha->optrom_region_size);
403 
404 		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
405 		    ha->optrom_region_start, ha->optrom_region_size);
406 		break;
407 	default:
408 		return -EINVAL;
409 	}
410 	return count;
411 }
412 
413 static struct bin_attribute sysfs_optrom_ctl_attr = {
414 	.attr = {
415 		.name = "optrom_ctl",
416 		.mode = S_IWUSR,
417 	},
418 	.size = 0,
419 	.write = qla2x00_sysfs_write_optrom_ctl,
420 };
421 
422 static ssize_t
423 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
424 		       struct bin_attribute *bin_attr,
425 		       char *buf, loff_t off, size_t count)
426 {
427 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
428 	    struct device, kobj)));
429 	struct qla_hw_data *ha = vha->hw;
430 
431 	if (unlikely(pci_channel_offline(ha->pdev)))
432 		return -EAGAIN;
433 
434 	if (!capable(CAP_SYS_ADMIN))
435 		return -EINVAL;
436 
437 	if (IS_NOCACHE_VPD_TYPE(ha))
438 		ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
439 		    ha->vpd_size);
440 	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
441 }
442 
443 static ssize_t
444 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
445 			struct bin_attribute *bin_attr,
446 			char *buf, loff_t off, size_t count)
447 {
448 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
449 	    struct device, kobj)));
450 	struct qla_hw_data *ha = vha->hw;
451 	uint8_t *tmp_data;
452 
453 	if (unlikely(pci_channel_offline(ha->pdev)))
454 		return 0;
455 
456 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
457 	    !ha->isp_ops->write_nvram)
458 		return 0;
459 
460 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
461 		ql_log(ql_log_warn, vha, 0x706a,
462 		    "HBA not online, failing VPD update.\n");
463 		return -EAGAIN;
464 	}
465 
466 	/* Write NVRAM. */
467 	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
468 	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
469 
470 	/* Update flash version information for 4Gb & above. */
471 	if (!IS_FWI2_CAPABLE(ha))
472 		return -EINVAL;
473 
474 	tmp_data = vmalloc(256);
475 	if (!tmp_data) {
476 		ql_log(ql_log_warn, vha, 0x706b,
477 		    "Unable to allocate memory for VPD information update.\n");
478 		return -ENOMEM;
479 	}
480 	ha->isp_ops->get_flash_version(vha, tmp_data);
481 	vfree(tmp_data);
482 
483 	return count;
484 }
485 
486 static struct bin_attribute sysfs_vpd_attr = {
487 	.attr = {
488 		.name = "vpd",
489 		.mode = S_IRUSR | S_IWUSR,
490 	},
491 	.size = 0,
492 	.read = qla2x00_sysfs_read_vpd,
493 	.write = qla2x00_sysfs_write_vpd,
494 };
495 
496 static ssize_t
497 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
498 		       struct bin_attribute *bin_attr,
499 		       char *buf, loff_t off, size_t count)
500 {
501 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
502 	    struct device, kobj)));
503 	struct qla_hw_data *ha = vha->hw;
504 	uint16_t iter, addr, offset;
505 	int rval;
506 
507 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
508 		return 0;
509 
510 	if (ha->sfp_data)
511 		goto do_read;
512 
513 	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
514 	    &ha->sfp_data_dma);
515 	if (!ha->sfp_data) {
516 		ql_log(ql_log_warn, vha, 0x706c,
517 		    "Unable to allocate memory for SFP read-data.\n");
518 		return 0;
519 	}
520 
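	/*
	 * Read the SFP in SFP_BLOCK_SIZE chunks: the first four blocks from
	 * two-wire device address 0xa0, the remainder from address 0xa2.
	 */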
521 do_read:
522 	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
523 	addr = 0xa0;
524 	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
525 	    iter++, offset += SFP_BLOCK_SIZE) {
526 		if (iter == 4) {
527 			/* Skip to next device address. */
528 			addr = 0xa2;
529 			offset = 0;
530 		}
531 
532 		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
533 		    addr, offset, SFP_BLOCK_SIZE, 0);
534 		if (rval != QLA_SUCCESS) {
535 			ql_log(ql_log_warn, vha, 0x706d,
536 			    "Unable to read SFP data (%x/%x/%x).\n", rval,
537 			    addr, offset);
538 
539 			return -EIO;
540 		}
541 		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
542 		buf += SFP_BLOCK_SIZE;
543 	}
544 
545 	return count;
546 }
547 
548 static struct bin_attribute sysfs_sfp_attr = {
549 	.attr = {
550 		.name = "sfp",
551 		.mode = S_IRUSR | S_IWUSR,
552 	},
553 	.size = SFP_DEV_SIZE * 2,
554 	.read = qla2x00_sysfs_read_sfp,
555 };
556 
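/*
 * Reset codes accepted below (written as a decimal integer):
 * 0x2025c = ISP reset, 0x2025d = MPI reset (ISP81xx/ISP8031),
 * 0x2025e = FCoE context reset (ISP82xx base port only).
 */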
557 static ssize_t
558 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
559 			struct bin_attribute *bin_attr,
560 			char *buf, loff_t off, size_t count)
561 {
562 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
563 	    struct device, kobj)));
564 	struct qla_hw_data *ha = vha->hw;
565 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
566 	int type;
567 
568 	if (off != 0)
569 		return -EINVAL;
570 
571 	type = simple_strtol(buf, NULL, 10);
572 	switch (type) {
573 	case 0x2025c:
574 		ql_log(ql_log_info, vha, 0x706e,
575 		    "Issuing ISP reset.\n");
576 
577 		scsi_block_requests(vha->host);
578 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
579 		if (IS_QLA82XX(ha)) {
580 			ha->flags.isp82xx_no_md_cap = 1;
581 			qla82xx_idc_lock(ha);
582 			qla82xx_set_reset_owner(vha);
583 			qla82xx_idc_unlock(ha);
584 		}
585 		qla2xxx_wake_dpc(vha);
586 		qla2x00_wait_for_chip_reset(vha);
587 		scsi_unblock_requests(vha->host);
588 		break;
589 	case 0x2025d:
590 		if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
591 			return -EPERM;
592 
593 		ql_log(ql_log_info, vha, 0x706f,
594 		    "Issuing MPI reset.\n");
595 
596 		/* Make sure FC side is not in reset */
597 		qla2x00_wait_for_hba_online(vha);
598 
599 		/* Issue MPI reset */
600 		scsi_block_requests(vha->host);
601 		if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
602 			ql_log(ql_log_warn, vha, 0x7070,
603 			    "MPI reset failed.\n");
604 		scsi_unblock_requests(vha->host);
605 		break;
606 	case 0x2025e:
607 		if (!IS_QLA82XX(ha) || vha != base_vha) {
608 			ql_log(ql_log_info, vha, 0x7071,
609 			    "FCoE ctx reset no supported.\n");
610 			return -EPERM;
611 		}
612 
613 		ql_log(ql_log_info, vha, 0x7072,
614 		    "Issuing FCoE ctx reset.\n");
615 		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
616 		qla2xxx_wake_dpc(vha);
617 		qla2x00_wait_for_fcoe_ctx_reset(vha);
618 		break;
619 	}
620 	return count;
621 }
622 
623 static struct bin_attribute sysfs_reset_attr = {
624 	.attr = {
625 		.name = "reset",
626 		.mode = S_IWUSR,
627 	},
628 	.size = 0,
629 	.write = qla2x00_sysfs_write_reset,
630 };
631 
632 static ssize_t
633 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
634 		       struct bin_attribute *bin_attr,
635 		       char *buf, loff_t off, size_t count)
636 {
637 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
638 	    struct device, kobj)));
639 	struct qla_hw_data *ha = vha->hw;
640 	int rval;
641 	uint16_t actual_size;
642 
643 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
644 		return 0;
645 
646 	if (ha->xgmac_data)
647 		goto do_read;
648 
649 	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
650 	    &ha->xgmac_data_dma, GFP_KERNEL);
651 	if (!ha->xgmac_data) {
652 		ql_log(ql_log_warn, vha, 0x7076,
653 		    "Unable to allocate memory for XGMAC read-data.\n");
654 		return 0;
655 	}
656 
657 do_read:
658 	actual_size = 0;
659 	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
660 
661 	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
662 	    XGMAC_DATA_SIZE, &actual_size);
663 	if (rval != QLA_SUCCESS) {
664 		ql_log(ql_log_warn, vha, 0x7077,
665 		    "Unable to read XGMAC data (%x).\n", rval);
666 		count = 0;
667 	}
668 
669 	count = actual_size > count ? count : actual_size;
670 	memcpy(buf, ha->xgmac_data, count);
671 
672 	return count;
673 }
674 
675 static struct bin_attribute sysfs_xgmac_stats_attr = {
676 	.attr = {
677 		.name = "xgmac_stats",
678 		.mode = S_IRUSR,
679 	},
680 	.size = 0,
681 	.read = qla2x00_sysfs_read_xgmac_stats,
682 };
683 
684 static ssize_t
685 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
686 		       struct bin_attribute *bin_attr,
687 		       char *buf, loff_t off, size_t count)
688 {
689 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
690 	    struct device, kobj)));
691 	struct qla_hw_data *ha = vha->hw;
692 	int rval;
693 	uint16_t actual_size;
694 
695 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
696 		return 0;
697 
698 	if (ha->dcbx_tlv)
699 		goto do_read;
700 
701 	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
702 	    &ha->dcbx_tlv_dma, GFP_KERNEL);
703 	if (!ha->dcbx_tlv) {
704 		ql_log(ql_log_warn, vha, 0x7078,
705 		    "Unable to allocate memory for DCBX TLV read-data.\n");
706 		return -ENOMEM;
707 	}
708 
709 do_read:
710 	actual_size = 0;
711 	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
712 
713 	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
714 	    DCBX_TLV_DATA_SIZE);
715 	if (rval != QLA_SUCCESS) {
716 		ql_log(ql_log_warn, vha, 0x7079,
717 		    "Unable to read DCBX TLV (%x).\n", rval);
718 		return -EIO;
719 	}
720 
721 	memcpy(buf, ha->dcbx_tlv, count);
722 
723 	return count;
724 }
725 
726 static struct bin_attribute sysfs_dcbx_tlv_attr = {
727 	.attr = {
728 		.name = "dcbx_tlv",
729 		.mode = S_IRUSR,
730 	},
731 	.size = 0,
732 	.read = qla2x00_sysfs_read_dcbx_tlv,
733 };
734 
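/*
 * is4GBp_only selects which adapters get the attribute (see
 * qla2x00_alloc_sysfs_attr): 0 = all, 1 = FWI2-capable (4Gb and newer) only,
 * 2 = ISP25xx only, 3 = CNA-capable only.
 */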
735 static struct sysfs_entry {
736 	char *name;
737 	struct bin_attribute *attr;
738 	int is4GBp_only;
739 } bin_file_entries[] = {
740 	{ "fw_dump", &sysfs_fw_dump_attr, },
741 	{ "nvram", &sysfs_nvram_attr, },
742 	{ "optrom", &sysfs_optrom_attr, },
743 	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
744 	{ "vpd", &sysfs_vpd_attr, 1 },
745 	{ "sfp", &sysfs_sfp_attr, 1 },
746 	{ "reset", &sysfs_reset_attr, },
747 	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
748 	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
749 	{ NULL },
750 };
751 
752 void
753 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
754 {
755 	struct Scsi_Host *host = vha->host;
756 	struct sysfs_entry *iter;
757 	int ret;
758 
759 	for (iter = bin_file_entries; iter->name; iter++) {
760 		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
761 			continue;
762 		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
763 			continue;
764 		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
765 			continue;
766 
767 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
768 		    iter->attr);
769 		if (ret)
770 			ql_log(ql_log_warn, vha, 0x00f3,
771 			    "Unable to create sysfs %s binary attribute (%d).\n",
772 			    iter->name, ret);
773 		else
774 			ql_dbg(ql_dbg_init, vha, 0x00f4,
775 			    "Successfully created sysfs %s binary attribure.\n",
776 			    iter->name);
777 	}
778 }
779 
780 void
781 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
782 {
783 	struct Scsi_Host *host = vha->host;
784 	struct sysfs_entry *iter;
785 	struct qla_hw_data *ha = vha->hw;
786 
787 	for (iter = bin_file_entries; iter->name; iter++) {
788 		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
789 			continue;
790 		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
791 			continue;
792 		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
793 			continue;
794 
795 		sysfs_remove_bin_file(&host->shost_gendev.kobj,
796 		    iter->attr);
797 	}
798 
799 	if (ha->beacon_blink_led == 1)
800 		ha->isp_ops->beacon_off(vha);
801 }
802 
803 /* Scsi_Host attributes. */
804 
805 static ssize_t
806 qla2x00_drvr_version_show(struct device *dev,
807 			  struct device_attribute *attr, char *buf)
808 {
809 	return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
810 }
811 
812 static ssize_t
813 qla2x00_fw_version_show(struct device *dev,
814 			struct device_attribute *attr, char *buf)
815 {
816 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
817 	struct qla_hw_data *ha = vha->hw;
818 	char fw_str[128];
819 
820 	return snprintf(buf, PAGE_SIZE, "%s\n",
821 	    ha->isp_ops->fw_version_str(vha, fw_str));
822 }
823 
824 static ssize_t
825 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
826 			char *buf)
827 {
828 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
829 	struct qla_hw_data *ha = vha->hw;
830 	uint32_t sn;
831 
832 	if (IS_FWI2_CAPABLE(ha)) {
833 		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
834 		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
835 	}
836 
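	/* Older adapters encode the serial number as one letter plus five digits. */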
837 	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
838 	return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
839 	    sn % 100000);
840 }
841 
842 static ssize_t
843 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
844 		      char *buf)
845 {
846 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
847 	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
848 }
849 
850 static ssize_t
851 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
852 		    char *buf)
853 {
854 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
855 	struct qla_hw_data *ha = vha->hw;
856 	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
857 	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
858 	    ha->product_id[3]);
859 }
860 
861 static ssize_t
862 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
863 			char *buf)
864 {
865 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
866 	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
867 }
868 
869 static ssize_t
870 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
871 			char *buf)
872 {
873 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
874 	return snprintf(buf, PAGE_SIZE, "%s\n",
875 	    vha->hw->model_desc ? vha->hw->model_desc : "");
876 }
877 
878 static ssize_t
879 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
880 		      char *buf)
881 {
882 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
883 	char pci_info[30];
884 
885 	return snprintf(buf, PAGE_SIZE, "%s\n",
886 	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
887 }
888 
889 static ssize_t
890 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
891 			char *buf)
892 {
893 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
894 	struct qla_hw_data *ha = vha->hw;
895 	int len = 0;
896 
897 	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
898 	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
899 	    vha->device_flags & DFLG_NO_CABLE)
900 		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
901 	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
902 	    qla2x00_reset_active(vha))
903 		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
904 	else {
905 		len = snprintf(buf, PAGE_SIZE, "Link Up - ");
906 
907 		switch (ha->current_topology) {
908 		case ISP_CFG_NL:
909 			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
910 			break;
911 		case ISP_CFG_FL:
912 			len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
913 			break;
914 		case ISP_CFG_N:
915 			len += snprintf(buf + len, PAGE_SIZE-len,
916 			    "N_Port to N_Port\n");
917 			break;
918 		case ISP_CFG_F:
919 			len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
920 			break;
921 		default:
922 			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
923 			break;
924 		}
925 	}
926 	return len;
927 }
928 
929 static ssize_t
930 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
931 		 char *buf)
932 {
933 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
934 	int len = 0;
935 
936 	switch (vha->hw->zio_mode) {
937 	case QLA_ZIO_MODE_6:
938 		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
939 		break;
940 	case QLA_ZIO_DISABLED:
941 		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
942 		break;
943 	}
944 	return len;
945 }
946 
947 static ssize_t
948 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
949 		  const char *buf, size_t count)
950 {
951 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
952 	struct qla_hw_data *ha = vha->hw;
953 	int val = 0;
954 	uint16_t zio_mode;
955 
956 	if (!IS_ZIO_SUPPORTED(ha))
957 		return -ENOTSUPP;
958 
959 	if (sscanf(buf, "%d", &val) != 1)
960 		return -EINVAL;
961 
962 	if (val)
963 		zio_mode = QLA_ZIO_MODE_6;
964 	else
965 		zio_mode = QLA_ZIO_DISABLED;
966 
967 	/* Update per-hba values and queue a reset. */
968 	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
969 		ha->zio_mode = zio_mode;
970 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
971 	}
972 	return strlen(buf);
973 }
974 
975 static ssize_t
976 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
977 		       char *buf)
978 {
979 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
980 
981 	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
982 }
983 
984 static ssize_t
985 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
986 			const char *buf, size_t count)
987 {
988 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
989 	int val = 0;
990 	uint16_t zio_timer;
991 
992 	if (sscanf(buf, "%d", &val) != 1)
993 		return -EINVAL;
994 	if (val > 25500 || val < 100)
995 		return -ERANGE;
996 
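	/* The sysfs value is in microseconds; the hardware stores 100us units. */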
997 	zio_timer = (uint16_t)(val / 100);
998 	vha->hw->zio_timer = zio_timer;
999 
1000 	return strlen(buf);
1001 }
1002 
1003 static ssize_t
1004 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1005 		    char *buf)
1006 {
1007 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1008 	int len = 0;
1009 
1010 	if (vha->hw->beacon_blink_led)
1011 		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1012 	else
1013 		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1014 	return len;
1015 }
1016 
1017 static ssize_t
1018 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1019 		     const char *buf, size_t count)
1020 {
1021 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1022 	struct qla_hw_data *ha = vha->hw;
1023 	int val = 0;
1024 	int rval;
1025 
1026 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1027 		return -EPERM;
1028 
1029 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1030 		ql_log(ql_log_warn, vha, 0x707a,
1031 		    "Abort ISP active -- ignoring beacon request.\n");
1032 		return -EBUSY;
1033 	}
1034 
1035 	if (sscanf(buf, "%d", &val) != 1)
1036 		return -EINVAL;
1037 
1038 	if (val)
1039 		rval = ha->isp_ops->beacon_on(vha);
1040 	else
1041 		rval = ha->isp_ops->beacon_off(vha);
1042 
1043 	if (rval != QLA_SUCCESS)
1044 		count = 0;
1045 
1046 	return count;
1047 }
1048 
1049 static ssize_t
1050 qla2x00_optrom_bios_version_show(struct device *dev,
1051 				 struct device_attribute *attr, char *buf)
1052 {
1053 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1054 	struct qla_hw_data *ha = vha->hw;
1055 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1056 	    ha->bios_revision[0]);
1057 }
1058 
1059 static ssize_t
1060 qla2x00_optrom_efi_version_show(struct device *dev,
1061 				struct device_attribute *attr, char *buf)
1062 {
1063 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1064 	struct qla_hw_data *ha = vha->hw;
1065 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1066 	    ha->efi_revision[0]);
1067 }
1068 
1069 static ssize_t
1070 qla2x00_optrom_fcode_version_show(struct device *dev,
1071 				  struct device_attribute *attr, char *buf)
1072 {
1073 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1074 	struct qla_hw_data *ha = vha->hw;
1075 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1076 	    ha->fcode_revision[0]);
1077 }
1078 
1079 static ssize_t
1080 qla2x00_optrom_fw_version_show(struct device *dev,
1081 			       struct device_attribute *attr, char *buf)
1082 {
1083 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1084 	struct qla_hw_data *ha = vha->hw;
1085 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1086 	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1087 	    ha->fw_revision[3]);
1088 }
1089 
1090 static ssize_t
1091 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1092     struct device_attribute *attr, char *buf)
1093 {
1094 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1095 	struct qla_hw_data *ha = vha->hw;
1096 
1097 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
1098 		return snprintf(buf, PAGE_SIZE, "\n");
1099 
1100 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1101 	    ha->gold_fw_version[0], ha->gold_fw_version[1],
1102 	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
1103 }
1104 
1105 static ssize_t
1106 qla2x00_total_isp_aborts_show(struct device *dev,
1107 			      struct device_attribute *attr, char *buf)
1108 {
1109 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1110 	return snprintf(buf, PAGE_SIZE, "%d\n",
1111 	    vha->qla_stats.total_isp_aborts);
1112 }
1113 
1114 static ssize_t
1115 qla24xx_84xx_fw_version_show(struct device *dev,
1116 	struct device_attribute *attr, char *buf)
1117 {
1118 	int rval = QLA_SUCCESS;
1119 	uint16_t status[2] = {0, 0};
1120 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1121 	struct qla_hw_data *ha = vha->hw;
1122 
1123 	if (!IS_QLA84XX(ha))
1124 		return snprintf(buf, PAGE_SIZE, "\n");
1125 
1126 	if (ha->cs84xx->op_fw_version == 0)
1127 		rval = qla84xx_verify_chip(vha, status);
1128 
1129 	if ((rval == QLA_SUCCESS) && (status[0] == 0))
1130 		return snprintf(buf, PAGE_SIZE, "%u\n",
1131 			(uint32_t)ha->cs84xx->op_fw_version);
1132 
1133 	return snprintf(buf, PAGE_SIZE, "\n");
1134 }
1135 
1136 static ssize_t
1137 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1138     char *buf)
1139 {
1140 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1141 	struct qla_hw_data *ha = vha->hw;
1142 
1143 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1144 		return snprintf(buf, PAGE_SIZE, "\n");
1145 
1146 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1147 	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1148 	    ha->mpi_capabilities);
1149 }
1150 
1151 static ssize_t
1152 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1153     char *buf)
1154 {
1155 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1156 	struct qla_hw_data *ha = vha->hw;
1157 
1158 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1159 		return snprintf(buf, PAGE_SIZE, "\n");
1160 
1161 	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1162 	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1163 }
1164 
1165 static ssize_t
1166 qla2x00_flash_block_size_show(struct device *dev,
1167 			      struct device_attribute *attr, char *buf)
1168 {
1169 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1170 	struct qla_hw_data *ha = vha->hw;
1171 
1172 	return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1173 }
1174 
1175 static ssize_t
1176 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1177     char *buf)
1178 {
1179 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1180 
1181 	if (!IS_CNA_CAPABLE(vha->hw))
1182 		return snprintf(buf, PAGE_SIZE, "\n");
1183 
1184 	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1185 }
1186 
1187 static ssize_t
1188 qla2x00_vn_port_mac_address_show(struct device *dev,
1189     struct device_attribute *attr, char *buf)
1190 {
1191 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1192 
1193 	if (!IS_CNA_CAPABLE(vha->hw))
1194 		return snprintf(buf, PAGE_SIZE, "\n");
1195 
1196 	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1197 	    vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1198 	    vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1199 	    vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1200 }
1201 
1202 static ssize_t
1203 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1204     char *buf)
1205 {
1206 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1207 
1208 	return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1209 }
1210 
1211 static ssize_t
1212 qla2x00_thermal_temp_show(struct device *dev,
1213 	struct device_attribute *attr, char *buf)
1214 {
1215 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1216 	int rval = QLA_FUNCTION_FAILED;
1217 	uint16_t temp, frac;
1218 
1219 	if (!vha->hw->flags.thermal_supported)
1220 		return snprintf(buf, PAGE_SIZE, "\n");
1221 
1222 	temp = frac = 0;
1223 	if (qla2x00_reset_active(vha))
1224 		ql_log(ql_log_warn, vha, 0x707b,
1225 		    "ISP reset active.\n");
1226 	else if (!vha->hw->flags.eeh_busy)
1227 		rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1228 	if (rval != QLA_SUCCESS)
1229 		return snprintf(buf, PAGE_SIZE, "\n");
1230 
1231 	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
1232 }
1233 
1234 static ssize_t
1235 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1236     char *buf)
1237 {
1238 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1239 	int rval = QLA_FUNCTION_FAILED;
1240 	uint16_t state[5];
1241 
1242 	if (qla2x00_reset_active(vha))
1243 		ql_log(ql_log_warn, vha, 0x707c,
1244 		    "ISP reset active.\n");
1245 	else if (!vha->hw->flags.eeh_busy)
1246 		rval = qla2x00_get_firmware_state(vha, state);
1247 	if (rval != QLA_SUCCESS)
1248 		memset(state, -1, sizeof(state));
1249 
1250 	return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1251 	    state[1], state[2], state[3], state[4]);
1252 }
1253 
1254 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1255 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1256 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1257 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1258 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1259 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1260 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1261 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1262 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1263 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1264 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1265 		   qla2x00_zio_timer_store);
1266 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1267 		   qla2x00_beacon_store);
1268 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1269 		   qla2x00_optrom_bios_version_show, NULL);
1270 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1271 		   qla2x00_optrom_efi_version_show, NULL);
1272 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1273 		   qla2x00_optrom_fcode_version_show, NULL);
1274 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1275 		   NULL);
1276 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
1277     qla2x00_optrom_gold_fw_version_show, NULL);
1278 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1279 		   NULL);
1280 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1281 		   NULL);
1282 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1283 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1284 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1285 		   NULL);
1286 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1287 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1288 		   qla2x00_vn_port_mac_address_show, NULL);
1289 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1290 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1291 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1292 
1293 struct device_attribute *qla2x00_host_attrs[] = {
1294 	&dev_attr_driver_version,
1295 	&dev_attr_fw_version,
1296 	&dev_attr_serial_num,
1297 	&dev_attr_isp_name,
1298 	&dev_attr_isp_id,
1299 	&dev_attr_model_name,
1300 	&dev_attr_model_desc,
1301 	&dev_attr_pci_info,
1302 	&dev_attr_link_state,
1303 	&dev_attr_zio,
1304 	&dev_attr_zio_timer,
1305 	&dev_attr_beacon,
1306 	&dev_attr_optrom_bios_version,
1307 	&dev_attr_optrom_efi_version,
1308 	&dev_attr_optrom_fcode_version,
1309 	&dev_attr_optrom_fw_version,
1310 	&dev_attr_84xx_fw_version,
1311 	&dev_attr_total_isp_aborts,
1312 	&dev_attr_mpi_version,
1313 	&dev_attr_phy_version,
1314 	&dev_attr_flash_block_size,
1315 	&dev_attr_vlan_id,
1316 	&dev_attr_vn_port_mac_address,
1317 	&dev_attr_fabric_param,
1318 	&dev_attr_fw_state,
1319 	&dev_attr_optrom_gold_fw_version,
1320 	&dev_attr_thermal_temp,
1321 	NULL,
1322 };
1323 
1324 /* Host attributes. */
1325 
1326 static void
1327 qla2x00_get_host_port_id(struct Scsi_Host *shost)
1328 {
1329 	scsi_qla_host_t *vha = shost_priv(shost);
1330 
1331 	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1332 	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1333 }
1334 
1335 static void
1336 qla2x00_get_host_speed(struct Scsi_Host *shost)
1337 {
1338 	struct qla_hw_data *ha = ((struct scsi_qla_host *)
1339 					(shost_priv(shost)))->hw;
1340 	u32 speed = FC_PORTSPEED_UNKNOWN;
1341 
1342 	switch (ha->link_data_rate) {
1343 	case PORT_SPEED_1GB:
1344 		speed = FC_PORTSPEED_1GBIT;
1345 		break;
1346 	case PORT_SPEED_2GB:
1347 		speed = FC_PORTSPEED_2GBIT;
1348 		break;
1349 	case PORT_SPEED_4GB:
1350 		speed = FC_PORTSPEED_4GBIT;
1351 		break;
1352 	case PORT_SPEED_8GB:
1353 		speed = FC_PORTSPEED_8GBIT;
1354 		break;
1355 	case PORT_SPEED_10GB:
1356 		speed = FC_PORTSPEED_10GBIT;
1357 		break;
1358 	case PORT_SPEED_16GB:
1359 		speed = FC_PORTSPEED_16GBIT;
1360 		break;
1361 	}
1362 	fc_host_speed(shost) = speed;
1363 }
1364 
1365 static void
1366 qla2x00_get_host_port_type(struct Scsi_Host *shost)
1367 {
1368 	scsi_qla_host_t *vha = shost_priv(shost);
1369 	uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1370 
1371 	if (vha->vp_idx) {
1372 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1373 		return;
1374 	}
1375 	switch (vha->hw->current_topology) {
1376 	case ISP_CFG_NL:
1377 		port_type = FC_PORTTYPE_LPORT;
1378 		break;
1379 	case ISP_CFG_FL:
1380 		port_type = FC_PORTTYPE_NLPORT;
1381 		break;
1382 	case ISP_CFG_N:
1383 		port_type = FC_PORTTYPE_PTP;
1384 		break;
1385 	case ISP_CFG_F:
1386 		port_type = FC_PORTTYPE_NPORT;
1387 		break;
1388 	}
1389 	fc_host_port_type(shost) = port_type;
1390 }
1391 
1392 static void
1393 qla2x00_get_starget_node_name(struct scsi_target *starget)
1394 {
1395 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1396 	scsi_qla_host_t *vha = shost_priv(host);
1397 	fc_port_t *fcport;
1398 	u64 node_name = 0;
1399 
1400 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1401 		if (fcport->rport &&
1402 		    starget->id == fcport->rport->scsi_target_id) {
1403 			node_name = wwn_to_u64(fcport->node_name);
1404 			break;
1405 		}
1406 	}
1407 
1408 	fc_starget_node_name(starget) = node_name;
1409 }
1410 
1411 static void
1412 qla2x00_get_starget_port_name(struct scsi_target *starget)
1413 {
1414 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1415 	scsi_qla_host_t *vha = shost_priv(host);
1416 	fc_port_t *fcport;
1417 	u64 port_name = 0;
1418 
1419 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1420 		if (fcport->rport &&
1421 		    starget->id == fcport->rport->scsi_target_id) {
1422 			port_name = wwn_to_u64(fcport->port_name);
1423 			break;
1424 		}
1425 	}
1426 
1427 	fc_starget_port_name(starget) = port_name;
1428 }
1429 
1430 static void
1431 qla2x00_get_starget_port_id(struct scsi_target *starget)
1432 {
1433 	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1434 	scsi_qla_host_t *vha = shost_priv(host);
1435 	fc_port_t *fcport;
1436 	uint32_t port_id = ~0U;
1437 
1438 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1439 		if (fcport->rport &&
1440 		    starget->id == fcport->rport->scsi_target_id) {
1441 			port_id = fcport->d_id.b.domain << 16 |
1442 			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1443 			break;
1444 		}
1445 	}
1446 
1447 	fc_starget_port_id(starget) = port_id;
1448 }
1449 
1450 static void
1451 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1452 {
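	/* Enforce a non-zero dev_loss_tmo; fall back to 1 second when 0 is given. */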
1453 	if (timeout)
1454 		rport->dev_loss_tmo = timeout;
1455 	else
1456 		rport->dev_loss_tmo = 1;
1457 }
1458 
1459 static void
1460 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1461 {
1462 	struct Scsi_Host *host = rport_to_shost(rport);
1463 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1464 	unsigned long flags;
1465 
1466 	if (!fcport)
1467 		return;
1468 
1469 	/* Now that the rport has been deleted, set the fcport state to
1470 	   FCS_DEVICE_DEAD */
1471 	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
1472 
1473 	/*
1474 	 * Transport has effectively 'deleted' the rport, clear
1475 	 * all local references.
1476 	 */
1477 	spin_lock_irqsave(host->host_lock, flags);
1478 	fcport->rport = fcport->drport = NULL;
1479 	*((fc_port_t **)rport->dd_data) = NULL;
1480 	spin_unlock_irqrestore(host->host_lock, flags);
1481 
1482 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1483 		return;
1484 
1485 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1486 		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1487 		return;
1488 	}
1489 }
1490 
1491 static void
1492 qla2x00_terminate_rport_io(struct fc_rport *rport)
1493 {
1494 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1495 
1496 	if (!fcport)
1497 		return;
1498 
1499 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1500 		return;
1501 
1502 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1503 		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1504 		return;
1505 	}
1506 	/*
1507 	 * At this point all fcport's software-states are cleared.  Perform any
1508 	 * final cleanup of firmware resources (PCBs and XCBs).
1509 	 */
1510 	if (fcport->loop_id != FC_NO_LOOP_ID &&
1511 	    !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
1512 		if (IS_FWI2_CAPABLE(fcport->vha->hw))
1513 			fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1514 			    fcport->loop_id, fcport->d_id.b.domain,
1515 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
1516 		else
1517 			qla2x00_port_logout(fcport->vha, fcport);
1518 	}
1519 }
1520 
1521 static int
1522 qla2x00_issue_lip(struct Scsi_Host *shost)
1523 {
1524 	scsi_qla_host_t *vha = shost_priv(shost);
1525 
1526 	qla2x00_loop_reset(vha);
1527 	return 0;
1528 }
1529 
1530 static struct fc_host_statistics *
1531 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1532 {
1533 	scsi_qla_host_t *vha = shost_priv(shost);
1534 	struct qla_hw_data *ha = vha->hw;
1535 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1536 	int rval;
1537 	struct link_statistics *stats;
1538 	dma_addr_t stats_dma;
1539 	struct fc_host_statistics *pfc_host_stat;
1540 
1541 	pfc_host_stat = &vha->fc_host_stat;
1542 	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1543 
1544 	if (test_bit(UNLOADING, &vha->dpc_flags))
1545 		goto done;
1546 
1547 	if (unlikely(pci_channel_offline(ha->pdev)))
1548 		goto done;
1549 
1550 	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1551 	if (stats == NULL) {
1552 		ql_log(ql_log_warn, vha, 0x707d,
1553 		    "Failed to allocate memory for stats.\n");
1554 		goto done;
1555 	}
1556 	memset(stats, 0, DMA_POOL_SIZE);
1557 
1558 	rval = QLA_FUNCTION_FAILED;
1559 	if (IS_FWI2_CAPABLE(ha)) {
1560 		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1561 	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1562 	    !qla2x00_reset_active(vha) && !ha->dpc_active) {
1563 		/* Must be in a 'READY' state for statistics retrieval. */
1564 		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1565 						stats, stats_dma);
1566 	}
1567 
1568 	if (rval != QLA_SUCCESS)
1569 		goto done_free;
1570 
1571 	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1572 	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1573 	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1574 	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1575 	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1576 	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1577 	if (IS_FWI2_CAPABLE(ha)) {
1578 		pfc_host_stat->lip_count = stats->lip_cnt;
1579 		pfc_host_stat->tx_frames = stats->tx_frames;
1580 		pfc_host_stat->rx_frames = stats->rx_frames;
1581 		pfc_host_stat->dumped_frames = stats->dumped_frames;
1582 		pfc_host_stat->nos_count = stats->nos_rcvd;
1583 	}
1584 	pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
1585 	pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
1586 
1587 done_free:
1588 	dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1589 done:
1590 	return pfc_host_stat;
1591 }
1592 
1593 static void
1594 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1595 {
1596 	scsi_qla_host_t *vha = shost_priv(shost);
1597 
1598 	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1599 }
1600 
1601 static void
1602 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1603 {
1604 	scsi_qla_host_t *vha = shost_priv(shost);
1605 
1606 	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1607 }
1608 
1609 static void
1610 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1611 {
1612 	scsi_qla_host_t *vha = shost_priv(shost);
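	/* Default to an all-FF WWN when no fabric switch has been discovered. */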
1613 	uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF,
1614 		0xFF, 0xFF, 0xFF, 0xFF };
1615 	u64 fabric_name = wwn_to_u64(node_name);
1616 
1617 	if (vha->device_flags & SWITCH_FOUND)
1618 		fabric_name = wwn_to_u64(vha->fabric_node_name);
1619 
1620 	fc_host_fabric_name(shost) = fabric_name;
1621 }
1622 
1623 static void
1624 qla2x00_get_host_port_state(struct Scsi_Host *shost)
1625 {
1626 	scsi_qla_host_t *vha = shost_priv(shost);
1627 	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1628 
1629 	if (!base_vha->flags.online) {
1630 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1631 		return;
1632 	}
1633 
1634 	switch (atomic_read(&base_vha->loop_state)) {
1635 	case LOOP_UPDATE:
1636 		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1637 		break;
1638 	case LOOP_DOWN:
1639 		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
1640 			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1641 		else
1642 			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1643 		break;
1644 	case LOOP_DEAD:
1645 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1646 		break;
1647 	case LOOP_READY:
1648 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1649 		break;
1650 	default:
1651 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1652 		break;
1653 	}
1654 }
1655 
1656 static int
1657 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1658 {
1659 	int	ret = 0;
1660 	uint8_t	qos = 0;
1661 	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1662 	scsi_qla_host_t *vha = NULL;
1663 	struct qla_hw_data *ha = base_vha->hw;
1664 	uint16_t options = 0;
1665 	int	cnt;
1666 	struct req_que *req = ha->req_q_map[0];
1667 
1668 	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1669 	if (ret) {
1670 		ql_log(ql_log_warn, vha, 0x707e,
1671 		    "Vport sanity check failed, status %x\n", ret);
1672 		return (ret);
1673 	}
1674 
1675 	vha = qla24xx_create_vhost(fc_vport);
1676 	if (vha == NULL) {
1677 		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
1678 		return FC_VPORT_FAILED;
1679 	}
1680 	if (disable) {
1681 		atomic_set(&vha->vp_state, VP_OFFLINE);
1682 		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1683 	} else
1684 		atomic_set(&vha->vp_state, VP_FAILED);
1685 
1686 	/* ready to create vport */
1687 	ql_log(ql_log_info, vha, 0x7080,
1688 	    "VP entry id %d assigned.\n", vha->vp_idx);
1689 
1690 	/* initialized vport states */
1691 	atomic_set(&vha->loop_state, LOOP_DOWN);
1692 	vha->vp_err_state = VP_ERR_PORTDWN;
1693 	vha->vp_prev_err_state = VP_ERR_UNKWN;
1694 	/* Check if physical ha port is Up */
1695 	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1696 	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1697 		/* Don't retry or attempt login of this virtual port */
1698 		ql_dbg(ql_dbg_user, vha, 0x7081,
1699 		    "Vport loop state is not UP.\n");
1700 		atomic_set(&vha->loop_state, LOOP_DEAD);
1701 		if (!disable)
1702 			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1703 	}
1704 
1705 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
1706 		if (ha->fw_attributes & BIT_4) {
1707 			int prot = 0;
1708 			vha->flags.difdix_supported = 1;
1709 			ql_dbg(ql_dbg_user, vha, 0x7082,
1710 			    "Registered for DIF/DIX type 1 and 3 protection.\n");
1711 			if (ql2xenabledif == 1)
1712 				prot = SHOST_DIX_TYPE0_PROTECTION;
1713 			scsi_host_set_prot(vha->host,
1714 			    prot | SHOST_DIF_TYPE1_PROTECTION
1715 			    | SHOST_DIF_TYPE2_PROTECTION
1716 			    | SHOST_DIF_TYPE3_PROTECTION
1717 			    | SHOST_DIX_TYPE1_PROTECTION
1718 			    | SHOST_DIX_TYPE2_PROTECTION
1719 			    | SHOST_DIX_TYPE3_PROTECTION);
1720 			scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
1721 		} else
1722 			vha->flags.difdix_supported = 0;
1723 	}
1724 
1725 	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1726 				   &ha->pdev->dev)) {
1727 		ql_dbg(ql_dbg_user, vha, 0x7083,
1728 		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
1729 		goto vport_create_failed_2;
1730 	}
1731 
1732 	/* initialize attributes */
1733 	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1734 	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1735 	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1736 	fc_host_supported_classes(vha->host) =
1737 		fc_host_supported_classes(base_vha->host);
1738 	fc_host_supported_speeds(vha->host) =
1739 		fc_host_supported_speeds(base_vha->host);
1740 
1741 	qlt_vport_create(vha, ha);
1742 	qla24xx_vport_disable(fc_vport, disable);
1743 
1744 	if (ha->flags.cpu_affinity_enabled) {
1745 		req = ha->req_q_map[1];
1746 		ql_dbg(ql_dbg_multiq, vha, 0xc000,
1747 		    "Request queue %p attached with "
1748 		    "VP[%d], cpu affinity =%d\n",
1749 		    req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
1750 		goto vport_queue;
1751 	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1752 		goto vport_queue;
1753 	/* Create a request queue in QoS mode for the vport */
1754 	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1755 		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1756 			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1757 					8) == 0) {
1758 			qos = ha->npiv_info[cnt].q_qos;
1759 			break;
1760 		}
1761 	}
1762 
1763 	if (qos) {
1764 		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1765 			qos);
1766 		if (!ret)
1767 			ql_log(ql_log_warn, vha, 0x7084,
1768 			    "Can't create request queue for VP[%d]\n",
1769 			    vha->vp_idx);
1770 		else {
1771 			ql_dbg(ql_dbg_multiq, vha, 0xc001,
1772 			    "Request Que:%d Q0s: %d) created for VP[%d]\n",
1773 			    ret, qos, vha->vp_idx);
1774 			ql_dbg(ql_dbg_user, vha, 0x7085,
1775 			    "Request Que:%d Q0s: %d) created for VP[%d]\n",
1776 			    ret, qos, vha->vp_idx);
1777 			req = ha->req_q_map[ret];
1778 		}
1779 	}
1780 
1781 vport_queue:
1782 	vha->req = req;
1783 	return 0;
1784 
1785 vport_create_failed_2:
1786 	qla24xx_disable_vp(vha);
1787 	qla24xx_deallocate_vp_id(vha);
1788 	scsi_host_put(vha->host);
1789 	return FC_VPORT_FAILED;
1790 }
1791 
1792 static int
1793 qla24xx_vport_delete(struct fc_vport *fc_vport)
1794 {
1795 	scsi_qla_host_t *vha = fc_vport->dd_data;
1796 	struct qla_hw_data *ha = vha->hw;
1797 	uint16_t id = vha->vp_idx;
1798 
1799 	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1800 	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1801 		msleep(1000);
1802 
1803 	qla24xx_disable_vp(vha);
1804 
1805 	vha->flags.delete_progress = 1;
1806 
1807 	fc_remove_host(vha->host);
1808 
1809 	scsi_remove_host(vha->host);
1810 
1811 	/* Allow timer to run to drain queued items, when removing vp */
1812 	qla24xx_deallocate_vp_id(vha);
1813 
1814 	if (vha->timer_active) {
1815 		qla2x00_vp_stop_timer(vha);
1816 		ql_dbg(ql_dbg_user, vha, 0x7086,
1817 		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1818 	}
1819 
1820 	/* No pending activities shall be there on the vha now */
1821 	if (ql2xextended_error_logging & ql_dbg_user)
1822 		msleep(random32()%10);  /* Just to see if something falls on
1823 					* the net we have placed below */
1824 
1825 	BUG_ON(atomic_read(&vha->vref_count));
1826 
1827 	qla2x00_free_fcports(vha);
1828 
1829 	mutex_lock(&ha->vport_lock);
1830 	ha->cur_vport_count--;
1831 	clear_bit(vha->vp_idx, ha->vp_idx_map);
1832 	mutex_unlock(&ha->vport_lock);
1833 
1834 	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1835 		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1836 			ql_log(ql_log_warn, vha, 0x7087,
1837 			    "Queue delete failed.\n");
1838 	}
1839 
1840 	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
1841 	scsi_host_put(vha->host);
1842 	return 0;
1843 }
1844 
1845 static int
1846 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1847 {
1848 	scsi_qla_host_t *vha = fc_vport->dd_data;
1849 
1850 	if (disable)
1851 		qla24xx_disable_vp(vha);
1852 	else
1853 		qla24xx_enable_vp(vha);
1854 
1855 	return 0;
1856 }
1857 
1858 struct fc_function_template qla2xxx_transport_functions = {
1859 
1860 	.show_host_node_name = 1,
1861 	.show_host_port_name = 1,
1862 	.show_host_supported_classes = 1,
1863 	.show_host_supported_speeds = 1,
1864 
1865 	.get_host_port_id = qla2x00_get_host_port_id,
1866 	.show_host_port_id = 1,
1867 	.get_host_speed = qla2x00_get_host_speed,
1868 	.show_host_speed = 1,
1869 	.get_host_port_type = qla2x00_get_host_port_type,
1870 	.show_host_port_type = 1,
1871 	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
1872 	.show_host_symbolic_name = 1,
1873 	.set_host_system_hostname = qla2x00_set_host_system_hostname,
1874 	.show_host_system_hostname = 1,
1875 	.get_host_fabric_name = qla2x00_get_host_fabric_name,
1876 	.show_host_fabric_name = 1,
1877 	.get_host_port_state = qla2x00_get_host_port_state,
1878 	.show_host_port_state = 1,
1879 
1880 	.dd_fcrport_size = sizeof(struct fc_port *),
1881 	.show_rport_supported_classes = 1,
1882 
1883 	.get_starget_node_name = qla2x00_get_starget_node_name,
1884 	.show_starget_node_name = 1,
1885 	.get_starget_port_name = qla2x00_get_starget_port_name,
1886 	.show_starget_port_name = 1,
1887 	.get_starget_port_id  = qla2x00_get_starget_port_id,
1888 	.show_starget_port_id = 1,
1889 
1890 	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1891 	.show_rport_dev_loss_tmo = 1,
1892 
1893 	.issue_fc_host_lip = qla2x00_issue_lip,
1894 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1895 	.terminate_rport_io = qla2x00_terminate_rport_io,
1896 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
1897 
1898 	.vport_create = qla24xx_vport_create,
1899 	.vport_disable = qla24xx_vport_disable,
1900 	.vport_delete = qla24xx_vport_delete,
1901 	.bsg_request = qla24xx_bsg_request,
1902 	.bsg_timeout = qla24xx_bsg_timeout,
1903 };
1904 
1905 struct fc_function_template qla2xxx_transport_vport_functions = {
1906 
1907 	.show_host_node_name = 1,
1908 	.show_host_port_name = 1,
1909 	.show_host_supported_classes = 1,
1910 
1911 	.get_host_port_id = qla2x00_get_host_port_id,
1912 	.show_host_port_id = 1,
1913 	.get_host_speed = qla2x00_get_host_speed,
1914 	.show_host_speed = 1,
1915 	.get_host_port_type = qla2x00_get_host_port_type,
1916 	.show_host_port_type = 1,
1917 	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
1918 	.show_host_symbolic_name = 1,
1919 	.set_host_system_hostname = qla2x00_set_host_system_hostname,
1920 	.show_host_system_hostname = 1,
1921 	.get_host_fabric_name = qla2x00_get_host_fabric_name,
1922 	.show_host_fabric_name = 1,
1923 	.get_host_port_state = qla2x00_get_host_port_state,
1924 	.show_host_port_state = 1,
1925 
1926 	.dd_fcrport_size = sizeof(struct fc_port *),
1927 	.show_rport_supported_classes = 1,
1928 
1929 	.get_starget_node_name = qla2x00_get_starget_node_name,
1930 	.show_starget_node_name = 1,
1931 	.get_starget_port_name = qla2x00_get_starget_port_name,
1932 	.show_starget_port_name = 1,
1933 	.get_starget_port_id  = qla2x00_get_starget_port_id,
1934 	.show_starget_port_id = 1,
1935 
1936 	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1937 	.show_rport_dev_loss_tmo = 1,
1938 
1939 	.issue_fc_host_lip = qla2x00_issue_lip,
1940 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1941 	.terminate_rport_io = qla2x00_terminate_rport_io,
1942 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
1943 	.bsg_request = qla24xx_bsg_request,
1944 	.bsg_timeout = qla24xx_bsg_timeout,
1945 };
1946 
1947 void
1948 qla2x00_init_host_attr(scsi_qla_host_t *vha)
1949 {
1950 	struct qla_hw_data *ha = vha->hw;
1951 	u32 speed = FC_PORTSPEED_UNKNOWN;
1952 
1953 	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1954 	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1955 	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1956 	fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
1957 			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
1958 	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1959 	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1960 
1961 	if (IS_CNA_CAPABLE(ha))
1962 		speed = FC_PORTSPEED_10GBIT;
1963 	else if (IS_QLA2031(ha))
1964 		speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
1965 		    FC_PORTSPEED_4GBIT;
1966 	else if (IS_QLA25XX(ha))
1967 		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
1968 		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1969 	else if (IS_QLA24XX_TYPE(ha))
1970 		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
1971 		    FC_PORTSPEED_1GBIT;
1972 	else if (IS_QLA23XX(ha))
1973 		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1974 	else
1975 		speed = FC_PORTSPEED_1GBIT;
1976 	fc_host_supported_speeds(vha->host) = speed;
1977 }
1978