1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c)  2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8#include "qla_target.h"
9
10#include <linux/kthread.h>
11#include <linux/vmalloc.h>
12#include <linux/slab.h>
13#include <linux/delay.h>
14
15static int qla24xx_vport_disable(struct fc_vport *, bool);
16
17/* SYSFS attributes --------------------------------------------------------- */
18
19static ssize_t
20qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21			   struct bin_attribute *bin_attr,
22			   char *buf, loff_t off, size_t count)
23{
24	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25	    struct device, kobj)));
26	struct qla_hw_data *ha = vha->hw;
27	int rval = 0;
28
29	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30		return 0;
31
32	if (IS_P3P_TYPE(ha)) {
33		if (off < ha->md_template_size) {
34			rval = memory_read_from_buffer(buf, count,
35			    &off, ha->md_tmplt_hdr, ha->md_template_size);
36			return rval;
37		}
38		off -= ha->md_template_size;
39		rval = memory_read_from_buffer(buf, count,
40		    &off, ha->md_dump, ha->md_dump_size);
41		return rval;
42	} else if (ha->mctp_dumped && ha->mctp_dump_reading)
43		return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44		    MCTP_DUMP_SIZE);
45	else if (ha->fw_dump_reading)
46		return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
47					ha->fw_dump_len);
48	else
49		return 0;
50}
51
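/*
 * Control interface for the 'fw_dump' attribute.  The decimal value
 * written at offset 0 selects the action (see the switch below):
 *   0 - release a completed dump and clear the reading state
 *   1 - expose the raw firmware dump for reading
 *   2 - (re)allocate the firmware dump buffer
 *   3 - force a system error / take reset ownership (P3P parts)
 *   4 - report whether a MiniDump template is present (P3P parts)
 *   5 - request an ISP abort (P3P parts)
 *   6 - release a completed MCTP dump
 *   7 - expose the raw MCTP dump for reading
 */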
52static ssize_t
53qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
54			    struct bin_attribute *bin_attr,
55			    char *buf, loff_t off, size_t count)
56{
57	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
58	    struct device, kobj)));
59	struct qla_hw_data *ha = vha->hw;
60	int reading;
61
62	if (off != 0)
63		return (0);
64
65	reading = simple_strtol(buf, NULL, 10);
66	switch (reading) {
67	case 0:
68		if (!ha->fw_dump_reading)
69			break;
70
71		ql_log(ql_log_info, vha, 0x705d,
72		    "Firmware dump cleared on (%ld).\n", vha->host_no);
73
74		if (IS_P3P_TYPE(ha)) {
75			qla82xx_md_free(vha);
76			qla82xx_md_prep(vha);
77		}
78		ha->fw_dump_reading = 0;
79		ha->fw_dumped = 0;
80		break;
81	case 1:
82		if (ha->fw_dumped && !ha->fw_dump_reading) {
83			ha->fw_dump_reading = 1;
84
85			ql_log(ql_log_info, vha, 0x705e,
86			    "Raw firmware dump ready for read on (%ld).\n",
87			    vha->host_no);
88		}
89		break;
90	case 2:
91		qla2x00_alloc_fw_dump(vha);
92		break;
93	case 3:
94		if (IS_QLA82XX(ha)) {
95			qla82xx_idc_lock(ha);
96			qla82xx_set_reset_owner(vha);
97			qla82xx_idc_unlock(ha);
98		} else if (IS_QLA8044(ha)) {
99			qla8044_idc_lock(ha);
100			qla82xx_set_reset_owner(vha);
101			qla8044_idc_unlock(ha);
102		} else
103			qla2x00_system_error(vha);
104		break;
105	case 4:
106		if (IS_P3P_TYPE(ha)) {
107			if (ha->md_tmplt_hdr)
108				ql_dbg(ql_dbg_user, vha, 0x705b,
109				    "MiniDump supported with this firmware.\n");
110			else
111				ql_dbg(ql_dbg_user, vha, 0x709d,
112				    "MiniDump not supported with this firmware.\n");
113		}
114		break;
115	case 5:
116		if (IS_P3P_TYPE(ha))
117			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
118		break;
119	case 6:
120		if (!ha->mctp_dump_reading)
121			break;
122		ql_log(ql_log_info, vha, 0x70c1,
123		    "MCTP dump cleared on (%ld).\n", vha->host_no);
124		ha->mctp_dump_reading = 0;
125		ha->mctp_dumped = 0;
126		break;
127	case 7:
128		if (ha->mctp_dumped && !ha->mctp_dump_reading) {
129			ha->mctp_dump_reading = 1;
130			ql_log(ql_log_info, vha, 0x70c2,
131			    "Raw mctp dump ready for read on (%ld).\n",
132			    vha->host_no);
133		}
134		break;
135	}
136	return count;
137}
138
139static struct bin_attribute sysfs_fw_dump_attr = {
140	.attr = {
141		.name = "fw_dump",
142		.mode = S_IRUSR | S_IWUSR,
143	},
144	.size = 0,
145	.read = qla2x00_sysfs_read_fw_dump,
146	.write = qla2x00_sysfs_write_fw_dump,
147};
148
149static ssize_t
150qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj,
151			   struct bin_attribute *bin_attr,
152			   char *buf, loff_t off, size_t count)
153{
154	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
155	    struct device, kobj)));
156	struct qla_hw_data *ha = vha->hw;
157
158	if (!ha->fw_dump_template || !ha->fw_dump_template_len)
159		return 0;
160
161	ql_dbg(ql_dbg_user, vha, 0x70e2,
162	    "chunk <- off=%llx count=%zx\n", off, count);
163	return memory_read_from_buffer(buf, count, &off,
164	    ha->fw_dump_template, ha->fw_dump_template_len);
165}
166
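/*
 * Accepts an ISP27xx firmware dump template in chunks.  A write at offset
 * 0 discards any previous template and dump buffers and allocates a new
 * template buffer sized from the chunk header; once the last chunk lands,
 * the dump buffer itself is sized from the template and allocated.
 */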
167static ssize_t
168qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj,
169			    struct bin_attribute *bin_attr,
170			    char *buf, loff_t off, size_t count)
171{
172	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
173	    struct device, kobj)));
174	struct qla_hw_data *ha = vha->hw;
175	uint32_t size;
176
177	if (off == 0) {
178		if (ha->fw_dump)
179			vfree(ha->fw_dump);
180		if (ha->fw_dump_template)
181			vfree(ha->fw_dump_template);
182
183		ha->fw_dump = NULL;
184		ha->fw_dump_len = 0;
185		ha->fw_dump_template = NULL;
186		ha->fw_dump_template_len = 0;
187
188		size = qla27xx_fwdt_template_size(buf);
189		ql_dbg(ql_dbg_user, vha, 0x70d1,
190		    "-> allocating fwdt (%x bytes)...\n", size);
191		ha->fw_dump_template = vmalloc(size);
192		if (!ha->fw_dump_template) {
193			ql_log(ql_log_warn, vha, 0x70d2,
194			    "Failed to allocate fwdt (%x bytes).\n", size);
195			return -ENOMEM;
196		}
197		ha->fw_dump_template_len = size;
198	}
199
200	if (off + count > ha->fw_dump_template_len) {
201		count = ha->fw_dump_template_len - off;
202		ql_dbg(ql_dbg_user, vha, 0x70d3,
203		    "chunk -> truncating to %zx bytes.\n", count);
204	}
205
206	ql_dbg(ql_dbg_user, vha, 0x70d4,
207	    "chunk -> off=%llx count=%zx\n", off, count);
208	memcpy(ha->fw_dump_template + off, buf, count);
209
210	if (off + count == ha->fw_dump_template_len) {
211		size = qla27xx_fwdt_calculate_dump_size(vha);
212		ql_dbg(ql_dbg_user, vha, 0x70d5,
213		    "-> allocating fwdump (%x bytes)...\n", size);
214		ha->fw_dump = vmalloc(size);
215		if (!ha->fw_dump) {
216			ql_log(ql_log_warn, vha, 0x70d6,
217			    "Failed to allocate fwdump (%x bytes).\n", size);
218			return -ENOMEM;
219		}
220		ha->fw_dump_len = size;
221	}
222
223	return count;
224}
225static struct bin_attribute sysfs_fw_dump_template_attr = {
226	.attr = {
227		.name = "fw_dump_template",
228		.mode = S_IRUSR | S_IWUSR,
229	},
230	.size = 0,
231	.read = qla2x00_sysfs_read_fw_dump_template,
232	.write = qla2x00_sysfs_write_fw_dump_template,
233};
234
235static ssize_t
236qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
237			 struct bin_attribute *bin_attr,
238			 char *buf, loff_t off, size_t count)
239{
240	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
241	    struct device, kobj)));
242	struct qla_hw_data *ha = vha->hw;
243
244	if (!capable(CAP_SYS_ADMIN))
245		return 0;
246
247	if (IS_NOCACHE_VPD_TYPE(ha))
248		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
249		    ha->nvram_size);
250	return memory_read_from_buffer(buf, count, &off, ha->nvram,
251					ha->nvram_size);
252}
253
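/*
 * Rewrites the NVRAM image.  The write must start at offset 0 and span the
 * full nvram_size; the checksum is recomputed here (32-bit words on FWI-2
 * capable parts, bytes otherwise) before programming, and an ISP abort is
 * scheduled so the new settings take effect immediately.
 */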
254static ssize_t
255qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
256			  struct bin_attribute *bin_attr,
257			  char *buf, loff_t off, size_t count)
258{
259	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
260	    struct device, kobj)));
261	struct qla_hw_data *ha = vha->hw;
262	uint16_t	cnt;
263
264	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
265	    !ha->isp_ops->write_nvram)
266		return -EINVAL;
267
268	/* Checksum NVRAM. */
269	if (IS_FWI2_CAPABLE(ha)) {
270		uint32_t *iter;
271		uint32_t chksum;
272
273		iter = (uint32_t *)buf;
274		chksum = 0;
275		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
276			chksum += le32_to_cpu(*iter++);
277		chksum = ~chksum + 1;
278		*iter = cpu_to_le32(chksum);
279	} else {
280		uint8_t *iter;
281		uint8_t chksum;
282
283		iter = (uint8_t *)buf;
284		chksum = 0;
285		for (cnt = 0; cnt < count - 1; cnt++)
286			chksum += *iter++;
287		chksum = ~chksum + 1;
288		*iter = chksum;
289	}
290
291	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
292		ql_log(ql_log_warn, vha, 0x705f,
293		    "HBA not online, failing NVRAM update.\n");
294		return -EAGAIN;
295	}
296
297	/* Write NVRAM. */
298	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
299	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
300	    count);
301
302	ql_dbg(ql_dbg_user, vha, 0x7060,
303	    "Setting ISP_ABORT_NEEDED\n");
304	/* NVRAM settings take effect immediately. */
305	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
306	qla2xxx_wake_dpc(vha);
307	qla2x00_wait_for_chip_reset(vha);
308
309	return count;
310}
311
312static struct bin_attribute sysfs_nvram_attr = {
313	.attr = {
314		.name = "nvram",
315		.mode = S_IRUSR | S_IWUSR,
316	},
317	.size = 512,
318	.read = qla2x00_sysfs_read_nvram,
319	.write = qla2x00_sysfs_write_nvram,
320};
321
322static ssize_t
323qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
324			  struct bin_attribute *bin_attr,
325			  char *buf, loff_t off, size_t count)
326{
327	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
328	    struct device, kobj)));
329	struct qla_hw_data *ha = vha->hw;
330	ssize_t rval = 0;
331
332	if (ha->optrom_state != QLA_SREADING)
333		return 0;
334
335	mutex_lock(&ha->optrom_mutex);
336	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
337	    ha->optrom_region_size);
338	mutex_unlock(&ha->optrom_mutex);
339
340	return rval;
341}
342
343static ssize_t
344qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
345			   struct bin_attribute *bin_attr,
346			   char *buf, loff_t off, size_t count)
347{
348	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
349	    struct device, kobj)));
350	struct qla_hw_data *ha = vha->hw;
351
352	if (ha->optrom_state != QLA_SWRITING)
353		return -EINVAL;
354	if (off > ha->optrom_region_size)
355		return -ERANGE;
356	if (off + count > ha->optrom_region_size)
357		count = ha->optrom_region_size - off;
358
359	mutex_lock(&ha->optrom_mutex);
360	memcpy(&ha->optrom_buffer[off], buf, count);
361	mutex_unlock(&ha->optrom_mutex);
362
363	return count;
364}
365
366static struct bin_attribute sysfs_optrom_attr = {
367	.attr = {
368		.name = "optrom",
369		.mode = S_IRUSR | S_IWUSR,
370	},
371	.size = 0,
372	.read = qla2x00_sysfs_read_optrom,
373	.write = qla2x00_sysfs_write_optrom,
374};
375
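/*
 * Option-ROM access is staged through the 'optrom' attribute and driven by
 * writes of the form "<cmd>[:<start>:<size>]" (command in decimal, start
 * and size in hex) to this attribute:
 *   0 - release the staging buffer
 *   1 - read a flash region into the staging buffer (read via 'optrom')
 *   2 - prepare a staging buffer for an update (filled via 'optrom')
 *   3 - burn the staged data to flash
 */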
376static ssize_t
377qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
378			       struct bin_attribute *bin_attr,
379			       char *buf, loff_t off, size_t count)
380{
381	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
382	    struct device, kobj)));
383	struct qla_hw_data *ha = vha->hw;
384	uint32_t start = 0;
385	uint32_t size = ha->optrom_size;
386	int val, valid;
387	ssize_t rval = count;
388
389	if (off)
390		return -EINVAL;
391
392	if (unlikely(pci_channel_offline(ha->pdev)))
393		return -EAGAIN;
394
395	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
396		return -EINVAL;
397	if (start > ha->optrom_size)
398		return -EINVAL;
399
400	mutex_lock(&ha->optrom_mutex);
401	switch (val) {
402	case 0:
403		if (ha->optrom_state != QLA_SREADING &&
404		    ha->optrom_state != QLA_SWRITING) {
405			rval = -EINVAL;
406			goto out;
407		}
408		ha->optrom_state = QLA_SWAITING;
409
410		ql_dbg(ql_dbg_user, vha, 0x7061,
411		    "Freeing flash region allocation -- 0x%x bytes.\n",
412		    ha->optrom_region_size);
413
414		vfree(ha->optrom_buffer);
415		ha->optrom_buffer = NULL;
416		break;
417	case 1:
418		if (ha->optrom_state != QLA_SWAITING) {
419			rval = -EINVAL;
420			goto out;
421		}
422
423		ha->optrom_region_start = start;
424		ha->optrom_region_size = start + size > ha->optrom_size ?
425		    ha->optrom_size - start : size;
426
427		ha->optrom_state = QLA_SREADING;
428		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
429		if (ha->optrom_buffer == NULL) {
430			ql_log(ql_log_warn, vha, 0x7062,
431			    "Unable to allocate memory for optrom retrieval "
432			    "(%x).\n", ha->optrom_region_size);
433
434			ha->optrom_state = QLA_SWAITING;
435			rval = -ENOMEM;
436			goto out;
437		}
438
439		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
440			ql_log(ql_log_warn, vha, 0x7063,
441			    "HBA not online, failing flash read.\n");
442			rval = -EAGAIN;
443			goto out;
444		}
445
446		ql_dbg(ql_dbg_user, vha, 0x7064,
447		    "Reading flash region -- 0x%x/0x%x.\n",
448		    ha->optrom_region_start, ha->optrom_region_size);
449
450		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
451		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
452		    ha->optrom_region_start, ha->optrom_region_size);
453		break;
454	case 2:
455		if (ha->optrom_state != QLA_SWAITING) {
456			rval = -EINVAL;
457			goto out;
458		}
459
460		/*
461		 * We need to be more restrictive on which FLASH regions are
462		 * allowed to be updated via user-space.  Regions accessible
463		 * via this method include:
464		 *
465		 * ISP21xx/ISP22xx/ISP23xx type boards:
466		 *
467		 * 	0x000000 -> 0x020000 -- Boot code.
468		 *
469		 * ISP2322/ISP24xx type boards:
470		 *
471		 * 	0x000000 -> 0x07ffff -- Boot code.
472		 * 	0x080000 -> 0x0fffff -- Firmware.
473		 *
474		 * ISP25xx type boards:
475		 *
476		 * 	0x000000 -> 0x07ffff -- Boot code.
477		 * 	0x080000 -> 0x0fffff -- Firmware.
478		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
479		 */
480		valid = 0;
481		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
482			valid = 1;
483		else if (start == (ha->flt_region_boot * 4) ||
484		    start == (ha->flt_region_fw * 4))
485			valid = 1;
486		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
487			|| IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
488			|| IS_QLA27XX(ha))
489			valid = 1;
490		if (!valid) {
491			ql_log(ql_log_warn, vha, 0x7065,
492			    "Invalid start region 0x%x/0x%x.\n", start, size);
493			rval = -EINVAL;
494			goto out;
495		}
496
497		ha->optrom_region_start = start;
498		ha->optrom_region_size = start + size > ha->optrom_size ?
499		    ha->optrom_size - start : size;
500
501		ha->optrom_state = QLA_SWRITING;
502		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
503		if (ha->optrom_buffer == NULL) {
504			ql_log(ql_log_warn, vha, 0x7066,
505			    "Unable to allocate memory for optrom update "
506			    "(%x)\n", ha->optrom_region_size);
507
508			ha->optrom_state = QLA_SWAITING;
509			rval = -ENOMEM;
510			goto out;
511		}
512
513		ql_dbg(ql_dbg_user, vha, 0x7067,
514		    "Staging flash region write -- 0x%x/0x%x.\n",
515		    ha->optrom_region_start, ha->optrom_region_size);
516
517		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
518		break;
519	case 3:
520		if (ha->optrom_state != QLA_SWRITING) {
521			rval = -EINVAL;
522			goto out;
523		}
524
525		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
526			ql_log(ql_log_warn, vha, 0x7068,
527			    "HBA not online, failing flash update.\n");
528			rval = -EAGAIN;
529			goto out;
530		}
531
532		ql_dbg(ql_dbg_user, vha, 0x7069,
533		    "Writing flash region -- 0x%x/0x%x.\n",
534		    ha->optrom_region_start, ha->optrom_region_size);
535
536		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
537		    ha->optrom_region_start, ha->optrom_region_size);
538		break;
539	default:
540		rval = -EINVAL;
541	}
542
543out:
544	mutex_unlock(&ha->optrom_mutex);
545	return rval;
546}
547
548static struct bin_attribute sysfs_optrom_ctl_attr = {
549	.attr = {
550		.name = "optrom_ctl",
551		.mode = S_IWUSR,
552	},
553	.size = 0,
554	.write = qla2x00_sysfs_write_optrom_ctl,
555};
556
557static ssize_t
558qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
559		       struct bin_attribute *bin_attr,
560		       char *buf, loff_t off, size_t count)
561{
562	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
563	    struct device, kobj)));
564	struct qla_hw_data *ha = vha->hw;
565
566	if (unlikely(pci_channel_offline(ha->pdev)))
567		return -EAGAIN;
568
569	if (!capable(CAP_SYS_ADMIN))
570		return -EINVAL;
571
572	if (IS_NOCACHE_VPD_TYPE(ha))
573		ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
574		    ha->vpd_size);
575	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
576}
577
578static ssize_t
579qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
580			struct bin_attribute *bin_attr,
581			char *buf, loff_t off, size_t count)
582{
583	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
584	    struct device, kobj)));
585	struct qla_hw_data *ha = vha->hw;
586	uint8_t *tmp_data;
587
588	if (unlikely(pci_channel_offline(ha->pdev)))
589		return 0;
590
591	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
592	    !ha->isp_ops->write_nvram)
593		return 0;
594
595	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
596		ql_log(ql_log_warn, vha, 0x706a,
597		    "HBA not online, failing VPD update.\n");
598		return -EAGAIN;
599	}
600
601	/* Write NVRAM. */
602	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
603	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
604
605	/* Update flash version information for 4Gb & above. */
606	if (!IS_FWI2_CAPABLE(ha))
607		return -EINVAL;
608
609	tmp_data = vmalloc(256);
610	if (!tmp_data) {
611		ql_log(ql_log_warn, vha, 0x706b,
612		    "Unable to allocate memory for VPD information update.\n");
613		return -ENOMEM;
614	}
615	ha->isp_ops->get_flash_version(vha, tmp_data);
616	vfree(tmp_data);
617
618	return count;
619}
620
621static struct bin_attribute sysfs_vpd_attr = {
622	.attr = {
623		.name = "vpd",
624		.mode = S_IRUSR | S_IWUSR,
625	},
626	.size = 0,
627	.read = qla2x00_sysfs_read_vpd,
628	.write = qla2x00_sysfs_write_vpd,
629};
630
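/*
 * Returns the SFP transceiver data: both device pages (I2C addresses 0xa0
 * and 0xa2) are read in SFP_BLOCK_SIZE chunks through a DMA-able scratch
 * buffer that is allocated on first use.
 */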
631static ssize_t
632qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
633		       struct bin_attribute *bin_attr,
634		       char *buf, loff_t off, size_t count)
635{
636	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
637	    struct device, kobj)));
638	struct qla_hw_data *ha = vha->hw;
639	uint16_t iter, addr, offset;
640	int rval;
641
642	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
643		return 0;
644
645	if (ha->sfp_data)
646		goto do_read;
647
648	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
649	    &ha->sfp_data_dma);
650	if (!ha->sfp_data) {
651		ql_log(ql_log_warn, vha, 0x706c,
652		    "Unable to allocate memory for SFP read-data.\n");
653		return 0;
654	}
655
656do_read:
657	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
658	addr = 0xa0;
659	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
660	    iter++, offset += SFP_BLOCK_SIZE) {
661		if (iter == 4) {
662			/* Skip to next device address. */
663			addr = 0xa2;
664			offset = 0;
665		}
666
667		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
668		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
669		if (rval != QLA_SUCCESS) {
670			ql_log(ql_log_warn, vha, 0x706d,
671			    "Unable to read SFP data (%x/%x/%x).\n", rval,
672			    addr, offset);
673
674			return -EIO;
675		}
676		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
677		buf += SFP_BLOCK_SIZE;
678	}
679
680	return count;
681}
682
683static struct bin_attribute sysfs_sfp_attr = {
684	.attr = {
685		.name = "sfp",
686		.mode = S_IRUSR | S_IWUSR,
687	},
688	.size = SFP_DEV_SIZE * 2,
689	.read = qla2x00_sysfs_read_sfp,
690};
691
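/*
 * Reset control.  The value written here (parsed as a decimal integer)
 * selects the operation handled by the switch below:
 *   0x2025c - ISP reset
 *   0x2025d - MPI reset (ISP81xx/ISP83xx only)
 *   0x2025e - FCoE context reset (P3P base port only)
 *   0x2025f - disable reset via IDC control (ISP8031)
 *   0x20260 - enable reset via IDC control (ISP8031)
 *   0x20261 - refresh cached flash version info without a reset
 */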
692static ssize_t
693qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
694			struct bin_attribute *bin_attr,
695			char *buf, loff_t off, size_t count)
696{
697	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
698	    struct device, kobj)));
699	struct qla_hw_data *ha = vha->hw;
700	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
701	int type;
702	uint32_t idc_control;
703	uint8_t *tmp_data = NULL;
704	if (off != 0)
705		return -EINVAL;
706
707	type = simple_strtol(buf, NULL, 10);
708	switch (type) {
709	case 0x2025c:
710		ql_log(ql_log_info, vha, 0x706e,
711		    "Issuing ISP reset.\n");
712
713		scsi_block_requests(vha->host);
714		if (IS_QLA82XX(ha)) {
715			ha->flags.isp82xx_no_md_cap = 1;
716			qla82xx_idc_lock(ha);
717			qla82xx_set_reset_owner(vha);
718			qla82xx_idc_unlock(ha);
719		} else if (IS_QLA8044(ha)) {
720			qla8044_idc_lock(ha);
721			idc_control = qla8044_rd_reg(ha,
722			    QLA8044_IDC_DRV_CTRL);
723			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
724			    (idc_control | GRACEFUL_RESET_BIT1));
725			qla82xx_set_reset_owner(vha);
726			qla8044_idc_unlock(ha);
727		} else {
728			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
729			qla2xxx_wake_dpc(vha);
730		}
731		qla2x00_wait_for_chip_reset(vha);
732		scsi_unblock_requests(vha->host);
733		break;
734	case 0x2025d:
735		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
736			return -EPERM;
737
738		ql_log(ql_log_info, vha, 0x706f,
739		    "Issuing MPI reset.\n");
740
741		if (IS_QLA83XX(ha)) {
742			uint32_t idc_control;
743
744			qla83xx_idc_lock(vha, 0);
745			__qla83xx_get_idc_control(vha, &idc_control);
746			idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
747			__qla83xx_set_idc_control(vha, idc_control);
748			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
749			    QLA8XXX_DEV_NEED_RESET);
750			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
751			qla83xx_idc_unlock(vha, 0);
752			break;
753		} else {
754			/* Make sure FC side is not in reset */
755			qla2x00_wait_for_hba_online(vha);
756
757			/* Issue MPI reset */
758			scsi_block_requests(vha->host);
759			if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
760				ql_log(ql_log_warn, vha, 0x7070,
761				    "MPI reset failed.\n");
762			scsi_unblock_requests(vha->host);
763			break;
764		}
765	case 0x2025e:
766		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
767			ql_log(ql_log_info, vha, 0x7071,
768			    "FCoE ctx reset not supported.\n");
769			return -EPERM;
770		}
771
772		ql_log(ql_log_info, vha, 0x7072,
773		    "Issuing FCoE ctx reset.\n");
774		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
775		qla2xxx_wake_dpc(vha);
776		qla2x00_wait_for_fcoe_ctx_reset(vha);
777		break;
778	case 0x2025f:
779		if (!IS_QLA8031(ha))
780			return -EPERM;
781		ql_log(ql_log_info, vha, 0x70bc,
782		    "Disabling Reset by IDC control\n");
783		qla83xx_idc_lock(vha, 0);
784		__qla83xx_get_idc_control(vha, &idc_control);
785		idc_control |= QLA83XX_IDC_RESET_DISABLED;
786		__qla83xx_set_idc_control(vha, idc_control);
787		qla83xx_idc_unlock(vha, 0);
788		break;
789	case 0x20260:
790		if (!IS_QLA8031(ha))
791			return -EPERM;
792		ql_log(ql_log_info, vha, 0x70bd,
793		    "Enabling Reset by IDC control\n");
794		qla83xx_idc_lock(vha, 0);
795		__qla83xx_get_idc_control(vha, &idc_control);
796		idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
797		__qla83xx_set_idc_control(vha, idc_control);
798		qla83xx_idc_unlock(vha, 0);
799		break;
800	case 0x20261:
801		ql_dbg(ql_dbg_user, vha, 0x70e0,
802		    "Updating cache versions without reset.\n");
803
804		tmp_data = vmalloc(256);
805		if (!tmp_data) {
806			ql_log(ql_log_warn, vha, 0x70e1,
807			    "Unable to allocate memory for VPD information update.\n");
808			return -ENOMEM;
809		}
810		ha->isp_ops->get_flash_version(vha, tmp_data);
811		vfree(tmp_data);
812		break;
813	}
814	return count;
815}
816
817static struct bin_attribute sysfs_reset_attr = {
818	.attr = {
819		.name = "reset",
820		.mode = S_IWUSR,
821	},
822	.size = 0,
823	.write = qla2x00_sysfs_write_reset,
824};
825
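/*
 * Returns XGMAC statistics (CNA parts): the data is fetched into a
 * persistent coherent DMA buffer and at most the firmware-reported
 * actual_size bytes are copied back to the caller.
 */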
826static ssize_t
827qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
828		       struct bin_attribute *bin_attr,
829		       char *buf, loff_t off, size_t count)
830{
831	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
832	    struct device, kobj)));
833	struct qla_hw_data *ha = vha->hw;
834	int rval;
835	uint16_t actual_size;
836
837	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
838		return 0;
839
840	if (ha->xgmac_data)
841		goto do_read;
842
843	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
844	    &ha->xgmac_data_dma, GFP_KERNEL);
845	if (!ha->xgmac_data) {
846		ql_log(ql_log_warn, vha, 0x7076,
847		    "Unable to allocate memory for XGMAC read-data.\n");
848		return 0;
849	}
850
851do_read:
852	actual_size = 0;
853	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
854
855	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
856	    XGMAC_DATA_SIZE, &actual_size);
857	if (rval != QLA_SUCCESS) {
858		ql_log(ql_log_warn, vha, 0x7077,
859		    "Unable to read XGMAC data (%x).\n", rval);
860		count = 0;
861	}
862
863	count = actual_size > count ? count : actual_size;
864	memcpy(buf, ha->xgmac_data, count);
865
866	return count;
867}
868
869static struct bin_attribute sysfs_xgmac_stats_attr = {
870	.attr = {
871		.name = "xgmac_stats",
872		.mode = S_IRUSR,
873	},
874	.size = 0,
875	.read = qla2x00_sysfs_read_xgmac_stats,
876};
877
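/*
 * Returns the DCBX TLV data (CNA parts), fetched into a persistent
 * coherent DMA buffer via the mailbox interface.
 */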
878static ssize_t
879qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
880		       struct bin_attribute *bin_attr,
881		       char *buf, loff_t off, size_t count)
882{
883	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
884	    struct device, kobj)));
885	struct qla_hw_data *ha = vha->hw;
886	int rval;
887	uint16_t actual_size;
888
889	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
890		return 0;
891
892	if (ha->dcbx_tlv)
893		goto do_read;
894
895	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
896	    &ha->dcbx_tlv_dma, GFP_KERNEL);
897	if (!ha->dcbx_tlv) {
898		ql_log(ql_log_warn, vha, 0x7078,
899		    "Unable to allocate memory for DCBX TLV read-data.\n");
900		return -ENOMEM;
901	}
902
903do_read:
904	actual_size = 0;
905	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
906
907	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
908	    DCBX_TLV_DATA_SIZE);
909	if (rval != QLA_SUCCESS) {
910		ql_log(ql_log_warn, vha, 0x7079,
911		    "Unable to read DCBX TLV (%x).\n", rval);
912		return -EIO;
913	}
914
915	memcpy(buf, ha->dcbx_tlv, count);
916
917	return count;
918}
919
920static struct bin_attribute sysfs_dcbx_tlv_attr = {
921	.attr = {
922		.name = "dcbx_tlv",
923		.mode = S_IRUSR,
924	},
925	.size = 0,
926	.read = qla2x00_sysfs_read_dcbx_tlv,
927};
928
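/*
 * is4GBp_only gates attribute creation per adapter family (see
 * qla2x00_alloc_sysfs_attr): 0 = always created, any non-zero value
 * requires FWI-2 capable hardware, 2 additionally requires ISP25xx,
 * 3 requires a CNA, and 0x27 requires ISP27xx.
 */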
929static struct sysfs_entry {
930	char *name;
931	struct bin_attribute *attr;
932	int is4GBp_only;
933} bin_file_entries[] = {
934	{ "fw_dump", &sysfs_fw_dump_attr, },
935	{ "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },
936	{ "nvram", &sysfs_nvram_attr, },
937	{ "optrom", &sysfs_optrom_attr, },
938	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
939	{ "vpd", &sysfs_vpd_attr, 1 },
940	{ "sfp", &sysfs_sfp_attr, 1 },
941	{ "reset", &sysfs_reset_attr, },
942	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
943	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
944	{ NULL },
945};
946
947void
948qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
949{
950	struct Scsi_Host *host = vha->host;
951	struct sysfs_entry *iter;
952	int ret;
953
954	for (iter = bin_file_entries; iter->name; iter++) {
955		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
956			continue;
957		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
958			continue;
959		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
960			continue;
961		if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
962			continue;
963
964		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
965		    iter->attr);
966		if (ret)
967			ql_log(ql_log_warn, vha, 0x00f3,
968			    "Unable to create sysfs %s binary attribute (%d).\n",
969			    iter->name, ret);
970		else
971			ql_dbg(ql_dbg_init, vha, 0x00f4,
972			    "Successfully created sysfs %s binary attribute.\n",
973			    iter->name);
974	}
975}
976
977void
978qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
979{
980	struct Scsi_Host *host = vha->host;
981	struct sysfs_entry *iter;
982	struct qla_hw_data *ha = vha->hw;
983
984	for (iter = bin_file_entries; iter->name; iter++) {
985		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
986			continue;
987		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
988			continue;
989		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
990			continue;
991		if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
992			continue;
993
994		sysfs_remove_bin_file(&host->shost_gendev.kobj,
995		    iter->attr);
996	}
997
998	if (stop_beacon && ha->beacon_blink_led == 1)
999		ha->isp_ops->beacon_off(vha);
1000}
1001
1002/* Scsi_Host attributes. */
1003
1004static ssize_t
1005qla2x00_drvr_version_show(struct device *dev,
1006			  struct device_attribute *attr, char *buf)
1007{
1008	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
1009}
1010
1011static ssize_t
1012qla2x00_fw_version_show(struct device *dev,
1013			struct device_attribute *attr, char *buf)
1014{
1015	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1016	struct qla_hw_data *ha = vha->hw;
1017	char fw_str[128];
1018
1019	return scnprintf(buf, PAGE_SIZE, "%s\n",
1020	    ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1021}
1022
1023static ssize_t
1024qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1025			char *buf)
1026{
1027	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1028	struct qla_hw_data *ha = vha->hw;
1029	uint32_t sn;
1030
1031	if (IS_QLAFX00(vha->hw)) {
1032		return scnprintf(buf, PAGE_SIZE, "%s\n",
1033		    vha->hw->mr.serial_num);
1034	} else if (IS_FWI2_CAPABLE(ha)) {
1035		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1036		return strlen(strcat(buf, "\n"));
1037	}
1038
1039	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1040	return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1041	    sn % 100000);
1042}
1043
1044static ssize_t
1045qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1046		      char *buf)
1047{
1048	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1049	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1050}
1051
1052static ssize_t
1053qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1054		    char *buf)
1055{
1056	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1057	struct qla_hw_data *ha = vha->hw;
1058
1059	if (IS_QLAFX00(vha->hw))
1060		return scnprintf(buf, PAGE_SIZE, "%s\n",
1061		    vha->hw->mr.hw_version);
1062
1063	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1064	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
1065	    ha->product_id[3]);
1066}
1067
1068static ssize_t
1069qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1070			char *buf)
1071{
1072	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1073
1074	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1075}
1076
1077static ssize_t
1078qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1079			char *buf)
1080{
1081	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1082	return scnprintf(buf, PAGE_SIZE, "%s\n",
1083	    vha->hw->model_desc ? vha->hw->model_desc : "");
1084}
1085
1086static ssize_t
1087qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1088		      char *buf)
1089{
1090	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1091	char pci_info[30];
1092
1093	return scnprintf(buf, PAGE_SIZE, "%s\n",
1094	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
1095}
1096
1097static ssize_t
1098qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1099			char *buf)
1100{
1101	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1102	struct qla_hw_data *ha = vha->hw;
1103	int len = 0;
1104
1105	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1106	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
1107	    vha->device_flags & DFLG_NO_CABLE)
1108		len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1109	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1110	    qla2x00_reset_active(vha))
1111		len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1112	else {
1113		len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1114
1115		switch (ha->current_topology) {
1116		case ISP_CFG_NL:
1117			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1118			break;
1119		case ISP_CFG_FL:
1120			len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1121			break;
1122		case ISP_CFG_N:
1123			len += scnprintf(buf + len, PAGE_SIZE-len,
1124			    "N_Port to N_Port\n");
1125			break;
1126		case ISP_CFG_F:
1127			len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1128			break;
1129		default:
1130			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1131			break;
1132		}
1133	}
1134	return len;
1135}
1136
1137static ssize_t
1138qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1139		 char *buf)
1140{
1141	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1142	int len = 0;
1143
1144	switch (vha->hw->zio_mode) {
1145	case QLA_ZIO_MODE_6:
1146		len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1147		break;
1148	case QLA_ZIO_DISABLED:
1149		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1150		break;
1151	}
1152	return len;
1153}
1154
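/*
 * Writing a non-zero value enables ZIO mode 6, zero disables it; the new
 * mode is applied by scheduling an ISP abort.
 */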
1155static ssize_t
1156qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1157		  const char *buf, size_t count)
1158{
1159	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1160	struct qla_hw_data *ha = vha->hw;
1161	int val = 0;
1162	uint16_t zio_mode;
1163
1164	if (!IS_ZIO_SUPPORTED(ha))
1165		return -ENOTSUPP;
1166
1167	if (sscanf(buf, "%d", &val) != 1)
1168		return -EINVAL;
1169
1170	if (val)
1171		zio_mode = QLA_ZIO_MODE_6;
1172	else
1173		zio_mode = QLA_ZIO_DISABLED;
1174
1175	/* Update per-hba values and queue a reset. */
1176	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1177		ha->zio_mode = zio_mode;
1178		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1179	}
1180	return strlen(buf);
1181}
1182
1183static ssize_t
1184qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1185		       char *buf)
1186{
1187	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1188
1189	return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1190}
1191
1192static ssize_t
1193qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1194			const char *buf, size_t count)
1195{
1196	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1197	int val = 0;
1198	uint16_t zio_timer;
1199
1200	if (sscanf(buf, "%d", &val) != 1)
1201		return -EINVAL;
1202	if (val > 25500 || val < 100)
1203		return -ERANGE;
1204
1205	zio_timer = (uint16_t)(val / 100);
1206	vha->hw->zio_timer = zio_timer;
1207
1208	return strlen(buf);
1209}
1210
1211static ssize_t
1212qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1213		    char *buf)
1214{
1215	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1216	int len = 0;
1217
1218	if (vha->hw->beacon_blink_led)
1219		len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1220	else
1221		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1222	return len;
1223}
1224
1225static ssize_t
1226qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1227		     const char *buf, size_t count)
1228{
1229	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1230	struct qla_hw_data *ha = vha->hw;
1231	int val = 0;
1232	int rval;
1233
1234	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1235		return -EPERM;
1236
1237	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1238		ql_log(ql_log_warn, vha, 0x707a,
1239		    "Abort ISP active -- ignoring beacon request.\n");
1240		return -EBUSY;
1241	}
1242
1243	if (sscanf(buf, "%d", &val) != 1)
1244		return -EINVAL;
1245
1246	if (val)
1247		rval = ha->isp_ops->beacon_on(vha);
1248	else
1249		rval = ha->isp_ops->beacon_off(vha);
1250
1251	if (rval != QLA_SUCCESS)
1252		count = 0;
1253
1254	return count;
1255}
1256
1257static ssize_t
1258qla2x00_optrom_bios_version_show(struct device *dev,
1259				 struct device_attribute *attr, char *buf)
1260{
1261	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1262	struct qla_hw_data *ha = vha->hw;
1263	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1264	    ha->bios_revision[0]);
1265}
1266
1267static ssize_t
1268qla2x00_optrom_efi_version_show(struct device *dev,
1269				struct device_attribute *attr, char *buf)
1270{
1271	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1272	struct qla_hw_data *ha = vha->hw;
1273	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1274	    ha->efi_revision[0]);
1275}
1276
1277static ssize_t
1278qla2x00_optrom_fcode_version_show(struct device *dev,
1279				  struct device_attribute *attr, char *buf)
1280{
1281	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1282	struct qla_hw_data *ha = vha->hw;
1283	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1284	    ha->fcode_revision[0]);
1285}
1286
1287static ssize_t
1288qla2x00_optrom_fw_version_show(struct device *dev,
1289			       struct device_attribute *attr, char *buf)
1290{
1291	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1292	struct qla_hw_data *ha = vha->hw;
1293	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1294	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1295	    ha->fw_revision[3]);
1296}
1297
1298static ssize_t
1299qla2x00_optrom_gold_fw_version_show(struct device *dev,
1300    struct device_attribute *attr, char *buf)
1301{
1302	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1303	struct qla_hw_data *ha = vha->hw;
1304
1305	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1306		return scnprintf(buf, PAGE_SIZE, "\n");
1307
1308	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1309	    ha->gold_fw_version[0], ha->gold_fw_version[1],
1310	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
1311}
1312
1313static ssize_t
1314qla2x00_total_isp_aborts_show(struct device *dev,
1315			      struct device_attribute *attr, char *buf)
1316{
1317	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1318	return scnprintf(buf, PAGE_SIZE, "%d\n",
1319	    vha->qla_stats.total_isp_aborts);
1320}
1321
1322static ssize_t
1323qla24xx_84xx_fw_version_show(struct device *dev,
1324	struct device_attribute *attr, char *buf)
1325{
1326	int rval = QLA_SUCCESS;
1327	uint16_t status[2] = {0, 0};
1328	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1329	struct qla_hw_data *ha = vha->hw;
1330
1331	if (!IS_QLA84XX(ha))
1332		return scnprintf(buf, PAGE_SIZE, "\n");
1333
1334	if (ha->cs84xx->op_fw_version == 0)
1335		rval = qla84xx_verify_chip(vha, status);
1336
1337	if ((rval == QLA_SUCCESS) && (status[0] == 0))
1338		return scnprintf(buf, PAGE_SIZE, "%u\n",
1339			(uint32_t)ha->cs84xx->op_fw_version);
1340
1341	return scnprintf(buf, PAGE_SIZE, "\n");
1342}
1343
1344static ssize_t
1345qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1346    char *buf)
1347{
1348	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1349	struct qla_hw_data *ha = vha->hw;
1350
1351	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
1352		return scnprintf(buf, PAGE_SIZE, "\n");
1353
1354	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1355	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1356	    ha->mpi_capabilities);
1357}
1358
1359static ssize_t
1360qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1361    char *buf)
1362{
1363	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1364	struct qla_hw_data *ha = vha->hw;
1365
1366	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1367		return scnprintf(buf, PAGE_SIZE, "\n");
1368
1369	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1370	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1371}
1372
1373static ssize_t
1374qla2x00_flash_block_size_show(struct device *dev,
1375			      struct device_attribute *attr, char *buf)
1376{
1377	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1378	struct qla_hw_data *ha = vha->hw;
1379
1380	return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1381}
1382
1383static ssize_t
1384qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1385    char *buf)
1386{
1387	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1388
1389	if (!IS_CNA_CAPABLE(vha->hw))
1390		return scnprintf(buf, PAGE_SIZE, "\n");
1391
1392	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1393}
1394
1395static ssize_t
1396qla2x00_vn_port_mac_address_show(struct device *dev,
1397    struct device_attribute *attr, char *buf)
1398{
1399	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1400
1401	if (!IS_CNA_CAPABLE(vha->hw))
1402		return scnprintf(buf, PAGE_SIZE, "\n");
1403
1404	return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1405}
1406
1407static ssize_t
1408qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1409    char *buf)
1410{
1411	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1412
1413	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1414}
1415
1416static ssize_t
1417qla2x00_thermal_temp_show(struct device *dev,
1418	struct device_attribute *attr, char *buf)
1419{
1420	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1421	uint16_t temp = 0;
1422
1423	if (qla2x00_reset_active(vha)) {
1424		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1425		goto done;
1426	}
1427
1428	if (vha->hw->flags.eeh_busy) {
1429		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1430		goto done;
1431	}
1432
1433	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
1434		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1435
1436done:
1437	return scnprintf(buf, PAGE_SIZE, "\n");
1438}
1439
1440static ssize_t
1441qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1442    char *buf)
1443{
1444	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1445	int rval = QLA_FUNCTION_FAILED;
1446	uint16_t state[6];
1447	uint32_t pstate;
1448
1449	if (IS_QLAFX00(vha->hw)) {
1450		pstate = qlafx00_fw_state_show(dev, attr, buf);
1451		return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1452	}
1453
1454	if (qla2x00_reset_active(vha))
1455		ql_log(ql_log_warn, vha, 0x707c,
1456		    "ISP reset active.\n");
1457	else if (!vha->hw->flags.eeh_busy)
1458		rval = qla2x00_get_firmware_state(vha, state);
1459	if (rval != QLA_SUCCESS)
1460		memset(state, -1, sizeof(state));
1461
1462	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1463	    state[0], state[1], state[2], state[3], state[4], state[5]);
1464}
1465
1466static ssize_t
1467qla2x00_diag_requests_show(struct device *dev,
1468	struct device_attribute *attr, char *buf)
1469{
1470	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1471
1472	if (!IS_BIDI_CAPABLE(vha->hw))
1473		return scnprintf(buf, PAGE_SIZE, "\n");
1474
1475	return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1476}
1477
1478static ssize_t
1479qla2x00_diag_megabytes_show(struct device *dev,
1480	struct device_attribute *attr, char *buf)
1481{
1482	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1483
1484	if (!IS_BIDI_CAPABLE(vha->hw))
1485		return scnprintf(buf, PAGE_SIZE, "\n");
1486
1487	return scnprintf(buf, PAGE_SIZE, "%llu\n",
1488	    vha->bidi_stats.transfer_bytes >> 20);
1489}
1490
1491static ssize_t
1492qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1493	char *buf)
1494{
1495	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1496	struct qla_hw_data *ha = vha->hw;
1497	uint32_t size;
1498
1499	if (!ha->fw_dumped)
1500		size = 0;
1501	else if (IS_P3P_TYPE(ha))
1502		size = ha->md_template_size + ha->md_dump_size;
1503	else
1504		size = ha->fw_dump_len;
1505
1506	return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1507}
1508
1509static ssize_t
1510qla2x00_allow_cna_fw_dump_show(struct device *dev,
1511	struct device_attribute *attr, char *buf)
1512{
1513	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1514
1515	if (!IS_P3P_TYPE(vha->hw))
1516		return scnprintf(buf, PAGE_SIZE, "\n");
1517	else
1518		return scnprintf(buf, PAGE_SIZE, "%s\n",
1519		    vha->hw->allow_cna_fw_dump ? "true" : "false");
1520}
1521
1522static ssize_t
1523qla2x00_allow_cna_fw_dump_store(struct device *dev,
1524	struct device_attribute *attr, const char *buf, size_t count)
1525{
1526	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1527	int val = 0;
1528
1529	if (!IS_P3P_TYPE(vha->hw))
1530		return -EINVAL;
1531
1532	if (sscanf(buf, "%d", &val) != 1)
1533		return -EINVAL;
1534
1535	vha->hw->allow_cna_fw_dump = val != 0;
1536
1537	return strlen(buf);
1538}
1539
1540static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1541static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1542static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1543static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1544static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1545static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1546static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1547static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1548static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1549static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1550static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1551		   qla2x00_zio_timer_store);
1552static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1553		   qla2x00_beacon_store);
1554static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1555		   qla2x00_optrom_bios_version_show, NULL);
1556static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1557		   qla2x00_optrom_efi_version_show, NULL);
1558static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1559		   qla2x00_optrom_fcode_version_show, NULL);
1560static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1561		   NULL);
1562static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
1563    qla2x00_optrom_gold_fw_version_show, NULL);
1564static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1565		   NULL);
1566static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1567		   NULL);
1568static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1569static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1570static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1571		   NULL);
1572static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1573static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1574		   qla2x00_vn_port_mac_address_show, NULL);
1575static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1576static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1577static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1578static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
1579static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
1580static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
1581static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
1582		   qla2x00_allow_cna_fw_dump_show,
1583		   qla2x00_allow_cna_fw_dump_store);
1584
1585struct device_attribute *qla2x00_host_attrs[] = {
1586	&dev_attr_driver_version,
1587	&dev_attr_fw_version,
1588	&dev_attr_serial_num,
1589	&dev_attr_isp_name,
1590	&dev_attr_isp_id,
1591	&dev_attr_model_name,
1592	&dev_attr_model_desc,
1593	&dev_attr_pci_info,
1594	&dev_attr_link_state,
1595	&dev_attr_zio,
1596	&dev_attr_zio_timer,
1597	&dev_attr_beacon,
1598	&dev_attr_optrom_bios_version,
1599	&dev_attr_optrom_efi_version,
1600	&dev_attr_optrom_fcode_version,
1601	&dev_attr_optrom_fw_version,
1602	&dev_attr_84xx_fw_version,
1603	&dev_attr_total_isp_aborts,
1604	&dev_attr_mpi_version,
1605	&dev_attr_phy_version,
1606	&dev_attr_flash_block_size,
1607	&dev_attr_vlan_id,
1608	&dev_attr_vn_port_mac_address,
1609	&dev_attr_fabric_param,
1610	&dev_attr_fw_state,
1611	&dev_attr_optrom_gold_fw_version,
1612	&dev_attr_thermal_temp,
1613	&dev_attr_diag_requests,
1614	&dev_attr_diag_megabytes,
1615	&dev_attr_fw_dump_size,
1616	&dev_attr_allow_cna_fw_dump,
1617	NULL,
1618};
1619
1620/* Host attributes. */
1621
1622static void
1623qla2x00_get_host_port_id(struct Scsi_Host *shost)
1624{
1625	scsi_qla_host_t *vha = shost_priv(shost);
1626
1627	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1628	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1629}
1630
1631static void
1632qla2x00_get_host_speed(struct Scsi_Host *shost)
1633{
1634	struct qla_hw_data *ha = ((struct scsi_qla_host *)
1635					(shost_priv(shost)))->hw;
1636	u32 speed = FC_PORTSPEED_UNKNOWN;
1637
1638	if (IS_QLAFX00(ha)) {
1639		qlafx00_get_host_speed(shost);
1640		return;
1641	}
1642
1643	switch (ha->link_data_rate) {
1644	case PORT_SPEED_1GB:
1645		speed = FC_PORTSPEED_1GBIT;
1646		break;
1647	case PORT_SPEED_2GB:
1648		speed = FC_PORTSPEED_2GBIT;
1649		break;
1650	case PORT_SPEED_4GB:
1651		speed = FC_PORTSPEED_4GBIT;
1652		break;
1653	case PORT_SPEED_8GB:
1654		speed = FC_PORTSPEED_8GBIT;
1655		break;
1656	case PORT_SPEED_10GB:
1657		speed = FC_PORTSPEED_10GBIT;
1658		break;
1659	case PORT_SPEED_16GB:
1660		speed = FC_PORTSPEED_16GBIT;
1661		break;
1662	case PORT_SPEED_32GB:
1663		speed = FC_PORTSPEED_32GBIT;
1664		break;
1665	}
1666	fc_host_speed(shost) = speed;
1667}
1668
1669static void
1670qla2x00_get_host_port_type(struct Scsi_Host *shost)
1671{
1672	scsi_qla_host_t *vha = shost_priv(shost);
1673	uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1674
1675	if (vha->vp_idx) {
1676		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1677		return;
1678	}
1679	switch (vha->hw->current_topology) {
1680	case ISP_CFG_NL:
1681		port_type = FC_PORTTYPE_LPORT;
1682		break;
1683	case ISP_CFG_FL:
1684		port_type = FC_PORTTYPE_NLPORT;
1685		break;
1686	case ISP_CFG_N:
1687		port_type = FC_PORTTYPE_PTP;
1688		break;
1689	case ISP_CFG_F:
1690		port_type = FC_PORTTYPE_NPORT;
1691		break;
1692	}
1693	fc_host_port_type(shost) = port_type;
1694}
1695
1696static void
1697qla2x00_get_starget_node_name(struct scsi_target *starget)
1698{
1699	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1700	scsi_qla_host_t *vha = shost_priv(host);
1701	fc_port_t *fcport;
1702	u64 node_name = 0;
1703
1704	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1705		if (fcport->rport &&
1706		    starget->id == fcport->rport->scsi_target_id) {
1707			node_name = wwn_to_u64(fcport->node_name);
1708			break;
1709		}
1710	}
1711
1712	fc_starget_node_name(starget) = node_name;
1713}
1714
1715static void
1716qla2x00_get_starget_port_name(struct scsi_target *starget)
1717{
1718	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1719	scsi_qla_host_t *vha = shost_priv(host);
1720	fc_port_t *fcport;
1721	u64 port_name = 0;
1722
1723	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1724		if (fcport->rport &&
1725		    starget->id == fcport->rport->scsi_target_id) {
1726			port_name = wwn_to_u64(fcport->port_name);
1727			break;
1728		}
1729	}
1730
1731	fc_starget_port_name(starget) = port_name;
1732}
1733
1734static void
1735qla2x00_get_starget_port_id(struct scsi_target *starget)
1736{
1737	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1738	scsi_qla_host_t *vha = shost_priv(host);
1739	fc_port_t *fcport;
1740	uint32_t port_id = ~0U;
1741
1742	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1743		if (fcport->rport &&
1744		    starget->id == fcport->rport->scsi_target_id) {
1745			port_id = fcport->d_id.b.domain << 16 |
1746			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1747			break;
1748		}
1749	}
1750
1751	fc_starget_port_id(starget) = port_id;
1752}
1753
1754static void
1755qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1756{
1757	if (timeout)
1758		rport->dev_loss_tmo = timeout;
1759	else
1760		rport->dev_loss_tmo = 1;
1761}
1762
1763static void
1764qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1765{
1766	struct Scsi_Host *host = rport_to_shost(rport);
1767	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1768	unsigned long flags;
1769
1770	if (!fcport)
1771		return;
1772
1773	/* Now that the rport has been deleted, set the fcport state to
1774	   FCS_DEVICE_DEAD */
1775	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
1776
1777	/*
1778	 * Transport has effectively 'deleted' the rport, clear
1779	 * all local references.
1780	 */
1781	spin_lock_irqsave(host->host_lock, flags);
1782	fcport->rport = fcport->drport = NULL;
1783	*((fc_port_t **)rport->dd_data) = NULL;
1784	spin_unlock_irqrestore(host->host_lock, flags);
1785
1786	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1787		return;
1788
1789	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1790		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1791		return;
1792	}
1793}
1794
1795static void
1796qla2x00_terminate_rport_io(struct fc_rport *rport)
1797{
1798	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1799
1800	if (!fcport)
1801		return;
1802
1803	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1804		return;
1805
1806	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1807		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1808		return;
1809	}
1810	/*
1811	 * At this point all fcport's software-states are cleared.  Perform any
1812	 * final cleanup of firmware resources (PCBs and XCBs).
1813	 */
1814	if (fcport->loop_id != FC_NO_LOOP_ID) {
1815		if (IS_FWI2_CAPABLE(fcport->vha->hw))
1816			fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1817			    fcport->loop_id, fcport->d_id.b.domain,
1818			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
1819		else
1820			qla2x00_port_logout(fcport->vha, fcport);
1821	}
1822}
1823
1824static int
1825qla2x00_issue_lip(struct Scsi_Host *shost)
1826{
1827	scsi_qla_host_t *vha = shost_priv(shost);
1828
1829	if (IS_QLAFX00(vha->hw))
1830		return 0;
1831
1832	qla2x00_loop_reset(vha);
1833	return 0;
1834}
1835
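/*
 * fc_host statistics callback: link statistics are fetched into a DMA
 * buffer via the mailbox interface; counters that cannot be retrieved are
 * left at -1 (the memset default).
 */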
1836static struct fc_host_statistics *
1837qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1838{
1839	scsi_qla_host_t *vha = shost_priv(shost);
1840	struct qla_hw_data *ha = vha->hw;
1841	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1842	int rval;
1843	struct link_statistics *stats;
1844	dma_addr_t stats_dma;
1845	struct fc_host_statistics *pfc_host_stat;
1846
1847	pfc_host_stat = &vha->fc_host_stat;
1848	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1849
1850	if (IS_QLAFX00(vha->hw))
1851		goto done;
1852
1853	if (test_bit(UNLOADING, &vha->dpc_flags))
1854		goto done;
1855
1856	if (unlikely(pci_channel_offline(ha->pdev)))
1857		goto done;
1858
1859	if (qla2x00_reset_active(vha))
1860		goto done;
1861
1862	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1863	if (stats == NULL) {
1864		ql_log(ql_log_warn, vha, 0x707d,
1865		    "Failed to allocate memory for stats.\n");
1866		goto done;
1867	}
1868	memset(stats, 0, DMA_POOL_SIZE);
1869
1870	rval = QLA_FUNCTION_FAILED;
1871	if (IS_FWI2_CAPABLE(ha)) {
1872		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1873	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1874	    !ha->dpc_active) {
1875		/* Must be in a 'READY' state for statistics retrieval. */
1876		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1877						stats, stats_dma);
1878	}
1879
1880	if (rval != QLA_SUCCESS)
1881		goto done_free;
1882
1883	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1884	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1885	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1886	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1887	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1888	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1889	if (IS_FWI2_CAPABLE(ha)) {
1890		pfc_host_stat->lip_count = stats->lip_cnt;
1891		pfc_host_stat->tx_frames = stats->tx_frames;
1892		pfc_host_stat->rx_frames = stats->rx_frames;
1893		pfc_host_stat->dumped_frames = stats->discarded_frames;
1894		pfc_host_stat->nos_count = stats->nos_rcvd;
1895		pfc_host_stat->error_frames =
1896			stats->dropped_frames + stats->discarded_frames;
1897		pfc_host_stat->rx_words = vha->qla_stats.input_bytes;
1898		pfc_host_stat->tx_words = vha->qla_stats.output_bytes;
1899	}
1900	pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests;
1901	pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests;
1902	pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests;
1903	pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
1904	pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
1905	pfc_host_stat->seconds_since_last_reset =
1906		get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
1907	do_div(pfc_host_stat->seconds_since_last_reset, HZ);
1908
1909done_free:
1910	dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1911done:
1912	return pfc_host_stat;
1913}
1914
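/*
 * fc_host statistics reset hook: clear the cached fc_host_statistics and
 * restart the "seconds since last reset" baseline.
 */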
1915static void
1916qla2x00_reset_host_stats(struct Scsi_Host *shost)
1917{
1918	scsi_qla_host_t *vha = shost_priv(shost);
1919
1920	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
1921
1922	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
1923}
1924
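/* Rebuild the fc_host symbolic node name for this adapter. */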
1925static void
1926qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1927{
1928	scsi_qla_host_t *vha = shost_priv(shost);
1929
1930	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
1931	    sizeof(fc_host_symbolic_name(shost)));
1932}
1933
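/*
 * fc_host system hostname hook: flag the DPC thread to re-register FDMI
 * attributes with the fabric so the new hostname is picked up.
 */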
1934static void
1935qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1936{
1937	scsi_qla_host_t *vha = shost_priv(shost);
1938
1939	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1940}
1941
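/*
 * Report the fabric node name.  When no switch has been discovered the
 * all-ones WWN is returned to indicate that the name is unknown.
 */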
1942static void
1943qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1944{
1945	scsi_qla_host_t *vha = shost_priv(shost);
1946	uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF,
1947		0xFF, 0xFF, 0xFF, 0xFF};
1948	u64 fabric_name = wwn_to_u64(node_name);
1949
1950	if (vha->device_flags & SWITCH_FOUND)
1951		fabric_name = wwn_to_u64(vha->fabric_node_name);
1952
1953	fc_host_fabric_name(shost) = fabric_name;
1954}
1955
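/*
 * Map the physical port's internal loop state onto the generic FC
 * transport port states (online, linkdown, diagnostics, unknown).
 */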
1956static void
1957qla2x00_get_host_port_state(struct Scsi_Host *shost)
1958{
1959	scsi_qla_host_t *vha = shost_priv(shost);
1960	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1961
1962	if (!base_vha->flags.online) {
1963		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1964		return;
1965	}
1966
1967	switch (atomic_read(&base_vha->loop_state)) {
1968	case LOOP_UPDATE:
1969		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1970		break;
1971	case LOOP_DOWN:
1972		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
1973			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1974		else
1975			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1976		break;
1977	case LOOP_DEAD:
1978		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1979		break;
1980	case LOOP_READY:
1981		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1982		break;
1983	default:
1984		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1985		break;
1986	}
1987}
1988
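/*
 * FC transport "vport_create" hook for NPIV.  Sanity-checks the request,
 * allocates a new scsi_qla_host for the vport, inherits DIF/DIX protection
 * settings from the physical function, registers the vport's SCSI host and
 * assigns it a request queue: the shared affinity queue when CPU affinity
 * is enabled, or a dedicated QoS-rated queue when one is configured in the
 * NVRAM NPIV table.  User space typically triggers this by writing a
 * "<wwpn>:<wwnn>" pair to the fc_host vport_create sysfs attribute.
 */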
1989static int
1990qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1991{
1992	int	ret = 0;
1993	uint8_t	qos = 0;
1994	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1995	scsi_qla_host_t *vha = NULL;
1996	struct qla_hw_data *ha = base_vha->hw;
1997	uint16_t options = 0;
1998	int	cnt;
1999	struct req_que *req = ha->req_q_map[0];
2000
2001	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2002	if (ret) {
2003		ql_log(ql_log_warn, vha, 0x707e,
2004		    "Vport sanity check failed, status %x\n", ret);
2005		return (ret);
2006	}
2007
2008	vha = qla24xx_create_vhost(fc_vport);
2009	if (vha == NULL) {
2010		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2011		return FC_VPORT_FAILED;
2012	}
2013	if (disable) {
2014		atomic_set(&vha->vp_state, VP_OFFLINE);
2015		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2016	} else
2017		atomic_set(&vha->vp_state, VP_FAILED);
2018
2019	/* ready to create vport */
2020	ql_log(ql_log_info, vha, 0x7080,
2021	    "VP entry id %d assigned.\n", vha->vp_idx);
2022
2023	/* initialize vport states */
2024	atomic_set(&vha->loop_state, LOOP_DOWN);
2025	vha->vp_err_state = VP_ERR_PORTDWN;
2026	vha->vp_prev_err_state = VP_ERR_UNKWN;
2027	/* Check if physical ha port is Up */
2028	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
2029	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2030		/* Don't retry or attempt login of this virtual port */
2031		ql_dbg(ql_dbg_user, vha, 0x7081,
2032		    "Vport loop state is not UP.\n");
2033		atomic_set(&vha->loop_state, LOOP_DEAD);
2034		if (!disable)
2035			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
2036	}
2037
2038	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2039		if (ha->fw_attributes & BIT_4) {
2040			int prot = 0, guard;
2041			vha->flags.difdix_supported = 1;
2042			ql_dbg(ql_dbg_user, vha, 0x7082,
2043			    "Registered for DIF/DIX type 1 and 3 protection.\n");
2044			if (ql2xenabledif == 1)
2045				prot = SHOST_DIX_TYPE0_PROTECTION;
2046			scsi_host_set_prot(vha->host,
2047			    prot | SHOST_DIF_TYPE1_PROTECTION
2048			    | SHOST_DIF_TYPE2_PROTECTION
2049			    | SHOST_DIF_TYPE3_PROTECTION
2050			    | SHOST_DIX_TYPE1_PROTECTION
2051			    | SHOST_DIX_TYPE2_PROTECTION
2052			    | SHOST_DIX_TYPE3_PROTECTION);
2053
2054			guard = SHOST_DIX_GUARD_CRC;
2055
2056			if (IS_PI_IPGUARD_CAPABLE(ha) &&
2057			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2058				guard |= SHOST_DIX_GUARD_IP;
2059
2060			scsi_host_set_guard(vha->host, guard);
2061		} else
2062			vha->flags.difdix_supported = 0;
2063	}
2064
2065	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
2066				   &ha->pdev->dev)) {
2067		ql_dbg(ql_dbg_user, vha, 0x7083,
2068		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
2069		goto vport_create_failed_2;
2070	}
2071
2072	/* initialize attributes */
2073	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2074	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2075	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2076	fc_host_supported_classes(vha->host) =
2077		fc_host_supported_classes(base_vha->host);
2078	fc_host_supported_speeds(vha->host) =
2079		fc_host_supported_speeds(base_vha->host);
2080
2081	qlt_vport_create(vha, ha);
2082	qla24xx_vport_disable(fc_vport, disable);
2083
2084	if (ha->flags.cpu_affinity_enabled) {
2085		req = ha->req_q_map[1];
2086		ql_dbg(ql_dbg_multiq, vha, 0xc000,
2087		    "Request queue %p attached with "
2088		    "VP[%d], cpu affinity = %d\n",
2089		    req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
2090		goto vport_queue;
2091	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
2092		goto vport_queue;
2093	/* Create a request queue in QoS mode for the vport */
2094	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
2095		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
2096			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
2097					8) == 0) {
2098			qos = ha->npiv_info[cnt].q_qos;
2099			break;
2100		}
2101	}
2102
2103	if (qos) {
2104		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
2105			qos);
2106		if (!ret)
2107			ql_log(ql_log_warn, vha, 0x7084,
2108			    "Can't create request queue for VP[%d]\n",
2109			    vha->vp_idx);
2110		else {
2111			ql_dbg(ql_dbg_multiq, vha, 0xc001,
2112			    "Request Que:%d QoS: %d created for VP[%d]\n",
2113			    ret, qos, vha->vp_idx);
2114			ql_dbg(ql_dbg_user, vha, 0x7085,
2115			    "Request Que:%d QoS: %d created for VP[%d]\n",
2116			    ret, qos, vha->vp_idx);
2117			req = ha->req_q_map[ret];
2118		}
2119	}
2120
2121vport_queue:
2122	vha->req = req;
2123	return 0;
2124
2125vport_create_failed_2:
2126	qla24xx_disable_vp(vha);
2127	qla24xx_deallocate_vp_id(vha);
2128	scsi_host_put(vha->host);
2129	return FC_VPORT_FAILED;
2130}
2131
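/*
 * FC transport "vport_delete" hook: wait for loop resync / fcport updates
 * to settle, disable the vport, tear down its target-mode resources and
 * SCSI host, free its fcports and, when one was created, delete its
 * dedicated request queue before dropping the final host reference.
 */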
2132static int
2133qla24xx_vport_delete(struct fc_vport *fc_vport)
2134{
2135	scsi_qla_host_t *vha = fc_vport->dd_data;
2136	struct qla_hw_data *ha = vha->hw;
2137	uint16_t id = vha->vp_idx;
2138
2139	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
2140	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
2141		msleep(1000);
2142
2143	qla24xx_disable_vp(vha);
2144
2145	vha->flags.delete_progress = 1;
2146
2147	qlt_remove_target(ha, vha);
2148
2149	fc_remove_host(vha->host);
2150
2151	scsi_remove_host(vha->host);
2152
2153	/* Allow the timer to run to drain queued items when removing the vp */
2154	qla24xx_deallocate_vp_id(vha);
2155
2156	if (vha->timer_active) {
2157		qla2x00_vp_stop_timer(vha);
2158		ql_dbg(ql_dbg_user, vha, 0x7086,
2159		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
2160	}
2161
2162	BUG_ON(atomic_read(&vha->vref_count));
2163
2164	qla2x00_free_fcports(vha);
2165
2166	mutex_lock(&ha->vport_lock);
2167	ha->cur_vport_count--;
2168	clear_bit(vha->vp_idx, ha->vp_idx_map);
2169	mutex_unlock(&ha->vport_lock);
2170
2171	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
2172		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
2173			ql_log(ql_log_warn, vha, 0x7087,
2174			    "Queue delete failed.\n");
2175	}
2176
2177	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
2178	scsi_host_put(vha->host);
2179	return 0;
2180}
2181
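/* FC transport "vport_disable" hook: enable or disable the given vport. */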
2182static int
2183qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
2184{
2185	scsi_qla_host_t *vha = fc_vport->dd_data;
2186
2187	if (disable)
2188		qla24xx_disable_vp(vha);
2189	else
2190		qla24xx_enable_vp(vha);
2191
2192	return 0;
2193}
2194
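/*
 * FC transport template for physical (PCI function) ports.  The vport
 * template below is identical except that it omits the NPIV management
 * callbacks and the supported-speeds attribute.
 */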
2195struct fc_function_template qla2xxx_transport_functions = {
2196
2197	.show_host_node_name = 1,
2198	.show_host_port_name = 1,
2199	.show_host_supported_classes = 1,
2200	.show_host_supported_speeds = 1,
2201
2202	.get_host_port_id = qla2x00_get_host_port_id,
2203	.show_host_port_id = 1,
2204	.get_host_speed = qla2x00_get_host_speed,
2205	.show_host_speed = 1,
2206	.get_host_port_type = qla2x00_get_host_port_type,
2207	.show_host_port_type = 1,
2208	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2209	.show_host_symbolic_name = 1,
2210	.set_host_system_hostname = qla2x00_set_host_system_hostname,
2211	.show_host_system_hostname = 1,
2212	.get_host_fabric_name = qla2x00_get_host_fabric_name,
2213	.show_host_fabric_name = 1,
2214	.get_host_port_state = qla2x00_get_host_port_state,
2215	.show_host_port_state = 1,
2216
2217	.dd_fcrport_size = sizeof(struct fc_port *),
2218	.show_rport_supported_classes = 1,
2219
2220	.get_starget_node_name = qla2x00_get_starget_node_name,
2221	.show_starget_node_name = 1,
2222	.get_starget_port_name = qla2x00_get_starget_port_name,
2223	.show_starget_port_name = 1,
2224	.get_starget_port_id  = qla2x00_get_starget_port_id,
2225	.show_starget_port_id = 1,
2226
2227	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2228	.show_rport_dev_loss_tmo = 1,
2229
2230	.issue_fc_host_lip = qla2x00_issue_lip,
2231	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2232	.terminate_rport_io = qla2x00_terminate_rport_io,
2233	.get_fc_host_stats = qla2x00_get_fc_host_stats,
2234	.reset_fc_host_stats = qla2x00_reset_host_stats,
2235
2236	.vport_create = qla24xx_vport_create,
2237	.vport_disable = qla24xx_vport_disable,
2238	.vport_delete = qla24xx_vport_delete,
2239	.bsg_request = qla24xx_bsg_request,
2240	.bsg_timeout = qla24xx_bsg_timeout,
2241};
2242
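/* FC transport template for NPIV vports. */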
2243struct fc_function_template qla2xxx_transport_vport_functions = {
2244
2245	.show_host_node_name = 1,
2246	.show_host_port_name = 1,
2247	.show_host_supported_classes = 1,
2248
2249	.get_host_port_id = qla2x00_get_host_port_id,
2250	.show_host_port_id = 1,
2251	.get_host_speed = qla2x00_get_host_speed,
2252	.show_host_speed = 1,
2253	.get_host_port_type = qla2x00_get_host_port_type,
2254	.show_host_port_type = 1,
2255	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2256	.show_host_symbolic_name = 1,
2257	.set_host_system_hostname = qla2x00_set_host_system_hostname,
2258	.show_host_system_hostname = 1,
2259	.get_host_fabric_name = qla2x00_get_host_fabric_name,
2260	.show_host_fabric_name = 1,
2261	.get_host_port_state = qla2x00_get_host_port_state,
2262	.show_host_port_state = 1,
2263
2264	.dd_fcrport_size = sizeof(struct fc_port *),
2265	.show_rport_supported_classes = 1,
2266
2267	.get_starget_node_name = qla2x00_get_starget_node_name,
2268	.show_starget_node_name = 1,
2269	.get_starget_port_name = qla2x00_get_starget_port_name,
2270	.show_starget_port_name = 1,
2271	.get_starget_port_id  = qla2x00_get_starget_port_id,
2272	.show_starget_port_id = 1,
2273
2274	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2275	.show_rport_dev_loss_tmo = 1,
2276
2277	.issue_fc_host_lip = qla2x00_issue_lip,
2278	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2279	.terminate_rport_io = qla2x00_terminate_rport_io,
2280	.get_fc_host_stats = qla2x00_get_fc_host_stats,
2281	.reset_fc_host_stats = qla2x00_reset_host_stats,
2282
2283	.bsg_request = qla24xx_bsg_request,
2284	.bsg_timeout = qla24xx_bsg_timeout,
2285};
2286
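/*
 * Populate the fc_host attributes (WWNs, dev_loss_tmo, supported classes,
 * NPIV limits) for a newly registered host and derive the supported-speed
 * mask from the ISP type.
 */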
2287void
2288qla2x00_init_host_attr(scsi_qla_host_t *vha)
2289{
2290	struct qla_hw_data *ha = vha->hw;
2291	u32 speed = FC_PORTSPEED_UNKNOWN;
2292
2293	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2294	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2295	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2296	fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
2297			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
2298	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2299	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2300
2301	if (IS_CNA_CAPABLE(ha))
2302		speed = FC_PORTSPEED_10GBIT;
2303	else if (IS_QLA2031(ha))
2304		speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
2305		    FC_PORTSPEED_4GBIT;
2306	else if (IS_QLA25XX(ha))
2307		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2308		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2309	else if (IS_QLA24XX_TYPE(ha))
2310		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
2311		    FC_PORTSPEED_1GBIT;
2312	else if (IS_QLA23XX(ha))
2313		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2314	else if (IS_QLAFX00(ha))
2315		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2316		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2317	else if (IS_QLA27XX(ha))
2318		speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
2319		    FC_PORTSPEED_8GBIT;
2320	else
2321		speed = FC_PORTSPEED_1GBIT;
2322	fc_host_supported_speeds(vha->host) = speed;
2323}
2324