
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c)   2003-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
8#include <linux/ratelimit.h>
9
10#include "ql4_def.h"
11#include "ql4_version.h"
12#include "ql4_glbl.h"
13#include "ql4_dbg.h"
14#include "ql4_inline.h"
15
16uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
17{
18	return readl((void __iomem *)(ha->nx_pcibase + addr));
19}
20
21void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
22{
23	writel(val, (void __iomem *)(ha->nx_pcibase + addr));
24}
25
26static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
27{
28	uint32_t val;
29	int ret_val = QLA_SUCCESS;
30
31	qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
32	val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
33	if (val != addr) {
34		ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
35			   __func__, addr, val);
36		ret_val = QLA_ERROR;
37	}
38
39	return ret_val;
40}
41
42int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
43			      uint32_t *data)
44{
45	int ret_val;
46
47	ret_val = qla4_83xx_set_win_base(ha, addr);
48
49	if (ret_val == QLA_SUCCESS)
50		*data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
51	else
52		ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
53			   __func__, addr);
54
55	return ret_val;
56}
57
58int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
59			      uint32_t data)
60{
61	int ret_val;
62
63	ret_val = qla4_83xx_set_win_base(ha, addr);
64
65	if (ret_val == QLA_SUCCESS)
66		qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
67	else
68		ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
69			   __func__, addr, data);
70
71	return ret_val;
72}
73
/**
 * qla4_83xx_flash_lock - Acquire the inter-function flash hardware lock
 * @ha: Pointer to adapter structure
 *
 * Polls the FLASH_LOCK semaphore register in 20 msec steps until it is
 * granted or QLA83XX_FLASH_LOCK_TIMEOUT msecs have elapsed.
 *
 * Returns QLA_SUCCESS if the lock was acquired, QLA_ERROR on timeout.
 **/
static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
{
	int lock_owner;
	int timeout = 0;
	uint32_t lock_status = 0;
	int ret_val = QLA_SUCCESS;

	while (lock_status == 0) {
		/* A nonzero read means the semaphore was granted to us */
		lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
		if (lock_status)
			break;

		if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
			/* Report which function currently holds the lock */
			lock_owner = qla4_83xx_rd_reg(ha,
						      QLA83XX_FLASH_LOCK_ID);
			ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
				   __func__, ha->func_num, lock_owner);
			ret_val = QLA_ERROR;
			break;
		}
		msleep(20);
	}

	/* NOTE(review): the owner ID is written even on the timeout path,
	 * overwriting the current holder's ID -- confirm against the IDC
	 * flash-lock protocol before changing. */
	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
	return ret_val;
}
100
/* Release the flash hardware lock acquired by qla4_83xx_flash_lock(). */
static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
{
	/* Mark the owner field as "no owner" (0xFF) first.
	 * Reading FLASH_UNLOCK register unlocks the Flash */
	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
	qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
}
107
108int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
109			     uint8_t *p_data, int u32_word_count)
110{
111	int i;
112	uint32_t u32_word;
113	uint32_t addr = flash_addr;
114	int ret_val = QLA_SUCCESS;
115
116	ret_val = qla4_83xx_flash_lock(ha);
117	if (ret_val == QLA_ERROR)
118		goto exit_lock_error;
119
120	if (addr & 0x03) {
121		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
122			   __func__, addr);
123		ret_val = QLA_ERROR;
124		goto exit_flash_read;
125	}
126
127	for (i = 0; i < u32_word_count; i++) {
128		ret_val = qla4_83xx_wr_reg_indirect(ha,
129						    QLA83XX_FLASH_DIRECT_WINDOW,
130						    (addr & 0xFFFF0000));
131		if (ret_val == QLA_ERROR) {
132			ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!",
133				   __func__, addr);
134			goto exit_flash_read;
135		}
136
137		ret_val = qla4_83xx_rd_reg_indirect(ha,
138						QLA83XX_FLASH_DIRECT_DATA(addr),
139						&u32_word);
140		if (ret_val == QLA_ERROR) {
141			ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
142				   __func__, addr);
143			goto exit_flash_read;
144		}
145
146		*(__le32 *)p_data = le32_to_cpu(u32_word);
147		p_data = p_data + 4;
148		addr = addr + 4;
149	}
150
151exit_flash_read:
152	qla4_83xx_flash_unlock(ha);
153
154exit_lock_error:
155	return ret_val;
156}
157
/**
 * qla4_83xx_lockless_flash_read_u32 - Read dwords from flash without taking
 *	the flash hardware lock
 * @ha: Pointer to adapter structure
 * @flash_addr: Flash address to read from (must be 4-byte aligned)
 * @p_data: Destination buffer
 * @u32_word_count: Number of 32-bit words to read
 *
 * Caller is responsible for any required locking. The direct-read window
 * covers a single flash sector, so the window register is reprogrammed each
 * time the read crosses a sector boundary.
 *
 * Returns QLA_SUCCESS on success, QLA_ERROR otherwise.
 **/
int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
				      uint32_t flash_addr, uint8_t *p_data,
				      int u32_word_count)
{
	uint32_t i;
	uint32_t u32_word;
	uint32_t flash_offset;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	/* Byte offset of the starting address within its flash sector */
	flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);

	if (addr & 0x3) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_lockless_read;
	}

	ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
					    addr);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
			   __func__, addr);
		goto exit_lockless_read;
	}

	/* Check if data is spread across multiple sectors  */
	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
	    (QLA83XX_FLASH_SECTOR_SIZE - 1)) {

		/* Multi sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data  = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
			flash_offset = flash_offset + 4;

			/* Crossed into the next sector: move the window */
			if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
				/* This write is needed once for each sector */
				ret_val = qla4_83xx_wr_reg_indirect(ha,
						   QLA83XX_FLASH_DIRECT_WINDOW,
						   addr);
				if (ret_val == QLA_ERROR) {
					ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
						   __func__, addr);
					goto exit_lockless_read;
				}
				flash_offset = 0;
			}
		}
	} else {
		/* Single sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
		}
	}

exit_lockless_read:
	return ret_val;
}
239
/* Forcefully release a flash/ROM lock that may be stuck from a dead owner. */
void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
	/* Try to take the lock ourselves; failure means another (possibly
	 * dead) function still holds it. */
	if (qla4_83xx_flash_lock(ha))
		ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);

	/*
	 * We got the lock, or someone else is holding the lock
	 * since we are resetting, forcefully unlock
	 */
	qla4_83xx_flash_unlock(ha);
}
251
/* Handshake phases encoded in bits 0-1 of QLA83XX_DRV_LOCKRECOVERY */
#define INTENT_TO_RECOVER	0x01
#define PROCEED_TO_RECOVER	0x02
254
/**
 * qla4_83xx_lock_recovery - Recover a stuck IDC driver lock
 * @ha: pointer to adapter structure
 *
 * Two-phase recovery: advertise INTENT_TO_RECOVER with our function number,
 * wait, verify no other function started recovering in the meantime, then
 * PROCEED_TO_RECOVER, force-unlock, and try to take the lock ourselves.
 *
 * Returns QLA_SUCCESS if the lock was recovered and is now held by this
 * function, QLA_ERROR otherwise.
 **/
static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
{

	uint32_t lock = 0, lockid;
	int ret_val = QLA_ERROR;

	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);

	/* Check for other Recovery in progress, go wait */
	if ((lockid & 0x3) != 0)
		goto exit_lock_recovery;

	/* Intent to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | INTENT_TO_RECOVER);

	msleep(200);

	/* Check Intent to Recover is advertised */
	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
	if ((lockid & 0x3C) != (ha->func_num << 2))
		goto exit_lock_recovery;

	ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
		   __func__, ha->func_num);

	/* Proceed to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | PROCEED_TO_RECOVER);

	/* Force Unlock */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
	ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);

	/* Clear bits 0-5 in IDC_RECOVERY register*/
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);

	/* Get lock: on success, bump the lock counter (bits 8-31) and claim
	 * ownership (bits 0-7) in DRV_LOCK_ID */
	lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
	if (lock) {
		lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
		ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
		ret_val = QLA_SUCCESS;
	}

exit_lock_recovery:
	return ret_val;
}
304
/* Interval between DRV_LOCK polls, in msecs */
#define	QLA83XX_DRV_LOCK_MSLEEP		200
306
/**
 * qla4_83xx_drv_lock - Acquire the IDC driver lock
 * @ha: pointer to adapter structure
 *
 * Polls the DRV_LOCK semaphore. On acquisition, DRV_LOCK_ID is updated:
 * the lock counter (bits 8-31) is incremented and the owner field
 * (bits 0-7) is set to this function number. If the very same
 * owner+counter value is still holding the lock when the poll times out,
 * the lock is presumed stuck and qla4_83xx_lock_recovery() is attempted;
 * otherwise the timeout is restarted.
 **/
int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
{
	int timeout = 0;
	uint32_t status = 0;
	int ret_val = QLA_SUCCESS;
	uint32_t first_owner = 0;
	uint32_t tmo_owner = 0;
	uint32_t lock_id;
	uint32_t func_num;
	uint32_t lock_cnt;

	while (status == 0) {
		status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
		if (status) {
			/* Increment Counter (8-31) and update func_num (0-7) on
			 * getting a successful lock  */
			lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
			qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
			break;
		}

		if (timeout == 0)
			/* Save counter + ID of function holding the lock for
			 * first failure */
			first_owner = ha->isp_ops->rd_reg_direct(ha,
							  QLA83XX_DRV_LOCK_ID);

		if (++timeout >=
		    (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
			/* Decode owner (bits 0-7) and counter (bits 8-31) */
			tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			func_num = tmo_owner & 0xFF;
			lock_cnt = tmo_owner >> 8;
			ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
				   __func__, ha->func_num, func_num, lock_cnt,
				   (first_owner & 0xFF));

			if (first_owner != tmo_owner) {
				/* Some other driver got lock, OR same driver
				 * got lock again (counter value changed), when
				 * we were waiting for lock.
				 * Retry for another 2 sec */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
					   __func__, ha->func_num);
				timeout = 0;
			} else {
				/* Same driver holding lock > 2sec.
				 * Force Recovery */
				ret_val = qla4_83xx_lock_recovery(ha);
				if (ret_val == QLA_SUCCESS) {
					/* Recovered and got lock */
					ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
						   __func__, ha->func_num);
					break;
				}
				/* Recovery Failed, some other function
				 * has the lock, wait for 2secs and retry */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
					   __func__, ha->func_num);
				timeout = 0;
			}
		}
		msleep(QLA83XX_DRV_LOCK_MSLEEP);
	}

	return ret_val;
}
374
375void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
376{
377	int id;
378
379	id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
380
381	if ((id & 0xFF) != ha->func_num) {
382		ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
383			   __func__, ha->func_num, (id & 0xFF));
384		return;
385	}
386
387	/* Keep lock counter value, update the ha->func_num to 0xFF */
388	qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
389	qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
390}
391
392void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
393{
394	uint32_t idc_ctrl;
395
396	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
397	idc_ctrl |= DONTRESET_BIT0;
398	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
399	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
400			  idc_ctrl));
401}
402
403void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
404{
405	uint32_t idc_ctrl;
406
407	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
408	idc_ctrl &= ~DONTRESET_BIT0;
409	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
410	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
411			  idc_ctrl));
412}
413
414int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
415{
416	uint32_t idc_ctrl;
417
418	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
419	return idc_ctrl & DONTRESET_BIT0;
420}
421
/*-------------------------IDC State Machine ---------------------*/
423
/* Device classes encoded in the lower 2 bits of each dev_partition nibble */
enum {
	UNKNOWN_CLASS = 0,
	NIC_CLASS,
	FCOE_CLASS,
	ISCSI_CLASS
};
430
/* Decoded per-function entry from the dev_partition info registers */
struct device_info {
	int func_num;		/* PCI function number (0-15) */
	int device_type;	/* UNKNOWN/NIC/FCOE/ISCSI_CLASS */
	int port_num;		/* physical port bits (unshifted, mask 0xC) */
};
436
437int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
438{
439	uint32_t drv_active;
440	uint32_t dev_part, dev_part1, dev_part2;
441	int i;
442	struct device_info device_map[16];
443	int func_nibble;
444	int nibble;
445	int nic_present = 0;
446	int iscsi_present = 0;
447	int iscsi_func_low = 0;
448
449	/* Use the dev_partition register to determine the PCI function number
450	 * and then check drv_active register to see which driver is loaded */
451	dev_part1 = qla4_83xx_rd_reg(ha,
452				     ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
453	dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
454	drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);
455
456	/* Each function has 4 bits in dev_partition Info register,
457	 * Lower 2 bits - device type, Upper 2 bits - physical port number */
458	dev_part = dev_part1;
459	for (i = nibble = 0; i <= 15; i++, nibble++) {
460		func_nibble = dev_part & (0xF << (nibble * 4));
461		func_nibble >>= (nibble * 4);
462		device_map[i].func_num = i;
463		device_map[i].device_type = func_nibble & 0x3;
464		device_map[i].port_num = func_nibble & 0xC;
465
466		if (device_map[i].device_type == NIC_CLASS) {
467			if (drv_active & (1 << device_map[i].func_num)) {
468				nic_present++;
469				break;
470			}
471		} else if (device_map[i].device_type == ISCSI_CLASS) {
472			if (drv_active & (1 << device_map[i].func_num)) {
473				if (!iscsi_present ||
474				    (iscsi_present &&
475				     (iscsi_func_low > device_map[i].func_num)))
476					iscsi_func_low = device_map[i].func_num;
477
478				iscsi_present++;
479			}
480		}
481
482		/* For function_num[8..15] get info from dev_part2 register */
483		if (nibble == 7) {
484			nibble = 0;
485			dev_part = dev_part2;
486		}
487	}
488
489	/* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets
490	 * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers
491	 * present. */
492	if (!nic_present && (ha->func_num == iscsi_func_low)) {
493		DEBUG2(ql4_printk(KERN_INFO, ha,
494				  "%s: can reset - NIC not present and lower iSCSI function is %d\n",
495				  __func__, ha->func_num));
496		return 1;
497	}
498
499	return 0;
500}
501
/**
 * qla4_83xx_need_reset_handler - Code to start reset sequence
 * @ha: pointer to adapter structure
 *
 * Note: IDC lock must be held upon entry
 **/
void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout, dev_init_timeout;

	ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
		   __func__);

	if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
				  __func__));
		qla4_8xxx_set_rst_ready(ha);

		/* Non-reset owners ACK Reset and wait for device INIT state
		 * as part of Reset Recovery by Reset Owner */
		dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);

		do {
			if (time_after_eq(jiffies, dev_init_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
					   __func__);
				break;
			}

			/* Drop the IDC lock while sleeping so the reset
			 * owner can make progress */
			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
		} while (dev_state == QLA8XXX_DEV_NEED_RESET);
	} else {
		/* Reset owner: wait for every active function to ACK the
		 * reset (drv_state catches up to drv_active) */
		qla4_8xxx_set_rst_ready(ha);
		reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
		drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
		drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

		ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
			   __func__, drv_state, drv_active);

		while (drv_state != drv_active) {
			if (time_after_eq(jiffies, reset_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
					   __func__, DRIVER_NAME, drv_state,
					   drv_active);
				break;
			}

			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			drv_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DRV_STATE);
			drv_active = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DRV_ACTIVE);
		}

		/* Functions that never ACKed are dropped from drv_active so
		 * they cannot stall future recoveries */
		if (drv_state != drv_active) {
			ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
				   __func__, (drv_active ^ drv_state));
			drv_active = drv_active & drv_state;
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
					    drv_active);
		}

		clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
		/* Start Reset Recovery */
		qla4_8xxx_device_bootstrap(ha);
	}
}
579
580void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
581{
582	uint32_t idc_params, ret_val;
583
584	ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
585					   (uint8_t *)&idc_params, 1);
586	if (ret_val == QLA_SUCCESS) {
587		ha->nx_dev_init_timeout = idc_params & 0xFFFF;
588		ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
589	} else {
590		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
591		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
592	}
593
594	DEBUG2(ql4_printk(KERN_DEBUG, ha,
595			  "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
596			  __func__, ha->nx_dev_init_timeout,
597			  ha->nx_reset_timeout));
598}
599
/*-------------------------Reset Sequence Functions-----------------------*/
601
/* Debug helper: dump the first 16 bytes of the reset template header. */
static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
{
	uint8_t *phdr;

	if (!ha->reset_tmplt.buff) {
		ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
			   __func__);
		return;
	}

	phdr = ha->reset_tmplt.buff;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
			  *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
			  *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
			  *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
			  *(phdr+13), *(phdr+14), *(phdr+15)));
}
621
/**
 * qla4_83xx_copy_bootloader - Copy the boot loader from flash to MS memory
 * @ha: pointer to adapter structure
 *
 * Reads the boot loader image from flash (destination address and size are
 * taken from the BOOTLOADER_ADDR/SIZE registers) into a temporary buffer,
 * then writes it to MS memory in 128-bit chunks.
 *
 * Returns QLA_SUCCESS on success, QLA_ERROR on allocation or I/O failure.
 **/
static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
{
	uint8_t *p_cache;
	uint32_t src, count, size;
	uint64_t dest;
	int ret_val = QLA_SUCCESS;

	src = QLA83XX_BOOTLOADER_FLASH_ADDR;
	dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
	size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);

	/* 128 bit alignment check */
	if (size & 0xF)
		size = (size + 16) & ~0xF;

	/* 16 byte count */
	count = size/16;

	p_cache = vmalloc(size);
	if (p_cache == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_copy_bootloader;
	}

	ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
						    size / sizeof(uint32_t));
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
			   __func__);
		goto exit_copy_error;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
			  __func__));

	/* 128 bit/16 byte write to MS memory */
	ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
					      count);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
			   __func__);
		goto exit_copy_error;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
			  __func__, size));

exit_copy_error:
	vfree(p_cache);

exit_copy_bootloader:
	return ret_val;
}
676
677static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
678{
679	uint32_t val, ret_val = QLA_ERROR;
680	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
681
682	do {
683		val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
684		if (val == PHAN_INITIALIZE_COMPLETE) {
685			DEBUG2(ql4_printk(KERN_INFO, ha,
686					  "%s: Command Peg initialization complete. State=0x%x\n",
687					  __func__, val));
688			ret_val = QLA_SUCCESS;
689			break;
690		}
691		msleep(CRB_CMDPEG_CHECK_DELAY);
692	} while (--retries);
693
694	return ret_val;
695}
696
/**
 * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till
 * value read ANDed with test_mask is equal to test_result.
 *
 * @ha : Pointer to adapter structure
 * @addr : CRB register address
 * @duration : Poll for total of "duration" msecs
 * @test_mask : Mask value read with "test_mask"
 * @test_result : Compare (value&test_mask) with test_result.
 **/
static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
			      int duration, uint32_t test_mask,
			      uint32_t test_result)
{
	uint32_t value;
	uint8_t retries;
	int ret_val = QLA_SUCCESS;

	ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
	if (ret_val == QLA_ERROR)
		goto exit_poll_reg;

	/* Re-read up to duration/10 times, sleeping duration/10 msecs between
	 * attempts; ret_val stays QLA_ERROR if the value never matches */
	retries = duration / 10;
	do {
		if ((value & test_mask) != test_result) {
			msleep(duration / 10);
			ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
			if (ret_val == QLA_ERROR)
				goto exit_poll_reg;

			ret_val = QLA_ERROR;
		} else {
			ret_val = QLA_SUCCESS;
			break;
		}
	} while (retries--);

exit_poll_reg:
	if (ret_val == QLA_ERROR) {
		/* Count sequence errors for the template-end summary */
		ha->reset_tmplt.seq_error++;
		ql4_printk(KERN_ERR, ha, "%s: Poll Failed:  0x%08x 0x%08x 0x%08x\n",
			   __func__, value, test_mask, test_result);
	}

	return ret_val;
}
743
744static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
745{
746	uint32_t sum =  0;
747	uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
748	int u16_count =  ha->reset_tmplt.hdr->size / sizeof(uint16_t);
749	int ret_val;
750
751	while (u16_count-- > 0)
752		sum += *buff++;
753
754	while (sum >> 16)
755		sum = (sum & 0xFFFF) +  (sum >> 16);
756
757	/* checksum of 0 indicates a valid template */
758	if (~sum) {
759		ret_val = QLA_SUCCESS;
760	} else {
761		ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
762			   __func__);
763		ret_val = QLA_ERROR;
764	}
765
766	return ret_val;
767}
768
769/**
770 * qla4_83xx_read_reset_template - Read Reset Template from Flash
771 * @ha: Pointer to adapter structure
772 **/
773void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
774{
775	uint8_t *p_buff;
776	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
777	uint32_t ret_val;
778
779	ha->reset_tmplt.seq_error = 0;
780	ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
781	if (ha->reset_tmplt.buff == NULL) {
782		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
783			   __func__);
784		goto exit_read_reset_template;
785	}
786
787	p_buff = ha->reset_tmplt.buff;
788	addr = QLA83XX_RESET_TEMPLATE_ADDR;
789
790	tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
791				    sizeof(uint32_t);
792
793	DEBUG2(ql4_printk(KERN_INFO, ha,
794			  "%s: Read template hdr size %d from Flash\n",
795			  __func__, tmplt_hdr_def_size));
796
797	/* Copy template header from flash */
798	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
799					   tmplt_hdr_def_size);
800	if (ret_val != QLA_SUCCESS) {
801		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
802			   __func__);
803		goto exit_read_template_error;
804	}
805
806	ha->reset_tmplt.hdr =
807		(struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
808
809	/* Validate the template header size and signature */
810	tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
811	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
812	    (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
813		ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
814			   __func__, tmplt_hdr_size, tmplt_hdr_def_size);
815		goto exit_read_template_error;
816	}
817
818	addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
819	p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
820	tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
821			      ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
822
823	DEBUG2(ql4_printk(KERN_INFO, ha,
824			  "%s: Read rest of the template size %d\n",
825			  __func__, ha->reset_tmplt.hdr->size));
826
827	/* Copy rest of the template */
828	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
829					   tmplt_hdr_def_size);
830	if (ret_val != QLA_SUCCESS) {
831		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset tempelate\n",
832			   __func__);
833		goto exit_read_template_error;
834	}
835
836	/* Integrity check */
837	if (qla4_83xx_reset_seq_checksum_test(ha)) {
838		ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
839			   __func__);
840		goto exit_read_template_error;
841	}
842	DEBUG2(ql4_printk(KERN_INFO, ha,
843			  "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
844			  __func__));
845
846	/* Get STOP, START, INIT sequence offsets */
847	ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
848				      ha->reset_tmplt.hdr->init_seq_offset;
849	ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
850				       ha->reset_tmplt.hdr->start_seq_offset;
851	ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
852				      ha->reset_tmplt.hdr->hdr_size;
853	qla4_83xx_dump_reset_seq_hdr(ha);
854
855	goto exit_read_reset_template;
856
857exit_read_template_error:
858	vfree(ha->reset_tmplt.buff);
859
860exit_read_reset_template:
861	return;
862}
863
864/**
865 * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
866 *
867 * @ha : Pointer to adapter structure
868 * @raddr : CRB address to read from
869 * @waddr : CRB address to write to
870 **/
871static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
872					 uint32_t raddr, uint32_t waddr)
873{
874	uint32_t value;
875
876	qla4_83xx_rd_reg_indirect(ha, raddr, &value);
877	qla4_83xx_wr_reg_indirect(ha, waddr, value);
878}
879
880/**
881 * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
882 *
883 * This function read value from raddr, AND with test_mask,
884 * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
885 *
886 * @ha : Pointer to adapter structure
887 * @raddr : CRB address to read from
888 * @waddr : CRB address to write to
889 * @p_rmw_hdr : header with shift/or/xor values.
890 **/
891static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
892				  uint32_t waddr,
893				  struct qla4_83xx_rmw *p_rmw_hdr)
894{
895	uint32_t value;
896
897	if (p_rmw_hdr->index_a)
898		value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
899	else
900		qla4_83xx_rd_reg_indirect(ha, raddr, &value);
901
902	value &= p_rmw_hdr->test_mask;
903	value <<= p_rmw_hdr->shl;
904	value >>= p_rmw_hdr->shr;
905	value |= p_rmw_hdr->or_value;
906	value ^= p_rmw_hdr->xor_value;
907
908	qla4_83xx_wr_reg_indirect(ha, waddr, value);
909
910	return;
911}
912
913static void qla4_83xx_write_list(struct scsi_qla_host *ha,
914				 struct qla4_83xx_reset_entry_hdr *p_hdr)
915{
916	struct qla4_83xx_entry *p_entry;
917	uint32_t i;
918
919	p_entry = (struct qla4_83xx_entry *)
920		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
921
922	for (i = 0; i < p_hdr->count; i++, p_entry++) {
923		qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
924		if (p_hdr->delay)
925			udelay((uint32_t)(p_hdr->delay));
926	}
927}
928
929static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
930				      struct qla4_83xx_reset_entry_hdr *p_hdr)
931{
932	struct qla4_83xx_entry *p_entry;
933	uint32_t i;
934
935	p_entry = (struct qla4_83xx_entry *)
936		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
937
938	for (i = 0; i < p_hdr->count; i++, p_entry++) {
939		qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
940		if (p_hdr->delay)
941			udelay((uint32_t)(p_hdr->delay));
942	}
943}
944
/* POLL_LIST opcode: poll each entry's register against the shared
 * test_mask/test_value from the poll header. */
static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
				struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla4_83xx_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));

	/* Entries start after 8 byte qla4_83xx_poll, poll header contains
	 * the test_mask, test_value. */
	p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
					     sizeof(struct qla4_83xx_poll));

	delay = (long)p_hdr->delay;
	if (!delay) {
		/* Zero delay: single-shot poll, result ignored */
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
					   p_poll->test_mask,
					   p_poll->test_value);
		}
	} else {
		/* On poll failure, read back both entry registers; the
		 * values are discarded (readback for debug/side effect) */
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
							  &value);
				qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
							  &value);
			}
		}
	}
}
982
/* POLL_WRITE_LIST opcode: per entry, write dr and ar registers, then
 * (if a delay is given) poll the ar register for the expected value. */
static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
				      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla4_83xx_quad_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;

	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_quad_entry *)
		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
					  p_entry->dr_value);
		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
					  p_entry->ar_value);
		if (delay) {
			/* Timeout is logged but not fatal; poll_reg already
			 * bumps reset_tmplt.seq_error */
			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
						  __func__, i,
						  ha->reset_tmplt.seq_index));
			}
		}
	}
}
1014
1015static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
1016					struct qla4_83xx_reset_entry_hdr *p_hdr)
1017{
1018	struct qla4_83xx_entry *p_entry;
1019	struct qla4_83xx_rmw *p_rmw_hdr;
1020	uint32_t i;
1021
1022	p_rmw_hdr = (struct qla4_83xx_rmw *)
1023		    ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1024	p_entry = (struct qla4_83xx_entry *)
1025		  ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
1026
1027	for (i = 0; i < p_hdr->count; i++, p_entry++) {
1028		qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
1029				      p_rmw_hdr);
1030		if (p_hdr->delay)
1031			udelay((uint32_t)(p_hdr->delay));
1032	}
1033}
1034
1035static void qla4_83xx_pause(struct scsi_qla_host *ha,
1036			    struct qla4_83xx_reset_entry_hdr *p_hdr)
1037{
1038	if (p_hdr->delay)
1039		mdelay((uint32_t)((long)p_hdr->delay));
1040}
1041
/* POLL_READ_LIST opcode: per entry, write the ar register, poll it, and on
 * success capture the dr register's value into the template array for use
 * by later RMW entries. */
static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
				     struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	int index;
	struct qla4_83xx_quad_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_quad_entry *)
		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
					  p_entry->ar_value);
		if (delay) {
			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
						  __func__, i,
						  ha->reset_tmplt.seq_index));
			} else {
				index = ha->reset_tmplt.array_index;
				qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
							  &value);
				ha->reset_tmplt.array[index++] = value;

				/* Wrap to 1, not 0 -- slot 0 appears to be
				 * reserved (index_a == 0 means "read the
				 * register" in the RMW path); verify against
				 * the template spec */
				if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
					ha->reset_tmplt.array_index = 1;
			}
		}
	}
}
1081
/* OPCODE_SEQ_END: flag the end of the current sub-sequence so that
 * qla4_83xx_process_reset_template() stops iterating.
 */
static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
			      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	ha->reset_tmplt.seq_end = 1;
}
1087
1088static void qla4_83xx_template_end(struct scsi_qla_host *ha,
1089				   struct qla4_83xx_reset_entry_hdr *p_hdr)
1090{
1091	ha->reset_tmplt.template_end = 1;
1092
1093	if (ha->reset_tmplt.seq_error == 0) {
1094		DEBUG2(ql4_printk(KERN_INFO, ha,
1095				  "%s: Reset sequence completed SUCCESSFULLY.\n",
1096				  __func__));
1097	} else {
1098		ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
1099			   __func__);
1100	}
1101}
1102
1103/**
1104 * qla4_83xx_process_reset_template - Process reset template.
1105 *
1106 * Process all entries in reset template till entry with SEQ_END opcode,
1107 * which indicates end of the reset template processing. Each entry has a
1108 * Reset Entry header, entry opcode/command, with size of the entry, number
1109 * of entries in sub-sequence and delay in microsecs or timeout in millisecs.
1110 *
1111 * @ha : Pointer to adapter structure
1112 * @p_buff : Common reset entry header.
1113 **/
1114static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
1115					     char *p_buff)
1116{
1117	int index, entries;
1118	struct qla4_83xx_reset_entry_hdr *p_hdr;
1119	char *p_entry = p_buff;
1120
1121	ha->reset_tmplt.seq_end = 0;
1122	ha->reset_tmplt.template_end = 0;
1123	entries = ha->reset_tmplt.hdr->entries;
1124	index = ha->reset_tmplt.seq_index;
1125
1126	for (; (!ha->reset_tmplt.seq_end) && (index  < entries); index++) {
1127
1128		p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
1129		switch (p_hdr->cmd) {
1130		case OPCODE_NOP:
1131			break;
1132		case OPCODE_WRITE_LIST:
1133			qla4_83xx_write_list(ha, p_hdr);
1134			break;
1135		case OPCODE_READ_WRITE_LIST:
1136			qla4_83xx_read_write_list(ha, p_hdr);
1137			break;
1138		case OPCODE_POLL_LIST:
1139			qla4_83xx_poll_list(ha, p_hdr);
1140			break;
1141		case OPCODE_POLL_WRITE_LIST:
1142			qla4_83xx_poll_write_list(ha, p_hdr);
1143			break;
1144		case OPCODE_READ_MODIFY_WRITE:
1145			qla4_83xx_read_modify_write(ha, p_hdr);
1146			break;
1147		case OPCODE_SEQ_PAUSE:
1148			qla4_83xx_pause(ha, p_hdr);
1149			break;
1150		case OPCODE_SEQ_END:
1151			qla4_83xx_seq_end(ha, p_hdr);
1152			break;
1153		case OPCODE_TMPL_END:
1154			qla4_83xx_template_end(ha, p_hdr);
1155			break;
1156		case OPCODE_POLL_READ_LIST:
1157			qla4_83xx_poll_read_list(ha, p_hdr);
1158			break;
1159		default:
1160			ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
1161				   __func__, p_hdr->cmd, index);
1162			break;
1163		}
1164
1165		/* Set pointer to next entry in the sequence. */
1166		p_entry += p_hdr->size;
1167	}
1168
1169	ha->reset_tmplt.seq_index = index;
1170}
1171
/* Run the STOP sub-sequence from the top of the template; warn if it did
 * not terminate on a SEQ_END entry.
 */
static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
{
	ha->reset_tmplt.seq_index = 0;
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);

	if (ha->reset_tmplt.seq_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
			   __func__);
}
1181
/* Run the START sub-sequence; warn if the template did not finish with a
 * TMPL_END entry.
 */
static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
{
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);

	if (ha->reset_tmplt.template_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
			   __func__);
}
1190
/* Run the INIT sub-sequence; warn if it did not terminate on a SEQ_END
 * entry.
 */
static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
{
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);

	if (ha->reset_tmplt.seq_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
			   __func__);
}
1199
/**
 * qla4_83xx_restart - Stop, re-init and restart the 83xx firmware.
 * @ha: Pointer to adapter structure.
 *
 * Runs the STOP sequence, optionally collects a minidump, runs the INIT
 * sequence, copies the bootloader and kicks off the START sequence.
 * Returns QLA_SUCCESS, or QLA_ERROR if the bootloader copy fails.
 **/
static int qla4_83xx_restart(struct scsi_qla_host *ha)
{
	int ret_val = QLA_SUCCESS;
	uint32_t idc_ctrl;

	qla4_83xx_process_stop_seq(ha);

	/*
	 * Collect minidump.
	 * If IDC_CTRL BIT1 is set, clear it on going to INIT state and
	 * don't collect minidump
	 */
	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
				 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
		ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
			   __func__);
	} else {
		qla4_8xxx_get_minidump(ha);
	}

	qla4_83xx_process_init_seq(ha);

	if (qla4_83xx_copy_bootloader(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_restart;
	}

	/* Tell firmware to boot from the flash image just staged. */
	qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
	qla4_83xx_process_start_seq(ha);

exit_restart:
	return ret_val;
}
1237
1238int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
1239{
1240	int ret_val = QLA_SUCCESS;
1241
1242	ret_val = qla4_83xx_restart(ha);
1243	if (ret_val == QLA_ERROR) {
1244		ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
1245		goto exit_start_fw;
1246	} else {
1247		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
1248				  __func__));
1249	}
1250
1251	ret_val = qla4_83xx_check_cmd_peg_status(ha);
1252	if (ret_val == QLA_ERROR)
1253		ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
1254			   __func__);
1255
1256exit_start_fw:
1257	return ret_val;
1258}
1259
1260/*----------------------Interrupt Related functions ---------------------*/
1261
/* Disable IOCB interrupts if they are currently enabled; the flag bit
 * guards against double-disable.
 */
static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
{
	if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
		qla4_8xxx_intr_disable(ha);
}
1267
1268static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
1269{
1270	uint32_t mb_int, ret;
1271
1272	if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1273		ret = readl(&ha->qla4_83xx_reg->mbox_int);
1274		mb_int = ret & ~INT_ENABLE_FW_MB;
1275		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1276		writel(1, &ha->qla4_83xx_reg->leg_int_mask);
1277	}
1278}
1279
/* Disable all 83xx interrupts: mailbox first, then IOCB. */
void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_disable_mbox_intrs(ha);
	qla4_83xx_disable_iocb_intrs(ha);
}
1285
/* Enable IOCB interrupts if not already enabled; the flag bit guards
 * against double-enable.
 */
static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
{
	if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
		qla4_8xxx_intr_enable(ha);
		set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
	}
}
1293
1294void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
1295{
1296	uint32_t mb_int;
1297
1298	if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1299		mb_int = INT_ENABLE_FW_MB;
1300		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1301		writel(0, &ha->qla4_83xx_reg->leg_int_mask);
1302		set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
1303	}
1304}
1305
1306
/* Enable all 83xx interrupts: mailbox first, then IOCB. */
void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_enable_mbox_intrs(ha);
	qla4_83xx_enable_iocb_intrs(ha);
}
1312
1313
/* Queue a mailbox command to the firmware.
 * @mbx_cmd: array of mailbox register values.
 * @incount: number of valid entries in @mbx_cmd.
 *
 * Mailbox 0 is written last, after all argument registers, then the host
 * interrupt register is rung to notify firmware.
 */
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
			      int incount)
{
	int i;

	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < incount; i++)
		writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);

	writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);

	/* Set Host Interrupt register to 1, to tell the firmware that
	 * a mailbox command is pending. Firmware after reading the
	 * mailbox command, clears the host interrupt register */
	writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
}
1330
/* Poll-mode mailbox completion: if the RISC interrupt is pending, record
 * the expected number of status mailboxes and run the ISR to consume it.
 * @outcount: number of mailbox-out registers the caller expects.
 */
void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
{
	int intr_status;

	intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
	if (intr_status) {
		ha->mbox_status_count = outcount;
		ha->isp_ops->interrupt_service_routine(ha, intr_status);
	}
}
1341
1342/**
1343 * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
1344 * @ha: pointer to host adapter structure.
1345 **/
1346int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
1347{
1348	int rval;
1349	uint32_t dev_state;
1350
1351	ha->isp_ops->idc_lock(ha);
1352	dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
1353
1354	if (ql4xdontresethba)
1355		qla4_83xx_set_idc_dontreset(ha);
1356
1357	if (dev_state == QLA8XXX_DEV_READY) {
1358		/* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset
1359		 * recovery */
1360		if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
1361			ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
1362				   __func__);
1363			rval = QLA_ERROR;
1364			goto exit_isp_reset;
1365		}
1366
1367		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
1368				  __func__));
1369		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
1370				    QLA8XXX_DEV_NEED_RESET);
1371
1372	} else {
1373		/* If device_state is NEED_RESET, go ahead with
1374		 * Reset,irrespective of ql4xdontresethba. This is to allow a
1375		 * non-reset-owner to force a reset. Non-reset-owner sets
1376		 * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
1377		 * and then forces a Reset by setting device_state to
1378		 * NEED_RESET. */
1379		DEBUG2(ql4_printk(KERN_INFO, ha,
1380				  "%s: HW state already set to NEED_RESET\n",
1381				  __func__));
1382	}
1383
1384	/* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on
1385	 * priority and which drivers are present. Unlike ISP8022, the function
1386	 * setting NEED_RESET, may not be the Reset owner. */
1387	if (qla4_83xx_can_perform_reset(ha))
1388		set_bit(AF_8XXX_RST_OWNER, &ha->flags);
1389
1390	ha->isp_ops->idc_unlock(ha);
1391	rval = qla4_8xxx_device_state_handler(ha);
1392
1393	ha->isp_ops->idc_lock(ha);
1394	qla4_8xxx_clear_rst_ready(ha);
1395exit_isp_reset:
1396	ha->isp_ops->idc_unlock(ha);
1397
1398	if (rval == QLA_SUCCESS)
1399		clear_bit(AF_FW_RECOVERY, &ha->flags);
1400
1401	return rval;
1402}
1403
1404static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
1405{
1406	u32 val = 0, val1 = 0;
1407	int i, status = QLA_SUCCESS;
1408
1409	status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
1410	DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
1411
1412	/* Port 0 Rx Buffer Pause Threshold Registers. */
1413	DEBUG2(ql4_printk(KERN_INFO, ha,
1414		"Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
1415	for (i = 0; i < 8; i++) {
1416		status = qla4_83xx_rd_reg_indirect(ha,
1417				QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
1418		DEBUG2(pr_info("0x%x ", val));
1419	}
1420
1421	DEBUG2(pr_info("\n"));
1422
1423	/* Port 1 Rx Buffer Pause Threshold Registers. */
1424	DEBUG2(ql4_printk(KERN_INFO, ha,
1425		"Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
1426	for (i = 0; i < 8; i++) {
1427		status = qla4_83xx_rd_reg_indirect(ha,
1428				QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
1429		DEBUG2(pr_info("0x%x  ", val));
1430	}
1431
1432	DEBUG2(pr_info("\n"));
1433
1434	/* Port 0 RxB Traffic Class Max Cell Registers. */
1435	DEBUG2(ql4_printk(KERN_INFO, ha,
1436		"Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
1437	for (i = 0; i < 4; i++) {
1438		status = qla4_83xx_rd_reg_indirect(ha,
1439			       QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
1440		DEBUG2(pr_info("0x%x  ", val));
1441	}
1442
1443	DEBUG2(pr_info("\n"));
1444
1445	/* Port 1 RxB Traffic Class Max Cell Registers. */
1446	DEBUG2(ql4_printk(KERN_INFO, ha,
1447		"Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
1448	for (i = 0; i < 4; i++) {
1449		status = qla4_83xx_rd_reg_indirect(ha,
1450			       QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
1451		DEBUG2(pr_info("0x%x  ", val));
1452	}
1453
1454	DEBUG2(pr_info("\n"));
1455
1456	/* Port 0 RxB Rx Traffic Class Stats. */
1457	DEBUG2(ql4_printk(KERN_INFO, ha,
1458			  "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
1459	for (i = 7; i >= 0; i--) {
1460		status = qla4_83xx_rd_reg_indirect(ha,
1461						   QLA83XX_PORT0_RXB_TC_STATS,
1462						   &val);
1463		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
1464		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
1465					  (val | (i << 29)));
1466		status = qla4_83xx_rd_reg_indirect(ha,
1467						   QLA83XX_PORT0_RXB_TC_STATS,
1468						   &val);
1469		DEBUG2(pr_info("0x%x  ", val));
1470	}
1471
1472	DEBUG2(pr_info("\n"));
1473
1474	/* Port 1 RxB Rx Traffic Class Stats. */
1475	DEBUG2(ql4_printk(KERN_INFO, ha,
1476			  "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
1477	for (i = 7; i >= 0; i--) {
1478		status = qla4_83xx_rd_reg_indirect(ha,
1479						   QLA83XX_PORT1_RXB_TC_STATS,
1480						   &val);
1481		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
1482		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
1483					  (val | (i << 29)));
1484		status = qla4_83xx_rd_reg_indirect(ha,
1485						   QLA83XX_PORT1_RXB_TC_STATS,
1486						   &val);
1487		DEBUG2(pr_info("0x%x  ", val));
1488	}
1489
1490	DEBUG2(pr_info("\n"));
1491
1492	status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
1493					   &val);
1494	status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
1495					   &val1);
1496
1497	DEBUG2(ql4_printk(KERN_INFO, ha,
1498			  "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
1499			  val, val1));
1500}
1501
1502static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1503{
1504	int i;
1505
1506	/* set SRE-Shim Control Register */
1507	qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
1508				  QLA83XX_SET_PAUSE_VAL);
1509
1510	for (i = 0; i < 8; i++) {
1511		/* Port 0 Rx Buffer Pause Threshold Registers. */
1512		qla4_83xx_wr_reg_indirect(ha,
1513				      QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
1514				      QLA83XX_SET_PAUSE_VAL);
1515		/* Port 1 Rx Buffer Pause Threshold Registers. */
1516		qla4_83xx_wr_reg_indirect(ha,
1517				      QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
1518				      QLA83XX_SET_PAUSE_VAL);
1519	}
1520
1521	for (i = 0; i < 4; i++) {
1522		/* Port 0 RxB Traffic Class Max Cell Registers. */
1523		qla4_83xx_wr_reg_indirect(ha,
1524				     QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
1525				     QLA83XX_SET_TC_MAX_CELL_VAL);
1526		/* Port 1 RxB Traffic Class Max Cell Registers. */
1527		qla4_83xx_wr_reg_indirect(ha,
1528				     QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
1529				     QLA83XX_SET_TC_MAX_CELL_VAL);
1530	}
1531
1532	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
1533				  QLA83XX_SET_PAUSE_VAL);
1534	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
1535				  QLA83XX_SET_PAUSE_VAL);
1536
1537	ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
1538}
1539
1540/**
1541 * qla4_83xx_eport_init - Initialize EPort.
1542 * @ha: Pointer to host adapter structure.
1543 *
1544 * If EPort hardware is in reset state before disabling pause, there would be
1545 * serious hardware wedging issues. To prevent this perform eport init everytime
1546 * before disabling pause frames.
1547 **/
1548static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
1549{
1550	/* Clear the 8 registers */
1551	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
1552	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
1553	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
1554	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
1555	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
1556	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
1557	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
1558	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
1559
1560	/* Write any value to Reset Control register */
1561	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
1562
1563	ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
1564}
1565
/* Public entry point: take the IDC lock, bring EPort out of reset, dump
 * the current pause-control registers, then disable pause frames.
 */
void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	ha->isp_ops->idc_lock(ha);
	/* Before disabling pause frames, ensure that eport is not in reset */
	qla4_83xx_eport_init(ha);
	qla4_83xx_dump_pause_control_regs(ha);
	__qla4_83xx_disable_pause(ha);
	ha->isp_ops->idc_unlock(ha);
}
1575
1576/**
1577 * qla4_83xx_is_detached - Check if we are marked invisible.
1578 * @ha: Pointer to host adapter structure.
1579 **/
1580int qla4_83xx_is_detached(struct scsi_qla_host *ha)
1581{
1582	uint32_t drv_active;
1583
1584	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1585
1586	if (test_bit(AF_INIT_DONE, &ha->flags) &&
1587	    !(drv_active & (1 << ha->func_num))) {
1588		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
1589				  __func__, drv_active));
1590		return QLA_SUCCESS;
1591	}
1592
1593	return QLA_ERROR;
1594}
1595