/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"

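/**
 * qla4xxx_read_flash - handle a QLISCSI_VND_READ_FLASH vendor request
 * @bsg_job: bsg_job to handle
 *
 * Reads reply_payload.payload_len bytes of flash, starting at the offset
 * passed in vendor_cmd[1], into a DMA buffer and copies them into the
 * job's reply payload.
 */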
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation active\n",
			   __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}

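/**
 * qla4xxx_update_flash - handle a QLISCSI_VND_UPDATE_FLASH vendor request
 * @bsg_job: bsg_job to handle
 *
 * Copies the request payload into a DMA buffer and writes it to flash at
 * the offset passed in vendor_cmd[1], using the options in vendor_cmd[2].
 */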
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t length = 0;
	uint32_t offset = 0;
	uint32_t options = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation active\n",
			   __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_WRITING;
	length = bsg_job->request_payload.payload_len;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, flash, length);

	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}

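/**
 * qla4xxx_get_acb_state - handle a QLISCSI_VND_GET_ACB_STATE vendor request
 * @bsg_job: bsg_job to handle
 *
 * Queries the IP state for the ACB index in vendor_cmd[1] and the IP index
 * in vendor_cmd[2], and returns the mailbox status registers in the reply
 * payload.
 */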
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}

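/**
 * qla4xxx_read_nvram - handle a QLISCSI_VND_READ_NVRAM vendor request
 * @bsg_job: bsg_job to handle
 *
 * Reads reply_payload.payload_len bytes of NVRAM, starting at the offset
 * passed in vendor_cmd[1], into the job's reply payload. 40xx adapters
 * only.
 */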
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}

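/**
 * qla4xxx_update_nvram - handle a QLISCSI_VND_UPDATE_NVRAM vendor request
 * @bsg_job: bsg_job to handle
 *
 * Writes the request payload to NVRAM at the offset passed in
 * vendor_cmd[1]. 40xx adapters only.
 */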
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}

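/**
 * qla4xxx_restore_defaults - handle a QLISCSI_VND_RESTORE_DEFAULTS request
 * @bsg_job: bsg_job to handle
 *
 * Restores factory defaults for the region passed in vendor_cmd[1],
 * qualified by the field values in vendor_cmd[2] and vendor_cmd[3].
 */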
static int
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t region = 0;
	uint32_t field0 = 0;
	uint32_t field1 = 0;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
	field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];

	rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: restore factory defaults failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}

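/**
 * qla4xxx_bsg_get_acb - handle a QLISCSI_VND_GET_ACB vendor request
 * @bsg_job: bsg_job to handle
 *
 * Fetches the address control block of the type passed in vendor_cmd[1]
 * and copies it into the job's reply payload.
 */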
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t acb_type = 0;
	uint32_t len = 0;
	dma_addr_t acb_dma;
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}

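/**
 * ql4xxx_execute_diag_cmd - execute a RAM/flash/LED diagnostic mailbox command
 * @bsg_job: bsg_job to handle
 *
 * Issues the mailbox command passed in vendor_cmd[1] onwards and returns
 * the mailbox status registers to the application after the reply header.
 */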
static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_diag_mem_test;
	}

	bsg_reply->reply_payload_rcv_len = 0;
	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));

exit_diag_mem_test:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}

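/**
 * qla4_83xx_wait_for_loopback_config_comp - wait for loopback config to apply
 * @ha: pointer to host adapter structure
 * @wait_for_link: also wait for a LINK UP notification when set
 *
 * Waits for the IDC Complete notification, extending the wait by
 * idc_extend_tmo seconds when that is set, and then optionally for the
 * subsequent LINK UP notification.
 */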
static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
						   int wait_for_link)
{
	int status = QLA_SUCCESS;

	if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
		ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, waiting another %d seconds\n",
			   __func__, ha->idc_extend_tmo);
		if (ha->idc_extend_tmo) {
			if (!wait_for_completion_timeout(&ha->idc_comp,
						(ha->idc_extend_tmo * HZ))) {
				ha->notify_idc_comp = 0;
				ha->notify_link_up_comp = 0;
				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received\n",
					   __func__);
				status = QLA_ERROR;
				goto exit_wait;
			} else {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: IDC Complete notification received\n",
						  __func__));
			}
		}
	} else {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: IDC Complete notification received\n",
				  __func__));
	}
	ha->notify_idc_comp = 0;

	if (wait_for_link) {
		if (!wait_for_completion_timeout(&ha->link_up_comp,
						 (IDC_COMP_TOV * HZ))) {
			ha->notify_link_up_comp = 0;
			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received\n",
				   __func__);
			status = QLA_ERROR;
			goto exit_wait;
		} else {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: LINK UP notification received\n",
					  __func__));
		}
		ha->notify_link_up_comp = 0;
	}

exit_wait:
	return status;
}

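/**
 * qla4_83xx_pre_loopback_config - enable loopback before a diagnostic test
 * @ha: pointer to host adapter structure
 * @mbox_cmd: diagnostic mailbox command to be issued
 *
 * Enables internal or external loopback (per mbox_cmd[1]) and disables
 * DCBX in the port configuration before the loopback test runs.
 */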
static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
					 uint32_t *mbox_cmd)
{
	uint32_t config = 0;
	int status = QLA_SUCCESS;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_pre_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
			  __func__, config));

	if ((config & ENABLE_INTERNAL_LOOPBACK) ||
	    (config & ENABLE_EXTERNAL_LOOPBACK)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n",
			   __func__);
		goto exit_pre_loopback_config;
	}

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config |= ENABLE_INTERNAL_LOOPBACK;

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config |= ENABLE_EXTERNAL_LOOPBACK;

	config &= ~ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
			  __func__, config));

	ha->notify_idc_comp = 1;
	ha->notify_link_up_comp = 1;

	/* get the link state */
	qla4xxx_get_firmware_state(ha);

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ha->notify_idc_comp = 0;
		ha->notify_link_up_comp = 0;
		goto exit_pre_loopback_config;
	}
exit_pre_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}

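/**
 * qla4_83xx_post_loopback_config - restore port config after a loopback test
 * @ha: pointer to host adapter structure
 * @mbox_cmd: diagnostic mailbox command that was issued
 *
 * Clears the loopback mode enabled by qla4_83xx_pre_loopback_config() and
 * re-enables DCBX; schedules an adapter reset if the restore fails.
 */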
static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
					  uint32_t *mbox_cmd)
{
	int status = QLA_SUCCESS;
	uint32_t config = 0;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_post_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
			  config));

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config &= ~ENABLE_INTERNAL_LOOPBACK;
	else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config &= ~ENABLE_EXTERNAL_LOOPBACK;

	config |= ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Restore default port config=%08X\n", __func__,
			  config));

	ha->notify_idc_comp = 1;
	if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
		ha->notify_link_up_comp = 1;

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
			   __func__);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		clear_bit(AF_LOOPBACK, &ha->flags);
		goto exit_post_loopback_config;
	}

exit_post_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}

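/**
 * qla4xxx_execute_diag_loopback_cmd - execute a loopback diagnostic test
 * @bsg_job: bsg_job to handle
 *
 * For ISP8032/8042 adapters, reconfigures the port for loopback, issues
 * the diagnostic mailbox command, returns the mailbox status registers to
 * the application and then restores the original port configuration.
 */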
static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int wait_for_link = 1;
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	bsg_reply->reply_payload_rcv_len = 0;

	if (test_bit(AF_LOOPBACK, &ha->flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto restore;
		}
	}

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
				&mbox_sts[0]);

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
restore:
	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		/* After restoring the port config, wait for LINK UP only
		 * if the PHY link is up */
		if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
			wait_for_link = 0;

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto exit_loopback_cmd;
		}
	}
exit_loopback_cmd:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));
	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}

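/**
 * qla4xxx_execute_diag_test - dispatch a QLISCSI_VND_DIAG_TEST request
 * @bsg_job: bsg_job to handle
 *
 * Routes memory/flash/LED diagnostics to ql4xxx_execute_diag_cmd() and
 * loopback tests to qla4xxx_execute_diag_loopback_cmd().
 */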
static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t diag_cmd;
	int rval = -EINVAL;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	if (diag_cmd == MBOX_CMD_DIAG_TEST) {
		switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
		case QL_DIAG_CMD_TEST_DDR_SIZE:
		case QL_DIAG_CMD_TEST_DDR_RW:
		case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
		case QL_DIAG_CMD_TEST_NVRAM:
		case QL_DIAG_CMD_TEST_FLASH_ROM:
		case QL_DIAG_CMD_TEST_DMA_XFER:
		case QL_DIAG_CMD_SELF_DDR_RW:
		case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
			/* Execute diag test for adapter RAM/FLASH */
			ql4xxx_execute_diag_cmd(bsg_job);
			/* Always return success as we want to send bsg_reply
			 * to the application */
			rval = QLA_SUCCESS;
			break;

		case QL_DIAG_CMD_TEST_INT_LOOPBACK:
		case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
			/* Execute diag test for Network */
			qla4xxx_execute_diag_loopback_cmd(bsg_job);
			/* Always return success as we want to send bsg_reply
			 * to the application */
			rval = QLA_SUCCESS;
			break;
		default:
			ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
				   __func__,
				   bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
		}
	} else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
		   (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
		ql4xxx_execute_diag_cmd(bsg_job);
		rval = QLA_SUCCESS;
	} else {
		ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
			   __func__, diag_cmd);
	}

	return rval;
}

/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: bsg_job to handle
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
	case QLISCSI_VND_READ_FLASH:
		return qla4xxx_read_flash(bsg_job);

	case QLISCSI_VND_UPDATE_FLASH:
		return qla4xxx_update_flash(bsg_job);

	case QLISCSI_VND_GET_ACB_STATE:
		return qla4xxx_get_acb_state(bsg_job);

	case QLISCSI_VND_READ_NVRAM:
		return qla4xxx_read_nvram(bsg_job);

	case QLISCSI_VND_UPDATE_NVRAM:
		return qla4xxx_update_nvram(bsg_job);

	case QLISCSI_VND_RESTORE_DEFAULTS:
		return qla4xxx_restore_defaults(bsg_job);

	case QLISCSI_VND_GET_ACB:
		return qla4xxx_bsg_get_acb(bsg_job);

	case QLISCSI_VND_DIAG_TEST:
		return qla4xxx_execute_diag_test(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: 0x%x\n",
			   __func__,
			   bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return -ENOSYS;
	}
}

/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: bsg_job to handle
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		return qla4xxx_process_vendor_specific(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
			   __func__, bsg_req->msgcode);
	}

	return -ENOSYS;
}