/*
 * bnx2fc_els.c: QLogic NetXtreme II Linux FCoE offload driver.
 * This file contains helper routines that handle ELS requests
 * and responses.
 *
 * Copyright (c) 2008 - 2013 Broadcom Corporation
 * Copyright (c) 2014, QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			void *data, u32 data_len,
			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);

static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req;
	struct bnx2fc_cmd *rrq_req;
	int rc = 0;

	BUG_ON(!cb_arg);
	rrq_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BUG_ON(!orig_io_req);
	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
		   orig_io_req->xid, rrq_req->xid);

	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. Remove from active_cmd_queue.
		 */
		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
			   rrq_req->xid);

		if (rrq_req->on_active_queue) {
			list_del_init(&rrq_req->link);
			rrq_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(rrq_req);
			BUG_ON(rc);
		}
	}
	kfree(cb_arg);
}
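
/*
 * bnx2fc_send_rrq - send an RRQ (Reinstate Recovery Qualifier) ELS so the
 * exchange of an aborted IO can be reclaimed. Retries for up to 10 seconds
 * if the send path is temporarily out of resources (-ENOMEM). A reference
 * on the aborted IO request is dropped by bnx2fc_rrq_compl() on completion,
 * or here if the RRQ could not be issued.
 */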
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{

	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	unsigned long start = jiffies;
	int rc;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("rrq Failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	if (rc) {
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			aborted_io_req->xid);
		kfree(cb_arg);
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}

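/*
 * bnx2fc_l2_els_compl - completion handler for ELS requests (ADISC, LOGO,
 * RLS) that were built by libfc but sent through the offload path.
 * Rebuilds the response frame from the MP request buffers and hands it
 * back to libfc via bnx2fc_process_l2_frame_compl() using the original
 * libfc OX_ID. A timed-out request is simply cleaned up with the firmware
 * and the completion is dropped, since libfc handles the ELS timeout.
 */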
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u16 l2_oxid;
	int frame_len;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. libfc will handle the els timeout
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}

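/*
 * bnx2fc_send_adisc - send an ADISC ELS, built by libfc in the passed frame,
 * over the offloaded session. The libfc OX_ID is saved so the response can
 * be returned to libfc from bnx2fc_l2_els_compl().
 */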
int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	/* adisc is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

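/*
 * bnx2fc_send_logo - send a libfc-built LOGO ELS over the offloaded session,
 * completing back to libfc through bnx2fc_l2_els_compl().
 */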
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_logo *logo;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	logo = fc_frame_payload_get(fp, sizeof(*logo));
	/* logo is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

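/*
 * bnx2fc_send_rls - send a libfc-built RLS (Read Link Error Status) ELS over
 * the offloaded session, completing back to libfc through
 * bnx2fc_l2_els_compl().
 */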
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_rls *rls;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	rls = fc_frame_payload_get(fp, sizeof(*rls));
	/* rls is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
				  bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

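/*
 * bnx2fc_srr_compl - completion handler for an SRR (Sequence Retransmission
 * Request) sent by bnx2fc_send_srr(). On a timeout the SRR exchange is
 * aborted and the SRR is retried up to SRR_RETRY_COUNT times before the
 * original IO itself is aborted. Otherwise the LS_ACC/LS_RJT response is
 * reassembled into an fc_frame; LS_RJT causes the original IO to be aborted.
 * Drops the reference taken on the original IO request when the SRR was sent.
 */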
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timed out */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}

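/*
 * bnx2fc_rec_compl - completion handler for a REC (Read Exchange Concise)
 * sent by bnx2fc_send_rec() after the firmware reported an error on an
 * offloaded exchange. Depending on the LS_RJT/LS_ACC contents, the original
 * IO is either reposted (command lost), aborted, recovered with a firmware
 * sequence cleanup, or retried with an SRR from the recovered offset.
 */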
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		/* els req is timed out. send abts for els */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* REC timed out; retry it, else send ABTS to the orig IO req */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}

	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
					orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					" IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}

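/*
 * bnx2fc_send_rec - send a REC ELS to query the state of an exchange that
 * the firmware flagged with an error. Takes an extra reference on the
 * original IO request; the reference is dropped in bnx2fc_rec_compl(), or
 * here if the REC could not be issued.
 */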
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	struct fc_els_rec rec;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
				 bnx2fc_rec_compl, cb_arg,
				 r_a_tov);
rec_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	}
	return rc;
}

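/*
 * bnx2fc_send_srr - send an SRR ELS asking the target to retransmit from
 * @offset with the given @r_ctl (FCP_RSP, data descriptor or solicited
 * data, as chosen by bnx2fc_rec_compl()). Takes an extra reference on the
 * original IO request; on success BNX2FC_FLAG_SRR_SENT is set so the SRR
 * state is tracked on the original IO.
 */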
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;
	orig_io_req->srr_offset = offset;
	orig_io_req->srr_rctl = r_ctl;

	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
				 bnx2fc_srr_compl, cb_arg,
				 r_a_tov);
srr_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	} else
		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

	return rc;
}

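/*
 * bnx2fc_initiate_els - common ELS transmit path. Allocates a middle-path
 * (MP) request, copies the ELS payload and FC header into it, initializes
 * the task context, optionally arms the ELS timer and queues the request on
 * the connection's send queue before ringing the doorbell. Called without
 * tgt_lock held; returns 0 on success or a negative errno.
 */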
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			void *data, u32 data_len,
			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
	     (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				   FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				   FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}

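/*
 * bnx2fc_process_els_compl - called from the CQ handler when an ELS task
 * completes. Cancels the ELS timer, pulls the response FC header and payload
 * length out of the task context, invokes the per-ELS callback and drops the
 * command reference.
 */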
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x "
			"cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			   "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}

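/*
 * bnx2fc_flogi_resp - FLOGI/FDISC response handler wrapped around libfc's
 * fc_lport_flogi_resp(). Lets the FCoE controller learn the granted MAC
 * address, or process the FLOGI response itself when no MAC was granted,
 * before passing the frame on to libfc.
 */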
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;
	u8 op;

	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		op = fc_frame_payload_op(fp);
		if (lport->vport) {
			if (op == ELS_LS_RJT) {
				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
				fc_vport_terminate(lport->vport);
				fc_frame_free(fp);
				return;
			}
		}
		fcoe_ctlr_recv_flogi(fip, lport, fp);
	}
	if (!is_zero_ether_addr(mac))
		fip->update_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}

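/*
 * bnx2fc_logo_resp - fabric LOGO response handler; clears the MAC address
 * programmed in the FCoE controller before handing the response to libfc's
 * fc_lport_logo_resp().
 */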
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	static u8 zero_mac[ETH_ALEN] = { 0 };

	if (!IS_ERR(fp))
		fip->update_mac(lport, zero_mac);
	fc_lport_logo_resp(seq, fp, lport);
}

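/*
 * bnx2fc_elsct_send - lport hook for ELS/CT sends. Intercepts FLOGI/FDISC
 * and fabric LOGO so their responses go through the FIP-aware handlers
 * above; everything else goes straight to fc_elsct_send() with the caller's
 * response handler.
 */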
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
				      struct fc_frame *fp, unsigned int op,
				      void (*resp)(struct fc_seq *,
						   struct fc_frame *,
						   void *),
				      void *arg, u32 timeout)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (op) {
	case ELS_FLOGI:
	case ELS_FDISC:
		return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
				     fip, timeout);
	case ELS_LOGO:
		/* only hook onto fabric logouts, not port logouts */
		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
			break;
		return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
				     fip, timeout);
	}
	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}