1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26
27#include <scsi/scsi.h>
28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h>
31
32#include "lpfc_hw4.h"
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_sli4.h"
36#include "lpfc_nl.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_vport.h"
43#include "lpfc_debugfs.h"
44
45
46/* Called to verify a rcv'ed ADISC was intended for us. */
47static int
48lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49		 struct lpfc_name *nn, struct lpfc_name *pn)
50{
51	/* First, we MUST have a RPI registered */
52	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
53		return 0;
54
55	/* Compare the ADISC rsp WWNN / WWPN matches our internal node
56	 * table entry for that node.
57	 */
58	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
59		return 0;
60
61	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
62		return 0;
63
64	/* we match, return success */
65	return 1;
66}
67
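/* Validate service parameters from a rcv'ed PLOGI / FLOGI payload.
 * Receive data field sizes larger than our own are clamped to the
 * local limits (class-specific sizes only for PLOGI), and the remote
 * WWNN / WWPN are cached in the node table entry.
 * Returns 1 if the parameters are acceptable, 0 otherwise.
 */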
68int
69lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
70		 struct serv_parm *sp, uint32_t class, int flogi)
71{
72	volatile struct serv_parm *hsp = &vport->fc_sparam;
73	uint16_t hsp_value, ssp_value = 0;
74
75	/*
76	 * The receive data field size and buffer-to-buffer receive data field
77	 * size entries are 16 bits but are represented as two 8-bit fields in
78	 * the driver data structure to account for rsvd bits and other control
79	 * bits.  Reconstruct and compare the fields as 16-bit values before
80	 * correcting the byte values.
81	 */
82	if (sp->cls1.classValid) {
83		if (!flogi) {
84			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
85				     hsp->cls1.rcvDataSizeLsb);
86			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
87				     sp->cls1.rcvDataSizeLsb);
88			if (!ssp_value)
89				goto bad_service_param;
90			if (ssp_value > hsp_value) {
91				sp->cls1.rcvDataSizeLsb =
92					hsp->cls1.rcvDataSizeLsb;
93				sp->cls1.rcvDataSizeMsb =
94					hsp->cls1.rcvDataSizeMsb;
95			}
96		}
97	} else if (class == CLASS1)
98		goto bad_service_param;
99	if (sp->cls2.classValid) {
100		if (!flogi) {
101			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
102				     hsp->cls2.rcvDataSizeLsb);
103			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
104				     sp->cls2.rcvDataSizeLsb);
105			if (!ssp_value)
106				goto bad_service_param;
107			if (ssp_value > hsp_value) {
108				sp->cls2.rcvDataSizeLsb =
109					hsp->cls2.rcvDataSizeLsb;
110				sp->cls2.rcvDataSizeMsb =
111					hsp->cls2.rcvDataSizeMsb;
112			}
113		}
114	} else if (class == CLASS2)
115		goto bad_service_param;
116	if (sp->cls3.classValid) {
117		if (!flogi) {
118			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
119				     hsp->cls3.rcvDataSizeLsb);
120			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
121				     sp->cls3.rcvDataSizeLsb);
122			if (!ssp_value)
123				goto bad_service_param;
124			if (ssp_value > hsp_value) {
125				sp->cls3.rcvDataSizeLsb =
126					hsp->cls3.rcvDataSizeLsb;
127				sp->cls3.rcvDataSizeMsb =
128					hsp->cls3.rcvDataSizeMsb;
129			}
130		}
131	} else if (class == CLASS3)
132		goto bad_service_param;
133
134	/*
135	 * Preserve the upper four bits of the MSB from the PLOGI response.
136	 * These bits contain the Buffer-to-Buffer State Change Number
137	 * from the target and need to be passed to the FW.
138	 */
139	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
140	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
141	if (ssp_value > hsp_value) {
142		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
143		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
144				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
145	}
146
147	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
148	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
149	return 1;
150bad_service_param:
151	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
152			 "0207 Device %x "
153			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
154			 "invalid service parameters.  Ignoring device.\n",
155			 ndlp->nlp_DID,
156			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
157			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
158			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
159			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
160	return 0;
161}
162
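/* Return a pointer to the ELS response payload (just past the command
 * word) for a completed ELS IOCB.  If lpfc_els_abort already released
 * the command buffer, force a LOCAL_REJECT/SLI_ABORTED status (if none
 * is set) and return NULL.
 */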
163static void *
164lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
165			struct lpfc_iocbq *rspiocb)
166{
167	struct lpfc_dmabuf *pcmd, *prsp;
168	uint32_t *lp;
169	void     *ptr = NULL;
170	IOCB_t   *irsp;
171
172	irsp = &rspiocb->iocb;
173	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
174
175	/* For lpfc_els_abort, context2 could be zero'ed to delay
176	 * freeing associated memory till after ABTS completes.
177	 */
178	if (pcmd) {
179		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
180				       list);
181		if (prsp) {
182			lp = (uint32_t *) prsp->virt;
183			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
184		}
185	} else {
186		/* Force ulpStatus error since we are returning NULL ptr */
187		if (!(irsp->ulpStatus)) {
188			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
189			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
190		}
191		ptr = NULL;
192	}
193	return ptr;
194}
195
196
197
198/*
199 * Free resources / clean up outstanding I/Os
200 * associated with a LPFC_NODELIST entry. This
201 * routine effectively results in a "software abort".
202 */
203int
204lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
205{
206	LIST_HEAD(abort_list);
207	struct lpfc_sli  *psli = &phba->sli;
208	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
209	struct lpfc_iocbq *iocb, *next_iocb;
210
211	/* Abort outstanding I/O on NPort <nlp_DID> */
212	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
213			 "2819 Abort outstanding I/O on NPort x%x "
214			 "Data: x%x x%x x%x\n",
215			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
216			 ndlp->nlp_rpi);
217	/* Clean up all fabric IOs first. */
218	lpfc_fabric_abort_nport(ndlp);
219
220	/*
221	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
222	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
223	 * txcmplq so that the abort operation completes them successfully.
224	 */
225	spin_lock_irq(&phba->hbalock);
226	if (phba->sli_rev == LPFC_SLI_REV4)
227		spin_lock(&pring->ring_lock);
228	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
229		/* Add to abort_list on NDLP match. */
230		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
231			list_add_tail(&iocb->dlist, &abort_list);
232	}
233	if (phba->sli_rev == LPFC_SLI_REV4)
234		spin_unlock(&pring->ring_lock);
235	spin_unlock_irq(&phba->hbalock);
236
237	/* Abort the targeted IOs and remove them from the abort list. */
238	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
239		spin_lock_irq(&phba->hbalock);
240		list_del_init(&iocb->dlist);
241		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
242		spin_unlock_irq(&phba->hbalock);
243	}
244
245	INIT_LIST_HEAD(&abort_list);
246
247	/* Now process the txq */
248	spin_lock_irq(&phba->hbalock);
249	if (phba->sli_rev == LPFC_SLI_REV4)
250		spin_lock(&pring->ring_lock);
251
252	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
253		/* Check to see if iocb matches the nport we are looking for */
254		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
255			list_del_init(&iocb->list);
256			list_add_tail(&iocb->list, &abort_list);
257		}
258	}
259
260	if (phba->sli_rev == LPFC_SLI_REV4)
261		spin_unlock(&pring->ring_lock);
262	spin_unlock_irq(&phba->hbalock);
263
264	/* Cancel all the IOCBs from the completions list */
265	lpfc_sli_cancel_iocbs(phba, &abort_list,
266			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
267
268	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
269	return 0;
270}
271
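/* Handle a rcv'ed PLOGI.  Validates the WWNs and service parameters,
 * handles pt2pt NPortId assignment, queues a REG_LOGIN mailbox command
 * and responds with an ACC or LS_RJT as appropriate.
 * Returns 1 if the node should be retained, 0 otherwise.
 */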
272static int
273lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
274	       struct lpfc_iocbq *cmdiocb)
275{
276	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
277	struct lpfc_hba    *phba = vport->phba;
278	struct lpfc_dmabuf *pcmd;
279	uint32_t *lp;
280	IOCB_t *icmd;
281	struct serv_parm *sp;
282	LPFC_MBOXQ_t *mbox;
283	struct ls_rjt stat;
284	int rc;
285
286	memset(&stat, 0, sizeof (struct ls_rjt));
287	if (vport->port_state <= LPFC_FDISC) {
288		/* Before responding to PLOGI, check for pt2pt mode.
289		 * If we are pt2pt, with an outstanding FLOGI, abort
290		 * the FLOGI and resend it first.
291		 */
292		if (vport->fc_flag & FC_PT2PT) {
293			lpfc_els_abort_flogi(phba);
294			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
295				/* If the other side is supposed to initiate
296				 * the PLOGI anyway, just ACC it now and
297				 * move on with discovery.
298				 */
299				phba->fc_edtov = FF_DEF_EDTOV;
300				phba->fc_ratov = FF_DEF_RATOV;
301				/* Start discovery - this should just do
302				   CLEAR_LA */
303				lpfc_disc_start(vport);
304			} else
305				lpfc_initial_flogi(vport);
306		} else {
307			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
308			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
309			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
310					    ndlp, NULL);
311			return 0;
312		}
313	}
314	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
315	lp = (uint32_t *) pcmd->virt;
316	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
317	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
318		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
319				 "0140 PLOGI Reject: invalid pname\n");
320		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
321		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
322		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
323			NULL);
324		return 0;
325	}
326	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
327		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
328				 "0141 PLOGI Reject: invalid nname\n");
329		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
330		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
331		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
332			NULL);
333		return 0;
334	}
335	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
336		/* Reject this request because invalid parameters */
337		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
338		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
339		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
340			NULL);
341		return 0;
342	}
343	icmd = &cmdiocb->iocb;
344
345	/* PLOGI chkparm OK */
346	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
347			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
348			 "x%x x%x x%x\n",
349			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
350			 ndlp->nlp_rpi, vport->port_state,
351			 vport->fc_flag);
352
353	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
354		ndlp->nlp_fcp_info |= CLASS2;
355	else
356		ndlp->nlp_fcp_info |= CLASS3;
357
358	ndlp->nlp_class_sup = 0;
359	if (sp->cls1.classValid)
360		ndlp->nlp_class_sup |= FC_COS_CLASS1;
361	if (sp->cls2.classValid)
362		ndlp->nlp_class_sup |= FC_COS_CLASS2;
363	if (sp->cls3.classValid)
364		ndlp->nlp_class_sup |= FC_COS_CLASS3;
365	if (sp->cls4.classValid)
366		ndlp->nlp_class_sup |= FC_COS_CLASS4;
367	ndlp->nlp_maxframe =
368		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
369
370	/* no need to reg_login if we are already in one of these states */
371	switch (ndlp->nlp_state) {
372	case  NLP_STE_NPR_NODE:
373		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
374			break;
375	case  NLP_STE_REG_LOGIN_ISSUE:
376	case  NLP_STE_PRLI_ISSUE:
377	case  NLP_STE_UNMAPPED_NODE:
378	case  NLP_STE_MAPPED_NODE:
379		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
380		return 1;
381	}
382
383	/* Check for Nport to NPort pt2pt protocol */
384	if ((vport->fc_flag & FC_PT2PT) &&
385	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
386
387		/* rcv'ed PLOGI decides what our NPortId will be */
388		vport->fc_myDID = icmd->un.rcvels.parmRo;
389		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
390		if (mbox == NULL)
391			goto out;
392		lpfc_config_link(phba, mbox);
393		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
394		mbox->vport = vport;
395		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
396		if (rc == MBX_NOT_FINISHED) {
397			mempool_free(mbox, phba->mbox_mem_pool);
398			goto out;
399		}
400		/*
401		 * For SLI4, the VFI/VPI are registered AFTER the
402		 * Nport with the higher WWPN sends us a PLOGI with
403		 * our assigned NPortId.
404		 */
405		if (phba->sli_rev == LPFC_SLI_REV4)
406			lpfc_issue_reg_vfi(vport);
407
408		lpfc_can_disctmo(vport);
409	}
410	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
411	if (!mbox)
412		goto out;
413
414	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
415	if (phba->sli_rev == LPFC_SLI_REV4)
416		lpfc_unreg_rpi(vport, ndlp);
417
418	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
419			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
420	if (rc) {
421		mempool_free(mbox, phba->mbox_mem_pool);
422		goto out;
423	}
424
425	/* ACC PLOGI rsp command needs to execute first,
426	 * queue this mbox command to be processed later.
427	 */
428	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
429	/*
430	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
431	 * command issued in lpfc_cmpl_els_acc().
432	 */
433	mbox->vport = vport;
434	spin_lock_irq(shost->host_lock);
435	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
436	spin_unlock_irq(shost->host_lock);
437
438	/*
439	 * If there is an outstanding PLOGI issued, abort it before
440	 * sending ACC rsp for received PLOGI. If pending plogi
441	 * is not canceled here, the plogi will be rejected by
442	 * remote port and will be retried. On a configuration with
443	 * single discovery thread, this will cause a huge delay in
444	 * discovery. Also this will cause multiple state machines
445	 * running in parallel for this node.
446	 */
447	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
448		/* software abort outstanding PLOGI */
449		lpfc_els_abort(phba, ndlp);
450	}
451
452	if ((vport->port_type == LPFC_NPIV_PORT &&
453	     vport->cfg_restrict_login)) {
454
455		/* In order to preserve RPIs, we want to cleanup
456		 * the default RPI the firmware created to rcv
457		 * this ELS request. The only way to do this is
458		 * to register, then unregister the RPI.
459		 */
460		spin_lock_irq(shost->host_lock);
461		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
462		spin_unlock_irq(shost->host_lock);
463		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
464		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
465		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
466			ndlp, mbox);
467		if (rc)
468			mempool_free(mbox, phba->mbox_mem_pool);
469		return 1;
470	}
471	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
472	if (rc)
473		mempool_free(mbox, phba->mbox_mem_pool);
474	return 1;
475out:
476	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
477	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
478	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
479	return 0;
480}
481
482/**
483 * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
484 * @phba: pointer to lpfc hba data structure.
485 * @mboxq: pointer to mailbox object
486 *
487 * This routine is invoked to issue a completion to a rcv'ed
488 * ADISC or PDISC after the paused RPI has been resumed.
489 **/
490static void
491lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
492{
493	struct lpfc_vport *vport;
494	struct lpfc_iocbq *elsiocb;
495	struct lpfc_nodelist *ndlp;
496	uint32_t cmd;
497
498	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
499	ndlp = (struct lpfc_nodelist *) mboxq->context2;
500	vport = mboxq->vport;
501	cmd = elsiocb->drvrTimeout;
502
503	if (cmd == ELS_CMD_ADISC) {
504		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
505	} else {
506		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
507			ndlp, NULL);
508	}
509	kfree(elsiocb);
510	mempool_free(mboxq, phba->mbox_mem_pool);
511}
512
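/* Handle a rcv'ed ADISC or PDISC.  If the WWNs match our cached node
 * entry, ACC it (resuming the RPI first on SLI4) and move the node to
 * MAPPED/UNMAPPED.  Otherwise reject it and put the node in NPR state
 * with a delayed PLOGI retry.
 */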
513static int
514lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
515		struct lpfc_iocbq *cmdiocb)
516{
517	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
518	struct lpfc_iocbq  *elsiocb;
519	struct lpfc_dmabuf *pcmd;
520	struct serv_parm   *sp;
521	struct lpfc_name   *pnn, *ppn;
522	struct ls_rjt stat;
523	ADISC *ap;
524	IOCB_t *icmd;
525	uint32_t *lp;
526	uint32_t cmd;
527
528	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
529	lp = (uint32_t *) pcmd->virt;
530
531	cmd = *lp++;
532	if (cmd == ELS_CMD_ADISC) {
533		ap = (ADISC *) lp;
534		pnn = (struct lpfc_name *) & ap->nodeName;
535		ppn = (struct lpfc_name *) & ap->portName;
536	} else {
537		sp = (struct serv_parm *) lp;
538		pnn = (struct lpfc_name *) & sp->nodeName;
539		ppn = (struct lpfc_name *) & sp->portName;
540	}
541
542	icmd = &cmdiocb->iocb;
543	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
544
545		/*
546		 * As soon as  we send ACC, the remote NPort can
547		 * start sending us data. Thus, for SLI4 we must
548		 * resume the RPI before the ACC goes out.
549		 */
550		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
551			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
552				GFP_KERNEL);
553			if (elsiocb) {
554
555				/* Save info from cmd IOCB used in rsp */
556				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
557					sizeof(struct lpfc_iocbq));
558
559				/* Save the ELS cmd */
560				elsiocb->drvrTimeout = cmd;
561
562				lpfc_sli4_resume_rpi(ndlp,
563					lpfc_mbx_cmpl_resume_rpi, elsiocb);
564				goto out;
565			}
566		}
567
568		if (cmd == ELS_CMD_ADISC) {
569			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
570		} else {
571			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
572				ndlp, NULL);
573		}
574out:
575		/* If we are authenticated, move to the proper state */
576		if (ndlp->nlp_type & NLP_FCP_TARGET)
577			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
578		else
579			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
580
581		return 1;
582	}
583	/* Reject this request because invalid parameters */
584	stat.un.b.lsRjtRsvd0 = 0;
585	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
586	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
587	stat.un.b.vendorUnique = 0;
588	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
589
590	/* 1 sec timeout */
591	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
592
593	spin_lock_irq(shost->host_lock);
594	ndlp->nlp_flag |= NLP_DELAY_TMO;
595	spin_unlock_irq(shost->host_lock);
596	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
597	ndlp->nlp_prev_state = ndlp->nlp_state;
598	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
599	return 0;
600}
601
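/* Handle a rcv'ed LOGO or PRLO.  The request is ACC'd; a fabric LOGO
 * triggers vlink recovery (FDISC or pport rediscovery), while a LOGO
 * from a non-fabric node schedules a delayed PLOGI retry where needed.
 * The node is left in NPR state.
 */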
602static int
603lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
604	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
605{
606	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
607	struct lpfc_hba    *phba = vport->phba;
608	struct lpfc_vport **vports;
609	int i, active_vlink_present = 0;
610
611	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
612	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
613	 * PLOGIs during LOGO storms from a device.
614	 */
615	spin_lock_irq(shost->host_lock);
616	ndlp->nlp_flag |= NLP_LOGO_ACC;
617	spin_unlock_irq(shost->host_lock);
618	if (els_cmd == ELS_CMD_PRLO)
619		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
620	else
621		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
622	if (ndlp->nlp_DID == Fabric_DID) {
623		if (vport->port_state <= LPFC_FDISC)
624			goto out;
625		lpfc_linkdown_port(vport);
626		spin_lock_irq(shost->host_lock);
627		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
628		spin_unlock_irq(shost->host_lock);
629		vports = lpfc_create_vport_work_array(phba);
630		if (vports) {
631			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
632					i++) {
633				if ((!(vports[i]->fc_flag &
634					FC_VPORT_LOGO_RCVD)) &&
635					(vports[i]->port_state > LPFC_FDISC)) {
636					active_vlink_present = 1;
637					break;
638				}
639			}
640			lpfc_destroy_vport_work_array(phba, vports);
641		}
642
643		if (active_vlink_present) {
644			/*
645			 * If there are other active VLinks present,
646			 * re-instantiate the Vlink using FDISC.
647			 */
648			mod_timer(&ndlp->nlp_delayfunc,
649				  jiffies + msecs_to_jiffies(1000));
650			spin_lock_irq(shost->host_lock);
651			ndlp->nlp_flag |= NLP_DELAY_TMO;
652			spin_unlock_irq(shost->host_lock);
653			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
654			vport->port_state = LPFC_FDISC;
655		} else {
656			spin_lock_irq(shost->host_lock);
657			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
658			spin_unlock_irq(shost->host_lock);
659			lpfc_retry_pport_discovery(phba);
660		}
661	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
662		((ndlp->nlp_type & NLP_FCP_TARGET) ||
663		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
664		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
665		/* Only try to re-login if this is NOT a Fabric Node */
666		mod_timer(&ndlp->nlp_delayfunc,
667			  jiffies + msecs_to_jiffies(1000 * 1));
668		spin_lock_irq(shost->host_lock);
669		ndlp->nlp_flag |= NLP_DELAY_TMO;
670		spin_unlock_irq(shost->host_lock);
671
672		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
673	}
674out:
675	ndlp->nlp_prev_state = ndlp->nlp_state;
676	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
677
678	spin_lock_irq(shost->host_lock);
679	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
680	spin_unlock_irq(shost->host_lock);
681	/* The driver has to wait until the ACC completes before it continues
682	 * processing the LOGO.  The action will resume in
683	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
684	 * unreg_login, the driver waits so the ACC does not get aborted.
685	 */
686	return 0;
687}
688
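/* Decode the FCP function bits from a rcv'ed PRLI and update the
 * node's initiator/target type, FCP-2 capability and first-burst
 * support, then propagate any role change to the SCSI transport rport.
 */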
689static void
690lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
691	      struct lpfc_iocbq *cmdiocb)
692{
693	struct lpfc_dmabuf *pcmd;
694	uint32_t *lp;
695	PRLI *npr;
696	struct fc_rport *rport = ndlp->rport;
697	u32 roles;
698
699	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
700	lp = (uint32_t *) pcmd->virt;
701	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
702
703	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
704	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
705	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
706	if (npr->prliType == PRLI_FCP_TYPE) {
707		if (npr->initiatorFunc)
708			ndlp->nlp_type |= NLP_FCP_INITIATOR;
709		if (npr->targetFunc) {
710			ndlp->nlp_type |= NLP_FCP_TARGET;
711			if (npr->writeXferRdyDis)
712				ndlp->nlp_flag |= NLP_FIRSTBURST;
713		}
714		if (npr->Retry)
715			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
716	}
717	if (rport) {
718		/* We need to update the rport role values */
719		roles = FC_RPORT_ROLE_UNKNOWN;
720		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
721			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
722		if (ndlp->nlp_type & NLP_FCP_TARGET)
723			roles |= FC_RPORT_ROLE_FCP_TARGET;
724
725		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
726			"rport rolechg:   role:x%x did:x%x flg:x%x",
727			roles, ndlp->nlp_DID, ndlp->nlp_flag);
728
729		fc_remote_port_rolechg(rport, roles);
730	}
731}
732
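/* Decide whether this node should be re-validated with ADISC on the
 * next discovery cycle.  Returns 1 and sets NLP_NPR_ADISC if so;
 * otherwise clears the flag (unregistering any registered RPI) and
 * returns 0.
 */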
733static uint32_t
734lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
735{
736	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
737
738	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
739		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
740		return 0;
741	}
742
743	if (!(vport->fc_flag & FC_PT2PT)) {
744		/* Check config parameter use-adisc or FCP-2 */
745		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
746		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
747		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
748			spin_lock_irq(shost->host_lock);
749			ndlp->nlp_flag |= NLP_NPR_ADISC;
750			spin_unlock_irq(shost->host_lock);
751			return 1;
752		}
753	}
754	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
755	lpfc_unreg_rpi(vport, ndlp);
756	return 0;
757}
758
759/**
760 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
761 * @phba : Pointer to lpfc_hba structure.
762 * @vport: Pointer to lpfc_vport structure.
763 * @rpi  : rpi to be release.
764 *
765 * This function will send a unreg_login mailbox command to the firmware
766 * to release a rpi.
767 **/
768void
769lpfc_release_rpi(struct lpfc_hba *phba,
770		struct lpfc_vport *vport,
771		uint16_t rpi)
772{
773	LPFC_MBOXQ_t *pmb;
774	int rc;
775
776	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
777			GFP_KERNEL);
778	if (!pmb)
779		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
780			"2796 mailbox memory allocation failed\n");
781	else {
782		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
783		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
784		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
785		if (rc == MBX_NOT_FINISHED)
786			mempool_free(pmb, phba->mbox_mem_pool);
787	}
788}
789
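/* Catch-all handler for state/event combinations that should never
 * occur.  Releases the RPI if a REG_LOGIN happened to complete, logs
 * the illegal transition and leaves the node state unchanged.
 */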
790static uint32_t
791lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
792		  void *arg, uint32_t evt)
793{
794	struct lpfc_hba *phba;
795	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
796	MAILBOX_t *mb;
797	uint16_t rpi;
798
799	phba = vport->phba;
800	/* Release the RPI if reglogin completing */
801	if (!(phba->pport->load_flag & FC_UNLOADING) &&
802		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
803		(!pmb->u.mb.mbxStatus)) {
804		mb = &pmb->u.mb;
805		rpi = pmb->u.mb.un.varWords[0];
806		lpfc_release_rpi(phba, vport, rpi);
807	}
808	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
809			 "0271 Illegal State Transition: node x%x "
810			 "event x%x, state x%x Data: x%x x%x\n",
811			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
812			 ndlp->nlp_flag);
813	return ndlp->nlp_state;
814}
815
816static uint32_t
817lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
818		  void *arg, uint32_t evt)
819{
820	/* This transition is only legal if we previously
821	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
822	 * working on the same NPortID, do nothing for this thread
823	 * to stop it.
824	 */
825	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
826		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
827			 "0272 Illegal State Transition: node x%x "
828			 "event x%x, state x%x Data: x%x x%x\n",
829			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
830			 ndlp->nlp_flag);
831	}
832	return ndlp->nlp_state;
833}
834
835/* Start of Discovery State Machine routines */
836
837static uint32_t
838lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
839			   void *arg, uint32_t evt)
840{
841	struct lpfc_iocbq *cmdiocb;
842
843	cmdiocb = (struct lpfc_iocbq *) arg;
844
845	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
846		return ndlp->nlp_state;
847	}
848	return NLP_STE_FREED_NODE;
849}
850
851static uint32_t
852lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
853			 void *arg, uint32_t evt)
854{
855	lpfc_issue_els_logo(vport, ndlp, 0);
856	return ndlp->nlp_state;
857}
858
859static uint32_t
860lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
861			  void *arg, uint32_t evt)
862{
863	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
864	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
865
866	spin_lock_irq(shost->host_lock);
867	ndlp->nlp_flag |= NLP_LOGO_ACC;
868	spin_unlock_irq(shost->host_lock);
869	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
870
871	return ndlp->nlp_state;
872}
873
874static uint32_t
875lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
876			   void *arg, uint32_t evt)
877{
878	return NLP_STE_FREED_NODE;
879}
880
881static uint32_t
882lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
883			   void *arg, uint32_t evt)
884{
885	return NLP_STE_FREED_NODE;
886}
887
888static uint32_t
889lpfc_device_recov_unused_node(struct lpfc_vport *vport,
890			struct lpfc_nodelist *ndlp,
891			   void *arg, uint32_t evt)
892{
893	return ndlp->nlp_state;
894}
895
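/* A PLOGI was received while our own PLOGI to the same node is still
 * outstanding (PLOGI collision).  The port with the lower WWPN backs
 * off: if ours is not lower we reject the incoming PLOGI, otherwise we
 * accept it and continue discovery.
 */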
896static uint32_t
897lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
898			   void *arg, uint32_t evt)
899{
900	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
901	struct lpfc_hba   *phba = vport->phba;
902	struct lpfc_iocbq *cmdiocb = arg;
903	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
904	uint32_t *lp = (uint32_t *) pcmd->virt;
905	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
906	struct ls_rjt stat;
907	int port_cmp;
908
909	memset(&stat, 0, sizeof (struct ls_rjt));
910
911	/* For a PLOGI, we only accept if our portname is less
912	 * than the remote portname.
913	 */
914	phba->fc_stat.elsLogiCol++;
915	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
916			  sizeof(struct lpfc_name));
917
918	if (port_cmp >= 0) {
919		/* Reject this request because the remote node will accept
920		   ours */
921		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
922		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
923		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
924			NULL);
925	} else {
926		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
927		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
928		    (vport->num_disc_nodes)) {
929			spin_lock_irq(shost->host_lock);
930			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
931			spin_unlock_irq(shost->host_lock);
932			/* Check if there are more PLOGIs to be sent */
933			lpfc_more_plogi(vport);
934			if (vport->num_disc_nodes == 0) {
935				spin_lock_irq(shost->host_lock);
936				vport->fc_flag &= ~FC_NDISC_ACTIVE;
937				spin_unlock_irq(shost->host_lock);
938				lpfc_can_disctmo(vport);
939				lpfc_end_rscn(vport);
940			}
941		}
942	} /* If our portname was less */
943
944	return ndlp->nlp_state;
945}
946
947static uint32_t
948lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
949			  void *arg, uint32_t evt)
950{
951	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
952	struct ls_rjt     stat;
953
954	memset(&stat, 0, sizeof (struct ls_rjt));
955	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
956	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
957	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
958	return ndlp->nlp_state;
959}
960
961static uint32_t
962lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
963			  void *arg, uint32_t evt)
964{
965	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
966
967	/* software abort outstanding PLOGI */
968	lpfc_els_abort(vport->phba, ndlp);
969
970	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
971	return ndlp->nlp_state;
972}
973
974static uint32_t
975lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
976			 void *arg, uint32_t evt)
977{
978	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
979	struct lpfc_hba   *phba = vport->phba;
980	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
981
982	/* software abort outstanding PLOGI */
983	lpfc_els_abort(phba, ndlp);
984
985	if (evt == NLP_EVT_RCV_LOGO) {
986		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
987	} else {
988		lpfc_issue_els_logo(vport, ndlp, 0);
989	}
990
991	/* Put ndlp in npr state and set plogi timer for 1 sec */
992	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
993	spin_lock_irq(shost->host_lock);
994	ndlp->nlp_flag |= NLP_DELAY_TMO;
995	spin_unlock_irq(shost->host_lock);
996	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
997	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
998	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
999
1000	return ndlp->nlp_state;
1001}
1002
1003static uint32_t
1004lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
1005			    struct lpfc_nodelist *ndlp,
1006			    void *arg,
1007			    uint32_t evt)
1008{
1009	struct lpfc_hba    *phba = vport->phba;
1010	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1011	struct lpfc_iocbq  *cmdiocb, *rspiocb;
1012	struct lpfc_dmabuf *pcmd, *prsp, *mp;
1013	uint32_t *lp;
1014	IOCB_t *irsp;
1015	struct serv_parm *sp;
1016	LPFC_MBOXQ_t *mbox;
1017
1018	cmdiocb = (struct lpfc_iocbq *) arg;
1019	rspiocb = cmdiocb->context_un.rsp_iocb;
1020
1021	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1022		/* Recovery from PLOGI collision logic */
1023		return ndlp->nlp_state;
1024	}
1025
1026	irsp = &rspiocb->iocb;
1027
1028	if (irsp->ulpStatus)
1029		goto out;
1030
1031	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1032
1033	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1034	if (!prsp)
1035		goto out;
1036
1037	lp = (uint32_t *) prsp->virt;
1038	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
1039
1040	/* Some switches have FDMI servers returning 0 for WWN */
1041	if ((ndlp->nlp_DID != FDMI_DID) &&
1042		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
1043		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
1044		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1045				 "0142 PLOGI RSP: Invalid WWN.\n");
1046		goto out;
1047	}
1048	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
1049		goto out;
1050	/* PLOGI chkparm OK */
1051	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1052			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1053			 ndlp->nlp_DID, ndlp->nlp_state,
1054			 ndlp->nlp_flag, ndlp->nlp_rpi);
1055	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
1056		ndlp->nlp_fcp_info |= CLASS2;
1057	else
1058		ndlp->nlp_fcp_info |= CLASS3;
1059
1060	ndlp->nlp_class_sup = 0;
1061	if (sp->cls1.classValid)
1062		ndlp->nlp_class_sup |= FC_COS_CLASS1;
1063	if (sp->cls2.classValid)
1064		ndlp->nlp_class_sup |= FC_COS_CLASS2;
1065	if (sp->cls3.classValid)
1066		ndlp->nlp_class_sup |= FC_COS_CLASS3;
1067	if (sp->cls4.classValid)
1068		ndlp->nlp_class_sup |= FC_COS_CLASS4;
1069	ndlp->nlp_maxframe =
1070		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
1071
1072	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1073	if (!mbox) {
1074		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1075			"0133 PLOGI: no memory for reg_login "
1076			"Data: x%x x%x x%x x%x\n",
1077			ndlp->nlp_DID, ndlp->nlp_state,
1078			ndlp->nlp_flag, ndlp->nlp_rpi);
1079		goto out;
1080	}
1081
1082	lpfc_unreg_rpi(vport, ndlp);
1083
1084	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
1085			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
1086		switch (ndlp->nlp_DID) {
1087		case NameServer_DID:
1088			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
1089			break;
1090		case FDMI_DID:
1091			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
1092			break;
1093		default:
1094			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
1095			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1096		}
1097		mbox->context2 = lpfc_nlp_get(ndlp);
1098		mbox->vport = vport;
1099		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
1100		    != MBX_NOT_FINISHED) {
1101			lpfc_nlp_set_state(vport, ndlp,
1102					   NLP_STE_REG_LOGIN_ISSUE);
1103			return ndlp->nlp_state;
1104		}
1105		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1106			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1107		/* decrement node reference count to the failed mbox
1108		 * command
1109		 */
1110		lpfc_nlp_put(ndlp);
1111		mp = (struct lpfc_dmabuf *) mbox->context1;
1112		lpfc_mbuf_free(phba, mp->virt, mp->phys);
1113		kfree(mp);
1114		mempool_free(mbox, phba->mbox_mem_pool);
1115
1116		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1117				 "0134 PLOGI: cannot issue reg_login "
1118				 "Data: x%x x%x x%x x%x\n",
1119				 ndlp->nlp_DID, ndlp->nlp_state,
1120				 ndlp->nlp_flag, ndlp->nlp_rpi);
1121	} else {
1122		mempool_free(mbox, phba->mbox_mem_pool);
1123
1124		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1125				 "0135 PLOGI: cannot format reg_login "
1126				 "Data: x%x x%x x%x x%x\n",
1127				 ndlp->nlp_DID, ndlp->nlp_state,
1128				 ndlp->nlp_flag, ndlp->nlp_rpi);
1129	}
1130
1131
1132out:
1133	if (ndlp->nlp_DID == NameServer_DID) {
1134		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1135		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1136				 "0261 Cannot Register NameServer login\n");
1137	}
1138
1139	/*
1140	** In case the node reference counter does not go to zero, ensure that
1141	** the stale state for the node is not processed.
1142	*/
1143
1144	ndlp->nlp_prev_state = ndlp->nlp_state;
1145	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1146	spin_lock_irq(shost->host_lock);
1147	ndlp->nlp_flag |= NLP_DEFER_RM;
1148	spin_unlock_irq(shost->host_lock);
1149	return NLP_STE_FREED_NODE;
1150}
1151
1152static uint32_t
1153lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1154			   void *arg, uint32_t evt)
1155{
1156	return ndlp->nlp_state;
1157}
1158
1159static uint32_t
1160lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1161	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1162{
1163	struct lpfc_hba *phba;
1164	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1165	MAILBOX_t *mb = &pmb->u.mb;
1166	uint16_t rpi;
1167
1168	phba = vport->phba;
1169	/* Release the RPI */
1170	if (!(phba->pport->load_flag & FC_UNLOADING) &&
1171		!mb->mbxStatus) {
1172		rpi = pmb->u.mb.un.varWords[0];
1173		lpfc_release_rpi(phba, vport, rpi);
1174	}
1175	return ndlp->nlp_state;
1176}
1177
1178static uint32_t
1179lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1180			   void *arg, uint32_t evt)
1181{
1182	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1183
1184	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1185		spin_lock_irq(shost->host_lock);
1186		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1187		spin_unlock_irq(shost->host_lock);
1188		return ndlp->nlp_state;
1189	} else {
1190		/* software abort outstanding PLOGI */
1191		lpfc_els_abort(vport->phba, ndlp);
1192
1193		lpfc_drop_node(vport, ndlp);
1194		return NLP_STE_FREED_NODE;
1195	}
1196}
1197
1198static uint32_t
1199lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1200			      struct lpfc_nodelist *ndlp,
1201			      void *arg,
1202			      uint32_t evt)
1203{
1204	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1205	struct lpfc_hba  *phba = vport->phba;
1206
1207	/* Don't do anything that will mess up processing of the
1208	 * previous RSCN.
1209	 */
1210	if (vport->fc_flag & FC_RSCN_DEFERRED)
1211		return ndlp->nlp_state;
1212
1213	/* software abort outstanding PLOGI */
1214	lpfc_els_abort(phba, ndlp);
1215
1216	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1217	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1218	spin_lock_irq(shost->host_lock);
1219	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1220	spin_unlock_irq(shost->host_lock);
1221
1222	return ndlp->nlp_state;
1223}
1224
1225static uint32_t
1226lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1227			   void *arg, uint32_t evt)
1228{
1229	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1230	struct lpfc_hba   *phba = vport->phba;
1231	struct lpfc_iocbq *cmdiocb;
1232
1233	/* software abort outstanding ADISC */
1234	lpfc_els_abort(phba, ndlp);
1235
1236	cmdiocb = (struct lpfc_iocbq *) arg;
1237
1238	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1239		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1240			spin_lock_irq(shost->host_lock);
1241			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1242			spin_unlock_irq(shost->host_lock);
1243			if (vport->num_disc_nodes)
1244				lpfc_more_adisc(vport);
1245		}
1246		return ndlp->nlp_state;
1247	}
1248	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1249	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1250	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1251
1252	return ndlp->nlp_state;
1253}
1254
1255static uint32_t
1256lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1257			  void *arg, uint32_t evt)
1258{
1259	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1260
1261	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1262	return ndlp->nlp_state;
1263}
1264
1265static uint32_t
1266lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1267			  void *arg, uint32_t evt)
1268{
1269	struct lpfc_hba *phba = vport->phba;
1270	struct lpfc_iocbq *cmdiocb;
1271
1272	cmdiocb = (struct lpfc_iocbq *) arg;
1273
1274	/* software abort outstanding ADISC */
1275	lpfc_els_abort(phba, ndlp);
1276
1277	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1278	return ndlp->nlp_state;
1279}
1280
1281static uint32_t
1282lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1283			    struct lpfc_nodelist *ndlp,
1284			    void *arg, uint32_t evt)
1285{
1286	struct lpfc_iocbq *cmdiocb;
1287
1288	cmdiocb = (struct lpfc_iocbq *) arg;
1289
1290	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1291	return ndlp->nlp_state;
1292}
1293
1294static uint32_t
1295lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1296			  void *arg, uint32_t evt)
1297{
1298	struct lpfc_iocbq *cmdiocb;
1299
1300	cmdiocb = (struct lpfc_iocbq *) arg;
1301
1302	/* Treat like rcv logo */
1303	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1304	return ndlp->nlp_state;
1305}
1306
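/* ADISC completion.  On failure (or a WWN mismatch) the node drops
 * back to NPR with a delayed PLOGI retry and its RPI is unregistered.
 * On success the RPI is resumed (SLI4) and the node moves to MAPPED
 * or UNMAPPED depending on its FCP target role.
 */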
1307static uint32_t
1308lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1309			    struct lpfc_nodelist *ndlp,
1310			    void *arg, uint32_t evt)
1311{
1312	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1313	struct lpfc_hba   *phba = vport->phba;
1314	struct lpfc_iocbq *cmdiocb, *rspiocb;
1315	IOCB_t *irsp;
1316	ADISC *ap;
1317	int rc;
1318
1319	cmdiocb = (struct lpfc_iocbq *) arg;
1320	rspiocb = cmdiocb->context_un.rsp_iocb;
1321
1322	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1323	irsp = &rspiocb->iocb;
1324
1325	if ((irsp->ulpStatus) ||
1326	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1327		/* 1 sec timeout */
1328		mod_timer(&ndlp->nlp_delayfunc,
1329			  jiffies + msecs_to_jiffies(1000));
1330		spin_lock_irq(shost->host_lock);
1331		ndlp->nlp_flag |= NLP_DELAY_TMO;
1332		spin_unlock_irq(shost->host_lock);
1333		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1334
1335		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1336		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1337
1338		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1339		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1340		lpfc_unreg_rpi(vport, ndlp);
1341		return ndlp->nlp_state;
1342	}
1343
1344	if (phba->sli_rev == LPFC_SLI_REV4) {
1345		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1346		if (rc) {
1347			/* Stay in state and retry. */
1348			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1349			return ndlp->nlp_state;
1350		}
1351	}
1352
1353	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1354		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1355		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1356	} else {
1357		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1358		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1359	}
1360
1361	return ndlp->nlp_state;
1362}
1363
1364static uint32_t
1365lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1366			   void *arg, uint32_t evt)
1367{
1368	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1369
1370	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1371		spin_lock_irq(shost->host_lock);
1372		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1373		spin_unlock_irq(shost->host_lock);
1374		return ndlp->nlp_state;
1375	} else {
1376		/* software abort outstanding ADISC */
1377		lpfc_els_abort(vport->phba, ndlp);
1378
1379		lpfc_drop_node(vport, ndlp);
1380		return NLP_STE_FREED_NODE;
1381	}
1382}
1383
1384static uint32_t
1385lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1386			      struct lpfc_nodelist *ndlp,
1387			      void *arg,
1388			      uint32_t evt)
1389{
1390	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1391	struct lpfc_hba  *phba = vport->phba;
1392
1393	/* Don't do anything that will mess up processing of the
1394	 * previous RSCN.
1395	 */
1396	if (vport->fc_flag & FC_RSCN_DEFERRED)
1397		return ndlp->nlp_state;
1398
1399	/* software abort outstanding ADISC */
1400	lpfc_els_abort(phba, ndlp);
1401
1402	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1403	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1404	spin_lock_irq(shost->host_lock);
1405	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1406	spin_unlock_irq(shost->host_lock);
1407	lpfc_disc_set_adisc(vport, ndlp);
1408	return ndlp->nlp_state;
1409}
1410
1411static uint32_t
1412lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1413			      struct lpfc_nodelist *ndlp,
1414			      void *arg,
1415			      uint32_t evt)
1416{
1417	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1418
1419	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1420	return ndlp->nlp_state;
1421}
1422
1423static uint32_t
1424lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1425			     struct lpfc_nodelist *ndlp,
1426			     void *arg,
1427			     uint32_t evt)
1428{
1429	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1430
1431	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1432	return ndlp->nlp_state;
1433}
1434
1435static uint32_t
1436lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1437			     struct lpfc_nodelist *ndlp,
1438			     void *arg,
1439			     uint32_t evt)
1440{
1441	struct lpfc_hba   *phba = vport->phba;
1442	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1443	LPFC_MBOXQ_t	  *mb;
1444	LPFC_MBOXQ_t	  *nextmb;
1445	struct lpfc_dmabuf *mp;
1446
1447	cmdiocb = (struct lpfc_iocbq *) arg;
1448
1449	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1450	if ((mb = phba->sli.mbox_active)) {
1451		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1452		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1453			lpfc_nlp_put(ndlp);
1454			mb->context2 = NULL;
1455			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1456		}
1457	}
1458
1459	spin_lock_irq(&phba->hbalock);
1460	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1461		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1462		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1463			mp = (struct lpfc_dmabuf *) (mb->context1);
1464			if (mp) {
1465				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1466				kfree(mp);
1467			}
1468			lpfc_nlp_put(ndlp);
1469			list_del(&mb->list);
1470			phba->sli.mboxq_cnt--;
1471			mempool_free(mb, phba->mbox_mem_pool);
1472		}
1473	}
1474	spin_unlock_irq(&phba->hbalock);
1475
1476	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1477	return ndlp->nlp_state;
1478}
1479
1480static uint32_t
1481lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1482			       struct lpfc_nodelist *ndlp,
1483			       void *arg,
1484			       uint32_t evt)
1485{
1486	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1487
1488	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1489	return ndlp->nlp_state;
1490}
1491
1492static uint32_t
1493lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1494			     struct lpfc_nodelist *ndlp,
1495			     void *arg,
1496			     uint32_t evt)
1497{
1498	struct lpfc_iocbq *cmdiocb;
1499
1500	cmdiocb = (struct lpfc_iocbq *) arg;
1501	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1502	return ndlp->nlp_state;
1503}
1504
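/* REG_LOGIN mailbox completion.  On failure the node returns to NPR
 * (with a LOGO and delayed PLOGI retry unless the HBA is out of RPIs).
 * On success the RPI is recorded and a PRLI is issued to non-fabric
 * nodes; fabric nodes go straight to UNMAPPED.
 */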
1505static uint32_t
1506lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1507				  struct lpfc_nodelist *ndlp,
1508				  void *arg,
1509				  uint32_t evt)
1510{
1511	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1512	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1513	MAILBOX_t *mb = &pmb->u.mb;
1514	uint32_t did  = mb->un.varWords[1];
1515
1516	if (mb->mbxStatus) {
1517		/* RegLogin failed */
1518		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1519				"0246 RegLogin failed Data: x%x x%x x%x x%x "
1520				 "x%x\n",
1521				 did, mb->mbxStatus, vport->port_state,
1522				 mb->un.varRegLogin.vpi,
1523				 mb->un.varRegLogin.rpi);
1524		/*
1525		 * If RegLogin failed due to lack of HBA resources do not
1526		 * retry discovery.
1527		 */
1528		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1529			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1530			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1531			return ndlp->nlp_state;
1532		}
1533
1534		/* Put ndlp in npr state and set plogi timer for 1 sec */
1535		mod_timer(&ndlp->nlp_delayfunc,
1536			  jiffies + msecs_to_jiffies(1000 * 1));
1537		spin_lock_irq(shost->host_lock);
1538		ndlp->nlp_flag |= NLP_DELAY_TMO;
1539		spin_unlock_irq(shost->host_lock);
1540		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1541
1542		lpfc_issue_els_logo(vport, ndlp, 0);
1543		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1544		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1545		return ndlp->nlp_state;
1546	}
1547
1548	/* SLI4 ports have preallocated logical rpis. */
1549	if (vport->phba->sli_rev < LPFC_SLI_REV4)
1550		ndlp->nlp_rpi = mb->un.varWords[0];
1551
1552	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1553
1554	/* Only if we are not a fabric nport do we issue PRLI */
1555	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1556		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1557		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1558		lpfc_issue_els_prli(vport, ndlp, 0);
1559	} else {
1560		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1561		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1562	}
1563	return ndlp->nlp_state;
1564}
1565
1566static uint32_t
1567lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1568			      struct lpfc_nodelist *ndlp,
1569			      void *arg,
1570			      uint32_t evt)
1571{
1572	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1573
1574	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1575		spin_lock_irq(shost->host_lock);
1576		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1577		spin_unlock_irq(shost->host_lock);
1578		return ndlp->nlp_state;
1579	} else {
1580		lpfc_drop_node(vport, ndlp);
1581		return NLP_STE_FREED_NODE;
1582	}
1583}
1584
1585static uint32_t
1586lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1587				 struct lpfc_nodelist *ndlp,
1588				 void *arg,
1589				 uint32_t evt)
1590{
1591	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1592
1593	/* Don't do anything that will mess up processing of the
1594	 * previous RSCN.
1595	 */
1596	if (vport->fc_flag & FC_RSCN_DEFERRED)
1597		return ndlp->nlp_state;
1598
1599	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1600	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1601	spin_lock_irq(shost->host_lock);
1602	ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1603	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1604	spin_unlock_irq(shost->host_lock);
1605	lpfc_disc_set_adisc(vport, ndlp);
1606	return ndlp->nlp_state;
1607}
1608
1609static uint32_t
1610lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1611			  void *arg, uint32_t evt)
1612{
1613	struct lpfc_iocbq *cmdiocb;
1614
1615	cmdiocb = (struct lpfc_iocbq *) arg;
1616
1617	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1618	return ndlp->nlp_state;
1619}
1620
1621static uint32_t
1622lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1623			 void *arg, uint32_t evt)
1624{
1625	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1626
1627	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1628	return ndlp->nlp_state;
1629}
1630
1631static uint32_t
1632lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1633			 void *arg, uint32_t evt)
1634{
1635	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1636
1637	/* Software abort outstanding PRLI before sending acc */
1638	lpfc_els_abort(vport->phba, ndlp);
1639
1640	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1641	return ndlp->nlp_state;
1642}
1643
1644static uint32_t
1645lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1646			   void *arg, uint32_t evt)
1647{
1648	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1649
1650	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1651	return ndlp->nlp_state;
1652}
1653
1654/* This routine is invoked when we rcv a PRLO request from a nport
1655 * we are logged into.  We should send back a PRLO rsp setting the
1656 * appropriate bits.
1657 * NEXT STATE = PRLI_ISSUE
1658 */
1659static uint32_t
1660lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1661			 void *arg, uint32_t evt)
1662{
1663	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1664
1665	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1666	return ndlp->nlp_state;
1667}
1668
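/* PRLI completion.  Parses the accept payload for initiator/target,
 * FCP-2 and first-burst capability, then moves the node to MAPPED or
 * UNMAPPED.  On restricted NPIV ports, nodes that are not FCP targets
 * are logged out instead.
 */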
1669static uint32_t
1670lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1671			  void *arg, uint32_t evt)
1672{
1673	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1674	struct lpfc_iocbq *cmdiocb, *rspiocb;
1675	struct lpfc_hba   *phba = vport->phba;
1676	IOCB_t *irsp;
1677	PRLI *npr;
1678
1679	cmdiocb = (struct lpfc_iocbq *) arg;
1680	rspiocb = cmdiocb->context_un.rsp_iocb;
1681	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1682
1683	irsp = &rspiocb->iocb;
1684	if (irsp->ulpStatus) {
1685		if ((vport->port_type == LPFC_NPIV_PORT) &&
1686		    vport->cfg_restrict_login) {
1687			goto out;
1688		}
1689		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1690		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1691		return ndlp->nlp_state;
1692	}
1693
1694	/* Check out PRLI rsp */
1695	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1696	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1697	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
1698	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1699	    (npr->prliType == PRLI_FCP_TYPE)) {
1700		if (npr->initiatorFunc)
1701			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1702		if (npr->targetFunc) {
1703			ndlp->nlp_type |= NLP_FCP_TARGET;
1704			if (npr->writeXferRdyDis)
1705				ndlp->nlp_flag |= NLP_FIRSTBURST;
1706		}
1707		if (npr->Retry)
1708			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1709	}
1710	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1711	    (vport->port_type == LPFC_NPIV_PORT) &&
1712	     vport->cfg_restrict_login) {
1713out:
1714		spin_lock_irq(shost->host_lock);
1715		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1716		spin_unlock_irq(shost->host_lock);
1717		lpfc_issue_els_logo(vport, ndlp, 0);
1718
1719		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1720		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1721		return ndlp->nlp_state;
1722	}
1723
1724	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1725	if (ndlp->nlp_type & NLP_FCP_TARGET)
1726		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1727	else
1728		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1729	return ndlp->nlp_state;
1730}
1731
1732/*! lpfc_device_rm_prli_issue
1733 *
1734 * \pre
1735 * \post
1736 * \param   phba
1737 * \param   ndlp
1738 * \param   arg
1739 * \param   evt
1740 * \return  uint32_t
1741 *
1742 * \b Description:
1743 *    This routine is invoked when we get a request to remove a nport we are in the
1744 *    process of PRLIing. We should software abort outstanding prli, unreg
1745 *    login, send a logout. We will change node state to UNUSED_NODE, put it
1746 *    on plogi list so it can be freed when LOGO completes.
1747 *
1748 */
1749
1750static uint32_t
1751lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1752			  void *arg, uint32_t evt)
1753{
1754	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1755
1756	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1757		spin_lock_irq(shost->host_lock);
1758		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1759		spin_unlock_irq(shost->host_lock);
1760		return ndlp->nlp_state;
1761	} else {
1762		/* software abort outstanding PRLI */
1763		lpfc_els_abort(vport->phba, ndlp);
1764
1765		lpfc_drop_node(vport, ndlp);
1766		return NLP_STE_FREED_NODE;
1767	}
1768}
1769
1770
1771/*! lpfc_device_recov_prli_issue
1772 *
1773 * \pre
1774 * \post
1775 * \param   vport
1776 * \param   ndlp
1777 * \param   arg
1778 * \param   evt
1779 * \return  uint32_t
1780 *
1781 * \b Description:
1782 *    This routine is invoked when the state of a device is unknown, such as
1783 *    during a link down. We software abort the outstanding PRLI and move the
1784 *    node back to NPR so it can be rediscovered (via ADISC or PLOGI) once
1785 *    the link recovers.
1786 */
1787static uint32_t
1788lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1789			     struct lpfc_nodelist *ndlp,
1790			     void *arg,
1791			     uint32_t evt)
1792{
1793	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1794	struct lpfc_hba  *phba = vport->phba;
1795
1796	/* Don't do anything that will mess up processing of the
1797	 * previous RSCN.
1798	 */
1799	if (vport->fc_flag & FC_RSCN_DEFERRED)
1800		return ndlp->nlp_state;
1801
1802	/* software abort outstanding PRLI */
1803	lpfc_els_abort(phba, ndlp);
1804
1805	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1806	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1807	spin_lock_irq(shost->host_lock);
1808	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1809	spin_unlock_irq(shost->host_lock);
1810	lpfc_disc_set_adisc(vport, ndlp);
1811	return ndlp->nlp_state;
1812}
1813
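/* While a LOGO is outstanding (LOGO_ISSUE state), incoming PLOGI, PRLI,
 * ADISC, PDISC and PRLO requests are rejected with LSRJT_UNABLE_TPC; a
 * received LOGO is simply ACC'ed, and the LOGO completion drops the node
 * back to NPR.
 */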
1814static uint32_t
1815lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1816			  void *arg, uint32_t evt)
1817{
1818	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1819	struct ls_rjt     stat;
1820
1821	memset(&stat, 0, sizeof(struct ls_rjt));
1822	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1823	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1824	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1825	return ndlp->nlp_state;
1826}
1827
1828static uint32_t
1829lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1830			 void *arg, uint32_t evt)
1831{
1832	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1833	struct ls_rjt     stat;
1834
1835	memset(&stat, 0, sizeof(struct ls_rjt));
1836	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1837	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1838	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1839	return ndlp->nlp_state;
1840}
1841
1842static uint32_t
1843lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1844			 void *arg, uint32_t evt)
1845{
1846	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1847	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1848
1849	spin_lock_irq(shost->host_lock);
1850	ndlp->nlp_flag |= NLP_LOGO_ACC;
1851	spin_unlock_irq(shost->host_lock);
1852	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1853	return ndlp->nlp_state;
1854}
1855
1856static uint32_t
1857lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1858			   void *arg, uint32_t evt)
1859{
1860	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1861	struct ls_rjt     stat;
1862
1863	memset(&stat, 0, sizeof(struct ls_rjt));
1864	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1865	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1866	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1867	return ndlp->nlp_state;
1868}
1869
1870static uint32_t
1871lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1872			 void *arg, uint32_t evt)
1873{
1874	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1875	struct ls_rjt     stat;
1876
1877	memset(&stat, 0, sizeof(struct ls_rjt));
1878	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1879	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1880	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1881	return ndlp->nlp_state;
1882}
1883
1884static uint32_t
1885lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1886			  void *arg, uint32_t evt)
1887{
1888	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1889
1890	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
1891	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1892	spin_lock_irq(shost->host_lock);
1893	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1894	spin_unlock_irq(shost->host_lock);
1895	lpfc_disc_set_adisc(vport, ndlp);
1896	return ndlp->nlp_state;
1897}
1898
1899static uint32_t
1900lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1901			  void *arg, uint32_t evt)
1902{
1903	/*
1904	 * Take no action.  If a LOGO is outstanding, then possibly DevLoss has
1905	 * timed out and is calling for Device Remove.  In this case, the LOGO
1906	 * must be allowed to complete in state LOGO_ISSUE so that the rpi
1907	 * and other NLP flags are correctly cleaned up.
1908	 */
1909	return ndlp->nlp_state;
1910}
1911
1912static uint32_t
1913lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
1914			     struct lpfc_nodelist *ndlp,
1915			     void *arg, uint32_t evt)
1916{
1917	/*
1918	 * Device Recovery events have no meaning for a node with a LOGO
1919	 * outstanding.  The LOGO has to complete first and handle the
1920	 * node from that point.
1921	 */
1922	return ndlp->nlp_state;
1923}
1924
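/* UNMAPPED_NODE receive handlers: delegate to the common lpfc_rcv_* helpers
 * and/or send the appropriate ACC, leaving the node state unchanged.
 */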
1925static uint32_t
1926lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1927			  void *arg, uint32_t evt)
1928{
1929	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1930
1931	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1932	return ndlp->nlp_state;
1933}
1934
1935static uint32_t
1936lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1937			 void *arg, uint32_t evt)
1938{
1939	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1940
1941	lpfc_rcv_prli(vport, ndlp, cmdiocb);
1942	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1943	return ndlp->nlp_state;
1944}
1945
1946static uint32_t
1947lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1948			 void *arg, uint32_t evt)
1949{
1950	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1951
1952	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1953	return ndlp->nlp_state;
1954}
1955
1956static uint32_t
1957lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1958			   void *arg, uint32_t evt)
1959{
1960	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1961
1962	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1963	return ndlp->nlp_state;
1964}
1965
1966static uint32_t
1967lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1968			 void *arg, uint32_t evt)
1969{
1970	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1971
1972	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1973	return ndlp->nlp_state;
1974}
1975
1976static uint32_t
1977lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1978			     struct lpfc_nodelist *ndlp,
1979			     void *arg,
1980			     uint32_t evt)
1981{
1982	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1983
1984	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1985	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1986	spin_lock_irq(shost->host_lock);
1987	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1988	spin_unlock_irq(shost->host_lock);
1989	lpfc_disc_set_adisc(vport, ndlp);
1990
1991	return ndlp->nlp_state;
1992}
1993
1994static uint32_t
1995lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1996			   void *arg, uint32_t evt)
1997{
1998	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1999
2000	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2001	return ndlp->nlp_state;
2002}
2003
2004static uint32_t
2005lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2006			  void *arg, uint32_t evt)
2007{
2008	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2009
2010	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2011	return ndlp->nlp_state;
2012}
2013
2014static uint32_t
2015lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2016			  void *arg, uint32_t evt)
2017{
2018	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2019
2020	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2021	return ndlp->nlp_state;
2022}
2023
2024static uint32_t
2025lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
2026			    struct lpfc_nodelist *ndlp,
2027			    void *arg, uint32_t evt)
2028{
2029	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2030
2031	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2032	return ndlp->nlp_state;
2033}
2034
2035static uint32_t
2036lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2037			  void *arg, uint32_t evt)
2038{
2039	struct lpfc_hba  *phba = vport->phba;
2040	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2041
2042	/* flush the target */
2043	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2044			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
2045
2046	/* Treat like rcv logo */
2047	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
2048	return ndlp->nlp_state;
2049}
2050
2051static uint32_t
2052lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
2053			      struct lpfc_nodelist *ndlp,
2054			      void *arg,
2055			      uint32_t evt)
2056{
2057	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2058
2059	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
2060	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2061	spin_lock_irq(shost->host_lock);
2062	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2063	spin_unlock_irq(shost->host_lock);
2064	lpfc_disc_set_adisc(vport, ndlp);
2065	return ndlp->nlp_state;
2066}
2067
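/* NPR_NODE receive handlers: an incoming PLOGI, PRLI, ADISC or PDISC may
 * restart discovery for the node (ADISC if NLP_NPR_ADISC indicates the old
 * login is still usable, otherwise a fresh PLOGI), provided no delayed
 * retry or discovery is already pending for it.
 */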
2068static uint32_t
2069lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2070			void *arg, uint32_t evt)
2071{
2072	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2073	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
2074
2075	/* Ignore PLOGI if we have an outstanding LOGO */
2076	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
2077		return ndlp->nlp_state;
2078	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
2079		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2080		spin_lock_irq(shost->host_lock);
2081		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
2082		spin_unlock_irq(shost->host_lock);
2083	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2084		/* send PLOGI immediately, move to PLOGI issue state */
2085		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2086			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2087			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2088			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2089		}
2090	}
2091	return ndlp->nlp_state;
2092}
2093
2094static uint32_t
2095lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2096		       void *arg, uint32_t evt)
2097{
2098	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2099	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2100	struct ls_rjt     stat;
2101
2102	memset(&stat, 0, sizeof (struct ls_rjt));
2103	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2104	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2105	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2106
2107	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2108		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2109			spin_lock_irq(shost->host_lock);
2110			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2111			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2112			spin_unlock_irq(shost->host_lock);
2113			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2114			lpfc_issue_els_adisc(vport, ndlp, 0);
2115		} else {
2116			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2117			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2118			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2119		}
2120	}
2121	return ndlp->nlp_state;
2122}
2123
2124static uint32_t
2125lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
2126		       void *arg, uint32_t evt)
2127{
2128	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2129
2130	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2131	return ndlp->nlp_state;
2132}
2133
2134static uint32_t
2135lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2136			 void *arg, uint32_t evt)
2137{
2138	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2139
2140	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2141	/*
2142	 * Do not start discovery if discovery is about to start
2143	 * or discovery is in progress for this node. Starting discovery
2144	 * here will affect the counting of discovery threads.
2145	 */
2146	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
2147	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2148		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2149			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2150			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2151			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2152			lpfc_issue_els_adisc(vport, ndlp, 0);
2153		} else {
2154			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2155			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2156			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2157		}
2158	}
2159	return ndlp->nlp_state;
2160}
2161
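/* PRLO received in NPR_NODE: ACC the PRLO and, unless a delayed retry is
 * already armed, start the one second delay timer so a fresh PLOGI is
 * issued after the remote port finishes its PRLO processing.
 */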
2162static uint32_t
2163lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2164		       void *arg, uint32_t evt)
2165{
2166	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2167	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2168
2169	spin_lock_irq(shost->host_lock);
2170	ndlp->nlp_flag |= NLP_LOGO_ACC;
2171	spin_unlock_irq(shost->host_lock);
2172
2173	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2174
2175	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2176		mod_timer(&ndlp->nlp_delayfunc,
2177			  jiffies + msecs_to_jiffies(1000 * 1));
2178		spin_lock_irq(shost->host_lock);
2179		ndlp->nlp_flag |= NLP_DELAY_TMO;
2180		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2181		spin_unlock_irq(shost->host_lock);
2182		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
2183	} else {
2184		spin_lock_irq(shost->host_lock);
2185		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2186		spin_unlock_irq(shost->host_lock);
2187	}
2188	return ndlp->nlp_state;
2189}
2190
2191static uint32_t
2192lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2193			 void *arg, uint32_t evt)
2194{
2195	struct lpfc_iocbq *cmdiocb, *rspiocb;
2196	IOCB_t *irsp;
2197	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2198
2199	cmdiocb = (struct lpfc_iocbq *) arg;
2200	rspiocb = cmdiocb->context_un.rsp_iocb;
2201
2202	irsp = &rspiocb->iocb;
2203	if (irsp->ulpStatus) {
2204		spin_lock_irq(shost->host_lock);
2205		ndlp->nlp_flag |= NLP_DEFER_RM;
2206		spin_unlock_irq(shost->host_lock);
2207		return NLP_STE_FREED_NODE;
2208	}
2209	return ndlp->nlp_state;
2210}
2211
2212static uint32_t
2213lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2214			void *arg, uint32_t evt)
2215{
2216	struct lpfc_iocbq *cmdiocb, *rspiocb;
2217	IOCB_t *irsp;
2218
2219	cmdiocb = (struct lpfc_iocbq *) arg;
2220	rspiocb = cmdiocb->context_un.rsp_iocb;
2221
2222	irsp = &rspiocb->iocb;
2223	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2224		lpfc_drop_node(vport, ndlp);
2225		return NLP_STE_FREED_NODE;
2226	}
2227	return ndlp->nlp_state;
2228}
2229
2230static uint32_t
2231lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2232			void *arg, uint32_t evt)
2233{
2234	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2235
2236	/* For the fabric port just clear the fc flags. */
2237	if (ndlp->nlp_DID == Fabric_DID) {
2238		spin_lock_irq(shost->host_lock);
2239		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2240		spin_unlock_irq(shost->host_lock);
2241	}
2242	lpfc_unreg_rpi(vport, ndlp);
2243	return ndlp->nlp_state;
2244}
2245
2246static uint32_t
2247lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2248			 void *arg, uint32_t evt)
2249{
2250	struct lpfc_iocbq *cmdiocb, *rspiocb;
2251	IOCB_t *irsp;
2252
2253	cmdiocb = (struct lpfc_iocbq *) arg;
2254	rspiocb = cmdiocb->context_un.rsp_iocb;
2255
2256	irsp = &rspiocb->iocb;
2257	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2258		lpfc_drop_node(vport, ndlp);
2259		return NLP_STE_FREED_NODE;
2260	}
2261	return ndlp->nlp_state;
2262}
2263
2264static uint32_t
2265lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2266			    struct lpfc_nodelist *ndlp,
2267			    void *arg, uint32_t evt)
2268{
2269	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2270	MAILBOX_t    *mb = &pmb->u.mb;
2271
2272	if (!mb->mbxStatus) {
2273		/* SLI4 ports have preallocated logical rpis. */
2274		if (vport->phba->sli_rev < LPFC_SLI_REV4)
2275			ndlp->nlp_rpi = mb->un.varWords[0];
2276		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2277	} else {
2278		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2279			lpfc_drop_node(vport, ndlp);
2280			return NLP_STE_FREED_NODE;
2281		}
2282	}
2283	return ndlp->nlp_state;
2284}
2285
2286static uint32_t
2287lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2288			void *arg, uint32_t evt)
2289{
2290	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2291
2292	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2293		spin_lock_irq(shost->host_lock);
2294		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2295		spin_unlock_irq(shost->host_lock);
2296		return ndlp->nlp_state;
2297	}
2298	lpfc_drop_node(vport, ndlp);
2299	return NLP_STE_FREED_NODE;
2300}
2301
2302static uint32_t
2303lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2304			   void *arg, uint32_t evt)
2305{
2306	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2307
2308	/* Don't do anything that will mess up processing of the
2309	 * previous RSCN.
2310	 */
2311	if (vport->fc_flag & FC_RSCN_DEFERRED)
2312		return ndlp->nlp_state;
2313
2314	lpfc_cancel_retry_delay_tmo(vport, ndlp);
2315	spin_lock_irq(shost->host_lock);
2316	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2317	spin_unlock_irq(shost->host_lock);
2318	return ndlp->nlp_state;
2319}
2320
2321
2322/* This next section defines the NPort Discovery State Machine */
2323
2324/* There are 4 different double linked lists nodelist entries can reside on.
2325 * The plogi list and adisc list are used when Link Up discovery or RSCN
2326 * processing is needed. Each list holds the nodes that we will send PLOGI
2327 * or ADISC on. These lists will keep track of which nodes will be affected
2328 * by an RSCN, or a Link Up (typically, all nodes are affected on Link Up).
2329 * The unmapped_list will contain all nodes that we have successfully logged
2330 * into at the Fibre Channel level. The mapped_list will contain all nodes
2331 * that are mapped FCP targets.
2332 */
2333/*
2334 * The bind list is a list of undiscovered (potentially non-existent) nodes
2335 * that we have saved binding information on. This information is used when
2336 * nodes transition from the unmapped to the mapped list.
2337 */
2338/* For UNUSED_NODE state, the node has just been allocated.
2339 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2340 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2341 * and put on the unmapped list. For ADISC processing, the node is taken off
2342 * the ADISC list and placed on either the mapped or unmapped list (depending
2343 * on its previous state). Once on the unmapped list, a PRLI is issued and the
2344 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2345 * changed to UNMAPPED_NODE. If the completion indicates a mapped
2346 * node, the node is taken off the unmapped list. The binding list is checked
2347 * for a valid binding, or a binding is automatically assigned. If binding
2348 * assignment is unsuccessful, the node is left on the unmapped list. If
2349 * binding assignment is successful, the associated binding list entry (if
2350 * any) is removed, and the node is placed on the mapped list.
2351 */
2352/*
2353 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2354 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2355 * expire, all affected nodes will receive a DEVICE_RM event.
2356 */
2357/*
2358 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2359 * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2360 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2361 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2362 * we will first process the ADISC list.  32 entries are processed initially and
2363 * ADISC is initited for each one.  Completions / Events for each node are
2364 * funnelled thru the state machine.  As each node finishes ADISC processing, it
2365 * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2366 * waiting, and the ADISC list count is identically 0, then we are done. For
2367 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2368 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
2369 * list.  32 entries are processed initially and PLOGI is initited for each one.
2370 * Completions / Events for each node are funnelled thru the state machine.  As
2371 * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2372 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2373 * indentically 0, then we are done. We have now completed discovery / RSCN
2374 * handling. Upon completion, ALL nodes should be on either the mapped or
2375 * unmapped lists.
2376 */
2377
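/* lpfc_disc_action[] is the flattened [state][event] jump table for the
 * discovery state machine: NLP_STE_MAX_STATE groups of NLP_EVT_MAX_EVENT
 * entries, indexed as (state * NLP_EVT_MAX_EVENT) + event by
 * lpfc_disc_state_machine() below.
 */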
2378static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2379     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2380	/* Action routine                  Event       Current State  */
2381	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
2382	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
2383	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
2384	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
2385	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
2386	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
2387	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2388	lpfc_disc_illegal,		/* CMPL_PRLI       */
2389	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
2390	lpfc_disc_illegal,		/* CMPL_ADISC      */
2391	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2392	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
2393	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
2394
2395	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
2396	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
2397	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
2398	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
2399	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
2400	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
2401	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
2402	lpfc_disc_illegal,		/* CMPL_PRLI       */
2403	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
2404	lpfc_disc_illegal,		/* CMPL_ADISC      */
2405	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
2406	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
2407	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
2408
2409	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
2410	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
2411	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
2412	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
2413	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
2414	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
2415	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2416	lpfc_disc_illegal,		/* CMPL_PRLI       */
2417	lpfc_disc_illegal,		/* CMPL_LOGO       */
2418	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
2419	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2420	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
2421	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
2422
2423	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
2424	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
2425	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
2426	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
2427	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
2428	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
2429	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2430	lpfc_disc_illegal,		/* CMPL_PRLI       */
2431	lpfc_disc_illegal,		/* CMPL_LOGO       */
2432	lpfc_disc_illegal,		/* CMPL_ADISC      */
2433	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
2434	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
2435	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2436
2437	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
2438	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
2439	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
2440	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
2441	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
2442	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
2443	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2444	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
2445	lpfc_disc_illegal,		/* CMPL_LOGO       */
2446	lpfc_disc_illegal,		/* CMPL_ADISC      */
2447	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2448	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
2449	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
2450
2451	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
2452	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
2453	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
2454	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
2455	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
2456	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
2457	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2458	lpfc_disc_illegal,		/* CMPL_PRLI       */
2459	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
2460	lpfc_disc_illegal,		/* CMPL_ADISC      */
2461	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2462	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
2463	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */
2464
2465	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
2466	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
2467	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
2468	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
2469	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
2470	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
2471	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2472	lpfc_disc_illegal,		/* CMPL_PRLI       */
2473	lpfc_disc_illegal,		/* CMPL_LOGO       */
2474	lpfc_disc_illegal,		/* CMPL_ADISC      */
2475	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2476	lpfc_disc_illegal,		/* DEVICE_RM       */
2477	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
2478
2479	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
2480	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
2481	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
2482	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
2483	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
2484	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
2485	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2486	lpfc_disc_illegal,		/* CMPL_PRLI       */
2487	lpfc_disc_illegal,		/* CMPL_LOGO       */
2488	lpfc_disc_illegal,		/* CMPL_ADISC      */
2489	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2490	lpfc_disc_illegal,		/* DEVICE_RM       */
2491	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
2492
2493	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
2494	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
2495	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
2496	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
2497	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
2498	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
2499	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
2500	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
2501	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
2502	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
2503	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
2504	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
2505	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
2506};
2507
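/* Entry point for the discovery state machine.  A minimal usage sketch
 * (illustrative only; the real callers live elsewhere in the driver), for
 * example when an unsolicited PLOGI is received for ndlp:
 *
 *	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 *
 * The arg is event specific (a struct lpfc_iocbq for ELS events, an
 * LPFC_MBOXQ_t for mailbox completions) and the return value is the node's
 * resulting state, or NLP_STE_FREED_NODE if the node was released.
 */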
2508int
2509lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2510			void *arg, uint32_t evt)
2511{
2512	uint32_t cur_state, rc;
2513	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2514			 uint32_t);
2515	uint32_t got_ndlp = 0;
2516
2517	if (lpfc_nlp_get(ndlp))
2518		got_ndlp = 1;
2519
2520	cur_state = ndlp->nlp_state;
2521
2522	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2523	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2524			 "0211 DSM in event x%x on NPort x%x in "
2525			 "state %d Data: x%x\n",
2526			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2527
2528	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2529		 "DSM in:          evt:%d ste:%d did:x%x",
2530		evt, cur_state, ndlp->nlp_DID);
2531
2532	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2533	rc = (func) (vport, ndlp, arg, evt);
2534
2535	/* DSM out state <rc> on NPort <nlp_DID> */
2536	if (got_ndlp) {
2537		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2538			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2539			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2540
2541		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2542			"DSM out:         ste:%d did:x%x flg:x%x",
2543			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2544		/* Decrement the ndlp reference count held for this function */
2545		lpfc_nlp_put(ndlp);
2546	} else {
2547		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2548			"0213 DSM out state %d on NPort free\n", rc);
2549
2550		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2551			"DSM out:         ste:%d did:x%x flg:x%x",
2552			rc, 0, 0);
2553	}
2554
2555	return rc;
2556}
2557