[go: nahoru, domu]

1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include <linux/bitops.h>
19#include <linux/netdevice.h>
20#include <linux/skbuff.h>
21#include <linux/etherdevice.h>
22#include <linux/in.h>
23#include <linux/ethtool.h>
24#include <linux/if_vlan.h>
25#include <linux/if_ether.h>
26#include <linux/ip.h>
27#include <linux/prefetch.h>
28#include <linux/module.h>
29
30#include "bnad.h"
31#include "bna.h"
32#include "cna.h"
33
34static DEFINE_MUTEX(bnad_fwimg_mutex);
35
36/*
37 * Module params
38 */
39static uint bnad_msix_disable;
40module_param(bnad_msix_disable, uint, 0444);
41MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42
43static uint bnad_ioc_auto_recover = 1;
44module_param(bnad_ioc_auto_recover, uint, 0444);
45MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46
47static uint bna_debugfs_enable = 1;
48module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50		 " Range[false:0|true:1]");
51
52/*
53 * Global variables
54 */
55static u32 bnad_rxqs_per_cq = 2;
56static u32 bna_id;
57static struct mutex bnad_list_mutex;
58static LIST_HEAD(bnad_list);
59static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
60
61/*
62 * Local MACROS
63 */
64#define BNAD_GET_MBOX_IRQ(_bnad)				\
65	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
66	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
67	 ((_bnad)->pcidev->irq))
68
69#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
70do {								\
71	(_res_info)->res_type = BNA_RES_T_MEM;			\
72	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
73	(_res_info)->res_u.mem_info.num = (_num);		\
74	(_res_info)->res_u.mem_info.len = (_size);		\
75} while (0)
76
77static void
78bnad_add_to_list(struct bnad *bnad)
79{
80	mutex_lock(&bnad_list_mutex);
81	list_add_tail(&bnad->list_entry, &bnad_list);
82	bnad->id = bna_id++;
83	mutex_unlock(&bnad_list_mutex);
84}
85
86static void
87bnad_remove_from_list(struct bnad *bnad)
88{
89	mutex_lock(&bnad_list_mutex);
90	list_del(&bnad->list_entry);
91	mutex_unlock(&bnad_list_mutex);
92}
93
94/*
95 * Reinitialize completions in CQ, once Rx is taken down
96 */
97static void
98bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
99{
100	struct bna_cq_entry *cmpl;
101	int i;
102
103	for (i = 0; i < ccb->q_depth; i++) {
104		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
105		cmpl->valid = 0;
106	}
107}
108
109/* Tx Datapath functions */
110
111
112/* Caller should ensure that the entry at unmap_q[index] is valid */
113static u32
114bnad_tx_buff_unmap(struct bnad *bnad,
115			      struct bnad_tx_unmap *unmap_q,
116			      u32 q_depth, u32 index)
117{
118	struct bnad_tx_unmap *unmap;
119	struct sk_buff *skb;
120	int vector, nvecs;
121
122	unmap = &unmap_q[index];
123	nvecs = unmap->nvecs;
124
125	skb = unmap->skb;
126	unmap->skb = NULL;
127	unmap->nvecs = 0;
128	dma_unmap_single(&bnad->pcidev->dev,
129		dma_unmap_addr(&unmap->vectors[0], dma_addr),
130		skb_headlen(skb), DMA_TO_DEVICE);
131	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
132	nvecs--;
133
134	vector = 0;
135	while (nvecs) {
136		vector++;
137		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
138			vector = 0;
139			BNA_QE_INDX_INC(index, q_depth);
140			unmap = &unmap_q[index];
141		}
142
143		dma_unmap_page(&bnad->pcidev->dev,
144			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
145			dma_unmap_len(&unmap->vectors[vector], dma_len),
146			DMA_TO_DEVICE);
147		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
148		nvecs--;
149	}
150
151	BNA_QE_INDX_INC(index, q_depth);
152
153	return index;
154}
155
156/*
157 * Frees all pending Tx Bufs
158 * At this point no activity is expected on the Q,
159 * so DMA unmap & freeing is fine.
160 */
161static void
162bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
163{
164	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
165	struct sk_buff *skb;
166	int i;
167
168	for (i = 0; i < tcb->q_depth; i++) {
169		skb = unmap_q[i].skb;
170		if (!skb)
171			continue;
172		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
173
174		dev_kfree_skb_any(skb);
175	}
176}
177
178/*
179 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
180 * Can be called in a) Interrupt context
181 *		    b) Sending context
182 */
183static u32
184bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
185{
186	u32 sent_packets = 0, sent_bytes = 0;
187	u32 wis, unmap_wis, hw_cons, cons, q_depth;
188	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
189	struct bnad_tx_unmap *unmap;
190	struct sk_buff *skb;
191
192	/* Just return if TX is stopped */
193	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
194		return 0;
195
196	hw_cons = *(tcb->hw_consumer_index);
197	cons = tcb->consumer_index;
198	q_depth = tcb->q_depth;
199
200	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
201	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
202
203	while (wis) {
204		unmap = &unmap_q[cons];
205
206		skb = unmap->skb;
207
208		sent_packets++;
209		sent_bytes += skb->len;
210
211		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
212		wis -= unmap_wis;
213
214		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
215		dev_kfree_skb_any(skb);
216	}
217
218	/* Update consumer pointers. */
219	tcb->consumer_index = hw_cons;
220
221	tcb->txq->tx_packets += sent_packets;
222	tcb->txq->tx_bytes += sent_bytes;
223
224	return sent_packets;
225}
226
227static u32
228bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
229{
230	struct net_device *netdev = bnad->netdev;
231	u32 sent = 0;
232
233	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
234		return 0;
235
236	sent = bnad_txcmpl_process(bnad, tcb);
237	if (sent) {
238		if (netif_queue_stopped(netdev) &&
239		    netif_carrier_ok(netdev) &&
240		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
241				    BNAD_NETIF_WAKE_THRESHOLD) {
242			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
243				netif_wake_queue(netdev);
244				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
245			}
246		}
247	}
248
249	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
250		bna_ib_ack(tcb->i_dbell, sent);
251
252	smp_mb__before_atomic();
253	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
254
255	return sent;
256}
257
258/* MSIX Tx Completion Handler */
259static irqreturn_t
260bnad_msix_tx(int irq, void *data)
261{
262	struct bna_tcb *tcb = (struct bna_tcb *)data;
263	struct bnad *bnad = tcb->bnad;
264
265	bnad_tx_complete(bnad, tcb);
266
267	return IRQ_HANDLED;
268}
269
270static inline void
271bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
272{
273	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
274
275	unmap_q->reuse_pi = -1;
276	unmap_q->alloc_order = -1;
277	unmap_q->map_size = 0;
278	unmap_q->type = BNAD_RXBUF_NONE;
279}
280
281/* Default is page-based allocation. Multi-buffer support - TBD */
282static int
283bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
284{
285	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
286	int order;
287
288	bnad_rxq_alloc_uninit(bnad, rcb);
289
290	order = get_order(rcb->rxq->buffer_size);
291
292	unmap_q->type = BNAD_RXBUF_PAGE;
293
294	if (bna_is_small_rxq(rcb->id)) {
295		unmap_q->alloc_order = 0;
296		unmap_q->map_size = rcb->rxq->buffer_size;
297	} else {
298		if (rcb->rxq->multi_buffer) {
299			unmap_q->alloc_order = 0;
300			unmap_q->map_size = rcb->rxq->buffer_size;
301			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
302		} else {
303			unmap_q->alloc_order = order;
304			unmap_q->map_size =
305				(rcb->rxq->buffer_size > 2048) ?
306				PAGE_SIZE << order : 2048;
307		}
308	}
309
310	BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
311
312	return 0;
313}
314
315static inline void
316bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
317{
318	if (!unmap->page)
319		return;
320
321	dma_unmap_page(&bnad->pcidev->dev,
322			dma_unmap_addr(&unmap->vector, dma_addr),
323			unmap->vector.len, DMA_FROM_DEVICE);
324	put_page(unmap->page);
325	unmap->page = NULL;
326	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
327	unmap->vector.len = 0;
328}
329
330static inline void
331bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
332{
333	if (!unmap->skb)
334		return;
335
336	dma_unmap_single(&bnad->pcidev->dev,
337			dma_unmap_addr(&unmap->vector, dma_addr),
338			unmap->vector.len, DMA_FROM_DEVICE);
339	dev_kfree_skb_any(unmap->skb);
340	unmap->skb = NULL;
341	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
342	unmap->vector.len = 0;
343}
344
345static void
346bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
347{
348	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
349	int i;
350
351	for (i = 0; i < rcb->q_depth; i++) {
352		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
353
354		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
355			bnad_rxq_cleanup_skb(bnad, unmap);
356		else
357			bnad_rxq_cleanup_page(bnad, unmap);
358	}
359	bnad_rxq_alloc_uninit(bnad, rcb);
360}
361
362static u32
363bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
364{
365	u32 alloced, prod, q_depth;
366	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
367	struct bnad_rx_unmap *unmap, *prev;
368	struct bna_rxq_entry *rxent;
369	struct page *page;
370	u32 page_offset, alloc_size;
371	dma_addr_t dma_addr;
372
373	prod = rcb->producer_index;
374	q_depth = rcb->q_depth;
375
376	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
377	alloced = 0;
378
379	while (nalloc--) {
380		unmap = &unmap_q->unmap[prod];
381
382		if (unmap_q->reuse_pi < 0) {
383			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
384					unmap_q->alloc_order);
385			page_offset = 0;
386		} else {
387			prev = &unmap_q->unmap[unmap_q->reuse_pi];
388			page = prev->page;
389			page_offset = prev->page_offset + unmap_q->map_size;
390			get_page(page);
391		}
392
393		if (unlikely(!page)) {
394			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
395			rcb->rxq->rxbuf_alloc_failed++;
396			goto finishing;
397		}
398
399		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
400				unmap_q->map_size, DMA_FROM_DEVICE);
401
402		unmap->page = page;
403		unmap->page_offset = page_offset;
404		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
405		unmap->vector.len = unmap_q->map_size;
406		page_offset += unmap_q->map_size;
407
408		if (page_offset < alloc_size)
409			unmap_q->reuse_pi = prod;
410		else
411			unmap_q->reuse_pi = -1;
412
413		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
414		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
415		BNA_QE_INDX_INC(prod, q_depth);
416		alloced++;
417	}
418
419finishing:
420	if (likely(alloced)) {
421		rcb->producer_index = prod;
422		smp_mb();
423		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
424			bna_rxq_prod_indx_doorbell(rcb);
425	}
426
427	return alloced;
428}
429
430static u32
431bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
432{
433	u32 alloced, prod, q_depth, buff_sz;
434	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
435	struct bnad_rx_unmap *unmap;
436	struct bna_rxq_entry *rxent;
437	struct sk_buff *skb;
438	dma_addr_t dma_addr;
439
440	buff_sz = rcb->rxq->buffer_size;
441	prod = rcb->producer_index;
442	q_depth = rcb->q_depth;
443
444	alloced = 0;
445	while (nalloc--) {
446		unmap = &unmap_q->unmap[prod];
447
448		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
449
450		if (unlikely(!skb)) {
451			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
452			rcb->rxq->rxbuf_alloc_failed++;
453			goto finishing;
454		}
455		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
456					  buff_sz, DMA_FROM_DEVICE);
457
458		unmap->skb = skb;
459		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
460		unmap->vector.len = buff_sz;
461
462		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
463		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
464		BNA_QE_INDX_INC(prod, q_depth);
465		alloced++;
466	}
467
468finishing:
469	if (likely(alloced)) {
470		rcb->producer_index = prod;
471		smp_mb();
472		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
473			bna_rxq_prod_indx_doorbell(rcb);
474	}
475
476	return alloced;
477}
478
479static inline void
480bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
481{
482	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
483	u32 to_alloc;
484
485	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
486	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
487		return;
488
489	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
490		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
491	else
492		bnad_rxq_refill_page(bnad, rcb, to_alloc);
493}
494
495#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
496					BNA_CQ_EF_IPV6 | \
497					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
498					BNA_CQ_EF_L4_CKSUM_OK)
499
500#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
501				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
502#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
503				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
504#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
505				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
506#define flags_udp6 (BNA_CQ_EF_IPV6 | \
507				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
508
509static void
510bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
511		    u32 sop_ci, u32 nvecs)
512{
513	struct bnad_rx_unmap_q *unmap_q;
514	struct bnad_rx_unmap *unmap;
515	u32 ci, vec;
516
517	unmap_q = rcb->unmap_q;
518	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
519		unmap = &unmap_q->unmap[ci];
520		BNA_QE_INDX_INC(ci, rcb->q_depth);
521
522		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
523			bnad_rxq_cleanup_skb(bnad, unmap);
524		else
525			bnad_rxq_cleanup_page(bnad, unmap);
526	}
527}
528
529static void
530bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
531			u32 sop_ci, u32 nvecs, u32 last_fraglen)
532{
533	struct bnad *bnad;
534	u32 ci, vec, len, totlen = 0;
535	struct bnad_rx_unmap_q *unmap_q;
536	struct bnad_rx_unmap *unmap;
537
538	unmap_q = rcb->unmap_q;
539	bnad = rcb->bnad;
540
541	/* prefetch header */
542	prefetch(page_address(unmap_q->unmap[sop_ci].page) +
543			unmap_q->unmap[sop_ci].page_offset);
544
545	for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
546		unmap = &unmap_q->unmap[ci];
547		BNA_QE_INDX_INC(ci, rcb->q_depth);
548
549		dma_unmap_page(&bnad->pcidev->dev,
550				dma_unmap_addr(&unmap->vector, dma_addr),
551				unmap->vector.len, DMA_FROM_DEVICE);
552
553		len = (vec == nvecs) ?
554			last_fraglen : unmap->vector.len;
555		skb->truesize += unmap->vector.len;
556		totlen += len;
557
558		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
559				unmap->page, unmap->page_offset, len);
560
561		unmap->page = NULL;
562		unmap->vector.len = 0;
563	}
564
565	skb->len += totlen;
566	skb->data_len += totlen;
567}
568
569static inline void
570bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
571		  struct bnad_rx_unmap *unmap, u32 len)
572{
573	prefetch(skb->data);
574
575	dma_unmap_single(&bnad->pcidev->dev,
576			dma_unmap_addr(&unmap->vector, dma_addr),
577			unmap->vector.len, DMA_FROM_DEVICE);
578
579	skb_put(skb, len);
580	skb->protocol = eth_type_trans(skb, bnad->netdev);
581
582	unmap->skb = NULL;
583	unmap->vector.len = 0;
584}
585
586static u32
587bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
588{
589	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
590	struct bna_rcb *rcb = NULL;
591	struct bnad_rx_unmap_q *unmap_q;
592	struct bnad_rx_unmap *unmap = NULL;
593	struct sk_buff *skb = NULL;
594	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
595	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
596	u32 packets = 0, len = 0, totlen = 0;
597	u32 pi, vec, sop_ci = 0, nvecs = 0;
598	u32 flags, masked_flags;
599
600	prefetch(bnad->netdev);
601
602	cq = ccb->sw_q;
603
604	while (packets < budget) {
605		cmpl = &cq[ccb->producer_index];
606		if (!cmpl->valid)
607			break;
608		/* The 'valid' field is set by the adapter, only after writing
609		 * the other fields of completion entry. Hence, do not load
610		 * other fields of completion entry *before* the 'valid' is
611		 * loaded. Adding the rmb() here prevents the compiler and/or
612		 * CPU from reordering the reads which would potentially result
613		 * in reading stale values in completion entry.
614		 */
615		rmb();
616
617		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
618
619		if (bna_is_small_rxq(cmpl->rxq_id))
620			rcb = ccb->rcb[1];
621		else
622			rcb = ccb->rcb[0];
623
624		unmap_q = rcb->unmap_q;
625
626		/* start of packet ci */
627		sop_ci = rcb->consumer_index;
628
629		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
630			unmap = &unmap_q->unmap[sop_ci];
631			skb = unmap->skb;
632		} else {
633			skb = napi_get_frags(&rx_ctrl->napi);
634			if (unlikely(!skb))
635				break;
636		}
637		prefetch(skb);
638
639		flags = ntohl(cmpl->flags);
640		len = ntohs(cmpl->length);
641		totlen = len;
642		nvecs = 1;
643
644		/* Check all the completions for this frame.
645		 * busy-wait doesn't help much, break here.
646		 */
647		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
648		    (flags & BNA_CQ_EF_EOP) == 0) {
649			pi = ccb->producer_index;
650			do {
651				BNA_QE_INDX_INC(pi, ccb->q_depth);
652				next_cmpl = &cq[pi];
653
654				if (!next_cmpl->valid)
655					break;
656				/* The 'valid' field is set by the adapter, only
657				 * after writing the other fields of completion
658				 * entry. Hence, do not load other fields of
659				 * completion entry *before* the 'valid' is
660				 * loaded. Adding the rmb() here prevents the
661				 * compiler and/or CPU from reordering the reads
662				 * which would potentially result in reading
663				 * stale values in completion entry.
664				 */
665				rmb();
666
667				len = ntohs(next_cmpl->length);
668				flags = ntohl(next_cmpl->flags);
669
670				nvecs++;
671				totlen += len;
672			} while ((flags & BNA_CQ_EF_EOP) == 0);
673
674			if (!next_cmpl->valid)
675				break;
676		}
677
678		/* TODO: BNA_CQ_EF_LOCAL ? */
679		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
680						BNA_CQ_EF_FCS_ERROR |
681						BNA_CQ_EF_TOO_LONG))) {
682			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
683			rcb->rxq->rx_packets_with_error++;
684
685			goto next;
686		}
687
688		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
689			bnad_cq_setup_skb(bnad, skb, unmap, len);
690		else
691			bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
692
693		packets++;
694		rcb->rxq->rx_packets++;
695		rcb->rxq->rx_bytes += totlen;
696		ccb->bytes_per_intr += totlen;
697
698		masked_flags = flags & flags_cksum_prot_mask;
699
700		if (likely
701		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
702		     ((masked_flags == flags_tcp4) ||
703		      (masked_flags == flags_udp4) ||
704		      (masked_flags == flags_tcp6) ||
705		      (masked_flags == flags_udp6))))
706			skb->ip_summed = CHECKSUM_UNNECESSARY;
707		else
708			skb_checksum_none_assert(skb);
709
710		if ((flags & BNA_CQ_EF_VLAN) &&
711		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
712			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
713
714		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
715			netif_receive_skb(skb);
716		else
717			napi_gro_frags(&rx_ctrl->napi);
718
719next:
720		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
721		for (vec = 0; vec < nvecs; vec++) {
722			cmpl = &cq[ccb->producer_index];
723			cmpl->valid = 0;
724			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
725		}
726		cmpl = &cq[ccb->producer_index];
727	}
728
729	napi_gro_flush(&rx_ctrl->napi, false);
730	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
731		bna_ib_ack_disable_irq(ccb->i_dbell, packets);
732
733	bnad_rxq_post(bnad, ccb->rcb[0]);
734	if (ccb->rcb[1])
735		bnad_rxq_post(bnad, ccb->rcb[1]);
736
737	return packets;
738}
739
740static void
741bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
742{
743	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
744	struct napi_struct *napi = &rx_ctrl->napi;
745
746	if (likely(napi_schedule_prep(napi))) {
747		__napi_schedule(napi);
748		rx_ctrl->rx_schedule++;
749	}
750}
751
752/* MSIX Rx Path Handler */
753static irqreturn_t
754bnad_msix_rx(int irq, void *data)
755{
756	struct bna_ccb *ccb = (struct bna_ccb *)data;
757
758	if (ccb) {
759		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
760		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
761	}
762
763	return IRQ_HANDLED;
764}
765
766/* Interrupt handlers */
767
768/* Mbox Interrupt Handlers */
769static irqreturn_t
770bnad_msix_mbox_handler(int irq, void *data)
771{
772	u32 intr_status;
773	unsigned long flags;
774	struct bnad *bnad = (struct bnad *)data;
775
776	spin_lock_irqsave(&bnad->bna_lock, flags);
777	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
778		spin_unlock_irqrestore(&bnad->bna_lock, flags);
779		return IRQ_HANDLED;
780	}
781
782	bna_intr_status_get(&bnad->bna, intr_status);
783
784	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
785		bna_mbox_handler(&bnad->bna, intr_status);
786
787	spin_unlock_irqrestore(&bnad->bna_lock, flags);
788
789	return IRQ_HANDLED;
790}
791
792static irqreturn_t
793bnad_isr(int irq, void *data)
794{
795	int i, j;
796	u32 intr_status;
797	unsigned long flags;
798	struct bnad *bnad = (struct bnad *)data;
799	struct bnad_rx_info *rx_info;
800	struct bnad_rx_ctrl *rx_ctrl;
801	struct bna_tcb *tcb = NULL;
802
803	spin_lock_irqsave(&bnad->bna_lock, flags);
804	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
805		spin_unlock_irqrestore(&bnad->bna_lock, flags);
806		return IRQ_NONE;
807	}
808
809	bna_intr_status_get(&bnad->bna, intr_status);
810
811	if (unlikely(!intr_status)) {
812		spin_unlock_irqrestore(&bnad->bna_lock, flags);
813		return IRQ_NONE;
814	}
815
816	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
817		bna_mbox_handler(&bnad->bna, intr_status);
818
819	spin_unlock_irqrestore(&bnad->bna_lock, flags);
820
821	if (!BNA_IS_INTX_DATA_INTR(intr_status))
822		return IRQ_HANDLED;
823
824	/* Process data interrupts */
825	/* Tx processing */
826	for (i = 0; i < bnad->num_tx; i++) {
827		for (j = 0; j < bnad->num_txq_per_tx; j++) {
828			tcb = bnad->tx_info[i].tcb[j];
829			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
830				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
831		}
832	}
833	/* Rx processing */
834	for (i = 0; i < bnad->num_rx; i++) {
835		rx_info = &bnad->rx_info[i];
836		if (!rx_info->rx)
837			continue;
838		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
839			rx_ctrl = &rx_info->rx_ctrl[j];
840			if (rx_ctrl->ccb)
841				bnad_netif_rx_schedule_poll(bnad,
842							    rx_ctrl->ccb);
843		}
844	}
845	return IRQ_HANDLED;
846}
847
848/*
849 * Called in interrupt / callback context
850 * with bna_lock held, so cfg_flags access is OK
851 */
852static void
853bnad_enable_mbox_irq(struct bnad *bnad)
854{
855	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
856
857	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
858}
859
860/*
861 * Called with bnad->bna_lock held b'cos of
862 * bnad->cfg_flags access.
863 */
864static void
865bnad_disable_mbox_irq(struct bnad *bnad)
866{
867	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
868
869	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
870}
871
872static void
873bnad_set_netdev_perm_addr(struct bnad *bnad)
874{
875	struct net_device *netdev = bnad->netdev;
876
877	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
878	if (is_zero_ether_addr(netdev->dev_addr))
879		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
880}
881
882/* Control Path Handlers */
883
884/* Callbacks */
885void
886bnad_cb_mbox_intr_enable(struct bnad *bnad)
887{
888	bnad_enable_mbox_irq(bnad);
889}
890
891void
892bnad_cb_mbox_intr_disable(struct bnad *bnad)
893{
894	bnad_disable_mbox_irq(bnad);
895}
896
897void
898bnad_cb_ioceth_ready(struct bnad *bnad)
899{
900	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
901	complete(&bnad->bnad_completions.ioc_comp);
902}
903
904void
905bnad_cb_ioceth_failed(struct bnad *bnad)
906{
907	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
908	complete(&bnad->bnad_completions.ioc_comp);
909}
910
911void
912bnad_cb_ioceth_disabled(struct bnad *bnad)
913{
914	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
915	complete(&bnad->bnad_completions.ioc_comp);
916}
917
918static void
919bnad_cb_enet_disabled(void *arg)
920{
921	struct bnad *bnad = (struct bnad *)arg;
922
923	netif_carrier_off(bnad->netdev);
924	complete(&bnad->bnad_completions.enet_comp);
925}
926
927void
928bnad_cb_ethport_link_status(struct bnad *bnad,
929			enum bna_link_status link_status)
930{
931	bool link_up = false;
932
933	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
934
935	if (link_status == BNA_CEE_UP) {
936		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
937			BNAD_UPDATE_CTR(bnad, cee_toggle);
938		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
939	} else {
940		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
941			BNAD_UPDATE_CTR(bnad, cee_toggle);
942		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
943	}
944
945	if (link_up) {
946		if (!netif_carrier_ok(bnad->netdev)) {
947			uint tx_id, tcb_id;
948			printk(KERN_WARNING "bna: %s link up\n",
949				bnad->netdev->name);
950			netif_carrier_on(bnad->netdev);
951			BNAD_UPDATE_CTR(bnad, link_toggle);
952			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
953				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
954				      tcb_id++) {
955					struct bna_tcb *tcb =
956					bnad->tx_info[tx_id].tcb[tcb_id];
957					u32 txq_id;
958					if (!tcb)
959						continue;
960
961					txq_id = tcb->id;
962
963					if (test_bit(BNAD_TXQ_TX_STARTED,
964						     &tcb->flags)) {
965						/*
966						 * Force an immediate
967						 * Transmit Schedule */
968						printk(KERN_INFO "bna: %s %d "
969						      "TXQ_STARTED\n",
970						       bnad->netdev->name,
971						       txq_id);
972						netif_wake_subqueue(
973								bnad->netdev,
974								txq_id);
975						BNAD_UPDATE_CTR(bnad,
976							netif_queue_wakeup);
977					} else {
978						netif_stop_subqueue(
979								bnad->netdev,
980								txq_id);
981						BNAD_UPDATE_CTR(bnad,
982							netif_queue_stop);
983					}
984				}
985			}
986		}
987	} else {
988		if (netif_carrier_ok(bnad->netdev)) {
989			printk(KERN_WARNING "bna: %s link down\n",
990				bnad->netdev->name);
991			netif_carrier_off(bnad->netdev);
992			BNAD_UPDATE_CTR(bnad, link_toggle);
993		}
994	}
995}
996
997static void
998bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
999{
1000	struct bnad *bnad = (struct bnad *)arg;
1001
1002	complete(&bnad->bnad_completions.tx_comp);
1003}
1004
1005static void
1006bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1007{
1008	struct bnad_tx_info *tx_info =
1009			(struct bnad_tx_info *)tcb->txq->tx->priv;
1010
1011	tcb->priv = tcb;
1012	tx_info->tcb[tcb->id] = tcb;
1013}
1014
1015static void
1016bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1017{
1018	struct bnad_tx_info *tx_info =
1019			(struct bnad_tx_info *)tcb->txq->tx->priv;
1020
1021	tx_info->tcb[tcb->id] = NULL;
1022	tcb->priv = NULL;
1023}
1024
1025static void
1026bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1027{
1028	struct bnad_rx_info *rx_info =
1029			(struct bnad_rx_info *)ccb->cq->rx->priv;
1030
1031	rx_info->rx_ctrl[ccb->id].ccb = ccb;
1032	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1033}
1034
1035static void
1036bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1037{
1038	struct bnad_rx_info *rx_info =
1039			(struct bnad_rx_info *)ccb->cq->rx->priv;
1040
1041	rx_info->rx_ctrl[ccb->id].ccb = NULL;
1042}
1043
1044static void
1045bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1046{
1047	struct bnad_tx_info *tx_info =
1048			(struct bnad_tx_info *)tx->priv;
1049	struct bna_tcb *tcb;
1050	u32 txq_id;
1051	int i;
1052
1053	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1054		tcb = tx_info->tcb[i];
1055		if (!tcb)
1056			continue;
1057		txq_id = tcb->id;
1058		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1059		netif_stop_subqueue(bnad->netdev, txq_id);
1060		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
1061			bnad->netdev->name, txq_id);
1062	}
1063}
1064
1065static void
1066bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1067{
1068	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1069	struct bna_tcb *tcb;
1070	u32 txq_id;
1071	int i;
1072
1073	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1074		tcb = tx_info->tcb[i];
1075		if (!tcb)
1076			continue;
1077		txq_id = tcb->id;
1078
1079		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1080		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1081		BUG_ON(*(tcb->hw_consumer_index) != 0);
1082
1083		if (netif_carrier_ok(bnad->netdev)) {
1084			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
1085				bnad->netdev->name, txq_id);
1086			netif_wake_subqueue(bnad->netdev, txq_id);
1087			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1088		}
1089	}
1090
1091	/*
1092	 * Workaround for first ioceth enable failure & we
1093	 * get a 0 MAC address. We try to get the MAC address
1094	 * again here.
1095	 */
1096	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
1097		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
1098		bnad_set_netdev_perm_addr(bnad);
1099	}
1100}
1101
1102/*
1103 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1104 */
1105static void
1106bnad_tx_cleanup(struct delayed_work *work)
1107{
1108	struct bnad_tx_info *tx_info =
1109		container_of(work, struct bnad_tx_info, tx_cleanup_work);
1110	struct bnad *bnad = NULL;
1111	struct bna_tcb *tcb;
1112	unsigned long flags;
1113	u32 i, pending = 0;
1114
1115	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1116		tcb = tx_info->tcb[i];
1117		if (!tcb)
1118			continue;
1119
1120		bnad = tcb->bnad;
1121
1122		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1123			pending++;
1124			continue;
1125		}
1126
1127		bnad_txq_cleanup(bnad, tcb);
1128
1129		smp_mb__before_atomic();
1130		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1131	}
1132
1133	if (pending) {
1134		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1135			msecs_to_jiffies(1));
1136		return;
1137	}
1138
1139	spin_lock_irqsave(&bnad->bna_lock, flags);
1140	bna_tx_cleanup_complete(tx_info->tx);
1141	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1142}
1143
1144static void
1145bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1146{
1147	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1148	struct bna_tcb *tcb;
1149	int i;
1150
1151	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1152		tcb = tx_info->tcb[i];
1153		if (!tcb)
1154			continue;
1155	}
1156
1157	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1158}
1159
1160static void
1161bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1162{
1163	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1164	struct bna_ccb *ccb;
1165	struct bnad_rx_ctrl *rx_ctrl;
1166	int i;
1167
1168	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1169		rx_ctrl = &rx_info->rx_ctrl[i];
1170		ccb = rx_ctrl->ccb;
1171		if (!ccb)
1172			continue;
1173
1174		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1175
1176		if (ccb->rcb[1])
1177			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1178	}
1179}
1180
1181/*
1182 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1183 */
1184static void
1185bnad_rx_cleanup(void *work)
1186{
1187	struct bnad_rx_info *rx_info =
1188		container_of(work, struct bnad_rx_info, rx_cleanup_work);
1189	struct bnad_rx_ctrl *rx_ctrl;
1190	struct bnad *bnad = NULL;
1191	unsigned long flags;
1192	u32 i;
1193
1194	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1195		rx_ctrl = &rx_info->rx_ctrl[i];
1196
1197		if (!rx_ctrl->ccb)
1198			continue;
1199
1200		bnad = rx_ctrl->ccb->bnad;
1201
1202		/*
1203		 * Wait till the poll handler has exited
1204		 * and nothing can be scheduled anymore
1205		 */
1206		napi_disable(&rx_ctrl->napi);
1207
1208		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1209		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1210		if (rx_ctrl->ccb->rcb[1])
1211			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1212	}
1213
1214	spin_lock_irqsave(&bnad->bna_lock, flags);
1215	bna_rx_cleanup_complete(rx_info->rx);
1216	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1217}
1218
1219static void
1220bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1221{
1222	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1223	struct bna_ccb *ccb;
1224	struct bnad_rx_ctrl *rx_ctrl;
1225	int i;
1226
1227	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1228		rx_ctrl = &rx_info->rx_ctrl[i];
1229		ccb = rx_ctrl->ccb;
1230		if (!ccb)
1231			continue;
1232
1233		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1234
1235		if (ccb->rcb[1])
1236			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1237	}
1238
1239	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1240}
1241
1242static void
1243bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1244{
1245	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1246	struct bna_ccb *ccb;
1247	struct bna_rcb *rcb;
1248	struct bnad_rx_ctrl *rx_ctrl;
1249	int i, j;
1250
1251	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1252		rx_ctrl = &rx_info->rx_ctrl[i];
1253		ccb = rx_ctrl->ccb;
1254		if (!ccb)
1255			continue;
1256
1257		napi_enable(&rx_ctrl->napi);
1258
1259		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1260			rcb = ccb->rcb[j];
1261			if (!rcb)
1262				continue;
1263
1264			bnad_rxq_alloc_init(bnad, rcb);
1265			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1266			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1267			bnad_rxq_post(bnad, rcb);
1268		}
1269	}
1270}
1271
1272static void
1273bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1274{
1275	struct bnad *bnad = (struct bnad *)arg;
1276
1277	complete(&bnad->bnad_completions.rx_comp);
1278}
1279
1280static void
1281bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1282{
1283	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1284	complete(&bnad->bnad_completions.mcast_comp);
1285}
1286
1287void
1288bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1289		       struct bna_stats *stats)
1290{
1291	if (status == BNA_CB_SUCCESS)
1292		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1293
1294	if (!netif_running(bnad->netdev) ||
1295		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1296		return;
1297
1298	mod_timer(&bnad->stats_timer,
1299		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1300}
1301
1302static void
1303bnad_cb_enet_mtu_set(struct bnad *bnad)
1304{
1305	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1306	complete(&bnad->bnad_completions.mtu_comp);
1307}
1308
1309void
1310bnad_cb_completion(void *arg, enum bfa_status status)
1311{
1312	struct bnad_iocmd_comp *iocmd_comp =
1313			(struct bnad_iocmd_comp *)arg;
1314
1315	iocmd_comp->comp_status = (u32) status;
1316	complete(&iocmd_comp->comp);
1317}
1318
1319/* Resource allocation, free functions */
1320
1321static void
1322bnad_mem_free(struct bnad *bnad,
1323	      struct bna_mem_info *mem_info)
1324{
1325	int i;
1326	dma_addr_t dma_pa;
1327
1328	if (mem_info->mdl == NULL)
1329		return;
1330
1331	for (i = 0; i < mem_info->num; i++) {
1332		if (mem_info->mdl[i].kva != NULL) {
1333			if (mem_info->mem_type == BNA_MEM_T_DMA) {
1334				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1335						dma_pa);
1336				dma_free_coherent(&bnad->pcidev->dev,
1337						  mem_info->mdl[i].len,
1338						  mem_info->mdl[i].kva, dma_pa);
1339			} else
1340				kfree(mem_info->mdl[i].kva);
1341		}
1342	}
1343	kfree(mem_info->mdl);
1344	mem_info->mdl = NULL;
1345}
1346
1347static int
1348bnad_mem_alloc(struct bnad *bnad,
1349	       struct bna_mem_info *mem_info)
1350{
1351	int i;
1352	dma_addr_t dma_pa;
1353
1354	if ((mem_info->num == 0) || (mem_info->len == 0)) {
1355		mem_info->mdl = NULL;
1356		return 0;
1357	}
1358
1359	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1360				GFP_KERNEL);
1361	if (mem_info->mdl == NULL)
1362		return -ENOMEM;
1363
1364	if (mem_info->mem_type == BNA_MEM_T_DMA) {
1365		for (i = 0; i < mem_info->num; i++) {
1366			mem_info->mdl[i].len = mem_info->len;
1367			mem_info->mdl[i].kva =
1368				dma_alloc_coherent(&bnad->pcidev->dev,
1369						   mem_info->len, &dma_pa,
1370						   GFP_KERNEL);
1371			if (mem_info->mdl[i].kva == NULL)
1372				goto err_return;
1373
1374			BNA_SET_DMA_ADDR(dma_pa,
1375					 &(mem_info->mdl[i].dma));
1376		}
1377	} else {
1378		for (i = 0; i < mem_info->num; i++) {
1379			mem_info->mdl[i].len = mem_info->len;
1380			mem_info->mdl[i].kva = kzalloc(mem_info->len,
1381							GFP_KERNEL);
1382			if (mem_info->mdl[i].kva == NULL)
1383				goto err_return;
1384		}
1385	}
1386
1387	return 0;
1388
1389err_return:
1390	bnad_mem_free(bnad, mem_info);
1391	return -ENOMEM;
1392}
1393
1394/* Free IRQ for Mailbox */
1395static void
1396bnad_mbox_irq_free(struct bnad *bnad)
1397{
1398	int irq;
1399	unsigned long flags;
1400
1401	spin_lock_irqsave(&bnad->bna_lock, flags);
1402	bnad_disable_mbox_irq(bnad);
1403	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1404
1405	irq = BNAD_GET_MBOX_IRQ(bnad);
1406	free_irq(irq, bnad);
1407}
1408
1409/*
1410 * Allocates IRQ for Mailbox, but keep it disabled
1411 * This will be enabled once we get the mbox enable callback
1412 * from bna
1413 */
1414static int
1415bnad_mbox_irq_alloc(struct bnad *bnad)
1416{
1417	int		err = 0;
1418	unsigned long	irq_flags, flags;
1419	u32	irq;
1420	irq_handler_t	irq_handler;
1421
1422	spin_lock_irqsave(&bnad->bna_lock, flags);
1423	if (bnad->cfg_flags & BNAD_CF_MSIX) {
1424		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1425		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1426		irq_flags = 0;
1427	} else {
1428		irq_handler = (irq_handler_t)bnad_isr;
1429		irq = bnad->pcidev->irq;
1430		irq_flags = IRQF_SHARED;
1431	}
1432
1433	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1434	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1435
1436	/*
1437	 * Set the Mbox IRQ disable flag, so that the IRQ handler
1438	 * called from request_irq() for SHARED IRQs do not execute
1439	 */
1440	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1441
1442	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1443
1444	err = request_irq(irq, irq_handler, irq_flags,
1445			  bnad->mbox_irq_name, bnad);
1446
1447	return err;
1448}
1449
1450static void
1451bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1452{
1453	kfree(intr_info->idl);
1454	intr_info->idl = NULL;
1455}
1456
1457/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1458static int
1459bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1460		    u32 txrx_id, struct bna_intr_info *intr_info)
1461{
1462	int i, vector_start = 0;
1463	u32 cfg_flags;
1464	unsigned long flags;
1465
1466	spin_lock_irqsave(&bnad->bna_lock, flags);
1467	cfg_flags = bnad->cfg_flags;
1468	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1469
1470	if (cfg_flags & BNAD_CF_MSIX) {
1471		intr_info->intr_type = BNA_INTR_T_MSIX;
1472		intr_info->idl = kcalloc(intr_info->num,
1473					sizeof(struct bna_intr_descr),
1474					GFP_KERNEL);
1475		if (!intr_info->idl)
1476			return -ENOMEM;
1477
1478		switch (src) {
1479		case BNAD_INTR_TX:
1480			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1481			break;
1482
1483		case BNAD_INTR_RX:
1484			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1485					(bnad->num_tx * bnad->num_txq_per_tx) +
1486					txrx_id;
1487			break;
1488
1489		default:
1490			BUG();
1491		}
1492
1493		for (i = 0; i < intr_info->num; i++)
1494			intr_info->idl[i].vector = vector_start + i;
1495	} else {
1496		intr_info->intr_type = BNA_INTR_T_INTX;
1497		intr_info->num = 1;
1498		intr_info->idl = kcalloc(intr_info->num,
1499					sizeof(struct bna_intr_descr),
1500					GFP_KERNEL);
1501		if (!intr_info->idl)
1502			return -ENOMEM;
1503
1504		switch (src) {
1505		case BNAD_INTR_TX:
1506			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1507			break;
1508
1509		case BNAD_INTR_RX:
1510			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1511			break;
1512		}
1513	}
1514	return 0;
1515}
1516
1517/* NOTE: Should be called for MSIX only
1518 * Unregisters Tx MSIX vector(s) from the kernel
1519 */
1520static void
1521bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1522			int num_txqs)
1523{
1524	int i;
1525	int vector_num;
1526
1527	for (i = 0; i < num_txqs; i++) {
1528		if (tx_info->tcb[i] == NULL)
1529			continue;
1530
1531		vector_num = tx_info->tcb[i]->intr_vector;
1532		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1533	}
1534}
1535
1536/* NOTE: Should be called for MSIX only
1537 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1538 */
1539static int
1540bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1541			u32 tx_id, int num_txqs)
1542{
1543	int i;
1544	int err;
1545	int vector_num;
1546
1547	for (i = 0; i < num_txqs; i++) {
1548		vector_num = tx_info->tcb[i]->intr_vector;
1549		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1550				tx_id + tx_info->tcb[i]->id);
1551		err = request_irq(bnad->msix_table[vector_num].vector,
1552				  (irq_handler_t)bnad_msix_tx, 0,
1553				  tx_info->tcb[i]->name,
1554				  tx_info->tcb[i]);
1555		if (err)
1556			goto err_return;
1557	}
1558
1559	return 0;
1560
1561err_return:
1562	if (i > 0)
1563		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1564	return -1;
1565}
1566
1567/* NOTE: Should be called for MSIX only
1568 * Unregisters Rx MSIX vector(s) from the kernel
1569 */
1570static void
1571bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1572			int num_rxps)
1573{
1574	int i;
1575	int vector_num;
1576
1577	for (i = 0; i < num_rxps; i++) {
1578		if (rx_info->rx_ctrl[i].ccb == NULL)
1579			continue;
1580
1581		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1582		free_irq(bnad->msix_table[vector_num].vector,
1583			 rx_info->rx_ctrl[i].ccb);
1584	}
1585}
1586
1587/* NOTE: Should be called for MSIX only
1588 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1589 */
1590static int
1591bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1592			u32 rx_id, int num_rxps)
1593{
1594	int i;
1595	int err;
1596	int vector_num;
1597
1598	for (i = 0; i < num_rxps; i++) {
1599		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1600		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1601			bnad->netdev->name,
1602			rx_id + rx_info->rx_ctrl[i].ccb->id);
1603		err = request_irq(bnad->msix_table[vector_num].vector,
1604				  (irq_handler_t)bnad_msix_rx, 0,
1605				  rx_info->rx_ctrl[i].ccb->name,
1606				  rx_info->rx_ctrl[i].ccb);
1607		if (err)
1608			goto err_return;
1609	}
1610
1611	return 0;
1612
1613err_return:
1614	if (i > 0)
1615		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1616	return -1;
1617}
1618
1619/* Free Tx object Resources */
1620static void
1621bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1622{
1623	int i;
1624
1625	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1626		if (res_info[i].res_type == BNA_RES_T_MEM)
1627			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1628		else if (res_info[i].res_type == BNA_RES_T_INTR)
1629			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1630	}
1631}
1632
1633/* Allocates memory and interrupt resources for Tx object */
1634static int
1635bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1636		  u32 tx_id)
1637{
1638	int i, err = 0;
1639
1640	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1641		if (res_info[i].res_type == BNA_RES_T_MEM)
1642			err = bnad_mem_alloc(bnad,
1643					&res_info[i].res_u.mem_info);
1644		else if (res_info[i].res_type == BNA_RES_T_INTR)
1645			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1646					&res_info[i].res_u.intr_info);
1647		if (err)
1648			goto err_return;
1649	}
1650	return 0;
1651
1652err_return:
1653	bnad_tx_res_free(bnad, res_info);
1654	return err;
1655}
1656
1657/* Free Rx object Resources */
1658static void
1659bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1660{
1661	int i;
1662
1663	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1664		if (res_info[i].res_type == BNA_RES_T_MEM)
1665			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1666		else if (res_info[i].res_type == BNA_RES_T_INTR)
1667			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1668	}
1669}
1670
1671/* Allocates memory and interrupt resources for Rx object */
1672static int
1673bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1674		  uint rx_id)
1675{
1676	int i, err = 0;
1677
1678	/* All memory needs to be allocated before setup_ccbs */
1679	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1680		if (res_info[i].res_type == BNA_RES_T_MEM)
1681			err = bnad_mem_alloc(bnad,
1682					&res_info[i].res_u.mem_info);
1683		else if (res_info[i].res_type == BNA_RES_T_INTR)
1684			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1685					&res_info[i].res_u.intr_info);
1686		if (err)
1687			goto err_return;
1688	}
1689	return 0;
1690
1691err_return:
1692	bnad_rx_res_free(bnad, res_info);
1693	return err;
1694}
1695
1696/* Timer callbacks */
1697/* a) IOC timer */
1698static void
1699bnad_ioc_timeout(unsigned long data)
1700{
1701	struct bnad *bnad = (struct bnad *)data;
1702	unsigned long flags;
1703
1704	spin_lock_irqsave(&bnad->bna_lock, flags);
1705	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1706	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1707}
1708
1709static void
1710bnad_ioc_hb_check(unsigned long data)
1711{
1712	struct bnad *bnad = (struct bnad *)data;
1713	unsigned long flags;
1714
1715	spin_lock_irqsave(&bnad->bna_lock, flags);
1716	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1717	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1718}
1719
1720static void
1721bnad_iocpf_timeout(unsigned long data)
1722{
1723	struct bnad *bnad = (struct bnad *)data;
1724	unsigned long flags;
1725
1726	spin_lock_irqsave(&bnad->bna_lock, flags);
1727	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1728	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1729}
1730
1731static void
1732bnad_iocpf_sem_timeout(unsigned long data)
1733{
1734	struct bnad *bnad = (struct bnad *)data;
1735	unsigned long flags;
1736
1737	spin_lock_irqsave(&bnad->bna_lock, flags);
1738	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1739	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1740}
1741
1742/*
1743 * All timer routines use bnad->bna_lock to protect against
1744 * the following race, which may occur in case of no locking:
1745 *	Time	CPU m	CPU n
1746 *	0       1 = test_bit
1747 *	1			clear_bit
1748 *	2			del_timer_sync
1749 *	3	mod_timer
1750 */
1751
1752/* b) Dynamic Interrupt Moderation Timer */
1753static void
1754bnad_dim_timeout(unsigned long data)
1755{
1756	struct bnad *bnad = (struct bnad *)data;
1757	struct bnad_rx_info *rx_info;
1758	struct bnad_rx_ctrl *rx_ctrl;
1759	int i, j;
1760	unsigned long flags;
1761
1762	if (!netif_carrier_ok(bnad->netdev))
1763		return;
1764
1765	spin_lock_irqsave(&bnad->bna_lock, flags);
1766	for (i = 0; i < bnad->num_rx; i++) {
1767		rx_info = &bnad->rx_info[i];
1768		if (!rx_info->rx)
1769			continue;
1770		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1771			rx_ctrl = &rx_info->rx_ctrl[j];
1772			if (!rx_ctrl->ccb)
1773				continue;
1774			bna_rx_dim_update(rx_ctrl->ccb);
1775		}
1776	}
1777
1778	/* Check for BNAD_CF_DIM_ENABLED, does not eleminate a race */
1779	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1780		mod_timer(&bnad->dim_timer,
1781			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1782	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1783}
1784
1785/* c)  Statistics Timer */
1786static void
1787bnad_stats_timeout(unsigned long data)
1788{
1789	struct bnad *bnad = (struct bnad *)data;
1790	unsigned long flags;
1791
1792	if (!netif_running(bnad->netdev) ||
1793		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1794		return;
1795
1796	spin_lock_irqsave(&bnad->bna_lock, flags);
1797	bna_hw_stats_get(&bnad->bna);
1798	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1799}
1800
1801/*
1802 * Set up timer for DIM
1803 * Called with bnad->bna_lock held
1804 */
1805void
1806bnad_dim_timer_start(struct bnad *bnad)
1807{
1808	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1809	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1810		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1811			    (unsigned long)bnad);
1812		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1813		mod_timer(&bnad->dim_timer,
1814			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1815	}
1816}
1817
1818/*
1819 * Set up timer for statistics
1820 * Called with mutex_lock(&bnad->conf_mutex) held
1821 */
1822static void
1823bnad_stats_timer_start(struct bnad *bnad)
1824{
1825	unsigned long flags;
1826
1827	spin_lock_irqsave(&bnad->bna_lock, flags);
1828	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1829		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1830			    (unsigned long)bnad);
1831		mod_timer(&bnad->stats_timer,
1832			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1833	}
1834	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1835}
1836
1837/*
1838 * Stops the stats timer
1839 * Called with mutex_lock(&bnad->conf_mutex) held
1840 */
1841static void
1842bnad_stats_timer_stop(struct bnad *bnad)
1843{
1844	int to_del = 0;
1845	unsigned long flags;
1846
1847	spin_lock_irqsave(&bnad->bna_lock, flags);
1848	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1849		to_del = 1;
1850	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1851	if (to_del)
1852		del_timer_sync(&bnad->stats_timer);
1853}
1854
1855/* Utilities */
1856
1857static void
1858bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1859{
1860	int i = 1; /* Index 0 has broadcast address */
1861	struct netdev_hw_addr *mc_addr;
1862
1863	netdev_for_each_mc_addr(mc_addr, netdev) {
1864		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1865							ETH_ALEN);
1866		i++;
1867	}
1868}
1869
1870static int
1871bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1872{
1873	struct bnad_rx_ctrl *rx_ctrl =
1874		container_of(napi, struct bnad_rx_ctrl, napi);
1875	struct bnad *bnad = rx_ctrl->bnad;
1876	int rcvd = 0;
1877
1878	rx_ctrl->rx_poll_ctr++;
1879
1880	if (!netif_carrier_ok(bnad->netdev))
1881		goto poll_exit;
1882
1883	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1884	if (rcvd >= budget)
1885		return rcvd;
1886
1887poll_exit:
1888	napi_complete(napi);
1889
1890	rx_ctrl->rx_complete++;
1891
1892	if (rx_ctrl->ccb)
1893		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1894
1895	return rcvd;
1896}
1897
1898#define BNAD_NAPI_POLL_QUOTA		64
1899static void
1900bnad_napi_add(struct bnad *bnad, u32 rx_id)
1901{
1902	struct bnad_rx_ctrl *rx_ctrl;
1903	int i;
1904
1905	/* Initialize & enable NAPI */
1906	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
1907		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1908		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1909			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1910	}
1911}
1912
1913static void
1914bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1915{
1916	int i;
1917
1918	/* First disable and then clean up */
1919	for (i = 0; i < bnad->num_rxp_per_rx; i++)
1920		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1921}
1922
1923/* Should be held with conf_lock held */
1924void
1925bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1926{
1927	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1928	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1929	unsigned long flags;
1930
1931	if (!tx_info->tx)
1932		return;
1933
1934	init_completion(&bnad->bnad_completions.tx_comp);
1935	spin_lock_irqsave(&bnad->bna_lock, flags);
1936	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1937	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1938	wait_for_completion(&bnad->bnad_completions.tx_comp);
1939
1940	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1941		bnad_tx_msix_unregister(bnad, tx_info,
1942			bnad->num_txq_per_tx);
1943
1944	spin_lock_irqsave(&bnad->bna_lock, flags);
1945	bna_tx_destroy(tx_info->tx);
1946	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1947
1948	tx_info->tx = NULL;
1949	tx_info->tx_id = 0;
1950
1951	bnad_tx_res_free(bnad, res_info);
1952}
1953
1954/* Should be held with conf_lock held */
1955int
1956bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1957{
1958	int err;
1959	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1960	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1961	struct bna_intr_info *intr_info =
1962			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1963	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1964	static const struct bna_tx_event_cbfn tx_cbfn = {
1965		.tcb_setup_cbfn = bnad_cb_tcb_setup,
1966		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1967		.tx_stall_cbfn = bnad_cb_tx_stall,
1968		.tx_resume_cbfn = bnad_cb_tx_resume,
1969		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1970	};
1971
1972	struct bna_tx *tx;
1973	unsigned long flags;
1974
1975	tx_info->tx_id = tx_id;
1976
1977	/* Initialize the Tx object configuration */
1978	tx_config->num_txq = bnad->num_txq_per_tx;
1979	tx_config->txq_depth = bnad->txq_depth;
1980	tx_config->tx_type = BNA_TX_T_REGULAR;
1981	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1982
1983	/* Get BNA's resource requirement for one tx object */
1984	spin_lock_irqsave(&bnad->bna_lock, flags);
1985	bna_tx_res_req(bnad->num_txq_per_tx,
1986		bnad->txq_depth, res_info);
1987	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1988
1989	/* Fill Unmap Q memory requirements */
1990	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1991			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1992			bnad->txq_depth));
1993
1994	/* Allocate resources */
1995	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1996	if (err)
1997		return err;
1998
1999	/* Ask BNA to create one Tx object, supplying required resources */
2000	spin_lock_irqsave(&bnad->bna_lock, flags);
2001	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
2002			tx_info);
2003	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2004	if (!tx) {
2005		err = -ENOMEM;
2006		goto err_return;
2007	}
2008	tx_info->tx = tx;
2009
2010	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2011			(work_func_t)bnad_tx_cleanup);
2012
2013	/* Register ISR for the Tx object */
2014	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2015		err = bnad_tx_msix_register(bnad, tx_info,
2016			tx_id, bnad->num_txq_per_tx);
2017		if (err)
2018			goto cleanup_tx;
2019	}
2020
2021	spin_lock_irqsave(&bnad->bna_lock, flags);
2022	bna_tx_enable(tx);
2023	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2024
2025	return 0;
2026
2027cleanup_tx:
2028	spin_lock_irqsave(&bnad->bna_lock, flags);
2029	bna_tx_destroy(tx_info->tx);
2030	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2031	tx_info->tx = NULL;
2032	tx_info->tx_id = 0;
2033err_return:
2034	bnad_tx_res_free(bnad, res_info);
2035	return err;
2036}
2037
2038/* Setup the rx config for bna_rx_create */
2039/* bnad decides the configuration */
2040static void
2041bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2042{
2043	memset(rx_config, 0, sizeof(*rx_config));
2044	rx_config->rx_type = BNA_RX_T_REGULAR;
2045	rx_config->num_paths = bnad->num_rxp_per_rx;
2046	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2047
2048	if (bnad->num_rxp_per_rx > 1) {
2049		rx_config->rss_status = BNA_STATUS_T_ENABLED;
2050		rx_config->rss_config.hash_type =
2051				(BFI_ENET_RSS_IPV6 |
2052				 BFI_ENET_RSS_IPV6_TCP |
2053				 BFI_ENET_RSS_IPV4 |
2054				 BFI_ENET_RSS_IPV4_TCP);
2055		rx_config->rss_config.hash_mask =
2056				bnad->num_rxp_per_rx - 1;
2057		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
2058			sizeof(rx_config->rss_config.toeplitz_hash_key));
2059	} else {
2060		rx_config->rss_status = BNA_STATUS_T_DISABLED;
2061		memset(&rx_config->rss_config, 0,
2062		       sizeof(rx_config->rss_config));
2063	}
2064
2065	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2066	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2067
2068	/* BNA_RXP_SINGLE - one data-buffer queue
2069	 * BNA_RXP_SLR - one small-buffer and one large-buffer queue
2070	 * BNA_RXP_HDS - one header-buffer and one data-buffer queue
2071	 */
2072	/* TODO: configurable param for queue type */
2073	rx_config->rxp_type = BNA_RXP_SLR;
2074
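	/* On CAT2 adapters with jumbo frames (frame_size > 4096) the large
	 * RxQ runs in multi-buffer mode: a frame may span q0_num_vecs
	 * 2048-byte buffers, so q0_depth is scaled accordingly.
	 */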
2075	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2076	    rx_config->frame_size > 4096) {
2077		/* Although size_routing_enable is set in SLR mode,
2078		 * small packets may still be routed to the same RxQ;
2079		 * use a 2048-byte buffer size instead of PAGE_SIZE.
2080		 */
2081		rx_config->q0_buf_size = 2048;
2082		/* this should be a multiple of 2 */
2083		rx_config->q0_num_vecs = 4;
2084		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2085		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2086	} else {
2087		rx_config->q0_buf_size = rx_config->frame_size;
2088		rx_config->q0_num_vecs = 1;
2089		rx_config->q0_depth = bnad->rxq_depth;
2090	}
2091
2092	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2093	if (rx_config->rxp_type == BNA_RXP_SLR) {
2094		rx_config->q1_depth = bnad->rxq_depth;
2095		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2096	}
2097
2098	rx_config->vlan_strip_status =
2099		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2100		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2101}
2102
2103static void
2104bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2105{
2106	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2107	int i;
2108
2109	for (i = 0; i < bnad->num_rxp_per_rx; i++)
2110		rx_info->rx_ctrl[i].bnad = bnad;
2111}
2112
2113/* Called with mutex_lock(&bnad->conf_mutex) held */
2114static u32
2115bnad_reinit_rx(struct bnad *bnad)
2116{
2117	struct net_device *netdev = bnad->netdev;
2118	u32 err = 0, current_err = 0;
2119	u32 rx_id = 0, count = 0;
2120	unsigned long flags;
2121
2122	/* destroy and create new rx objects */
2123	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2124		if (!bnad->rx_info[rx_id].rx)
2125			continue;
2126		bnad_destroy_rx(bnad, rx_id);
2127	}
2128
2129	spin_lock_irqsave(&bnad->bna_lock, flags);
2130	bna_enet_mtu_set(&bnad->bna.enet,
2131			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2132	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2133
2134	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2135		count++;
2136		current_err = bnad_setup_rx(bnad, rx_id);
2137		if (current_err && !err) {
2138			err = current_err;
2139			pr_err("RXQ:%u setup failed\n", rx_id);
2140		}
2141	}
2142
2143	/* restore rx configuration */
2144	if (bnad->rx_info[0].rx && !err) {
2145		bnad_restore_vlans(bnad, 0);
2146		bnad_enable_default_bcast(bnad);
2147		spin_lock_irqsave(&bnad->bna_lock, flags);
2148		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2149		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2150		bnad_set_rx_mode(netdev);
2151	}
2152
2153	return count;
2154}
2155
2156/* Called with bnad_conf_lock() held */
2157void
2158bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2159{
2160	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2161	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2162	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2163	unsigned long flags;
2164	int to_del = 0;
2165
2166	if (!rx_info->rx)
2167		return;
2168
2169	if (0 == rx_id) {
2170		spin_lock_irqsave(&bnad->bna_lock, flags);
2171		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2172		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2173			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2174			to_del = 1;
2175		}
2176		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2177		if (to_del)
2178			del_timer_sync(&bnad->dim_timer);
2179	}
2180
2181	init_completion(&bnad->bnad_completions.rx_comp);
2182	spin_lock_irqsave(&bnad->bna_lock, flags);
2183	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2184	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2185	wait_for_completion(&bnad->bnad_completions.rx_comp);
2186
2187	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2188		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2189
2190	bnad_napi_delete(bnad, rx_id);
2191
2192	spin_lock_irqsave(&bnad->bna_lock, flags);
2193	bna_rx_destroy(rx_info->rx);
2194
2195	rx_info->rx = NULL;
2196	rx_info->rx_id = 0;
2197	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2198
2199	bnad_rx_res_free(bnad, res_info);
2200}
2201
2202/* Called with mutex_lock(&bnad->conf_mutex) held */
2203int
2204bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2205{
2206	int err;
2207	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2208	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2209	struct bna_intr_info *intr_info =
2210			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2211	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2212	static const struct bna_rx_event_cbfn rx_cbfn = {
2213		.rcb_setup_cbfn = NULL,
2214		.rcb_destroy_cbfn = NULL,
2215		.ccb_setup_cbfn = bnad_cb_ccb_setup,
2216		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2217		.rx_stall_cbfn = bnad_cb_rx_stall,
2218		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2219		.rx_post_cbfn = bnad_cb_rx_post,
2220	};
2221	struct bna_rx *rx;
2222	unsigned long flags;
2223
2224	rx_info->rx_id = rx_id;
2225
2226	/* Initialize the Rx object configuration */
2227	bnad_init_rx_config(bnad, rx_config);
2228
2229	/* Get BNA's resource requirement for one Rx object */
2230	spin_lock_irqsave(&bnad->bna_lock, flags);
2231	bna_rx_res_req(rx_config, res_info);
2232	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2233
2234	/* Fill Unmap Q memory requirements */
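	/* Each Rx path needs a bnad_rx_unmap_q header followed by one
	 * bnad_rx_unmap entry per descriptor of the large (q0) queue; when
	 * the path type is not BNA_RXP_SINGLE, the small/header (q1) queue
	 * gets its own unmap queue below.
	 */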
2235	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2236				 rx_config->num_paths,
2237			(rx_config->q0_depth *
2238			 sizeof(struct bnad_rx_unmap)) +
2239			 sizeof(struct bnad_rx_unmap_q));
2240
2241	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2242		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2243					 rx_config->num_paths,
2244				(rx_config->q1_depth *
2245				 sizeof(struct bnad_rx_unmap) +
2246				 sizeof(struct bnad_rx_unmap_q)));
2247	}
2248	/* Allocate resource */
2249	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2250	if (err)
2251		return err;
2252
2253	bnad_rx_ctrl_init(bnad, rx_id);
2254
2255	/* Ask BNA to create one Rx object, supplying required resources */
2256	spin_lock_irqsave(&bnad->bna_lock, flags);
2257	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2258			rx_info);
2259	if (!rx) {
2260		err = -ENOMEM;
2261		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2262		goto err_return;
2263	}
2264	rx_info->rx = rx;
2265	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2266
2267	INIT_WORK(&rx_info->rx_cleanup_work,
2268			(work_func_t)(bnad_rx_cleanup));
2269
2270	/*
2271	 * Init NAPI, which leaves its state set to NAPI_STATE_SCHED,
2272	 * so the IRQ handler cannot schedule NAPI at this point.
2273	 */
2274	bnad_napi_add(bnad, rx_id);
2275
2276	/* Register ISR for the Rx object */
2277	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2278		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2279						rx_config->num_paths);
2280		if (err)
2281			goto err_return;
2282	}
2283
2284	spin_lock_irqsave(&bnad->bna_lock, flags);
2285	if (0 == rx_id) {
2286		/* Set up Dynamic Interrupt Moderation Vector */
2287		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2288			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2289
2290		/* Enable VLAN filtering only on the default Rx */
2291		bna_rx_vlanfilter_enable(rx);
2292
2293		/* Start the DIM timer */
2294		bnad_dim_timer_start(bnad);
2295	}
2296
2297	bna_rx_enable(rx);
2298	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2299
2300	return 0;
2301
2302err_return:
2303	bnad_destroy_rx(bnad, rx_id);
2304	return err;
2305}
2306
2307/* Called with conf_lock & bnad->bna_lock held */
2308void
2309bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2310{
2311	struct bnad_tx_info *tx_info;
2312
2313	tx_info = &bnad->tx_info[0];
2314	if (!tx_info->tx)
2315		return;
2316
2317	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2318}
2319
2320/* Called with conf_lock & bnad->bna_lock held */
2321void
2322bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2323{
2324	struct bnad_rx_info *rx_info;
2325	int	i;
2326
2327	for (i = 0; i < bnad->num_rx; i++) {
2328		rx_info = &bnad->rx_info[i];
2329		if (!rx_info->rx)
2330			continue;
2331		bna_rx_coalescing_timeo_set(rx_info->rx,
2332				bnad->rx_coalescing_timeo);
2333	}
2334}
2335
2336/*
2337 * Called with bnad->bna_lock held
2338 */
2339int
2340bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2341{
2342	int ret;
2343
2344	if (!is_valid_ether_addr(mac_addr))
2345		return -EADDRNOTAVAIL;
2346
2347	/* If datapath is down, pretend everything went through */
2348	if (!bnad->rx_info[0].rx)
2349		return 0;
2350
2351	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2352	if (ret != BNA_CB_SUCCESS)
2353		return -EADDRNOTAVAIL;
2354
2355	return 0;
2356}
2357
2358/* Should be called with conf_lock held */
2359int
2360bnad_enable_default_bcast(struct bnad *bnad)
2361{
2362	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2363	int ret;
2364	unsigned long flags;
2365
2366	init_completion(&bnad->bnad_completions.mcast_comp);
2367
2368	spin_lock_irqsave(&bnad->bna_lock, flags);
2369	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2370				bnad_cb_rx_mcast_add);
2371	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2372
2373	if (ret == BNA_CB_SUCCESS)
2374		wait_for_completion(&bnad->bnad_completions.mcast_comp);
2375	else
2376		return -ENODEV;
2377
2378	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2379		return -ENODEV;
2380
2381	return 0;
2382}
2383
2384/* Called with mutex_lock(&bnad->conf_mutex) held */
2385void
2386bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2387{
2388	u16 vid;
2389	unsigned long flags;
2390
2391	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2392		spin_lock_irqsave(&bnad->bna_lock, flags);
2393		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2394		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2395	}
2396}
2397
2398/* Statistics utilities */
2399void
2400bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2401{
2402	int i, j;
2403
2404	for (i = 0; i < bnad->num_rx; i++) {
2405		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2406			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2407				stats->rx_packets += bnad->rx_info[i].
2408				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2409				stats->rx_bytes += bnad->rx_info[i].
2410					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2411				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2412					bnad->rx_info[i].rx_ctrl[j].ccb->
2413					rcb[1]->rxq) {
2414					stats->rx_packets +=
2415						bnad->rx_info[i].rx_ctrl[j].
2416						ccb->rcb[1]->rxq->rx_packets;
2417					stats->rx_bytes +=
2418						bnad->rx_info[i].rx_ctrl[j].
2419						ccb->rcb[1]->rxq->rx_bytes;
2420				}
2421			}
2422		}
2423	}
2424	for (i = 0; i < bnad->num_tx; i++) {
2425		for (j = 0; j < bnad->num_txq_per_tx; j++) {
2426			if (bnad->tx_info[i].tcb[j]) {
2427				stats->tx_packets +=
2428				bnad->tx_info[i].tcb[j]->txq->tx_packets;
2429				stats->tx_bytes +=
2430					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2431			}
2432		}
2433	}
2434}
2435
2436/*
2437 * Must be called with the bna_lock held.
2438 */
2439void
2440bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2441{
2442	struct bfi_enet_stats_mac *mac_stats;
2443	u32 bmap;
2444	int i;
2445
2446	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2447	stats->rx_errors =
2448		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2449		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2450		mac_stats->rx_undersize;
2451	stats->tx_errors = mac_stats->tx_fcs_error +
2452					mac_stats->tx_undersize;
2453	stats->rx_dropped = mac_stats->rx_drop;
2454	stats->tx_dropped = mac_stats->tx_drop;
2455	stats->multicast = mac_stats->rx_multicast;
2456	stats->collisions = mac_stats->tx_total_collision;
2457
2458	stats->rx_length_errors = mac_stats->rx_frame_length_error;
2459
2460	/* rx_over_errors (receive ring buffer overflow) is not filled in here */
2461
2462	stats->rx_crc_errors = mac_stats->rx_fcs_error;
2463	stats->rx_frame_errors = mac_stats->rx_alignment_error;
2464	/* receiver FIFO overrun */
2465	bmap = bna_rx_rid_mask(&bnad->bna);
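	/* Each set bit in the rid mask identifies an active Rx function;
	 * account its dropped frames as Rx FIFO overruns. The loop stops
	 * after the first active function.
	 */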
2466	for (i = 0; bmap; i++) {
2467		if (bmap & 1) {
2468			stats->rx_fifo_errors +=
2469				bnad->stats.bna_stats->
2470					hw_stats.rxf_stats[i].frame_drops;
2471			break;
2472		}
2473		bmap >>= 1;
2474	}
2475}
2476
2477static void
2478bnad_mbox_irq_sync(struct bnad *bnad)
2479{
2480	u32 irq;
2481	unsigned long flags;
2482
2483	spin_lock_irqsave(&bnad->bna_lock, flags);
2484	if (bnad->cfg_flags & BNAD_CF_MSIX)
2485		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2486	else
2487		irq = bnad->pcidev->irq;
2488	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2489
2490	synchronize_irq(irq);
2491}
2492
2493/* Utility used by bnad_start_xmit, for doing TSO */
2494static int
2495bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2496{
2497	int err;
2498
2499	err = skb_cow_head(skb, 0);
2500	if (err < 0) {
2501		BNAD_UPDATE_CTR(bnad, tso_err);
2502		return err;
2503	}
2504
2505	/*
2506	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2507	 * excluding the length field.
2508	 */
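	/* csum_tcpudp_magic()/csum_ipv6_magic() with a zero length yield the
	 * folded pseudo-header sum without the length field; its complement
	 * is stored as the seed that segmentation offload completes.
	 */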
2509	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2510		struct iphdr *iph = ip_hdr(skb);
2511
2512		/* Cleared so the hardware can fill them in for each segment */
2513		iph->tot_len = 0;
2514		iph->check = 0;
2515
2516		tcp_hdr(skb)->check =
2517			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2518					   IPPROTO_TCP, 0);
2519		BNAD_UPDATE_CTR(bnad, tso4);
2520	} else {
2521		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2522
2523		ipv6h->payload_len = 0;
2524		tcp_hdr(skb)->check =
2525			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2526					 IPPROTO_TCP, 0);
2527		BNAD_UPDATE_CTR(bnad, tso6);
2528	}
2529
2530	return 0;
2531}
2532
2533/*
2534 * Initialize Q numbers depending on Rx Paths
2535 * Called with bnad->bna_lock held, because of cfg_flags
2536 * access.
2537 */
2538static void
2539bnad_q_num_init(struct bnad *bnad)
2540{
2541	int rxps;
2542
2543	rxps = min((uint)num_online_cpus(),
2544			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2545
2546	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2547		rxps = 1;	/* INTx */
2548
2549	bnad->num_rx = 1;
2550	bnad->num_tx = 1;
2551	bnad->num_rxp_per_rx = rxps;
2552	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2553}
2554
2555/*
2556 * Adjusts the Q numbers, given a number of MSI-X vectors.
2557 * Preference is given to RSS over Tx priority queues; in that case
2558 * just 1 TxQ is used.
2559 * Called with bnad->bna_lock held because of cfg_flags access.
2560 */
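/* Example, assuming BNAD_MAILBOX_MSIX_VECTORS is 1: with MSI-X enabled,
 * a single Tx object/TxQ and 16 available vectors, num_rxp_per_rx
 * becomes 16 - 1 - 1 = 14; otherwise a single Rx path is used.
 */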
2561static void
2562bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2563{
2564	bnad->num_txq_per_tx = 1;
2565	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2566	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2567	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
2568		bnad->num_rxp_per_rx = msix_vectors -
2569			(bnad->num_tx * bnad->num_txq_per_tx) -
2570			BNAD_MAILBOX_MSIX_VECTORS;
2571	} else
2572		bnad->num_rxp_per_rx = 1;
2573}
2574
2575/* Enable / disable ioceth */
2576static int
2577bnad_ioceth_disable(struct bnad *bnad)
2578{
2579	unsigned long flags;
2580	int err = 0;
2581
2582	spin_lock_irqsave(&bnad->bna_lock, flags);
2583	init_completion(&bnad->bnad_completions.ioc_comp);
2584	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2585	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2586
2587	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2588		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2589
2590	err = bnad->bnad_completions.ioc_comp_status;
2591	return err;
2592}
2593
2594static int
2595bnad_ioceth_enable(struct bnad *bnad)
2596{
2597	int err = 0;
2598	unsigned long flags;
2599
2600	spin_lock_irqsave(&bnad->bna_lock, flags);
2601	init_completion(&bnad->bnad_completions.ioc_comp);
2602	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2603	bna_ioceth_enable(&bnad->bna.ioceth);
2604	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2605
2606	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2607		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2608
2609	err = bnad->bnad_completions.ioc_comp_status;
2610
2611	return err;
2612}
2613
2614/* Free BNA resources */
2615static void
2616bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2617		u32 res_val_max)
2618{
2619	int i;
2620
2621	for (i = 0; i < res_val_max; i++)
2622		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2623}
2624
2625/* Allocates memory and interrupt resources for BNA */
2626static int
2627bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2628		u32 res_val_max)
2629{
2630	int i, err;
2631
2632	for (i = 0; i < res_val_max; i++) {
2633		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2634		if (err)
2635			goto err_return;
2636	}
2637	return 0;
2638
2639err_return:
2640	bnad_res_free(bnad, res_info, res_val_max);
2641	return err;
2642}
2643
2644/* Interrupt enable / disable */
2645static void
2646bnad_enable_msix(struct bnad *bnad)
2647{
2648	int i, ret;
2649	unsigned long flags;
2650
2651	spin_lock_irqsave(&bnad->bna_lock, flags);
2652	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2653		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2654		return;
2655	}
2656	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2657
2658	if (bnad->msix_table)
2659		return;
2660
2661	bnad->msix_table =
2662		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2663
2664	if (!bnad->msix_table)
2665		goto intx_mode;
2666
2667	for (i = 0; i < bnad->msix_num; i++)
2668		bnad->msix_table[i].entry = i;
2669
2670	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2671				    1, bnad->msix_num);
2672	if (ret < 0) {
2673		goto intx_mode;
2674	} else if (ret < bnad->msix_num) {
2675		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2676			ret, bnad->msix_num);
2677
2678		spin_lock_irqsave(&bnad->bna_lock, flags);
2679		/* ret = #of vectors that we got */
2680		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2681			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2682		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2683
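		/* Re-derive the vector requirement from the adjusted queue
		 * counts; if even that exceeds what was granted, give up on
		 * MSI-X and fall back to INTx mode.
		 */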
2684		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2685			 BNAD_MAILBOX_MSIX_VECTORS;
2686
2687		if (bnad->msix_num > ret) {
2688			pci_disable_msix(bnad->pcidev);
2689			goto intx_mode;
2690		}
2691	}
2692
2693	pci_intx(bnad->pcidev, 0);
2694
2695	return;
2696
2697intx_mode:
2698	pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2699
2700	kfree(bnad->msix_table);
2701	bnad->msix_table = NULL;
2702	bnad->msix_num = 0;
2703	spin_lock_irqsave(&bnad->bna_lock, flags);
2704	bnad->cfg_flags &= ~BNAD_CF_MSIX;
2705	bnad_q_num_init(bnad);
2706	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2707}
2708
2709static void
2710bnad_disable_msix(struct bnad *bnad)
2711{
2712	u32 cfg_flags;
2713	unsigned long flags;
2714
2715	spin_lock_irqsave(&bnad->bna_lock, flags);
2716	cfg_flags = bnad->cfg_flags;
2717	if (bnad->cfg_flags & BNAD_CF_MSIX)
2718		bnad->cfg_flags &= ~BNAD_CF_MSIX;
2719	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2720
2721	if (cfg_flags & BNAD_CF_MSIX) {
2722		pci_disable_msix(bnad->pcidev);
2723		kfree(bnad->msix_table);
2724		bnad->msix_table = NULL;
2725	}
2726}
2727
2728/* Netdev entry points */
2729static int
2730bnad_open(struct net_device *netdev)
2731{
2732	int err;
2733	struct bnad *bnad = netdev_priv(netdev);
2734	struct bna_pause_config pause_config;
2735	unsigned long flags;
2736
2737	mutex_lock(&bnad->conf_mutex);
2738
2739	/* Tx */
2740	err = bnad_setup_tx(bnad, 0);
2741	if (err)
2742		goto err_return;
2743
2744	/* Rx */
2745	err = bnad_setup_rx(bnad, 0);
2746	if (err)
2747		goto cleanup_tx;
2748
2749	/* Port */
2750	pause_config.tx_pause = 0;
2751	pause_config.rx_pause = 0;
2752
2753	spin_lock_irqsave(&bnad->bna_lock, flags);
2754	bna_enet_mtu_set(&bnad->bna.enet,
2755			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2756	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2757	bna_enet_enable(&bnad->bna.enet);
2758	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2759
2760	/* Enable broadcast */
2761	bnad_enable_default_bcast(bnad);
2762
2763	/* Restore VLANs, if any */
2764	bnad_restore_vlans(bnad, 0);
2765
2766	/* Set the UCAST address */
2767	spin_lock_irqsave(&bnad->bna_lock, flags);
2768	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2769	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2770
2771	/* Start the stats timer */
2772	bnad_stats_timer_start(bnad);
2773
2774	mutex_unlock(&bnad->conf_mutex);
2775
2776	return 0;
2777
2778cleanup_tx:
2779	bnad_destroy_tx(bnad, 0);
2780
2781err_return:
2782	mutex_unlock(&bnad->conf_mutex);
2783	return err;
2784}
2785
2786static int
2787bnad_stop(struct net_device *netdev)
2788{
2789	struct bnad *bnad = netdev_priv(netdev);
2790	unsigned long flags;
2791
2792	mutex_lock(&bnad->conf_mutex);
2793
2794	/* Stop the stats timer */
2795	bnad_stats_timer_stop(bnad);
2796
2797	init_completion(&bnad->bnad_completions.enet_comp);
2798
2799	spin_lock_irqsave(&bnad->bna_lock, flags);
2800	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2801			bnad_cb_enet_disabled);
2802	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2803
2804	wait_for_completion(&bnad->bnad_completions.enet_comp);
2805
2806	bnad_destroy_tx(bnad, 0);
2807	bnad_destroy_rx(bnad, 0);
2808
2809	/* Synchronize mailbox IRQ */
2810	bnad_mbox_irq_sync(bnad);
2811
2812	mutex_unlock(&bnad->conf_mutex);
2813
2814	return 0;
2815}
2816
2817/* TX */
2818/* Returns 0 for success */
2819static int
2820bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2821		    struct sk_buff *skb, struct bna_txq_entry *txqent)
2822{
2823	u16 flags = 0;
2824	u32 gso_size;
2825	u16 vlan_tag = 0;
2826
2827	if (vlan_tx_tag_present(skb)) {
2828		vlan_tag = (u16)vlan_tx_tag_get(skb);
2829		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2830	}
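	/* When CEE is running, keep only the lower 13 bits of the tag
	 * (DEI + VID) and override the 3-bit priority field with the Tx
	 * object's CEE priority.
	 */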
2831	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2832		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2833				| (vlan_tag & 0x1fff);
2834		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2835	}
2836	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2837
2838	if (skb_is_gso(skb)) {
2839		gso_size = skb_shinfo(skb)->gso_size;
2840		if (unlikely(gso_size > bnad->netdev->mtu)) {
2841			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2842			return -EINVAL;
2843		}
2844		if (unlikely((gso_size + skb_transport_offset(skb) +
2845			      tcp_hdrlen(skb)) >= skb->len)) {
2846			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2847			txqent->hdr.wi.lso_mss = 0;
2848			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2849		} else {
2850			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2851			txqent->hdr.wi.lso_mss = htons(gso_size);
2852		}
2853
2854		if (bnad_tso_prepare(bnad, skb)) {
2855			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2856			return -EINVAL;
2857		}
2858
2859		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2860		txqent->hdr.wi.l4_hdr_size_n_offset =
2861			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2862			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2863	} else {
2864		txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2865		txqent->hdr.wi.lso_mss = 0;
2866
2867		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2868			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2869			return -EINVAL;
2870		}
2871
2872		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2873			__be16 net_proto = vlan_get_protocol(skb);
2874			u8 proto = 0;
2875
2876			if (net_proto == htons(ETH_P_IP))
2877				proto = ip_hdr(skb)->protocol;
2878#ifdef NETIF_F_IPV6_CSUM
2879			else if (net_proto == htons(ETH_P_IPV6)) {
2880				/* nexthdr may not be TCP immediately. */
2881				proto = ipv6_hdr(skb)->nexthdr;
2882			}
2883#endif
2884			if (proto == IPPROTO_TCP) {
2885				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2886				txqent->hdr.wi.l4_hdr_size_n_offset =
2887					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2888					      (0, skb_transport_offset(skb)));
2889
2890				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2891
2892				if (unlikely(skb_headlen(skb) <
2893					    skb_transport_offset(skb) +
2894				    tcp_hdrlen(skb))) {
2895					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2896					return -EINVAL;
2897				}
2898			} else if (proto == IPPROTO_UDP) {
2899				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2900				txqent->hdr.wi.l4_hdr_size_n_offset =
2901					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2902					      (0, skb_transport_offset(skb)));
2903
2904				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2905				if (unlikely(skb_headlen(skb) <
2906					    skb_transport_offset(skb) +
2907				    sizeof(struct udphdr))) {
2908					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2909					return -EINVAL;
2910				}
2911			} else {
2912
2913				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2914				return -EINVAL;
2915			}
2916		} else
2917			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2918	}
2919
2920	txqent->hdr.wi.flags = htons(flags);
2921	txqent->hdr.wi.frame_length = htonl(skb->len);
2922
2923	return 0;
2924}
2925
2926/*
2927 * bnad_start_xmit : Netdev entry point for Transmit
2928 *		     Called under lock held by net_device
2929 */
2930static netdev_tx_t
2931bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2932{
2933	struct bnad *bnad = netdev_priv(netdev);
2934	u32 txq_id = 0;
2935	struct bna_tcb *tcb = NULL;
2936	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2937	u32		prod, q_depth, vect_id;
2938	u32		wis, vectors, len;
2939	int		i;
2940	dma_addr_t		dma_addr;
2941	struct bna_txq_entry *txqent;
2942
2943	len = skb_headlen(skb);
2944
2945	/* Sanity checks for the skb */
2946
2947	if (unlikely(skb->len <= ETH_HLEN)) {
2948		dev_kfree_skb_any(skb);
2949		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2950		return NETDEV_TX_OK;
2951	}
2952	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2953		dev_kfree_skb_any(skb);
2954		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2955		return NETDEV_TX_OK;
2956	}
2957	if (unlikely(len == 0)) {
2958		dev_kfree_skb_any(skb);
2959		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2960		return NETDEV_TX_OK;
2961	}
2962
2963	tcb = bnad->tx_info[0].tcb[txq_id];
2964
2965	/*
2966	 * Takes care of the Tx that is scheduled between clearing the flag
2967	 * and the netif_tx_stop_all_queues() call.
2968	 */
2969	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2970		dev_kfree_skb_any(skb);
2971		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2972		return NETDEV_TX_OK;
2973	}
2974
2975	q_depth = tcb->q_depth;
2976	prod = tcb->producer_index;
2977	unmap_q = tcb->unmap_q;
2978
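	/* With 4 vectors per work item (see below), BNA_TXQ_WI_NEEDED()
	 * rounds up: e.g. a linear area plus 6 frags is 7 vectors and
	 * therefore 2 work items.
	 */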
2979	vectors = 1 + skb_shinfo(skb)->nr_frags;
2980	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
2981
2982	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2983		dev_kfree_skb_any(skb);
2984		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2985		return NETDEV_TX_OK;
2986	}
2987
2988	/* Check for available TxQ resources */
2989	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2990		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2991		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2992			u32 sent;
2993			sent = bnad_txcmpl_process(bnad, tcb);
2994			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2995				bna_ib_ack(tcb->i_dbell, sent);
2996			smp_mb__before_atomic();
2997			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2998		} else {
2999			netif_stop_queue(netdev);
3000			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3001		}
3002
3003		smp_mb();
3004		/*
3005		 * Check again to deal with race condition between
3006		 * netif_stop_queue here, and netif_wake_queue in
3007		 * interrupt handler which is not inside netif tx lock.
3008		 */
3009		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3010			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3011			return NETDEV_TX_BUSY;
3012		} else {
3013			netif_wake_queue(netdev);
3014			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3015		}
3016	}
3017
3018	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3019	head_unmap = &unmap_q[prod];
3020
3021	/* Program the opcode, flags, frame_len, num_vectors in WI */
3022	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3023		dev_kfree_skb_any(skb);
3024		return NETDEV_TX_OK;
3025	}
3026	txqent->hdr.wi.reserved = 0;
3027	txqent->hdr.wi.num_vectors = vectors;
3028
3029	head_unmap->skb = skb;
3030	head_unmap->nvecs = 0;
3031
3032	/* Program the vectors */
3033	unmap = head_unmap;
3034	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3035				  len, DMA_TO_DEVICE);
3036	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3037	txqent->vector[0].length = htons(len);
3038	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3039	head_unmap->nvecs++;
3040
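	/* Vector 0 above maps the linear part of the skb; the loop below
	 * maps each page fragment, starting a new extension work item every
	 * BFI_TX_MAX_VECTORS_PER_WI vectors.
	 */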
3041	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3042		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3043		u32		size = skb_frag_size(frag);
3044
3045		if (unlikely(size == 0)) {
3046			/* Undo the changes starting at tcb->producer_index */
3047			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3048				tcb->producer_index);
3049			dev_kfree_skb_any(skb);
3050			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3051			return NETDEV_TX_OK;
3052		}
3053
3054		len += size;
3055
3056		vect_id++;
3057		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3058			vect_id = 0;
3059			BNA_QE_INDX_INC(prod, q_depth);
3060			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3061			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3062			unmap = &unmap_q[prod];
3063		}
3064
3065		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3066					    0, size, DMA_TO_DEVICE);
3067		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3068		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3069		txqent->vector[vect_id].length = htons(size);
3070		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3071				   dma_addr);
3072		head_unmap->nvecs++;
3073	}
3074
3075	if (unlikely(len != skb->len)) {
3076		/* Undo the changes starting at tcb->producer_index */
3077		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3078		dev_kfree_skb_any(skb);
3079		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3080		return NETDEV_TX_OK;
3081	}
3082
3083	BNA_QE_INDX_INC(prod, q_depth);
3084	tcb->producer_index = prod;
3085
3086	smp_mb();
3087
3088	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3089		return NETDEV_TX_OK;
3090
3091	skb_tx_timestamp(skb);
3092
3093	bna_txq_prod_indx_doorbell(tcb);
3094	smp_mb();
3095
3096	return NETDEV_TX_OK;
3097}
3098
3099/*
3100 * Uses the spin lock to synchronize reading of the stats structures,
3101 * which are written by BNA under the same lock.
3102 */
3103static struct rtnl_link_stats64 *
3104bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3105{
3106	struct bnad *bnad = netdev_priv(netdev);
3107	unsigned long flags;
3108
3109	spin_lock_irqsave(&bnad->bna_lock, flags);
3110
3111	bnad_netdev_qstats_fill(bnad, stats);
3112	bnad_netdev_hwstats_fill(bnad, stats);
3113
3114	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3115
3116	return stats;
3117}
3118
3119static void
3120bnad_set_rx_ucast_fltr(struct bnad *bnad)
3121{
3122	struct net_device *netdev = bnad->netdev;
3123	int uc_count = netdev_uc_count(netdev);
3124	enum bna_cb_status ret;
3125	u8 *mac_list;
3126	struct netdev_hw_addr *ha;
3127	int entry;
3128
3129	if (netdev_uc_empty(bnad->netdev)) {
3130		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3131		return;
3132	}
3133
3134	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3135		goto mode_default;
3136
3137	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3138	if (mac_list == NULL)
3139		goto mode_default;
3140
3141	entry = 0;
3142	netdev_for_each_uc_addr(ha, netdev) {
3143		memcpy(&mac_list[entry * ETH_ALEN],
3144		       &ha->addr[0], ETH_ALEN);
3145		entry++;
3146	}
3147
3148	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
3149			mac_list, NULL);
3150	kfree(mac_list);
3151
3152	if (ret != BNA_CB_SUCCESS)
3153		goto mode_default;
3154
3155	return;
3156
3157	/* ucast packets not in UCAM are routed to default function */
3158mode_default:
3159	bnad->cfg_flags |= BNAD_CF_DEFAULT;
3160	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3161}
3162
3163static void
3164bnad_set_rx_mcast_fltr(struct bnad *bnad)
3165{
3166	struct net_device *netdev = bnad->netdev;
3167	int mc_count = netdev_mc_count(netdev);
3168	enum bna_cb_status ret;
3169	u8 *mac_list;
3170
3171	if (netdev->flags & IFF_ALLMULTI)
3172		goto mode_allmulti;
3173
3174	if (netdev_mc_empty(netdev))
3175		return;
3176
3177	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3178		goto mode_allmulti;
3179
3180	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3181
3182	if (mac_list == NULL)
3183		goto mode_allmulti;
3184
3185	memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
3186
3187	/* copy rest of the MCAST addresses */
3188	bnad_netdev_mc_list_get(netdev, mac_list);
3189	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3190			mac_list, NULL);
3191	kfree(mac_list);
3192
3193	if (ret != BNA_CB_SUCCESS)
3194		goto mode_allmulti;
3195
3196	return;
3197
3198mode_allmulti:
3199	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3200	bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
3201}
3202
3203void
3204bnad_set_rx_mode(struct net_device *netdev)
3205{
3206	struct bnad *bnad = netdev_priv(netdev);
3207	enum bna_rxmode new_mode, mode_mask;
3208	unsigned long flags;
3209
3210	spin_lock_irqsave(&bnad->bna_lock, flags);
3211
3212	if (bnad->rx_info[0].rx == NULL) {
3213		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3214		return;
3215	}
3216
3217	/* clear bnad flags to update it with new settings */
3218	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3219			BNAD_CF_ALLMULTI);
3220
3221	new_mode = 0;
3222	if (netdev->flags & IFF_PROMISC) {
3223		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3224		bnad->cfg_flags |= BNAD_CF_PROMISC;
3225	} else {
3226		bnad_set_rx_mcast_fltr(bnad);
3227
3228		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3229			new_mode |= BNA_RXMODE_ALLMULTI;
3230
3231		bnad_set_rx_ucast_fltr(bnad);
3232
3233		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3234			new_mode |= BNA_RXMODE_DEFAULT;
3235	}
3236
3237	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3238			BNA_RXMODE_ALLMULTI;
3239	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
3240
3241	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3242}
3243
3244/*
3245 * bna_lock is used to sync writes to netdev->addr
3246 * conf_lock cannot be used since this call may be made
3247 * in a non-blocking context.
3248 */
3249static int
3250bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3251{
3252	int err;
3253	struct bnad *bnad = netdev_priv(netdev);
3254	struct sockaddr *sa = (struct sockaddr *)mac_addr;
3255	unsigned long flags;
3256
3257	spin_lock_irqsave(&bnad->bna_lock, flags);
3258
3259	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3260
3261	if (!err)
3262		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3263
3264	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3265
3266	return err;
3267}
3268
3269static int
3270bnad_mtu_set(struct bnad *bnad, int frame_size)
3271{
3272	unsigned long flags;
3273
3274	init_completion(&bnad->bnad_completions.mtu_comp);
3275
3276	spin_lock_irqsave(&bnad->bna_lock, flags);
3277	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3278	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3279
3280	wait_for_completion(&bnad->bnad_completions.mtu_comp);
3281
3282	return bnad->bnad_completions.mtu_comp_status;
3283}
3284
3285static int
3286bnad_change_mtu(struct net_device *netdev, int new_mtu)
3287{
3288	int err, mtu;
3289	struct bnad *bnad = netdev_priv(netdev);
3290	u32 rx_count = 0, frame, new_frame;
3291
3292	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3293		return -EINVAL;
3294
3295	mutex_lock(&bnad->conf_mutex);
3296
3297	mtu = netdev->mtu;
3298	netdev->mtu = new_mtu;
3299
3300	frame = BNAD_FRAME_SIZE(mtu);
3301	new_frame = BNAD_FRAME_SIZE(new_mtu);
3302
3303	/* check if multi-buffer needs to be enabled */
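	/* Rx only needs to be rebuilt when the frame size crosses the 4K
	 * boundary, which is where bnad_init_rx_config() switches the large
	 * RxQ in and out of multi-buffer mode.
	 */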
3304	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3305	    netif_running(bnad->netdev)) {
3306		/* only when transition is over 4K */
3307		if ((frame <= 4096 && new_frame > 4096) ||
3308		    (frame > 4096 && new_frame <= 4096))
3309			rx_count = bnad_reinit_rx(bnad);
3310	}
3311
3312	/* A non-zero rx_count means the Rx objects were re-created for the
3313	 * new frame size; the enet MTU still has to be programmed below.
3314	 */
3315	err = bnad_mtu_set(bnad, new_frame);
3316	if (err)
3317		err = -EBUSY;
3318
3319	mutex_unlock(&bnad->conf_mutex);
3320	return err;
3321}
3322
3323static int
3324bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3325{
3326	struct bnad *bnad = netdev_priv(netdev);
3327	unsigned long flags;
3328
3329	if (!bnad->rx_info[0].rx)
3330		return 0;
3331
3332	mutex_lock(&bnad->conf_mutex);
3333
3334	spin_lock_irqsave(&bnad->bna_lock, flags);
3335	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3336	set_bit(vid, bnad->active_vlans);
3337	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3338
3339	mutex_unlock(&bnad->conf_mutex);
3340
3341	return 0;
3342}
3343
3344static int
3345bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3346{
3347	struct bnad *bnad = netdev_priv(netdev);
3348	unsigned long flags;
3349
3350	if (!bnad->rx_info[0].rx)
3351		return 0;
3352
3353	mutex_lock(&bnad->conf_mutex);
3354
3355	spin_lock_irqsave(&bnad->bna_lock, flags);
3356	clear_bit(vid, bnad->active_vlans);
3357	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3358	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3359
3360	mutex_unlock(&bnad->conf_mutex);
3361
3362	return 0;
3363}
3364
3365static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3366{
3367	struct bnad *bnad = netdev_priv(dev);
3368	netdev_features_t changed = features ^ dev->features;
3369
3370	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3371		unsigned long flags;
3372
3373		spin_lock_irqsave(&bnad->bna_lock, flags);
3374
3375		if (features & NETIF_F_HW_VLAN_CTAG_RX)
3376			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3377		else
3378			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3379
3380		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3381	}
3382
3383	return 0;
3384}
3385
3386#ifdef CONFIG_NET_POLL_CONTROLLER
3387static void
3388bnad_netpoll(struct net_device *netdev)
3389{
3390	struct bnad *bnad = netdev_priv(netdev);
3391	struct bnad_rx_info *rx_info;
3392	struct bnad_rx_ctrl *rx_ctrl;
3393	u32 curr_mask;
3394	int i, j;
3395
3396	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
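		/* In INTx mode, mask interrupts (bna_intx_disable() hands the
		 * current mask back in curr_mask), run the ISR inline, then
		 * restore the saved mask via bna_intx_enable().
		 */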
3397		bna_intx_disable(&bnad->bna, curr_mask);
3398		bnad_isr(bnad->pcidev->irq, netdev);
3399		bna_intx_enable(&bnad->bna, curr_mask);
3400	} else {
3401		/*
3402		 * Tx processing may happen in sending context, so no need
3403		 * to explicitly process completions here
3404		 */
3405
3406		/* Rx processing */
3407		for (i = 0; i < bnad->num_rx; i++) {
3408			rx_info = &bnad->rx_info[i];
3409			if (!rx_info->rx)
3410				continue;
3411			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3412				rx_ctrl = &rx_info->rx_ctrl[j];
3413				if (rx_ctrl->ccb)
3414					bnad_netif_rx_schedule_poll(bnad,
3415							    rx_ctrl->ccb);
3416			}
3417		}
3418	}
3419}
3420#endif
3421
3422static const struct net_device_ops bnad_netdev_ops = {
3423	.ndo_open		= bnad_open,
3424	.ndo_stop		= bnad_stop,
3425	.ndo_start_xmit		= bnad_start_xmit,
3426	.ndo_get_stats64		= bnad_get_stats64,
3427	.ndo_set_rx_mode	= bnad_set_rx_mode,
3428	.ndo_validate_addr      = eth_validate_addr,
3429	.ndo_set_mac_address    = bnad_set_mac_address,
3430	.ndo_change_mtu		= bnad_change_mtu,
3431	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3432	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3433	.ndo_set_features	= bnad_set_features,
3434#ifdef CONFIG_NET_POLL_CONTROLLER
3435	.ndo_poll_controller    = bnad_netpoll
3436#endif
3437};
3438
3439static void
3440bnad_netdev_init(struct bnad *bnad, bool using_dac)
3441{
3442	struct net_device *netdev = bnad->netdev;
3443
3444	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3445		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3446		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3447		NETIF_F_HW_VLAN_CTAG_RX;
3448
3449	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3450		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3451		NETIF_F_TSO | NETIF_F_TSO6;
3452
3453	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3454
3455	if (using_dac)
3456		netdev->features |= NETIF_F_HIGHDMA;
3457
3458	netdev->mem_start = bnad->mmio_start;
3459	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3460
3461	netdev->netdev_ops = &bnad_netdev_ops;
3462	bnad_set_ethtool_ops(netdev);
3463}
3464
3465/*
3466 * 1. Initialize the bnad structure
3467 * 2. Setup netdev pointer in pci_dev
3468 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3469 * 4. Initialize work queue.
3470 */
3471static int
3472bnad_init(struct bnad *bnad,
3473	  struct pci_dev *pdev, struct net_device *netdev)
3474{
3475	unsigned long flags;
3476
3477	SET_NETDEV_DEV(netdev, &pdev->dev);
3478	pci_set_drvdata(pdev, netdev);
3479
3480	bnad->netdev = netdev;
3481	bnad->pcidev = pdev;
3482	bnad->mmio_start = pci_resource_start(pdev, 0);
3483	bnad->mmio_len = pci_resource_len(pdev, 0);
3484	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3485	if (!bnad->bar0) {
3486		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3487		return -ENOMEM;
3488	}
3489	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3490	       (unsigned long long) bnad->mmio_len);
3491
3492	spin_lock_irqsave(&bnad->bna_lock, flags);
3493	if (!bnad_msix_disable)
3494		bnad->cfg_flags = BNAD_CF_MSIX;
3495
3496	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3497
3498	bnad_q_num_init(bnad);
3499	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3500
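	/* Vector budget: one vector per TxQ, one per Rx path, plus
	 * BNAD_MAILBOX_MSIX_VECTORS for mailbox events.
	 */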
3501	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3502		(bnad->num_rx * bnad->num_rxp_per_rx) +
3503			 BNAD_MAILBOX_MSIX_VECTORS;
3504
3505	bnad->txq_depth = BNAD_TXQ_DEPTH;
3506	bnad->rxq_depth = BNAD_RXQ_DEPTH;
3507
3508	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3509	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3510
3511	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3512	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3513	if (!bnad->work_q) {
3514		iounmap(bnad->bar0);
3515		return -ENOMEM;
3516	}
3517
3518	return 0;
3519}
3520
3521/*
3522 * Must be called after bnad_pci_uninit()
3523 * so that iounmap() and pci_set_drvdata(NULL)
3524 * happens only after PCI uninitialization.
3525 */
3526static void
3527bnad_uninit(struct bnad *bnad)
3528{
3529	if (bnad->work_q) {
3530		flush_workqueue(bnad->work_q);
3531		destroy_workqueue(bnad->work_q);
3532		bnad->work_q = NULL;
3533	}
3534
3535	if (bnad->bar0)
3536		iounmap(bnad->bar0);
3537}
3538
3539/*
3540 * Initialize locks
3541	a) Per-ioceth mutex used for serializing configuration
3542	   changes from the OS interface
3543	b) Spin lock used to protect the bna state machine
3544 */
3545static void
3546bnad_lock_init(struct bnad *bnad)
3547{
3548	spin_lock_init(&bnad->bna_lock);
3549	mutex_init(&bnad->conf_mutex);
3550	mutex_init(&bnad_list_mutex);
3551}
3552
3553static void
3554bnad_lock_uninit(struct bnad *bnad)
3555{
3556	mutex_destroy(&bnad->conf_mutex);
3557	mutex_destroy(&bnad_list_mutex);
3558}
3559
3560/* PCI Initialization */
3561static int
3562bnad_pci_init(struct bnad *bnad,
3563	      struct pci_dev *pdev, bool *using_dac)
3564{
3565	int err;
3566
3567	err = pci_enable_device(pdev);
3568	if (err)
3569		return err;
3570	err = pci_request_regions(pdev, BNAD_NAME);
3571	if (err)
3572		goto disable_device;
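	/* Try a 64-bit DMA mask first and fall back to 32-bit; using_dac
	 * records the outcome so bnad_netdev_init() can decide whether to
	 * advertise NETIF_F_HIGHDMA.
	 */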
3573	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3574		*using_dac = true;
3575	} else {
3576		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3577		if (err)
3578			goto release_regions;
3579		*using_dac = false;
3580	}
3581	pci_set_master(pdev);
3582	return 0;
3583
3584release_regions:
3585	pci_release_regions(pdev);
3586disable_device:
3587	pci_disable_device(pdev);
3588
3589	return err;
3590}
3591
3592static void
3593bnad_pci_uninit(struct pci_dev *pdev)
3594{
3595	pci_release_regions(pdev);
3596	pci_disable_device(pdev);
3597}
3598
3599static int
3600bnad_pci_probe(struct pci_dev *pdev,
3601		const struct pci_device_id *pcidev_id)
3602{
3603	bool	using_dac;
3604	int	err;
3605	struct bnad *bnad;
3606	struct bna *bna;
3607	struct net_device *netdev;
3608	struct bfa_pcidev pcidev_info;
3609	unsigned long flags;
3610
3611	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3612	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3613
3614	mutex_lock(&bnad_fwimg_mutex);
3615	if (!cna_get_firmware_buf(pdev)) {
3616		mutex_unlock(&bnad_fwimg_mutex);
3617		pr_warn("Failed to load Firmware Image!\n");
3618		return -ENODEV;
3619	}
3620	mutex_unlock(&bnad_fwimg_mutex);
3621
3622	/*
3623	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3624	 * bnad = netdev_priv(netdev)
3625	 */
3626	netdev = alloc_etherdev(sizeof(struct bnad));
3627	if (!netdev) {
3628		err = -ENOMEM;
3629		return err;
3630	}
3631	bnad = netdev_priv(netdev);
3632	bnad_lock_init(bnad);
3633	bnad_add_to_list(bnad);
3634
3635	mutex_lock(&bnad->conf_mutex);
3636	/*
3637	 * PCI initialization
3638	 *	Output : using_dac = true for 64-bit DMA,
3639	 *		 false for 32-bit DMA
3640	 */
3641	using_dac = false;
3642	err = bnad_pci_init(bnad, pdev, &using_dac);
3643	if (err)
3644		goto unlock_mutex;
3645
3646	/*
3647	 * Initialize bnad structure
3648	 * Setup relation between pci_dev & netdev
3649	 */
3650	err = bnad_init(bnad, pdev, netdev);
3651	if (err)
3652		goto pci_uninit;
3653
3654	/* Initialize netdev structure, set up ethtool ops */
3655	bnad_netdev_init(bnad, using_dac);
3656
3657	/* Set link to down state */
3658	netif_carrier_off(netdev);
3659
3660	/* Setup the debugfs node for this bfad */
3661	if (bna_debugfs_enable)
3662		bnad_debugfs_init(bnad);
3663
3664	/* Get resource requirement form bna */
3665	spin_lock_irqsave(&bnad->bna_lock, flags);
3666	bna_res_req(&bnad->res_info[0]);
3667	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3668
3669	/* Allocate resources from bna */
3670	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3671	if (err)
3672		goto drv_uninit;
3673
3674	bna = &bnad->bna;
3675
3676	/* Setup pcidev_info for bna_init() */
3677	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3678	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3679	pcidev_info.device_id = bnad->pcidev->device;
3680	pcidev_info.pci_bar_kva = bnad->bar0;
3681
3682	spin_lock_irqsave(&bnad->bna_lock, flags);
3683	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3684	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3685
3686	bnad->stats.bna_stats = &bna->stats;
3687
3688	bnad_enable_msix(bnad);
3689	err = bnad_mbox_irq_alloc(bnad);
3690	if (err)
3691		goto res_free;
3692
3693	/* Set up timers */
3694	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3695				((unsigned long)bnad));
3696	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3697				((unsigned long)bnad));
3698	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3699				((unsigned long)bnad));
3700	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3701				((unsigned long)bnad));
3702
3703	/* Now start the timer before calling IOC */
3704	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3705		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3706
3707	/*
3708	 * Start the chip
3709	 * If the call back comes with error, we bail out.
3710	 * This is a catastrophic error.
3711	 */
3712	err = bnad_ioceth_enable(bnad);
3713	if (err) {
3714		pr_err("BNA: Initialization failed err=%d\n",
3715		       err);
3716		goto probe_success;
3717	}
3718
3719	spin_lock_irqsave(&bnad->bna_lock, flags);
3720	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3721		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3722		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3723			bna_attr(bna)->num_rxp - 1);
3724		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3725			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3726			err = -EIO;
3727	}
3728	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3729	if (err)
3730		goto disable_ioceth;
3731
3732	spin_lock_irqsave(&bnad->bna_lock, flags);
3733	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3734	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3735
3736	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3737	if (err) {
3738		err = -EIO;
3739		goto disable_ioceth;
3740	}
3741
3742	spin_lock_irqsave(&bnad->bna_lock, flags);
3743	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3744	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3745
3746	/* Get the burnt-in mac */
3747	spin_lock_irqsave(&bnad->bna_lock, flags);
3748	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3749	bnad_set_netdev_perm_addr(bnad);
3750	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3751
3752	mutex_unlock(&bnad->conf_mutex);
3753
3754	/* Finally, register with the net_device layer */
3755	err = register_netdev(netdev);
3756	if (err) {
3757		pr_err("BNA : Registering with netdev failed\n");
3758		goto probe_uninit;
3759	}
3760	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3761
3762	return 0;
3763
3764probe_success:
3765	mutex_unlock(&bnad->conf_mutex);
3766	return 0;
3767
3768probe_uninit:
3769	mutex_lock(&bnad->conf_mutex);
3770	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3771disable_ioceth:
3772	bnad_ioceth_disable(bnad);
3773	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3774	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3775	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3776	spin_lock_irqsave(&bnad->bna_lock, flags);
3777	bna_uninit(bna);
3778	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3779	bnad_mbox_irq_free(bnad);
3780	bnad_disable_msix(bnad);
3781res_free:
3782	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3783drv_uninit:
3784	/* Remove the debugfs node for this bnad */
3785	kfree(bnad->regdata);
3786	bnad_debugfs_uninit(bnad);
3787	bnad_uninit(bnad);
3788pci_uninit:
3789	bnad_pci_uninit(pdev);
3790unlock_mutex:
3791	mutex_unlock(&bnad->conf_mutex);
3792	bnad_remove_from_list(bnad);
3793	bnad_lock_uninit(bnad);
3794	free_netdev(netdev);
3795	return err;
3796}
3797
3798static void
3799bnad_pci_remove(struct pci_dev *pdev)
3800{
3801	struct net_device *netdev = pci_get_drvdata(pdev);
3802	struct bnad *bnad;
3803	struct bna *bna;
3804	unsigned long flags;
3805
3806	if (!netdev)
3807		return;
3808
3809	pr_info("%s bnad_pci_remove\n", netdev->name);
3810	bnad = netdev_priv(netdev);
3811	bna = &bnad->bna;
3812
3813	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3814		unregister_netdev(netdev);
3815
3816	mutex_lock(&bnad->conf_mutex);
3817	bnad_ioceth_disable(bnad);
3818	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3819	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3820	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3821	spin_lock_irqsave(&bnad->bna_lock, flags);
3822	bna_uninit(bna);
3823	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3824
3825	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3826	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3827	bnad_mbox_irq_free(bnad);
3828	bnad_disable_msix(bnad);
3829	bnad_pci_uninit(pdev);
3830	mutex_unlock(&bnad->conf_mutex);
3831	bnad_remove_from_list(bnad);
3832	bnad_lock_uninit(bnad);
3833	/* Remove the debugfs node for this bnad */
3834	kfree(bnad->regdata);
3835	bnad_debugfs_uninit(bnad);
3836	bnad_uninit(bnad);
3837	free_netdev(netdev);
3838}
3839
3840static const struct pci_device_id bnad_pci_id_table[] = {
3841	{
3842		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3843			PCI_DEVICE_ID_BROCADE_CT),
3844		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3845		.class_mask =  0xffff00
3846	},
3847	{
3848		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3849			BFA_PCI_DEVICE_ID_CT2),
3850		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3851		.class_mask =  0xffff00
3852	},
3853	{0,  },
3854};
3855
3856MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3857
3858static struct pci_driver bnad_pci_driver = {
3859	.name = BNAD_NAME,
3860	.id_table = bnad_pci_id_table,
3861	.probe = bnad_pci_probe,
3862	.remove = bnad_pci_remove,
3863};
3864
3865static int __init
3866bnad_module_init(void)
3867{
3868	int err;
3869
3870	pr_info("Brocade 10G Ethernet driver - version: %s\n",
3871			BNAD_VERSION);
3872
3873	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3874
3875	err = pci_register_driver(&bnad_pci_driver);
3876	if (err < 0) {
3877		pr_err("bna : PCI registration failed in module init "
3878		       "(%d)\n", err);
3879		return err;
3880	}
3881
3882	return 0;
3883}
3884
3885static void __exit
3886bnad_module_exit(void)
3887{
3888	pci_unregister_driver(&bnad_pci_driver);
3889	release_firmware(bfi_fw);
3890}
3891
3892module_init(bnad_module_init);
3893module_exit(bnad_module_exit);
3894
3895MODULE_AUTHOR("Brocade");
3896MODULE_LICENSE("GPL");
3897MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3898MODULE_VERSION(BNAD_VERSION);
3899MODULE_FIRMWARE(CNA_FW_FILE_CT);
3900MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3901