/********************************************************************
 Filename:      via-ircc.c
 Version:       1.0
 Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
 Author:        VIA Technologies,inc
 Date  :	08/06/2003

Copyright (c) 1998-2003 VIA Technologies, Inc.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, see <http://www.gnu.org/licenses/>.

F01 Oct/02/02: Modify code for V0.11 (move out back-to-back transfer)
F02 Oct/28/02: Add SB device ID for 3147 and 3177.
 Comment :
       Jul/09/2002 : only implement two kinds of dongle currently.
       Oct/02/2002 : work on VT8231 and VT8233.
       Aug/06/2003 : change driver format to PCI driver.

2004-02-16: <sda@bdit.de>
- Removed unneeded 'legacy' PCI stuff.
- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
- On speed change from core, don't send SIR frame with new speed.
  Use current speed and change speeds later.
- Make module-param dongle_id actually work.
- New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
  Tested with home-grown PCB on EPIA boards.
- Code cleanup.

 ********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <linux/pm.h>

#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>

#include "via-ircc.h"

#define VIA_MODULE_NAME "via-ircc"
#define CHIP_IO_EXTENT 0x40	/* size of the chip's I/O port window */

static char *driver_name = VIA_MODULE_NAME;

/* Module parameters */
static int qos_mtt_bits = 0x07;	/* minimum turn time: 1 ms or more */
static int dongle_id = 0;	/* default: probe */

/* We can't guess the type of connected dongle, user *must* supply it. */
module_param(dongle_id, int, 0);
77
/* Forward declarations for the driver's internal functions */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
			 unsigned int id);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase);
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
						struct net_device *dev);
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
						struct net_device *dev);
static void via_hw_init(struct via_ircc_cb *self);
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
static int via_ircc_is_receiving(struct via_ircc_cb *self);
static int via_ircc_read_dongle_id(int iobase);

static int via_ircc_net_open(struct net_device *dev);
static int via_ircc_net_close(struct net_device *dev);
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
			      int cmd);
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id);
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
static void hwreset(struct via_ircc_cb *self);
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
static void via_remove_one(struct pci_dev *pdev);
106
107/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
/*
 * Crude busy-wait: each dummy read of I/O port 0x80 takes roughly 1 us
 * on x86, so this delays for about 'udelay' microseconds.
 * FIXME : Should use udelay() instead, even if we are x86 only - Jean II
 */
static void iodelay(int udelay)
{
	int i;

	/* Discard the read value; the port access itself is the delay.
	 * (The original stored it in a local that was never used.) */
	for (i = 0; i < udelay; i++)
		inb(0x80);
}
117
/*
 * PCI IDs of the supported VIA south bridges.  driver_data (last field)
 * is only an index; chip type is determined at probe time via LPC regs.
 */
static const struct pci_device_id via_pci_tbl[] = {
	{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
	{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
	{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
	{ PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
	{ PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci,via_pci_tbl);
128
129
/* PCI driver glue: probe/remove entry points registered with the PCI core */
static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= via_remove_one,
};
136
137
138/*
139 * Function via_ircc_init ()
140 *
141 *    Initialize chip. Just find out chip type and resource.
142 */
143static int __init via_ircc_init(void)
144{
145	int rc;
146
147	IRDA_DEBUG(3, "%s()\n", __func__);
148
149	rc = pci_register_driver(&via_driver);
150	if (rc < 0) {
151		IRDA_DEBUG(0, "%s(): error rc = %d, returning  -ENODEV...\n",
152			   __func__, rc);
153		return -ENODEV;
154	}
155	return 0;
156}
157
158static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
159{
160	int rc;
161        u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
162	u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
163	chipio_t info;
164
165	IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
166
167	rc = pci_enable_device (pcidev);
168	if (rc) {
169		IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
170		return -ENODEV;
171	}
172
173	// South Bridge exist
174        if ( ReadLPCReg(0x20) != 0x3C )
175		Chipset=0x3096;
176	else
177		Chipset=0x3076;
178
179	if (Chipset==0x3076) {
180		IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
181
182		WriteLPCReg(7,0x0c );
183		temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
184		if((temp&0x01)==1) {   // BIOS close or no FIR
185			WriteLPCReg(0x1d, 0x82 );
186			WriteLPCReg(0x23,0x18);
187			temp=ReadLPCReg(0xF0);
188			if((temp&0x01)==0) {
189				temp=(ReadLPCReg(0x74)&0x03);    //DMA
190				FirDRQ0=temp + 4;
191				temp=(ReadLPCReg(0x74)&0x0C) >> 2;
192				FirDRQ1=temp + 4;
193			} else {
194				temp=(ReadLPCReg(0x74)&0x0C) >> 2;    //DMA
195				FirDRQ0=temp + 4;
196				FirDRQ1=FirDRQ0;
197			}
198			FirIRQ=(ReadLPCReg(0x70)&0x0f);		//IRQ
199			FirIOBase=ReadLPCReg(0x60 ) << 8;	//IO Space :high byte
200			FirIOBase=FirIOBase| ReadLPCReg(0x61) ;	//low byte
201			FirIOBase=FirIOBase  ;
202			info.fir_base=FirIOBase;
203			info.irq=FirIRQ;
204			info.dma=FirDRQ1;
205			info.dma2=FirDRQ0;
206			pci_read_config_byte(pcidev,0x40,&bTmp);
207			pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
208			pci_read_config_byte(pcidev,0x42,&bTmp);
209			pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
210			pci_write_config_byte(pcidev,0x5a,0xc0);
211			WriteLPCReg(0x28, 0x70 );
212			rc = via_ircc_open(pcidev, &info, 0x3076);
213		} else
214			rc = -ENODEV; //IR not turn on
215	} else { //Not VT1211
216		IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
217
218		pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
219		if((bTmp&0x01)==1) {  // BIOS enable FIR
220			//Enable Double DMA clock
221			pci_read_config_byte(pcidev,0x42,&oldPCI_40);
222			pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
223			pci_read_config_byte(pcidev,0x40,&oldPCI_40);
224			pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
225			pci_read_config_byte(pcidev,0x44,&oldPCI_44);
226			pci_write_config_byte(pcidev,0x44,0x4e);
227  //---------- read configuration from Function0 of south bridge
228			if((bTmp&0x02)==0) {
229				pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
230				FirDRQ0 = (bTmp1 & 0x30) >> 4;
231				pci_read_config_byte(pcidev,0x44,&bTmp1);
232				FirDRQ1 = (bTmp1 & 0xc0) >> 6;
233			} else  {
234				pci_read_config_byte(pcidev,0x44,&bTmp1);    //DMA
235				FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
236				FirDRQ1=0;
237			}
238			pci_read_config_byte(pcidev,0x47,&bTmp1);  //IRQ
239			FirIRQ = bTmp1 & 0x0f;
240
241			pci_read_config_byte(pcidev,0x69,&bTmp);
242			FirIOBase = bTmp << 8;//hight byte
243			pci_read_config_byte(pcidev,0x68,&bTmp);
244			FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
245  //-------------------------
246			info.fir_base=FirIOBase;
247			info.irq=FirIRQ;
248			info.dma=FirDRQ1;
249			info.dma2=FirDRQ0;
250			rc = via_ircc_open(pcidev, &info, 0x3096);
251		} else
252			rc = -ENODEV; //IR not turn on !!!!!
253	}//Not VT1211
254
255	IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
256	return rc;
257}
258
259static void __exit via_ircc_cleanup(void)
260{
261	IRDA_DEBUG(3, "%s()\n", __func__);
262
263	/* Cleanup all instances of the driver */
264	pci_unregister_driver (&via_driver);
265}
266
/* netdev ops used while in SIR mode (slow, async-wrapped frames) */
static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
/* netdev ops used while in MIR/FIR mode; only the xmit handler differs */
static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
279
280/*
281 * Function via_ircc_open(pdev, iobase, irq)
282 *
283 *    Open driver instance
284 *
285 */
286static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
287{
288	struct net_device *dev;
289	struct via_ircc_cb *self;
290	int err;
291
292	IRDA_DEBUG(3, "%s()\n", __func__);
293
294	/* Allocate new instance of the driver */
295	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
296	if (dev == NULL)
297		return -ENOMEM;
298
299	self = netdev_priv(dev);
300	self->netdev = dev;
301	spin_lock_init(&self->lock);
302
303	pci_set_drvdata(pdev, self);
304
305	/* Initialize Resource */
306	self->io.cfg_base = info->cfg_base;
307	self->io.fir_base = info->fir_base;
308	self->io.irq = info->irq;
309	self->io.fir_ext = CHIP_IO_EXTENT;
310	self->io.dma = info->dma;
311	self->io.dma2 = info->dma2;
312	self->io.fifo_size = 32;
313	self->chip_id = id;
314	self->st_fifo.len = 0;
315	self->RxDataReady = 0;
316
317	/* Reserve the ioports that we need */
318	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
319		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
320			   __func__, self->io.fir_base);
321		err = -ENODEV;
322		goto err_out1;
323	}
324
325	/* Initialize QoS for this device */
326	irda_init_max_qos_capabilies(&self->qos);
327
328	/* Check if user has supplied the dongle id or not */
329	if (!dongle_id)
330		dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
331	self->io.dongle_id = dongle_id;
332
333	/* The only value we must override it the baudrate */
334	/* Maximum speeds and capabilities are dongle-dependent. */
335	switch( self->io.dongle_id ){
336	case 0x0d:
337		self->qos.baud_rate.bits =
338		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
339		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
340		break;
341	default:
342		self->qos.baud_rate.bits =
343		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
344		break;
345	}
346
347	/* Following was used for testing:
348	 *
349	 *   self->qos.baud_rate.bits = IR_9600;
350	 *
351	 * Is is no good, as it prohibits (error-prone) speed-changes.
352	 */
353
354	self->qos.min_turn_time.bits = qos_mtt_bits;
355	irda_qos_bits_to_value(&self->qos);
356
357	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
358	self->rx_buff.truesize = 14384 + 2048;
359	self->tx_buff.truesize = 14384 + 2048;
360
361	/* Allocate memory if needed */
362	self->rx_buff.head =
363		dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
364				    &self->rx_buff_dma, GFP_KERNEL);
365	if (self->rx_buff.head == NULL) {
366		err = -ENOMEM;
367		goto err_out2;
368	}
369
370	self->tx_buff.head =
371		dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
372				    &self->tx_buff_dma, GFP_KERNEL);
373	if (self->tx_buff.head == NULL) {
374		err = -ENOMEM;
375		goto err_out3;
376	}
377
378	self->rx_buff.in_frame = FALSE;
379	self->rx_buff.state = OUTSIDE_FRAME;
380	self->tx_buff.data = self->tx_buff.head;
381	self->rx_buff.data = self->rx_buff.head;
382
383	/* Reset Tx queue info */
384	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
385	self->tx_fifo.tail = self->tx_buff.head;
386
387	/* Override the network functions we need to use */
388	dev->netdev_ops = &via_ircc_sir_ops;
389
390	err = register_netdev(dev);
391	if (err)
392		goto err_out4;
393
394	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);
395
396	/* Initialise the hardware..
397	*/
398	self->io.speed = 9600;
399	via_hw_init(self);
400	return 0;
401 err_out4:
402	dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
403			  self->tx_buff.head, self->tx_buff_dma);
404 err_out3:
405	dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
406			  self->rx_buff.head, self->rx_buff_dma);
407 err_out2:
408	release_region(self->io.fir_base, self->io.fir_ext);
409 err_out1:
410	free_netdev(dev);
411	return err;
412}
413
/*
 * Function via_remove_one(pdev)
 *
 *    Close driver instance: reset the chip, unregister the netdevice,
 *    release I/O ports and the coherent DMA buffers, then disable the
 *    PCI device.  Mirrors the acquisition order of via_ircc_open().
 */
static void via_remove_one(struct pci_dev *pdev)
{
	struct via_ircc_cb *self = pci_get_drvdata(pdev);
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);	//hardware reset.
	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	/* Free the coherent DMA buffers allocated in via_ircc_open() */
	if (self->tx_buff.head)
		dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	pci_disable_device(pdev);
}
448
/*
 * Function via_hw_init(self)
 *
 *    Bring the chip to a known state: configure FIFO interrupts, do a
 *    full reset, then default to 9600 baud SIR and program the dongle
 *    accordingly.  The register write order below is deliberate — do
 *    not reorder.
 *
 * Formerly via_ircc_setup
 */
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	SetMaxRxPacketSize(iobase, 0x0fff);	//set to max:4095
	// FIFO Init
	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	/* LPC reg 0x20 == 0x3c identifies the VT1211 (see via_init_one) */
	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);	// for VT1211
	/* Int Init */
	EnRXSpecInt(iobase, ON);

	/* The following is basically hwreset */
	/* If this is the case, why not just call hwreset() ? Jean II */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* IR off while configuring */
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	/* Program the dongle for the default SIR speed */
	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	WriteReg(iobase, I_ST_CT_0, 0x80);	/* enable IR */
}
506
/*
 * Function via_ircc_read_dongle_id (void)
 *
 *    Hardware dongle probing is not implemented; complain and fall
 *    back to a fixed default (9 = IBM31T1100 / Temic TFDS6x00).
 */
static int via_ircc_read_dongle_id(int iobase)
{
	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
	return 9;	/* Default to IBM */
}
516
517/*
518 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
519 *    Change speed of the attach dongle
520 *    only implement two type of dongle currently.
521 */
522static void via_ircc_change_dongle_speed(int iobase, int speed,
523					 int dongle_id)
524{
525	u8 mode = 0;
526
527	/* speed is unused, as we use IsSIROn()/IsMIROn() */
528	speed = speed;
529
530	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
531		   __func__, speed, iobase, dongle_id);
532
533	switch (dongle_id) {
534
535		/* Note: The dongle_id's listed here are derived from
536		 * nsc-ircc.c */
537
538	case 0x08:		/* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
539		UseOneRX(iobase, ON);	// use one RX pin   RX1,RX2
540		InvertTX(iobase, OFF);
541		InvertRX(iobase, OFF);
542
543		EnRX2(iobase, ON);	//sir to rx2
544		EnGPIOtoRX2(iobase, OFF);
545
546		if (IsSIROn(iobase)) {	//sir
547			// Mode select Off
548			SlowIRRXLowActive(iobase, ON);
549			udelay(1000);
550			SlowIRRXLowActive(iobase, OFF);
551		} else {
552			if (IsMIROn(iobase)) {	//mir
553				// Mode select On
554				SlowIRRXLowActive(iobase, OFF);
555				udelay(20);
556			} else {	// fir
557				if (IsFIROn(iobase)) {	//fir
558					// Mode select On
559					SlowIRRXLowActive(iobase, OFF);
560					udelay(20);
561				}
562			}
563		}
564		break;
565
566	case 0x09:		/* IBM31T1100 or Temic TFDS6000/TFDS6500 */
567		UseOneRX(iobase, ON);	//use ONE RX....RX1
568		InvertTX(iobase, OFF);
569		InvertRX(iobase, OFF);	// invert RX pin
570
571		EnRX2(iobase, ON);
572		EnGPIOtoRX2(iobase, OFF);
573		if (IsSIROn(iobase)) {	//sir
574			// Mode select On
575			SlowIRRXLowActive(iobase, ON);
576			udelay(20);
577			// Mode select Off
578			SlowIRRXLowActive(iobase, OFF);
579		}
580		if (IsMIROn(iobase)) {	//mir
581			// Mode select On
582			SlowIRRXLowActive(iobase, OFF);
583			udelay(20);
584			// Mode select Off
585			SlowIRRXLowActive(iobase, ON);
586		} else {	// fir
587			if (IsFIROn(iobase)) {	//fir
588				// Mode select On
589				SlowIRRXLowActive(iobase, OFF);
590				// TX On
591				WriteTX(iobase, ON);
592				udelay(20);
593				// Mode select OFF
594				SlowIRRXLowActive(iobase, ON);
595				udelay(20);
596				// TX Off
597				WriteTX(iobase, OFF);
598			}
599		}
600		break;
601
602	case 0x0d:
603		UseOneRX(iobase, OFF);	// use two RX pin   RX1,RX2
604		InvertTX(iobase, OFF);
605		InvertRX(iobase, OFF);
606		SlowIRRXLowActive(iobase, OFF);
607		if (IsSIROn(iobase)) {	//sir
608			EnGPIOtoRX2(iobase, OFF);
609			WriteGIO(iobase, OFF);
610			EnRX2(iobase, OFF);	//sir to rx2
611		} else {	// fir mir
612			EnGPIOtoRX2(iobase, OFF);
613			WriteGIO(iobase, OFF);
614			EnRX2(iobase, OFF);	//fir to rx
615		}
616		break;
617
618	case 0x11:		/* Temic TFDS4500 */
619
620		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
621
622		UseOneRX(iobase, ON);	//use ONE RX....RX1
623		InvertTX(iobase, OFF);
624		InvertRX(iobase, ON);	// invert RX pin
625
626		EnRX2(iobase, ON);	//sir to rx2
627		EnGPIOtoRX2(iobase, OFF);
628
629		if( IsSIROn(iobase) ){	//sir
630
631			// Mode select On
632			SlowIRRXLowActive(iobase, ON);
633			udelay(20);
634			// Mode select Off
635			SlowIRRXLowActive(iobase, OFF);
636
637		} else{
638			IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
639		}
640		break;
641
642	case 0x0ff:		/* Vishay */
643		if (IsSIROn(iobase))
644			mode = 0;
645		else if (IsMIROn(iobase))
646			mode = 1;
647		else if (IsFIROn(iobase))
648			mode = 2;
649		else if (IsVFIROn(iobase))
650			mode = 5;	//VFIR-16
651		SI_SetMode(iobase, mode);
652		break;
653
654	default:
655		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
656			   __func__, dongle_id);
657	}
658}
659
/*
 * Function via_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device: select the controller mode
 *    (SIR/MIR/FIR/VFIR), program the baud-rate divisor, inform the
 *    dongle, and install the matching xmit handler (SIR vs FIR ops).
 *
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;
	/* Update accounting for new speed */
	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	/* Disable IR while reprogramming (re-enabled below with 0x80) */
	WriteReg(iobase, I_ST_CT_0, 0x0);

	/* Controller mode selection */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/* SIR divisor: 115200 / speed - 1 */
		value = (115200/speed)-1;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		/* FIXME: this can't be right, as it's the same as 115200,
		 * and 576000 is MIR, not SIR. */
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	default:
		value = 0;
		break;
	}

	/* Set baudrate to 0x19[2..7]: keep low 2 bits, divisor in bits 2..7 */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Some dongles may need to be informed about speed changes. */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO size to 64 */
	SetFIFO(iobase, 64);

	/* Enable IR */
	WriteReg(iobase, I_ST_CT_0, 0x80);

	// EnTXFIFOHalfLevelInt(iobase,ON);

	/* Enable some interrupts so we can receive frames */
	//EnAllInt(iobase,ON);

	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	/* Swap the ndo_start_xmit handler to match the new mode */
	if (speed > 115200) {
		/* Install FIR xmit handler */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
758
/*
 * Function via_ircc_hard_xmit (skb, dev)
 *
 *    Transmit one SIR frame: async-wrap the skb into the coherent Tx
 *    buffer and start a DMA transmit.  Speed-change requests from the
 *    IrDA core are applied immediately for zero-length skbs, or
 *    deferred (self->new_speed) until this frame has gone out.
 *
 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	/* Re-initialise the chip into SIR mode before each frame */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);

	spin_lock_irqsave(&self->lock, flags);
	self->tx_buff.data = self->tx_buff.head;
	/* Async-wrap the payload (adds BOF/EOF and escaping) */
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	/* Hand the wrapped frame to the DMA engine */
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
836
837static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
838						struct net_device *dev)
839{
840	struct via_ircc_cb *self;
841	u16 iobase;
842	__u32 speed;
843	unsigned long flags;
844
845	self = netdev_priv(dev);
846	iobase = self->io.fir_base;
847
848	if (self->st_fifo.len)
849		return NETDEV_TX_OK;
850	if (self->chip_id == 0x3076)
851		iodelay(1500);
852	else
853		udelay(1500);
854	netif_stop_queue(dev);
855	speed = irda_get_next_speed(skb);
856	if ((speed != self->io.speed) && (speed != -1)) {
857		if (!skb->len) {
858			via_ircc_change_speed(self, speed);
859			dev->trans_start = jiffies;
860			dev_kfree_skb(skb);
861			return NETDEV_TX_OK;
862		} else
863			self->new_speed = speed;
864	}
865	spin_lock_irqsave(&self->lock, flags);
866	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
867	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
868
869	self->tx_fifo.tail += skb->len;
870	dev->stats.tx_bytes += skb->len;
871	skb_copy_from_linear_data(skb,
872		      self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
873	self->tx_fifo.len++;
874	self->tx_fifo.free++;
875//F01   if (self->tx_fifo.len == 1) {
876	via_ircc_dma_xmit(self, iobase);
877//F01   }
878//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
879	dev->trans_start = jiffies;
880	dev_kfree_skb(skb);
881	spin_unlock_irqrestore(&self->lock, flags);
882	return NETDEV_TX_OK;
883
884}
885
/*
 * Kick a DMA transmit of the Tx FIFO entry at tx_fifo.ptr.  Always
 * returns 0.  The register write order below is deliberate.
 */
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	/* Bus address = offset of this queue entry within the coherent
	 * Tx buffer, rebased onto the buffer's DMA handle */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);
	return 0;

}
916
/*
 * Function via_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished.  This function will only be
 *    called by the interrupt handler.  Updates Tx statistics, applies
 *    any deferred speed change, resets the Tx FIFO bookkeeping and
 *    wakes the queue.  Always returns TRUE.
 *
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	u8 Tx_status;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;
	/* Disable DMA */
//      DisableDmaChannel(self->io.dma);
	/* Check for underrun! */
	/* Clear bit, by writing 1 into it */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		/* Underrun: count the error and reset the chip */
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
	/* how to clear underrun? */
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	IRDA_DEBUG(1,
		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		   __func__,
		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
	// Any frames to be sent back-to-back?
	if (self->tx_fifo.len) {
		// Not finished yet!
	  	via_ircc_dma_xmit(self, iobase);
		ret = FALSE;
	} else {
F01_E*/
	// Reset Tx FIFO info
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
//F01   }

	// Make sure we have room for more frames
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
	// Not busy transmitting anymore
	// Tell the network layer, that we can accept more frames
	netif_wake_queue(self->netdev);
//F01   }
	return TRUE;
}
985
/*
 * Function via_ircc_dma_receive (self)
 *
 *    Set configuration for receiving a frame: reset Tx/Rx FIFO
 *    bookkeeping, point the DMA engine at the whole coherent Rx
 *    buffer, and start the receiver.  Always returns 0.
 *
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Reset Tx FIFO and Rx state before (re)arming the receiver */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	/* Arm the Rx DMA channel over the full coherent Rx buffer */
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		  self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);

	return 0;
}
1028
/*
 * Function via_ircc_dma_receive_complete (self)
 *
 *    Controller finished receiving frames, and this routine is called
 *    by the ISR.  Below 4 Mbps the frame is copied straight out of the
 *    DMA buffer; in FIR mode it first goes through the status FIFO.
 *    Returns TRUE on success/handled, FALSE on allocation failure or
 *    after a hardware reset.
 *
 */
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len, i;
	u8 status = 0;

	/* NOTE(review): the iobase parameter is immediately overwritten
	 * here — callers' argument is ignored. */
	iobase = self->io.fir_base;
	st_fifo = &self->st_fifo;

	if (self->io.speed < 4000000) {	//Speed below FIR
		len = GetRecvByte(iobase, self);
		skb = dev_alloc_skb(len + 1);
		if (skb == NULL)
			return FALSE;
		// Make sure IP header gets aligned
		skb_reserve(skb, 1);
		/* NOTE(review): no lower bound check on len — if
		 * GetRecvByte() can return < 2, 'len - 2' underflows.
		 * TODO confirm the hardware guarantees len >= 2 here. */
		skb_put(skb, len - 2);
		if (self->chip_id == 0x3076) {
			/* 3076 stores each byte at even offsets only */
			for (i = 0; i < len - 2; i++)
				skb->data[i] = self->rx_buff.data[i * 2];
		} else {
			if (self->chip_id == 0x3096) {
				for (i = 0; i < len - 2; i++)
					skb->data[i] =
					    self->rx_buff.data[i];
			}
		}
		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
		return TRUE;
	}

	else {			//FIR mode
		len = GetRecvByte(iobase, self);
		if (len == 0)
			return TRUE;	//interrupt only, data maybe move by RxT
		/* Sanity-check the frame length (4 bytes are CRC/trailer) */
		if (((len - 4) < 2) || ((len - 4) > 2048)) {
			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
				   __func__, len, RxCurCount(iobase, self),
				   self->RxLastCount);
			hwreset(self);
			return FALSE;
		}
		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
			   __func__,
			   st_fifo->len, len - 4, RxCurCount(iobase, self));

		/* Record this frame in the status FIFO */
		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
		/* NOTE(review): wrap test is '> MAX_RX_WINDOW', not '>=' —
		 * confirm entries[] really has MAX_RX_WINDOW+1 slots. */
		if (st_fifo->tail > MAX_RX_WINDOW)
			st_fifo->tail = 0;
		self->RxDataReady = 0;

		// It maybe have MAX_RX_WINDOW package receive by
		// receive_complete before Timer IRQ
/* F01_S
          if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
		  RXStart(iobase,ON);
	  	  SetTimer(iobase,4);
	  }
	  else	  {
F01_E */
		/* Stop the receiver while we drain this entry */
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
//F01_S
		// Take the oldest entry back out of the fifo
		if (st_fifo->head > MAX_RX_WINDOW)
			st_fifo->head = 0;
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		skb = dev_alloc_skb(len + 1 - 4);
		/*
		 * if frame size, data ptr, or skb ptr are wrong, then get next
		 * entry.
		 */
		if ((skb == NULL) || (skb->data == NULL) ||
		    (self->rx_buff.data == NULL) || (len < 6)) {
			self->netdev->stats.rx_dropped++;
			kfree_skb(skb);
			return TRUE;
		}
		skb_reserve(skb, 1);
		skb_put(skb, len - 4);

		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
			   len - 4, self->rx_buff.data);

		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

//F01_E
	}			//FIR
	return TRUE;

}
1153
1154/*
1155 * if frame is received , but no INT ,then use this routine to upload frame.
1156 */
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
	struct sk_buff *skb;
	int len;
	struct st_fifo *st_fifo;
	st_fifo = &self->st_fifo;

	/* Hardware byte count for the frame sitting in the RX DMA buffer. */
	len = GetRecvByte(iobase, self);

	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

	/* Frames shorter than 2 payload bytes + 4 CRC bytes are bogus. */
	if ((len - 4) < 2) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}

	skb = dev_alloc_skb(len + 1);
	if (skb == NULL) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}
	/* Offset by one byte so the IP header ends up aligned. */
	skb_reserve(skb, 1);
	/*
	 * NOTE(review): len - 4 + 1 copies one byte more than the
	 * CRC-stripped payload (len - 4); the other upload paths copy
	 * len - 4. Presumably intentional for this path -- confirm.
	 */
	skb_put(skb, len - 4 + 1);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
	/* Advance the status FIFO; no entry data is recorded on this path. */
	st_fifo->tail++;
	st_fifo->len++;
	if (st_fifo->tail > MAX_RX_WINDOW)
		st_fifo->tail = 0;
	// Move to next frame
	self->rx_buff.data += len;
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
	/* Keep receiving while the FIFO has room; otherwise stop RX + DMA. */
	if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
		RXStart(iobase, ON);
	} else {
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
	}
	return TRUE;
}
1202
1203/*
1204 * Implement back to back receive , use this routine to upload data.
1205 */
1206
/*
 * Timer-driven upload of queued RX frames.
 *
 * Invoked from the interrupt handler on a timer event while in receive
 * direction. If the chip is still actively receiving, the upload is
 * postponed and the timer re-armed. Otherwise, once a retry has elapsed,
 * the DMA buffer is close to full, or the status FIFO is full, every
 * queued frame is handed to the network stack.
 *
 * Returns FALSE when the upload was postponed, TRUE otherwise.
 */
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len;
	u8 status;

	st_fifo = &self->st_fifo;

	if (CkRxRecv(iobase, self)) {
		// if still receiving ,then return ,don't upload frame
		self->RetryCount = 0;
		SetTimer(iobase, 20);
		self->RxDataReady++;
		return FALSE;
	} else
		self->RetryCount++;

	/*
	 * Drain the FIFO when any of: we already waited one timer tick,
	 * the DMA buffer cannot hold another max-size (2048-byte) frame,
	 * or the FIFO itself is full.
	 */
	if ((self->RetryCount >= 1) ||
	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
	    (st_fifo->len >= (MAX_RX_WINDOW))) {
		while (st_fifo->len > 0) {	//upload frame
			// Put this entry back in fifo
			if (st_fifo->head > MAX_RX_WINDOW)
				st_fifo->head = 0;
			status = st_fifo->entries[st_fifo->head].status;
			len = st_fifo->entries[st_fifo->head].len;
			st_fifo->head++;
			st_fifo->len--;

			skb = dev_alloc_skb(len + 1 - 4);
			/*
			 * if frame size, data ptr, or skb ptr are wrong,
			 * then get next entry.
			 */
			if ((skb == NULL) || (skb->data == NULL) ||
			    (self->rx_buff.data == NULL) || (len < 6)) {
				self->netdev->stats.rx_dropped++;
				continue;
			}
			/* One-byte offset keeps the IP header aligned. */
			skb_reserve(skb, 1);
			skb_put(skb, len - 4);
			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
				   len - 4, st_fifo->head);

			// Move to next frame
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}		//while
		self->RetryCount = 0;

		IRDA_DEBUG(2,
			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
			   __func__,
			   GetHostStatus(iobase), GetRXStatus(iobase));

		/*
		 * if frame is receive complete at this routine ,then upload
		 * frame.
		 */
		if ((GetRXStatus(iobase) & 0x10) &&
		    (RxCurCount(iobase, self) != self->RxLastCount)) {
			upload_rxdata(self, iobase);
			if (irda_device_txqueue_empty(self->netdev))
				via_ircc_dma_receive(self);
		}
	}			// timer detect complete
	else
		SetTimer(iobase, 4);
	return TRUE;

}
1286
1287
1288
1289/*
1290 * Function via_ircc_interrupt (irq, dev_id)
1291 *
1292 *    An interrupt from the chip has arrived. Time to do some work
1293 *
1294 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	/* Serialize against xmit/ioctl paths that touch the same chip. */
	spin_lock(&self->lock);
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x:  %s %s %s  %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);

	if ((iHostIntType & 0x40) != 0) {	//Timer Event
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/*
			 * frame ready hold too long, must reset.
			 */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {	// call this to upload frame.
				RxTimerHandler(self, iobase);
			}
		}		//RECV
	}			//Timer Event
	if ((iHostIntType & 0x20) != 0) {	//Tx Event
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x:  %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		/* End-of-message: finish the TX DMA and turn around to RX. */
		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;	// read and will auto clean
			if (via_ircc_dma_xmit_complete(self)) {
				if (irda_device_txqueue_empty
				    (self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}		//EOP
	}			//Tx Event
	//----------------------------------------
	if ((iHostIntType & 0x10) != 0) {	//Rx Event
		/* Check if DMA has finished */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x:  %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err."	: "",
			   (iRxIntType & 0x40) ? "CRC err"	: "",
			   (iRxIntType & 0x20) ? "FIFO overr."	: "",
			   (iRxIntType & 0x10) ? "EOF"		: "",
			   (iRxIntType & 0x08) ? "RxData"	: "",
			   (iRxIntType & 0x02) ? "RxMaxLen"	: "",
			   (iRxIntType & 0x01) ? "SIR bad"	: "");
		if (!iRxIntType)
			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

		/* EOF: a complete frame is in the DMA buffer; upload it. */
		if (iRxIntType & 0x10) {
			if (via_ircc_dma_receive_complete(self, iobase)) {
//F01       if(!(IsFIROn(iobase)))  via_ircc_dma_receive(self);
				via_ircc_dma_receive(self);
			}
		}		// No ERR
		else {		//ERR
			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			if (iRxIntType & 0x20) {	//FIFO OverRun ERR
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	//PHY,CRC ERR

				if (iRxIntType != 0x08)
					hwreset(self);	//F01
			}
			/* Re-arm reception after the error recovery. */
			via_ircc_dma_receive(self);
		}		//ERR

	}			//Rx Event
	spin_unlock(&self->lock);
	/* IRQ_HANDLED iff the chip reported any interrupt cause. */
	return IRQ_RETVAL(iHostIntType);
}
1399
/*
 * Full hardware reset of the IrDA controller.
 *
 * Stops DMA/TX/RX, re-runs the chip initialization sequence (defaulting
 * to SIR at 9600 baud), then restores the speed the driver was using
 * before the reset and clears the status-FIFO count. The register write
 * sequence is order-sensitive; do not reorder.
 */
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;
	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	/* Gate the chip off while reprogramming baud/pulse parameters. */
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	/* Restore speed. */
	via_ircc_change_speed(self, self->io.speed);

	self->st_fifo.len = 0;
}
1432
1433/*
1434 * Function via_ircc_is_receiving (self)
1435 *
1436 *    Return TRUE is we are currently receiving a frame
1437 *
1438 */
1439static int via_ircc_is_receiving(struct via_ircc_cb *self)
1440{
1441	int status = FALSE;
1442	int iobase;
1443
1444	IRDA_ASSERT(self != NULL, return FALSE;);
1445
1446	iobase = self->io.fir_base;
1447	if (CkRxRecv(iobase, self))
1448		status = TRUE;
1449
1450	IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1451
1452	return status;
1453}
1454
1455
1456/*
1457 * Function via_ircc_net_open (dev)
1458 *
1459 *    Start the device
1460 *
1461 */
1462static int via_ircc_net_open(struct net_device *dev)
1463{
1464	struct via_ircc_cb *self;
1465	int iobase;
1466	char hwname[32];
1467
1468	IRDA_DEBUG(3, "%s()\n", __func__);
1469
1470	IRDA_ASSERT(dev != NULL, return -1;);
1471	self = netdev_priv(dev);
1472	dev->stats.rx_packets = 0;
1473	IRDA_ASSERT(self != NULL, return 0;);
1474	iobase = self->io.fir_base;
1475	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1476		IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1477			     self->io.irq);
1478		return -EAGAIN;
1479	}
1480	/*
1481	 * Always allocate the DMA channel after the IRQ, and clean up on
1482	 * failure.
1483	 */
1484	if (request_dma(self->io.dma, dev->name)) {
1485		IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1486			     self->io.dma);
1487		free_irq(self->io.irq, dev);
1488		return -EAGAIN;
1489	}
1490	if (self->io.dma2 != self->io.dma) {
1491		if (request_dma(self->io.dma2, dev->name)) {
1492			IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1493				     driver_name, self->io.dma2);
1494			free_irq(self->io.irq, dev);
1495			free_dma(self->io.dma);
1496			return -EAGAIN;
1497		}
1498	}
1499
1500
1501	/* turn on interrupts */
1502	EnAllInt(iobase, ON);
1503	EnInternalLoop(iobase, OFF);
1504	EnExternalLoop(iobase, OFF);
1505
1506	/* */
1507	via_ircc_dma_receive(self);
1508
1509	/* Ready to play! */
1510	netif_start_queue(dev);
1511
1512	/*
1513	 * Open new IrLAP layer instance, now that everything should be
1514	 * initialized properly
1515	 */
1516	sprintf(hwname, "VIA @ 0x%x", iobase);
1517	self->irlap = irlap_open(dev, &self->qos, hwname);
1518
1519	self->RxLastCount = 0;
1520
1521	return 0;
1522}
1523
1524/*
1525 * Function via_ircc_net_close (dev)
1526 *
1527 *    Stop the device
1528 *
1529 */
static int via_ircc_net_close(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop device */
	netif_stop_queue(dev);
	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;
	iobase = self->io.fir_base;
	/* Quiesce the hardware before releasing IRQ/DMA resources. */
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, OFF);
	/*
	 * NOTE(review): only io.dma is disabled here; io.dma2 (when
	 * distinct) is freed below but never passed to
	 * DisableDmaChannel() -- confirm whether that is intentional.
	 */
	DisableDmaChannel(self->io.dma);

	/* Disable interrupts */
	EnAllInt(iobase, OFF);
	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);
	if (self->io.dma2 != self->io.dma)
		free_dma(self->io.dma2);

	return 0;
}
1561
1562/*
1563 * Function via_ircc_net_ioctl (dev, rq, cmd)
1564 *
1565 *    Process IOCTL commands for this device
1566 *
1567 */
1568static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1569			      int cmd)
1570{
1571	struct if_irda_req *irq = (struct if_irda_req *) rq;
1572	struct via_ircc_cb *self;
1573	unsigned long flags;
1574	int ret = 0;
1575
1576	IRDA_ASSERT(dev != NULL, return -1;);
1577	self = netdev_priv(dev);
1578	IRDA_ASSERT(self != NULL, return -1;);
1579	IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1580		   cmd);
1581	/* Disable interrupts & save flags */
1582	spin_lock_irqsave(&self->lock, flags);
1583	switch (cmd) {
1584	case SIOCSBANDWIDTH:	/* Set bandwidth */
1585		if (!capable(CAP_NET_ADMIN)) {
1586			ret = -EPERM;
1587			goto out;
1588		}
1589		via_ircc_change_speed(self, irq->ifr_baudrate);
1590		break;
1591	case SIOCSMEDIABUSY:	/* Set media busy */
1592		if (!capable(CAP_NET_ADMIN)) {
1593			ret = -EPERM;
1594			goto out;
1595		}
1596		irda_device_set_media_busy(self->netdev, TRUE);
1597		break;
1598	case SIOCGRECEIVING:	/* Check if we are receiving right now */
1599		irq->ifr_receiving = via_ircc_is_receiving(self);
1600		break;
1601	default:
1602		ret = -EOPNOTSUPP;
1603	}
1604      out:
1605	spin_unlock_irqrestore(&self->lock, flags);
1606	return ret;
1607}
1608
/* Module metadata and init/exit entry points. */
MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

module_init(via_ircc_init);
module_exit(via_ircc_cleanup);
1615