Lines Matching refs:self

81 static int via_ircc_dma_receive(struct via_ircc_cb *self);
82 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
88 static void via_hw_init(struct via_ircc_cb *self);
89 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
91 static int via_ircc_is_receiving(struct via_ircc_cb *self);
100 static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
101 static void hwreset(struct via_ircc_cb *self);
102 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
103 static int upload_rxdata(struct via_ircc_cb *self, int iobase);
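
Taken together, the matches in this listing (apparently from the VIA IrCC IrDA driver, via-ircc.c) touch a fairly small set of fields on the driver's private control block. Below is a rough, non-authoritative sketch of that state, reconstructed only from the references shown here; field names follow the matches, but the types, nesting, and queue depth are guesses for illustration, not the driver's real declarations.

struct net_device;                            /* opaque here */

/* Sketch of the per-device state ("self") implied by this listing. */
struct via_ircc_cb_sketch {
        struct net_device *netdev;            /* self->netdev */
        void *irlap;                          /* self->irlap, opened in net_open */
        unsigned int chip_id;                 /* compared against 0x3076 / 0x3096 below */
        unsigned int new_speed;               /* speed change deferred until TX completes */
        unsigned int RxDataReady, RxLastCount, RetryCount;

        struct {                              /* self->io.* */
                unsigned int cfg_base, fir_base, fir_ext;
                unsigned int irq, dma, dma2;
                unsigned int fifo_size, dongle_id, speed, direction;
        } io;

        struct {                              /* self->rx_buff / self->tx_buff */
                unsigned char *head, *data;
                int len, truesize, in_frame, state;
        } rx_buff, tx_buff;
        unsigned long rx_buff_dma, tx_buff_dma;   /* dma_addr_t in the driver */

        struct {                              /* self->tx_fifo: software TX window */
                int len, ptr, free;
                unsigned char *tail;
                struct { unsigned char *start; int len; } queue[7];
        } tx_fifo;

        struct {                              /* self->st_fifo: RX status FIFO */
                int len, pending_bytes, head, tail;
        } st_fifo;

        /* self->lock (a spinlock), self->qos, and self->EventFlag.{TimeOut,
         * EOMessage, Unknown} also appear in the matches below. */
};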
289 struct via_ircc_cb *self;
299 self = netdev_priv(dev);
300 self->netdev = dev;
301 spin_lock_init(&self->lock);
303 pci_set_drvdata(pdev, self);
306 self->io.cfg_base = info->cfg_base;
307 self->io.fir_base = info->fir_base;
308 self->io.irq = info->irq;
309 self->io.fir_ext = CHIP_IO_EXTENT;
310 self->io.dma = info->dma;
311 self->io.dma2 = info->dma2;
312 self->io.fifo_size = 32;
313 self->chip_id = id;
314 self->st_fifo.len = 0;
315 self->RxDataReady = 0;
318 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
320 __func__, self->io.fir_base);
326 irda_init_max_qos_capabilies(&self->qos);
330 dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
331 self->io.dongle_id = dongle_id;
335 switch( self->io.dongle_id ){
337 self->qos.baud_rate.bits =
342 self->qos.baud_rate.bits =
349 * self->qos.baud_rate.bits = IR_9600;
354 self->qos.min_turn_time.bits = qos_mtt_bits;
355 irda_qos_bits_to_value(&self->qos);
358 self->rx_buff.truesize = 14384 + 2048;
359 self->tx_buff.truesize = 14384 + 2048;
362 self->rx_buff.head =
363 dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
364 &self->rx_buff_dma, GFP_KERNEL);
365 if (self->rx_buff.head == NULL) {
370 self->tx_buff.head =
371 dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
372 &self->tx_buff_dma, GFP_KERNEL);
373 if (self->tx_buff.head == NULL) {
378 self->rx_buff.in_frame = FALSE;
379 self->rx_buff.state = OUTSIDE_FRAME;
380 self->tx_buff.data = self->tx_buff.head;
381 self->rx_buff.data = self->rx_buff.head;
384 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
385 self->tx_fifo.tail = self->tx_buff.head;
398 self->io.speed = 9600;
399 via_hw_init(self);
402 dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
403 self->tx_buff.head, self->tx_buff_dma);
405 dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
406 self->rx_buff.head, self->rx_buff_dma);
408 release_region(self->io.fir_base, self->io.fir_ext);
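
The probe fragments above (lines 358-408) allocate the RX and TX rings with dma_zalloc_coherent() and, on failure, unwind in reverse before releasing the I/O region. A minimal userspace model of that allocate-then-unwind shape, with calloc()/free() standing in for the coherent DMA helpers:

#include <stdlib.h>

struct bufs {
        unsigned char *rx_head, *tx_head;
        size_t truesize;
};

static int bufs_alloc(struct bufs *b)
{
        b->truesize = 14384 + 2048;          /* same truesize as lines 358-359 */

        b->rx_head = calloc(1, b->truesize); /* dma_zalloc_coherent() stand-in */
        if (!b->rx_head)
                goto err_out;

        b->tx_head = calloc(1, b->truesize);
        if (!b->tx_head)
                goto err_free_rx;

        return 0;

err_free_rx:
        free(b->rx_head);                    /* dma_free_coherent() stand-in */
err_out:
        return -1;                           /* the driver also releases the I/O region */
}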
422 struct via_ircc_cb *self = pci_get_drvdata(pdev);
427 iobase = self->io.fir_base;
431 unregister_netdev(self->netdev);
435 __func__, self->io.fir_base);
436 release_region(self->io.fir_base, self->io.fir_ext);
437 if (self->tx_buff.head)
438 dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
439 self->tx_buff.head, self->tx_buff_dma);
440 if (self->rx_buff.head)
441 dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
442 self->rx_buff.head, self->rx_buff_dma);
444 free_netdev(self->netdev);
450 * Function via_hw_init(self)
456 static void via_hw_init(struct via_ircc_cb *self)
458 int iobase = self->io.fir_base;
498 self->io.speed = 9600;
499 self->st_fifo.len = 0;
501 via_ircc_change_dongle_speed(iobase, self->io.speed,
502 self->io.dongle_id);
661 * Function via_ircc_change_speed (self, baud)
666 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
668 struct net_device *dev = self->netdev;
672 iobase = self->io.fir_base;
674 self->io.speed = speed;
727 via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
751 via_ircc_dma_receive(self);
768 struct via_ircc_cb *self;
773 self = netdev_priv(dev);
774 IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
775 iobase = self->io.fir_base;
780 if ((speed != self->io.speed) && (speed != -1)) {
783 via_ircc_change_speed(self, speed);
788 self->new_speed = speed;
798 spin_lock_irqsave(&self->lock, flags);
799 self->tx_buff.data = self->tx_buff.head;
800 self->tx_buff.len =
801 async_wrap_skb(skb, self->tx_buff.data,
802 self->tx_buff.truesize);
804 dev->stats.tx_bytes += self->tx_buff.len;
806 SetBaudRate(iobase, self->io.speed);
824 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
827 SetSendByte(iobase, self->tx_buff.len);
832 spin_unlock_irqrestore(&self->lock, flags);
840 struct via_ircc_cb *self;
845 self = netdev_priv(dev);
846 iobase = self->io.fir_base;
848 if (self->st_fifo.len)
850 if (self->chip_id == 0x3076)
856 if ((speed != self->io.speed) && (speed != -1)) {
858 via_ircc_change_speed(self, speed);
863 self->new_speed = speed;
865 spin_lock_irqsave(&self->lock, flags);
866 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
867 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
869 self->tx_fifo.tail += skb->len;
872 self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
873 self->tx_fifo.len++;
874 self->tx_fifo.free++;
875 //F01 if (self->tx_fifo.len == 1) {
876 via_ircc_dma_xmit(self, iobase);
878 //F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
881 spin_unlock_irqrestore(&self->lock, flags);
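
Lines 865-876 enqueue the outgoing frame into the driver's software TX window (tx_fifo) under the lock and then kick the DMA engine. A self-contained sketch of just that bookkeeping; MAX_TX_WINDOW, start_dma(), and the flat byte buffers are illustrative stand-ins, not the driver's API:

#include <string.h>

#define MAX_TX_WINDOW 7                       /* illustrative depth */

struct tx_window {
        int len, ptr, free;                   /* queued frames, in-flight slot, next free slot */
        unsigned char *tail;                  /* write cursor into the coherent TX buffer */
        struct { unsigned char *start; int len; } queue[MAX_TX_WINDOW];
};

static void start_dma(unsigned char *buf, int len)
{
        (void)buf; (void)len;                 /* via_ircc_dma_xmit() does the real work */
}

static void tx_enqueue(struct tx_window *w, const unsigned char *frame, int n)
{
        w->queue[w->free].start = w->tail;    /* frame begins at the current tail */
        w->queue[w->free].len = n;
        memcpy(w->tail, frame, n);            /* skb_copy_from_linear_data() stand-in */
        w->tail += n;
        w->len++;
        w->free++;
        start_dma(w->queue[w->ptr].start,     /* kick the frame at queue[ptr] */
                  w->queue[w->ptr].len);
}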
886 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
889 self->io.direction = IO_XMIT;
901 irda_setup_dma(self->io.dma,
902 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
903 self->tx_buff.head) + self->tx_buff_dma,
904 self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
906 __func__, self->tx_fifo.ptr,
907 self->tx_fifo.queue[self->tx_fifo.ptr].len,
908 self->tx_fifo.len);
910 SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
918 * Function via_ircc_dma_xmit_complete (self)
924 static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
931 iobase = self->io.fir_base;
933 // DisableDmaChannel(self->io.dma);
938 self->netdev->stats.tx_errors++;
939 self->netdev->stats.tx_fifo_errors++;
940 hwreset(self);
943 self->netdev->stats.tx_packets++;
948 if (self->new_speed) {
949 via_ircc_change_speed(self, self->new_speed);
950 self->new_speed = 0;
955 if (self->tx_fifo.len) {
956 self->tx_fifo.len--;
957 self->tx_fifo.ptr++;
963 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
966 if (self->tx_fifo.len) {
968 via_ircc_dma_xmit(self, iobase);
973 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
974 self->tx_fifo.tail = self->tx_buff.head;
978 //F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
981 netif_wake_queue(self->netdev);
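
The completion side (lines 948-981) applies any speed change that was deferred in hard_xmit, retires the frame at queue[ptr], restarts DMA if more frames are queued, and otherwise rewinds the window and wakes the netdev queue. Continuing the tx_window sketch from above (change_speed_stub() is, again, only a placeholder):

static unsigned int change_speed_stub(unsigned int speed)
{
        return speed;                         /* via_ircc_change_speed() in the driver */
}

static int tx_complete(struct tx_window *w, unsigned char *tx_head,
                       unsigned int *io_speed, unsigned int *new_speed)
{
        if (*new_speed) {                     /* speed change deferred at lines 788/863 */
                *io_speed = change_speed_stub(*new_speed);
                *new_speed = 0;
        }
        if (w->len) {                         /* the frame at queue[ptr] just finished */
                w->len--;
                w->ptr++;
        }
        if (w->len) {                         /* more frames pending: start the next one */
                start_dma(w->queue[w->ptr].start, w->queue[w->ptr].len);
                return 1;
        }
        w->len = w->ptr = w->free = 0;        /* window drained: reset the counters */
        w->tail = tx_head;                    /* and rewind the tail into the TX buffer */
        return 0;                             /* the driver then wakes the netdev queue */
}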
987 * Function via_ircc_dma_receive (self)
992 static int via_ircc_dma_receive(struct via_ircc_cb *self)
996 iobase = self->io.fir_base;
1000 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1001 self->tx_fifo.tail = self->tx_buff.head;
1002 self->RxDataReady = 0;
1003 self->io.direction = IO_RECV;
1004 self->rx_buff.data = self->rx_buff.head;
1005 self->st_fifo.len = self->st_fifo.pending_bytes = 0;
1006 self->st_fifo.tail = self->st_fifo.head = 0;
1021 irda_setup_dma(self->io.dma2, self->rx_buff_dma,
1022 self->rx_buff.truesize, DMA_RX_MODE);
1030 * Function via_ircc_dma_receive_complete (self)
1036 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1044 iobase = self->io.fir_base;
1045 st_fifo = &self->st_fifo;
1047 if (self->io.speed < 4000000) { //Speed below FIR
1048 len = GetRecvByte(iobase, self);
1055 if (self->chip_id == 0x3076) {
1057 skb->data[i] = self->rx_buff.data[i * 2];
1059 if (self->chip_id == 0x3096) {
1062 self->rx_buff.data[i];
1066 self->rx_buff.data += len;
1067 self->netdev->stats.rx_bytes += len;
1068 self->netdev->stats.rx_packets++;
1069 skb->dev = self->netdev;
1077 len = GetRecvByte(iobase, self);
1082 __func__, len, RxCurCount(iobase, self),
1083 self->RxLastCount);
1084 hwreset(self);
1089 st_fifo->len, len - 4, RxCurCount(iobase, self));
1098 self->RxDataReady = 0;
1127 (self->rx_buff.data == NULL) || (len < 6)) {
1128 self->netdev->stats.rx_dropped++;
1135 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1137 len - 4, self->rx_buff.data);
1140 self->rx_buff.data += len;
1141 self->netdev->stats.rx_bytes += len;
1142 self->netdev->stats.rx_packets++;
1143 skb->dev = self->netdev;
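
In the FIR receive path (lines 1127-1143, and again in upload_rxdata and RxTimerHandler below), each frame is copied out of the coherent RX buffer minus a 4-byte hardware trailer (presumably the appended FCS), the read cursor is advanced past the whole on-wire frame, and the stats are bumped. A small userspace model of that step, with malloc() standing in for the skb allocation:

#include <stdlib.h>
#include <string.h>

struct rx_cursor {
        unsigned char *data;                  /* self->rx_buff.data */
        unsigned long rx_bytes, rx_packets;   /* netdev stats counters */
};

static unsigned char *rx_frame(struct rx_cursor *rx, int len)
{
        unsigned char *frame;

        if (!rx->data || len < 6)             /* same sanity check as lines 1127/1243 */
                return NULL;                  /* counted as rx_dropped in the driver */

        frame = malloc(len - 4);
        if (!frame)
                return NULL;
        memcpy(frame, rx->data, len - 4);     /* skb_copy_to_linear_data(..., len - 4) */

        rx->data += len;                      /* skip payload plus the 4-byte trailer */
        rx->rx_bytes += len;
        rx->rx_packets++;
        return frame;                         /* handed up to the stack in the driver */
}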
1157 static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1162 st_fifo = &self->st_fifo;
1164 len = GetRecvByte(iobase, self);
1169 self->netdev->stats.rx_dropped++;
1175 self->netdev->stats.rx_dropped++;
1180 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
1186 self->rx_buff.data += len;
1187 self->netdev->stats.rx_bytes += len;
1188 self->netdev->stats.rx_packets++;
1189 skb->dev = self->netdev;
1207 static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1214 st_fifo = &self->st_fifo;
1216 if (CkRxRecv(iobase, self)) {
1218 self->RetryCount = 0;
1220 self->RxDataReady++;
1223 self->RetryCount++;
1225 if ((self->RetryCount >= 1) ||
1226 ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
1243 (self->rx_buff.data == NULL) || (len < 6)) {
1244 self->netdev->stats.rx_dropped++;
1249 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1255 self->rx_buff.data += len;
1256 self->netdev->stats.rx_bytes += len;
1257 self->netdev->stats.rx_packets++;
1258 skb->dev = self->netdev;
1263 self->RetryCount = 0;
1275 (RxCurCount(iobase, self) != self->RxLastCount)) {
1276 upload_rxdata(self, iobase);
1277 if (irda_device_txqueue_empty(self->netdev))
1278 via_ircc_dma_receive(self);
1298 struct via_ircc_cb *self = netdev_priv(dev);
1302 iobase = self->io.fir_base;
1303 spin_lock(&self->lock);
1314 self->EventFlag.TimeOut++;
1316 if (self->io.direction == IO_XMIT) {
1317 via_ircc_dma_xmit(self, iobase);
1319 if (self->io.direction == IO_RECV) {
1323 if (self->RxDataReady > 30) {
1324 hwreset(self);
1325 if (irda_device_txqueue_empty(self->netdev)) {
1326 via_ircc_dma_receive(self);
1329 RxTimerHandler(self, iobase);
1344 self->EventFlag.EOMessage++; // read and will auto clean
1345 if (via_ircc_dma_xmit_complete(self)) {
1347 (self->netdev)) {
1348 via_ircc_dma_receive(self);
1351 self->EventFlag.Unknown++;
1373 if (via_ircc_dma_receive_complete(self, iobase)) {
1374 //F01 if(!(IsFIROn(iobase))) via_ircc_dma_receive(self);
1375 via_ircc_dma_receive(self);
1381 RxCurCount(iobase, self),
1382 self->RxLastCount);
1390 hwreset(self); //F01
1392 via_ircc_dma_receive(self);
1396 spin_unlock(&self->lock);
1400 static void hwreset(struct via_ircc_cb *self)
1403 iobase = self->io.fir_base;
1428 via_ircc_change_speed(self, self->io.speed);
1430 self->st_fifo.len = 0;
1434 * Function via_ircc_is_receiving (self)
1439 static int via_ircc_is_receiving(struct via_ircc_cb *self)
1444 IRDA_ASSERT(self != NULL, return FALSE;);
1446 iobase = self->io.fir_base;
1447 if (CkRxRecv(iobase, self))
1464 struct via_ircc_cb *self;
1471 self = netdev_priv(dev);
1473 IRDA_ASSERT(self != NULL, return 0;);
1474 iobase = self->io.fir_base;
1475 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1477 self->io.irq);
1484 if (request_dma(self->io.dma, dev->name)) {
1486 self->io.dma);
1487 free_irq(self->io.irq, dev);
1490 if (self->io.dma2 != self->io.dma) {
1491 if (request_dma(self->io.dma2, dev->name)) {
1493 driver_name, self->io.dma2);
1494 free_irq(self->io.irq, dev);
1495 free_dma(self->io.dma);
1507 via_ircc_dma_receive(self);
1517 self->irlap = irlap_open(dev, &self->qos, hwname);
1519 self->RxLastCount = 0;
1532 struct via_ircc_cb *self;
1538 self = netdev_priv(dev);
1539 IRDA_ASSERT(self != NULL, return 0;);
1544 if (self->irlap)
1545 irlap_close(self->irlap);
1546 self->irlap = NULL;
1547 iobase = self->io.fir_base;
1550 DisableDmaChannel(self->io.dma);
1554 free_irq(self->io.irq, dev);
1555 free_dma(self->io.dma);
1556 if (self->io.dma2 != self->io.dma)
1557 free_dma(self->io.dma2);
1572 struct via_ircc_cb *self;
1577 self = netdev_priv(dev);
1578 IRDA_ASSERT(self != NULL, return -1;);
1582 spin_lock_irqsave(&self->lock, flags);
1589 via_ircc_change_speed(self, irq->ifr_baudrate);
1596 irda_device_set_media_busy(self->netdev, TRUE);
1599 irq->ifr_receiving = via_ircc_is_receiving(self);
1605 spin_unlock_irqrestore(&self->lock, flags);