
1/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2/* PLIP: A parallel port "network" driver for Linux. */
3/* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
4/*
5 * Authors:	Donald Becker <becker@scyld.com>
6 *		Tommy Thorn <thorn@daimi.aau.dk>
7 *		Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 *		Alan Cox <gw4pts@gw4pts.ampr.org>
9 *		Peter Bauer <100136.3530@compuserve.com>
10 *		Niibe Yutaka <gniibe@mri.co.jp>
11 *		Nimrod Zimerman <zimerman@mailandnews.com>
12 *
13 * Enhancements:
14 *		Modularization and ifreq/ifmap support by Alan Cox.
15 *		Rewritten by Niibe Yutaka.
16 *		parport-sharing awareness code by Philip Blundell.
17 *		SMP locking by Niibe Yutaka.
18 *		Support for parallel ports with no IRQ (poll mode),
19 *		Modifications to use the parallel port API
20 *		by Nimrod Zimerman.
21 *
22 * Fixes:
23 *		Niibe Yutaka
24 *		  - Module initialization.
25 *		  - MTU fix.
26 *		  - Make sure other end is OK, before sending a packet.
27 *		  - Fix immediate timer problem.
28 *
29 *		Al Viro
30 *		  - Changed {enable,disable}_irq handling to make it work
31 *		    with new ("stack") semantics.
32 *
33 *		This program is free software; you can redistribute it and/or
34 *		modify it under the terms of the GNU General Public License
35 *		as published by the Free Software Foundation; either version
36 *		2 of the License, or (at your option) any later version.
37 */
38
39/*
40 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
41 * inspired by Russ Nelson's parallel port packet driver.
42 *
43 * NOTE:
44 *     Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 *     Because of the necessity to communicate to DOS machines with the
46 *     Crynwr packet driver, Peter Bauer changed the protocol again
47 *     back to original protocol.
48 *
49 *     This version follows original PLIP protocol.
50 *     So, this PLIP can't communicate the PLIP of Linux v1.0.
51 */
52
53/*
54 *     To use with DOS box, please do (Turn on ARP switch):
55 *	# ifconfig plip[0-2] arp
56 */
57static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
58
59/*
60  Sources:
61	Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62	"parallel.asm" parallel port packet driver.
63
64  The "Crynwr" parallel port standard specifies the following protocol:
65    Trigger by sending nibble '0x8' (this causes interrupt on other end)
66    count-low octet
67    count-high octet
68    ... data octets
69    checksum octet
70  Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71			<wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
72
73  The packet is encapsulated as if it were ethernet.
74
75  The cable used is a de facto standard parallel null cable -- sold as
76  a "LapLink" cable by various places.  You'll need a 12-conductor cable to
77  make one yourself.  The wiring is:
78    SLCTIN	17 - 17
79    GROUND	25 - 25
80    D0->ERROR	2 - 15		15 - 2
81    D1->SLCT	3 - 13		13 - 3
82    D2->PAPOUT	4 - 12		12 - 4
83    D3->ACK	5 - 10		10 - 5
84    D4->BUSY	6 - 11		11 - 6
85  Do not connect the other pins.  They are
86    D5,D6,D7 are 7,8,9
87    STROBE is 1, FEED is 14, INIT is 16
88    extra grounds are 18,19,20,21,22,23,24
89*/
90
91#include <linux/module.h>
92#include <linux/kernel.h>
93#include <linux/types.h>
94#include <linux/fcntl.h>
95#include <linux/interrupt.h>
96#include <linux/string.h>
97#include <linux/slab.h>
98#include <linux/if_ether.h>
99#include <linux/in.h>
100#include <linux/errno.h>
101#include <linux/delay.h>
102#include <linux/init.h>
103#include <linux/netdevice.h>
104#include <linux/etherdevice.h>
105#include <linux/inetdevice.h>
106#include <linux/skbuff.h>
107#include <linux/if_plip.h>
108#include <linux/workqueue.h>
109#include <linux/spinlock.h>
110#include <linux/completion.h>
111#include <linux/parport.h>
112#include <linux/bitops.h>
113
114#include <net/neighbour.h>
115
116#include <asm/irq.h>
117#include <asm/byteorder.h>
118
119/* Maximum number of devices to support. */
120#define PLIP_MAX  8
121
122/* Use 0 for production, 1 for verification, >2 for debug */
123#ifndef NET_DEBUG
124#define NET_DEBUG 1
125#endif
126static const unsigned int net_debug = NET_DEBUG;
127
/* Mask/unmask the parallel port interrupt line; irq == -1 means the
   port runs in polled mode and there is nothing to do.  Wrapped in
   do { } while (0) so each macro expands to exactly one statement and
   is safe inside unbraced if/else bodies. */
#define ENABLE(irq)  do { if ((irq) != -1) enable_irq(irq); } while (0)
#define DISABLE(irq) do { if ((irq) != -1) disable_irq(irq); } while (0)
130
131/* In micro second */
132#define PLIP_DELAY_UNIT		   1
133
134/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
135#define PLIP_TRIGGER_WAIT	 500
136
137/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
138#define PLIP_NIBBLE_WAIT        3000
139
140/* Bottom halves */
141static void plip_kick_bh(struct work_struct *work);
142static void plip_bh(struct work_struct *work);
143static void plip_timer_bh(struct work_struct *work);
144
145/* Interrupt handler */
146static void plip_interrupt(void *dev_id);
147
148/* Functions for DEV methods */
149static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
150static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
151                            unsigned short type, const void *daddr,
152			    const void *saddr, unsigned len);
153static int plip_hard_header_cache(const struct neighbour *neigh,
154                                  struct hh_cache *hh, __be16 type);
155static int plip_open(struct net_device *dev);
156static int plip_close(struct net_device *dev);
157static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
158static int plip_preempt(void *handle);
159static void plip_wakeup(void *handle);
160
/* State of the link as a whole: who currently owns the wire. */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle */
	PLIP_CN_RECEIVE,	/* incoming transfer in progress */
	PLIP_CN_SEND,		/* outgoing transfer in progress */
	PLIP_CN_CLOSING,	/* transfer finished, link settling */
	PLIP_CN_ERROR		/* aborted; waiting for other end to settle */
};

/* Which part of a packet the per-direction state machine is on.
   The order matches the on-wire sequence (length, data, checksum). */
enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,	/* handshake / trigger remote interrupt */
	PLIP_PK_LENGTH_LSB,
	PLIP_PK_LENGTH_MSB,
	PLIP_PK_DATA,
	PLIP_PK_CHECKSUM
};

/* Which nibble of the current byte is being transferred. */
enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};
183
/* Per-direction (one for send, one for receive) transfer state. */
struct plip_local {
	enum plip_packet_state state;	/* which packet field is next */
	enum plip_nibble_state nibble;	/* which half of the current byte */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;			/* packet length, byte- or word-addressed */
	unsigned short byte;		/* index of the data byte in flight */
	unsigned char  checksum;	/* running 8-bit additive checksum */
	unsigned char  data;		/* scratch byte (e.g. received checksum) */
	struct sk_buff *skb;		/* packet being sent or assembled */
};
206
/* Per-device private data, reached via netdev_priv(dev). */
struct net_local {
	struct net_device *dev;			/* back-pointer to our netdev */
	struct work_struct immediate;		/* runs plip_bh */
	struct delayed_work deferred;		/* retry kick (plip_kick_bh) */
	struct delayed_work timer;		/* poll tick when irq == -1 */
	struct plip_local snd_data;		/* transmit state machine */
	struct plip_local rcv_data;		/* receive state machine */
	struct pardevice *pardev;		/* our parport registration */
	unsigned long  trigger;			/* handshake timeout (usec units) */
	unsigned long  nibble;			/* per-nibble timeout (usec units) */
	enum plip_connection_state connection;	/* link state, guarded by lock */
	unsigned short timeout_count;		/* consecutive timeouts so far */
	int is_deferred;			/* a retry has been scheduled */
	int port_owner;				/* we currently hold the parport */
	int should_relinquish;			/* release port when idle */
	spinlock_t lock;			/* protects connection/state fields */
	atomic_t kill_timer;			/* ask poll timer to stop */
	struct completion killed_timer_cmp;	/* poll timer acked the stop */
};
226
227static inline void enable_parport_interrupts (struct net_device *dev)
228{
229	if (dev->irq != -1)
230	{
231		struct parport *port =
232		   ((struct net_local *)netdev_priv(dev))->pardev->port;
233		port->ops->enable_irq (port);
234	}
235}
236
237static inline void disable_parport_interrupts (struct net_device *dev)
238{
239	if (dev->irq != -1)
240	{
241		struct parport *port =
242		   ((struct net_local *)netdev_priv(dev))->pardev->port;
243		port->ops->disable_irq (port);
244	}
245}
246
247static inline void write_data (struct net_device *dev, unsigned char data)
248{
249	struct parport *port =
250	   ((struct net_local *)netdev_priv(dev))->pardev->port;
251
252	port->ops->write_data (port, data);
253}
254
255static inline unsigned char read_status (struct net_device *dev)
256{
257	struct parport *port =
258	   ((struct net_local *)netdev_priv(dev))->pardev->port;
259
260	return port->ops->read_status (port);
261}
262
263static const struct header_ops plip_header_ops = {
264	.create	= plip_hard_header,
265	.cache  = plip_hard_header_cache,
266};
267
268static const struct net_device_ops plip_netdev_ops = {
269	.ndo_open		 = plip_open,
270	.ndo_stop		 = plip_close,
271	.ndo_start_xmit		 = plip_tx_packet,
272	.ndo_do_ioctl		 = plip_ioctl,
273	.ndo_change_mtu		 = eth_change_mtu,
274	.ndo_set_mac_address	 = eth_mac_addr,
275	.ndo_validate_addr	 = eth_validate_addr,
276};
277
278/* Entry point of PLIP driver.
279   Probe the hardware, and register/initialize the driver.
280
281   PLIP is rather weird, because of the way it interacts with the parport
282   system.  It is _not_ initialised from Space.c.  Instead, plip_init()
283   is called, and that function makes up a "struct net_device" for each port, and
284   then calls us here.
285
286   */
/* Finish initialising a freshly allocated PLIP net_device: set the
   link parameters, wire up the netdev/header ops, and prepare the
   work items that drive the bottom-half state machine.  Called once
   per port by the module init code (see comment above). */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	/* Fake MAC: 0xfc prefix; bytes 2-5 get the local IP in plip_open(). */
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops          = &plip_header_ops;


	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* Polled mode: a delayed work item emulates the parport IRQ. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
316
317/* Bottom half handler for the delayed request.
318   This routine is kicked by do_timer().
319   Request `plip_bh' to be invoked. */
320static void
321plip_kick_bh(struct work_struct *work)
322{
323	struct net_local *nl =
324		container_of(work, struct net_local, deferred.work);
325
326	if (nl->is_deferred)
327		schedule_work(&nl->immediate);
328}
329
330/* Forward declarations of internal routines */
331static int plip_none(struct net_device *, struct net_local *,
332		     struct plip_local *, struct plip_local *);
333static int plip_receive_packet(struct net_device *, struct net_local *,
334			       struct plip_local *, struct plip_local *);
335static int plip_send_packet(struct net_device *, struct net_local *,
336			    struct plip_local *, struct plip_local *);
337static int plip_connection_close(struct net_device *, struct net_local *,
338				 struct plip_local *, struct plip_local *);
339static int plip_error(struct net_device *, struct net_local *,
340		      struct plip_local *, struct plip_local *);
341static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
342				 struct plip_local *snd,
343				 struct plip_local *rcv,
344				 int error);
345
346#define OK        0
347#define TIMEOUT   1
348#define ERROR     2
349#define HS_TIMEOUT	3
350
351typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
352			 struct plip_local *snd, struct plip_local *rcv);
353
/* Bottom-half dispatch table, indexed by enum plip_connection_state.
   Entry order must stay in sync with the enum (PLIP_CN_NONE == 0 ...
   PLIP_CN_ERROR). */
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
362
/* Bottom half handler of PLIP.
   Dispatches to the handler for the current connection state.  If the
   handler (or the timeout-error path) does not return OK, a retry is
   scheduled one jiffy later via the deferred work / plip_kick_bh. */
static void
plip_bh(struct work_struct *work)
{
	struct net_local *nl = container_of(work, struct net_local, immediate);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;
	plip_func f;
	int r;

	nl->is_deferred = 0;
	f = connection_state_table[nl->connection];
	/* Give the error handler a chance to absorb a non-OK result
	   before deciding to retry later. */
	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
	    (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}
}
381
382static void
383plip_timer_bh(struct work_struct *work)
384{
385	struct net_local *nl =
386		container_of(work, struct net_local, timer.work);
387
388	if (!(atomic_read (&nl->kill_timer))) {
389		plip_interrupt (nl->dev);
390
391		schedule_delayed_work(&nl->timer, 1);
392	}
393	else {
394		complete(&nl->killed_timer_cmp);
395	}
396}
397
/* Handle a non-OK result (TIMEOUT, ERROR, HS_TIMEOUT) from a state
   handler.  Returns TIMEOUT to request a retry later, or OK when the
   condition is benign (e.g. our send was pre-empted by an incoming
   packet).  After too many consecutive timeouts the transfer is
   aborted: pending skbs are freed and the link enters PLIP_CN_ERROR. */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get more retries than
			   mid-transfer ones. */
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			/* ERROR during send: remap so the IRQ-rebalance
			   code below runs (see comment above). */
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Abort: reset both directions and free half-transferred skbs. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		/* Failed early in send, so the IRQ is still enabled --
		   balance the disable here. */
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
472
473static int
474plip_none(struct net_device *dev, struct net_local *nl,
475	  struct plip_local *snd, struct plip_local *rcv)
476{
477	return OK;
478}
479
/* PLIP_RECEIVE --- receive a byte (two nibbles) into *data_p.
   *ns_p records progress so the call can be resumed after a TIMEOUT.
   Protocol per nibble: wait for the sender's strobe on BUSY (status
   bit 0x80), confirm with a double read, latch the nibble from the
   status lines, then toggle our ACK on the data lines.
   Returns OK on success, TIMEOUT on timeout */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				/* Double read: accept only a stable value. */
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		/* Low nibble arrives on status bits 3-6. */
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		/* High nibble: status bits 3-6 shifted into bits 4-7. */
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */
	case PLIP_NB_2:
		break;
	}
	return OK;
}
528
/*
 *	Determine the packet's protocol ID. The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *	PLIP is ethernet ish but the daddr might not be valid if unicast.
 *	PLIP fortunately has no bus architecture (its Point-to-point).
 *
 *	We can't fix the daddr thing as that quirk (more bug) is embedded
 *	in far too many old systems not all even running Linux.
 */

static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	/* Strip the link-level header; classify from what remains. */
	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(is_multicast_ether_addr(eth->h_dest))
	{
		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	/* Values >= ETH_P_802_3_MIN are ethertypes; below that the field
	   is an 802.3 length. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
582
/* PLIP_RECEIVE_PACKET --- receive a packet.
   Resumable state machine (rcv->state): ack the trigger, read the
   16-bit length, allocate an skb, read the data and checksum, then
   hand the packet to the network stack.  Cases deliberately fall
   through so a fresh call continues where the last TIMEOUT left off. */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We also have a send pending: use the shorter
			   trigger timeout so a collision resolves fast. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		/* Sum the payload backwards for the additive checksum. */
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is pending: switch straight to send. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
698
/* PLIP_SEND --- send a byte (two nibbles).
   Mirror of plip_receive(): present each nibble on the data lines,
   strobe via data bit 0x10, and wait for the peer's ACK on status
   bit 0x80.  *ns_p makes the call resumable after a TIMEOUT.
   Returns OK on success, TIMEOUT when timeout    */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Present the low nibble (strobe still low). */
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Raise strobe; wait for the peer to drop its ACK. */
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		/* Present the high nibble with strobe still raised. */
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		/* Drop strobe; wait for the peer's ACK to rise again. */
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
743
/* PLIP_SEND_PACKET --- send a packet.
   Resumable state machine (snd->state): trigger the remote interrupt,
   then transmit length, data and checksum via plip_send().  Collisions
   with an incoming packet are detected while triggering and resolved
   in favour of the receive side.  Cases deliberately fall through. */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Other end must be idle (status lines 0x80) first. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				/* Peer acknowledged the trigger. */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled.    */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		/* Sum the payload backwards for the additive checksum. */
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
859
/* Connection-state handler for PLIP_CN_CLOSING: return the link to
   idle and restart the transmit queue.  If another parport driver
   asked for the port while we were busy (should_relinquish), release
   it now that we are idle. */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	/* Re-check under the lock: an interrupt may have moved us to
	   PLIP_CN_RECEIVE in the meantime. */
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
876
/* PLIP_ERROR --- wait till other end settled.
   Connection-state handler for PLIP_CN_ERROR: once the status lines
   read idle (0x80), reset the interface and resume; otherwise retry
   one jiffy later via the deferred work. */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		/* Other end still driving the wire -- poll again later. */
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
901
/* Handle the parallel port interrupts.
   Also called directly by plip_timer_bh() in polled mode.  Validates
   the trigger pattern on the status lines, then moves the link into
   PLIP_CN_RECEIVE and kicks the bottom half. */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	/* A genuine trigger reads 0xc0 on the high status bits. */
	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		/* In polled mode (irq == -1) a mismatch is just "nothing
		   to do", so only log for real interrupts. */
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is race condition
		   around test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}
952
/* ndo_start_xmit: queue one skb for transmission.
   Claims the parport if we released it earlier, stops the transmit
   queue (PLIP is strictly one packet at a time), stores the skb in
   the send state machine and kicks the bottom half. */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return NETDEV_TX_BUSY;
		nl->port_owner = 1;
	}

	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return NETDEV_TX_BUSY;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	/* Don't clobber the state if a receive is already running; the
	   send will start when the receive path notices snd->state. */
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return NETDEV_TX_OK;
}
993
/* Rewrite an ethernet-style header for PLIP: source becomes our fake
   MAC, destination becomes 0xfc 0xfc followed by the peer's IPv4
   address taken from the first address on the interface.  Leaves the
   header untouched when the interface has no IPv4 address yet. */
static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
	const struct in_device *in_dev;

	/* RCU protects the in_device/ifa_list walk. */
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* Any address will do - we take the first */
		const struct in_ifaddr *ifa = in_dev->ifa_list;
		if (ifa) {
			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
			memset(eth->h_dest, 0xfc, 2);
			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
		}
	}
	rcu_read_unlock();
}
1012
1013static int
1014plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1015		 unsigned short type, const void *daddr,
1016		 const void *saddr, unsigned len)
1017{
1018	int ret;
1019
1020	ret = eth_header(skb, dev, type, daddr, saddr, len);
1021	if (ret >= 0)
1022		plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1023
1024	return ret;
1025}
1026
1027static int plip_hard_header_cache(const struct neighbour *neigh,
1028				  struct hh_cache *hh, __be16 type)
1029{
1030	int ret;
1031
1032	ret = eth_header_cache(neigh, hh, type);
1033	if (ret == 0) {
1034		struct ethhdr *eth;
1035
1036		eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1037				       HH_DATA_OFF(sizeof(*eth)));
1038		plip_rewrite_address (neigh->dev, eth);
1039	}
1040
1041	return ret;
1042}
1043
/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.  ndo_open: claim the port, enable interrupts (or start
   the poll timer), reset the state machines and derive the fake MAC
   from the interface's first IPv4 address.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* Polled mode: start the timer that emulates the IRQ. */
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.
	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it).             */

	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1108
/* The inverse routine to plip_open ().  Stops the queue, quiesces the
   interrupt/poll machinery, releases the parallel port, and frees any
   half-transferred packets. */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	/* Wait for any in-flight interrupt handler to finish before we
	   start tearing down state it might touch. */
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Poll mode: signal the polling work item to stop
		   (kill_timer) and wait until it confirms completion. */
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Drop any packet that was in the middle of being sent. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	/* ...and any partially received one. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1155
1156static int
1157plip_preempt(void *handle)
1158{
1159	struct net_device *dev = (struct net_device *)handle;
1160	struct net_local *nl = netdev_priv(dev);
1161
1162	/* Stand our ground if a datagram is on the wire */
1163	if (nl->connection != PLIP_CN_NONE) {
1164		nl->should_relinquish = 1;
1165		return 1;
1166	}
1167
1168	nl->port_owner = 0;	/* Remember that we released the bus */
1169	return 0;
1170}
1171
1172static void
1173plip_wakeup(void *handle)
1174{
1175	struct net_device *dev = (struct net_device *)handle;
1176	struct net_local *nl = netdev_priv(dev);
1177
1178	if (nl->port_owner) {
1179		/* Why are we being woken up? */
1180		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1181		if (!parport_claim(nl->pardev))
1182			/* bus_owner is already set (but why?) */
1183			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1184		else
1185			return;
1186	}
1187
1188	if (!(dev->flags & IFF_UP))
1189		/* Don't need the port when the interface is down */
1190		return;
1191
1192	if (!parport_claim(nl->pardev)) {
1193		nl->port_owner = 1;
1194		/* Clear the data port. */
1195		write_data (dev, 0x00);
1196	}
1197}
1198
1199static int
1200plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1201{
1202	struct net_local *nl = netdev_priv(dev);
1203	struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1204
1205	if (cmd != SIOCDEVPLIP)
1206		return -EOPNOTSUPP;
1207
1208	switch(pc->pcmd) {
1209	case PLIP_GET_TIMEOUT:
1210		pc->trigger = nl->trigger;
1211		pc->nibble  = nl->nibble;
1212		break;
1213	case PLIP_SET_TIMEOUT:
1214		if(!capable(CAP_NET_ADMIN))
1215			return -EPERM;
1216		nl->trigger = pc->trigger;
1217		nl->nibble  = pc->nibble;
1218		break;
1219	default:
1220		return -EOPNOTSUPP;
1221	}
1222	return 0;
1223}
1224
1225static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1226static int timid;
1227
1228module_param_array(parport, int, NULL, 0);
1229module_param(timid, int, 0);
1230MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
1231
1232static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1233
1234static inline int
1235plip_searchfor(int list[], int a)
1236{
1237	int i;
1238	for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1239		if (list[i] == a) return 1;
1240	}
1241	return 0;
1242}
1243
1244/* plip_attach() is called (by the parport code) when a port is
1245 * available to use. */
1246static void plip_attach (struct parport *port)
1247{
1248	static int unit;
1249	struct net_device *dev;
1250	struct net_local *nl;
1251	char name[IFNAMSIZ];
1252
1253	if ((parport[0] == -1 && (!timid || !port->devices)) ||
1254	    plip_searchfor(parport, port->number)) {
1255		if (unit == PLIP_MAX) {
1256			printk(KERN_ERR "plip: too many devices\n");
1257			return;
1258		}
1259
1260		sprintf(name, "plip%d", unit);
1261		dev = alloc_etherdev(sizeof(struct net_local));
1262		if (!dev)
1263			return;
1264
1265		strcpy(dev->name, name);
1266
1267		dev->irq = port->irq;
1268		dev->base_addr = port->base;
1269		if (port->irq == -1) {
1270			printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1271		                 "which is fairly inefficient!\n", port->name);
1272		}
1273
1274		nl = netdev_priv(dev);
1275		nl->dev = dev;
1276		nl->pardev = parport_register_device(port, dev->name, plip_preempt,
1277						 plip_wakeup, plip_interrupt,
1278						 0, dev);
1279
1280		if (!nl->pardev) {
1281			printk(KERN_ERR "%s: parport_register failed\n", name);
1282			goto err_free_dev;
1283		}
1284
1285		plip_init_netdev(dev);
1286
1287		if (register_netdev(dev)) {
1288			printk(KERN_ERR "%s: network register failed\n", name);
1289			goto err_parport_unregister;
1290		}
1291
1292		printk(KERN_INFO "%s", version);
1293		if (dev->irq != -1)
1294			printk(KERN_INFO "%s: Parallel port at %#3lx, "
1295					 "using IRQ %d.\n",
1296				         dev->name, dev->base_addr, dev->irq);
1297		else
1298			printk(KERN_INFO "%s: Parallel port at %#3lx, "
1299					 "not using IRQ.\n",
1300					 dev->name, dev->base_addr);
1301		dev_plip[unit++] = dev;
1302	}
1303	return;
1304
1305err_parport_unregister:
1306	parport_unregister_device(nl->pardev);
1307err_free_dev:
1308	free_netdev(dev);
1309}
1310
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do: devices are only torn down at module unload. */
}
1317
/* Hooks through which the parport layer notifies us when parallel
   ports appear or disappear. */
static struct parport_driver plip_driver = {
	.name	= "plip",
	.attach = plip_attach,
	.detach = plip_detach
};
1323
1324static void __exit plip_cleanup_module (void)
1325{
1326	struct net_device *dev;
1327	int i;
1328
1329	parport_unregister_driver (&plip_driver);
1330
1331	for (i=0; i < PLIP_MAX; i++) {
1332		if ((dev = dev_plip[i])) {
1333			struct net_local *nl = netdev_priv(dev);
1334			unregister_netdev(dev);
1335			if (nl->port_owner)
1336				parport_release(nl->pardev);
1337			parport_unregister_device(nl->pardev);
1338			free_netdev(dev);
1339			dev_plip[i] = NULL;
1340		}
1341	}
1342}
1343
1344#ifndef MODULE
1345
1346static int parport_ptr;
1347
/* Parse the "plip=" kernel command-line option.  Accepted forms:
   "plip=parportN" (use port N), "plip=timid" (only claim unused
   ports), "plip=" / "plip=0" (disable the driver). */
static int __init plip_setup(char *str)
{
	int ints[4];

	/* get_options() consumes any leading comma-separated integers
	   into ints[] (ints[0] = how many) and returns the rest of the
	   string. */
	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		/* "parportN": append N to the list of ports to claim. */
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			/* Old-style "plip=0xNNN" I/O-base syntax is no
			   longer supported. */
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}
1375
1376__setup("plip=", plip_setup);
1377
1378#endif /* !MODULE */
1379
1380static int __init plip_init (void)
1381{
1382	if (parport[0] == -2)
1383		return 0;
1384
1385	if (parport[0] != -1 && timid) {
1386		printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1387		timid = 0;
1388	}
1389
1390	if (parport_register_driver (&plip_driver)) {
1391		printk (KERN_WARNING "plip: couldn't register driver\n");
1392		return 1;
1393	}
1394
1395	return 0;
1396}
1397
1398module_init(plip_init);
1399module_exit(plip_cleanup_module);
1400MODULE_LICENSE("GPL");
1401