
ozproto.c revision 2b8b61aaef59751fe85c1b2df51a848a6c50d202
/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbsvc.h"

#include "ozappif.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

#define OZ_CF_CONN_SUCCESS	1
#define OZ_CF_CONN_FAILURE	2

#define OZ_DO_STOP		1
#define OZ_DO_SLEEP		2

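/*
 * One active binding of the Ozmo packet handler: either to a single
 * named network device or, if name is empty, to all devices.
 */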
struct oz_binding {
	struct packet_type ptype;
	char name[OZ_MAX_BINDING_LEN];
	struct list_head link;
};

/*
 * External variable
 */
DEFINE_SPINLOCK(g_polling_lock);
/*
 * Static variables.
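 * g_pd_list holds every known peripheral device (PD), g_binding the
 * active packet-type bindings, g_rx_queue defers frames that arrive
 * while one is already being processed, and g_apps is the bitmap of
 * enabled applications.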
 */
static LIST_HEAD(g_pd_list);
static LIST_HEAD(g_binding);
static DEFINE_SPINLOCK(g_binding_lock);
static struct sk_buff_head g_rx_queue;
static u8 g_session_id;
static u16 g_apps = 0x1;
static int g_processing_rx;

struct kmem_cache *oz_elt_info_cache;

/*
 * Context: softirq-serialized
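 * Returns the next session id in the range 1-255, skipping @exclude so
 * a reconnecting device never receives the id it presented.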
 */
static u8 oz_get_new_session_id(u8 exclude)
{
	if (++g_session_id == 0)
		g_session_id = 1;
	if (g_session_id == exclude) {
		if (++g_session_id == 0)
			g_session_id = 1;
	}
	return g_session_id;
}

/*
 * Context: softirq-serialized
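 * Builds and transmits an OZ_ELT_CONNECT_RSP element carrying @status;
 * on success it also reports the negotiated mode, session id and
 * application bitmap.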
 */
static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct oz_elt_connect_rsp *body;

	int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
			sizeof(struct oz_elt_connect_rsp);
	skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
	elt = (struct oz_elt *)(oz_hdr+1);
	body = (struct oz_elt_connect_rsp *)(elt+1);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	/* Fill in device header */
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return;
	}
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
	oz_hdr->last_pkt_num = 0;
	put_unaligned(0, &oz_hdr->pkt_num);
	elt->type = OZ_ELT_CONNECT_RSP;
	elt->length = sizeof(struct oz_elt_connect_rsp);
	memset(body, 0, sizeof(struct oz_elt_connect_rsp));
	body->status = status;
	if (status == 0) {
		body->mode = pd->mode;
		body->session_id = pd->session_id;
		put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
	}
	oz_dbg(ON, "TX: OZ_ELT_CONNECT_RSP %d\n", status);
	dev_queue_xmit(skb);
}

/*
 * Context: softirq-serialized
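 * Converts the keep-alive value from a connect/update request into
 * milliseconds according to its unit field (seconds, minutes or hours).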
 */
static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
{
	unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;

	switch (kalive & OZ_KALIVE_TYPE_MASK) {
	case OZ_KALIVE_SPECIAL:
		pd->keep_alive = keep_alive * 1000*60*60*24*20;
		break;
	case OZ_KALIVE_SECS:
		pd->keep_alive = keep_alive*1000;
		break;
	case OZ_KALIVE_MINS:
		pd->keep_alive = keep_alive*1000*60;
		break;
	case OZ_KALIVE_HOURS:
		pd->keep_alive = keep_alive*1000*60*60;
		break;
	default:
		pd->keep_alive = 0;
	}
	oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive);
}

/*
 * Context: softirq-serialized
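 * presleep is expressed in units of 100 ms; zero selects the default
 * OZ_PRESLEEP_TOUT.  When @start_timer is set the timeout is re-armed
 * immediately.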
 */
static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer)
{
	if (presleep)
		pd->presleep = presleep*100;
	else
		pd->presleep = OZ_PRESLEEP_TOUT;
	if (start_timer) {
		spin_unlock(&g_polling_lock);
		oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
		spin_lock(&g_polling_lock);
	}
	oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep);
}

/*
 * Context: softirq-serialized
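 * Handles an OZ_ELT_CONNECT_REQ element: finds or allocates the PD for
 * @pd_addr, negotiates mode, session id and the set of applications,
 * and sends a connect response.  Returns the PD, or NULL on failure.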
 */
static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
			const u8 *pd_addr, struct net_device *net_dev)
{
	struct oz_pd *pd;
	struct oz_elt_connect_req *body =
			(struct oz_elt_connect_req *)(elt+1);
	u8 rsp_status = OZ_STATUS_SUCCESS;
	u8 stop_needed = 0;
	u16 new_apps = g_apps;
	struct net_device *old_net_dev = NULL;
	struct oz_pd *free_pd = NULL;

	if (cur_pd) {
		pd = cur_pd;
		spin_lock_bh(&g_polling_lock);
	} else {
		struct oz_pd *pd2 = NULL;
		struct list_head *e;
		pd = oz_pd_alloc(pd_addr);
		if (pd == NULL)
			return NULL;
		getnstimeofday(&pd->last_rx_timestamp);
		spin_lock_bh(&g_polling_lock);
		list_for_each(e, &g_pd_list) {
			pd2 = container_of(e, struct oz_pd, link);
			if (ether_addr_equal(pd2->mac_addr, pd_addr)) {
				free_pd = pd;
				pd = pd2;
				break;
			}
		}
		if (pd != pd2)
			list_add_tail(&pd->link, &g_pd_list);
	}
	if (pd == NULL) {
		spin_unlock_bh(&g_polling_lock);
		return NULL;
	}
	if (pd->net_dev != net_dev) {
		old_net_dev = pd->net_dev;
		dev_hold(net_dev);
		pd->net_dev = net_dev;
	}
	oz_dbg(ON, "Host vendor: %d\n", body->host_vendor);
	pd->max_tx_size = OZ_MAX_TX_SIZE;
	pd->mode = body->mode;
	pd->pd_info = body->pd_info;
	if (pd->mode & OZ_F_ISOC_NO_ELTS) {
		pd->ms_per_isoc = body->ms_per_isoc;
		if (!pd->ms_per_isoc)
			pd->ms_per_isoc = 4;

		switch (body->ms_isoc_latency & OZ_LATENCY_MASK) {
		case OZ_ONE_MS_LATENCY:
			pd->isoc_latency = (body->ms_isoc_latency &
					~OZ_LATENCY_MASK) / pd->ms_per_isoc;
			break;
		case OZ_TEN_MS_LATENCY:
			pd->isoc_latency = ((body->ms_isoc_latency &
				~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc;
			break;
		default:
			pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC;
		}
	}
	if (body->max_len_div16)
		pd->max_tx_size = ((u16)body->max_len_div16)<<4;
	oz_dbg(ON, "Max frame:%u Ms per isoc:%u\n",
	       pd->max_tx_size, pd->ms_per_isoc);
	pd->max_stream_buffering = 3*1024;
	pd->pulse_period = OZ_QUANTUM;
	pd_set_presleep(pd, body->presleep, 0);
	pd_set_keepalive(pd, body->keep_alive);

	new_apps &= le16_to_cpu(get_unaligned(&body->apps));
	if ((new_apps & 0x1) && (body->session_id)) {
		if (pd->session_id) {
			if (pd->session_id != body->session_id) {
				rsp_status = OZ_STATUS_SESSION_MISMATCH;
				goto done;
			}
		} else {
			new_apps &= ~0x1;  /* Resume not permitted */
			pd->session_id =
				oz_get_new_session_id(body->session_id);
		}
	} else {
		if (pd->session_id && !body->session_id) {
			rsp_status = OZ_STATUS_SESSION_TEARDOWN;
			stop_needed = 1;
		} else {
			new_apps &= ~0x1;  /* Resume not permitted */
			pd->session_id =
				oz_get_new_session_id(body->session_id);
		}
	}
done:
	if (rsp_status == OZ_STATUS_SUCCESS) {
		u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
		u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
		u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
		spin_unlock_bh(&g_polling_lock);
		oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
		oz_dbg(ON, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
		       new_apps, pd->total_apps, pd->paused_apps);
		if (start_apps) {
			if (oz_services_start(pd, start_apps, 0))
				rsp_status = OZ_STATUS_TOO_MANY_PDS;
		}
		if (resume_apps)
			if (oz_services_start(pd, resume_apps, 1))
				rsp_status = OZ_STATUS_TOO_MANY_PDS;
		if (stop_apps)
			oz_services_stop(pd, stop_apps, 0);
		oz_pd_request_heartbeat(pd);
	} else {
		spin_unlock_bh(&g_polling_lock);
	}
	oz_send_conn_rsp(pd, rsp_status);
	if (rsp_status != OZ_STATUS_SUCCESS) {
		if (stop_needed)
			oz_pd_stop(pd);
		oz_pd_put(pd);
		pd = NULL;
	}
	if (old_net_dev)
		dev_put(old_net_dev);
	if (free_pd)
		oz_pd_destroy(free_pd);
	return pd;
}

/*
 * Context: softirq-serialized
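 * Records a farewell report for endpoint @ep_num / @index on @pd,
 * replacing any earlier report for the same endpoint and index.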
 */
static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
			const u8 *report, u8 len)
{
	struct oz_farewell *f;
	struct oz_farewell *f2;
	int found = 0;

	f = kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC);
	if (!f)
		return;
	f->ep_num = ep_num;
	f->index = index;
	f->len = len;
	memcpy(f->report, report, len);
	oz_dbg(ON, "RX: Adding farewell report\n");
	spin_lock(&g_polling_lock);
	list_for_each_entry(f2, &pd->farewell_list, link) {
		if ((f2->ep_num == ep_num) && (f2->index == index)) {
			found = 1;
			list_del(&f2->link);
			break;
		}
	}
	list_add_tail(&f->link, &pd->farewell_list);
	spin_unlock(&g_polling_lock);
	if (found)
		kfree(f2);
}

/*
 * Context: softirq-serialized
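 * Parses one received Ozmo frame: checks the protocol version, updates
 * the PD's timers and duplicate-detection state, acknowledges triggered
 * mode frames and dispatches each element in the payload.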
 */
static void oz_rx_frame(struct sk_buff *skb)
{
	u8 *mac_hdr;
	u8 *src_addr;
	struct oz_elt *elt;
	int length;
	struct oz_pd *pd = NULL;
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
	struct timespec current_time;
	int dup = 0;
	u32 pkt_num;

	oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
	       oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
	mac_hdr = skb_mac_header(skb);
	src_addr = &mac_hdr[ETH_ALEN];
	length = skb->len;

	/* Check the version field */
	if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
		oz_dbg(ON, "Incorrect protocol version: %d\n",
		       oz_get_prot_ver(oz_hdr->control));
		goto done;
	}

	pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));

	pd = oz_pd_find(src_addr);
	if (pd) {
		if (!(pd->state & OZ_PD_S_CONNECTED))
			oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
		getnstimeofday(&current_time);
		if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) ||
			(pd->presleep < MSEC_PER_SEC)) {
			oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
			pd->last_rx_timestamp = current_time;
		}
		if (pkt_num != pd->last_rx_pkt_num) {
			pd->last_rx_pkt_num = pkt_num;
		} else {
			dup = 1;
			oz_dbg(ON, "Duplicate frame\n");
		}
	}

	if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
		oz_dbg(RX_FRAMES, "Received TRIGGER Frame\n");
		pd->last_sent_frame = &pd->tx_queue;
		if (oz_hdr->control & OZ_F_ACK) {
			/* Retire completed frames */
			oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
		}
		if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
				(pd->state == OZ_PD_S_CONNECTED)) {
			int backlog = pd->nb_queued_frames;
			pd->trigger_pkt_num = pkt_num;
			/* Send queued frames */
			oz_send_queued_frames(pd, backlog);
		}
	}

	length -= sizeof(struct oz_hdr);
	elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));

	while (length >= sizeof(struct oz_elt)) {
		length -= sizeof(struct oz_elt) + elt->length;
		if (length < 0)
			break;
		switch (elt->type) {
		case OZ_ELT_CONNECT_REQ:
			oz_dbg(ON, "RX: OZ_ELT_CONNECT_REQ\n");
			pd = oz_connect_req(pd, elt, src_addr, skb->dev);
			break;
		case OZ_ELT_DISCONNECT:
			oz_dbg(ON, "RX: OZ_ELT_DISCONNECT\n");
			if (pd)
				oz_pd_sleep(pd);
			break;
		case OZ_ELT_UPDATE_PARAM_REQ: {
				struct oz_elt_update_param *body =
					(struct oz_elt_update_param *)(elt + 1);
				oz_dbg(ON, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
				if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
					spin_lock(&g_polling_lock);
					pd_set_keepalive(pd, body->keepalive);
					pd_set_presleep(pd, body->presleep, 1);
					spin_unlock(&g_polling_lock);
				}
			}
			break;
		case OZ_ELT_FAREWELL_REQ: {
				struct oz_elt_farewell *body =
					(struct oz_elt_farewell *)(elt + 1);
				oz_dbg(ON, "RX: OZ_ELT_FAREWELL_REQ\n");
				/* Ignore farewell reports from unknown PDs */
				if (pd)
					oz_add_farewell(pd, body->ep_num,
						body->index, body->report,
						elt->length + 1 - sizeof(*body));
			}
			break;
		case OZ_ELT_APP_DATA:
			if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
				struct oz_app_hdr *app_hdr =
					(struct oz_app_hdr *)(elt+1);
				if (dup)
					break;
				oz_handle_app_elt(pd, app_hdr->app_id, elt);
			}
			break;
		default:
			oz_dbg(ON, "RX: Unknown elt %02x\n", elt->type);
		}
		elt = oz_next_elt(elt);
	}
done:
	if (pd)
		oz_pd_put(pd);
	consume_skb(skb);
}

/*
 * Context: process
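 * Removes every packet-type binding and stops every PD, then destroys
 * the element-info cache.  Called when the protocol is shut down.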
 */
void oz_protocol_term(void)
{
	struct oz_binding *b, *t;

	/* Walk the list of bindings and remove each one. */
	spin_lock_bh(&g_binding_lock);
	list_for_each_entry_safe(b, t, &g_binding, link) {
		list_del(&b->link);
		spin_unlock_bh(&g_binding_lock);
		dev_remove_pack(&b->ptype);
		if (b->ptype.dev)
			dev_put(b->ptype.dev);
		kfree(b);
		spin_lock_bh(&g_binding_lock);
	}
	spin_unlock_bh(&g_binding_lock);
	/* Walk the list of PDs and stop each one. This causes the PD to be
	 * removed from the list so we can just pull each one from the head
	 * of the list.
	 */
	spin_lock_bh(&g_polling_lock);
	while (!list_empty(&g_pd_list)) {
		struct oz_pd *pd =
			list_first_entry(&g_pd_list, struct oz_pd, link);
		oz_pd_get(pd);
		spin_unlock_bh(&g_polling_lock);
		oz_pd_stop(pd);
		oz_pd_put(pd);
		spin_lock_bh(&g_polling_lock);
	}
	spin_unlock_bh(&g_polling_lock);
	oz_dbg(ON, "Protocol stopped\n");

	kmem_cache_destroy(oz_elt_info_cache);
}

/*
 * Context: softirq
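 * Heartbeat tasklet: sends a heartbeat to all of the PD's connected
 * applications and drops the reference taken when it was scheduled.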
 */
void oz_pd_heartbeat_handler(unsigned long data)
{
	struct oz_pd *pd = (struct oz_pd *)data;
	u16 apps = 0;

	spin_lock_bh(&g_polling_lock);
	if (pd->state & OZ_PD_S_CONNECTED)
		apps = pd->total_apps;
	spin_unlock_bh(&g_polling_lock);
	if (apps)
		oz_pd_heartbeat(pd, apps);
	oz_pd_put(pd);
}

/*
 * Context: softirq
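 * Timeout tasklet: acts on the armed timeout type, either putting the
 * PD to sleep or stopping it, then drops the scheduling reference.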
 */
void oz_pd_timeout_handler(unsigned long data)
{
	int type;
	struct oz_pd *pd = (struct oz_pd *)data;

	spin_lock_bh(&g_polling_lock);
	type = pd->timeout_type;
	spin_unlock_bh(&g_polling_lock);
	switch (type) {
	case OZ_TIMER_TOUT:
		oz_pd_sleep(pd);
		break;
	case OZ_TIMER_STOP:
		oz_pd_stop(pd);
		break;
	}
	oz_pd_put(pd);
}

/*
 * Context: Interrupt
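 * Periodic heartbeat hrtimer callback: re-arms itself pulse_period
 * milliseconds ahead and defers the actual work to the heartbeat
 * tasklet.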
 */
enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer)
{
	struct oz_pd *pd;

	pd = container_of(timer, struct oz_pd, heartbeat);
	hrtimer_forward_now(timer, ktime_set(pd->pulse_period / MSEC_PER_SEC,
			(pd->pulse_period % MSEC_PER_SEC) * NSEC_PER_MSEC));
	oz_pd_get(pd);
	tasklet_schedule(&pd->heartbeat_tasklet);
	return HRTIMER_RESTART;
}

/*
 * Context: Interrupt
 */
enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer)
{
	struct oz_pd *pd;

	pd = container_of(timer, struct oz_pd, timeout);
	oz_pd_get(pd);
	tasklet_schedule(&pd->timeout_tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Context: softirq or process
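 * Arms (or re-arms) the PD's timeout or heartbeat hrtimer to fire
 * @due_time milliseconds from now.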
 */
void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time)
{
	spin_lock_bh(&g_polling_lock);
	switch (type) {
	case OZ_TIMER_TOUT:
	case OZ_TIMER_STOP:
		if (hrtimer_active(&pd->timeout)) {
			hrtimer_set_expires(&pd->timeout,
				ktime_set(due_time / MSEC_PER_SEC,
					  (due_time % MSEC_PER_SEC) *
					  NSEC_PER_MSEC));
			hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL);
		} else {
			hrtimer_start(&pd->timeout,
				ktime_set(due_time / MSEC_PER_SEC,
					  (due_time % MSEC_PER_SEC) *
					  NSEC_PER_MSEC),
				HRTIMER_MODE_REL);
		}
		pd->timeout_type = type;
		break;
	case OZ_TIMER_HEARTBEAT:
		if (!hrtimer_active(&pd->heartbeat))
			hrtimer_start(&pd->heartbeat,
				ktime_set(due_time / MSEC_PER_SEC,
					  (due_time % MSEC_PER_SEC) *
					  NSEC_PER_MSEC),
				HRTIMER_MODE_REL);
		break;
	}
	spin_unlock_bh(&g_polling_lock);
}

/*
 * Context: softirq or process
 */
void oz_pd_request_heartbeat(struct oz_pd *pd)
{
	oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ?
					pd->pulse_period : OZ_QUANTUM);
}

/*
 * Context: softirq or process
 */
struct oz_pd *oz_pd_find(const u8 *mac_addr)
{
	struct oz_pd *pd;
	struct list_head *e;

	spin_lock_bh(&g_polling_lock);
	list_for_each(e, &g_pd_list) {
		pd = container_of(e, struct oz_pd, link);
		if (ether_addr_equal(pd->mac_addr, mac_addr)) {
			atomic_inc(&pd->ref_count);
			spin_unlock_bh(&g_polling_lock);
			return pd;
		}
	}
	spin_unlock_bh(&g_polling_lock);
	return NULL;
}

/*
 * Context: process
 */
void oz_app_enable(int app_id, int enable)
{
	if (app_id < OZ_NB_APPS) {
		spin_lock_bh(&g_polling_lock);
		if (enable)
			g_apps |= (1<<app_id);
		else
			g_apps &= ~(1<<app_id);
		spin_unlock_bh(&g_polling_lock);
	}
}

/*
 * Context: softirq
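 * packet_type receive hook for OZ_ETHERTYPE frames.  Frames are handled
 * directly; any frame that arrives while another is being processed is
 * queued on g_rx_queue and drained before returning.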
 */
static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
	spin_lock_bh(&g_rx_queue.lock);
	if (g_processing_rx) {
		/* We already hold the lock so use __ variant. */
		__skb_queue_head(&g_rx_queue, skb);
		spin_unlock_bh(&g_rx_queue.lock);
	} else {
		g_processing_rx = 1;
		do {
			spin_unlock_bh(&g_rx_queue.lock);
			oz_rx_frame(skb);
			spin_lock_bh(&g_rx_queue.lock);
			if (skb_queue_empty(&g_rx_queue)) {
				g_processing_rx = 0;
				spin_unlock_bh(&g_rx_queue.lock);
				break;
			}
			/* We already hold the lock so use __ variant. */
			skb = __skb_dequeue(&g_rx_queue);
		} while (1);
	}
	return 0;
}

/*
 * Context: process
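 * Registers the OZ_ETHERTYPE packet handler, bound to the named network
 * device, or to all devices when @net_dev is NULL or empty.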
 */
void oz_binding_add(const char *net_dev)
{
	struct oz_binding *binding;

	binding = kzalloc(sizeof(struct oz_binding), GFP_KERNEL);
	if (!binding)
		return;

	binding->ptype.type = htons(OZ_ETHERTYPE);
	binding->ptype.func = oz_pkt_recv;
	if (net_dev && *net_dev) {
		strncpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
		oz_dbg(ON, "Adding binding: %s\n", net_dev);
		binding->ptype.dev = dev_get_by_name(&init_net, net_dev);
		if (binding->ptype.dev == NULL) {
			oz_dbg(ON, "Netdev %s not found\n", net_dev);
			kfree(binding);
			return;
		}
	}
	dev_add_pack(&binding->ptype);
	spin_lock_bh(&g_binding_lock);
	list_add_tail(&binding->link, &g_binding);
	spin_unlock_bh(&g_binding_lock);
}

/*
 * Context: process
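 * Moves every PD bound to @net_dev onto a private list and stops each
 * one; used when the binding to that device is removed.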
 */
static void pd_stop_all_for_device(struct net_device *net_dev)
{
	struct list_head h;
	struct oz_pd *pd;
	struct oz_pd *n;

	INIT_LIST_HEAD(&h);
	spin_lock_bh(&g_polling_lock);
	list_for_each_entry_safe(pd, n, &g_pd_list, link) {
		if (pd->net_dev == net_dev) {
			list_move(&pd->link, &h);
			oz_pd_get(pd);
		}
	}
	spin_unlock_bh(&g_polling_lock);
	while (!list_empty(&h)) {
		pd = list_first_entry(&h, struct oz_pd, link);
		oz_pd_stop(pd);
		oz_pd_put(pd);
	}
}

/*
 * Context: process
 */
void oz_binding_remove(const char *net_dev)
{
	struct oz_binding *binding;
	int found = 0;

	oz_dbg(ON, "Removing binding: %s\n", net_dev);
	spin_lock_bh(&g_binding_lock);
	list_for_each_entry(binding, &g_binding, link) {
		if (strncmp(binding->name, net_dev, OZ_MAX_BINDING_LEN) == 0) {
			oz_dbg(ON, "Binding '%s' found\n", net_dev);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&g_binding_lock);
	if (found) {
		dev_remove_pack(&binding->ptype);
		if (binding->ptype.dev) {
			dev_put(binding->ptype.dev);
			pd_stop_all_for_device(binding->ptype.dev);
		}
		list_del(&binding->link);
		kfree(binding);
	}
}

/*
 * Context: process
 */
static char *oz_get_next_device_name(char *s, char *dname, int max_size)
{
	while (*s == ',')
		s++;
	while (*s && (*s != ',') && max_size > 1) {
		*dname++ = *s++;
		max_size--;
	}
	*dname = 0;
	return s;
}

/*
 * Context: process
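 * Creates the element-info cache and binds to the comma-separated list
 * of device names in @devs; "*" binds to all network devices.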
 */
int oz_protocol_init(char *devs)
{
	oz_elt_info_cache = KMEM_CACHE(oz_elt_info, 0);
	if (!oz_elt_info_cache)
		return -ENOMEM;

	skb_queue_head_init(&g_rx_queue);
	if (devs[0] == '*') {
		oz_binding_add(NULL);
	} else {
		char d[32];
		while (*devs) {
			devs = oz_get_next_device_name(devs, d, sizeof(d));
			if (d[0])
				oz_binding_add(d);
		}
	}
	return 0;
}

/*
 * Context: process
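 * Copies up to @max_count PD MAC addresses into @addr and returns the
 * number copied.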
 */
int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
{
	struct oz_pd *pd;
	struct list_head *e;
	int count = 0;

	spin_lock_bh(&g_polling_lock);
	list_for_each(e, &g_pd_list) {
		if (count >= max_count)
			break;
		pd = container_of(e, struct oz_pd, link);
		ether_addr_copy((u8 *)&addr[count++], pd->mac_addr);
	}
	spin_unlock_bh(&g_polling_lock);
	return count;
}