1/*
2 * ipmi_msghandler.c
3 *
4 * Incoming and outgoing message routing for an IPMI interface.
5 *
6 * Author: MontaVista Software, Inc.
7 *         Corey Minyard <minyard@mvista.com>
8 *         source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 *  This program is free software; you can redistribute it and/or modify it
13 *  under the terms of the GNU General Public License as published by the
14 *  Free Software Foundation; either version 2 of the License, or (at your
15 *  option) any later version.
16 *
17 *
18 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 *  You should have received a copy of the GNU General Public License along
30 *  with this program; if not, write to the Free Software Foundation, Inc.,
31 *  675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34#include <linux/module.h>
35#include <linux/errno.h>
36#include <linux/poll.h>
37#include <linux/sched.h>
38#include <linux/seq_file.h>
39#include <linux/spinlock.h>
40#include <linux/mutex.h>
41#include <linux/slab.h>
42#include <linux/ipmi.h>
43#include <linux/ipmi_smi.h>
44#include <linux/notifier.h>
45#include <linux/init.h>
46#include <linux/proc_fs.h>
47#include <linux/rcupdate.h>
48#include <linux/interrupt.h>
49
50#define PFX "IPMI message handler: "
51
52#define IPMI_DRIVER_VERSION "39.2"
53
54static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
55static int ipmi_init_msghandler(void);
56static void smi_recv_tasklet(unsigned long);
57static void handle_new_recv_msgs(ipmi_smi_t intf);
58static void need_waiter(ipmi_smi_t intf);
59
60static int initialized;
61
62#ifdef CONFIG_PROC_FS
63static struct proc_dir_entry *proc_ipmi_root;
64#endif /* CONFIG_PROC_FS */
65
66/* Remain in auto-maintenance mode for this amount of time (in ms). */
67#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
68
69#define MAX_EVENTS_IN_QUEUE	25
70
71/*
72 * Don't let a message sit in a queue forever; always time it with at least
73 * the max message timer.  This is in milliseconds.
74 */
75#define MAX_MSG_TIMEOUT		60000
76
77/* Call every ~1000 ms. */
78#define IPMI_TIMEOUT_TIME	1000
79
80/* How many jiffies does it take to get to the timeout time. */
81#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
82
83/*
84 * Request events from the queue every second (this is the number of
85 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
86 * future, IPMI will add a way to know immediately if an event is in
87 * the queue and this silliness can go away.
88 */
89#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
90
91/*
92 * The main "user" data structure.
93 */
94struct ipmi_user {
95	struct list_head link;
96
97	/* Set to false when the user is destroyed. */
98	bool valid;
99
100	struct kref refcount;
101
102	/* The upper layer that handles receive messages. */
103	struct ipmi_user_hndl *handler;
104	void             *handler_data;
105
106	/* The interface this user is bound to. */
107	ipmi_smi_t intf;
108
109	/* Does this user receive IPMI events? */
110	bool gets_events;
111};
112
113struct cmd_rcvr {
114	struct list_head link;
115
116	ipmi_user_t   user;
117	unsigned char netfn;
118	unsigned char cmd;
119	unsigned int  chans;
120
121	/*
122	 * This is used to form a linked list during mass deletion.
123	 * Since this is in an RCU list, we cannot use the link above
124	 * or change any data until the RCU period completes.  So we
125	 * use this next variable during mass deletion so we can have
126	 * a list and don't have to wait and restart the search on
127	 * every individual deletion of a command.
128	 */
129	struct cmd_rcvr *next;
130};
131
132struct seq_table {
133	unsigned int         inuse : 1;
134	unsigned int         broadcast : 1;
135
136	unsigned long        timeout;
137	unsigned long        orig_timeout;
138	unsigned int         retries_left;
139
140	/*
141	 * To verify on an incoming send message response that this is
142	 * the message that the response is for, we keep a sequence id
143	 * and increment it every time we send a message.
144	 */
145	long                 seqid;
146
147	/*
148	 * This is held so we can properly respond to the message on a
149	 * timeout, and it is used to hold the temporary data for
150	 * retransmission, too.
151	 */
152	struct ipmi_recv_msg *recv_msg;
153};
154
155/*
156 * Store the information in a msgid (long) to allow us to find a
157 * sequence table entry from the msgid.
158 */
159#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
160
161#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
162	do {								\
163		seq = ((msgid >> 26) & 0x3f);				\
164		seqid = (msgid & 0x3ffffff);				\
165	} while (0)
166
167#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
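/*
 * Illustrative sketch (not part of the original source): the macros above
 * pack the 6-bit sequence table index into bits 31..26 of the msgid and
 * the 26-bit rolling seqid into bits 25..0.  For example, with seq = 0x12
 * and seqid = 0x4321:
 *
 *	long msgid = STORE_SEQ_IN_MSGID(0x12, 0x4321);	-> msgid == 0x48004321
 *	GET_SEQ_FROM_MSGID(msgid, seq, seqid);		-> seq == 0x12, seqid == 0x4321
 */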
168
169struct ipmi_channel {
170	unsigned char medium;
171	unsigned char protocol;
172
173	/*
174	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
175	 * but may be changed by the user.
176	 */
177	unsigned char address;
178
179	/*
180	 * My LUN.  This should generally stay the SMS LUN, but just in
181	 * case...
182	 */
183	unsigned char lun;
184};
185
186#ifdef CONFIG_PROC_FS
187struct ipmi_proc_entry {
188	char                   *name;
189	struct ipmi_proc_entry *next;
190};
191#endif
192
193struct bmc_device {
194	struct platform_device *dev;
195	struct ipmi_device_id  id;
196	unsigned char          guid[16];
197	int                    guid_set;
198
199	struct kref	       refcount;
200
201	/* bmc device attributes */
202	struct device_attribute device_id_attr;
203	struct device_attribute provides_dev_sdrs_attr;
204	struct device_attribute revision_attr;
205	struct device_attribute firmware_rev_attr;
206	struct device_attribute version_attr;
207	struct device_attribute add_dev_support_attr;
208	struct device_attribute manufacturer_id_attr;
209	struct device_attribute product_id_attr;
210	struct device_attribute guid_attr;
211	struct device_attribute aux_firmware_rev_attr;
212};
213
214/*
215 * Various statistics for IPMI, these index stats[] in the ipmi_smi
216 * structure.
217 */
218enum ipmi_stat_indexes {
219	/* Commands we got from the user that were invalid. */
220	IPMI_STAT_sent_invalid_commands = 0,
221
222	/* Commands we sent to the MC. */
223	IPMI_STAT_sent_local_commands,
224
225	/* Responses from the MC that were delivered to a user. */
226	IPMI_STAT_handled_local_responses,
227
228	/* Responses from the MC that were not delivered to a user. */
229	IPMI_STAT_unhandled_local_responses,
230
231	/* Commands we sent out to the IPMB bus. */
232	IPMI_STAT_sent_ipmb_commands,
233
234	/* Commands sent on the IPMB that had errors on the SEND CMD */
235	IPMI_STAT_sent_ipmb_command_errs,
236
237	/* Each retransmit increments this count. */
238	IPMI_STAT_retransmitted_ipmb_commands,
239
240	/*
241	 * When a message times out (runs out of retransmits) this is
242	 * incremented.
243	 */
244	IPMI_STAT_timed_out_ipmb_commands,
245
246	/*
247	 * This is like above, but for broadcasts.  Broadcasts are
248	 * *not* included in the above count (they are expected to
249	 * time out).
250	 */
251	IPMI_STAT_timed_out_ipmb_broadcasts,
252
253	/* Responses I have sent to the IPMB bus. */
254	IPMI_STAT_sent_ipmb_responses,
255
256	/* The response was delivered to the user. */
257	IPMI_STAT_handled_ipmb_responses,
258
259	/* The response had invalid data in it. */
260	IPMI_STAT_invalid_ipmb_responses,
261
262	/* The response didn't have anyone waiting for it. */
263	IPMI_STAT_unhandled_ipmb_responses,
264
265	/* Commands we sent out on the LAN. */
266	IPMI_STAT_sent_lan_commands,
267
268	/* Commands sent on the LAN that had errors on the SEND CMD */
269	IPMI_STAT_sent_lan_command_errs,
270
271	/* Each retransmit increments this count. */
272	IPMI_STAT_retransmitted_lan_commands,
273
274	/*
275	 * When a message times out (runs out of retransmits) this is
276	 * incremented.
277	 */
278	IPMI_STAT_timed_out_lan_commands,
279
280	/* Responses I have sent on the LAN. */
281	IPMI_STAT_sent_lan_responses,
282
283	/* The response was delivered to the user. */
284	IPMI_STAT_handled_lan_responses,
285
286	/* The response had invalid data in it. */
287	IPMI_STAT_invalid_lan_responses,
288
289	/* The response didn't have anyone waiting for it. */
290	IPMI_STAT_unhandled_lan_responses,
291
292	/* The command was delivered to the user. */
293	IPMI_STAT_handled_commands,
294
295	/* The command had invalid data in it. */
296	IPMI_STAT_invalid_commands,
297
298	/* The command didn't have anyone waiting for it. */
299	IPMI_STAT_unhandled_commands,
300
301	/* Invalid data in an event. */
302	IPMI_STAT_invalid_events,
303
304	/* Events that were received with the proper format. */
305	IPMI_STAT_events,
306
307	/* Retransmissions on IPMB that failed. */
308	IPMI_STAT_dropped_rexmit_ipmb_commands,
309
310	/* Retransmissions on LAN that failed. */
311	IPMI_STAT_dropped_rexmit_lan_commands,
312
313	/* This *must* remain last, add new values above this. */
314	IPMI_NUM_STATS
315};
316
317
318#define IPMI_IPMB_NUM_SEQ	64
319#define IPMI_MAX_CHANNELS       16
320struct ipmi_smi {
321	/* What interface number are we? */
322	int intf_num;
323
324	struct kref refcount;
325
326	/* Used for a list of interfaces. */
327	struct list_head link;
328
329	/*
330	 * The list of upper layers that are using me.  seq_lock
331	 * protects this.
332	 */
333	struct list_head users;
334
335	/* Information to supply to users. */
336	unsigned char ipmi_version_major;
337	unsigned char ipmi_version_minor;
338
339	/* Used for wake ups at startup. */
340	wait_queue_head_t waitq;
341
342	struct bmc_device *bmc;
343	char *my_dev_name;
344	char *sysfs_name;
345
346	/*
347	 * This is the lower-layer's sender routine.  Note that you
348	 * must either be holding the ipmi_interfaces_mutex or be in
349	 * a non-preemptible region to use this.  You must fetch the
350	 * value into a local variable and make sure it is not NULL.
351	 */
352	struct ipmi_smi_handlers *handlers;
353	void                     *send_info;
354
355#ifdef CONFIG_PROC_FS
356	/* A list of proc entries for this interface. */
357	struct mutex           proc_entry_lock;
358	struct ipmi_proc_entry *proc_entries;
359#endif
360
361	/* Driver-model device for the system interface. */
362	struct device          *si_dev;
363
364	/*
365	 * A table of sequence numbers for this interface.  We use the
366	 * sequence numbers for IPMB messages that go out of the
367	 * interface to match them up with their responses.  A routine
368	 * is called periodically to time the items in this list.
369	 */
370	spinlock_t       seq_lock;
371	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
372	int curr_seq;
373
374	/*
375	 * Messages queued for delivery.  If delivery fails (out of memory
376	 * for instance), they will stay in here to be processed later in a
377	 * periodic timer interrupt.  The tasklet is for handling received
378	 * messages directly from the handler.
379	 */
380	spinlock_t       waiting_msgs_lock;
381	struct list_head waiting_msgs;
382	atomic_t	 watchdog_pretimeouts_to_deliver;
383	struct tasklet_struct recv_tasklet;
384
385	/*
386	 * The list of command receivers that are registered for commands
387	 * on this interface.
388	 */
389	struct mutex     cmd_rcvrs_mutex;
390	struct list_head cmd_rcvrs;
391
392	/*
393	 * Events that were queued because no one was there to receive
394	 * them.
395	 */
396	spinlock_t       events_lock; /* For dealing with event stuff. */
397	struct list_head waiting_events;
398	unsigned int     waiting_events_count; /* How many events in queue? */
399	char             delivering_events;
400	char             event_msg_printed;
401	atomic_t         event_waiters;
402	unsigned int     ticks_to_req_ev;
403	int              last_needs_timer;
404
405	/*
406	 * The event receiver for my BMC, only really used at panic
407	 * shutdown as a place to store this.
408	 */
409	unsigned char event_receiver;
410	unsigned char event_receiver_lun;
411	unsigned char local_sel_device;
412	unsigned char local_event_generator;
413
414	/* For handling of maintenance mode. */
415	int maintenance_mode;
416	bool maintenance_mode_enable;
417	int auto_maintenance_timeout;
418	spinlock_t maintenance_mode_lock; /* Used in a timer... */
419
420	/*
421	 * A cheap hack: if this is non-null and a message to an
422	 * interface comes in with a NULL user, call this routine with
423	 * it.  Note that the message will still be freed by the
424	 * caller.  This only works on the system interface.
425	 */
426	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
427
428	/*
429	 * When we are scanning the channels for an SMI, this will
430	 * tell which channel we are scanning.
431	 */
432	int curr_channel;
433
434	/* Channel information */
435	struct ipmi_channel channels[IPMI_MAX_CHANNELS];
436
437	/* Proc FS stuff. */
438	struct proc_dir_entry *proc_dir;
439	char                  proc_dir_name[10];
440
441	atomic_t stats[IPMI_NUM_STATS];
442
443	/*
444	 * run_to_completion duplicate of smb_info, smi_info
445	 * and ipmi_serial_info structures. Used to decrease the number of
446	 * parameters passed by "low" level IPMI code.
447	 */
448	int run_to_completion;
449};
450#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
451
452/**
453 * The driver model view of the IPMI messaging driver.
454 */
455static struct platform_driver ipmidriver = {
456	.driver = {
457		.name = "ipmi",
458		.bus = &platform_bus_type
459	}
460};
461static DEFINE_MUTEX(ipmidriver_mutex);
462
463static LIST_HEAD(ipmi_interfaces);
464static DEFINE_MUTEX(ipmi_interfaces_mutex);
465
466/*
467 * List of watchers that want to know when smi's are added and deleted.
468 */
469static LIST_HEAD(smi_watchers);
470static DEFINE_MUTEX(smi_watchers_mutex);
471
472#define ipmi_inc_stat(intf, stat) \
473	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
474#define ipmi_get_stat(intf, stat) \
475	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
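/*
 * Illustrative note (not part of the original source): the stat macros rely
 * on token pasting, so their second argument must be the suffix of one of
 * the enum ipmi_stat_indexes values.  For example:
 *
 *	ipmi_inc_stat(intf, sent_ipmb_commands);
 *
 * expands to:
 *
 *	atomic_inc(&(intf)->stats[IPMI_STAT_sent_ipmb_commands]);
 */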
476
477static int is_lan_addr(struct ipmi_addr *addr)
478{
479	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
480}
481
482static int is_ipmb_addr(struct ipmi_addr *addr)
483{
484	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
485}
486
487static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
488{
489	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
490}
491
492static void free_recv_msg_list(struct list_head *q)
493{
494	struct ipmi_recv_msg *msg, *msg2;
495
496	list_for_each_entry_safe(msg, msg2, q, link) {
497		list_del(&msg->link);
498		ipmi_free_recv_msg(msg);
499	}
500}
501
502static void free_smi_msg_list(struct list_head *q)
503{
504	struct ipmi_smi_msg *msg, *msg2;
505
506	list_for_each_entry_safe(msg, msg2, q, link) {
507		list_del(&msg->link);
508		ipmi_free_smi_msg(msg);
509	}
510}
511
512static void clean_up_interface_data(ipmi_smi_t intf)
513{
514	int              i;
515	struct cmd_rcvr  *rcvr, *rcvr2;
516	struct list_head list;
517
518	tasklet_kill(&intf->recv_tasklet);
519
520	free_smi_msg_list(&intf->waiting_msgs);
521	free_recv_msg_list(&intf->waiting_events);
522
523	/*
524	 * Wholesale remove all the entries from the list in the
525	 * interface and wait for RCU to know that none are in use.
526	 */
527	mutex_lock(&intf->cmd_rcvrs_mutex);
528	INIT_LIST_HEAD(&list);
529	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
530	mutex_unlock(&intf->cmd_rcvrs_mutex);
531
532	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
533		kfree(rcvr);
534
535	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
536		if ((intf->seq_table[i].inuse)
537					&& (intf->seq_table[i].recv_msg))
538			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
539	}
540}
541
542static void intf_free(struct kref *ref)
543{
544	ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
545
546	clean_up_interface_data(intf);
547	kfree(intf);
548}
549
550struct watcher_entry {
551	int              intf_num;
552	ipmi_smi_t       intf;
553	struct list_head link;
554};
555
556int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
557{
558	ipmi_smi_t intf;
559	LIST_HEAD(to_deliver);
560	struct watcher_entry *e, *e2;
561
562	mutex_lock(&smi_watchers_mutex);
563
564	mutex_lock(&ipmi_interfaces_mutex);
565
566	/* Build a list of things to deliver. */
567	list_for_each_entry(intf, &ipmi_interfaces, link) {
568		if (intf->intf_num == -1)
569			continue;
570		e = kmalloc(sizeof(*e), GFP_KERNEL);
571		if (!e)
572			goto out_err;
573		kref_get(&intf->refcount);
574		e->intf = intf;
575		e->intf_num = intf->intf_num;
576		list_add_tail(&e->link, &to_deliver);
577	}
578
579	/* We will succeed, so add it to the list. */
580	list_add(&watcher->link, &smi_watchers);
581
582	mutex_unlock(&ipmi_interfaces_mutex);
583
584	list_for_each_entry_safe(e, e2, &to_deliver, link) {
585		list_del(&e->link);
586		watcher->new_smi(e->intf_num, e->intf->si_dev);
587		kref_put(&e->intf->refcount, intf_free);
588		kfree(e);
589	}
590
591	mutex_unlock(&smi_watchers_mutex);
592
593	return 0;
594
595 out_err:
596	mutex_unlock(&ipmi_interfaces_mutex);
597	mutex_unlock(&smi_watchers_mutex);
598	list_for_each_entry_safe(e, e2, &to_deliver, link) {
599		list_del(&e->link);
600		kref_put(&e->intf->refcount, intf_free);
601		kfree(e);
602	}
603	return -ENOMEM;
604}
605EXPORT_SYMBOL(ipmi_smi_watcher_register);
606
607int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
608{
609	mutex_lock(&smi_watchers_mutex);
610	list_del(&(watcher->link));
611	mutex_unlock(&smi_watchers_mutex);
612	return 0;
613}
614EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
615
616/*
617 * Must be called with smi_watchers_mutex held.
618 */
619static void
620call_smi_watchers(int i, struct device *dev)
621{
622	struct ipmi_smi_watcher *w;
623
624	list_for_each_entry(w, &smi_watchers, link) {
625		if (try_module_get(w->owner)) {
626			w->new_smi(i, dev);
627			module_put(w->owner);
628		}
629	}
630}
631
632static int
633ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
634{
635	if (addr1->addr_type != addr2->addr_type)
636		return 0;
637
638	if (addr1->channel != addr2->channel)
639		return 0;
640
641	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
642		struct ipmi_system_interface_addr *smi_addr1
643		    = (struct ipmi_system_interface_addr *) addr1;
644		struct ipmi_system_interface_addr *smi_addr2
645		    = (struct ipmi_system_interface_addr *) addr2;
646		return (smi_addr1->lun == smi_addr2->lun);
647	}
648
649	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
650		struct ipmi_ipmb_addr *ipmb_addr1
651		    = (struct ipmi_ipmb_addr *) addr1;
652		struct ipmi_ipmb_addr *ipmb_addr2
653		    = (struct ipmi_ipmb_addr *) addr2;
654
655		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
656			&& (ipmb_addr1->lun == ipmb_addr2->lun));
657	}
658
659	if (is_lan_addr(addr1)) {
660		struct ipmi_lan_addr *lan_addr1
661			= (struct ipmi_lan_addr *) addr1;
662		struct ipmi_lan_addr *lan_addr2
663		    = (struct ipmi_lan_addr *) addr2;
664
665		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
666			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
667			&& (lan_addr1->session_handle
668			    == lan_addr2->session_handle)
669			&& (lan_addr1->lun == lan_addr2->lun));
670	}
671
672	return 1;
673}
674
675int ipmi_validate_addr(struct ipmi_addr *addr, int len)
676{
677	if (len < sizeof(struct ipmi_system_interface_addr))
678		return -EINVAL;
679
680	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
681		if (addr->channel != IPMI_BMC_CHANNEL)
682			return -EINVAL;
683		return 0;
684	}
685
686	if ((addr->channel == IPMI_BMC_CHANNEL)
687	    || (addr->channel >= IPMI_MAX_CHANNELS)
688	    || (addr->channel < 0))
689		return -EINVAL;
690
691	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
692		if (len < sizeof(struct ipmi_ipmb_addr))
693			return -EINVAL;
694		return 0;
695	}
696
697	if (is_lan_addr(addr)) {
698		if (len < sizeof(struct ipmi_lan_addr))
699			return -EINVAL;
700		return 0;
701	}
702
703	return -EINVAL;
704}
705EXPORT_SYMBOL(ipmi_validate_addr);
706
707unsigned int ipmi_addr_length(int addr_type)
708{
709	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
710		return sizeof(struct ipmi_system_interface_addr);
711
712	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
713			|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
714		return sizeof(struct ipmi_ipmb_addr);
715
716	if (addr_type == IPMI_LAN_ADDR_TYPE)
717		return sizeof(struct ipmi_lan_addr);
718
719	return 0;
720}
721EXPORT_SYMBOL(ipmi_addr_length);
722
723static void deliver_response(struct ipmi_recv_msg *msg)
724{
725	if (!msg->user) {
726		ipmi_smi_t    intf = msg->user_msg_data;
727
728		/* Special handling for NULL users. */
729		if (intf->null_user_handler) {
730			intf->null_user_handler(intf, msg);
731			ipmi_inc_stat(intf, handled_local_responses);
732		} else {
733			/* No handler, so give up. */
734			ipmi_inc_stat(intf, unhandled_local_responses);
735		}
736		ipmi_free_recv_msg(msg);
737	} else {
738		ipmi_user_t user = msg->user;
739		user->handler->ipmi_recv_hndl(msg, user->handler_data);
740	}
741}
742
743static void
744deliver_err_response(struct ipmi_recv_msg *msg, int err)
745{
746	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
747	msg->msg_data[0] = err;
748	msg->msg.netfn |= 1; /* Convert to a response. */
749	msg->msg.data_len = 1;
750	msg->msg.data = msg->msg_data;
751	deliver_response(msg);
752}
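/*
 * Illustrative note (not part of the original source): IPMI requests use
 * even netfns and the matching response uses the next (odd) netfn, so
 * OR-ing in bit 0 turns the stored request into a response frame with the
 * error in the completion-code byte.  For example, a timed-out Get Device
 * ID request (netfn IPMI_NETFN_APP_REQUEST, cmd 0x01) could be completed
 * with:
 *
 *	deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
 *
 * which delivers it back as netfn 0x07 with msg_data[0] == 0xc3.
 */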
753
754/*
755 * Find the next sequence number not being used and add the given
756 * message with the given timeout to the sequence table.  This must be
757 * called with the interface's seq_lock held.
758 */
759static int intf_next_seq(ipmi_smi_t           intf,
760			 struct ipmi_recv_msg *recv_msg,
761			 unsigned long        timeout,
762			 int                  retries,
763			 int                  broadcast,
764			 unsigned char        *seq,
765			 long                 *seqid)
766{
767	int          rv = 0;
768	unsigned int i;
769
770	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
771					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
772		if (!intf->seq_table[i].inuse)
773			break;
774	}
775
776	if (!intf->seq_table[i].inuse) {
777		intf->seq_table[i].recv_msg = recv_msg;
778
779		/*
780		 * Start with the maximum timeout; when the send response
781		 * comes in we will start the real timer.
782		 */
783		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
784		intf->seq_table[i].orig_timeout = timeout;
785		intf->seq_table[i].retries_left = retries;
786		intf->seq_table[i].broadcast = broadcast;
787		intf->seq_table[i].inuse = 1;
788		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
789		*seq = i;
790		*seqid = intf->seq_table[i].seqid;
791		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
792		need_waiter(intf);
793	} else {
794		rv = -EAGAIN;
795	}
796
797	return rv;
798}
799
800/*
801 * Return the receive message for the given sequence number and
802 * release the sequence number so it can be reused.  Some other data
803 * is passed in to be sure the message matches up correctly (to help
804 * guard against messages coming in after their timeout and the
805 * sequence number being reused).
806 */
807static int intf_find_seq(ipmi_smi_t           intf,
808			 unsigned char        seq,
809			 short                channel,
810			 unsigned char        cmd,
811			 unsigned char        netfn,
812			 struct ipmi_addr     *addr,
813			 struct ipmi_recv_msg **recv_msg)
814{
815	int           rv = -ENODEV;
816	unsigned long flags;
817
818	if (seq >= IPMI_IPMB_NUM_SEQ)
819		return -EINVAL;
820
821	spin_lock_irqsave(&(intf->seq_lock), flags);
822	if (intf->seq_table[seq].inuse) {
823		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
824
825		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
826				&& (msg->msg.netfn == netfn)
827				&& (ipmi_addr_equal(addr, &(msg->addr)))) {
828			*recv_msg = msg;
829			intf->seq_table[seq].inuse = 0;
830			rv = 0;
831		}
832	}
833	spin_unlock_irqrestore(&(intf->seq_lock), flags);
834
835	return rv;
836}
837
838
839/* Start the timer for a specific sequence table entry. */
840static int intf_start_seq_timer(ipmi_smi_t intf,
841				long       msgid)
842{
843	int           rv = -ENODEV;
844	unsigned long flags;
845	unsigned char seq;
846	unsigned long seqid;
847
848
849	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
850
851	spin_lock_irqsave(&(intf->seq_lock), flags);
852	/*
853	 * We do this verification because the user can be deleted
854	 * while a message is outstanding.
855	 */
856	if ((intf->seq_table[seq].inuse)
857				&& (intf->seq_table[seq].seqid == seqid)) {
858		struct seq_table *ent = &(intf->seq_table[seq]);
859		ent->timeout = ent->orig_timeout;
860		rv = 0;
861	}
862	spin_unlock_irqrestore(&(intf->seq_lock), flags);
863
864	return rv;
865}
866
867/* Got an error for the send message for a specific sequence number. */
868static int intf_err_seq(ipmi_smi_t   intf,
869			long         msgid,
870			unsigned int err)
871{
872	int                  rv = -ENODEV;
873	unsigned long        flags;
874	unsigned char        seq;
875	unsigned long        seqid;
876	struct ipmi_recv_msg *msg = NULL;
877
878
879	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
880
881	spin_lock_irqsave(&(intf->seq_lock), flags);
882	/*
883	 * We do this verification because the user can be deleted
884	 * while a message is outstanding.
885	 */
886	if ((intf->seq_table[seq].inuse)
887				&& (intf->seq_table[seq].seqid == seqid)) {
888		struct seq_table *ent = &(intf->seq_table[seq]);
889
890		ent->inuse = 0;
891		msg = ent->recv_msg;
892		rv = 0;
893	}
894	spin_unlock_irqrestore(&(intf->seq_lock), flags);
895
896	if (msg)
897		deliver_err_response(msg, err);
898
899	return rv;
900}
901
902
903int ipmi_create_user(unsigned int          if_num,
904		     struct ipmi_user_hndl *handler,
905		     void                  *handler_data,
906		     ipmi_user_t           *user)
907{
908	unsigned long flags;
909	ipmi_user_t   new_user;
910	int           rv = 0;
911	ipmi_smi_t    intf;
912
913	/*
914	 * There is no module usecount here, because it's not
915	 * required.  Since this can only be used by and called from
916	 * other modules, they will implicitly use this module, and
917	 * thus this can't be removed unless the other modules are
918	 * removed.
919	 */
920
921	if (handler == NULL)
922		return -EINVAL;
923
924	/*
925	 * Make sure the driver is actually initialized; this handles
926	 * problems with initialization order.
927	 */
928	if (!initialized) {
929		rv = ipmi_init_msghandler();
930		if (rv)
931			return rv;
932
933		/*
934		 * The init code doesn't return an error if it was turned
935		 * off, but it won't initialize.  Check that.
936		 */
937		if (!initialized)
938			return -ENODEV;
939	}
940
941	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
942	if (!new_user)
943		return -ENOMEM;
944
945	mutex_lock(&ipmi_interfaces_mutex);
946	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
947		if (intf->intf_num == if_num)
948			goto found;
949	}
950	/* Not found, return an error */
951	rv = -EINVAL;
952	goto out_kfree;
953
954 found:
955	/* Note that each existing user holds a refcount to the interface. */
956	kref_get(&intf->refcount);
957
958	kref_init(&new_user->refcount);
959	new_user->handler = handler;
960	new_user->handler_data = handler_data;
961	new_user->intf = intf;
962	new_user->gets_events = false;
963
964	if (!try_module_get(intf->handlers->owner)) {
965		rv = -ENODEV;
966		goto out_kref;
967	}
968
969	if (intf->handlers->inc_usecount) {
970		rv = intf->handlers->inc_usecount(intf->send_info);
971		if (rv) {
972			module_put(intf->handlers->owner);
973			goto out_kref;
974		}
975	}
976
977	/*
978	 * Hold the lock so intf->handlers is guaranteed to be good
979	 * until now
980	 */
981	mutex_unlock(&ipmi_interfaces_mutex);
982
983	new_user->valid = true;
984	spin_lock_irqsave(&intf->seq_lock, flags);
985	list_add_rcu(&new_user->link, &intf->users);
986	spin_unlock_irqrestore(&intf->seq_lock, flags);
987	if (handler->ipmi_watchdog_pretimeout) {
988		/* User wants pretimeouts, so make sure to watch for them. */
989		if (atomic_inc_return(&intf->event_waiters) == 1)
990			need_waiter(intf);
991	}
992	*user = new_user;
993	return 0;
994
995out_kref:
996	kref_put(&intf->refcount, intf_free);
997out_kfree:
998	mutex_unlock(&ipmi_interfaces_mutex);
999	kfree(new_user);
1000	return rv;
1001}
1002EXPORT_SYMBOL(ipmi_create_user);
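/*
 * Illustrative sketch of a client of this API (hypothetical names, not part
 * of the original source).  The receive handler owns the message and must
 * free it with ipmi_free_recv_msg() when it is done:
 *
 *	static void example_recv(struct ipmi_recv_msg *msg, void *handler_data)
 *	{
 *		pr_info("IPMI reply: netfn %x cmd %x\n",
 *			msg->msg.netfn, msg->msg.cmd);
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static struct ipmi_user_hndl example_hndl = {
 *		.ipmi_recv_hndl = example_recv,
 *	};
 *	static ipmi_user_t example_user;
 *
 *	rv = ipmi_create_user(0, &example_hndl, NULL, &example_user);
 */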
1003
1004int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1005{
1006	int           rv = 0;
1007	ipmi_smi_t    intf;
1008	struct ipmi_smi_handlers *handlers;
1009
1010	mutex_lock(&ipmi_interfaces_mutex);
1011	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1012		if (intf->intf_num == if_num)
1013			goto found;
1014	}
1015	/* Not found, return an error */
1016	rv = -EINVAL;
1017	mutex_unlock(&ipmi_interfaces_mutex);
1018	return rv;
1019
1020found:
1021	handlers = intf->handlers;
1022	rv = -ENOSYS;
1023	if (handlers->get_smi_info)
1024		rv = handlers->get_smi_info(intf->send_info, data);
1025	mutex_unlock(&ipmi_interfaces_mutex);
1026
1027	return rv;
1028}
1029EXPORT_SYMBOL(ipmi_get_smi_info);
1030
1031static void free_user(struct kref *ref)
1032{
1033	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
1034	kfree(user);
1035}
1036
1037int ipmi_destroy_user(ipmi_user_t user)
1038{
1039	ipmi_smi_t       intf = user->intf;
1040	int              i;
1041	unsigned long    flags;
1042	struct cmd_rcvr  *rcvr;
1043	struct cmd_rcvr  *rcvrs = NULL;
1044
1045	user->valid = false;
1046
1047	if (user->handler->ipmi_watchdog_pretimeout)
1048		atomic_dec(&intf->event_waiters);
1049
1050	if (user->gets_events)
1051		atomic_dec(&intf->event_waiters);
1052
1053	/* Remove the user from the interface's sequence table. */
1054	spin_lock_irqsave(&intf->seq_lock, flags);
1055	list_del_rcu(&user->link);
1056
1057	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1058		if (intf->seq_table[i].inuse
1059		    && (intf->seq_table[i].recv_msg->user == user)) {
1060			intf->seq_table[i].inuse = 0;
1061			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1062		}
1063	}
1064	spin_unlock_irqrestore(&intf->seq_lock, flags);
1065
1066	/*
1067	 * Remove the user from the command receiver's table.  First
1068	 * we build a list of everything (not using the standard link,
1069	 * since other things may be using it till we do
1070	 * synchronize_rcu()) then free everything in that list.
1071	 */
1072	mutex_lock(&intf->cmd_rcvrs_mutex);
1073	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1074		if (rcvr->user == user) {
1075			list_del_rcu(&rcvr->link);
1076			rcvr->next = rcvrs;
1077			rcvrs = rcvr;
1078		}
1079	}
1080	mutex_unlock(&intf->cmd_rcvrs_mutex);
1081	synchronize_rcu();
1082	while (rcvrs) {
1083		rcvr = rcvrs;
1084		rcvrs = rcvr->next;
1085		kfree(rcvr);
1086	}
1087
1088	mutex_lock(&ipmi_interfaces_mutex);
1089	if (intf->handlers) {
1090		module_put(intf->handlers->owner);
1091		if (intf->handlers->dec_usecount)
1092			intf->handlers->dec_usecount(intf->send_info);
1093	}
1094	mutex_unlock(&ipmi_interfaces_mutex);
1095
1096	kref_put(&intf->refcount, intf_free);
1097
1098	kref_put(&user->refcount, free_user);
1099
1100	return 0;
1101}
1102EXPORT_SYMBOL(ipmi_destroy_user);
1103
1104void ipmi_get_version(ipmi_user_t   user,
1105		      unsigned char *major,
1106		      unsigned char *minor)
1107{
1108	*major = user->intf->ipmi_version_major;
1109	*minor = user->intf->ipmi_version_minor;
1110}
1111EXPORT_SYMBOL(ipmi_get_version);
1112
1113int ipmi_set_my_address(ipmi_user_t   user,
1114			unsigned int  channel,
1115			unsigned char address)
1116{
1117	if (channel >= IPMI_MAX_CHANNELS)
1118		return -EINVAL;
1119	user->intf->channels[channel].address = address;
1120	return 0;
1121}
1122EXPORT_SYMBOL(ipmi_set_my_address);
1123
1124int ipmi_get_my_address(ipmi_user_t   user,
1125			unsigned int  channel,
1126			unsigned char *address)
1127{
1128	if (channel >= IPMI_MAX_CHANNELS)
1129		return -EINVAL;
1130	*address = user->intf->channels[channel].address;
1131	return 0;
1132}
1133EXPORT_SYMBOL(ipmi_get_my_address);
1134
1135int ipmi_set_my_LUN(ipmi_user_t   user,
1136		    unsigned int  channel,
1137		    unsigned char LUN)
1138{
1139	if (channel >= IPMI_MAX_CHANNELS)
1140		return -EINVAL;
1141	user->intf->channels[channel].lun = LUN & 0x3;
1142	return 0;
1143}
1144EXPORT_SYMBOL(ipmi_set_my_LUN);
1145
1146int ipmi_get_my_LUN(ipmi_user_t   user,
1147		    unsigned int  channel,
1148		    unsigned char *address)
1149{
1150	if (channel >= IPMI_MAX_CHANNELS)
1151		return -EINVAL;
1152	*address = user->intf->channels[channel].lun;
1153	return 0;
1154}
1155EXPORT_SYMBOL(ipmi_get_my_LUN);
1156
1157int ipmi_get_maintenance_mode(ipmi_user_t user)
1158{
1159	int           mode;
1160	unsigned long flags;
1161
1162	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1163	mode = user->intf->maintenance_mode;
1164	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1165
1166	return mode;
1167}
1168EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1169
1170static void maintenance_mode_update(ipmi_smi_t intf)
1171{
1172	if (intf->handlers->set_maintenance_mode)
1173		intf->handlers->set_maintenance_mode(
1174			intf->send_info, intf->maintenance_mode_enable);
1175}
1176
1177int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1178{
1179	int           rv = 0;
1180	unsigned long flags;
1181	ipmi_smi_t    intf = user->intf;
1182
1183	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1184	if (intf->maintenance_mode != mode) {
1185		switch (mode) {
1186		case IPMI_MAINTENANCE_MODE_AUTO:
1187			intf->maintenance_mode_enable
1188				= (intf->auto_maintenance_timeout > 0);
1189			break;
1190
1191		case IPMI_MAINTENANCE_MODE_OFF:
1192			intf->maintenance_mode_enable = false;
1193			break;
1194
1195		case IPMI_MAINTENANCE_MODE_ON:
1196			intf->maintenance_mode_enable = true;
1197			break;
1198
1199		default:
1200			rv = -EINVAL;
1201			goto out_unlock;
1202		}
1203		intf->maintenance_mode = mode;
1204
1205		maintenance_mode_update(intf);
1206	}
1207 out_unlock:
1208	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1209
1210	return rv;
1211}
1212EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1213
1214int ipmi_set_gets_events(ipmi_user_t user, bool val)
1215{
1216	unsigned long        flags;
1217	ipmi_smi_t           intf = user->intf;
1218	struct ipmi_recv_msg *msg, *msg2;
1219	struct list_head     msgs;
1220
1221	INIT_LIST_HEAD(&msgs);
1222
1223	spin_lock_irqsave(&intf->events_lock, flags);
1224	if (user->gets_events == val)
1225		goto out;
1226
1227	user->gets_events = val;
1228
1229	if (val) {
1230		if (atomic_inc_return(&intf->event_waiters) == 1)
1231			need_waiter(intf);
1232	} else {
1233		atomic_dec(&intf->event_waiters);
1234	}
1235
1236	if (intf->delivering_events)
1237		/*
1238		 * Another thread is delivering events for this, so
1239		 * let it handle any new events.
1240		 */
1241		goto out;
1242
1243	/* Deliver any queued events. */
1244	while (user->gets_events && !list_empty(&intf->waiting_events)) {
1245		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1246			list_move_tail(&msg->link, &msgs);
1247		intf->waiting_events_count = 0;
1248		if (intf->event_msg_printed) {
1249			printk(KERN_WARNING PFX "Event queue no longer"
1250			       " full\n");
1251			intf->event_msg_printed = 0;
1252		}
1253
1254		intf->delivering_events = 1;
1255		spin_unlock_irqrestore(&intf->events_lock, flags);
1256
1257		list_for_each_entry_safe(msg, msg2, &msgs, link) {
1258			msg->user = user;
1259			kref_get(&user->refcount);
1260			deliver_response(msg);
1261		}
1262
1263		spin_lock_irqsave(&intf->events_lock, flags);
1264		intf->delivering_events = 0;
1265	}
1266
1267 out:
1268	spin_unlock_irqrestore(&intf->events_lock, flags);
1269
1270	return 0;
1271}
1272EXPORT_SYMBOL(ipmi_set_gets_events);
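/*
 * Illustrative usage (hypothetical, not part of the original source): a user
 * opts in to asynchronous event delivery; any events already queued on the
 * interface are handed to it immediately:
 *
 *	ipmi_set_gets_events(example_user, true);
 */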
1273
1274static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
1275				      unsigned char netfn,
1276				      unsigned char cmd,
1277				      unsigned char chan)
1278{
1279	struct cmd_rcvr *rcvr;
1280
1281	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1282		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1283					&& (rcvr->chans & (1 << chan)))
1284			return rcvr;
1285	}
1286	return NULL;
1287}
1288
1289static int is_cmd_rcvr_exclusive(ipmi_smi_t    intf,
1290				 unsigned char netfn,
1291				 unsigned char cmd,
1292				 unsigned int  chans)
1293{
1294	struct cmd_rcvr *rcvr;
1295
1296	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1297		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1298					&& (rcvr->chans & chans))
1299			return 0;
1300	}
1301	return 1;
1302}
1303
1304int ipmi_register_for_cmd(ipmi_user_t   user,
1305			  unsigned char netfn,
1306			  unsigned char cmd,
1307			  unsigned int  chans)
1308{
1309	ipmi_smi_t      intf = user->intf;
1310	struct cmd_rcvr *rcvr;
1311	int             rv = 0;
1312
1313
1314	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1315	if (!rcvr)
1316		return -ENOMEM;
1317	rcvr->cmd = cmd;
1318	rcvr->netfn = netfn;
1319	rcvr->chans = chans;
1320	rcvr->user = user;
1321
1322	mutex_lock(&intf->cmd_rcvrs_mutex);
1323	/* Make sure the command/netfn is not already registered. */
1324	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1325		rv = -EBUSY;
1326		goto out_unlock;
1327	}
1328
1329	if (atomic_inc_return(&intf->event_waiters) == 1)
1330		need_waiter(intf);
1331
1332	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1333
1334 out_unlock:
1335	mutex_unlock(&intf->cmd_rcvrs_mutex);
1336	if (rv)
1337		kfree(rcvr);
1338
1339	return rv;
1340}
1341EXPORT_SYMBOL(ipmi_register_for_cmd);
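/*
 * Illustrative usage (hypothetical, not part of the original source):
 * receive the Chassis Control command (netfn 0x00, cmd 0x02) on channels
 * 0 and 1.  -EBUSY means another user already owns that netfn/cmd on one
 * of the requested channels:
 *
 *	rv = ipmi_register_for_cmd(example_user, 0x00, 0x02, (1 << 0) | (1 << 1));
 */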
1342
1343int ipmi_unregister_for_cmd(ipmi_user_t   user,
1344			    unsigned char netfn,
1345			    unsigned char cmd,
1346			    unsigned int  chans)
1347{
1348	ipmi_smi_t      intf = user->intf;
1349	struct cmd_rcvr *rcvr;
1350	struct cmd_rcvr *rcvrs = NULL;
1351	int i, rv = -ENOENT;
1352
1353	mutex_lock(&intf->cmd_rcvrs_mutex);
1354	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1355		if (((1 << i) & chans) == 0)
1356			continue;
1357		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1358		if (rcvr == NULL)
1359			continue;
1360		if (rcvr->user == user) {
1361			rv = 0;
1362			rcvr->chans &= ~chans;
1363			if (rcvr->chans == 0) {
1364				list_del_rcu(&rcvr->link);
1365				rcvr->next = rcvrs;
1366				rcvrs = rcvr;
1367			}
1368		}
1369	}
1370	mutex_unlock(&intf->cmd_rcvrs_mutex);
1371	synchronize_rcu();
1372	while (rcvrs) {
1373		atomic_dec(&intf->event_waiters);
1374		rcvr = rcvrs;
1375		rcvrs = rcvr->next;
1376		kfree(rcvr);
1377	}
1378	return rv;
1379}
1380EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1381
1382static unsigned char
1383ipmb_checksum(unsigned char *data, int size)
1384{
1385	unsigned char csum = 0;
1386
1387	for (; size > 0; size--, data++)
1388		csum += *data;
1389
1390	return -csum;
1391}
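/*
 * Illustrative note (not part of the original source): this is the standard
 * IPMI 2's-complement checksum, chosen so that the protected bytes plus the
 * checksum sum to zero modulo 256.  For example, for header bytes 0x20 and
 * 0x18, the checksum is -(0x20 + 0x18) == 0xc8, and 0x20 + 0x18 + 0xc8 is 0
 * (mod 256), which is what the receiver verifies.
 */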
1392
1393static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1394				   struct kernel_ipmi_msg *msg,
1395				   struct ipmi_ipmb_addr *ipmb_addr,
1396				   long                  msgid,
1397				   unsigned char         ipmb_seq,
1398				   int                   broadcast,
1399				   unsigned char         source_address,
1400				   unsigned char         source_lun)
1401{
1402	int i = broadcast;
1403
1404	/* Format the IPMB header data. */
1405	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1406	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1407	smi_msg->data[2] = ipmb_addr->channel;
1408	if (broadcast)
1409		smi_msg->data[3] = 0;
1410	smi_msg->data[i+3] = ipmb_addr->slave_addr;
1411	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1412	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1413	smi_msg->data[i+6] = source_address;
1414	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1415	smi_msg->data[i+8] = msg->cmd;
1416
1417	/* Now tack on the data to the message. */
1418	if (msg->data_len > 0)
1419		memcpy(&(smi_msg->data[i+9]), msg->data,
1420		       msg->data_len);
1421	smi_msg->data_size = msg->data_len + 9;
1422
1423	/* Now calculate the checksum and tack it on. */
1424	smi_msg->data[i+smi_msg->data_size]
1425		= ipmb_checksum(&(smi_msg->data[i+6]),
1426				smi_msg->data_size-6);
1427
1428	/*
1429	 * Add on the checksum size and the offset from the
1430	 * broadcast.
1431	 */
1432	smi_msg->data_size += 1 + i;
1433
1434	smi_msg->msgid = msgid;
1435}
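/*
 * Illustrative layout (not part of the original source) of the SMI buffer
 * built above for a non-broadcast request (broadcast == 0):
 *
 *	data[0]   IPMI_NETFN_APP_REQUEST << 2
 *	data[1]   IPMI_SEND_MSG_CMD
 *	data[2]   channel
 *	data[3]   rsSA  (ipmb_addr->slave_addr)
 *	data[4]   netfn << 2 | rsLUN
 *	data[5]   checksum over data[3..4]
 *	data[6]   rqSA  (source_address)
 *	data[7]   rqSeq << 2 | rqLUN
 *	data[8]   cmd
 *	data[9..] request data, followed by a checksum over data[6] onward
 *
 * For a broadcast (i == 1), data[3] is set to 0x00 and the IPMB fields
 * above shift up by one byte.
 */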
1436
1437static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1438				  struct kernel_ipmi_msg *msg,
1439				  struct ipmi_lan_addr  *lan_addr,
1440				  long                  msgid,
1441				  unsigned char         ipmb_seq,
1442				  unsigned char         source_lun)
1443{
1444	/* Format the IPMB header data. */
1445	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1446	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1447	smi_msg->data[2] = lan_addr->channel;
1448	smi_msg->data[3] = lan_addr->session_handle;
1449	smi_msg->data[4] = lan_addr->remote_SWID;
1450	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1451	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1452	smi_msg->data[7] = lan_addr->local_SWID;
1453	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1454	smi_msg->data[9] = msg->cmd;
1455
1456	/* Now tack on the data to the message. */
1457	if (msg->data_len > 0)
1458		memcpy(&(smi_msg->data[10]), msg->data,
1459		       msg->data_len);
1460	smi_msg->data_size = msg->data_len + 10;
1461
1462	/* Now calculate the checksum and tack it on. */
1463	smi_msg->data[smi_msg->data_size]
1464		= ipmb_checksum(&(smi_msg->data[7]),
1465				smi_msg->data_size-7);
1466
1467	/*
1468	 * Add on the checksum size.  LAN messages have no
1469	 * broadcast offset.
1470	 */
1471	smi_msg->data_size += 1;
1472
1473	smi_msg->msgid = msgid;
1474}
1475
1476/*
1477 * Separate from ipmi_request so that the user does not have to be
1478 * supplied in certain circumstances (mainly at panic time).  If
1479 * messages are supplied, they will be freed, even if an error
1480 * occurs.
1481 */
1482static int i_ipmi_request(ipmi_user_t          user,
1483			  ipmi_smi_t           intf,
1484			  struct ipmi_addr     *addr,
1485			  long                 msgid,
1486			  struct kernel_ipmi_msg *msg,
1487			  void                 *user_msg_data,
1488			  void                 *supplied_smi,
1489			  struct ipmi_recv_msg *supplied_recv,
1490			  int                  priority,
1491			  unsigned char        source_address,
1492			  unsigned char        source_lun,
1493			  int                  retries,
1494			  unsigned int         retry_time_ms)
1495{
1496	int                      rv = 0;
1497	struct ipmi_smi_msg      *smi_msg;
1498	struct ipmi_recv_msg     *recv_msg;
1499	unsigned long            flags;
1500	struct ipmi_smi_handlers *handlers;
1501
1502
1503	if (supplied_recv)
1504		recv_msg = supplied_recv;
1505	else {
1506		recv_msg = ipmi_alloc_recv_msg();
1507		if (recv_msg == NULL)
1508			return -ENOMEM;
1509	}
1510	recv_msg->user_msg_data = user_msg_data;
1511
1512	if (supplied_smi)
1513		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1514	else {
1515		smi_msg = ipmi_alloc_smi_msg();
1516		if (smi_msg == NULL) {
1517			ipmi_free_recv_msg(recv_msg);
1518			return -ENOMEM;
1519		}
1520	}
1521
1522	rcu_read_lock();
1523	handlers = intf->handlers;
1524	if (!handlers) {
1525		rv = -ENODEV;
1526		goto out_err;
1527	}
1528
1529	recv_msg->user = user;
1530	if (user)
1531		kref_get(&user->refcount);
1532	recv_msg->msgid = msgid;
1533	/*
1534	 * Store the message to send in the receive message so timeout
1535	 * responses can get the proper response data.
1536	 */
1537	recv_msg->msg = *msg;
1538
1539	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1540		struct ipmi_system_interface_addr *smi_addr;
1541
1542		if (msg->netfn & 1) {
1543			/* Responses are not allowed to the SMI. */
1544			rv = -EINVAL;
1545			goto out_err;
1546		}
1547
1548		smi_addr = (struct ipmi_system_interface_addr *) addr;
1549		if (smi_addr->lun > 3) {
1550			ipmi_inc_stat(intf, sent_invalid_commands);
1551			rv = -EINVAL;
1552			goto out_err;
1553		}
1554
1555		memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1556
1557		if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1558		    && ((msg->cmd == IPMI_SEND_MSG_CMD)
1559			|| (msg->cmd == IPMI_GET_MSG_CMD)
1560			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1561			/*
1562			 * We don't let the user do these, since we manage
1563			 * the sequence numbers.
1564			 */
1565			ipmi_inc_stat(intf, sent_invalid_commands);
1566			rv = -EINVAL;
1567			goto out_err;
1568		}
1569
1570		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1571		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
1572			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
1573		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
1574			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1575			intf->auto_maintenance_timeout
1576				= IPMI_MAINTENANCE_MODE_TIMEOUT;
1577			if (!intf->maintenance_mode
1578			    && !intf->maintenance_mode_enable) {
1579				intf->maintenance_mode_enable = true;
1580				maintenance_mode_update(intf);
1581			}
1582			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1583					       flags);
1584		}
1585
1586		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1587			ipmi_inc_stat(intf, sent_invalid_commands);
1588			rv = -EMSGSIZE;
1589			goto out_err;
1590		}
1591
1592		smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1593		smi_msg->data[1] = msg->cmd;
1594		smi_msg->msgid = msgid;
1595		smi_msg->user_data = recv_msg;
1596		if (msg->data_len > 0)
1597			memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1598		smi_msg->data_size = msg->data_len + 2;
1599		ipmi_inc_stat(intf, sent_local_commands);
1600	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
1601		struct ipmi_ipmb_addr *ipmb_addr;
1602		unsigned char         ipmb_seq;
1603		long                  seqid;
1604		int                   broadcast = 0;
1605
1606		if (addr->channel >= IPMI_MAX_CHANNELS) {
1607			ipmi_inc_stat(intf, sent_invalid_commands);
1608			rv = -EINVAL;
1609			goto out_err;
1610		}
1611
1612		if (intf->channels[addr->channel].medium
1613					!= IPMI_CHANNEL_MEDIUM_IPMB) {
1614			ipmi_inc_stat(intf, sent_invalid_commands);
1615			rv = -EINVAL;
1616			goto out_err;
1617		}
1618
1619		if (retries < 0) {
1620		    if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1621			retries = 0; /* Don't retry broadcasts. */
1622		    else
1623			retries = 4;
1624		}
1625		if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1626		    /*
1627		     * Broadcasts add a zero at the beginning of the
1628		     * message, but otherwise are the same as an IPMB
1629		     * address.
1630		     */
1631		    addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1632		    broadcast = 1;
1633		}
1634
1635
1636		/* Default to 1 second retries. */
1637		if (retry_time_ms == 0)
1638		    retry_time_ms = 1000;
1639
1640		/*
1641		 * 9 for the header and 1 for the checksum, plus
1642		 * possibly one for the broadcast.
1643		 */
1644		if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1645			ipmi_inc_stat(intf, sent_invalid_commands);
1646			rv = -EMSGSIZE;
1647			goto out_err;
1648		}
1649
1650		ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1651		if (ipmb_addr->lun > 3) {
1652			ipmi_inc_stat(intf, sent_invalid_commands);
1653			rv = -EINVAL;
1654			goto out_err;
1655		}
1656
1657		memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1658
1659		if (recv_msg->msg.netfn & 0x1) {
1660			/*
1661			 * It's a response, so use the user's sequence
1662			 * from msgid.
1663			 */
1664			ipmi_inc_stat(intf, sent_ipmb_responses);
1665			format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1666					msgid, broadcast,
1667					source_address, source_lun);
1668
1669			/*
1670			 * Save the receive message so we can use it
1671			 * to deliver the response.
1672			 */
1673			smi_msg->user_data = recv_msg;
1674		} else {
1675			/* It's a command, so get a sequence for it. */
1676
1677			spin_lock_irqsave(&(intf->seq_lock), flags);
1678
1679			/*
1680			 * Create a sequence number with a 1 second
1681			 * timeout and 4 retries.
1682			 */
1683			rv = intf_next_seq(intf,
1684					   recv_msg,
1685					   retry_time_ms,
1686					   retries,
1687					   broadcast,
1688					   &ipmb_seq,
1689					   &seqid);
1690			if (rv) {
1691				/*
1692				 * We have used up all the sequence numbers,
1693				 * probably, so abort.
1694				 */
1695				spin_unlock_irqrestore(&(intf->seq_lock),
1696						       flags);
1697				goto out_err;
1698			}
1699
1700			ipmi_inc_stat(intf, sent_ipmb_commands);
1701
1702			/*
1703			 * Store the sequence number in the message,
1704			 * so that when the send message response
1705			 * comes back we can start the timer.
1706			 */
1707			format_ipmb_msg(smi_msg, msg, ipmb_addr,
1708					STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1709					ipmb_seq, broadcast,
1710					source_address, source_lun);
1711
1712			/*
1713			 * Copy the message into the recv message data, so we
1714			 * can retransmit it later if necessary.
1715			 */
1716			memcpy(recv_msg->msg_data, smi_msg->data,
1717			       smi_msg->data_size);
1718			recv_msg->msg.data = recv_msg->msg_data;
1719			recv_msg->msg.data_len = smi_msg->data_size;
1720
1721			/*
1722			 * We don't unlock until here, because we need
1723			 * to copy the completed message into the
1724			 * recv_msg before we release the lock.
1725			 * Otherwise, race conditions may bite us.  I
1726			 * know that's pretty paranoid, but I prefer
1727			 * to be correct.
1728			 */
1729			spin_unlock_irqrestore(&(intf->seq_lock), flags);
1730		}
1731	} else if (is_lan_addr(addr)) {
1732		struct ipmi_lan_addr  *lan_addr;
1733		unsigned char         ipmb_seq;
1734		long                  seqid;
1735
1736		if (addr->channel >= IPMI_MAX_CHANNELS) {
1737			ipmi_inc_stat(intf, sent_invalid_commands);
1738			rv = -EINVAL;
1739			goto out_err;
1740		}
1741
1742		if ((intf->channels[addr->channel].medium
1743				!= IPMI_CHANNEL_MEDIUM_8023LAN)
1744		    && (intf->channels[addr->channel].medium
1745				!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
1746			ipmi_inc_stat(intf, sent_invalid_commands);
1747			rv = -EINVAL;
1748			goto out_err;
1749		}
1750
1751		retries = 4;
1752
1753		/* Default to 1 second retries. */
1754		if (retry_time_ms == 0)
1755		    retry_time_ms = 1000;
1756
1757		/* 11 for the header and 1 for the checksum. */
1758		if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1759			ipmi_inc_stat(intf, sent_invalid_commands);
1760			rv = -EMSGSIZE;
1761			goto out_err;
1762		}
1763
1764		lan_addr = (struct ipmi_lan_addr *) addr;
1765		if (lan_addr->lun > 3) {
1766			ipmi_inc_stat(intf, sent_invalid_commands);
1767			rv = -EINVAL;
1768			goto out_err;
1769		}
1770
1771		memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1772
1773		if (recv_msg->msg.netfn & 0x1) {
1774			/*
1775			 * It's a response, so use the user's sequence
1776			 * from msgid.
1777			 */
1778			ipmi_inc_stat(intf, sent_lan_responses);
1779			format_lan_msg(smi_msg, msg, lan_addr, msgid,
1780				       msgid, source_lun);
1781
1782			/*
1783			 * Save the receive message so we can use it
1784			 * to deliver the response.
1785			 */
1786			smi_msg->user_data = recv_msg;
1787		} else {
1788			/* It's a command, so get a sequence for it. */
1789
1790			spin_lock_irqsave(&(intf->seq_lock), flags);
1791
1792			/*
1793			 * Create a sequence number with a 1 second
1794			 * timeout and 4 retries.
1795			 */
1796			rv = intf_next_seq(intf,
1797					   recv_msg,
1798					   retry_time_ms,
1799					   retries,
1800					   0,
1801					   &ipmb_seq,
1802					   &seqid);
1803			if (rv) {
1804				/*
1805				 * We have used up all the sequence numbers,
1806				 * probably, so abort.
1807				 */
1808				spin_unlock_irqrestore(&(intf->seq_lock),
1809						       flags);
1810				goto out_err;
1811			}
1812
1813			ipmi_inc_stat(intf, sent_lan_commands);
1814
1815			/*
1816			 * Store the sequence number in the message,
1817			 * so that when the send message response
1818			 * comes back we can start the timer.
1819			 */
1820			format_lan_msg(smi_msg, msg, lan_addr,
1821				       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1822				       ipmb_seq, source_lun);
1823
1824			/*
1825			 * Copy the message into the recv message data, so we
1826			 * can retransmit it later if necessary.
1827			 */
1828			memcpy(recv_msg->msg_data, smi_msg->data,
1829			       smi_msg->data_size);
1830			recv_msg->msg.data = recv_msg->msg_data;
1831			recv_msg->msg.data_len = smi_msg->data_size;
1832
1833			/*
1834			 * We don't unlock until here, because we need
1835			 * to copy the completed message into the
1836			 * recv_msg before we release the lock.
1837			 * Otherwise, race conditions may bite us.  I
1838			 * know that's pretty paranoid, but I prefer
1839			 * to be correct.
1840			 */
1841			spin_unlock_irqrestore(&(intf->seq_lock), flags);
1842		}
1843	} else {
1844	    /* Unknown address type. */
1845		ipmi_inc_stat(intf, sent_invalid_commands);
1846		rv = -EINVAL;
1847		goto out_err;
1848	}
1849
1850#ifdef DEBUG_MSGING
1851	{
1852		int m;
1853		for (m = 0; m < smi_msg->data_size; m++)
1854			printk(" %2.2x", smi_msg->data[m]);
1855		printk("\n");
1856	}
1857#endif
1858
1859	handlers->sender(intf->send_info, smi_msg, priority);
1860	rcu_read_unlock();
1861
1862	return 0;
1863
1864 out_err:
1865	rcu_read_unlock();
1866	ipmi_free_smi_msg(smi_msg);
1867	ipmi_free_recv_msg(recv_msg);
1868	return rv;
1869}
1870
1871static int check_addr(ipmi_smi_t       intf,
1872		      struct ipmi_addr *addr,
1873		      unsigned char    *saddr,
1874		      unsigned char    *lun)
1875{
1876	if (addr->channel >= IPMI_MAX_CHANNELS)
1877		return -EINVAL;
1878	*lun = intf->channels[addr->channel].lun;
1879	*saddr = intf->channels[addr->channel].address;
1880	return 0;
1881}
1882
1883int ipmi_request_settime(ipmi_user_t      user,
1884			 struct ipmi_addr *addr,
1885			 long             msgid,
1886			 struct kernel_ipmi_msg  *msg,
1887			 void             *user_msg_data,
1888			 int              priority,
1889			 int              retries,
1890			 unsigned int     retry_time_ms)
1891{
1892	unsigned char saddr = 0, lun = 0;
1893	int           rv;
1894
1895	if (!user)
1896		return -EINVAL;
1897	rv = check_addr(user->intf, addr, &saddr, &lun);
1898	if (rv)
1899		return rv;
1900	return i_ipmi_request(user,
1901			      user->intf,
1902			      addr,
1903			      msgid,
1904			      msg,
1905			      user_msg_data,
1906			      NULL, NULL,
1907			      priority,
1908			      saddr,
1909			      lun,
1910			      retries,
1911			      retry_time_ms);
1912}
1913EXPORT_SYMBOL(ipmi_request_settime);
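/*
 * Illustrative sketch (hypothetical, not part of the original source): send
 * a Get Device ID request to the local BMC over the system interface with
 * the default retry policy:
 *
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn    = IPMI_NETFN_APP_REQUEST,
 *		.cmd      = IPMI_GET_DEVICE_ID_CMD,
 *		.data     = NULL,
 *		.data_len = 0,
 *	};
 *
 *	rv = ipmi_request_settime(example_user, (struct ipmi_addr *) &addr,
 *				  0, &msg, NULL, 0, -1, 0);
 */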
1914
1915int ipmi_request_supply_msgs(ipmi_user_t          user,
1916			     struct ipmi_addr     *addr,
1917			     long                 msgid,
1918			     struct kernel_ipmi_msg *msg,
1919			     void                 *user_msg_data,
1920			     void                 *supplied_smi,
1921			     struct ipmi_recv_msg *supplied_recv,
1922			     int                  priority)
1923{
1924	unsigned char saddr = 0, lun = 0;
1925	int           rv;
1926
1927	if (!user)
1928		return -EINVAL;
1929	rv = check_addr(user->intf, addr, &saddr, &lun);
1930	if (rv)
1931		return rv;
1932	return i_ipmi_request(user,
1933			      user->intf,
1934			      addr,
1935			      msgid,
1936			      msg,
1937			      user_msg_data,
1938			      supplied_smi,
1939			      supplied_recv,
1940			      priority,
1941			      saddr,
1942			      lun,
1943			      -1, 0);
1944}
1945EXPORT_SYMBOL(ipmi_request_supply_msgs);
1946
1947#ifdef CONFIG_PROC_FS
1948static int smi_ipmb_proc_show(struct seq_file *m, void *v)
1949{
1950	ipmi_smi_t intf = m->private;
1951	int        i;
1952
1953	seq_printf(m, "%x", intf->channels[0].address);
1954	for (i = 1; i < IPMI_MAX_CHANNELS; i++)
1955		seq_printf(m, " %x", intf->channels[i].address);
1956	return seq_putc(m, '\n');
1957}
1958
1959static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
1960{
1961	return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode));
1962}
1963
1964static const struct file_operations smi_ipmb_proc_ops = {
1965	.open		= smi_ipmb_proc_open,
1966	.read		= seq_read,
1967	.llseek		= seq_lseek,
1968	.release	= single_release,
1969};
1970
1971static int smi_version_proc_show(struct seq_file *m, void *v)
1972{
1973	ipmi_smi_t intf = m->private;
1974
1975	return seq_printf(m, "%u.%u\n",
1976		       ipmi_version_major(&intf->bmc->id),
1977		       ipmi_version_minor(&intf->bmc->id));
1978}
1979
1980static int smi_version_proc_open(struct inode *inode, struct file *file)
1981{
1982	return single_open(file, smi_version_proc_show, PDE_DATA(inode));
1983}
1984
1985static const struct file_operations smi_version_proc_ops = {
1986	.open		= smi_version_proc_open,
1987	.read		= seq_read,
1988	.llseek		= seq_lseek,
1989	.release	= single_release,
1990};
1991
1992static int smi_stats_proc_show(struct seq_file *m, void *v)
1993{
1994	ipmi_smi_t intf = m->private;
1995
1996	seq_printf(m, "sent_invalid_commands:       %u\n",
1997		       ipmi_get_stat(intf, sent_invalid_commands));
1998	seq_printf(m, "sent_local_commands:         %u\n",
1999		       ipmi_get_stat(intf, sent_local_commands));
2000	seq_printf(m, "handled_local_responses:     %u\n",
2001		       ipmi_get_stat(intf, handled_local_responses));
2002	seq_printf(m, "unhandled_local_responses:   %u\n",
2003		       ipmi_get_stat(intf, unhandled_local_responses));
2004	seq_printf(m, "sent_ipmb_commands:          %u\n",
2005		       ipmi_get_stat(intf, sent_ipmb_commands));
2006	seq_printf(m, "sent_ipmb_command_errs:      %u\n",
2007		       ipmi_get_stat(intf, sent_ipmb_command_errs));
2008	seq_printf(m, "retransmitted_ipmb_commands: %u\n",
2009		       ipmi_get_stat(intf, retransmitted_ipmb_commands));
2010	seq_printf(m, "timed_out_ipmb_commands:     %u\n",
2011		       ipmi_get_stat(intf, timed_out_ipmb_commands));
2012	seq_printf(m, "timed_out_ipmb_broadcasts:   %u\n",
2013		       ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
2014	seq_printf(m, "sent_ipmb_responses:         %u\n",
2015		       ipmi_get_stat(intf, sent_ipmb_responses));
2016	seq_printf(m, "handled_ipmb_responses:      %u\n",
2017		       ipmi_get_stat(intf, handled_ipmb_responses));
2018	seq_printf(m, "invalid_ipmb_responses:      %u\n",
2019		       ipmi_get_stat(intf, invalid_ipmb_responses));
2020	seq_printf(m, "unhandled_ipmb_responses:    %u\n",
2021		       ipmi_get_stat(intf, unhandled_ipmb_responses));
2022	seq_printf(m, "sent_lan_commands:           %u\n",
2023		       ipmi_get_stat(intf, sent_lan_commands));
2024	seq_printf(m, "sent_lan_command_errs:       %u\n",
2025		       ipmi_get_stat(intf, sent_lan_command_errs));
2026	seq_printf(m, "retransmitted_lan_commands:  %u\n",
2027		       ipmi_get_stat(intf, retransmitted_lan_commands));
2028	seq_printf(m, "timed_out_lan_commands:      %u\n",
2029		       ipmi_get_stat(intf, timed_out_lan_commands));
2030	seq_printf(m, "sent_lan_responses:          %u\n",
2031		       ipmi_get_stat(intf, sent_lan_responses));
2032	seq_printf(m, "handled_lan_responses:       %u\n",
2033		       ipmi_get_stat(intf, handled_lan_responses));
2034	seq_printf(m, "invalid_lan_responses:       %u\n",
2035		       ipmi_get_stat(intf, invalid_lan_responses));
2036	seq_printf(m, "unhandled_lan_responses:     %u\n",
2037		       ipmi_get_stat(intf, unhandled_lan_responses));
2038	seq_printf(m, "handled_commands:            %u\n",
2039		       ipmi_get_stat(intf, handled_commands));
2040	seq_printf(m, "invalid_commands:            %u\n",
2041		       ipmi_get_stat(intf, invalid_commands));
2042	seq_printf(m, "unhandled_commands:          %u\n",
2043		       ipmi_get_stat(intf, unhandled_commands));
2044	seq_printf(m, "invalid_events:              %u\n",
2045		       ipmi_get_stat(intf, invalid_events));
2046	seq_printf(m, "events:                      %u\n",
2047		       ipmi_get_stat(intf, events));
2048	seq_printf(m, "failed rexmit LAN msgs:      %u\n",
2049		       ipmi_get_stat(intf, dropped_rexmit_lan_commands));
2050	seq_printf(m, "failed rexmit IPMB msgs:     %u\n",
2051		       ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
2052	return 0;
2053}
2054
2055static int smi_stats_proc_open(struct inode *inode, struct file *file)
2056{
2057	return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
2058}
2059
2060static const struct file_operations smi_stats_proc_ops = {
2061	.open		= smi_stats_proc_open,
2062	.read		= seq_read,
2063	.llseek		= seq_lseek,
2064	.release	= single_release,
2065};
2066#endif /* CONFIG_PROC_FS */
2067
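/*
 * Allow a lower-layer (SMI) driver to add its own file under this
 * interface's /proc/ipmi/<n>/ directory.
 *
 * Illustrative sketch only: a driver would typically register a
 * seq_file single_open() show routine, following the same pattern as
 * the "stats"/"ipmb"/"version" files above, using the ipmi_smi_t it
 * was handed in its start_processing() callback:
 *
 *	rv = ipmi_smi_add_proc_entry(intf, "type", &my_type_proc_ops,
 *				     my_private_data);
 *
 * Here "type", my_type_proc_ops and my_private_data are made-up
 * names for the example.
 */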
2068int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
2069			    const struct file_operations *proc_ops,
2070			    void *data)
2071{
2072	int                    rv = 0;
2073#ifdef CONFIG_PROC_FS
2074	struct proc_dir_entry  *file;
2075	struct ipmi_proc_entry *entry;
2076
2077	/* Create a list element. */
2078	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2079	if (!entry)
2080		return -ENOMEM;
2081	entry->name = kstrdup(name, GFP_KERNEL);
2082	if (!entry->name) {
2083		kfree(entry);
2084		return -ENOMEM;
2085	}
2086
2087	file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
2088	if (!file) {
2089		kfree(entry->name);
2090		kfree(entry);
2091		rv = -ENOMEM;
2092	} else {
2093		mutex_lock(&smi->proc_entry_lock);
2094		/* Stick it on the list. */
2095		entry->next = smi->proc_entries;
2096		smi->proc_entries = entry;
2097		mutex_unlock(&smi->proc_entry_lock);
2098	}
2099#endif /* CONFIG_PROC_FS */
2100
2101	return rv;
2102}
2103EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
2104
2105static int add_proc_entries(ipmi_smi_t smi, int num)
2106{
2107	int rv = 0;
2108
2109#ifdef CONFIG_PROC_FS
2110	sprintf(smi->proc_dir_name, "%d", num);
2111	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
2112	if (!smi->proc_dir)
2113		rv = -ENOMEM;
2114
2115	if (rv == 0)
2116		rv = ipmi_smi_add_proc_entry(smi, "stats",
2117					     &smi_stats_proc_ops,
2118					     smi);
2119
2120	if (rv == 0)
2121		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
2122					     &smi_ipmb_proc_ops,
2123					     smi);
2124
2125	if (rv == 0)
2126		rv = ipmi_smi_add_proc_entry(smi, "version",
2127					     &smi_version_proc_ops,
2128					     smi);
2129#endif /* CONFIG_PROC_FS */
2130
2131	return rv;
2132}
2133
2134static void remove_proc_entries(ipmi_smi_t smi)
2135{
2136#ifdef CONFIG_PROC_FS
2137	struct ipmi_proc_entry *entry;
2138
2139	mutex_lock(&smi->proc_entry_lock);
2140	while (smi->proc_entries) {
2141		entry = smi->proc_entries;
2142		smi->proc_entries = entry->next;
2143
2144		remove_proc_entry(entry->name, smi->proc_dir);
2145		kfree(entry->name);
2146		kfree(entry);
2147	}
2148	mutex_unlock(&smi->proc_entry_lock);
2149	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
2150#endif /* CONFIG_PROC_FS */
2151}
2152
2153static int __find_bmc_guid(struct device *dev, void *data)
2154{
2155	unsigned char *id = data;
2156	struct bmc_device *bmc = dev_get_drvdata(dev);
2157	return memcmp(bmc->guid, id, 16) == 0;
2158}
2159
2160static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2161					     unsigned char *guid)
2162{
2163	struct device *dev;
2164
2165	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2166	if (dev)
2167		return dev_get_drvdata(dev);
2168	else
2169		return NULL;
2170}
2171
2172struct prod_dev_id {
2173	unsigned int  product_id;
2174	unsigned char device_id;
2175};
2176
2177static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2178{
2179	struct prod_dev_id *id = data;
2180	struct bmc_device *bmc = dev_get_drvdata(dev);
2181
2182	return (bmc->id.product_id == id->product_id
2183		&& bmc->id.device_id == id->device_id);
2184}
2185
2186static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2187	struct device_driver *drv,
2188	unsigned int product_id, unsigned char device_id)
2189{
2190	struct prod_dev_id id = {
2191		.product_id = product_id,
2192		.device_id = device_id,
2193	};
2194	struct device *dev;
2195
2196	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2197	if (dev)
2198		return dev_get_drvdata(dev);
2199	else
2200		return NULL;
2201}
2202
2203static ssize_t device_id_show(struct device *dev,
2204			      struct device_attribute *attr,
2205			      char *buf)
2206{
2207	struct bmc_device *bmc = dev_get_drvdata(dev);
2208
2209	return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2210}
2211
2212static ssize_t provides_dev_sdrs_show(struct device *dev,
2213				      struct device_attribute *attr,
2214				      char *buf)
2215{
2216	struct bmc_device *bmc = dev_get_drvdata(dev);
2217
2218	return snprintf(buf, 10, "%u\n",
2219			(bmc->id.device_revision & 0x80) >> 7);
2220}
2221
2222static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2223			     char *buf)
2224{
2225	struct bmc_device *bmc = dev_get_drvdata(dev);
2226
2227	return snprintf(buf, 20, "%u\n",
2228			bmc->id.device_revision & 0x0F);
2229}
2230
2231static ssize_t firmware_rev_show(struct device *dev,
2232				 struct device_attribute *attr,
2233				 char *buf)
2234{
2235	struct bmc_device *bmc = dev_get_drvdata(dev);
2236
2237	return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2238			bmc->id.firmware_revision_2);
2239}
2240
2241static ssize_t ipmi_version_show(struct device *dev,
2242				 struct device_attribute *attr,
2243				 char *buf)
2244{
2245	struct bmc_device *bmc = dev_get_drvdata(dev);
2246
2247	return snprintf(buf, 20, "%u.%u\n",
2248			ipmi_version_major(&bmc->id),
2249			ipmi_version_minor(&bmc->id));
2250}
2251
2252static ssize_t add_dev_support_show(struct device *dev,
2253				    struct device_attribute *attr,
2254				    char *buf)
2255{
2256	struct bmc_device *bmc = dev_get_drvdata(dev);
2257
2258	return snprintf(buf, 10, "0x%02x\n",
2259			bmc->id.additional_device_support);
2260}
2261
2262static ssize_t manufacturer_id_show(struct device *dev,
2263				    struct device_attribute *attr,
2264				    char *buf)
2265{
2266	struct bmc_device *bmc = dev_get_drvdata(dev);
2267
2268	return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2269}
2270
2271static ssize_t product_id_show(struct device *dev,
2272			       struct device_attribute *attr,
2273			       char *buf)
2274{
2275	struct bmc_device *bmc = dev_get_drvdata(dev);
2276
2277	return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2278}
2279
2280static ssize_t aux_firmware_rev_show(struct device *dev,
2281				     struct device_attribute *attr,
2282				     char *buf)
2283{
2284	struct bmc_device *bmc = dev_get_drvdata(dev);
2285
2286	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2287			bmc->id.aux_firmware_revision[3],
2288			bmc->id.aux_firmware_revision[2],
2289			bmc->id.aux_firmware_revision[1],
2290			bmc->id.aux_firmware_revision[0]);
2291}
2292
2293static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2294			 char *buf)
2295{
2296	struct bmc_device *bmc = dev_get_drvdata(dev);
2297
2298	return snprintf(buf, 100, "%Lx%Lx\n",
2299			(long long) bmc->guid[0],
2300			(long long) bmc->guid[8]);
2301}
2302
2303static void remove_files(struct bmc_device *bmc)
2304{
2305	if (!bmc->dev)
2306		return;
2307
2308	device_remove_file(&bmc->dev->dev,
2309			   &bmc->device_id_attr);
2310	device_remove_file(&bmc->dev->dev,
2311			   &bmc->provides_dev_sdrs_attr);
2312	device_remove_file(&bmc->dev->dev,
2313			   &bmc->revision_attr);
2314	device_remove_file(&bmc->dev->dev,
2315			   &bmc->firmware_rev_attr);
2316	device_remove_file(&bmc->dev->dev,
2317			   &bmc->version_attr);
2318	device_remove_file(&bmc->dev->dev,
2319			   &bmc->add_dev_support_attr);
2320	device_remove_file(&bmc->dev->dev,
2321			   &bmc->manufacturer_id_attr);
2322	device_remove_file(&bmc->dev->dev,
2323			   &bmc->product_id_attr);
2324
2325	if (bmc->id.aux_firmware_revision_set)
2326		device_remove_file(&bmc->dev->dev,
2327				   &bmc->aux_firmware_rev_attr);
2328	if (bmc->guid_set)
2329		device_remove_file(&bmc->dev->dev,
2330				   &bmc->guid_attr);
2331}
2332
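/*
 * kref release routine for a BMC device; called when the last
 * interface using the BMC drops its reference.
 */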
2333static void
2334cleanup_bmc_device(struct kref *ref)
2335{
2336	struct bmc_device *bmc;
2337
2338	bmc = container_of(ref, struct bmc_device, refcount);
2339
2340	remove_files(bmc);
2341	platform_device_unregister(bmc->dev);
2342	kfree(bmc);
2343}
2344
2345static void ipmi_bmc_unregister(ipmi_smi_t intf)
2346{
2347	struct bmc_device *bmc = intf->bmc;
2348
2349	if (intf->sysfs_name) {
2350		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2351		kfree(intf->sysfs_name);
2352		intf->sysfs_name = NULL;
2353	}
2354	if (intf->my_dev_name) {
2355		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2356		kfree(intf->my_dev_name);
2357		intf->my_dev_name = NULL;
2358	}
2359
2360	mutex_lock(&ipmidriver_mutex);
2361	kref_put(&bmc->refcount, cleanup_bmc_device);
2362	intf->bmc = NULL;
2363	mutex_unlock(&ipmidriver_mutex);
2364}
2365
2366static int create_files(struct bmc_device *bmc)
2367{
2368	int err;
2369
2370	bmc->device_id_attr.attr.name = "device_id";
2371	bmc->device_id_attr.attr.mode = S_IRUGO;
2372	bmc->device_id_attr.show = device_id_show;
2373	sysfs_attr_init(&bmc->device_id_attr.attr);
2374
2375	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2376	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2377	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2378	sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
2379
2380	bmc->revision_attr.attr.name = "revision";
2381	bmc->revision_attr.attr.mode = S_IRUGO;
2382	bmc->revision_attr.show = revision_show;
2383	sysfs_attr_init(&bmc->revision_attr.attr);
2384
2385	bmc->firmware_rev_attr.attr.name = "firmware_revision";
2386	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2387	bmc->firmware_rev_attr.show = firmware_rev_show;
2388	sysfs_attr_init(&bmc->firmware_rev_attr.attr);
2389
2390	bmc->version_attr.attr.name = "ipmi_version";
2391	bmc->version_attr.attr.mode = S_IRUGO;
2392	bmc->version_attr.show = ipmi_version_show;
2393	sysfs_attr_init(&bmc->version_attr.attr);
2394
2395	bmc->add_dev_support_attr.attr.name = "additional_device_support";
2396	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2397	bmc->add_dev_support_attr.show = add_dev_support_show;
2398	sysfs_attr_init(&bmc->add_dev_support_attr.attr);
2399
2400	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2401	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2402	bmc->manufacturer_id_attr.show = manufacturer_id_show;
2403	sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
2404
2405	bmc->product_id_attr.attr.name = "product_id";
2406	bmc->product_id_attr.attr.mode = S_IRUGO;
2407	bmc->product_id_attr.show = product_id_show;
2408	sysfs_attr_init(&bmc->product_id_attr.attr);
2409
2410	bmc->guid_attr.attr.name = "guid";
2411	bmc->guid_attr.attr.mode = S_IRUGO;
2412	bmc->guid_attr.show = guid_show;
2413	sysfs_attr_init(&bmc->guid_attr.attr);
2414
2415	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2416	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2417	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2418	sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
2419
2420	err = device_create_file(&bmc->dev->dev,
2421			   &bmc->device_id_attr);
2422	if (err)
2423		goto out;
2424	err = device_create_file(&bmc->dev->dev,
2425			   &bmc->provides_dev_sdrs_attr);
2426	if (err)
2427		goto out_devid;
2428	err = device_create_file(&bmc->dev->dev,
2429			   &bmc->revision_attr);
2430	if (err)
2431		goto out_sdrs;
2432	err = device_create_file(&bmc->dev->dev,
2433			   &bmc->firmware_rev_attr);
2434	if (err)
2435		goto out_rev;
2436	err = device_create_file(&bmc->dev->dev,
2437			   &bmc->version_attr);
2438	if (err)
2439		goto out_firm;
2440	err = device_create_file(&bmc->dev->dev,
2441			   &bmc->add_dev_support_attr);
2442	if (err)
2443		goto out_version;
2444	err = device_create_file(&bmc->dev->dev,
2445			   &bmc->manufacturer_id_attr);
2446	if (err)
2447		goto out_add_dev;
2448	err = device_create_file(&bmc->dev->dev,
2449			   &bmc->product_id_attr);
2450	if (err)
2451		goto out_manu;
2452	if (bmc->id.aux_firmware_revision_set) {
2453		err = device_create_file(&bmc->dev->dev,
2454				   &bmc->aux_firmware_rev_attr);
2455		if (err)
2456			goto out_prod_id;
2457	}
2458	if (bmc->guid_set) {
2459		err = device_create_file(&bmc->dev->dev,
2460				   &bmc->guid_attr);
2461		if (err)
2462			goto out_aux_firm;
2463	}
2464
2465	return 0;
2466
2467out_aux_firm:
2468	if (bmc->id.aux_firmware_revision_set)
2469		device_remove_file(&bmc->dev->dev,
2470				   &bmc->aux_firmware_rev_attr);
2471out_prod_id:
2472	device_remove_file(&bmc->dev->dev,
2473			   &bmc->product_id_attr);
2474out_manu:
2475	device_remove_file(&bmc->dev->dev,
2476			   &bmc->manufacturer_id_attr);
2477out_add_dev:
2478	device_remove_file(&bmc->dev->dev,
2479			   &bmc->add_dev_support_attr);
2480out_version:
2481	device_remove_file(&bmc->dev->dev,
2482			   &bmc->version_attr);
2483out_firm:
2484	device_remove_file(&bmc->dev->dev,
2485			   &bmc->firmware_rev_attr);
2486out_rev:
2487	device_remove_file(&bmc->dev->dev,
2488			   &bmc->revision_attr);
2489out_sdrs:
2490	device_remove_file(&bmc->dev->dev,
2491			   &bmc->provides_dev_sdrs_attr);
2492out_devid:
2493	device_remove_file(&bmc->dev->dev,
2494			   &bmc->device_id_attr);
2495out:
2496	return err;
2497}
2498
2499static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2500			     const char *sysfs_name)
2501{
2502	int               rv;
2503	struct bmc_device *bmc = intf->bmc;
2504	struct bmc_device *old_bmc;
2505	int               size;
2506	char              dummy[1];
2507
2508	mutex_lock(&ipmidriver_mutex);
2509
2510	/*
2511	 * Try to find a bmc_device struct that already represents
2512	 * the interfaced BMC.
2513	 */
2514	if (bmc->guid_set)
2515		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
2516	else
2517		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2518						    bmc->id.product_id,
2519						    bmc->id.device_id);
2520
2521	/*
2522	 * If there is already a bmc_device, free the new one;
2523	 * otherwise register the new BMC device.
2524	 */
2525	if (old_bmc) {
2526		kfree(bmc);
2527		intf->bmc = old_bmc;
2528		bmc = old_bmc;
2529
2530		kref_get(&bmc->refcount);
2531		mutex_unlock(&ipmidriver_mutex);
2532
2533		printk(KERN_INFO
2534		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2535		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2536		       bmc->id.manufacturer_id,
2537		       bmc->id.product_id,
2538		       bmc->id.device_id);
2539	} else {
2540		char name[14];
2541		unsigned char orig_dev_id = bmc->id.device_id;
2542		int warn_printed = 0;
2543
2544		snprintf(name, sizeof(name),
2545			 "ipmi_bmc.%4.4x", bmc->id.product_id);
2546
2547		while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2548						 bmc->id.product_id,
2549						 bmc->id.device_id)) {
2550			if (!warn_printed) {
2551				printk(KERN_WARNING PFX
2552				       "This machine has two different BMCs"
2553				       " with the same product id and device"
2554				       " id.  This is an error in the"
2555				       " firmware, but the device id is being"
2556				       " incremented to work around the problem."
2557				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
2558				       bmc->id.product_id, bmc->id.device_id);
2559				warn_printed = 1;
2560			}
2561			bmc->id.device_id++; /* Wraps at 255 */
2562			if (bmc->id.device_id == orig_dev_id) {
2563				printk(KERN_ERR PFX
2564				       "Out of device ids!\n");
2565				break;
2566			}
2567		}
2568
2569		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2570		if (!bmc->dev) {
2571			mutex_unlock(&ipmidriver_mutex);
2572			printk(KERN_ERR
2573			       "ipmi_msghandler:"
2574			       " Unable to allocate platform device\n");
2575			return -ENOMEM;
2576		}
2577		bmc->dev->dev.driver = &ipmidriver.driver;
2578		dev_set_drvdata(&bmc->dev->dev, bmc);
2579		kref_init(&bmc->refcount);
2580
2581		rv = platform_device_add(bmc->dev);
2582		mutex_unlock(&ipmidriver_mutex);
2583		if (rv) {
2584			platform_device_put(bmc->dev);
2585			bmc->dev = NULL;
2586			printk(KERN_ERR
2587			       "ipmi_msghandler:"
2588			       " Unable to register bmc device: %d\n",
2589			       rv);
2590			/*
2591			 * Don't go to out_err, you can only do that if
2592			 * the device is registered already.
2593			 */
2594			return rv;
2595		}
2596
2597		rv = create_files(bmc);
2598		if (rv) {
2599			mutex_lock(&ipmidriver_mutex);
2600			platform_device_unregister(bmc->dev);
2601			mutex_unlock(&ipmidriver_mutex);
2602
2603			return rv;
2604		}
2605
2606		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
2607			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2608			 bmc->id.manufacturer_id,
2609			 bmc->id.product_id,
2610			 bmc->id.device_id);
2611	}
2612
2613	/*
2614	 * create symlink from system interface device to bmc device
2615	 * and back.
2616	 */
2617	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2618	if (!intf->sysfs_name) {
2619		rv = -ENOMEM;
2620		printk(KERN_ERR
2621		       "ipmi_msghandler: Unable to allocate link to BMC: %d\n",
2622		       rv);
2623		goto out_err;
2624	}
2625
2626	rv = sysfs_create_link(&intf->si_dev->kobj,
2627			       &bmc->dev->dev.kobj, intf->sysfs_name);
2628	if (rv) {
2629		kfree(intf->sysfs_name);
2630		intf->sysfs_name = NULL;
2631		printk(KERN_ERR
2632		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2633		       rv);
2634		goto out_err;
2635	}
2636
2637	size = snprintf(dummy, 0, "ipmi%d", ifnum);
2638	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2639	if (!intf->my_dev_name) {
2640		kfree(intf->sysfs_name);
2641		intf->sysfs_name = NULL;
2642		rv = -ENOMEM;
2643		printk(KERN_ERR
2644		       "ipmi_msghandler: Unable to allocate link from BMC: %d\n",
2645		       rv);
2646		goto out_err;
2647	}
2648	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2649
2650	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2651			       intf->my_dev_name);
2652	if (rv) {
2653		kfree(intf->sysfs_name);
2654		intf->sysfs_name = NULL;
2655		kfree(intf->my_dev_name);
2656		intf->my_dev_name = NULL;
2657		printk(KERN_ERR
2658		       "ipmi_msghandler:"
2659		       " Unable to create symlink to bmc: %d\n",
2660		       rv);
2661		goto out_err;
2662	}
2663
2664	return 0;
2665
2666out_err:
2667	ipmi_bmc_unregister(intf);
2668	return rv;
2669}
2670
2671static int
2672send_guid_cmd(ipmi_smi_t intf, int chan)
2673{
2674	struct kernel_ipmi_msg            msg;
2675	struct ipmi_system_interface_addr si;
2676
2677	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2678	si.channel = IPMI_BMC_CHANNEL;
2679	si.lun = 0;
2680
2681	msg.netfn = IPMI_NETFN_APP_REQUEST;
2682	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2683	msg.data = NULL;
2684	msg.data_len = 0;
2685	return i_ipmi_request(NULL,
2686			      intf,
2687			      (struct ipmi_addr *) &si,
2688			      0,
2689			      &msg,
2690			      intf,
2691			      NULL,
2692			      NULL,
2693			      0,
2694			      intf->channels[0].address,
2695			      intf->channels[0].lun,
2696			      -1, 0);
2697}
2698
2699static void
2700guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2701{
2702	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2703	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2704	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2705		/* Not for me */
2706		return;
2707
2708	if (msg->msg.data[0] != 0) {
2709		/* Error getting the GUID; the BMC doesn't have one. */
2710		intf->bmc->guid_set = 0;
2711		goto out;
2712	}
2713
2714	if (msg->msg.data_len < 17) {
2715		intf->bmc->guid_set = 0;
2716		printk(KERN_WARNING PFX
2717		       "guid_handler: The GUID response from the BMC was too"
2718		       " short; it was %d bytes but should have been 17."
2719		       "  Assuming the GUID is not available.\n",
2720		       msg->msg.data_len);
2721		goto out;
2722	}
2723
2724	memcpy(intf->bmc->guid, msg->msg.data, 16);
2725	intf->bmc->guid_set = 1;
2726 out:
2727	wake_up(&intf->waitq);
2728}
2729
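/*
 * Ask the BMC for its GUID and wait for the answer.  guid_set is used
 * as a little state machine here: 2 means the request is outstanding,
 * and guid_handler() (or a send failure) resolves it to 1 (GUID valid)
 * or 0 (no GUID).
 */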
2730static void
2731get_guid(ipmi_smi_t intf)
2732{
2733	int rv;
2734
2735	intf->bmc->guid_set = 0x2;
2736	intf->null_user_handler = guid_handler;
2737	rv = send_guid_cmd(intf, 0);
2738	if (rv)
2739		/* Send failed, no GUID available. */
2740		intf->bmc->guid_set = 0;
2741	wait_event(intf->waitq, intf->bmc->guid_set != 2);
2742	intf->null_user_handler = NULL;
2743}
2744
2745static int
2746send_channel_info_cmd(ipmi_smi_t intf, int chan)
2747{
2748	struct kernel_ipmi_msg            msg;
2749	unsigned char                     data[1];
2750	struct ipmi_system_interface_addr si;
2751
2752	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2753	si.channel = IPMI_BMC_CHANNEL;
2754	si.lun = 0;
2755
2756	msg.netfn = IPMI_NETFN_APP_REQUEST;
2757	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2758	msg.data = data;
2759	msg.data_len = 1;
2760	data[0] = chan;
2761	return i_ipmi_request(NULL,
2762			      intf,
2763			      (struct ipmi_addr *) &si,
2764			      0,
2765			      &msg,
2766			      intf,
2767			      NULL,
2768			      NULL,
2769			      0,
2770			      intf->channels[0].address,
2771			      intf->channels[0].lun,
2772			      -1, 0);
2773}
2774
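/*
 * Response handler for the channel scan done at startup.  Each Get
 * Channel Info response records the medium/protocol for the current
 * channel and kicks off the request for the next one; when
 * curr_channel reaches IPMI_MAX_CHANNELS (or the command turns out to
 * be unsupported), anyone waiting on intf->waitq is woken.
 */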
2775static void
2776channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2777{
2778	int rv = 0;
2779	int chan;
2780
2781	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2782	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2783	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
2784		/* It's the one we want */
2785		if (msg->msg.data[0] != 0) {
2786			/* Got an error from the channel, just go on. */
2787
2788			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2789				/*
2790				 * If the MC does not support this
2791				 * command, that is legal.  We just
2792				 * assume it has one IPMB at channel
2793				 * zero.
2794				 */
2795				intf->channels[0].medium
2796					= IPMI_CHANNEL_MEDIUM_IPMB;
2797				intf->channels[0].protocol
2798					= IPMI_CHANNEL_PROTOCOL_IPMB;
2799
2800				intf->curr_channel = IPMI_MAX_CHANNELS;
2801				wake_up(&intf->waitq);
2802				goto out;
2803			}
2804			goto next_channel;
2805		}
2806		if (msg->msg.data_len < 4) {
2807			/* Message not big enough, just go on. */
2808			goto next_channel;
2809		}
2810		chan = intf->curr_channel;
2811		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2812		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2813
2814 next_channel:
2815		intf->curr_channel++;
2816		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2817			wake_up(&intf->waitq);
2818		else
2819			rv = send_channel_info_cmd(intf, intf->curr_channel);
2820
2821		if (rv) {
2822			/* Got an error somehow, just give up. */
2823			printk(KERN_WARNING PFX
2824			       "Error sending channel information for channel"
2825			       " %d: %d\n", intf->curr_channel, rv);
2826
2827			intf->curr_channel = IPMI_MAX_CHANNELS;
2828			wake_up(&intf->waitq);
2829		}
2830	}
2831 out:
2832	return;
2833}
2834
2835static void ipmi_poll(ipmi_smi_t intf)
2836{
2837	if (intf->handlers->poll)
2838		intf->handlers->poll(intf->send_info);
2839	/* In case something came in */
2840	handle_new_recv_msgs(intf);
2841}
2842
2843void ipmi_poll_interface(ipmi_user_t user)
2844{
2845	ipmi_poll(user->intf);
2846}
2847EXPORT_SYMBOL(ipmi_poll_interface);
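/*
 * Illustrative sketch only (not an in-tree caller): code that must
 * make progress with interrupts disabled, such as a panic-time
 * watchdog path, can spin on the poll routine until its receive
 * handler has run:
 *
 *	while (!my_response_arrived)
 *		ipmi_poll_interface(user);
 *
 * my_response_arrived is a made-up flag the receive handler would set.
 */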
2848
2849int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2850		      void		       *send_info,
2851		      struct ipmi_device_id    *device_id,
2852		      struct device            *si_dev,
2853		      const char               *sysfs_name,
2854		      unsigned char            slave_addr)
2855{
2856	int              i, j;
2857	int              rv;
2858	ipmi_smi_t       intf;
2859	ipmi_smi_t       tintf;
2860	struct list_head *link;
2861
2862	/*
2863	 * Make sure the driver is actually initialized, this handles
2864	 * problems with initialization order.
2865	 */
2866	if (!initialized) {
2867		rv = ipmi_init_msghandler();
2868		if (rv)
2869			return rv;
2870		/*
2871		 * The init code doesn't return an error if it was turned
2872		 * off, but it won't initialize.  Check that.
2873		 */
2874		if (!initialized)
2875			return -ENODEV;
2876	}
2877
2878	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
2879	if (!intf)
2880		return -ENOMEM;
2881
2882	intf->ipmi_version_major = ipmi_version_major(device_id);
2883	intf->ipmi_version_minor = ipmi_version_minor(device_id);
2884
2885	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2886	if (!intf->bmc) {
2887		kfree(intf);
2888		return -ENOMEM;
2889	}
2890	intf->intf_num = -1; /* Mark it invalid for now. */
2891	kref_init(&intf->refcount);
2892	intf->bmc->id = *device_id;
2893	intf->si_dev = si_dev;
2894	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2895		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2896		intf->channels[j].lun = 2;
2897	}
2898	if (slave_addr != 0)
2899		intf->channels[0].address = slave_addr;
2900	INIT_LIST_HEAD(&intf->users);
2901	intf->handlers = handlers;
2902	intf->send_info = send_info;
2903	spin_lock_init(&intf->seq_lock);
2904	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2905		intf->seq_table[j].inuse = 0;
2906		intf->seq_table[j].seqid = 0;
2907	}
2908	intf->curr_seq = 0;
2909#ifdef CONFIG_PROC_FS
2910	mutex_init(&intf->proc_entry_lock);
2911#endif
2912	spin_lock_init(&intf->waiting_msgs_lock);
2913	INIT_LIST_HEAD(&intf->waiting_msgs);
2914	tasklet_init(&intf->recv_tasklet,
2915		     smi_recv_tasklet,
2916		     (unsigned long) intf);
2917	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
2918	spin_lock_init(&intf->events_lock);
2919	atomic_set(&intf->event_waiters, 0);
2920	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
2921	INIT_LIST_HEAD(&intf->waiting_events);
2922	intf->waiting_events_count = 0;
2923	mutex_init(&intf->cmd_rcvrs_mutex);
2924	spin_lock_init(&intf->maintenance_mode_lock);
2925	INIT_LIST_HEAD(&intf->cmd_rcvrs);
2926	init_waitqueue_head(&intf->waitq);
2927	for (i = 0; i < IPMI_NUM_STATS; i++)
2928		atomic_set(&intf->stats[i], 0);
2929
2930	intf->proc_dir = NULL;
2931
2932	mutex_lock(&smi_watchers_mutex);
2933	mutex_lock(&ipmi_interfaces_mutex);
2934	/* Look for a hole in the numbers. */
2935	i = 0;
2936	link = &ipmi_interfaces;
2937	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2938		if (tintf->intf_num != i) {
2939			link = &tintf->link;
2940			break;
2941		}
2942		i++;
2943	}
2944	/* Add the new interface in numeric order. */
2945	if (i == 0)
2946		list_add_rcu(&intf->link, &ipmi_interfaces);
2947	else
2948		list_add_tail_rcu(&intf->link, link);
2949
2950	rv = handlers->start_processing(send_info, intf);
2951	if (rv)
2952		goto out;
2953
2954	get_guid(intf);
2955
2956	if ((intf->ipmi_version_major > 1)
2957			|| ((intf->ipmi_version_major == 1)
2958			    && (intf->ipmi_version_minor >= 5))) {
2959		/*
2960		 * Start scanning the channels to see what is
2961		 * available.
2962		 */
2963		intf->null_user_handler = channel_handler;
2964		intf->curr_channel = 0;
2965		rv = send_channel_info_cmd(intf, 0);
2966		if (rv) {
2967			printk(KERN_WARNING PFX
2968			       "Error sending channel information for channel"
2969			       " 0, %d\n", rv);
2970			goto out;
2971		}
2972
2973		/* Wait for the channel info to be read. */
2974		wait_event(intf->waitq,
2975			   intf->curr_channel >= IPMI_MAX_CHANNELS);
2976		intf->null_user_handler = NULL;
2977	} else {
2978		/* Assume a single IPMB channel at zero. */
2979		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2980		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2981		intf->curr_channel = IPMI_MAX_CHANNELS;
2982	}
2983
2984	if (rv == 0)
2985		rv = add_proc_entries(intf, i);
2986
2987	rv = ipmi_bmc_register(intf, i, sysfs_name);
2988
2989 out:
2990	if (rv) {
2991		if (intf->proc_dir)
2992			remove_proc_entries(intf);
2993		intf->handlers = NULL;
2994		list_del_rcu(&intf->link);
2995		mutex_unlock(&ipmi_interfaces_mutex);
2996		mutex_unlock(&smi_watchers_mutex);
2997		synchronize_rcu();
2998		kref_put(&intf->refcount, intf_free);
2999	} else {
3000		/*
3001		 * Keep memory order straight for RCU readers.  Make
3002		 * sure everything else is committed to memory before
3003		 * setting intf_num to mark the interface valid.
3004		 */
3005		smp_wmb();
3006		intf->intf_num = i;
3007		mutex_unlock(&ipmi_interfaces_mutex);
3008		/* After this point the interface is legal to use. */
3009		call_smi_watchers(i, intf->si_dev);
3010		mutex_unlock(&smi_watchers_mutex);
3011	}
3012
3013	return rv;
3014}
3015EXPORT_SYMBOL(ipmi_register_smi);
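/*
 * Illustrative sketch only (not a complete driver): a system
 * interface driver registers itself roughly like this, after filling
 * in a struct ipmi_smi_handlers with at least .start_processing and
 * .sender (and optionally .poll):
 *
 *	static struct ipmi_smi_handlers my_handlers = {
 *		.owner            = THIS_MODULE,
 *		.start_processing = my_start_processing,
 *		.sender           = my_sender,
 *		.poll             = my_poll,
 *	};
 *
 *	rv = ipmi_register_smi(&my_handlers, my_send_info, &my_device_id,
 *			       my_dev, "bmc", my_slave_addr);
 *
 * The my_* names are made up for the example.  ipmi_unregister_smi()
 * below undoes the registration when the driver goes away.
 */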
3016
3017static void cleanup_smi_msgs(ipmi_smi_t intf)
3018{
3019	int              i;
3020	struct seq_table *ent;
3021
3022	/* No need for locks, the interface is down. */
3023	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3024		ent = &(intf->seq_table[i]);
3025		if (!ent->inuse)
3026			continue;
3027		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3028	}
3029}
3030
3031int ipmi_unregister_smi(ipmi_smi_t intf)
3032{
3033	struct ipmi_smi_watcher *w;
3034	int    intf_num = intf->intf_num;
3035
3036	ipmi_bmc_unregister(intf);
3037
3038	mutex_lock(&smi_watchers_mutex);
3039	mutex_lock(&ipmi_interfaces_mutex);
3040	intf->intf_num = -1;
3041	intf->handlers = NULL;
3042	list_del_rcu(&intf->link);
3043	mutex_unlock(&ipmi_interfaces_mutex);
3044	synchronize_rcu();
3045
3046	cleanup_smi_msgs(intf);
3047
3048	remove_proc_entries(intf);
3049
3050	/*
3051	 * Call all the watcher interfaces to tell them that
3052	 * an interface is gone.
3053	 */
3054	list_for_each_entry(w, &smi_watchers, link)
3055		w->smi_gone(intf_num);
3056	mutex_unlock(&smi_watchers_mutex);
3057
3058	kref_put(&intf->refcount, intf_free);
3059	return 0;
3060}
3061EXPORT_SYMBOL(ipmi_unregister_smi);
3062
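/*
 * Handle a Get Message response that carries an IPMB response from a
 * remote MC: rebuild the originator's address, look the message up in
 * the sequence table and deliver it to the waiting user.
 */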
3063static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
3064				   struct ipmi_smi_msg *msg)
3065{
3066	struct ipmi_ipmb_addr ipmb_addr;
3067	struct ipmi_recv_msg  *recv_msg;
3068
3069	/*
3070	 * This is 11, not 10, because the response must contain a
3071	 * completion code.
3072	 */
3073	if (msg->rsp_size < 11) {
3074		/* Message not big enough, just ignore it. */
3075		ipmi_inc_stat(intf, invalid_ipmb_responses);
3076		return 0;
3077	}
3078
3079	if (msg->rsp[2] != 0) {
3080		/* An error getting the response, just ignore it. */
3081		return 0;
3082	}
3083
3084	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3085	ipmb_addr.slave_addr = msg->rsp[6];
3086	ipmb_addr.channel = msg->rsp[3] & 0x0f;
3087	ipmb_addr.lun = msg->rsp[7] & 3;
3088
3089	/*
3090	 * It's a response from a remote entity.  Look up the sequence
3091	 * number and handle the response.
3092	 */
3093	if (intf_find_seq(intf,
3094			  msg->rsp[7] >> 2,
3095			  msg->rsp[3] & 0x0f,
3096			  msg->rsp[8],
3097			  (msg->rsp[4] >> 2) & (~1),
3098			  (struct ipmi_addr *) &(ipmb_addr),
3099			  &recv_msg)) {
3100		/*
3101		 * We were unable to find the sequence number,
3102		 * so just nuke the message.
3103		 */
3104		ipmi_inc_stat(intf, unhandled_ipmb_responses);
3105		return 0;
3106	}
3107
3108	memcpy(recv_msg->msg_data,
3109	       &(msg->rsp[9]),
3110	       msg->rsp_size - 9);
3111	/*
3112	 * The other fields matched, so no need to set them, except
3113	 * for netfn, which needs to be the response that was
3114	 * returned, not the request value.
3115	 */
3116	recv_msg->msg.netfn = msg->rsp[4] >> 2;
3117	recv_msg->msg.data = recv_msg->msg_data;
3118	recv_msg->msg.data_len = msg->rsp_size - 10;
3119	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3120	ipmi_inc_stat(intf, handled_ipmb_responses);
3121	deliver_response(recv_msg);
3122
3123	return 0;
3124}
3125
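/*
 * Handle a Get Message response that carries an IPMB command sent to
 * us.  If a user has registered for the netfn/cmd/channel it is
 * delivered there; otherwise an "invalid command" completion code is
 * sent back to the originator.
 */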
3126static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
3127				   struct ipmi_smi_msg *msg)
3128{
3129	struct cmd_rcvr          *rcvr;
3130	int                      rv = 0;
3131	unsigned char            netfn;
3132	unsigned char            cmd;
3133	unsigned char            chan;
3134	ipmi_user_t              user = NULL;
3135	struct ipmi_ipmb_addr    *ipmb_addr;
3136	struct ipmi_recv_msg     *recv_msg;
3137	struct ipmi_smi_handlers *handlers;
3138
3139	if (msg->rsp_size < 10) {
3140		/* Message not big enough, just ignore it. */
3141		ipmi_inc_stat(intf, invalid_commands);
3142		return 0;
3143	}
3144
3145	if (msg->rsp[2] != 0) {
3146		/* An error getting the response, just ignore it. */
3147		return 0;
3148	}
3149
3150	netfn = msg->rsp[4] >> 2;
3151	cmd = msg->rsp[8];
3152	chan = msg->rsp[3] & 0xf;
3153
3154	rcu_read_lock();
3155	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3156	if (rcvr) {
3157		user = rcvr->user;
3158		kref_get(&user->refcount);
3159	} else
3160		user = NULL;
3161	rcu_read_unlock();
3162
3163	if (user == NULL) {
3164		/* We didn't find a user, deliver an error response. */
3165		ipmi_inc_stat(intf, unhandled_commands);
3166
3167		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3168		msg->data[1] = IPMI_SEND_MSG_CMD;
3169		msg->data[2] = msg->rsp[3];
3170		msg->data[3] = msg->rsp[6];
3171		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3172		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
3173		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
3174		/* rqseq/lun */
3175		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3176		msg->data[8] = msg->rsp[8]; /* cmd */
3177		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3178		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
3179		msg->data_size = 11;
3180
3181#ifdef DEBUG_MSGING
3182	{
3183		int m;
3184		printk("Invalid command:");
3185		for (m = 0; m < msg->data_size; m++)
3186			printk(" %2.2x", msg->data[m]);
3187		printk("\n");
3188	}
3189#endif
3190		rcu_read_lock();
3191		handlers = intf->handlers;
3192		if (handlers) {
3193			handlers->sender(intf->send_info, msg, 0);
3194			/*
3195			 * We used the message, so return the value
3196			 * that causes it to not be freed or
3197			 * queued.
3198			 */
3199			rv = -1;
3200		}
3201		rcu_read_unlock();
3202	} else {
3203		/* Deliver the message to the user. */
3204		ipmi_inc_stat(intf, handled_commands);
3205
3206		recv_msg = ipmi_alloc_recv_msg();
3207		if (!recv_msg) {
3208			/*
3209			 * We couldn't allocate memory for the
3210			 * message, so requeue it for handling
3211			 * later.
3212			 */
3213			rv = 1;
3214			kref_put(&user->refcount, free_user);
3215		} else {
3216			/* Extract the source address from the data. */
3217			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3218			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3219			ipmb_addr->slave_addr = msg->rsp[6];
3220			ipmb_addr->lun = msg->rsp[7] & 3;
3221			ipmb_addr->channel = msg->rsp[3] & 0xf;
3222
3223			/*
3224			 * Extract the rest of the message information
3225			 * from the IPMB header.
3226			 */
3227			recv_msg->user = user;
3228			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3229			recv_msg->msgid = msg->rsp[7] >> 2;
3230			recv_msg->msg.netfn = msg->rsp[4] >> 2;
3231			recv_msg->msg.cmd = msg->rsp[8];
3232			recv_msg->msg.data = recv_msg->msg_data;
3233
3234			/*
3235			 * We chop off 10, not 9 bytes because the checksum
3236			 * at the end also needs to be removed.
3237			 */
3238			recv_msg->msg.data_len = msg->rsp_size - 10;
3239			memcpy(recv_msg->msg_data,
3240			       &(msg->rsp[9]),
3241			       msg->rsp_size - 10);
3242			deliver_response(recv_msg);
3243		}
3244	}
3245
3246	return rv;
3247}
3248
3249static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
3250				  struct ipmi_smi_msg *msg)
3251{
3252	struct ipmi_lan_addr  lan_addr;
3253	struct ipmi_recv_msg  *recv_msg;
3254
3255
3256	/*
3257	 * This is 13, not 12, because the response must contain a
3258	 * completion code.
3259	 */
3260	if (msg->rsp_size < 13) {
3261		/* Message not big enough, just ignore it. */
3262		ipmi_inc_stat(intf, invalid_lan_responses);
3263		return 0;
3264	}
3265
3266	if (msg->rsp[2] != 0) {
3267		/* An error getting the response, just ignore it. */
3268		return 0;
3269	}
3270
3271	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3272	lan_addr.session_handle = msg->rsp[4];
3273	lan_addr.remote_SWID = msg->rsp[8];
3274	lan_addr.local_SWID = msg->rsp[5];
3275	lan_addr.channel = msg->rsp[3] & 0x0f;
3276	lan_addr.privilege = msg->rsp[3] >> 4;
3277	lan_addr.lun = msg->rsp[9] & 3;
3278
3279	/*
3280	 * It's a response from a remote entity.  Look up the sequence
3281	 * number and handle the response.
3282	 */
3283	if (intf_find_seq(intf,
3284			  msg->rsp[9] >> 2,
3285			  msg->rsp[3] & 0x0f,
3286			  msg->rsp[10],
3287			  (msg->rsp[6] >> 2) & (~1),
3288			  (struct ipmi_addr *) &(lan_addr),
3289			  &recv_msg)) {
3290		/*
3291		 * We were unable to find the sequence number,
3292		 * so just nuke the message.
3293		 */
3294		ipmi_inc_stat(intf, unhandled_lan_responses);
3295		return 0;
3296	}
3297
3298	memcpy(recv_msg->msg_data,
3299	       &(msg->rsp[11]),
3300	       msg->rsp_size - 11);
3301	/*
3302	 * The other fields matched, so no need to set them, except
3303	 * for netfn, which needs to be the response that was
3304	 * returned, not the request value.
3305	 */
3306	recv_msg->msg.netfn = msg->rsp[6] >> 2;
3307	recv_msg->msg.data = recv_msg->msg_data;
3308	recv_msg->msg.data_len = msg->rsp_size - 12;
3309	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3310	ipmi_inc_stat(intf, handled_lan_responses);
3311	deliver_response(recv_msg);
3312
3313	return 0;
3314}
3315
3316static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
3317				  struct ipmi_smi_msg *msg)
3318{
3319	struct cmd_rcvr          *rcvr;
3320	int                      rv = 0;
3321	unsigned char            netfn;
3322	unsigned char            cmd;
3323	unsigned char            chan;
3324	ipmi_user_t              user = NULL;
3325	struct ipmi_lan_addr     *lan_addr;
3326	struct ipmi_recv_msg     *recv_msg;
3327
3328	if (msg->rsp_size < 12) {
3329		/* Message not big enough, just ignore it. */
3330		ipmi_inc_stat(intf, invalid_commands);
3331		return 0;
3332	}
3333
3334	if (msg->rsp[2] != 0) {
3335		/* An error getting the response, just ignore it. */
3336		return 0;
3337	}
3338
3339	netfn = msg->rsp[6] >> 2;
3340	cmd = msg->rsp[10];
3341	chan = msg->rsp[3] & 0xf;
3342
3343	rcu_read_lock();
3344	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3345	if (rcvr) {
3346		user = rcvr->user;
3347		kref_get(&user->refcount);
3348	} else
3349		user = NULL;
3350	rcu_read_unlock();
3351
3352	if (user == NULL) {
3353		/* We didn't find a user, just give up. */
3354		ipmi_inc_stat(intf, unhandled_commands);
3355
3356		/*
3357		 * Don't do anything with these messages, just allow
3358		 * them to be freed.
3359		 */
3360		rv = 0;
3361	} else {
3362		/* Deliver the message to the user. */
3363		ipmi_inc_stat(intf, handled_commands);
3364
3365		recv_msg = ipmi_alloc_recv_msg();
3366		if (!recv_msg) {
3367			/*
3368			 * We couldn't allocate memory for the
3369			 * message, so requeue it for handling later.
3370			 */
3371			rv = 1;
3372			kref_put(&user->refcount, free_user);
3373		} else {
3374			/* Extract the source address from the data. */
3375			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3376			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3377			lan_addr->session_handle = msg->rsp[4];
3378			lan_addr->remote_SWID = msg->rsp[8];
3379			lan_addr->local_SWID = msg->rsp[5];
3380			lan_addr->lun = msg->rsp[9] & 3;
3381			lan_addr->channel = msg->rsp[3] & 0xf;
3382			lan_addr->privilege = msg->rsp[3] >> 4;
3383
3384			/*
3385			 * Extract the rest of the message information
3386			 * from the IPMB header.
3387			 */
3388			recv_msg->user = user;
3389			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3390			recv_msg->msgid = msg->rsp[9] >> 2;
3391			recv_msg->msg.netfn = msg->rsp[6] >> 2;
3392			recv_msg->msg.cmd = msg->rsp[10];
3393			recv_msg->msg.data = recv_msg->msg_data;
3394
3395			/*
3396			 * We chop off 12, not 11 bytes because the checksum
3397			 * at the end also needs to be removed.
3398			 */
3399			recv_msg->msg.data_len = msg->rsp_size - 12;
3400			memcpy(recv_msg->msg_data,
3401			       &(msg->rsp[11]),
3402			       msg->rsp_size - 12);
3403			deliver_response(recv_msg);
3404		}
3405	}
3406
3407	return rv;
3408}
3409
3410/*
3411 * This routine will handle "Get Message" command responses with
3412 * channels that use an OEM Medium. The message format belongs to
3413 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3414 * Chapter 22, sections 22.6 and 22.24 for more details.
3415 */
3416static int handle_oem_get_msg_cmd(ipmi_smi_t          intf,
3417				  struct ipmi_smi_msg *msg)
3418{
3419	struct cmd_rcvr       *rcvr;
3420	int                   rv = 0;
3421	unsigned char         netfn;
3422	unsigned char         cmd;
3423	unsigned char         chan;
3424	ipmi_user_t           user = NULL;
3425	struct ipmi_system_interface_addr *smi_addr;
3426	struct ipmi_recv_msg  *recv_msg;
3427
3428	/*
3429	 * We expect the OEM SW to perform error checking, so we
3430	 * just do some basic sanity checks here.
3431	 */
3432	if (msg->rsp_size < 4) {
3433		/* Message not big enough, just ignore it. */
3434		ipmi_inc_stat(intf, invalid_commands);
3435		return 0;
3436	}
3437
3438	if (msg->rsp[2] != 0) {
3439		/* An error getting the response, just ignore it. */
3440		return 0;
3441	}
3442
3443	/*
3444	 * This is an OEM message, so the OEM needs to know how to
3445	 * handle it.  We do no interpretation.
3446	 */
3447	netfn = msg->rsp[0] >> 2;
3448	cmd = msg->rsp[1];
3449	chan = msg->rsp[3] & 0xf;
3450
3451	rcu_read_lock();
3452	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3453	if (rcvr) {
3454		user = rcvr->user;
3455		kref_get(&user->refcount);
3456	} else
3457		user = NULL;
3458	rcu_read_unlock();
3459
3460	if (user == NULL) {
3461		/* We didn't find a user, just give up. */
3462		ipmi_inc_stat(intf, unhandled_commands);
3463
3464		/*
3465		 * Don't do anything with these messages, just allow
3466		 * them to be freed.
3467		 */
3468
3469		rv = 0;
3470	} else {
3471		/* Deliver the message to the user. */
3472		ipmi_inc_stat(intf, handled_commands);
3473
3474		recv_msg = ipmi_alloc_recv_msg();
3475		if (!recv_msg) {
3476			/*
3477			 * We couldn't allocate memory for the
3478			 * message, so requeue it for handling
3479			 * later.
3480			 */
3481			rv = 1;
3482			kref_put(&user->refcount, free_user);
3483		} else {
3484			/*
3485			 * OEM Messages are expected to be delivered via
3486			 * the system interface to SMS software.  We might
3487			 * need to revisit this depending on OEM
3488			 * requirements.
3489			 */
3490			smi_addr = ((struct ipmi_system_interface_addr *)
3491				    &(recv_msg->addr));
3492			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3493			smi_addr->channel = IPMI_BMC_CHANNEL;
3494			smi_addr->lun = msg->rsp[0] & 3;
3495
3496			recv_msg->user = user;
3497			recv_msg->user_msg_data = NULL;
3498			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
3499			recv_msg->msg.netfn = msg->rsp[0] >> 2;
3500			recv_msg->msg.cmd = msg->rsp[1];
3501			recv_msg->msg.data = recv_msg->msg_data;
3502
3503			/*
3504			 * The message starts at byte 4, which follows
3505			 * the Channel Byte in the "GET MESSAGE" command.
3506			 */
3507			recv_msg->msg.data_len = msg->rsp_size - 4;
3508			memcpy(recv_msg->msg_data,
3509			       &(msg->rsp[4]),
3510			       msg->rsp_size - 4);
3511			deliver_response(recv_msg);
3512		}
3513	}
3514
3515	return rv;
3516}
3517
3518static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3519				     struct ipmi_smi_msg  *msg)
3520{
3521	struct ipmi_system_interface_addr *smi_addr;
3522
3523	recv_msg->msgid = 0;
3524	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3525	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3526	smi_addr->channel = IPMI_BMC_CHANNEL;
3527	smi_addr->lun = msg->rsp[0] & 3;
3528	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3529	recv_msg->msg.netfn = msg->rsp[0] >> 2;
3530	recv_msg->msg.cmd = msg->rsp[1];
3531	memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3532	recv_msg->msg.data = recv_msg->msg_data;
3533	recv_msg->msg.data_len = msg->rsp_size - 3;
3534}
3535
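/*
 * Handle a Read Event Message Buffer response.  A copy of the event
 * is delivered to every user that asked for events; if nobody is
 * listening, it is held in the interface's event queue, bounded by
 * MAX_EVENTS_IN_QUEUE.
 */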
3536static int handle_read_event_rsp(ipmi_smi_t          intf,
3537				 struct ipmi_smi_msg *msg)
3538{
3539	struct ipmi_recv_msg *recv_msg, *recv_msg2;
3540	struct list_head     msgs;
3541	ipmi_user_t          user;
3542	int                  rv = 0;
3543	int                  deliver_count = 0;
3544	unsigned long        flags;
3545
3546	if (msg->rsp_size < 19) {
3547		/* Message is too small to be an IPMB event. */
3548		ipmi_inc_stat(intf, invalid_events);
3549		return 0;
3550	}
3551
3552	if (msg->rsp[2] != 0) {
3553		/* An error getting the event, just ignore it. */
3554		return 0;
3555	}
3556
3557	INIT_LIST_HEAD(&msgs);
3558
3559	spin_lock_irqsave(&intf->events_lock, flags);
3560
3561	ipmi_inc_stat(intf, events);
3562
3563	/*
3564	 * Allocate and fill in one message for every user that is
3565	 * getting events.
3566	 */
3567	rcu_read_lock();
3568	list_for_each_entry_rcu(user, &intf->users, link) {
3569		if (!user->gets_events)
3570			continue;
3571
3572		recv_msg = ipmi_alloc_recv_msg();
3573		if (!recv_msg) {
3574			rcu_read_unlock();
3575			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3576						 link) {
3577				list_del(&recv_msg->link);
3578				ipmi_free_recv_msg(recv_msg);
3579			}
3580			/*
3581			 * We couldn't allocate memory for the
3582			 * message, so requeue it for handling
3583			 * later.
3584			 */
3585			rv = 1;
3586			goto out;
3587		}
3588
3589		deliver_count++;
3590
3591		copy_event_into_recv_msg(recv_msg, msg);
3592		recv_msg->user = user;
3593		kref_get(&user->refcount);
3594		list_add_tail(&(recv_msg->link), &msgs);
3595	}
3596	rcu_read_unlock();
3597
3598	if (deliver_count) {
3599		/* Now deliver all the messages. */
3600		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3601			list_del(&recv_msg->link);
3602			deliver_response(recv_msg);
3603		}
3604	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3605		/*
3606		 * No one is waiting to receive the message, so put it in the
3607		 * queue unless there are already too many things queued.
3608		 */
3609		recv_msg = ipmi_alloc_recv_msg();
3610		if (!recv_msg) {
3611			/*
3612			 * We couldn't allocate memory for the
3613			 * message, so requeue it for handling
3614			 * later.
3615			 */
3616			rv = 1;
3617			goto out;
3618		}
3619
3620		copy_event_into_recv_msg(recv_msg, msg);
3621		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3622		intf->waiting_events_count++;
3623	} else if (!intf->event_msg_printed) {
3624		/*
3625		 * There are too many things in the queue, so discard this
3626		 * message.
3627		 */
3628		printk(KERN_WARNING PFX "Event queue full, discarding"
3629		       " incoming events\n");
3630		intf->event_msg_printed = 1;
3631	}
3632
3633 out:
3634	spin_unlock_irqrestore(&(intf->events_lock), flags);
3635
3636	return rv;
3637}
3638
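/*
 * Handle a response from the local BMC (the system interface).  The
 * matching ipmi_recv_msg was stashed in msg->user_data when the
 * request was sent.
 */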
3639static int handle_bmc_rsp(ipmi_smi_t          intf,
3640			  struct ipmi_smi_msg *msg)
3641{
3642	struct ipmi_recv_msg *recv_msg;
3643	struct ipmi_user     *user;
3644
3645	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3646	if (recv_msg == NULL) {
3647		printk(KERN_WARNING
3648		       "IPMI message received with no owner. This\n"
3649		       "could be because of a malformed message, or\n"
3650		       "because of a hardware error.  Contact your\n"
3651		       "hardware vendor for assistance.\n");
3652		return 0;
3653	}
3654
3655	user = recv_msg->user;
3656	/* Make sure the user still exists. */
3657	if (user && !user->valid) {
3658		/* The user for the message went away, so give up. */
3659		ipmi_inc_stat(intf, unhandled_local_responses);
3660		ipmi_free_recv_msg(recv_msg);
3661	} else {
3662		struct ipmi_system_interface_addr *smi_addr;
3663
3664		ipmi_inc_stat(intf, handled_local_responses);
3665		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3666		recv_msg->msgid = msg->msgid;
3667		smi_addr = ((struct ipmi_system_interface_addr *)
3668			    &(recv_msg->addr));
3669		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3670		smi_addr->channel = IPMI_BMC_CHANNEL;
3671		smi_addr->lun = msg->rsp[0] & 3;
3672		recv_msg->msg.netfn = msg->rsp[0] >> 2;
3673		recv_msg->msg.cmd = msg->rsp[1];
3674		memcpy(recv_msg->msg_data,
3675		       &(msg->rsp[2]),
3676		       msg->rsp_size - 2);
3677		recv_msg->msg.data = recv_msg->msg_data;
3678		recv_msg->msg.data_len = msg->rsp_size - 2;
3679		deliver_response(recv_msg);
3680	}
3681
3682	return 0;
3683}
3684
3685/*
3686 * Handle a received message.  Return 1 if the message should be requeued,
3687 * 0 if the message should be freed, or -1 if the message should not
3688 * be freed or requeued.
3689 */
3690static int handle_one_recv_msg(ipmi_smi_t          intf,
3691			       struct ipmi_smi_msg *msg)
3692{
3693	int requeue;
3694	int chan;
3695
3696#ifdef DEBUG_MSGING
3697	int m;
3698	printk("Recv:");
3699	for (m = 0; m < msg->rsp_size; m++)
3700		printk(" %2.2x", msg->rsp[m]);
3701	printk("\n");
3702#endif
3703	if (msg->rsp_size < 2) {
3704		/* Message is too small to be correct. */
3705		printk(KERN_WARNING PFX "BMC returned too small a message"
3706		       " for netfn %x cmd %x, got %d bytes\n",
3707		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3708
3709		/* Generate an error response for the message. */
3710		msg->rsp[0] = msg->data[0] | (1 << 2);
3711		msg->rsp[1] = msg->data[1];
3712		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3713		msg->rsp_size = 3;
3714	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
3715		   || (msg->rsp[1] != msg->data[1])) {
3716		/*
3717		 * The NetFN and Command in the response are not even
3718		 * marginally correct.
3719		 */
3720		printk(KERN_WARNING PFX "BMC returned incorrect response,"
3721		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3722		       (msg->data[0] >> 2) | 1, msg->data[1],
3723		       msg->rsp[0] >> 2, msg->rsp[1]);
3724
3725		/* Generate an error response for the message. */
3726		msg->rsp[0] = msg->data[0] | (1 << 2);
3727		msg->rsp[1] = msg->data[1];
3728		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3729		msg->rsp_size = 3;
3730	}
3731
3732	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3733	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3734	    && (msg->user_data != NULL)) {
3735		/*
3736		 * It's a response to a response we sent.  For this we
3737		 * deliver a send message response to the user.
3738		 */
3739		struct ipmi_recv_msg     *recv_msg = msg->user_data;
3740
3741		requeue = 0;
3742		if (msg->rsp_size < 2)
3743			/* Message is too small to be correct. */
3744			goto out;
3745
3746		chan = msg->data[2] & 0x0f;
3747		if (chan >= IPMI_MAX_CHANNELS)
3748			/* Invalid channel number */
3749			goto out;
3750
3751		if (!recv_msg)
3752			goto out;
3753
3754		/* Make sure the user still exists. */
3755		if (!recv_msg->user || !recv_msg->user->valid)
3756			goto out;
3757
3758		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3759		recv_msg->msg.data = recv_msg->msg_data;
3760		recv_msg->msg.data_len = 1;
3761		recv_msg->msg_data[0] = msg->rsp[2];
3762		deliver_response(recv_msg);
3763	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3764		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
3765		/* It's from the receive queue. */
3766		chan = msg->rsp[3] & 0xf;
3767		if (chan >= IPMI_MAX_CHANNELS) {
3768			/* Invalid channel number */
3769			requeue = 0;
3770			goto out;
3771		}
3772
3773		/*
3774		 * We need to make sure the channels have been initialized.
3775		 * The channel_handler routine will set the "curr_channel"
3776		 * equal to or greater than IPMI_MAX_CHANNELS when all the
3777		 * channels for this interface have been initialized.
3778		 */
3779		if (intf->curr_channel < IPMI_MAX_CHANNELS) {
3780			requeue = 0; /* Throw the message away */
3781			goto out;
3782		}
3783
3784		switch (intf->channels[chan].medium) {
3785		case IPMI_CHANNEL_MEDIUM_IPMB:
3786			if (msg->rsp[4] & 0x04) {
3787				/*
3788				 * It's a response, so find the
3789				 * requesting message and send it up.
3790				 */
3791				requeue = handle_ipmb_get_msg_rsp(intf, msg);
3792			} else {
3793				/*
3794				 * It's a command to the SMS from some other
3795				 * entity.  Handle that.
3796				 */
3797				requeue = handle_ipmb_get_msg_cmd(intf, msg);
3798			}
3799			break;
3800
3801		case IPMI_CHANNEL_MEDIUM_8023LAN:
3802		case IPMI_CHANNEL_MEDIUM_ASYNC:
3803			if (msg->rsp[6] & 0x04) {
3804				/*
3805				 * It's a response, so find the
3806				 * requesting message and send it up.
3807				 */
3808				requeue = handle_lan_get_msg_rsp(intf, msg);
3809			} else {
3810				/*
3811				 * It's a command to the SMS from some other
3812				 * entity.  Handle that.
3813				 */
3814				requeue = handle_lan_get_msg_cmd(intf, msg);
3815			}
3816			break;
3817
3818		default:
3819			/* Check for OEM Channels.  Clients had better
3820			   register for these commands. */
3821			if ((intf->channels[chan].medium
3822			     >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
3823			    && (intf->channels[chan].medium
3824				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
3825				requeue = handle_oem_get_msg_cmd(intf, msg);
3826			} else {
3827				/*
3828				 * We don't handle the channel type, so just
3829				 * free the message.
3830				 */
3831				requeue = 0;
3832			}
3833		}
3834
3835	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3836		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
3837		/* It's an asynchronous event. */
3838		requeue = handle_read_event_rsp(intf, msg);
3839	} else {
3840		/* It's a response from the local BMC. */
3841		requeue = handle_bmc_rsp(intf, msg);
3842	}
3843
3844 out:
3845	return requeue;
3846}
3847
3848/*
3849 * If there are messages in the queue or pretimeouts, handle them.
3850 */
3851static void handle_new_recv_msgs(ipmi_smi_t intf)
3852{
3853	struct ipmi_smi_msg  *smi_msg;
3854	unsigned long        flags = 0;
3855	int                  rv;
3856	int                  run_to_completion = intf->run_to_completion;
3857
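	/*
	 * In run-to-completion mode (e.g. at panic time) everything runs
	 * in a single context, so the waiting_msgs_lock is skipped.
	 */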
3858	/* See if any waiting messages need to be processed. */
3859	if (!run_to_completion)
3860		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3861	while (!list_empty(&intf->waiting_msgs)) {
3862		smi_msg = list_entry(intf->waiting_msgs.next,
3863				     struct ipmi_smi_msg, link);
3864		list_del(&smi_msg->link);
3865		if (!run_to_completion)
3866			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3867		rv = handle_one_recv_msg(intf, smi_msg);
3868		if (!run_to_completion)
3869			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3870		if (rv == 0) {
3871			/* Message handled */
3872			ipmi_free_smi_msg(smi_msg);
3873		} else if (rv < 0) {
3874			/* Fatal error; leave the message off the list, but don't free it. */
3875		} else {
3876			/*
3877			 * To preserve message order, quit if we
3878			 * can't handle a message.
3879			 */
3880			list_add(&smi_msg->link, &intf->waiting_msgs);
3881			break;
3882		}
3883	}
3884	if (!run_to_completion)
3885		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3886
3887	/*
3888	 * If the pretimeout count is non-zero, decrement it by one and
3889	 * deliver a pretimeout to all the users.
3890	 */
3891	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
3892		ipmi_user_t user;
3893
3894		rcu_read_lock();
3895		list_for_each_entry_rcu(user, &intf->users, link) {
3896			if (user->handler->ipmi_watchdog_pretimeout)
3897				user->handler->ipmi_watchdog_pretimeout(
3898					user->handler_data);
3899		}
3900		rcu_read_unlock();
3901	}
3902}
3903
3904static void smi_recv_tasklet(unsigned long val)
3905{
3906	handle_new_recv_msgs((ipmi_smi_t) val);
3907}
3908
3909/* Handle a new message from the lower layer. */
3910void ipmi_smi_msg_received(ipmi_smi_t          intf,
3911			   struct ipmi_smi_msg *msg)
3912{
3913	unsigned long flags = 0; /* keep us warning-free. */
3914	int           run_to_completion;
3915
3916
3917	if ((msg->data_size >= 2)
3918	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3919	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
3920	    && (msg->user_data == NULL)) {
3921		/*
3922		 * This is the local response to a command send; start
3923		 * the timer for it.  The user_data will not be
3924		 * NULL if this is a response send, and we will let
3925		 * response sends just go through.
3926		 */
3927
3928		/*
3929		 * Check for errors; if we get certain errors (ones
3930		 * that basically mean we can try again later), we
3931		 * ignore them and start the timer.  Otherwise we
3932		 * report the error immediately.
3933		 */
3934		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3935		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3936		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3937		    && (msg->rsp[2] != IPMI_BUS_ERR)
3938		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
3939			int chan = msg->rsp[3] & 0xf;
3940
3941			/* Got an error sending the message, handle it. */
3942			if (chan >= IPMI_MAX_CHANNELS)
3943				; /* This shouldn't happen */
3944			else if ((intf->channels[chan].medium
3945				  == IPMI_CHANNEL_MEDIUM_8023LAN)
3946				 || (intf->channels[chan].medium
3947				     == IPMI_CHANNEL_MEDIUM_ASYNC))
3948				ipmi_inc_stat(intf, sent_lan_command_errs);
3949			else
3950				ipmi_inc_stat(intf, sent_ipmb_command_errs);
3951			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3952		} else
3953			/* The message was sent, start the timer. */
3954			intf_start_seq_timer(intf, msg->msgid);
3955
3956		ipmi_free_smi_msg(msg);
3957		goto out;
3958	}
3959
3960	/*
3961	 * To preserve message order, always tack this message onto
3962	 * the end of the waiting list.
3963	 */
3964	run_to_completion = intf->run_to_completion;
3965	if (!run_to_completion)
3966		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3967	list_add_tail(&msg->link, &intf->waiting_msgs);
3968	if (!run_to_completion)
3969		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3970
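	/* Kick the receive tasklet to process the newly queued message. */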
3971	tasklet_schedule(&intf->recv_tasklet);
3972 out:
3973	return;
3974}
3975EXPORT_SYMBOL(ipmi_smi_msg_received);
3976
3977void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3978{
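	/* Queue a single pretimeout and let the receive tasklet deliver it. */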
3979	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
3980	tasklet_schedule(&intf->recv_tasklet);
3981}
3982EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3983
3984static struct ipmi_smi_msg *
3985smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3986		  unsigned char seq, long seqid)
3987{
3988	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3989	if (!smi_msg)
3990		/*
3991		 * If we can't allocate the message, just return; we get
3992		 * 4 retries, so this should be OK.
3993		 */
3994		return NULL;
3995
3996	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3997	smi_msg->data_size = recv_msg->msg.data_len;
3998	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3999
4000#ifdef DEBUG_MSGING
4001	{
4002		int m;
4003		printk("Resend: ");
4004		for (m = 0; m < smi_msg->data_size; m++)
4005			printk(" %2.2x", smi_msg->data[m]);
4006		printk("\n");
4007	}
4008#endif
4009	return smi_msg;
4010}
4011
4012static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
4013			      struct list_head *timeouts, long timeout_period,
4014			      int slot, unsigned long *flags,
4015			      unsigned int *waiting_msgs)
4016{
4017	struct ipmi_recv_msg     *msg;
4018	struct ipmi_smi_handlers *handlers;
4019
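	/* An intf_num of -1 marks an interface that is not (yet) valid. */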
4020	if (intf->intf_num == -1)
4021		return;
4022
4023	if (!ent->inuse)
4024		return;
4025
4026	ent->timeout -= timeout_period;
4027	if (ent->timeout > 0) {
4028		(*waiting_msgs)++;
4029		return;
4030	}
4031
4032	if (ent->retries_left == 0) {
4033		/* The message has used all its retries. */
4034		ent->inuse = 0;
4035		msg = ent->recv_msg;
4036		list_add_tail(&msg->link, timeouts);
4037		if (ent->broadcast)
4038			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4039		else if (is_lan_addr(&ent->recv_msg->addr))
4040			ipmi_inc_stat(intf, timed_out_lan_commands);
4041		else
4042			ipmi_inc_stat(intf, timed_out_ipmb_commands);
4043	} else {
4044		struct ipmi_smi_msg *smi_msg;
4045		/* More retries, send again. */
4046
4047		(*waiting_msgs)++;
4048
4049		/*
4050		 * Start with the max timer, set to normal timer after
4051		 * the message is sent.
4052		 */
4053		ent->timeout = MAX_MSG_TIMEOUT;
4054		ent->retries_left--;
4055		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4056					    ent->seqid);
4057		if (!smi_msg) {
4058			if (is_lan_addr(&ent->recv_msg->addr))
4059				ipmi_inc_stat(intf,
4060					      dropped_rexmit_lan_commands);
4061			else
4062				ipmi_inc_stat(intf,
4063					      dropped_rexmit_ipmb_commands);
4064			return;
4065		}
4066
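		/*
		 * Drop the sequence table lock around the send; the lower
		 * layer may deliver responses that need this lock.
		 */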
4067		spin_unlock_irqrestore(&intf->seq_lock, *flags);
4068
4069		/*
4070		 * Send the new message.  We send with a zero
4071		 * priority.  It timed out, I doubt time is that
4072		 * critical now, and high priority messages are really
4073		 * only for messages to the local MC, which don't get
4074		 * resent.
4075		 */
4076		handlers = intf->handlers;
4077		if (handlers) {
4078			if (is_lan_addr(&ent->recv_msg->addr))
4079				ipmi_inc_stat(intf,
4080					      retransmitted_lan_commands);
4081			else
4082				ipmi_inc_stat(intf,
4083					      retransmitted_ipmb_commands);
4084
4085			intf->handlers->sender(intf->send_info,
4086					       smi_msg, 0);
4087		} else
4088			ipmi_free_smi_msg(smi_msg);
4089
4090		spin_lock_irqsave(&intf->seq_lock, *flags);
4091	}
4092}
4093
4094static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
4095{
4096	struct list_head     timeouts;
4097	struct ipmi_recv_msg *msg, *msg2;
4098	unsigned long        flags;
4099	int                  i;
4100	unsigned int         waiting_msgs = 0;
4101
4102	/*
4103	 * Go through the seq table and find any messages that
4104	 * have timed out, putting them in the timeouts
4105	 * list.
4106	 */
4107	INIT_LIST_HEAD(&timeouts);
4108	spin_lock_irqsave(&intf->seq_lock, flags);
4109	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4110		check_msg_timeout(intf, &(intf->seq_table[i]),
4111				  &timeouts, timeout_period, i,
4112				  &flags, &waiting_msgs);
4113	spin_unlock_irqrestore(&intf->seq_lock, flags);
4114
4115	list_for_each_entry_safe(msg, msg2, &timeouts, link)
4116		deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
4117
4118	/*
4119	 * Maintenance mode handling.  Check the timeout
4120	 * optimistically before we claim the lock.  It may
4121	 * mean a timeout gets missed occasionally, but that
4122	 * only means the timeout gets extended by one period
4123	 * in that case.  No big deal, and it avoids the lock
4124	 * most of the time.
4125	 */
4126	if (intf->auto_maintenance_timeout > 0) {
4127		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4128		if (intf->auto_maintenance_timeout > 0) {
4129			intf->auto_maintenance_timeout
4130				-= timeout_period;
4131			if (!intf->maintenance_mode
4132			    && (intf->auto_maintenance_timeout <= 0)) {
4133				intf->maintenance_mode_enable = false;
4134				maintenance_mode_update(intf);
4135			}
4136		}
4137		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4138				       flags);
4139	}
4140
4141	tasklet_schedule(&intf->recv_tasklet);
4142
4143	return waiting_msgs;
4144}
4145
4146static void ipmi_request_event(ipmi_smi_t intf)
4147{
4148	struct ipmi_smi_handlers *handlers;
4149
4150	/* No event requests when in maintenance mode. */
4151	if (intf->maintenance_mode_enable)
4152		return;
4153
4154	handlers = intf->handlers;
4155	if (handlers)
4156		handlers->request_events(intf->send_info);
4157}
4158
4159static struct timer_list ipmi_timer;
4160
4161static atomic_t stop_operation;
4162
4163static void ipmi_timeout(unsigned long data)
4164{
4165	ipmi_smi_t intf;
4166	int nt = 0;
4167
4168	if (atomic_read(&stop_operation))
4169		return;
4170
4171	rcu_read_lock();
4172	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4173		int lnt = 0;
4174
4175		if (atomic_read(&intf->event_waiters)) {
4176			intf->ticks_to_req_ev--;
4177			if (intf->ticks_to_req_ev == 0) {
4178				ipmi_request_event(intf);
4179				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4180			}
4181			lnt++;
4182		}
4183
4184		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4185
4186		lnt = !!lnt;
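		/* Tell the lower layer whether this interface still needs timer service. */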
4187		if (lnt != intf->last_needs_timer &&
4188					intf->handlers->set_need_watch)
4189			intf->handlers->set_need_watch(intf->send_info, lnt);
4190		intf->last_needs_timer = lnt;
4191
4192		nt += lnt;
4193	}
4194	rcu_read_unlock();
4195
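	/* Only re-arm the timer if some interface still needs it. */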
4196	if (nt)
4197		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4198}
4199
4200static void need_waiter(ipmi_smi_t intf)
4201{
4202	/* Racy, but worst case we start the timer twice. */
4203	if (!timer_pending(&ipmi_timer))
4204		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4205}
4206
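/* Debug counters used to warn about leaked messages at module unload. */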
4207static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4208static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4209
4210/* FIXME - convert these to slabs. */
4211static void free_smi_msg(struct ipmi_smi_msg *msg)
4212{
4213	atomic_dec(&smi_msg_inuse_count);
4214	kfree(msg);
4215}
4216
4217struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4218{
4219	struct ipmi_smi_msg *rv;
4220	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4221	if (rv) {
4222		rv->done = free_smi_msg;
4223		rv->user_data = NULL;
4224		atomic_inc(&smi_msg_inuse_count);
4225	}
4226	return rv;
4227}
4228EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4229
4230static void free_recv_msg(struct ipmi_recv_msg *msg)
4231{
4232	atomic_dec(&recv_msg_inuse_count);
4233	kfree(msg);
4234}
4235
4236static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4237{
4238	struct ipmi_recv_msg *rv;
4239
4240	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4241	if (rv) {
4242		rv->user = NULL;
4243		rv->done = free_recv_msg;
4244		atomic_inc(&recv_msg_inuse_count);
4245	}
4246	return rv;
4247}
4248
4249void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4250{
4251	if (msg->user)
4252		kref_put(&msg->user->refcount, free_user);
4253	msg->done(msg);
4254}
4255EXPORT_SYMBOL(ipmi_free_recv_msg);
4256
4257#ifdef CONFIG_IPMI_PANIC_EVENT
4258
4259static atomic_t panic_done_count = ATOMIC_INIT(0);
4260
4261static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4262{
4263	atomic_dec(&panic_done_count);
4264}
4265
4266static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4267{
4268	atomic_dec(&panic_done_count);
4269}
4270
4271/*
4272 * Inside a panic, send a message and wait for a response.
4273 */
4274static void ipmi_panic_request_and_wait(ipmi_smi_t           intf,
4275					struct ipmi_addr     *addr,
4276					struct kernel_ipmi_msg *msg)
4277{
4278	struct ipmi_smi_msg  smi_msg;
4279	struct ipmi_recv_msg recv_msg;
4280	int rv;
4281
4282	smi_msg.done = dummy_smi_done_handler;
4283	recv_msg.done = dummy_recv_done_handler;
4284	atomic_add(2, &panic_done_count);
4285	rv = i_ipmi_request(NULL,
4286			    intf,
4287			    addr,
4288			    0,
4289			    msg,
4290			    intf,
4291			    &smi_msg,
4292			    &recv_msg,
4293			    0,
4294			    intf->channels[0].address,
4295			    intf->channels[0].lun,
4296			    0, 1); /* Don't retry, and don't wait. */
4297	if (rv)
4298		atomic_sub(2, &panic_done_count);
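	/* Poll by hand until both done handlers have run (or the request failed). */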
4299	while (atomic_read(&panic_done_count) != 0)
4300		ipmi_poll(intf);
4301}
4302
4303#ifdef CONFIG_IPMI_PANIC_STRING
4304static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
4305{
4306	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4307	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4308	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4309	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4310		/* A get event receiver command, save it. */
4311		intf->event_receiver = msg->msg.data[1];
4312		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4313	}
4314}
4315
4316static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
4317{
4318	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4319	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4320	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4321	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4322		/*
4323		 * A get device id command, save if we are an event
4324		 * receiver or generator.
4325		 */
4326		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4327		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4328	}
4329}
4330#endif
4331
4332static void send_panic_events(char *str)
4333{
4334	struct kernel_ipmi_msg            msg;
4335	ipmi_smi_t                        intf;
4336	unsigned char                     data[16];
4337	struct ipmi_system_interface_addr *si;
4338	struct ipmi_addr                  addr;
4339
4340	si = (struct ipmi_system_interface_addr *) &addr;
4341	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4342	si->channel = IPMI_BMC_CHANNEL;
4343	si->lun = 0;
4344
4345	/* Fill in an event telling that we have failed. */
4346	msg.netfn = 0x04; /* Sensor or Event. */
4347	msg.cmd = 2; /* Platform event command. */
4348	msg.data = data;
4349	msg.data_len = 8;
	memset(data, 0, sizeof(data)); /* Bytes not filled in below are sent as zero. */
4350	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4351	data[1] = 0x03; /* This is for IPMI 1.0. */
4352	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4353	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4354	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4355
4356	/*
4357	 * Put a few breadcrumbs in.  Hopefully later we can add more things
4358	 * to make the panic events more useful.
4359	 */
4360	if (str) {
4361		data[3] = str[0];
4362		data[6] = str[1];
4363		data[7] = str[2];
4364	}
4365
4366	/* For every registered interface, send the event. */
4367	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4368		if (!intf->handlers)
4369			/* Interface is not ready. */
4370			continue;
4371
4372		intf->run_to_completion = 1;
4373		intf->handlers->set_run_to_completion(intf->send_info, 1);
4374		/* Send the event announcing the panic. */
4375		ipmi_panic_request_and_wait(intf, &addr, &msg);
4376	}
4377
4378#ifdef CONFIG_IPMI_PANIC_STRING
4379	/*
4380	 * On every interface, dump a bunch of OEM events holding the
4381	 * string.
4382	 */
4383	if (!str)
4384		return;
4385
4386	/* For every registered interface, send the event. */
4387	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4388		char                  *p = str;
4389		struct ipmi_ipmb_addr *ipmb;
4390		int                   j;
4391
4392		if (intf->intf_num == -1)
4393			/* Interface was not ready yet. */
4394			continue;
4395
4396		/*
4397		 * intf_num is used as a marker to tell if the
4398		 * interface is valid.  Thus we need a read barrier to
4399		 * make sure data fetched before checking intf_num
4400		 * won't be used.
4401		 */
4402		smp_rmb();
4403
4404		/*
4405		 * First job here is to figure out where to send the
4406		 * OEM events.  There's no way in IPMI to send OEM
4407		 * events using an event send command, so we have to
4408		 * find the SEL to put them in and stick them in
4409		 * there.
4410		 */
4411
4412		/* Get capabilities from the get device id. */
4413		intf->local_sel_device = 0;
4414		intf->local_event_generator = 0;
4415		intf->event_receiver = 0;
4416
4417		/* Request the device info from the local MC. */
4418		msg.netfn = IPMI_NETFN_APP_REQUEST;
4419		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4420		msg.data = NULL;
4421		msg.data_len = 0;
4422		intf->null_user_handler = device_id_fetcher;
4423		ipmi_panic_request_and_wait(intf, &addr, &msg);
4424
4425		if (intf->local_event_generator) {
4426			/* Request the event receiver from the local MC. */
4427			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4428			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4429			msg.data = NULL;
4430			msg.data_len = 0;
4431			intf->null_user_handler = event_receiver_fetcher;
4432			ipmi_panic_request_and_wait(intf, &addr, &msg);
4433		}
4434		intf->null_user_handler = NULL;
4435
4436		/*
4437		 * Validate the event receiver.  The low bit must not
4438		 * be 1 (it must be a valid IPMB address), it cannot
4439		 * be zero, and it must not be my address.
4440		 */
4441		if (((intf->event_receiver & 1) == 0)
4442		    && (intf->event_receiver != 0)
4443		    && (intf->event_receiver != intf->channels[0].address)) {
4444			/*
4445			 * The event receiver is valid, send an IPMB
4446			 * message.
4447			 */
4448			ipmb = (struct ipmi_ipmb_addr *) &addr;
4449			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4450			ipmb->channel = 0; /* FIXME - is this right? */
4451			ipmb->lun = intf->event_receiver_lun;
4452			ipmb->slave_addr = intf->event_receiver;
4453		} else if (intf->local_sel_device) {
4454			/*
4455			 * The event receiver was not valid (or was
4456			 * me), but I am an SEL device, so just dump
4457			 * the event in my SEL.
4458			 */
4459			si = (struct ipmi_system_interface_addr *) &addr;
4460			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4461			si->channel = IPMI_BMC_CHANNEL;
4462			si->lun = 0;
4463		} else
4464			continue; /* Nowhere to send the event. */
4465
4466		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4467		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4468		msg.data = data;
4469		msg.data_len = 16;
4470
4471		j = 0;
4472		while (*p) {
4473			int size = strlen(p);
4474
4475			if (size > 11)
4476				size = 11;
4477			data[0] = 0;
4478			data[1] = 0;
4479			data[2] = 0xf0; /* OEM event without timestamp. */
4480			data[3] = intf->channels[0].address;
4481			data[4] = j++; /* sequence # */
4482			/*
4483			 * Always give 11 bytes, so strncpy will fill
4484			 * it with zeroes for me.
4485			 */
4486			strncpy(data+5, p, 11);
4487			p += size;
4488
4489			ipmi_panic_request_and_wait(intf, &addr, &msg);
4490		}
4491	}
4492#endif /* CONFIG_IPMI_PANIC_STRING */
4493}
4494#endif /* CONFIG_IPMI_PANIC_EVENT */
4495
4496static int has_panicked;
4497
4498static int panic_event(struct notifier_block *this,
4499		       unsigned long         event,
4500		       void                  *ptr)
4501{
4502	ipmi_smi_t intf;
4503
4504	if (has_panicked)
4505		return NOTIFY_DONE;
4506	has_panicked = 1;
4507
4508	/* For every registered interface, set it to run to completion. */
4509	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4510		if (!intf->handlers)
4511			/* Interface is not ready. */
4512			continue;
4513
4514		intf->run_to_completion = 1;
4515		intf->handlers->set_run_to_completion(intf->send_info, 1);
4516	}
4517
4518#ifdef CONFIG_IPMI_PANIC_EVENT
4519	send_panic_events(ptr);
4520#endif
4521
4522	return NOTIFY_DONE;
4523}
4524
4525static struct notifier_block panic_block = {
4526	.notifier_call	= panic_event,
4527	.next		= NULL,
4528	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
4529};
4530
4531static int ipmi_init_msghandler(void)
4532{
4533	int rv;
4534
4535	if (initialized)
4536		return 0;
4537
4538	rv = driver_register(&ipmidriver.driver);
4539	if (rv) {
4540		printk(KERN_ERR PFX "Could not register IPMI driver\n");
4541		return rv;
4542	}
4543
4544	printk(KERN_INFO "ipmi message handler version "
4545	       IPMI_DRIVER_VERSION "\n");
4546
4547#ifdef CONFIG_PROC_FS
4548	proc_ipmi_root = proc_mkdir("ipmi", NULL);
4549	if (!proc_ipmi_root) {
4550		printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
		driver_unregister(&ipmidriver.driver);
4551		return -ENOMEM;
4552	}
4553
4554#endif /* CONFIG_PROC_FS */
4555
4556	setup_timer(&ipmi_timer, ipmi_timeout, 0);
4557	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4558
4559	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
4560
4561	initialized = 1;
4562
4563	return 0;
4564}
4565
4566static int __init ipmi_init_msghandler_mod(void)
4567{
4568	ipmi_init_msghandler();
4569	return 0;
4570}
4571
4572static void __exit cleanup_ipmi(void)
4573{
4574	int count;
4575
4576	if (!initialized)
4577		return;
4578
4579	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4580
4581	/*
4582	 * This can't be called if any interfaces exist, so no worry
4583	 * about shutting down the interfaces.
4584	 */
4585
4586	/*
4587	 * Tell the timer to stop, then wait for it to stop.  This
4588	 * avoids problems with race conditions removing the timer
4589	 * here.
4590	 */
4591	atomic_inc(&stop_operation);
4592	del_timer_sync(&ipmi_timer);
4593
4594#ifdef CONFIG_PROC_FS
4595	proc_remove(proc_ipmi_root);
4596#endif /* CONFIG_PROC_FS */
4597
4598	driver_unregister(&ipmidriver.driver);
4599
4600	initialized = 0;
4601
4602	/* Check for buffer leaks. */
4603	count = atomic_read(&smi_msg_inuse_count);
4604	if (count != 0)
4605		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4606		       count);
4607	count = atomic_read(&recv_msg_inuse_count);
4608	if (count != 0)
4609		printk(KERN_WARNING PFX "recv message count %d at exit\n",
4610		       count);
4611}
4612module_exit(cleanup_ipmi);
4613
4614module_init(ipmi_init_msghandler_mod);
4615MODULE_LICENSE("GPL");
4616MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4617MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
4618		   " interface.");
4619MODULE_VERSION(IPMI_DRIVER_VERSION);
4620