1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13  *
14  *  This program is free software; you can redistribute it and/or modify it
15  *  under the terms of the GNU General Public License as published by the
16  *  Free Software Foundation; either version 2 of the License, or (at your
17  *  option) any later version.
18  *
19  *
20  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  *  You should have received a copy of the GNU General Public License along
32  *  with this program; if not, write to the Free Software Foundation, Inc.,
33  *  675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35
36 /*
37  * This file holds the "policy" for the interface to the SMI state
38  * machine.  It does the configuration, handles timers and interrupts,
39  * and drives the real SMI state machine.
40  */
41
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi_smi.h>
61 #include <asm/io.h>
62 #include "ipmi_si_sm.h"
63 #include <linux/init.h>
64 #include <linux/dmi.h>
65 #include <linux/string.h>
66 #include <linux/ctype.h>
67
68 #ifdef CONFIG_PPC_OF
69 #include <asm/of_device.h>
70 #include <asm/of_platform.h>
71 #endif
72
73 #define PFX "ipmi_si: "
74
75 /* Measure times between events in the driver. */
76 #undef DEBUG_TIMING
77
78 /* Call every 10 ms. */
79 #define SI_TIMEOUT_TIME_USEC    10000
80 #define SI_USEC_PER_JIFFY       (1000000/HZ)
81 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
82 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
83                                        short timeout */
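/* A worked example (numbers are illustrative, not from the original
   source): with HZ=1000, SI_USEC_PER_JIFFY is 1000, so SI_TIMEOUT_JIFFIES
   is 10000/1000 = 10 ticks for the 10 ms poll interval; with HZ=250 the
   same integer math gives 10000/4000 = 2 ticks. */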
84
85 /* Bits for BMC global enables. */
86 #define IPMI_BMC_RCV_MSG_INTR     0x01
87 #define IPMI_BMC_EVT_MSG_INTR     0x02
88 #define IPMI_BMC_EVT_MSG_BUFF     0x04
89 #define IPMI_BMC_SYS_LOG          0x08
90
91 enum si_intf_state {
92         SI_NORMAL,
93         SI_GETTING_FLAGS,
94         SI_GETTING_EVENTS,
95         SI_CLEARING_FLAGS,
96         SI_CLEARING_FLAGS_THEN_SET_IRQ,
97         SI_GETTING_MESSAGES,
98         SI_ENABLE_INTERRUPTS1,
99         SI_ENABLE_INTERRUPTS2,
100         SI_DISABLE_INTERRUPTS1,
101         SI_DISABLE_INTERRUPTS2
102         /* FIXME - add watchdog stuff. */
103 };
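/* A rough sketch of the usual flow, as inferred from the handlers below
   (not an exhaustive state diagram): an attention in SI_NORMAL triggers a
   get-message-flags transaction (SI_GETTING_FLAGS); depending on the
   returned flags we move to SI_GETTING_MESSAGES, SI_GETTING_EVENTS or
   SI_CLEARING_FLAGS, each of which drops back to SI_NORMAL (possibly via
   handle_flags() again) once its transaction completes. */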
104
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG             2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
109
110 enum si_type {
111     SI_KCS, SI_SMIC, SI_BT
112 };
113 static char *si_to_str[] = { "kcs", "smic", "bt" };
114
115 #define DEVICE_NAME "ipmi_si"
116
117 static struct device_driver ipmi_driver =
118 {
119         .name = DEVICE_NAME,
120         .bus = &platform_bus_type
121 };
122
123 struct smi_info
124 {
125         int                    intf_num;
126         ipmi_smi_t             intf;
127         struct si_sm_data      *si_sm;
128         struct si_sm_handlers  *handlers;
129         enum si_type           si_type;
130         spinlock_t             si_lock;
131         spinlock_t             msg_lock;
132         struct list_head       xmit_msgs;
133         struct list_head       hp_xmit_msgs;
134         struct ipmi_smi_msg    *curr_msg;
135         enum si_intf_state     si_state;
136
137         /* Used to handle the various types of I/O that can occur with
138            IPMI */
139         struct si_sm_io io;
140         int (*io_setup)(struct smi_info *info);
141         void (*io_cleanup)(struct smi_info *info);
142         int (*irq_setup)(struct smi_info *info);
143         void (*irq_cleanup)(struct smi_info *info);
144         unsigned int io_size;
145         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146         void (*addr_source_cleanup)(struct smi_info *info);
147         void *addr_source_data;
148
149         /* Per-OEM handler, called from handle_flags().
150            Returns 1 when handle_flags() needs to be re-run
151            or 0 indicating it set si_state itself.
152         */
153         int (*oem_data_avail_handler)(struct smi_info *smi_info);
154
155         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
156            is set to hold the flags until we are done handling everything
157            from the flags. */
158 #define RECEIVE_MSG_AVAIL       0x01
159 #define EVENT_MSG_BUFFER_FULL   0x02
160 #define WDT_PRE_TIMEOUT_INT     0x08
161 #define OEM0_DATA_AVAIL     0x20
162 #define OEM1_DATA_AVAIL     0x40
163 #define OEM2_DATA_AVAIL     0x80
164 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
165                              OEM1_DATA_AVAIL | \
166                              OEM2_DATA_AVAIL)
167         unsigned char       msg_flags;
168
169         /* If set to true, this will request events the next time the
170            state machine is idle. */
171         atomic_t            req_events;
172
173         /* If true, run the state machine to completion on every send
174            call.  Generally used after a panic to make sure stuff goes
175            out. */
176         int                 run_to_completion;
177
178         /* The I/O port of an SI interface. */
179         int                 port;
180
181         /* The space between start addresses of the two ports.  For
182            instance, if the first port is 0xca2 and the spacing is 4, then
183            the second port is 0xca6. */
184         unsigned int        spacing;
185
186         /* Zero if no irq. */
187         int                 irq;
188
189         /* The timer for this si. */
190         struct timer_list   si_timer;
191
192         /* The time (in jiffies) the last timeout occurred at. */
193         unsigned long       last_timeout_jiffies;
194
195         /* Used to gracefully stop the timer without race conditions. */
196         atomic_t            stop_operation;
197
198         /* The driver will disable interrupts when it gets into a
199            situation where it cannot handle messages due to lack of
200            memory.  Once that situation clears up, it will re-enable
201            interrupts. */
202         int interrupt_disabled;
203
204         /* From the get device id response... */
205         struct ipmi_device_id device_id;
206
207         /* Driver model stuff. */
208         struct device *dev;
209         struct platform_device *pdev;
210
211          /* True if we allocated the device, false if it came from
212           * someplace else (like PCI). */
213         int dev_registered;
214
215         /* Slave address, could be reported from DMI. */
216         unsigned char slave_addr;
217
218         /* Counters and things for the proc filesystem. */
219         spinlock_t count_lock;
220         unsigned long short_timeouts;
221         unsigned long long_timeouts;
222         unsigned long timeout_restarts;
223         unsigned long idles;
224         unsigned long interrupts;
225         unsigned long attentions;
226         unsigned long flag_fetches;
227         unsigned long hosed_count;
228         unsigned long complete_transactions;
229         unsigned long events;
230         unsigned long watchdog_pretimeouts;
231         unsigned long incoming_messages;
232
233         struct task_struct *thread;
234
235         struct list_head link;
236 };
237
238 #define SI_MAX_PARMS 4
239
240 static int force_kipmid[SI_MAX_PARMS];
241 static int num_force_kipmid;
242
243 static int unload_when_empty = 1;
244
245 static int try_smi_init(struct smi_info *smi);
246 static void cleanup_one_si(struct smi_info *to_clean);
247
248 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
249 static int register_xaction_notifier(struct notifier_block * nb)
250 {
251         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
252 }
253
254 static void deliver_recv_msg(struct smi_info *smi_info,
255                              struct ipmi_smi_msg *msg)
256 {
257         /* Deliver the message to the upper layer with the lock
258            released. */
259         spin_unlock(&(smi_info->si_lock));
260         ipmi_smi_msg_received(smi_info->intf, msg);
261         spin_lock(&(smi_info->si_lock));
262 }
263
264 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
265 {
266         struct ipmi_smi_msg *msg = smi_info->curr_msg;
267
268         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
269                 cCode = IPMI_ERR_UNSPECIFIED;
270         /* else use it as is */
271
272         /* Make it a response. */
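        /* The first byte packs (netfn << 2) | LUN; a response netfn is the
           request netfn with its low bit set, which is what OR-ing the
           packed byte with 4 accomplishes. */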
273         msg->rsp[0] = msg->data[0] | 4;
274         msg->rsp[1] = msg->data[1];
275         msg->rsp[2] = cCode;
276         msg->rsp_size = 3;
277
278         smi_info->curr_msg = NULL;
279         deliver_recv_msg(smi_info, msg);
280 }
281
282 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
283 {
284         int              rv;
285         struct list_head *entry = NULL;
286 #ifdef DEBUG_TIMING
287         struct timeval t;
288 #endif
289
290         /* No need to save flags, we already have interrupts off and we
291            already hold the SMI lock. */
292         spin_lock(&(smi_info->msg_lock));
293
294         /* Pick the high priority queue first. */
295         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
296                 entry = smi_info->hp_xmit_msgs.next;
297         } else if (!list_empty(&(smi_info->xmit_msgs))) {
298                 entry = smi_info->xmit_msgs.next;
299         }
300
301         if (!entry) {
302                 smi_info->curr_msg = NULL;
303                 rv = SI_SM_IDLE;
304         } else {
305                 int err;
306
307                 list_del(entry);
308                 smi_info->curr_msg = list_entry(entry,
309                                                 struct ipmi_smi_msg,
310                                                 link);
311 #ifdef DEBUG_TIMING
312                 do_gettimeofday(&t);
313                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
314 #endif
315                 err = atomic_notifier_call_chain(&xaction_notifier_list,
316                                 0, smi_info);
317                 if (err & NOTIFY_STOP_MASK) {
318                         rv = SI_SM_CALL_WITHOUT_DELAY;
319                         goto out;
320                 }
321                 err = smi_info->handlers->start_transaction(
322                         smi_info->si_sm,
323                         smi_info->curr_msg->data,
324                         smi_info->curr_msg->data_size);
325                 if (err) {
326                         return_hosed_msg(smi_info, err);
327                 }
328
329                 rv = SI_SM_CALL_WITHOUT_DELAY;
330         }
331         out:
332         spin_unlock(&(smi_info->msg_lock));
333
334         return rv;
335 }
336
337 static void start_enable_irq(struct smi_info *smi_info)
338 {
339         unsigned char msg[2];
340
341         /* If we are enabling interrupts, we have to tell the
342            BMC to use them. */
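        /* This starts a read-modify-write of the BMC global enables: the
           get issued here is handled in SI_ENABLE_INTERRUPTS1, which ORs
           in the receive/event interrupt bits and writes them back,
           finishing in SI_ENABLE_INTERRUPTS2. */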
343         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
344         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
345
346         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
347         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
348 }
349
350 static void start_disable_irq(struct smi_info *smi_info)
351 {
352         unsigned char msg[2];
353
354         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
355         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
356
357         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
358         smi_info->si_state = SI_DISABLE_INTERRUPTS1;
359 }
360
361 static void start_clear_flags(struct smi_info *smi_info)
362 {
363         unsigned char msg[3];
364
365         /* Make sure the watchdog pre-timeout flag is not set at startup. */
366         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
367         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
368         msg[2] = WDT_PRE_TIMEOUT_INT;
369
370         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
371         smi_info->si_state = SI_CLEARING_FLAGS;
372 }
373
374 /* When we have a situation where we run out of memory and cannot
375    allocate messages, we just leave them in the BMC and run the system
376    polled until we can allocate some memory.  Once we have some
377    memory, we will re-enable the interrupt. */
378 static inline void disable_si_irq(struct smi_info *smi_info)
379 {
380         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
381                 start_disable_irq(smi_info);
382                 smi_info->interrupt_disabled = 1;
383         }
384 }
385
386 static inline void enable_si_irq(struct smi_info *smi_info)
387 {
388         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
389                 start_enable_irq(smi_info);
390                 smi_info->interrupt_disabled = 0;
391         }
392 }
393
394 static void handle_flags(struct smi_info *smi_info)
395 {
396  retry:
397         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
398                 /* Watchdog pre-timeout */
399                 spin_lock(&smi_info->count_lock);
400                 smi_info->watchdog_pretimeouts++;
401                 spin_unlock(&smi_info->count_lock);
402
403                 start_clear_flags(smi_info);
404                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
405                 spin_unlock(&(smi_info->si_lock));
406                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
407                 spin_lock(&(smi_info->si_lock));
408         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
409                 /* Messages available. */
410                 smi_info->curr_msg = ipmi_alloc_smi_msg();
411                 if (!smi_info->curr_msg) {
412                         disable_si_irq(smi_info);
413                         smi_info->si_state = SI_NORMAL;
414                         return;
415                 }
416                 enable_si_irq(smi_info);
417
418                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
419                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
420                 smi_info->curr_msg->data_size = 2;
421
422                 smi_info->handlers->start_transaction(
423                         smi_info->si_sm,
424                         smi_info->curr_msg->data,
425                         smi_info->curr_msg->data_size);
426                 smi_info->si_state = SI_GETTING_MESSAGES;
427         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
428                 /* Events available. */
429                 smi_info->curr_msg = ipmi_alloc_smi_msg();
430                 if (!smi_info->curr_msg) {
431                         disable_si_irq(smi_info);
432                         smi_info->si_state = SI_NORMAL;
433                         return;
434                 }
435                 enable_si_irq(smi_info);
436
437                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
438                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
439                 smi_info->curr_msg->data_size = 2;
440
441                 smi_info->handlers->start_transaction(
442                         smi_info->si_sm,
443                         smi_info->curr_msg->data,
444                         smi_info->curr_msg->data_size);
445                 smi_info->si_state = SI_GETTING_EVENTS;
446         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
447                    smi_info->oem_data_avail_handler) {
448                 if (smi_info->oem_data_avail_handler(smi_info))
449                         goto retry;
450         } else {
451                 smi_info->si_state = SI_NORMAL;
452         }
453 }
454
455 static void handle_transaction_done(struct smi_info *smi_info)
456 {
457         struct ipmi_smi_msg *msg;
458 #ifdef DEBUG_TIMING
459         struct timeval t;
460
461         do_gettimeofday(&t);
462         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
463 #endif
464         switch (smi_info->si_state) {
465         case SI_NORMAL:
466                 if (!smi_info->curr_msg)
467                         break;
468
469                 smi_info->curr_msg->rsp_size
470                         = smi_info->handlers->get_result(
471                                 smi_info->si_sm,
472                                 smi_info->curr_msg->rsp,
473                                 IPMI_MAX_MSG_LENGTH);
474
475                 /* Do this here because deliver_recv_msg() releases the
476                    lock, and a new message can be put in during the
477                    time the lock is released. */
478                 msg = smi_info->curr_msg;
479                 smi_info->curr_msg = NULL;
480                 deliver_recv_msg(smi_info, msg);
481                 break;
482
483         case SI_GETTING_FLAGS:
484         {
485                 unsigned char msg[4];
486                 unsigned int  len;
487
488                 /* We got the flags from the SMI, now handle them. */
489                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
490                 if (msg[2] != 0) {
491                         /* Error fetching flags, just give up for
492                            now. */
493                         smi_info->si_state = SI_NORMAL;
494                 } else if (len < 4) {
495                         /* Hmm, no flags.  That's technically illegal, but
496                            don't use uninitialized data. */
497                         smi_info->si_state = SI_NORMAL;
498                 } else {
499                         smi_info->msg_flags = msg[3];
500                         handle_flags(smi_info);
501                 }
502                 break;
503         }
504
505         case SI_CLEARING_FLAGS:
506         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
507         {
508                 unsigned char msg[3];
509
510                 /* We cleared the flags. */
511                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
512                 if (msg[2] != 0) {
513                         /* Error clearing flags */
514                         printk(KERN_WARNING
515                                "ipmi_si: Error clearing flags: %2.2x\n",
516                                msg[2]);
517                 }
518                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
519                         start_enable_irq(smi_info);
520                 else
521                         smi_info->si_state = SI_NORMAL;
522                 break;
523         }
524
525         case SI_GETTING_EVENTS:
526         {
527                 smi_info->curr_msg->rsp_size
528                         = smi_info->handlers->get_result(
529                                 smi_info->si_sm,
530                                 smi_info->curr_msg->rsp,
531                                 IPMI_MAX_MSG_LENGTH);
532
533                 /* Do this here because deliver_recv_msg() releases the
534                    lock, and a new message can be put in during the
535                    time the lock is released. */
536                 msg = smi_info->curr_msg;
537                 smi_info->curr_msg = NULL;
538                 if (msg->rsp[2] != 0) {
539                         /* Error getting event, probably done. */
540                         msg->done(msg);
541
542                         /* Take off the event flag. */
543                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
544                         handle_flags(smi_info);
545                 } else {
546                         spin_lock(&smi_info->count_lock);
547                         smi_info->events++;
548                         spin_unlock(&smi_info->count_lock);
549
550                         /* Do this before we deliver the message
551                            because delivering the message releases the
552                            lock and something else can mess with the
553                            state. */
554                         handle_flags(smi_info);
555
556                         deliver_recv_msg(smi_info, msg);
557                 }
558                 break;
559         }
560
561         case SI_GETTING_MESSAGES:
562         {
563                 smi_info->curr_msg->rsp_size
564                         = smi_info->handlers->get_result(
565                                 smi_info->si_sm,
566                                 smi_info->curr_msg->rsp,
567                                 IPMI_MAX_MSG_LENGTH);
568
569                 /* Do this here because deliver_recv_msg() releases the
570                    lock, and a new message can be put in during the
571                    time the lock is released. */
572                 msg = smi_info->curr_msg;
573                 smi_info->curr_msg = NULL;
574                 if (msg->rsp[2] != 0) {
575                         /* Error getting message, probably done. */
576                         msg->done(msg);
577
578                         /* Take off the msg flag. */
579                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
580                         handle_flags(smi_info);
581                 } else {
582                         spin_lock(&smi_info->count_lock);
583                         smi_info->incoming_messages++;
584                         spin_unlock(&smi_info->count_lock);
585
586                         /* Do this before we deliver the message
587                            because delivering the message releases the
588                            lock and something else can mess with the
589                            state. */
590                         handle_flags(smi_info);
591
592                         deliver_recv_msg(smi_info, msg);
593                 }
594                 break;
595         }
596
597         case SI_ENABLE_INTERRUPTS1:
598         {
599                 unsigned char msg[4];
600
601                 /* We got the global enables, now turn on the interrupt bits. */
602                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
603                 if (msg[2] != 0) {
604                         printk(KERN_WARNING
605                                "ipmi_si: Could not enable interrupts"
606                                ", failed get, using polled mode.\n");
607                         smi_info->si_state = SI_NORMAL;
608                 } else {
609                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
610                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
611                         msg[2] = (msg[3] |
612                                   IPMI_BMC_RCV_MSG_INTR |
613                                   IPMI_BMC_EVT_MSG_INTR);
614                         smi_info->handlers->start_transaction(
615                                 smi_info->si_sm, msg, 3);
616                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
617                 }
618                 break;
619         }
620
621         case SI_ENABLE_INTERRUPTS2:
622         {
623                 unsigned char msg[4];
624
625                 /* We got the response to the set-enables request. */
626                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
627                 if (msg[2] != 0) {
628                         printk(KERN_WARNING
629                                "ipmi_si: Could not enable interrupts"
630                                ", failed set, using polled mode.\n");
631                 }
632                 smi_info->si_state = SI_NORMAL;
633                 break;
634         }
635
636         case SI_DISABLE_INTERRUPTS1:
637         {
638                 unsigned char msg[4];
639
640                 /* We got the global enables, now clear the interrupt bits. */
641                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
642                 if (msg[2] != 0) {
643                         printk(KERN_WARNING
644                                "ipmi_si: Could not disable interrupts"
645                                ", failed get.\n");
646                         smi_info->si_state = SI_NORMAL;
647                 } else {
648                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
649                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
650                         msg[2] = (msg[3] &
651                                   ~(IPMI_BMC_RCV_MSG_INTR |
652                                     IPMI_BMC_EVT_MSG_INTR));
653                         smi_info->handlers->start_transaction(
654                                 smi_info->si_sm, msg, 3);
655                         smi_info->si_state = SI_DISABLE_INTERRUPTS2;
656                 }
657                 break;
658         }
659
660         case SI_DISABLE_INTERRUPTS2:
661         {
662                 unsigned char msg[4];
663
664                 /* We got the response to the set-enables request. */
665                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
666                 if (msg[2] != 0) {
667                         printk(KERN_WARNING
668                                "ipmi_si: Could not disable interrupts"
669                                ", failed set.\n");
670                 }
671                 smi_info->si_state = SI_NORMAL;
672                 break;
673         }
674         }
675 }
676
677 /* Called on timeouts and events.  Timeouts should pass the elapsed
678    time, interrupts should pass in zero. */
679 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
680                                            int time)
681 {
682         enum si_sm_result si_sm_result;
683
684  restart:
685         /* There used to be a loop here that waited a little while
686            (around 25us) before giving up.  That turned out to be
687            pointless, the minimum delays I was seeing were in the 300us
688            range, which is far too long to wait in an interrupt.  So
689            we just run until the state machine tells us something
690            happened or it needs a delay. */
691         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
692         time = 0;
693         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
694         {
695                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
696         }
697
698         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
699         {
700                 spin_lock(&smi_info->count_lock);
701                 smi_info->complete_transactions++;
702                 spin_unlock(&smi_info->count_lock);
703
704                 handle_transaction_done(smi_info);
705                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
706         }
707         else if (si_sm_result == SI_SM_HOSED)
708         {
709                 spin_lock(&smi_info->count_lock);
710                 smi_info->hosed_count++;
711                 spin_unlock(&smi_info->count_lock);
712
713                 /* Do this before return_hosed_msg(), because that
714                    releases the lock. */
715                 smi_info->si_state = SI_NORMAL;
716                 if (smi_info->curr_msg != NULL) {
717                         /* If we were handling a user message, format
718                            a response to send to the upper layer to
719                            tell it about the error. */
720                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
721                 }
722                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
723         }
724
725         /* We prefer handling attn over new messages. */
726         if (si_sm_result == SI_SM_ATTN)
727         {
728                 unsigned char msg[2];
729
730                 spin_lock(&smi_info->count_lock);
731                 smi_info->attentions++;
732                 spin_unlock(&smi_info->count_lock);
733
734                 /* Got an attn, send down a get message flags to see
735                    what's causing it.  It would be better to handle
736                    this in the upper layer, but due to the way
737                    interrupts work with the SMI, that's not really
738                    possible. */
739                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
740                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
741
742                 smi_info->handlers->start_transaction(
743                         smi_info->si_sm, msg, 2);
744                 smi_info->si_state = SI_GETTING_FLAGS;
745                 goto restart;
746         }
747
748         /* If we are currently idle, try to start the next message. */
749         if (si_sm_result == SI_SM_IDLE) {
750                 spin_lock(&smi_info->count_lock);
751                 smi_info->idles++;
752                 spin_unlock(&smi_info->count_lock);
753
754                 si_sm_result = start_next_msg(smi_info);
755                 if (si_sm_result != SI_SM_IDLE)
756                         goto restart;
757         }
758
759         if ((si_sm_result == SI_SM_IDLE)
760             && (atomic_read(&smi_info->req_events)))
761         {
762                 /* We are idle and the upper layer requested that I fetch
763                    events, so do so. */
764                 atomic_set(&smi_info->req_events, 0);
765
766                 smi_info->curr_msg = ipmi_alloc_smi_msg();
767                 if (!smi_info->curr_msg)
768                         goto out;
769
770                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
771                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
772                 smi_info->curr_msg->data_size = 2;
773
774                 smi_info->handlers->start_transaction(
775                         smi_info->si_sm,
776                         smi_info->curr_msg->data,
777                         smi_info->curr_msg->data_size);
778                 smi_info->si_state = SI_GETTING_EVENTS;
779                 goto restart;
780         }
781  out:
782         return si_sm_result;
783 }
784
785 static void sender(void                *send_info,
786                    struct ipmi_smi_msg *msg,
787                    int                 priority)
788 {
789         struct smi_info   *smi_info = send_info;
790         enum si_sm_result result;
791         unsigned long     flags;
792 #ifdef DEBUG_TIMING
793         struct timeval    t;
794 #endif
795
796         if (atomic_read(&smi_info->stop_operation)) {
797                 msg->rsp[0] = msg->data[0] | 4;
798                 msg->rsp[1] = msg->data[1];
799                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
800                 msg->rsp_size = 3;
801                 deliver_recv_msg(smi_info, msg);
802                 return;
803         }
804
805         spin_lock_irqsave(&(smi_info->msg_lock), flags);
806 #ifdef DEBUG_TIMING
807         do_gettimeofday(&t);
808         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
809 #endif
810
811         if (smi_info->run_to_completion) {
812                 /* If we are running to completion, then throw it in
813                    the list and run transactions until everything is
814                    clear.  Priority doesn't matter here. */
815                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
816
817                 /* We have to release the msg lock and claim the smi
818                    lock in this case, because of race conditions. */
819                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
820
821                 spin_lock_irqsave(&(smi_info->si_lock), flags);
822                 result = smi_event_handler(smi_info, 0);
823                 while (result != SI_SM_IDLE) {
824                         udelay(SI_SHORT_TIMEOUT_USEC);
825                         result = smi_event_handler(smi_info,
826                                                    SI_SHORT_TIMEOUT_USEC);
827                 }
828                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
829                 return;
830         } else {
831                 if (priority > 0) {
832                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
833                 } else {
834                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
835                 }
836         }
837         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
838
839         spin_lock_irqsave(&(smi_info->si_lock), flags);
840         if ((smi_info->si_state == SI_NORMAL)
841             && (smi_info->curr_msg == NULL))
842         {
843                 start_next_msg(smi_info);
844         }
845         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
846 }
847
848 static void set_run_to_completion(void *send_info, int i_run_to_completion)
849 {
850         struct smi_info   *smi_info = send_info;
851         enum si_sm_result result;
852         unsigned long     flags;
853
854         spin_lock_irqsave(&(smi_info->si_lock), flags);
855
856         smi_info->run_to_completion = i_run_to_completion;
857         if (i_run_to_completion) {
858                 result = smi_event_handler(smi_info, 0);
859                 while (result != SI_SM_IDLE) {
860                         udelay(SI_SHORT_TIMEOUT_USEC);
861                         result = smi_event_handler(smi_info,
862                                                    SI_SHORT_TIMEOUT_USEC);
863                 }
864         }
865
866         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
867 }
868
869 static int ipmi_thread(void *data)
870 {
871         struct smi_info *smi_info = data;
872         unsigned long flags;
873         enum si_sm_result smi_result;
874
875         set_user_nice(current, 19);
876         while (!kthread_should_stop()) {
877                 spin_lock_irqsave(&(smi_info->si_lock), flags);
878                 smi_result = smi_event_handler(smi_info, 0);
879                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
880                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
881                         /* do nothing */
882                 }
883                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
884                         schedule();
885                 else
886                         schedule_timeout_interruptible(1);
887         }
888         return 0;
889 }
890
891
892 static void poll(void *send_info)
893 {
894         struct smi_info *smi_info = send_info;
895
896         /*
897          * Make sure there is some delay in the poll loop so we can
898          * drive time forward and timeout things.
899          */
900         udelay(10);
901         smi_event_handler(smi_info, 10);
902 }
903
904 static void request_events(void *send_info)
905 {
906         struct smi_info *smi_info = send_info;
907
908         if (atomic_read(&smi_info->stop_operation))
909                 return;
910
911         atomic_set(&smi_info->req_events, 1);
912 }
913
914 static int initialized;
915
916 static void smi_timeout(unsigned long data)
917 {
918         struct smi_info   *smi_info = (struct smi_info *) data;
919         enum si_sm_result smi_result;
920         unsigned long     flags;
921         unsigned long     jiffies_now;
922         long              time_diff;
923 #ifdef DEBUG_TIMING
924         struct timeval    t;
925 #endif
926
927         spin_lock_irqsave(&(smi_info->si_lock), flags);
928 #ifdef DEBUG_TIMING
929         do_gettimeofday(&t);
930         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
931 #endif
932         jiffies_now = jiffies;
933         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
934                      * SI_USEC_PER_JIFFY);
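        /* Worked example (assuming HZ=1000, so SI_USEC_PER_JIFFY is 1000):
           a gap of 10 jiffies since the last run gives a time_diff of
           10 * 1000 = 10000 usec, i.e. SI_TIMEOUT_TIME_USEC. */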
935         smi_result = smi_event_handler(smi_info, time_diff);
936
937         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
938
939         smi_info->last_timeout_jiffies = jiffies_now;
940
941         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
942                 /* Running with interrupts, only do long timeouts. */
943                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
944                 spin_lock_irqsave(&smi_info->count_lock, flags);
945                 smi_info->long_timeouts++;
946                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
947                 goto do_add_timer;
948         }
949
950         /* If the state machine asks for a short delay, then shorten
951            the timer timeout. */
952         if (smi_result == SI_SM_CALL_WITH_DELAY) {
953                 spin_lock_irqsave(&smi_info->count_lock, flags);
954                 smi_info->short_timeouts++;
955                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
956                 smi_info->si_timer.expires = jiffies + 1;
957         } else {
958                 spin_lock_irqsave(&smi_info->count_lock, flags);
959                 smi_info->long_timeouts++;
960                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
961                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
962         }
963
964  do_add_timer:
965         add_timer(&(smi_info->si_timer));
966 }
967
968 static irqreturn_t si_irq_handler(int irq, void *data)
969 {
970         struct smi_info *smi_info = data;
971         unsigned long   flags;
972 #ifdef DEBUG_TIMING
973         struct timeval  t;
974 #endif
975
976         spin_lock_irqsave(&(smi_info->si_lock), flags);
977
978         spin_lock(&smi_info->count_lock);
979         smi_info->interrupts++;
980         spin_unlock(&smi_info->count_lock);
981
982 #ifdef DEBUG_TIMING
983         do_gettimeofday(&t);
984         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
985 #endif
986         smi_event_handler(smi_info, 0);
987         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
988         return IRQ_HANDLED;
989 }
990
991 static irqreturn_t si_bt_irq_handler(int irq, void *data)
992 {
993         struct smi_info *smi_info = data;
994         /* We need to clear the IRQ flag for the BT interface. */
995         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
996                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
997                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
998         return si_irq_handler(irq, data);
999 }
1000
1001 static int smi_start_processing(void       *send_info,
1002                                 ipmi_smi_t intf)
1003 {
1004         struct smi_info *new_smi = send_info;
1005         int             enable = 0;
1006
1007         new_smi->intf = intf;
1008
1009         /* Try to claim any interrupts. */
1010         if (new_smi->irq_setup)
1011                 new_smi->irq_setup(new_smi);
1012
1013         /* Set up the timer that drives the interface. */
1014         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1015         new_smi->last_timeout_jiffies = jiffies;
1016         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1017
1018         /*
1019          * Check if the user forcefully enabled the daemon.
1020          */
1021         if (new_smi->intf_num < num_force_kipmid)
1022                 enable = force_kipmid[new_smi->intf_num];
1023         /*
1024          * The BT interface is efficient enough to not need a thread,
1025          * and there is no need for a thread if we have interrupts.
1026          */
1027         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1028                 enable = 1;
1029
1030         if (enable) {
1031                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1032                                               "kipmi%d", new_smi->intf_num);
1033                 if (IS_ERR(new_smi->thread)) {
1034                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
1035                                " kernel thread due to error %ld, only using"
1036                                " timers to drive the interface\n",
1037                                PTR_ERR(new_smi->thread));
1038                         new_smi->thread = NULL;
1039                 }
1040         }
1041
1042         return 0;
1043 }
1044
1045 static void set_maintenance_mode(void *send_info, int enable)
1046 {
1047         struct smi_info   *smi_info = send_info;
1048
1049         if (!enable)
1050                 atomic_set(&smi_info->req_events, 0);
1051 }
1052
1053 static struct ipmi_smi_handlers handlers =
1054 {
1055         .owner                  = THIS_MODULE,
1056         .start_processing       = smi_start_processing,
1057         .sender                 = sender,
1058         .request_events         = request_events,
1059         .set_maintenance_mode   = set_maintenance_mode,
1060         .set_run_to_completion  = set_run_to_completion,
1061         .poll                   = poll,
1062 };
1063
1064 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1065    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
1066
1067 static LIST_HEAD(smi_infos);
1068 static DEFINE_MUTEX(smi_infos_lock);
1069 static int smi_num; /* Used to sequence the SMIs */
1070
1071 #define DEFAULT_REGSPACING      1
1072 #define DEFAULT_REGSIZE         1
1073
1074 static int           si_trydefaults = 1;
1075 static char          *si_type[SI_MAX_PARMS];
1076 #define MAX_SI_TYPE_STR 30
1077 static char          si_type_str[MAX_SI_TYPE_STR];
1078 static unsigned long addrs[SI_MAX_PARMS];
1079 static unsigned int num_addrs;
1080 static unsigned int  ports[SI_MAX_PARMS];
1081 static unsigned int num_ports;
1082 static int           irqs[SI_MAX_PARMS];
1083 static unsigned int num_irqs;
1084 static int           regspacings[SI_MAX_PARMS];
1085 static unsigned int num_regspacings;
1086 static int           regsizes[SI_MAX_PARMS];
1087 static unsigned int num_regsizes;
1088 static int           regshifts[SI_MAX_PARMS];
1089 static unsigned int num_regshifts;
1090 static int slave_addrs[SI_MAX_PARMS];
1091 static unsigned int num_slave_addrs;
1092
1093 #define IPMI_IO_ADDR_SPACE  0
1094 #define IPMI_MEM_ADDR_SPACE 1
1095 static char *addr_space_to_str[] = { "i/o", "mem" };
1096
1097 static int hotmod_handler(const char *val, struct kernel_param *kp);
1098
1099 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1100 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1101                  " Documentation/IPMI.txt in the kernel sources for the"
1102                  " gory details.");
1103
1104 module_param_named(trydefaults, si_trydefaults, bool, 0);
1105 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1106                  " default scan of the KCS and SMIC interface at the standard"
1107                  " address");
1108 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1109 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1110                  " interface separated by commas.  The types are 'kcs',"
1111                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1112                  " the first interface to kcs and the second to bt");
1113 module_param_array(addrs, ulong, &num_addrs, 0);
1114 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1115                  " addresses separated by commas.  Only use if an interface"
1116                  " is in memory.  Otherwise, set it to zero or leave"
1117                  " it blank.");
1118 module_param_array(ports, uint, &num_ports, 0);
1119 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1120                  " addresses separated by commas.  Only use if an interface"
1121                  " is a port.  Otherwise, set it to zero or leave"
1122                  " it blank.");
1123 module_param_array(irqs, int, &num_irqs, 0);
1124 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1125                  " addresses separated by commas.  Only use if an interface"
1126                  " has an interrupt.  Otherwise, set it to zero or leave"
1127                  " it blank.");
1128 module_param_array(regspacings, int, &num_regspacings, 0);
1129 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1130                  " and each successive register used by the interface.  For"
1131                  " instance, if the start address is 0xca2 and the spacing"
1132                  " is 2, then the second address is at 0xca4.  Defaults"
1133                  " to 1.");
1134 module_param_array(regsizes, int, &num_regsizes, 0);
1135 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1136                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1137                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1138                  " the 8-bit IPMI register has to be read from a larger"
1139                  " register.");
1140 module_param_array(regshifts, int, &num_regshifts, 0);
1141 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1142                  " IPMI register, in bits.  For instance, if the data"
1143                  " is read from a 32-bit word and the IPMI data is in"
1144                  " bit 8-15, then the shift would be 8");
1145 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1146 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1147                  " the controller.  Normally this is 0x20, but can be"
1148                  " overridden by this parm.  This is an array indexed"
1149                  " by interface number.");
1150 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1151 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1152                  " disabled(0).  Normally the IPMI driver auto-detects"
1153                  " this, but the value may be overridden by this parm.");
1154 module_param(unload_when_empty, int, 0);
1155 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1156                  " specified or found, default is 1.  Setting to 0"
1157                  " is useful for hot add of devices using hotmod.");
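/* Illustrative load-time use of the parameters above (values are examples
   only): a hand-configured KCS interface at I/O port 0xca2 using IRQ 9
   could be set up with:

       modprobe ipmi_si type=kcs ports=0xca2 irqs=9
 */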
1158
1159
1160 static void std_irq_cleanup(struct smi_info *info)
1161 {
1162         if (info->si_type == SI_BT)
1163                 /* Disable the interrupt in the BT interface. */
1164                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1165         free_irq(info->irq, info);
1166 }
1167
1168 static int std_irq_setup(struct smi_info *info)
1169 {
1170         int rv;
1171
1172         if (!info->irq)
1173                 return 0;
1174
1175         if (info->si_type == SI_BT) {
1176                 rv = request_irq(info->irq,
1177                                  si_bt_irq_handler,
1178                                  IRQF_SHARED | IRQF_DISABLED,
1179                                  DEVICE_NAME,
1180                                  info);
1181                 if (!rv)
1182                         /* Enable the interrupt in the BT interface. */
1183                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1184                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1185         } else
1186                 rv = request_irq(info->irq,
1187                                  si_irq_handler,
1188                                  IRQF_SHARED | IRQF_DISABLED,
1189                                  DEVICE_NAME,
1190                                  info);
1191         if (rv) {
1192                 printk(KERN_WARNING
1193                        "ipmi_si: %s unable to claim interrupt %d,"
1194                        " running polled\n",
1195                        DEVICE_NAME, info->irq);
1196                 info->irq = 0;
1197         } else {
1198                 info->irq_cleanup = std_irq_cleanup;
1199                 printk("  Using irq %d\n", info->irq);
1200         }
1201
1202         return rv;
1203 }
1204
1205 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1206 {
1207         unsigned int addr = io->addr_data;
1208
1209         return inb(addr + (offset * io->regspacing));
1210 }
1211
1212 static void port_outb(struct si_sm_io *io, unsigned int offset,
1213                       unsigned char b)
1214 {
1215         unsigned int addr = io->addr_data;
1216
1217         outb(b, addr + (offset * io->regspacing));
1218 }
1219
1220 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1221 {
1222         unsigned int addr = io->addr_data;
1223
1224         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1225 }
1226
1227 static void port_outw(struct si_sm_io *io, unsigned int offset,
1228                       unsigned char b)
1229 {
1230         unsigned int addr = io->addr_data;
1231
1232         outw(b << io->regshift, addr + (offset * io->regspacing));
1233 }
1234
1235 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1236 {
1237         unsigned int addr = io->addr_data;
1238
1239         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1240 }
1241
1242 static void port_outl(struct si_sm_io *io, unsigned int offset,
1243                       unsigned char b)
1244 {
1245         unsigned int addr = io->addr_data;
1246
1247         outl(b << io->regshift, addr+(offset * io->regspacing));
1248 }
1249
1250 static void port_cleanup(struct smi_info *info)
1251 {
1252         unsigned int addr = info->io.addr_data;
1253         int          idx;
1254
1255         if (addr) {
1256                 for (idx = 0; idx < info->io_size; idx++) {
1257                         release_region(addr + idx * info->io.regspacing,
1258                                        info->io.regsize);
1259                 }
1260         }
1261 }
1262
1263 static int port_setup(struct smi_info *info)
1264 {
1265         unsigned int addr = info->io.addr_data;
1266         int          idx;
1267
1268         if (!addr)
1269                 return -ENODEV;
1270
1271         info->io_cleanup = port_cleanup;
1272
1273         /* Figure out the actual inb/inw/inl/etc routine to use based
1274            upon the register size. */
1275         switch (info->io.regsize) {
1276         case 1:
1277                 info->io.inputb = port_inb;
1278                 info->io.outputb = port_outb;
1279                 break;
1280         case 2:
1281                 info->io.inputb = port_inw;
1282                 info->io.outputb = port_outw;
1283                 break;
1284         case 4:
1285                 info->io.inputb = port_inl;
1286                 info->io.outputb = port_outl;
1287                 break;
1288         default:
1289                 printk("ipmi_si: Invalid register size: %d\n",
1290                        info->io.regsize);
1291                 return -EINVAL;
1292         }
1293
1294         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1295          * tables.  This causes problems when trying to register the
1296          * entire I/O region.  Therefore we must register each I/O
1297          * port separately.
1298          */
1299         for (idx = 0; idx < info->io_size; idx++) {
1300                 if (request_region(addr + idx * info->io.regspacing,
1301                                    info->io.regsize, DEVICE_NAME) == NULL) {
1302                         /* Undo allocations */
1303                         while (idx--) {
1304                                 release_region(addr + idx * info->io.regspacing,
1305                                                info->io.regsize);
1306                         }
1307                         return -EIO;
1308                 }
1309         }
1310         return 0;
1311 }
1312
1313 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1314 {
1315         return readb((io->addr)+(offset * io->regspacing));
1316 }
1317
1318 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1319                      unsigned char b)
1320 {
1321         writeb(b, (io->addr)+(offset * io->regspacing));
1322 }
1323
1324 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1325 {
1326         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1327                 & 0xff;
1328 }
1329
1330 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1331                      unsigned char b)
1332 {
1333         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1334 }
1335
1336 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1337 {
1338         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1339                 & 0xff;
1340 }
1341
1342 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1343                      unsigned char b)
1344 {
1345         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1346 }
1347
1348 #ifdef readq
1349 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1350 {
1351         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1352                 & 0xff;
1353 }
1354
1355 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1356                      unsigned char b)
1357 {
1358         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1359 }
1360 #endif
1361
1362 static void mem_cleanup(struct smi_info *info)
1363 {
1364         unsigned long addr = info->io.addr_data;
1365         int           mapsize;
1366
1367         if (info->io.addr) {
1368                 iounmap(info->io.addr);
1369
1370                 mapsize = ((info->io_size * info->io.regspacing)
1371                            - (info->io.regspacing - info->io.regsize));
1372
1373                 release_mem_region(addr, mapsize);
1374         }
1375 }
1376
1377 static int mem_setup(struct smi_info *info)
1378 {
1379         unsigned long addr = info->io.addr_data;
1380         int           mapsize;
1381
1382         if (!addr)
1383                 return -ENODEV;
1384
1385         info->io_cleanup = mem_cleanup;
1386
1387         /* Figure out the actual readb/readw/readl/etc routine to use based
1388            upon the register size. */
1389         switch (info->io.regsize) {
1390         case 1:
1391                 info->io.inputb = intf_mem_inb;
1392                 info->io.outputb = intf_mem_outb;
1393                 break;
1394         case 2:
1395                 info->io.inputb = intf_mem_inw;
1396                 info->io.outputb = intf_mem_outw;
1397                 break;
1398         case 4:
1399                 info->io.inputb = intf_mem_inl;
1400                 info->io.outputb = intf_mem_outl;
1401                 break;
1402 #ifdef readq
1403         case 8:
1404                 info->io.inputb = mem_inq;
1405                 info->io.outputb = mem_outq;
1406                 break;
1407 #endif
1408         default:
1409                 printk("ipmi_si: Invalid register size: %d\n",
1410                        info->io.regsize);
1411                 return -EINVAL;
1412         }
1413
1414         /* Calculate the total amount of memory to claim.  This is an
1415          * unusual looking calculation, but it avoids claiming any
1416          * more memory than it has to.  It will claim everything
1417          * between the first address to the end of the last full
1418          * register. */
1419         mapsize = ((info->io_size * info->io.regspacing)
1420                    - (info->io.regspacing - info->io.regsize));
1421
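        /*
         * Worked example (illustrative): with io_size = 2 registers,
         * regspacing = 4 and regsize = 1, mapsize = 2 * 4 - (4 - 1) = 5,
         * i.e. everything from the first register up to and including the
         * single byte of the last one.
         */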
1422         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1423                 return -EIO;
1424
1425         info->io.addr = ioremap(addr, mapsize);
1426         if (info->io.addr == NULL) {
1427                 release_mem_region(addr, mapsize);
1428                 return -EIO;
1429         }
1430         return 0;
1431 }
1432
1433 /*
1434  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1435  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1436  * Options are:
1437  *   rsp=<regspacing>
1438  *   rsi=<regsize>
1439  *   rsh=<regshift>
1440  *   irq=<irq>
1441  *   ipmb=<ipmb addr>
1442  */
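/*
 * Example usage (illustrative; this assumes the handler below is wired up
 * as the "hotmod" module parameter elsewhere in this file):
 *
 *   echo "add,kcs,i/o,0xca2,rsp=1,irq=0" \
 *       > /sys/module/ipmi_si/parameters/hotmod
 *
 * registers a KCS interface at I/O port 0xca2, and
 *
 *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 *
 * removes it again.  Several operations may be chained with ':'.
 */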
1443 enum hotmod_op { HM_ADD, HM_REMOVE };
1444 struct hotmod_vals {
1445         char *name;
1446         int  val;
1447 };
1448 static struct hotmod_vals hotmod_ops[] = {
1449         { "add",        HM_ADD },
1450         { "remove",     HM_REMOVE },
1451         { NULL }
1452 };
1453 static struct hotmod_vals hotmod_si[] = {
1454         { "kcs",        SI_KCS },
1455         { "smic",       SI_SMIC },
1456         { "bt",         SI_BT },
1457         { NULL }
1458 };
1459 static struct hotmod_vals hotmod_as[] = {
1460         { "mem",        IPMI_MEM_ADDR_SPACE },
1461         { "i/o",        IPMI_IO_ADDR_SPACE },
1462         { NULL }
1463 };
1464
1465 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1466 {
1467         char *s;
1468         int  i;
1469
1470         s = strchr(*curr, ',');
1471         if (!s) {
1472                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1473                 return -EINVAL;
1474         }
1475         *s = '\0';
1476         s++;
1477         for (i = 0; v[i].name; i++) {
1478                 if (strcmp(*curr, v[i].name) == 0) {
1479                         *val = v[i].val;
1480                         *curr = s;
1481                         return 0;
1482                 }
1483         }
1484
1485         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1486         return -EINVAL;
1487 }
1488
1489 static int check_hotmod_int_op(const char *curr, const char *option,
1490                                const char *name, int *val)
1491 {
1492         char *n;
1493
1494         if (strcmp(curr, name) == 0) {
1495                 if (!option) {
1496                         printk(KERN_WARNING PFX
1497                                "No option given for '%s'\n",
1498                                curr);
1499                         return -EINVAL;
1500                 }
1501                 *val = simple_strtoul(option, &n, 0);
1502                 if ((*n != '\0') || (*option == '\0')) {
1503                         printk(KERN_WARNING PFX
1504                                "Bad option given for '%s'\n",
1505                                curr);
1506                         return -EINVAL;
1507                 }
1508                 return 1;
1509         }
1510         return 0;
1511 }
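/*
 * For example (illustrative): given an option split by the hotmod parser
 * below into curr = "rsp" and o = "4",
 *
 *     check_hotmod_int_op(curr, o, "rsp", &regspacing)
 *
 * stores 4 in regspacing and returns 1; a non-matching name returns 0, and
 * a missing or malformed value returns -EINVAL.
 */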
1512
1513 static int hotmod_handler(const char *val, struct kernel_param *kp)
1514 {
1515         char *str = kstrdup(val, GFP_KERNEL);
1516         int  rv;
1517         char *next, *curr, *s, *n, *o;
1518         enum hotmod_op op;
1519         enum si_type si_type;
1520         int  addr_space;
1521         unsigned long addr;
1522         int regspacing;
1523         int regsize;
1524         int regshift;
1525         int irq;
1526         int ipmb;
1527         int ival;
1528         int len;
1529         struct smi_info *info;
1530
1531         if (!str)
1532                 return -ENOMEM;
1533
1534         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1535         len = strlen(str);
1536         ival = len - 1;
1537         while ((ival >= 0) && isspace(str[ival])) {
1538                 str[ival] = '\0';
1539                 ival--;
1540         }
1541
1542         for (curr = str; curr; curr = next) {
1543                 regspacing = 1;
1544                 regsize = 1;
1545                 regshift = 0;
1546                 irq = 0;
1547                 ipmb = 0x20;
1548
1549                 next = strchr(curr, ':');
1550                 if (next) {
1551                         *next = '\0';
1552                         next++;
1553                 }
1554
1555                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1556                 if (rv)
1557                         break;
1558                 op = ival;
1559
1560                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1561                 if (rv)
1562                         break;
1563                 si_type = ival;
1564
1565                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1566                 if (rv)
1567                         break;
1568
1569                 s = strchr(curr, ',');
1570                 if (s) {
1571                         *s = '\0';
1572                         s++;
1573                 }
1574                 addr = simple_strtoul(curr, &n, 0);
1575                 if ((*n != '\0') || (*curr == '\0')) {
1576                         printk(KERN_WARNING PFX "Invalid hotmod address"
1577                                " '%s'\n", curr);
1578                         break;
1579                 }
1580
1581                 while (s) {
1582                         curr = s;
1583                         s = strchr(curr, ',');
1584                         if (s) {
1585                                 *s = '\0';
1586                                 s++;
1587                         }
1588                         o = strchr(curr, '=');
1589                         if (o) {
1590                                 *o = '\0';
1591                                 o++;
1592                         }
1593                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1594                         if (rv < 0)
1595                                 goto out;
1596                         else if (rv)
1597                                 continue;
1598                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1599                         if (rv < 0)
1600                                 goto out;
1601                         else if (rv)
1602                                 continue;
1603                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1604                         if (rv < 0)
1605                                 goto out;
1606                         else if (rv)
1607                                 continue;
1608                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1609                         if (rv < 0)
1610                                 goto out;
1611                         else if (rv)
1612                                 continue;
1613                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1614                         if (rv < 0)
1615                                 goto out;
1616                         else if (rv)
1617                                 continue;
1618
1619                         rv = -EINVAL;
1620                         printk(KERN_WARNING PFX
1621                                "Invalid hotmod option '%s'\n",
1622                                curr);
1623                         goto out;
1624                 }
1625
1626                 if (op == HM_ADD) {
1627                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1628                         if (!info) {
1629                                 rv = -ENOMEM;
1630                                 goto out;
1631                         }
1632
1633                         info->addr_source = "hotmod";
1634                         info->si_type = si_type;
1635                         info->io.addr_data = addr;
1636                         info->io.addr_type = addr_space;
1637                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1638                                 info->io_setup = mem_setup;
1639                         else
1640                                 info->io_setup = port_setup;
1641
1642                         info->io.addr = NULL;
1643                         info->io.regspacing = regspacing;
1644                         if (!info->io.regspacing)
1645                                 info->io.regspacing = DEFAULT_REGSPACING;
1646                         info->io.regsize = regsize;
1647                         if (!info->io.regsize)
1648                                 info->io.regsize = DEFAULT_REGSPACING;
1649                         info->io.regshift = regshift;
1650                         info->irq = irq;
1651                         if (info->irq)
1652                                 info->irq_setup = std_irq_setup;
1653                         info->slave_addr = ipmb;
1654
1655                         try_smi_init(info);
1656                 } else {
1657                         /* remove */
1658                         struct smi_info *e, *tmp_e;
1659
1660                         mutex_lock(&smi_infos_lock);
1661                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1662                                 if (e->io.addr_type != addr_space)
1663                                         continue;
1664                                 if (e->si_type != si_type)
1665                                         continue;
1666                                 if (e->io.addr_data == addr)
1667                                         cleanup_one_si(e);
1668                         }
1669                         mutex_unlock(&smi_infos_lock);
1670                 }
1671         }
1672         rv = len;
1673  out:
1674         kfree(str);
1675         return rv;
1676 }
1677
1678 static __devinit void hardcode_find_bmc(void)
1679 {
1680         int             i;
1681         struct smi_info *info;
1682
1683         for (i = 0; i < SI_MAX_PARMS; i++) {
1684                 if (!ports[i] && !addrs[i])
1685                         continue;
1686
1687                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1688                 if (!info)
1689                         return;
1690
1691                 info->addr_source = "hardcoded";
1692
1693                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1694                         info->si_type = SI_KCS;
1695                 } else if (strcmp(si_type[i], "smic") == 0) {
1696                         info->si_type = SI_SMIC;
1697                 } else if (strcmp(si_type[i], "bt") == 0) {
1698                         info->si_type = SI_BT;
1699                 } else {
1700                         printk(KERN_WARNING
1701                                "ipmi_si: Invalid interface type"
1702                                " specified for interface %d: %s\n",
1703                                i, si_type[i]);
1704                         kfree(info);
1705                         continue;
1706                 }
1707
1708                 if (ports[i]) {
1709                         /* An I/O port */
1710                         info->io_setup = port_setup;
1711                         info->io.addr_data = ports[i];
1712                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1713                 } else if (addrs[i]) {
1714                         /* A memory port */
1715                         info->io_setup = mem_setup;
1716                         info->io.addr_data = addrs[i];
1717                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1718                 } else {
1719                         printk(KERN_WARNING
1720                                "ipmi_si: Interface type specified "
1721                                "for interface %d, "
1722                                "but port and address were not set or "
1723                                "set to zero.\n", i);
1724                         kfree(info);
1725                         continue;
1726                 }
1727
1728                 info->io.addr = NULL;
1729                 info->io.regspacing = regspacings[i];
1730                 if (!info->io.regspacing)
1731                         info->io.regspacing = DEFAULT_REGSPACING;
1732                 info->io.regsize = regsizes[i];
1733                 if (!info->io.regsize)
1734                         info->io.regsize = DEFAULT_REGSPACING;
1735                 info->io.regshift = regshifts[i];
1736                 info->irq = irqs[i];
1737                 if (info->irq)
1738                         info->irq_setup = std_irq_setup;
1739
1740                 try_smi_init(info);
1741         }
1742 }
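/*
 * Example (illustrative, assuming the usual ipmi_si module parameter names
 * behind the ports[], si_type[] and regspacings[] arrays used above):
 *
 *   modprobe ipmi_si type=kcs ports=0xca2 regspacings=1
 *
 * would hard-code a single KCS interface at I/O port 0xca2.
 */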
1743
1744 #ifdef CONFIG_ACPI
1745
1746 #include <linux/acpi.h>
1747
1748 /* Once we get an ACPI failure, we don't try any more, because we go
1749    through the tables sequentially.  Once we don't find a table, there
1750    are no more. */
1751 static int acpi_failure;
1752
1753 /* For GPE-type interrupts. */
1754 static u32 ipmi_acpi_gpe(void *context)
1755 {
1756         struct smi_info *smi_info = context;
1757         unsigned long   flags;
1758 #ifdef DEBUG_TIMING
1759         struct timeval t;
1760 #endif
1761
1762         spin_lock_irqsave(&(smi_info->si_lock), flags);
1763
1764         spin_lock(&smi_info->count_lock);
1765         smi_info->interrupts++;
1766         spin_unlock(&smi_info->count_lock);
1767
1768 #ifdef DEBUG_TIMING
1769         do_gettimeofday(&t);
1770         printk("**ACPI_GPE: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
1771 #endif
1772         smi_event_handler(smi_info, 0);
1773         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1774
1775         return ACPI_INTERRUPT_HANDLED;
1776 }
1777
1778 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1779 {
1780         if (!info->irq)
1781                 return;
1782
1783         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1784 }
1785
1786 static int acpi_gpe_irq_setup(struct smi_info *info)
1787 {
1788         acpi_status status;
1789
1790         if (!info->irq)
1791                 return 0;
1792
1793         /* FIXME - is level triggered right? */
1794         status = acpi_install_gpe_handler(NULL,
1795                                           info->irq,
1796                                           ACPI_GPE_LEVEL_TRIGGERED,
1797                                           &ipmi_acpi_gpe,
1798                                           info);
1799         if (status != AE_OK) {
1800                 printk(KERN_WARNING
1801                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1802                        " running polled\n",
1803                        DEVICE_NAME, info->irq);
1804                 info->irq = 0;
1805                 return -EINVAL;
1806         } else {
1807                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1808                 printk("  Using ACPI GPE %d\n", info->irq);
1809                 return 0;
1810         }
1811 }
1812
1813 /*
1814  * Defined at
1815  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1816  */
1817 struct SPMITable {
1818         s8      Signature[4];
1819         u32     Length;
1820         u8      Revision;
1821         u8      Checksum;
1822         s8      OEMID[6];
1823         s8      OEMTableID[8];
1824         s8      OEMRevision[4];
1825         s8      CreatorID[4];
1826         s8      CreatorRevision[4];
1827         u8      InterfaceType;
1828         u8      IPMIlegacy;
1829         s16     SpecificationRevision;
1830
1831         /*
1832          * Bit 0 - SCI interrupt supported
1833          * Bit 1 - I/O APIC/SAPIC
1834          */
1835         u8      InterruptType;
1836
1837         /* If bit 0 of InterruptType is set, then this is the SCI
1838            interrupt in the GPEx_STS register. */
1839         u8      GPE;
1840
1841         s16     Reserved;
1842
1843         /* If bit 1 of InterruptType is set, then this is the I/O
1844            APIC/SAPIC interrupt. */
1845         u32     GlobalSystemInterrupt;
1846
1847         /* The actual register address. */
1848         struct acpi_generic_address addr;
1849
1850         u8      UID[4];
1851
1852         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1853 };
1854
1855 static __devinit int try_init_acpi(struct SPMITable *spmi)
1856 {
1857         struct smi_info  *info;
1858         u8               addr_space;
1859
1860         if (spmi->IPMIlegacy != 1) {
1861             printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1862             return -ENODEV;
1863         }
1864
1865         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1866                 addr_space = IPMI_MEM_ADDR_SPACE;
1867         else
1868                 addr_space = IPMI_IO_ADDR_SPACE;
1869
1870         info = kzalloc(sizeof(*info), GFP_KERNEL);
1871         if (!info) {
1872                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1873                 return -ENOMEM;
1874         }
1875
1876         info->addr_source = "ACPI";
1877
1878         /* Figure out the interface type. */
1879         switch (spmi->InterfaceType)
1880         {
1881         case 1: /* KCS */
1882                 info->si_type = SI_KCS;
1883                 break;
1884         case 2: /* SMIC */
1885                 info->si_type = SI_SMIC;
1886                 break;
1887         case 3: /* BT */
1888                 info->si_type = SI_BT;
1889                 break;
1890         default:
1891                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1892                         spmi->InterfaceType);
1893                 kfree(info);
1894                 return -EIO;
1895         }
1896
1897         if (spmi->InterruptType & 1) {
1898                 /* We've got a GPE interrupt. */
1899                 info->irq = spmi->GPE;
1900                 info->irq_setup = acpi_gpe_irq_setup;
1901         } else if (spmi->InterruptType & 2) {
1902                 /* We've got an APIC/SAPIC interrupt. */
1903                 info->irq = spmi->GlobalSystemInterrupt;
1904                 info->irq_setup = std_irq_setup;
1905         } else {
1906                 /* Use the default interrupt setting. */
1907                 info->irq = 0;
1908                 info->irq_setup = NULL;
1909         }
1910
1911         if (spmi->addr.bit_width) {
1912                 /* A (hopefully) properly formed register bit width. */
1913                 info->io.regspacing = spmi->addr.bit_width / 8;
1914         } else {
1915                 info->io.regspacing = DEFAULT_REGSPACING;
1916         }
1917         info->io.regsize = info->io.regspacing;
1918         info->io.regshift = spmi->addr.bit_offset;
1919
1920         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1921                 info->io_setup = mem_setup;
1922                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1923         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1924                 info->io_setup = port_setup;
1925                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1926         } else {
1927                 kfree(info);
1928                 printk(KERN_WARNING "ipmi_si: Unknown ACPI I/O Address type\n");
1929                 return -EIO;
1930         }
1931         info->io.addr_data = spmi->addr.address;
1932
1933         try_smi_init(info);
1934
1935         return 0;
1936 }
1937
1938 static __devinit void acpi_find_bmc(void)
1939 {
1940         acpi_status      status;
1941         struct SPMITable *spmi;
1942         int              i;
1943
1944         if (acpi_disabled)
1945                 return;
1946
1947         if (acpi_failure)
1948                 return;
1949
1950         for (i = 0; ; i++) {
1951                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1952                                         (struct acpi_table_header **)&spmi);
1953                 if (status != AE_OK)
1954                         return;
1955
1956                 try_init_acpi(spmi);
1957         }
1958 }
1959 #endif
1960
1961 #ifdef CONFIG_DMI
1962 struct dmi_ipmi_data
1963 {
1964         u8              type;
1965         u8              addr_space;
1966         unsigned long   base_addr;
1967         u8              irq;
1968         u8              offset;
1969         u8              slave_addr;
1970 };
1971
1972 static int __devinit decode_dmi(const struct dmi_header *dm,
1973                                 struct dmi_ipmi_data *dmi)
1974 {
1975         const u8        *data = (const u8 *)dm;
1976         unsigned long   base_addr;
1977         u8              reg_spacing;
1978         u8              len = dm->length;
1979
1980         dmi->type = data[4];
1981
1982         memcpy(&base_addr, data+8, sizeof(unsigned long));
1983         if (len >= 0x11) {
1984                 if (base_addr & 1) {
1985                         /* I/O */
1986                         base_addr &= 0xFFFE;
1987                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1988                 }
1989                 else {
1990                         /* Memory */
1991                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1992                 }
1993                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1994                    is odd. */
1995                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1996
1997                 dmi->irq = data[0x11];
1998
1999                 /* The top two bits of byte 0x10 hold the register spacing. */
2000                 reg_spacing = (data[0x10] & 0xC0) >> 6;
2001                 switch(reg_spacing){
2002                 case 0x00: /* Byte boundaries */
2003                     dmi->offset = 1;
2004                     break;
2005                 case 0x01: /* 32-bit boundaries */
2006                     dmi->offset = 4;
2007                     break;
2008                 case 0x02: /* 16-byte boundaries */
2009                     dmi->offset = 16;
2010                     break;
2011                 default:
2012                     /* Some other interface, just ignore it. */
2013                     return -EIO;
2014                 }
2015         } else {
2016                 /* Old DMI spec. */
2017                 /* Note that technically, the lower bit of the base
2018                  * address should be 1 if the address is I/O and 0 if
2019                  * the address is in memory.  However, so many systems
2020                  * get that wrong (and all that I have seen are I/O) that
2021                  * we just ignore the bit and assume I/O.  Systems that use
2022                  * memory should use the newer spec, anyway. */
2023                 dmi->base_addr = base_addr & 0xfffe;
2024                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2025                 dmi->offset = 1;
2026         }
2027
2028         dmi->slave_addr = data[6];
2029
2030         return 0;
2031 }
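/*
 * Illustrative decode of a hypothetical SMBIOS type-38 record: a base
 * address field of 0x0CA3 has bit 0 set, so the interface is in I/O space
 * at 0x0CA2; data[0x10] = 0x40 puts 01 in the top two bits (registers on
 * 32-bit boundaries, offset 4) and leaves bit 4 clear, so the address lsb
 * stays even.
 */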
2032
2033 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2034 {
2035         struct smi_info *info;
2036
2037         info = kzalloc(sizeof(*info), GFP_KERNEL);
2038         if (!info) {
2039                 printk(KERN_ERR
2040                        "ipmi_si: Could not allocate SI data\n");
2041                 return;
2042         }
2043
2044         info->addr_source = "SMBIOS";
2045
2046         switch (ipmi_data->type) {
2047         case 0x01: /* KCS */
2048                 info->si_type = SI_KCS;
2049                 break;
2050         case 0x02: /* SMIC */
2051                 info->si_type = SI_SMIC;
2052                 break;
2053         case 0x03: /* BT */
2054                 info->si_type = SI_BT;
2055                 break;
2056         default:
2057                 kfree(info);
2058                 return;
2059         }
2060
2061         switch (ipmi_data->addr_space) {
2062         case IPMI_MEM_ADDR_SPACE:
2063                 info->io_setup = mem_setup;
2064                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2065                 break;
2066
2067         case IPMI_IO_ADDR_SPACE:
2068                 info->io_setup = port_setup;
2069                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2070                 break;
2071
2072         default:
2073                 kfree(info);
2074                 printk(KERN_WARNING
2075                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2076                        ipmi_data->addr_space);
2077                 return;
2078         }
2079         info->io.addr_data = ipmi_data->base_addr;
2080
2081         info->io.regspacing = ipmi_data->offset;
2082         if (!info->io.regspacing)
2083                 info->io.regspacing = DEFAULT_REGSPACING;
2084         info->io.regsize = DEFAULT_REGSPACING;
2085         info->io.regshift = 0;
2086
2087         info->slave_addr = ipmi_data->slave_addr;
2088
2089         info->irq = ipmi_data->irq;
2090         if (info->irq)
2091                 info->irq_setup = std_irq_setup;
2092
2093         try_smi_init(info);
2094 }
2095
2096 static void __devinit dmi_find_bmc(void)
2097 {
2098         const struct dmi_device *dev = NULL;
2099         struct dmi_ipmi_data data;
2100         int                  rv;
2101
2102         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2103                 memset(&data, 0, sizeof(data));
2104                 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2105                                 &data);
2106                 if (!rv)
2107                         try_init_dmi(&data);
2108         }
2109 }
2110 #endif /* CONFIG_DMI */
2111
2112 #ifdef CONFIG_PCI
2113
2114 #define PCI_ERMC_CLASSCODE              0x0C0700
2115 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2116 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2117 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2118 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2119 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2120
2121 #define PCI_HP_VENDOR_ID    0x103C
2122 #define PCI_MMC_DEVICE_ID   0x121A
2123 #define PCI_MMC_ADDR_CW     0x10
2124
2125 static void ipmi_pci_cleanup(struct smi_info *info)
2126 {
2127         struct pci_dev *pdev = info->addr_source_data;
2128
2129         pci_disable_device(pdev);
2130 }
2131
2132 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2133                                     const struct pci_device_id *ent)
2134 {
2135         int rv;
2136         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2137         struct smi_info *info;
2138         int first_reg_offset = 0;
2139
2140         info = kzalloc(sizeof(*info), GFP_KERNEL);
2141         if (!info)
2142                 return -ENOMEM;
2143
2144         info->addr_source = "PCI";
2145
2146         switch (class_type) {
2147         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2148                 info->si_type = SI_SMIC;
2149                 break;
2150
2151         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2152                 info->si_type = SI_KCS;
2153                 break;
2154
2155         case PCI_ERMC_CLASSCODE_TYPE_BT:
2156                 info->si_type = SI_BT;
2157                 break;
2158
2159         default:
2160                 kfree(info);
2161                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2162                        pci_name(pdev), class_type);
2163                 return -ENODEV;
2164         }
2165
2166         rv = pci_enable_device(pdev);
2167         if (rv) {
2168                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2169                        pci_name(pdev));
2170                 kfree(info);
2171                 return rv;
2172         }
2173
2174         info->addr_source_cleanup = ipmi_pci_cleanup;
2175         info->addr_source_data = pdev;
2176
2177         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2178                 first_reg_offset = 1;
2179
2180         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2181                 info->io_setup = port_setup;
2182                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2183         } else {
2184                 info->io_setup = mem_setup;
2185                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2186         }
2187         info->io.addr_data = pci_resource_start(pdev, 0);
2188
2189         info->io.regspacing = DEFAULT_REGSPACING;
2190         info->io.regsize = DEFAULT_REGSPACING;
2191         info->io.regshift = 0;
2192
2193         info->irq = pdev->irq;
2194         if (info->irq)
2195                 info->irq_setup = std_irq_setup;
2196
2197         info->dev = &pdev->dev;
2198         pci_set_drvdata(pdev, info);
2199
2200         return try_smi_init(info);
2201 }
2202
2203 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2204 {
2205         struct smi_info *info = pci_get_drvdata(pdev);
2206         cleanup_one_si(info);
2207 }
2208
2209 #ifdef CONFIG_PM
2210 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2211 {
2212         return 0;
2213 }
2214
2215 static int ipmi_pci_resume(struct pci_dev *pdev)
2216 {
2217         return 0;
2218 }
2219 #endif
2220
2221 static struct pci_device_id ipmi_pci_devices[] = {
2222         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2223         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2224         { 0, }
2225 };
2226 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2227
2228 static struct pci_driver ipmi_pci_driver = {
2229         .name =         DEVICE_NAME,
2230         .id_table =     ipmi_pci_devices,
2231         .probe =        ipmi_pci_probe,
2232         .remove =       __devexit_p(ipmi_pci_remove),
2233 #ifdef CONFIG_PM
2234         .suspend =      ipmi_pci_suspend,
2235         .resume =       ipmi_pci_resume,
2236 #endif
2237 };
2238 #endif /* CONFIG_PCI */
2239
2240
2241 #ifdef CONFIG_PPC_OF
2242 static int __devinit ipmi_of_probe(struct of_device *dev,
2243                          const struct of_device_id *match)
2244 {
2245         struct smi_info *info;
2246         struct resource resource;
2247         const int *regsize, *regspacing, *regshift;
2248         struct device_node *np = dev->node;
2249         int ret;
2250         int proplen;
2251
2252         dev_info(&dev->dev, PFX "probing via device tree\n");
2253
2254         ret = of_address_to_resource(np, 0, &resource);
2255         if (ret) {
2256                 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2257                 return ret;
2258         }
2259
2260         regsize = of_get_property(np, "reg-size", &proplen);
2261         if (regsize && proplen != 4) {
2262                 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2263                 return -EINVAL;
2264         }
2265
2266         regspacing = of_get_property(np, "reg-spacing", &proplen);
2267         if (regspacing && proplen != 4) {
2268                 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2269                 return -EINVAL;
2270         }
2271
2272         regshift = of_get_property(np, "reg-shift", &proplen);
2273         if (regshift && proplen != 4) {
2274                 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2275                 return -EINVAL;
2276         }
2277
2278         info = kzalloc(sizeof(*info), GFP_KERNEL);
2279
2280         if (!info) {
2281                 dev_err(&dev->dev,
2282                         PFX "could not allocate memory for OF probe\n");
2283                 return -ENOMEM;
2284         }
2285
2286         info->si_type           = (enum si_type) match->data;
2287         info->addr_source       = "device-tree";
2288         info->io_setup          = mem_setup;
2289         info->irq_setup         = std_irq_setup;
2290
2291         info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2292         info->io.addr_data      = resource.start;
2293
2294         info->io.regsize        = regsize ? *regsize : DEFAULT_REGSIZE;
2295         info->io.regspacing     = regspacing ? *regspacing : DEFAULT_REGSPACING;
2296         info->io.regshift       = regshift ? *regshift : 0;
2297
2298         info->irq               = irq_of_parse_and_map(dev->node, 0);
2299         info->dev               = &dev->dev;
2300
2301         dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
2302                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2303                 info->irq);
2304
2305         dev->dev.driver_data = (void*) info;
2306
2307         return try_smi_init(info);
2308 }
2309
2310 static int __devexit ipmi_of_remove(struct of_device *dev)
2311 {
2312         cleanup_one_si(dev->dev.driver_data);
2313         return 0;
2314 }
2315
2316 static struct of_device_id ipmi_match[] =
2317 {
2318         { .type = "ipmi", .compatible = "ipmi-kcs",  .data = (void *)(unsigned long) SI_KCS },
2319         { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2320         { .type = "ipmi", .compatible = "ipmi-bt",   .data = (void *)(unsigned long) SI_BT },
2321         {},
2322 };
2323
2324 static struct of_platform_driver ipmi_of_platform_driver =
2325 {
2326         .name           = "ipmi",
2327         .match_table    = ipmi_match,
2328         .probe          = ipmi_of_probe,
2329         .remove         = __devexit_p(ipmi_of_remove),
2330 };
2331 #endif /* CONFIG_PPC_OF */
2332
2333
2334 static int try_get_dev_id(struct smi_info *smi_info)
2335 {
2336         unsigned char         msg[2];
2337         unsigned char         *resp;
2338         unsigned long         resp_len;
2339         enum si_sm_result     smi_result;
2340         int                   rv = 0;
2341
2342         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2343         if (!resp)
2344                 return -ENOMEM;
2345
2346         /* Do a Get Device ID command, since it comes back with some
2347            useful info. */
2348         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2349         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2350         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2351
2352         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2353         for (;;)
2354         {
2355                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2356                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2357                         schedule_timeout_uninterruptible(1);
2358                         smi_result = smi_info->handlers->event(
2359                                 smi_info->si_sm, 100);
2360                 }
2361                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2362                 {
2363                         smi_result = smi_info->handlers->event(
2364                                 smi_info->si_sm, 0);
2365                 }
2366                 else
2367                         break;
2368         }
2369         if (smi_result == SI_SM_HOSED) {
2370                 /* We couldn't get the state machine to run, so whatever's at
2371                    the port is probably not an IPMI SMI interface. */
2372                 rv = -ENODEV;
2373                 goto out;
2374         }
2375
2376         /* Otherwise, we got some data. */
2377         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2378                                                   resp, IPMI_MAX_MSG_LENGTH);
2379         if (resp_len < 14) {
2380                 /* That's odd, it should be longer. */
2381                 rv = -EINVAL;
2382                 goto out;
2383         }
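        /*
         * Layout note: resp[0] is the response NetFn/LUN, resp[1] the
         * command, resp[2] the completion code, and resp[3] onward the Get
         * Device ID payload (at least 11 bytes per the IPMI spec), hence
         * the 14-byte minimum above and the resp + 3 below.
         */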
2384
2385         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2386                 /* That's odd, it shouldn't be able to fail. */
2387                 rv = -EINVAL;
2388                 goto out;
2389         }
2390
2391         /* Record info from the get device id, in case we need it. */
2392         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
2393
2394  out:
2395         kfree(resp);
2396         return rv;
2397 }
2398
2399 static int type_file_read_proc(char *page, char **start, off_t off,
2400                                int count, int *eof, void *data)
2401 {
2402         struct smi_info *smi = data;
2403
2404         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2405 }
2406
2407 static int stat_file_read_proc(char *page, char **start, off_t off,
2408                                int count, int *eof, void *data)
2409 {
2410         char            *out = (char *) page;
2411         struct smi_info *smi = data;
2412
2413         out += sprintf(out, "interrupts_enabled:    %d\n",
2414                        smi->irq && !smi->interrupt_disabled);
2415         out += sprintf(out, "short_timeouts:        %ld\n",
2416                        smi->short_timeouts);
2417         out += sprintf(out, "long_timeouts:         %ld\n",
2418                        smi->long_timeouts);
2419         out += sprintf(out, "timeout_restarts:      %ld\n",
2420                        smi->timeout_restarts);
2421         out += sprintf(out, "idles:                 %ld\n",
2422                        smi->idles);
2423         out += sprintf(out, "interrupts:            %ld\n",
2424                        smi->interrupts);
2425         out += sprintf(out, "attentions:            %ld\n",
2426                        smi->attentions);
2427         out += sprintf(out, "flag_fetches:          %ld\n",
2428                        smi->flag_fetches);
2429         out += sprintf(out, "hosed_count:           %ld\n",
2430                        smi->hosed_count);
2431         out += sprintf(out, "complete_transactions: %ld\n",
2432                        smi->complete_transactions);
2433         out += sprintf(out, "events:                %ld\n",
2434                        smi->events);
2435         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2436                        smi->watchdog_pretimeouts);
2437         out += sprintf(out, "incoming_messages:     %ld\n",
2438                        smi->incoming_messages);
2439
2440         return out - page;
2441 }
2442
2443 static int param_read_proc(char *page, char **start, off_t off,
2444                            int count, int *eof, void *data)
2445 {
2446         struct smi_info *smi = data;
2447
2448         return sprintf(page,
2449                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2450                        si_to_str[smi->si_type],
2451                        addr_space_to_str[smi->io.addr_type],
2452                        smi->io.addr_data,
2453                        smi->io.regspacing,
2454                        smi->io.regsize,
2455                        smi->io.regshift,
2456                        smi->irq,
2457                        smi->slave_addr);
2458 }
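/*
 * Illustrative output of the "params" proc file for a default KCS
 * interface (hypothetical values):
 *
 *   kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=32
 *
 * Note that ipmb is printed in decimal, so the default 0x20 shows up as
 * 32.  The format mirrors the hotmod option syntax above.
 */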
2459
2460 /*
2461  * oem_data_avail_to_receive_msg_avail
2462  * @info - smi_info structure with msg_flags set
2463  *
2464  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2465  * Returns 1 indicating need to re-run handle_flags().
2466  */
2467 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2468 {
2469         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2470                                 RECEIVE_MSG_AVAIL);
2471         return 1;
2472 }
2473
2474 /*
2475  * setup_dell_poweredge_oem_data_handler
2476  * @info - smi_info.device_id must be populated
2477  *
2478  * Systems that match but have firmware version < 1.40 may assert
2479  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2480  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2481  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2482  * as RECEIVE_MSG_AVAIL instead.
2483  *
2484  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2485  * assert the OEM[012] bits, and if it did, the driver would have to
2486  * change to handle that properly, we don't actually check for the
2487  * firmware version.
2488  * Device ID = 0x20                BMC on PowerEdge 8G servers
2489  * Device Revision = 0x80
2490  * Firmware Revision1 = 0x01       BMC version 1.40
2491  * Firmware Revision2 = 0x40       BCD encoded
2492  * IPMI Version = 0x51             IPMI 1.5
2493  * Manufacturer ID = A2 02 00      Dell IANA
2494  *
2495  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2496  * OEM0_DATA_AVAIL, which likewise needs to be treated as RECEIVE_MSG_AVAIL.
2497  *
2498  */
2499 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2500 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2501 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2502 #define DELL_IANA_MFR_ID 0x0002a2
2503 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2504 {
2505         struct ipmi_device_id *id = &smi_info->device_id;
2506         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2507                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2508                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2509                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2510                         smi_info->oem_data_avail_handler =
2511                                 oem_data_avail_to_receive_msg_avail;
2512                 }
2513                 else if (ipmi_version_major(id) < 1 ||
2514                          (ipmi_version_major(id) == 1 &&
2515                           ipmi_version_minor(id) < 5)) {
2516                         smi_info->oem_data_avail_handler =
2517                                 oem_data_avail_to_receive_msg_avail;
2518                 }
2519         }
2520 }
2521
2522 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2523 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2524 {
2525         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2526
2527         /* Make it a response.  OR-ing 4 into rsp[0] turns the request
                 NetFn (the upper six bits) into the response NetFn (NetFn + 1). */
2528         msg->rsp[0] = msg->data[0] | 4;
2529         msg->rsp[1] = msg->data[1];
2530         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2531         msg->rsp_size = 3;
2532         smi_info->curr_msg = NULL;
2533         deliver_recv_msg(smi_info, msg);
2534 }
2535
2536 /*
2537  * dell_poweredge_bt_xaction_handler
2538  * @info - smi_info.device_id must be populated
2539  *
2540  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2541  * not respond to a Get SDR command if the length of the data
2542  * requested is exactly 0x3A, which leads to command timeouts and no
2543  * data returned.  This intercepts such commands, and causes userspace
2544  * callers to try again with a different-sized buffer, which succeeds.
2545  */
2546
2547 #define STORAGE_NETFN 0x0A
2548 #define STORAGE_CMD_GET_SDR 0x23
2549 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2550                                              unsigned long unused,
2551                                              void *in)
2552 {
2553         struct smi_info *smi_info = in;
2554         unsigned char *data = smi_info->curr_msg->data;
2555         unsigned int size   = smi_info->curr_msg->data_size;
2556         if (size >= 8 &&
2557             (data[0]>>2) == STORAGE_NETFN &&
2558             data[1] == STORAGE_CMD_GET_SDR &&
2559             data[7] == 0x3A) {
2560                 return_hosed_msg_badsize(smi_info);
2561                 return NOTIFY_STOP;
2562         }
2563         return NOTIFY_DONE;
2564 }
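/*
 * Example of a request the notifier above intercepts (illustrative bytes):
 * data[0] = 0x28 (STORAGE_NETFN 0x0A << 2), data[1] = 0x23 (Get SDR),
 * data[7] = 0x3A (requested length).  Such a message is answered locally
 * with CANNOT_RETURN_REQUESTED_LENGTH rather than being sent to the BMC.
 */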
2565
2566 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2567         .notifier_call  = dell_poweredge_bt_xaction_handler,
2568 };
2569
2570 /*
2571  * setup_dell_poweredge_bt_xaction_handler
2572  * @info - smi_info.device_id must be filled in already
2573  *
2574  * Registers dell_poweredge_bt_xaction_notifier when we know the
2575  * system needs the Get SDR workaround above.
2576  */
2577 static void
2578 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2579 {
2580         struct ipmi_device_id *id = &smi_info->device_id;
2581         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2582             smi_info->si_type == SI_BT)
2583                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2584 }
2585
2586 /*
2587  * setup_oem_data_handler
2588  * @info - smi_info.device_id must be filled in already
2589  *
2590  * Fills in smi_info.oem_data_avail_handler
2591  * when we know what function to use there.
2592  */
2593
2594 static void setup_oem_data_handler(struct smi_info *smi_info)
2595 {
2596         setup_dell_poweredge_oem_data_handler(smi_info);
2597 }
2598
2599 static void setup_xaction_handlers(struct smi_info *smi_info)
2600 {
2601         setup_dell_poweredge_bt_xaction_handler(smi_info);
2602 }
2603
2604 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2605 {
2606         if (smi_info->intf) {
2607                 /* The timer and thread are only running if the
2608                    interface has been started up and registered. */
2609                 if (smi_info->thread != NULL)
2610                         kthread_stop(smi_info->thread);
2611                 del_timer_sync(&smi_info->si_timer);
2612         }
2613 }
2614
2615 static __devinitdata struct ipmi_default_vals
2616 {
2617         int type;
2618         int port;
2619 } ipmi_defaults[] =
2620 {
2621         { .type = SI_KCS, .port = 0xca2 },
2622         { .type = SI_SMIC, .port = 0xca9 },
2623         { .type = SI_BT, .port = 0xe4 },
2624         { .port = 0 }
2625 };
2626
2627 static __devinit void default_find_bmc(void)
2628 {
2629         struct smi_info *info;
2630         int             i;
2631
2632         for (i = 0; ; i++) {
2633                 if (!ipmi_defaults[i].port)
2634                         break;
2635
2636                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2637                 if (!info)
2638                         return;
2639
2640 #ifdef CONFIG_PPC_MERGE
2641                 if (check_legacy_ioport(ipmi_defaults[i].port))
2642                         continue;
2643 #endif
2644
2645                 info->addr_source = NULL;
2646
2647                 info->si_type = ipmi_defaults[i].type;
2648                 info->io_setup = port_setup;
2649                 info->io.addr_data = ipmi_defaults[i].port;
2650                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2651
2652                 info->io.addr = NULL;
2653                 info->io.regspacing = DEFAULT_REGSPACING;
2654                 info->io.regsize = DEFAULT_REGSPACING;
2655                 info->io.regshift = 0;
2656
2657                 if (try_smi_init(info) == 0) {
2658                         /* Found one... */
2659                         printk(KERN_INFO "ipmi_si: Found default %s state"
2660                                " machine at %s address 0x%lx\n",
2661                                si_to_str[info->si_type],
2662                                addr_space_to_str[info->io.addr_type],
2663                                info->io.addr_data);
2664                         return;
2665                 }
2666         }
2667 }
2668
2669 static int is_new_interface(struct smi_info *info)
2670 {
2671         struct smi_info *e;
2672
2673         list_for_each_entry(e, &smi_infos, link) {
2674                 if (e->io.addr_type != info->io.addr_type)
2675                         continue;
2676                 if (e->io.addr_data == info->io.addr_data)
2677                         return 0;
2678         }
2679
2680         return 1;
2681 }
2682
2683 static int try_smi_init(struct smi_info *new_smi)
2684 {
2685         int rv;
2686
2687         if (new_smi->addr_source) {
2688                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2689                        " machine at %s address 0x%lx, slave address 0x%x,"
2690                        " irq %d\n",
2691                        new_smi->addr_source,
2692                        si_to_str[new_smi->si_type],
2693                        addr_space_to_str[new_smi->io.addr_type],
2694                        new_smi->io.addr_data,
2695                        new_smi->slave_addr, new_smi->irq);
2696         }
2697
2698         mutex_lock(&smi_infos_lock);
2699         if (!is_new_interface(new_smi)) {
2700                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2701                 rv = -EBUSY;
2702                 goto out_err;
2703         }
2704
2705         /* So we know not to free it unless we have allocated one. */
2706         new_smi->intf = NULL;
2707         new_smi->si_sm = NULL;
2708         new_smi->handlers = NULL;
2709
2710         switch (new_smi->si_type) {
2711         case SI_KCS:
2712                 new_smi->handlers = &kcs_smi_handlers;
2713                 break;
2714
2715         case SI_SMIC:
2716                 new_smi->handlers = &smic_smi_handlers;
2717                 break;
2718
2719         case SI_BT:
2720                 new_smi->handlers = &bt_smi_handlers;
2721                 break;
2722
2723         default:
2724                 /* No support for anything else yet. */
2725                 rv = -EIO;
2726                 goto out_err;
2727         }
2728
2729         /* Allocate the state machine's data and initialize it. */
2730         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2731         if (!new_smi->si_sm) {
2732                 printk(" Could not allocate state machine memory\n");
2733                 rv = -ENOMEM;
2734                 goto out_err;
2735         }
2736         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2737                                                         &new_smi->io);
2738
2739         /* Now that we know the I/O size, we can set up the I/O. */
2740         rv = new_smi->io_setup(new_smi);
2741         if (rv) {
2742                 printk(" Could not set up I/O space\n");
2743                 goto out_err;
2744         }
2745
2746         spin_lock_init(&(new_smi->si_lock));
2747         spin_lock_init(&(new_smi->msg_lock));
2748         spin_lock_init(&(new_smi->count_lock));
2749
2750         /* Do low-level detection first. */
2751         if (new_smi->handlers->detect(new_smi->si_sm)) {
2752                 if (new_smi->addr_source)
2753                         printk(KERN_INFO "ipmi_si: Interface detection"
2754                                " failed\n");
2755                 rv = -ENODEV;
2756                 goto out_err;
2757         }
2758
2759         /* Attempt a get device id command.  If it fails, we probably
2760            don't have a BMC here. */
2761         rv = try_get_dev_id(new_smi);
2762         if (rv) {
2763                 if (new_smi->addr_source)
2764                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2765                                " at this location\n");
2766                 goto out_err;
2767         }
2768
2769         setup_oem_data_handler(new_smi);
2770         setup_xaction_handlers(new_smi);
2771
2772         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2773         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2774         new_smi->curr_msg = NULL;
2775         atomic_set(&new_smi->req_events, 0);
2776         new_smi->run_to_completion = 0;
2777
2778         new_smi->interrupt_disabled = 0;
2779         atomic_set(&new_smi->stop_operation, 0);
2780         new_smi->intf_num = smi_num;
2781         smi_num++;
2782
2783         /* Start clearing the flags before we enable interrupts or the
2784            timer to avoid racing with the timer. */
2785         start_clear_flags(new_smi);
2786         /* IRQ is defined to be set when non-zero. */
2787         if (new_smi->irq)
2788                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2789
2790         if (!new_smi->dev) {
2791                 /* If we don't already have a device from something
2792                  * else (like PCI), then register a new one. */
2793                 new_smi->pdev = platform_device_alloc("ipmi_si",
2794                                                       new_smi->intf_num);
2795                 if (!new_smi->pdev) {
                             rv = -ENOMEM;
2796                         printk(KERN_ERR
2797                                "ipmi_si_intf:"
2798                                " Unable to allocate platform device\n");
2799                         goto out_err;
2800                 }
2801                 new_smi->dev = &new_smi->pdev->dev;
2802                 new_smi->dev->driver = &ipmi_driver;
2803
2804                 rv = platform_device_add(new_smi->pdev);
2805                 if (rv) {
2806                         printk(KERN_ERR
2807                                "ipmi_si_intf:"
2808                                " Unable to register system interface device:"
2809                                " %d\n",
2810                                rv);
2811                         goto out_err;
2812                 }
2813                 new_smi->dev_registered = 1;
2814         }
2815
2816         rv = ipmi_register_smi(&handlers,
2817                                new_smi,
2818                                &new_smi->device_id,
2819                                new_smi->dev,
2820                                "bmc",
2821                                new_smi->slave_addr);
2822         if (rv) {
2823                 printk(KERN_ERR
2824                        "ipmi_si: Unable to register device: error %d\n",
2825                        rv);
2826                 goto out_err_stop_timer;
2827         }
2828
2829         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2830                                      type_file_read_proc, NULL,
2831                                      new_smi, THIS_MODULE);
2832         if (rv) {
2833                 printk(KERN_ERR
2834                        "ipmi_si: Unable to create proc entry: %d\n",
2835                        rv);
2836                 goto out_err_stop_timer;
2837         }
2838
2839         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2840                                      stat_file_read_proc, NULL,
2841                                      new_smi, THIS_MODULE);
2842         if (rv) {
2843                 printk(KERN_ERR
2844                        "ipmi_si: Unable to create proc entry: %d\n",
2845                        rv);
2846                 goto out_err_stop_timer;
2847         }
2848
2849         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2850                                      param_read_proc, NULL,
2851                                      new_smi, THIS_MODULE);
2852         if (rv) {
2853                 printk(KERN_ERR
2854                        "ipmi_si: Unable to create proc entry: %d\n",
2855                        rv);
2856                 goto out_err_stop_timer;
2857         }
2858
2859         list_add_tail(&new_smi->link, &smi_infos);
2860
2861         mutex_unlock(&smi_infos_lock);
2862
2863         printk(KERN_INFO "IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2864
2865         return 0;
2866
2867  out_err_stop_timer:
2868         atomic_inc(&new_smi->stop_operation);
2869         wait_for_timer_and_thread(new_smi);
2870
2871  out_err:
2872         if (new_smi->intf)
2873                 ipmi_unregister_smi(new_smi->intf);
2874
2875         if (new_smi->irq_cleanup)
2876                 new_smi->irq_cleanup(new_smi);
2877
2878         /* Wait until we know that any interrupt handlers that might
2879            have been running before we freed the interrupt have
2880            finished. */
2881         synchronize_sched();
2882
2883         if (new_smi->si_sm) {
2884                 if (new_smi->handlers)
2885                         new_smi->handlers->cleanup(new_smi->si_sm);
2886                 kfree(new_smi->si_sm);
2887         }
2888         if (new_smi->addr_source_cleanup)
2889                 new_smi->addr_source_cleanup(new_smi);
2890         if (new_smi->io_cleanup)
2891                 new_smi->io_cleanup(new_smi);
2892
2893         if (new_smi->dev_registered)
2894                 platform_device_unregister(new_smi->pdev);
2895
2896         kfree(new_smi);
2897
2898         mutex_unlock(&smi_infos_lock);
2899
2900         return rv;
2901 }
2902
2903 static __devinit int init_ipmi_si(void)
2904 {
2905         int  i;
2906         char *str;
2907         int  rv;
2908
2909         if (initialized)
2910                 return 0;
2911         initialized = 1;
2912
2913         /* Register the device drivers. */
2914         rv = driver_register(&ipmi_driver);
2915         if (rv) {
2916                 printk(KERN_ERR
2917                        "init_ipmi_si: Unable to register driver: %d\n",
2918                        rv);
2919                 return rv;
2920         }
2921
2922
2923         /* Parse out the si_type string into its components. */
2924         str = si_type_str;
2925         if (*str != '\0') {
2926                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2927                         si_type[i] = str;
2928                         str = strchr(str, ',');
2929                         if (str) {
2930                                 *str = '\0';
2931                                 str++;
2932                         } else {
2933                                 break;
2934                         }
2935                 }
2936         }
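        /* For example, loading the module with type="kcs,smic"
           (illustrative values) is split here into si_type[0] = "kcs"
           and si_type[1] = "smic", one entry per interface. */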
2937
2938         printk(KERN_INFO "IPMI System Interface driver.\n");
2939
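        /* hardcode_find_bmc() registers any interfaces given explicitly
           as module parameters, bypassing the firmware tables.  A minimal
           illustrative invocation, assuming a KCS BMC at the standard
           I/O port:
               modprobe ipmi_si type=kcs ports=0xca2 */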
2940         hardcode_find_bmc();
2941
2942 #ifdef CONFIG_DMI
2943         dmi_find_bmc();
2944 #endif
2945
2946 #ifdef CONFIG_ACPI
2947         acpi_find_bmc();
2948 #endif
2949
2950 #ifdef CONFIG_PCI
2951         rv = pci_register_driver(&ipmi_pci_driver);
2952         if (rv) {
2953                 printk(KERN_ERR
2954                        "init_ipmi_si: Unable to register PCI driver: %d\n",
2955                        rv);
2956         }
2957 #endif
2958
2959 #ifdef CONFIG_PPC_OF
2960         of_register_platform_driver(&ipmi_of_platform_driver);
2961 #endif
2962
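        /* If none of the probes above located a BMC, this optional
           fallback (controlled by the "trydefaults" module parameter,
           enabled by default) tries the spec-defined default I/O ports
           for each interface type in turn. */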
2963         if (si_trydefaults) {
2964                 mutex_lock(&smi_infos_lock);
2965                 if (list_empty(&smi_infos)) {
2966                         /* No BMC was found, try defaults. */
2967                         mutex_unlock(&smi_infos_lock);
2968                         default_find_bmc();
2969                 } else {
2970                         mutex_unlock(&smi_infos_lock);
2971                 }
2972         }
2973
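        /* If nothing was registered at all and unload_when_empty is set
           (the default), fail the init with -ENODEV so the module does
           not stay loaded with no interfaces to drive. */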
2974         mutex_lock(&smi_infos_lock);
2975         if (unload_when_empty && list_empty(&smi_infos)) {
2976                 mutex_unlock(&smi_infos_lock);
2977 #ifdef CONFIG_PCI
2978                 pci_unregister_driver(&ipmi_pci_driver);
2979 #endif
2980
2981 #ifdef CONFIG_PPC_OF
2982                 of_unregister_platform_driver(&ipmi_of_platform_driver);
2983 #endif
2984                 driver_unregister(&ipmi_driver);
2985                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2986                 return -ENODEV;
2987         } else {
2988                 mutex_unlock(&smi_infos_lock);
2989                 return 0;
2990         }
2991 }
2992 module_init(init_ipmi_si);
2993
2994 static void cleanup_one_si(struct smi_info *to_clean)
2995 {
2996         int           rv;
2997         unsigned long flags;
2998
2999         if (!to_clean)
3000                 return;
3001
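        /* The shutdown order below matters: stop the timer and kthread
           first, then drain any in-flight transaction, then disable and
           free the interrupt, and only then unregister the interface and
           free the state machine. */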
3002         list_del(&to_clean->link);
3003
3004         /* Tell the driver that we are shutting down. */
3005         atomic_inc(&to_clean->stop_operation);
3006
3007         /* Make sure the timer and thread are stopped and will not run
3008            again. */
3009         wait_for_timer_and_thread(to_clean);
3010
3011         /* Timeouts are stopped; now make sure the interrupts are off
3012            for the device.  This is a little tricky with the locks to
3013            make sure there are no races. */
3014         spin_lock_irqsave(&to_clean->si_lock, flags);
3015         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3016                 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3017                 poll(to_clean);
3018                 schedule_timeout_uninterruptible(1);
3019                 spin_lock_irqsave(&to_clean->si_lock, flags);
3020         }
3021         disable_si_irq(to_clean);
3022         spin_unlock_irqrestore(&to_clean->si_lock, flags);
3023         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3024                 poll(to_clean);
3025                 schedule_timeout_uninterruptible(1);
3026         }
3027
3028         /* Clean up interrupts and make sure that everything is done. */
3029         if (to_clean->irq_cleanup)
3030                 to_clean->irq_cleanup(to_clean);
3031         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3032                 poll(to_clean);
3033                 schedule_timeout_uninterruptible(1);
3034         }
3035
3036         rv = ipmi_unregister_smi(to_clean->intf);
3037         if (rv) {
3038                 printk(KERN_ERR
3039                        "ipmi_si: Unable to unregister device: errno=%d\n",
3040                        rv);
3041         }
3042
3043         to_clean->handlers->cleanup(to_clean->si_sm);
3044
3045         kfree(to_clean->si_sm);
3046
3047         if (to_clean->addr_source_cleanup)
3048                 to_clean->addr_source_cleanup(to_clean);
3049         if (to_clean->io_cleanup)
3050                 to_clean->io_cleanup(to_clean);
3051
3052         if (to_clean->dev_registered)
3053                 platform_device_unregister(to_clean->pdev);
3054
3055         kfree(to_clean);
3056 }
3057
3058 static __exit void cleanup_ipmi_si(void)
3059 {
3060         struct smi_info *e, *tmp_e;
3061
3062         if (!initialized)
3063                 return;
3064
3065 #ifdef CONFIG_PCI
3066         pci_unregister_driver(&ipmi_pci_driver);
3067 #endif
3068
3069 #ifdef CONFIG_PPC_OF
3070         of_unregister_platform_driver(&ipmi_of_platform_driver);
3071 #endif
3072
3073         mutex_lock(&smi_infos_lock);
3074         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3075                 cleanup_one_si(e);
3076         mutex_unlock(&smi_infos_lock);
3077
3078         driver_unregister(&ipmi_driver);
3079 }
3080 module_exit(cleanup_ipmi_si);
3081
3082 MODULE_LICENSE("GPL");
3083 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3084 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");