drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13  *
14  *  This program is free software; you can redistribute it and/or modify it
15  *  under the terms of the GNU General Public License as published by the
16  *  Free Software Foundation; either version 2 of the License, or (at your
17  *  option) any later version.
18  *
19  *
20  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  *  You should have received a copy of the GNU General Public License along
32  *  with this program; if not, write to the Free Software Foundation, Inc.,
33  *  675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35
36 /*
37  * This file holds the "policy" for the interface to the SMI state
38  * machine.  It does the configuration, handles timers and interrupts,
39  * and drives the real SMI state machine.
40  */
41
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi_smi.h>
61 #include <asm/io.h>
62 #include "ipmi_si_sm.h"
63 #include <linux/init.h>
64 #include <linux/dmi.h>
65 #include <linux/string.h>
66 #include <linux/ctype.h>
67
68 #ifdef CONFIG_PPC_OF
69 #include <asm/of_device.h>
70 #include <asm/of_platform.h>
71 #endif
72
73 #define PFX "ipmi_si: "
74
75 /* Measure times between events in the driver. */
76 #undef DEBUG_TIMING
77
78 /* Call every 10 ms. */
79 #define SI_TIMEOUT_TIME_USEC    10000
80 #define SI_USEC_PER_JIFFY       (1000000/HZ)
81 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
82 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
83                                        short timeout */
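/*
 * A quick worked example of the timeout arithmetic above (illustrative
 * only, the values depend on the kernel's HZ setting): with HZ=1000 a
 * jiffy is 1000 usec, so SI_TIMEOUT_JIFFIES = 10000/1000 = 10 jiffies
 * (10 ms); with HZ=250 a jiffy is 4000 usec and the integer division
 * gives 10000/4000 = 2 jiffies (8 ms).
 */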
84
85 /* Bit for BMC global enables. */
86 #define IPMI_BMC_RCV_MSG_INTR     0x01
87 #define IPMI_BMC_EVT_MSG_INTR     0x02
88 #define IPMI_BMC_EVT_MSG_BUFF     0x04
89 #define IPMI_BMC_SYS_LOG          0x08
90
91 enum si_intf_state {
92         SI_NORMAL,
93         SI_GETTING_FLAGS,
94         SI_GETTING_EVENTS,
95         SI_CLEARING_FLAGS,
96         SI_CLEARING_FLAGS_THEN_SET_IRQ,
97         SI_GETTING_MESSAGES,
98         SI_ENABLE_INTERRUPTS1,
99         SI_ENABLE_INTERRUPTS2,
100         SI_DISABLE_INTERRUPTS1,
101         SI_DISABLE_INTERRUPTS2
102         /* FIXME - add watchdog stuff. */
103 };
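/*
 * Rough sketch of the common state flow driven by the code below (not
 * exhaustive): SI_NORMAL --attn/timeout--> SI_GETTING_FLAGS, then
 * handle_flags() picks the next state from the returned flag byte:
 * WDT_PRE_TIMEOUT_INT -> SI_CLEARING_FLAGS, RECEIVE_MSG_AVAIL ->
 * SI_GETTING_MESSAGES, EVENT_MSG_BUFFER_FULL -> SI_GETTING_EVENTS,
 * otherwise back to SI_NORMAL.  The *_INTERRUPTS1/2 pairs are the
 * two-step get/set of the BMC global enables used by
 * start_enable_irq() and start_disable_irq().
 */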
104
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG             2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
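/*
 * For example, si_bt_irq_handler() below writes
 * (IPMI_BT_INTMASK_CLEAR_IRQ_BIT | IPMI_BT_INTMASK_ENABLE_IRQ_BIT) = 0x03
 * to register offset IPMI_BT_INTMASK_REG (2) to acknowledge a BT interrupt
 * while leaving the interrupt enabled.
 */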
109
110 enum si_type {
111         SI_KCS, SI_SMIC, SI_BT
112 };
113 static char *si_to_str[] = { "kcs", "smic", "bt" };
114
115 #define DEVICE_NAME "ipmi_si"
116
117 static struct device_driver ipmi_driver =
118 {
119         .name = DEVICE_NAME,
120         .bus = &platform_bus_type
121 };
122
123 struct smi_info
124 {
125         int                    intf_num;
126         ipmi_smi_t             intf;
127         struct si_sm_data      *si_sm;
128         struct si_sm_handlers  *handlers;
129         enum si_type           si_type;
130         spinlock_t             si_lock;
131         spinlock_t             msg_lock;
132         struct list_head       xmit_msgs;
133         struct list_head       hp_xmit_msgs;
134         struct ipmi_smi_msg    *curr_msg;
135         enum si_intf_state     si_state;
136
137         /* Used to handle the various types of I/O that can occur with
138            IPMI */
139         struct si_sm_io io;
140         int (*io_setup)(struct smi_info *info);
141         void (*io_cleanup)(struct smi_info *info);
142         int (*irq_setup)(struct smi_info *info);
143         void (*irq_cleanup)(struct smi_info *info);
144         unsigned int io_size;
145         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146         void (*addr_source_cleanup)(struct smi_info *info);
147         void *addr_source_data;
148
149         /* Per-OEM handler, called from handle_flags().
150            Returns 1 when handle_flags() needs to be re-run
151            or 0 indicating it set si_state itself.
152         */
153         int (*oem_data_avail_handler)(struct smi_info *smi_info);
154
155         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
156            is set to hold the flags until we are done handling everything
157            from the flags. */
158 #define RECEIVE_MSG_AVAIL       0x01
159 #define EVENT_MSG_BUFFER_FULL   0x02
160 #define WDT_PRE_TIMEOUT_INT     0x08
161 #define OEM0_DATA_AVAIL     0x20
162 #define OEM1_DATA_AVAIL     0x40
163 #define OEM2_DATA_AVAIL     0x80
164 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
165                              OEM1_DATA_AVAIL | \
166                              OEM2_DATA_AVAIL)
167         unsigned char       msg_flags;
168
169         /* If set to true, this will request events the next time the
170            state machine is idle. */
171         atomic_t            req_events;
172
173         /* If true, run the state machine to completion on every send
174            call.  Generally used after a panic to make sure stuff goes
175            out. */
176         int                 run_to_completion;
177
178         /* The I/O port of an SI interface. */
179         int                 port;
180
181         /* The space between start addresses of the two ports.  For
182            instance, if the first port is 0xca2 and the spacing is 4, then
183            the second port is 0xca6. */
184         unsigned int        spacing;
185
186         /* zero if no irq; */
187         int                 irq;
188
189         /* The timer for this si. */
190         struct timer_list   si_timer;
191
192         /* The time (in jiffies) the last timeout occurred at. */
193         unsigned long       last_timeout_jiffies;
194
195         /* Used to gracefully stop the timer without race conditions. */
196         atomic_t            stop_operation;
197
198         /* The driver will disable interrupts when it gets into a
199            situation where it cannot handle messages due to lack of
200            memory.  Once that situation clears up, it will re-enable
201            interrupts. */
202         int interrupt_disabled;
203
204         /* From the get device id response... */
205         struct ipmi_device_id device_id;
206
207         /* Driver model stuff. */
208         struct device *dev;
209         struct platform_device *pdev;
210
211          /* True if we allocated the device, false if it came from
212           * someplace else (like PCI). */
213         int dev_registered;
214
215         /* Slave address, could be reported from DMI. */
216         unsigned char slave_addr;
217
218         /* Counters and things for the proc filesystem. */
219         spinlock_t count_lock;
220         unsigned long short_timeouts;
221         unsigned long long_timeouts;
222         unsigned long timeout_restarts;
223         unsigned long idles;
224         unsigned long interrupts;
225         unsigned long attentions;
226         unsigned long flag_fetches;
227         unsigned long hosed_count;
228         unsigned long complete_transactions;
229         unsigned long events;
230         unsigned long watchdog_pretimeouts;
231         unsigned long incoming_messages;
232
233         struct task_struct *thread;
234
235         struct list_head link;
236 };
237
238 #define SI_MAX_PARMS 4
239
240 static int force_kipmid[SI_MAX_PARMS];
241 static int num_force_kipmid;
242
243 static int unload_when_empty = 1;
244
245 static int try_smi_init(struct smi_info *smi);
246 static void cleanup_one_si(struct smi_info *to_clean);
247
248 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
249 static int register_xaction_notifier(struct notifier_block * nb)
250 {
251         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
252 }
253
254 static void deliver_recv_msg(struct smi_info *smi_info,
255                              struct ipmi_smi_msg *msg)
256 {
257         /* Deliver the message to the upper layer with the lock
258            released. */
259         spin_unlock(&(smi_info->si_lock));
260         ipmi_smi_msg_received(smi_info->intf, msg);
261         spin_lock(&(smi_info->si_lock));
262 }
263
264 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
265 {
266         struct ipmi_smi_msg *msg = smi_info->curr_msg;
267
268         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
269                 cCode = IPMI_ERR_UNSPECIFIED;
270         /* else use it as is */
271
272         /* Make it a response */
273         msg->rsp[0] = msg->data[0] | 4;
274         msg->rsp[1] = msg->data[1];
275         msg->rsp[2] = cCode;
276         msg->rsp_size = 3;
277
278         smi_info->curr_msg = NULL;
279         deliver_recv_msg(smi_info, msg);
280 }
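/*
 * Illustrative example (assumed request, not taken from the driver): if
 * the message being sent was a Get Device ID request, data = { 0x18, 0x01 }
 * (netfn IPMI_NETFN_APP_REQUEST << 2, command 0x01), then return_hosed_msg()
 * synthesizes rsp = { 0x1c, 0x01, 0xff }: the response netfn, the original
 * command, and IPMI_ERR_UNSPECIFIED as the completion code.
 */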
281
282 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
283 {
284         int              rv;
285         struct list_head *entry = NULL;
286 #ifdef DEBUG_TIMING
287         struct timeval t;
288 #endif
289
290         /* No need to save flags, we already have interrupts off and we
291            already hold the SMI lock. */
292         spin_lock(&(smi_info->msg_lock));
293
294         /* Pick the high priority queue first. */
295         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
296                 entry = smi_info->hp_xmit_msgs.next;
297         } else if (!list_empty(&(smi_info->xmit_msgs))) {
298                 entry = smi_info->xmit_msgs.next;
299         }
300
301         if (!entry) {
302                 smi_info->curr_msg = NULL;
303                 rv = SI_SM_IDLE;
304         } else {
305                 int err;
306
307                 list_del(entry);
308                 smi_info->curr_msg = list_entry(entry,
309                                                 struct ipmi_smi_msg,
310                                                 link);
311 #ifdef DEBUG_TIMING
312                 do_gettimeofday(&t);
313                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
314 #endif
315                 err = atomic_notifier_call_chain(&xaction_notifier_list,
316                                 0, smi_info);
317                 if (err & NOTIFY_STOP_MASK) {
318                         rv = SI_SM_CALL_WITHOUT_DELAY;
319                         goto out;
320                 }
321                 err = smi_info->handlers->start_transaction(
322                         smi_info->si_sm,
323                         smi_info->curr_msg->data,
324                         smi_info->curr_msg->data_size);
325                 if (err) {
326                         return_hosed_msg(smi_info, err);
327                 }
328
329                 rv = SI_SM_CALL_WITHOUT_DELAY;
330         }
331         out:
332         spin_unlock(&(smi_info->msg_lock));
333
334         return rv;
335 }
336
337 static void start_enable_irq(struct smi_info *smi_info)
338 {
339         unsigned char msg[2];
340
341         /* If we are enabling interrupts, we have to tell the
342            BMC to use them. */
343         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
344         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
345
346         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
347         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
348 }
349
350 static void start_disable_irq(struct smi_info *smi_info)
351 {
352         unsigned char msg[2];
353
354         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
355         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
356
357         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
358         smi_info->si_state = SI_DISABLE_INTERRUPTS1;
359 }
360
361 static void start_clear_flags(struct smi_info *smi_info)
362 {
363         unsigned char msg[3];
364
365         /* Make sure the watchdog pre-timeout flag is not set at startup. */
366         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
367         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
368         msg[2] = WDT_PRE_TIMEOUT_INT;
369
370         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
371         smi_info->si_state = SI_CLEARING_FLAGS;
372 }
373
374 /* When we have a situation where we run out of memory and cannot
375    allocate messages, we just leave them in the BMC and run the system
376    polled until we can allocate some memory.  Once we have some
377    memory, we will re-enable the interrupt. */
378 static inline void disable_si_irq(struct smi_info *smi_info)
379 {
380         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
381                 start_disable_irq(smi_info);
382                 smi_info->interrupt_disabled = 1;
383         }
384 }
385
386 static inline void enable_si_irq(struct smi_info *smi_info)
387 {
388         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
389                 start_enable_irq(smi_info);
390                 smi_info->interrupt_disabled = 0;
391         }
392 }
393
394 static void handle_flags(struct smi_info *smi_info)
395 {
396  retry:
397         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
398                 /* Watchdog pre-timeout */
399                 spin_lock(&smi_info->count_lock);
400                 smi_info->watchdog_pretimeouts++;
401                 spin_unlock(&smi_info->count_lock);
402
403                 start_clear_flags(smi_info);
404                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
405                 spin_unlock(&(smi_info->si_lock));
406                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
407                 spin_lock(&(smi_info->si_lock));
408         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
409                 /* Messages available. */
410                 smi_info->curr_msg = ipmi_alloc_smi_msg();
411                 if (!smi_info->curr_msg) {
412                         disable_si_irq(smi_info);
413                         smi_info->si_state = SI_NORMAL;
414                         return;
415                 }
416                 enable_si_irq(smi_info);
417
418                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
419                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
420                 smi_info->curr_msg->data_size = 2;
421
422                 smi_info->handlers->start_transaction(
423                         smi_info->si_sm,
424                         smi_info->curr_msg->data,
425                         smi_info->curr_msg->data_size);
426                 smi_info->si_state = SI_GETTING_MESSAGES;
427         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
428                 /* Events available. */
429                 smi_info->curr_msg = ipmi_alloc_smi_msg();
430                 if (!smi_info->curr_msg) {
431                         disable_si_irq(smi_info);
432                         smi_info->si_state = SI_NORMAL;
433                         return;
434                 }
435                 enable_si_irq(smi_info);
436
437                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
438                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
439                 smi_info->curr_msg->data_size = 2;
440
441                 smi_info->handlers->start_transaction(
442                         smi_info->si_sm,
443                         smi_info->curr_msg->data,
444                         smi_info->curr_msg->data_size);
445                 smi_info->si_state = SI_GETTING_EVENTS;
446         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
447                    smi_info->oem_data_avail_handler) {
448                 if (smi_info->oem_data_avail_handler(smi_info))
449                         goto retry;
450         } else {
451                 smi_info->si_state = SI_NORMAL;
452         }
453 }
454
455 static void handle_transaction_done(struct smi_info *smi_info)
456 {
457         struct ipmi_smi_msg *msg;
458 #ifdef DEBUG_TIMING
459         struct timeval t;
460
461         do_gettimeofday(&t);
462         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
463 #endif
464         switch (smi_info->si_state) {
465         case SI_NORMAL:
466                 if (!smi_info->curr_msg)
467                         break;
468
469                 smi_info->curr_msg->rsp_size
470                         = smi_info->handlers->get_result(
471                                 smi_info->si_sm,
472                                 smi_info->curr_msg->rsp,
473                                 IPMI_MAX_MSG_LENGTH);
474
475                 /* Do this here because deliver_recv_msg() releases the
476                    lock, and a new message can be put in during the
477                    time the lock is released. */
478                 msg = smi_info->curr_msg;
479                 smi_info->curr_msg = NULL;
480                 deliver_recv_msg(smi_info, msg);
481                 break;
482
483         case SI_GETTING_FLAGS:
484         {
485                 unsigned char msg[4];
486                 unsigned int  len;
487
488                 /* We got the flags from the SMI, now handle them. */
489                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
490                 if (msg[2] != 0) {
491                         /* Error fetching flags, just give up for
492                            now. */
493                         smi_info->si_state = SI_NORMAL;
494                 } else if (len < 4) {
495                         /* Hmm, no flags.  That's technically illegal, but
496                            don't use uninitialized data. */
497                         smi_info->si_state = SI_NORMAL;
498                 } else {
499                         smi_info->msg_flags = msg[3];
500                         handle_flags(smi_info);
501                 }
502                 break;
503         }
504
505         case SI_CLEARING_FLAGS:
506         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
507         {
508                 unsigned char msg[3];
509
510                 /* We cleared the flags. */
511                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
512                 if (msg[2] != 0) {
513                         /* Error clearing flags */
514                         printk(KERN_WARNING
515                                "ipmi_si: Error clearing flags: %2.2x\n",
516                                msg[2]);
517                 }
518                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
519                         start_enable_irq(smi_info);
520                 else
521                         smi_info->si_state = SI_NORMAL;
522                 break;
523         }
524
525         case SI_GETTING_EVENTS:
526         {
527                 smi_info->curr_msg->rsp_size
528                         = smi_info->handlers->get_result(
529                                 smi_info->si_sm,
530                                 smi_info->curr_msg->rsp,
531                                 IPMI_MAX_MSG_LENGTH);
532
533                 /* Do this here because deliver_recv_msg() releases the
534                    lock, and a new message can be put in during the
535                    time the lock is released. */
536                 msg = smi_info->curr_msg;
537                 smi_info->curr_msg = NULL;
538                 if (msg->rsp[2] != 0) {
539                         /* Error getting event, probably done. */
540                         msg->done(msg);
541
542                         /* Take off the event flag. */
543                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
544                         handle_flags(smi_info);
545                 } else {
546                         spin_lock(&smi_info->count_lock);
547                         smi_info->events++;
548                         spin_unlock(&smi_info->count_lock);
549
550                         /* Do this before we deliver the message
551                            because delivering the message releases the
552                            lock and something else can mess with the
553                            state. */
554                         handle_flags(smi_info);
555
556                         deliver_recv_msg(smi_info, msg);
557                 }
558                 break;
559         }
560
561         case SI_GETTING_MESSAGES:
562         {
563                 smi_info->curr_msg->rsp_size
564                         = smi_info->handlers->get_result(
565                                 smi_info->si_sm,
566                                 smi_info->curr_msg->rsp,
567                                 IPMI_MAX_MSG_LENGTH);
568
569                 /* Do this here because deliver_recv_msg() releases the
570                    lock, and a new message can be put in during the
571                    time the lock is released. */
572                 msg = smi_info->curr_msg;
573                 smi_info->curr_msg = NULL;
574                 if (msg->rsp[2] != 0) {
575                         /* Error getting the message, probably done. */
576                         msg->done(msg);
577
578                         /* Take off the msg flag. */
579                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
580                         handle_flags(smi_info);
581                 } else {
582                         spin_lock(&smi_info->count_lock);
583                         smi_info->incoming_messages++;
584                         spin_unlock(&smi_info->count_lock);
585
586                         /* Do this before we deliver the message
587                            because delivering the message releases the
588                            lock and something else can mess with the
589                            state. */
590                         handle_flags(smi_info);
591
592                         deliver_recv_msg(smi_info, msg);
593                 }
594                 break;
595         }
596
597         case SI_ENABLE_INTERRUPTS1:
598         {
599                 unsigned char msg[4];
600
601                 /* We got the flags from the SMI, now handle them. */
602                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
603                 if (msg[2] != 0) {
604                         printk(KERN_WARNING
605                                "ipmi_si: Could not enable interrupts"
606                                ", failed get, using polled mode.\n");
607                         smi_info->si_state = SI_NORMAL;
608                 } else {
609                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
610                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
611                         msg[2] = (msg[3] |
612                                   IPMI_BMC_RCV_MSG_INTR |
613                                   IPMI_BMC_EVT_MSG_INTR);
614                         smi_info->handlers->start_transaction(
615                                 smi_info->si_sm, msg, 3);
616                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
617                 }
618                 break;
619         }
620
621         case SI_ENABLE_INTERRUPTS2:
622         {
623                 unsigned char msg[4];
624
625                 /* We got the flags from the SMI, now handle them. */
626                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
627                 if (msg[2] != 0) {
628                         printk(KERN_WARNING
629                                "ipmi_si: Could not enable interrupts"
630                                ", failed set, using polled mode.\n");
631                 }
632                 smi_info->si_state = SI_NORMAL;
633                 break;
634         }
635
636         case SI_DISABLE_INTERRUPTS1:
637         {
638                 unsigned char msg[4];
639
640                 /* We got the flags from the SMI, now handle them. */
641                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
642                 if (msg[2] != 0) {
643                         printk(KERN_WARNING
644                                "ipmi_si: Could not disable interrupts"
645                                ", failed get.\n");
646                         smi_info->si_state = SI_NORMAL;
647                 } else {
648                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
649                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
650                         msg[2] = (msg[3] &
651                                   ~(IPMI_BMC_RCV_MSG_INTR |
652                                     IPMI_BMC_EVT_MSG_INTR));
653                         smi_info->handlers->start_transaction(
654                                 smi_info->si_sm, msg, 3);
655                         smi_info->si_state = SI_DISABLE_INTERRUPTS2;
656                 }
657                 break;
658         }
659
660         case SI_DISABLE_INTERRUPTS2:
661         {
662                 unsigned char msg[4];
663
664                 /* We got the flags from the SMI, now handle them. */
665                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
666                 if (msg[2] != 0) {
667                         printk(KERN_WARNING
668                                "ipmi_si: Could not disable interrupts"
669                                ", failed set.\n");
670                 }
671                 smi_info->si_state = SI_NORMAL;
672                 break;
673         }
674         }
675 }
676
677 /* Called on timeouts and events.  Timeouts should pass the elapsed
678    time, interrupts should pass in zero.  Must be called with
679    si_lock held and interrupts disabled. */
680 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
681                                            int time)
682 {
683         enum si_sm_result si_sm_result;
684
685  restart:
686         /* There used to be a loop here that waited a little while
687            (around 25us) before giving up.  That turned out to be
688            pointless, the minimum delays I was seeing were in the 300us
689            range, which is far too long to wait in an interrupt.  So
690            we just run until the state machine tells us something
691            happened or it needs a delay. */
692         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
693         time = 0;
694         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
695         {
696                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
697         }
698
699         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
700         {
701                 spin_lock(&smi_info->count_lock);
702                 smi_info->complete_transactions++;
703                 spin_unlock(&smi_info->count_lock);
704
705                 handle_transaction_done(smi_info);
706                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
707         }
708         else if (si_sm_result == SI_SM_HOSED)
709         {
710                 spin_lock(&smi_info->count_lock);
711                 smi_info->hosed_count++;
712                 spin_unlock(&smi_info->count_lock);
713
714                 /* Do this before return_hosed_msg(), because that
715                    releases the lock. */
716                 smi_info->si_state = SI_NORMAL;
717                 if (smi_info->curr_msg != NULL) {
718                         /* If we were handling a user message, format
719                            a response to send to the upper layer to
720                            tell it about the error. */
721                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
722                 }
723                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
724         }
725
726         /* We prefer handling attn over new messages. */
727         if (si_sm_result == SI_SM_ATTN)
728         {
729                 unsigned char msg[2];
730
731                 spin_lock(&smi_info->count_lock);
732                 smi_info->attentions++;
733                 spin_unlock(&smi_info->count_lock);
734
735                 /* Got an attn, send down a get message flags command to see
736                    what's causing it.  It would be better to handle
737                    this in the upper layer, but due to the way
738                    interrupts work with the SMI, that's not really
739                    possible. */
740                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
741                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
742
743                 smi_info->handlers->start_transaction(
744                         smi_info->si_sm, msg, 2);
745                 smi_info->si_state = SI_GETTING_FLAGS;
746                 goto restart;
747         }
748
749         /* If we are currently idle, try to start the next message. */
750         if (si_sm_result == SI_SM_IDLE) {
751                 spin_lock(&smi_info->count_lock);
752                 smi_info->idles++;
753                 spin_unlock(&smi_info->count_lock);
754
755                 si_sm_result = start_next_msg(smi_info);
756                 if (si_sm_result != SI_SM_IDLE)
757                         goto restart;
758         }
759
760         if ((si_sm_result == SI_SM_IDLE)
761             && (atomic_read(&smi_info->req_events)))
762         {
763                 /* We are idle and the upper layer requested that we fetch
764                    events, so do so. */
765                 atomic_set(&smi_info->req_events, 0);
766
767                 smi_info->curr_msg = ipmi_alloc_smi_msg();
768                 if (!smi_info->curr_msg)
769                         goto out;
770
771                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
772                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
773                 smi_info->curr_msg->data_size = 2;
774
775                 smi_info->handlers->start_transaction(
776                         smi_info->si_sm,
777                         smi_info->curr_msg->data,
778                         smi_info->curr_msg->data_size);
779                 smi_info->si_state = SI_GETTING_EVENTS;
780                 goto restart;
781         }
782  out:
783         return si_sm_result;
784 }
785
786 static void sender(void                *send_info,
787                    struct ipmi_smi_msg *msg,
788                    int                 priority)
789 {
790         struct smi_info   *smi_info = send_info;
791         enum si_sm_result result;
792         unsigned long     flags;
793 #ifdef DEBUG_TIMING
794         struct timeval    t;
795 #endif
796
797         if (atomic_read(&smi_info->stop_operation)) {
798                 msg->rsp[0] = msg->data[0] | 4;
799                 msg->rsp[1] = msg->data[1];
800                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
801                 msg->rsp_size = 3;
802                 deliver_recv_msg(smi_info, msg);
803                 return;
804         }
805
806         spin_lock_irqsave(&(smi_info->msg_lock), flags);
807 #ifdef DEBUG_TIMING
808         do_gettimeofday(&t);
809         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
810 #endif
811
812         if (smi_info->run_to_completion) {
813                 /* If we are running to completion, then throw it in
814                    the list and run transactions until everything is
815                    clear.  Priority doesn't matter here. */
816                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
817
818                 /* We have to release the msg lock and claim the smi
819                    lock in this case, because of race conditions. */
820                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
821
822                 spin_lock_irqsave(&(smi_info->si_lock), flags);
823                 result = smi_event_handler(smi_info, 0);
824                 while (result != SI_SM_IDLE) {
825                         udelay(SI_SHORT_TIMEOUT_USEC);
826                         result = smi_event_handler(smi_info,
827                                                    SI_SHORT_TIMEOUT_USEC);
828                 }
829                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
830                 return;
831         } else {
832                 if (priority > 0) {
833                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
834                 } else {
835                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
836                 }
837         }
838         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
839
840         spin_lock_irqsave(&(smi_info->si_lock), flags);
841         if ((smi_info->si_state == SI_NORMAL)
842             && (smi_info->curr_msg == NULL))
843         {
844                 start_next_msg(smi_info);
845         }
846         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
847 }
848
849 static void set_run_to_completion(void *send_info, int i_run_to_completion)
850 {
851         struct smi_info   *smi_info = send_info;
852         enum si_sm_result result;
853         unsigned long     flags;
854
855         spin_lock_irqsave(&(smi_info->si_lock), flags);
856
857         smi_info->run_to_completion = i_run_to_completion;
858         if (i_run_to_completion) {
859                 result = smi_event_handler(smi_info, 0);
860                 while (result != SI_SM_IDLE) {
861                         udelay(SI_SHORT_TIMEOUT_USEC);
862                         result = smi_event_handler(smi_info,
863                                                    SI_SHORT_TIMEOUT_USEC);
864                 }
865         }
866
867         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
868 }
869
870 static int ipmi_thread(void *data)
871 {
872         struct smi_info *smi_info = data;
873         unsigned long flags;
874         enum si_sm_result smi_result;
875
876         set_user_nice(current, 19);
877         while (!kthread_should_stop()) {
878                 spin_lock_irqsave(&(smi_info->si_lock), flags);
879                 smi_result = smi_event_handler(smi_info, 0);
880                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
881                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
882                         /* do nothing */
883                 }
884                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
885                         schedule();
886                 else
887                         schedule_timeout_interruptible(1);
888         }
889         return 0;
890 }
891
892
893 static void poll(void *send_info)
894 {
895         struct smi_info *smi_info = send_info;
896         unsigned long flags;
897
898         /*
899          * Make sure there is some delay in the poll loop so we can
900          * drive time forward and timeout things.
901          */
902         udelay(10);
903         spin_lock_irqsave(&smi_info->si_lock, flags);
904         smi_event_handler(smi_info, 10);
905         spin_unlock_irqrestore(&smi_info->si_lock, flags);
906 }
907
908 static void request_events(void *send_info)
909 {
910         struct smi_info *smi_info = send_info;
911
912         if (atomic_read(&smi_info->stop_operation))
913                 return;
914
915         atomic_set(&smi_info->req_events, 1);
916 }
917
918 static int initialized;
919
920 static void smi_timeout(unsigned long data)
921 {
922         struct smi_info   *smi_info = (struct smi_info *) data;
923         enum si_sm_result smi_result;
924         unsigned long     flags;
925         unsigned long     jiffies_now;
926         long              time_diff;
927 #ifdef DEBUG_TIMING
928         struct timeval    t;
929 #endif
930
931         spin_lock_irqsave(&(smi_info->si_lock), flags);
932 #ifdef DEBUG_TIMING
933         do_gettimeofday(&t);
934         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
935 #endif
936         jiffies_now = jiffies;
937         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
938                      * SI_USEC_PER_JIFFY);
939         smi_result = smi_event_handler(smi_info, time_diff);
940
941         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
942
943         smi_info->last_timeout_jiffies = jiffies_now;
944
945         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
946                 /* Running with interrupts, only do long timeouts. */
947                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
948                 spin_lock_irqsave(&smi_info->count_lock, flags);
949                 smi_info->long_timeouts++;
950                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
951                 goto do_add_timer;
952         }
953
954         /* If the state machine asks for a short delay, then shorten
955            the timer timeout. */
956         if (smi_result == SI_SM_CALL_WITH_DELAY) {
957                 spin_lock_irqsave(&smi_info->count_lock, flags);
958                 smi_info->short_timeouts++;
959                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
960                 smi_info->si_timer.expires = jiffies + 1;
961         } else {
962                 spin_lock_irqsave(&smi_info->count_lock, flags);
963                 smi_info->long_timeouts++;
964                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
965                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
966         }
967
968  do_add_timer:
969         add_timer(&(smi_info->si_timer));
970 }
971
972 static irqreturn_t si_irq_handler(int irq, void *data)
973 {
974         struct smi_info *smi_info = data;
975         unsigned long   flags;
976 #ifdef DEBUG_TIMING
977         struct timeval  t;
978 #endif
979
980         spin_lock_irqsave(&(smi_info->si_lock), flags);
981
982         spin_lock(&smi_info->count_lock);
983         smi_info->interrupts++;
984         spin_unlock(&smi_info->count_lock);
985
986 #ifdef DEBUG_TIMING
987         do_gettimeofday(&t);
988         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
989 #endif
990         smi_event_handler(smi_info, 0);
991         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
992         return IRQ_HANDLED;
993 }
994
995 static irqreturn_t si_bt_irq_handler(int irq, void *data)
996 {
997         struct smi_info *smi_info = data;
998         /* We need to clear the IRQ flag for the BT interface. */
999         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
1000                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
1001                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1002         return si_irq_handler(irq, data);
1003 }
1004
1005 static int smi_start_processing(void       *send_info,
1006                                 ipmi_smi_t intf)
1007 {
1008         struct smi_info *new_smi = send_info;
1009         int             enable = 0;
1010
1011         new_smi->intf = intf;
1012
1013         /* Try to claim any interrupts. */
1014         if (new_smi->irq_setup)
1015                 new_smi->irq_setup(new_smi);
1016
1017         /* Set up the timer that drives the interface. */
1018         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1019         new_smi->last_timeout_jiffies = jiffies;
1020         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1021
1022         /*
1023          * Check if the user forcefully enabled the daemon.
1024          */
1025         if (new_smi->intf_num < num_force_kipmid)
1026                 enable = force_kipmid[new_smi->intf_num];
1027         /*
1028          * The BT interface is efficient enough to not need a thread,
1029          * and there is no need for a thread if we have interrupts.
1030          */
1031         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1032                 enable = 1;
1033
1034         if (enable) {
1035                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1036                                               "kipmi%d", new_smi->intf_num);
1037                 if (IS_ERR(new_smi->thread)) {
1038                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
1039                                " kernel thread due to error %ld, only using"
1040                                " timers to drive the interface\n",
1041                                PTR_ERR(new_smi->thread));
1042                         new_smi->thread = NULL;
1043                 }
1044         }
1045
1046         return 0;
1047 }
1048
1049 static void set_maintenance_mode(void *send_info, int enable)
1050 {
1051         struct smi_info   *smi_info = send_info;
1052
1053         if (!enable)
1054                 atomic_set(&smi_info->req_events, 0);
1055 }
1056
1057 static struct ipmi_smi_handlers handlers =
1058 {
1059         .owner                  = THIS_MODULE,
1060         .start_processing       = smi_start_processing,
1061         .sender                 = sender,
1062         .request_events         = request_events,
1063         .set_maintenance_mode   = set_maintenance_mode,
1064         .set_run_to_completion  = set_run_to_completion,
1065         .poll                   = poll,
1066 };
1067
1068 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1069    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
1070
1071 static LIST_HEAD(smi_infos);
1072 static DEFINE_MUTEX(smi_infos_lock);
1073 static int smi_num; /* Used to sequence the SMIs */
1074
1075 #define DEFAULT_REGSPACING      1
1076 #define DEFAULT_REGSIZE         1
1077
1078 static int           si_trydefaults = 1;
1079 static char          *si_type[SI_MAX_PARMS];
1080 #define MAX_SI_TYPE_STR 30
1081 static char          si_type_str[MAX_SI_TYPE_STR];
1082 static unsigned long addrs[SI_MAX_PARMS];
1083 static unsigned int num_addrs;
1084 static unsigned int  ports[SI_MAX_PARMS];
1085 static unsigned int num_ports;
1086 static int           irqs[SI_MAX_PARMS];
1087 static unsigned int num_irqs;
1088 static int           regspacings[SI_MAX_PARMS];
1089 static unsigned int num_regspacings;
1090 static int           regsizes[SI_MAX_PARMS];
1091 static unsigned int num_regsizes;
1092 static int           regshifts[SI_MAX_PARMS];
1093 static unsigned int num_regshifts;
1094 static int slave_addrs[SI_MAX_PARMS];
1095 static unsigned int num_slave_addrs;
1096
1097 #define IPMI_IO_ADDR_SPACE  0
1098 #define IPMI_MEM_ADDR_SPACE 1
1099 static char *addr_space_to_str[] = { "i/o", "mem" };
1100
1101 static int hotmod_handler(const char *val, struct kernel_param *kp);
1102
1103 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1104 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1105                  " Documentation/IPMI.txt in the kernel sources for the"
1106                  " gory details.");
1107
1108 module_param_named(trydefaults, si_trydefaults, bool, 0);
1109 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1110                  " default scan of the KCS and SMIC interfaces at the standard"
1111                  " addresses");
1112 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1113 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1114                  " interface separated by commas.  The types are 'kcs',"
1115                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1116                  " the first interface to kcs and the second to bt");
1117 module_param_array(addrs, ulong, &num_addrs, 0);
1118 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1119                  " addresses separated by commas.  Only use if an interface"
1120                  " is in memory.  Otherwise, set it to zero or leave"
1121                  " it blank.");
1122 module_param_array(ports, uint, &num_ports, 0);
1123 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1124                  " addresses separated by commas.  Only use if an interface"
1125                  " is a port.  Otherwise, set it to zero or leave"
1126                  " it blank.");
1127 module_param_array(irqs, int, &num_irqs, 0);
1128 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1129                  " IRQs separated by commas.  Only use if an interface"
1130                  " has an interrupt.  Otherwise, set it to zero or leave"
1131                  " it blank.");
1132 module_param_array(regspacings, int, &num_regspacings, 0);
1133 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1134                  " and each successive register used by the interface.  For"
1135                  " instance, if the start address is 0xca2 and the spacing"
1136                  " is 2, then the second address is at 0xca4.  Defaults"
1137                  " to 1.");
1138 module_param_array(regsizes, int, &num_regsizes, 0);
1139 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1140                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1141                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1142                  " the 8-bit IPMI register has to be read from a larger"
1143                  " register.");
1144 module_param_array(regshifts, int, &num_regshifts, 0);
1145 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1146                  " IPMI register, in bits.  For instance, if the data"
1147                  " is read from a 32-bit word and the IPMI data is in"
1148                  " bits 8-15, then the shift would be 8.");
1149 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1150 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1151                  " the controller.  Normally this is 0x20, but can be"
1152                  " overridden by this parm.  This is an array indexed"
1153                  " by interface number.");
1154 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1155 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1156                  " disabled (0).  Normally the IPMI driver auto-detects"
1157                  " this, but the value may be overridden by this parm.");
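/*
 * Example (illustrative values): loading the module with force_kipmid=0,1
 * forces the kipmid kernel thread off for the first interface and on for
 * the second; interfaces beyond the list keep the auto-detected default.
 */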
1158 module_param(unload_when_empty, int, 0);
1159 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1160                  " specified or found, default is 1.  Setting to 0"
1161                  " is useful for hot add of devices using hotmod.");
1162
1163
1164 static void std_irq_cleanup(struct smi_info *info)
1165 {
1166         if (info->si_type == SI_BT)
1167                 /* Disable the interrupt in the BT interface. */
1168                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1169         free_irq(info->irq, info);
1170 }
1171
1172 static int std_irq_setup(struct smi_info *info)
1173 {
1174         int rv;
1175
1176         if (!info->irq)
1177                 return 0;
1178
1179         if (info->si_type == SI_BT) {
1180                 rv = request_irq(info->irq,
1181                                  si_bt_irq_handler,
1182                                  IRQF_SHARED | IRQF_DISABLED,
1183                                  DEVICE_NAME,
1184                                  info);
1185                 if (!rv)
1186                         /* Enable the interrupt in the BT interface. */
1187                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1188                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1189         } else
1190                 rv = request_irq(info->irq,
1191                                  si_irq_handler,
1192                                  IRQF_SHARED | IRQF_DISABLED,
1193                                  DEVICE_NAME,
1194                                  info);
1195         if (rv) {
1196                 printk(KERN_WARNING
1197                        "ipmi_si: %s unable to claim interrupt %d,"
1198                        " running polled\n",
1199                        DEVICE_NAME, info->irq);
1200                 info->irq = 0;
1201         } else {
1202                 info->irq_cleanup = std_irq_cleanup;
1203                 printk("  Using irq %d\n", info->irq);
1204         }
1205
1206         return rv;
1207 }
1208
1209 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1210 {
1211         unsigned int addr = io->addr_data;
1212
1213         return inb(addr + (offset * io->regspacing));
1214 }
1215
1216 static void port_outb(struct si_sm_io *io, unsigned int offset,
1217                       unsigned char b)
1218 {
1219         unsigned int addr = io->addr_data;
1220
1221         outb(b, addr + (offset * io->regspacing));
1222 }
1223
1224 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1225 {
1226         unsigned int addr = io->addr_data;
1227
1228         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1229 }
1230
1231 static void port_outw(struct si_sm_io *io, unsigned int offset,
1232                       unsigned char b)
1233 {
1234         unsigned int addr = io->addr_data;
1235
1236         outw(b << io->regshift, addr + (offset * io->regspacing));
1237 }
1238
1239 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1240 {
1241         unsigned int addr = io->addr_data;
1242
1243         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1244 }
1245
1246 static void port_outl(struct si_sm_io *io, unsigned int offset,
1247                       unsigned char b)
1248 {
1249         unsigned int addr = io->addr_data;
1250
1251         outl(b << io->regshift, addr+(offset * io->regspacing));
1252 }
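/*
 * Example of how the port accessors above combine the parameters
 * (hypothetical values): with a base port of 0xca2, regspacing=4 and
 * regshift=8, a read of register offset 1 through port_inl() becomes
 * (inl(0xca2 + 1 * 4) >> 8) & 0xff, i.e. the IPMI byte sits in bits
 * 8..15 of the 32-bit word at 0xca6.
 */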
1253
1254 static void port_cleanup(struct smi_info *info)
1255 {
1256         unsigned int addr = info->io.addr_data;
1257         int          idx;
1258
1259         if (addr) {
1260                 for (idx = 0; idx < info->io_size; idx++) {
1261                         release_region(addr + idx * info->io.regspacing,
1262                                        info->io.regsize);
1263                 }
1264         }
1265 }
1266
1267 static int port_setup(struct smi_info *info)
1268 {
1269         unsigned int addr = info->io.addr_data;
1270         int          idx;
1271
1272         if (!addr)
1273                 return -ENODEV;
1274
1275         info->io_cleanup = port_cleanup;
1276
1277         /* Figure out the actual inb/inw/inl/etc routine to use based
1278            upon the register size. */
1279         switch (info->io.regsize) {
1280         case 1:
1281                 info->io.inputb = port_inb;
1282                 info->io.outputb = port_outb;
1283                 break;
1284         case 2:
1285                 info->io.inputb = port_inw;
1286                 info->io.outputb = port_outw;
1287                 break;
1288         case 4:
1289                 info->io.inputb = port_inl;
1290                 info->io.outputb = port_outl;
1291                 break;
1292         default:
1293                 printk("ipmi_si: Invalid register size: %d\n",
1294                        info->io.regsize);
1295                 return -EINVAL;
1296         }
1297
1298         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1299          * tables.  This causes problems when trying to register the
1300          * entire I/O region.  Therefore we must register each I/O
1301          * port separately.
1302          */
1303         for (idx = 0; idx < info->io_size; idx++) {
1304                 if (request_region(addr + idx * info->io.regspacing,
1305                                    info->io.regsize, DEVICE_NAME) == NULL) {
1306                         /* Undo allocations */
1307                         while (idx--) {
1308                                 release_region(addr + idx * info->io.regspacing,
1309                                                info->io.regsize);
1310                         }
1311                         return -EIO;
1312                 }
1313         }
1314         return 0;
1315 }
1316
1317 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1318 {
1319         return readb((io->addr)+(offset * io->regspacing));
1320 }
1321
1322 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1323                      unsigned char b)
1324 {
1325         writeb(b, (io->addr)+(offset * io->regspacing));
1326 }
1327
1328 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1329 {
1330         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1331                 & 0xff;
1332 }
1333
1334 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1335                      unsigned char b)
1336 {
1337         writew(b << io->regshift, (io->addr)+(offset * io->regspacing));
1338 }
1339
1340 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1341 {
1342         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1343                 & 0xff;
1344 }
1345
1346 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1347                      unsigned char b)
1348 {
1349         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1350 }
1351
1352 #ifdef readq
1353 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1354 {
1355         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1356                 & 0xff;
1357 }
1358
1359 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1360                      unsigned char b)
1361 {
1362         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1363 }
1364 #endif
1365
1366 static void mem_cleanup(struct smi_info *info)
1367 {
1368         unsigned long addr = info->io.addr_data;
1369         int           mapsize;
1370
1371         if (info->io.addr) {
1372                 iounmap(info->io.addr);
1373
1374                 mapsize = ((info->io_size * info->io.regspacing)
1375                            - (info->io.regspacing - info->io.regsize));
1376
1377                 release_mem_region(addr, mapsize);
1378         }
1379 }
1380
1381 static int mem_setup(struct smi_info *info)
1382 {
1383         unsigned long addr = info->io.addr_data;
1384         int           mapsize;
1385
1386         if (!addr)
1387                 return -ENODEV;
1388
1389         info->io_cleanup = mem_cleanup;
1390
1391         /* Figure out the actual readb/readw/readl/etc routine to use based
1392            upon the register size. */
1393         switch (info->io.regsize) {
1394         case 1:
1395                 info->io.inputb = intf_mem_inb;
1396                 info->io.outputb = intf_mem_outb;
1397                 break;
1398         case 2:
1399                 info->io.inputb = intf_mem_inw;
1400                 info->io.outputb = intf_mem_outw;
1401                 break;
1402         case 4:
1403                 info->io.inputb = intf_mem_inl;
1404                 info->io.outputb = intf_mem_outl;
1405                 break;
1406 #ifdef readq
1407         case 8:
1408                 info->io.inputb = mem_inq;
1409                 info->io.outputb = mem_outq;
1410                 break;
1411 #endif
1412         default:
1413                 printk("ipmi_si: Invalid register size: %d\n",
1414                        info->io.regsize);
1415                 return -EINVAL;
1416         }
1417
1418         /* Calculate the total amount of memory to claim.  This is an
1419          * unusual looking calculation, but it avoids claiming any
1420          * more memory than it has to.  It will claim everything
1421          * between the first address to the end of the last full
1422          * register. */
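        /*
         * Worked example (illustrative numbers only): with io_size = 2,
         * regspacing = 4 and regsize = 1, mapsize = (2 * 4) - (4 - 1)
         * = 5 bytes, covering the first register through the end of the
         * last one but not the padding that follows it.
         */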
1423         mapsize = ((info->io_size * info->io.regspacing)
1424                    - (info->io.regspacing - info->io.regsize));
1425
1426         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1427                 return -EIO;
1428
1429         info->io.addr = ioremap(addr, mapsize);
1430         if (info->io.addr == NULL) {
1431                 release_mem_region(addr, mapsize);
1432                 return -EIO;
1433         }
1434         return 0;
1435 }
1436
1437 /*
1438  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1439  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1440  * Options are:
1441  *   rsp=<regspacing>
1442  *   rsi=<regsize>
1443  *   rsh=<regshift>
1444  *   irq=<irq>
1445  *   ipmb=<ipmb addr>
1446  */
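/*
 * For example (values purely illustrative), a KCS interface at I/O port
 * 0xca2 could be added at run time with something like:
 *
 *   echo "add,kcs,i/o,0xca2,rsp=1,irq=0" > /sys/module/ipmi_si/parameters/hotmod
 *
 * assuming the hotmod parameter is exposed at its usual sysfs path.
 */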
1447 enum hotmod_op { HM_ADD, HM_REMOVE };
1448 struct hotmod_vals {
1449         char *name;
1450         int  val;
1451 };
1452 static struct hotmod_vals hotmod_ops[] = {
1453         { "add",        HM_ADD },
1454         { "remove",     HM_REMOVE },
1455         { NULL }
1456 };
1457 static struct hotmod_vals hotmod_si[] = {
1458         { "kcs",        SI_KCS },
1459         { "smic",       SI_SMIC },
1460         { "bt",         SI_BT },
1461         { NULL }
1462 };
1463 static struct hotmod_vals hotmod_as[] = {
1464         { "mem",        IPMI_MEM_ADDR_SPACE },
1465         { "i/o",        IPMI_IO_ADDR_SPACE },
1466         { NULL }
1467 };
1468
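/*
 * Pull the next comma-terminated token off *curr, look it up in the
 * hotmod_vals table v, and on a match store the table value in *val and
 * advance *curr past the token.  Returns 0 on success or -EINVAL if the
 * token is missing or not recognized.
 */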
1469 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1470 {
1471         char *s;
1472         int  i;
1473
1474         s = strchr(*curr, ',');
1475         if (!s) {
1476                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1477                 return -EINVAL;
1478         }
1479         *s = '\0';
1480         s++;
1481         for (i = 0; v[i].name; i++) {
1482                 if (strcmp(*curr, v[i].name) == 0) {
1483                         *val = v[i].val;
1484                         *curr = s;
1485                         return 0;
1486                 }
1487         }
1488
1489         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1490         return -EINVAL;
1491 }
1492
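/*
 * Match one integer "name=value" hotmod option.  Returns 1 and fills in
 * *val if curr names this option, 0 if curr is some other option, and
 * -EINVAL if the value is missing or not a valid number.
 */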
1493 static int check_hotmod_int_op(const char *curr, const char *option,
1494                                const char *name, int *val)
1495 {
1496         char *n;
1497
1498         if (strcmp(curr, name) == 0) {
1499                 if (!option) {
1500                         printk(KERN_WARNING PFX
1501                                "No option given for '%s'\n",
1502                                curr);
1503                         return -EINVAL;
1504                 }
1505                 *val = simple_strtoul(option, &n, 0);
1506                 if ((*n != '\0') || (*option == '\0')) {
1507                         printk(KERN_WARNING PFX
1508                                "Bad option given for '%s'\n",
1509                                curr);
1510                         return -EINVAL;
1511                 }
1512                 return 1;
1513         }
1514         return 0;
1515 }
1516
1517 static int hotmod_handler(const char *val, struct kernel_param *kp)
1518 {
1519         char *str = kstrdup(val, GFP_KERNEL);
1520         int  rv;
1521         char *next, *curr, *s, *n, *o;
1522         enum hotmod_op op;
1523         enum si_type si_type;
1524         int  addr_space;
1525         unsigned long addr;
1526         int regspacing;
1527         int regsize;
1528         int regshift;
1529         int irq;
1530         int ipmb;
1531         int ival;
1532         int len;
1533         struct smi_info *info;
1534
1535         if (!str)
1536                 return -ENOMEM;
1537
1538         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1539         len = strlen(str);
1540         ival = len - 1;
1541         while ((ival >= 0) && isspace(str[ival])) {
1542                 str[ival] = '\0';
1543                 ival--;
1544         }
1545
1546         for (curr = str; curr; curr = next) {
1547                 regspacing = 1;
1548                 regsize = 1;
1549                 regshift = 0;
1550                 irq = 0;
1551                 ipmb = 0x20;
1552
1553                 next = strchr(curr, ':');
1554                 if (next) {
1555                         *next = '\0';
1556                         next++;
1557                 }
1558
1559                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1560                 if (rv)
1561                         break;
1562                 op = ival;
1563
1564                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1565                 if (rv)
1566                         break;
1567                 si_type = ival;
1568
1569                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1570                 if (rv)
1571                         break;
1572
1573                 s = strchr(curr, ',');
1574                 if (s) {
1575                         *s = '\0';
1576                         s++;
1577                 }
1578                 addr = simple_strtoul(curr, &n, 0);
1579                 if ((*n != '\0') || (*curr == '\0')) {
1580                         printk(KERN_WARNING PFX "Invalid hotmod address"
1581                                " '%s'\n", curr);
1582                         break;
1583                 }
1584
1585                 while (s) {
1586                         curr = s;
1587                         s = strchr(curr, ',');
1588                         if (s) {
1589                                 *s = '\0';
1590                                 s++;
1591                         }
1592                         o = strchr(curr, '=');
1593                         if (o) {
1594                                 *o = '\0';
1595                                 o++;
1596                         }
1597                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1598                         if (rv < 0)
1599                                 goto out;
1600                         else if (rv)
1601                                 continue;
1602                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1603                         if (rv < 0)
1604                                 goto out;
1605                         else if (rv)
1606                                 continue;
1607                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1608                         if (rv < 0)
1609                                 goto out;
1610                         else if (rv)
1611                                 continue;
1612                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1613                         if (rv < 0)
1614                                 goto out;
1615                         else if (rv)
1616                                 continue;
1617                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1618                         if (rv < 0)
1619                                 goto out;
1620                         else if (rv)
1621                                 continue;
1622
1623                         rv = -EINVAL;
1624                         printk(KERN_WARNING PFX
1625                                "Invalid hotmod option '%s'\n",
1626                                curr);
1627                         goto out;
1628                 }
1629
1630                 if (op == HM_ADD) {
1631                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1632                         if (!info) {
1633                                 rv = -ENOMEM;
1634                                 goto out;
1635                         }
1636
1637                         info->addr_source = "hotmod";
1638                         info->si_type = si_type;
1639                         info->io.addr_data = addr;
1640                         info->io.addr_type = addr_space;
1641                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1642                                 info->io_setup = mem_setup;
1643                         else
1644                                 info->io_setup = port_setup;
1645
1646                         info->io.addr = NULL;
1647                         info->io.regspacing = regspacing;
1648                         if (!info->io.regspacing)
1649                                 info->io.regspacing = DEFAULT_REGSPACING;
1650                         info->io.regsize = regsize;
1651                         if (!info->io.regsize)
1652                                 info->io.regsize = DEFAULT_REGSPACING;
1653                         info->io.regshift = regshift;
1654                         info->irq = irq;
1655                         if (info->irq)
1656                                 info->irq_setup = std_irq_setup;
1657                         info->slave_addr = ipmb;
1658
1659                         try_smi_init(info);
1660                 } else {
1661                         /* remove */
1662                         struct smi_info *e, *tmp_e;
1663
1664                         mutex_lock(&smi_infos_lock);
1665                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1666                                 if (e->io.addr_type != addr_space)
1667                                         continue;
1668                                 if (e->si_type != si_type)
1669                                         continue;
1670                                 if (e->io.addr_data == addr)
1671                                         cleanup_one_si(e);
1672                         }
1673                         mutex_unlock(&smi_infos_lock);
1674                 }
1675         }
1676         rv = len;
1677  out:
1678         kfree(str);
1679         return rv;
1680 }
1681
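/*
 * Register any interfaces described purely by module parameters.  As an
 * illustrative example (parameter names assumed from their declarations
 * earlier in this file), booting with
 *
 *   ipmi_si.type=kcs ipmi_si.ports=0xca2
 *
 * should cause this routine to set up a KCS interface at I/O port 0xca2.
 */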
1682 static __devinit void hardcode_find_bmc(void)
1683 {
1684         int             i;
1685         struct smi_info *info;
1686
1687         for (i = 0; i < SI_MAX_PARMS; i++) {
1688                 if (!ports[i] && !addrs[i])
1689                         continue;
1690
1691                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1692                 if (!info)
1693                         return;
1694
1695                 info->addr_source = "hardcoded";
1696
1697                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1698                         info->si_type = SI_KCS;
1699                 } else if (strcmp(si_type[i], "smic") == 0) {
1700                         info->si_type = SI_SMIC;
1701                 } else if (strcmp(si_type[i], "bt") == 0) {
1702                         info->si_type = SI_BT;
1703                 } else {
1704                         printk(KERN_WARNING
1705                                "ipmi_si: Interface type specified "
1706                                "for interface %d, was invalid: %s\n",
1707                                i, si_type[i]);
1708                         kfree(info);
1709                         continue;
1710                 }
1711
1712                 if (ports[i]) {
1713                         /* An I/O port */
1714                         info->io_setup = port_setup;
1715                         info->io.addr_data = ports[i];
1716                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1717                 } else if (addrs[i]) {
1718                         /* A memory port */
1719                         info->io_setup = mem_setup;
1720                         info->io.addr_data = addrs[i];
1721                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1722                 } else {
1723                         printk(KERN_WARNING
1724                                "ipmi_si: Interface type specified "
1725                                "for interface %d, "
1726                                "but port and address were not set or "
1727                                "set to zero.\n", i);
1728                         kfree(info);
1729                         continue;
1730                 }
1731
1732                 info->io.addr = NULL;
1733                 info->io.regspacing = regspacings[i];
1734                 if (!info->io.regspacing)
1735                         info->io.regspacing = DEFAULT_REGSPACING;
1736                 info->io.regsize = regsizes[i];
1737                 if (!info->io.regsize)
1738                         info->io.regsize = DEFAULT_REGSPACING;
1739                 info->io.regshift = regshifts[i];
1740                 info->irq = irqs[i];
1741                 if (info->irq)
1742                         info->irq_setup = std_irq_setup;
1743
1744                 try_smi_init(info);
1745         }
1746 }
1747
1748 #ifdef CONFIG_ACPI
1749
1750 #include <linux/acpi.h>
1751
1752 /* Once we get an ACPI failure, we don't try any more, because we go
1753    through the tables sequentially.  Once we don't find a table, there
1754    are no more. */
1755 static int acpi_failure;
1756
1757 /* For GPE-type interrupts. */
1758 static u32 ipmi_acpi_gpe(void *context)
1759 {
1760         struct smi_info *smi_info = context;
1761         unsigned long   flags;
1762 #ifdef DEBUG_TIMING
1763         struct timeval t;
1764 #endif
1765
1766         spin_lock_irqsave(&(smi_info->si_lock), flags);
1767
1768         spin_lock(&smi_info->count_lock);
1769         smi_info->interrupts++;
1770         spin_unlock(&smi_info->count_lock);
1771
1772 #ifdef DEBUG_TIMING
1773         do_gettimeofday(&t);
1774         printk("**ACPI_GPE: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
1775 #endif
1776         smi_event_handler(smi_info, 0);
1777         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1778
1779         return ACPI_INTERRUPT_HANDLED;
1780 }
1781
1782 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1783 {
1784         if (!info->irq)
1785                 return;
1786
1787         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1788 }
1789
1790 static int acpi_gpe_irq_setup(struct smi_info *info)
1791 {
1792         acpi_status status;
1793
1794         if (!info->irq)
1795                 return 0;
1796
1797         /* FIXME - is level triggered right? */
1798         status = acpi_install_gpe_handler(NULL,
1799                                           info->irq,
1800                                           ACPI_GPE_LEVEL_TRIGGERED,
1801                                           &ipmi_acpi_gpe,
1802                                           info);
1803         if (status != AE_OK) {
1804                 printk(KERN_WARNING
1805                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1806                        " running polled\n",
1807                        DEVICE_NAME, info->irq);
1808                 info->irq = 0;
1809                 return -EINVAL;
1810         } else {
1811                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1812                 printk("  Using ACPI GPE %d\n", info->irq);
1813                 return 0;
1814         }
1815 }
1816
1817 /*
1818  * Defined at
1819  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1820  */
1821 struct SPMITable {
1822         s8      Signature[4];
1823         u32     Length;
1824         u8      Revision;
1825         u8      Checksum;
1826         s8      OEMID[6];
1827         s8      OEMTableID[8];
1828         s8      OEMRevision[4];
1829         s8      CreatorID[4];
1830         s8      CreatorRevision[4];
1831         u8      InterfaceType;
1832         u8      IPMIlegacy;
1833         s16     SpecificationRevision;
1834
1835         /*
1836          * Bit 0 - SCI interrupt supported
1837          * Bit 1 - I/O APIC/SAPIC
1838          */
1839         u8      InterruptType;
1840
1841         /* If bit 0 of InterruptType is set, then this is the SCI
1842            interrupt in the GPEx_STS register. */
1843         u8      GPE;
1844
1845         s16     Reserved;
1846
1847         /* If bit 1 of InterruptType is set, then this is the I/O
1848            APIC/SAPIC interrupt. */
1849         u32     GlobalSystemInterrupt;
1850
1851         /* The actual register address. */
1852         struct acpi_generic_address addr;
1853
1854         u8      UID[4];
1855
1856         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1857 };
1858
1859 static __devinit int try_init_acpi(struct SPMITable *spmi)
1860 {
1861         struct smi_info  *info;
1862         u8               addr_space;
1863
1864         if (spmi->IPMIlegacy != 1) {
1865                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1866                 return -ENODEV;
1867         }
1868
1869         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1870                 addr_space = IPMI_MEM_ADDR_SPACE;
1871         else
1872                 addr_space = IPMI_IO_ADDR_SPACE;
1873
1874         info = kzalloc(sizeof(*info), GFP_KERNEL);
1875         if (!info) {
1876                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1877                 return -ENOMEM;
1878         }
1879
1880         info->addr_source = "ACPI";
1881
1882         /* Figure out the interface type. */
1883         switch (spmi->InterfaceType)
1884         {
1885         case 1: /* KCS */
1886                 info->si_type = SI_KCS;
1887                 break;
1888         case 2: /* SMIC */
1889                 info->si_type = SI_SMIC;
1890                 break;
1891         case 3: /* BT */
1892                 info->si_type = SI_BT;
1893                 break;
1894         default:
1895                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1896                         spmi->InterfaceType);
1897                 kfree(info);
1898                 return -EIO;
1899         }
1900
1901         if (spmi->InterruptType & 1) {
1902                 /* We've got a GPE interrupt. */
1903                 info->irq = spmi->GPE;
1904                 info->irq_setup = acpi_gpe_irq_setup;
1905         } else if (spmi->InterruptType & 2) {
1906                 /* We've got an APIC/SAPIC interrupt. */
1907                 info->irq = spmi->GlobalSystemInterrupt;
1908                 info->irq_setup = std_irq_setup;
1909         } else {
1910                 /* Use the default interrupt setting. */
1911                 info->irq = 0;
1912                 info->irq_setup = NULL;
1913         }
1914
1915         if (spmi->addr.bit_width) {
1916                 /* A (hopefully) properly formed register bit width. */
1917                 info->io.regspacing = spmi->addr.bit_width / 8;
1918         } else {
1919                 info->io.regspacing = DEFAULT_REGSPACING;
1920         }
1921         info->io.regsize = info->io.regspacing;
1922         info->io.regshift = spmi->addr.bit_offset;
1923
1924         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1925                 info->io_setup = mem_setup;
1926                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1927         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1928                 info->io_setup = port_setup;
1929                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1930         } else {
1931                 kfree(info);
1932                 printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
1933                 return -EIO;
1934         }
1935         info->io.addr_data = spmi->addr.address;
1936
1937         try_smi_init(info);
1938
1939         return 0;
1940 }
1941
1942 static __devinit void acpi_find_bmc(void)
1943 {
1944         acpi_status      status;
1945         struct SPMITable *spmi;
1946         int              i;
1947
1948         if (acpi_disabled)
1949                 return;
1950
1951         if (acpi_failure)
1952                 return;
1953
1954         for (i = 0; ; i++) {
1955                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1956                                         (struct acpi_table_header **)&spmi);
1957                 if (status != AE_OK)
1958                         return;
1959
1960                 try_init_acpi(spmi);
1961         }
1962 }
1963 #endif
1964
1965 #ifdef CONFIG_DMI
1966 struct dmi_ipmi_data
1967 {
1968         u8              type;
1969         u8              addr_space;
1970         unsigned long   base_addr;
1971         u8              irq;
1972         u8              offset;
1973         u8              slave_addr;
1974 };
1975
1976 static int __devinit decode_dmi(const struct dmi_header *dm,
1977                                 struct dmi_ipmi_data *dmi)
1978 {
1979         const u8        *data = (const u8 *)dm;
1980         unsigned long   base_addr;
1981         u8              reg_spacing;
1982         u8              len = dm->length;
1983
1984         dmi->type = data[4];
1985
1986         memcpy(&base_addr, data+8, sizeof(unsigned long));
1987         if (len >= 0x11) {
1988                 if (base_addr & 1) {
1989                         /* I/O */
1990                         base_addr &= 0xFFFE;
1991                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1992                 }
1993                 else {
1994                         /* Memory */
1995                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1996                 }
1997                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1998                    is odd. */
1999                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
2000
2001                 dmi->irq = data[0x11];
2002
2003                 /* The top two bits of byte 0x10 hold the register spacing. */
2004                 reg_spacing = (data[0x10] & 0xC0) >> 6;
2005                 switch(reg_spacing){
2006                 case 0x00: /* Byte boundaries */
2007                     dmi->offset = 1;
2008                     break;
2009                 case 0x01: /* 32-bit boundaries */
2010                     dmi->offset = 4;
2011                     break;
2012                 case 0x02: /* 16-byte boundaries */
2013                     dmi->offset = 16;
2014                     break;
2015                 default:
2016                     /* Some other interface, just ignore it. */
2017                     return -EIO;
2018                 }
2019         } else {
2020                 /* Old DMI spec. */
2021                 /* Note that technically, the lower bit of the base
2022                  * address should be 1 if the address is I/O and 0 if
2023  * the address is in memory.  However, many systems get that
2024  * wrong (and all that I have seen are I/O), so we just
2025  * ignore that bit and assume I/O.  Systems that use
2026                  * memory should use the newer spec, anyway. */
2027                 dmi->base_addr = base_addr & 0xfffe;
2028                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2029                 dmi->offset = 1;
2030         }
2031
2032         dmi->slave_addr = data[6];
2033
2034         return 0;
2035 }
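/*
 * Worked example (hypothetical SMBIOS type-38 record): a KCS entry with
 * base address 0x0CA3 and modifier byte 0x00 decodes as an I/O-space
 * interface (low bit of the address set), base_addr 0xCA2, byte-aligned
 * registers (offset = 1), and no replacement lsb because bit 4 of the
 * modifier byte is clear.
 */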
2036
2037 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2038 {
2039         struct smi_info *info;
2040
2041         info = kzalloc(sizeof(*info), GFP_KERNEL);
2042         if (!info) {
2043                 printk(KERN_ERR
2044                        "ipmi_si: Could not allocate SI data\n");
2045                 return;
2046         }
2047
2048         info->addr_source = "SMBIOS";
2049
2050         switch (ipmi_data->type) {
2051         case 0x01: /* KCS */
2052                 info->si_type = SI_KCS;
2053                 break;
2054         case 0x02: /* SMIC */
2055                 info->si_type = SI_SMIC;
2056                 break;
2057         case 0x03: /* BT */
2058                 info->si_type = SI_BT;
2059                 break;
2060         default:
2061                 kfree(info);
2062                 return;
2063         }
2064
2065         switch (ipmi_data->addr_space) {
2066         case IPMI_MEM_ADDR_SPACE:
2067                 info->io_setup = mem_setup;
2068                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2069                 break;
2070
2071         case IPMI_IO_ADDR_SPACE:
2072                 info->io_setup = port_setup;
2073                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2074                 break;
2075
2076         default:
2077                 kfree(info);
2078                 printk(KERN_WARNING
2079                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2080                        ipmi_data->addr_space);
2081                 return;
2082         }
2083         info->io.addr_data = ipmi_data->base_addr;
2084
2085         info->io.regspacing = ipmi_data->offset;
2086         if (!info->io.regspacing)
2087                 info->io.regspacing = DEFAULT_REGSPACING;
2088         info->io.regsize = DEFAULT_REGSPACING;
2089         info->io.regshift = 0;
2090
2091         info->slave_addr = ipmi_data->slave_addr;
2092
2093         info->irq = ipmi_data->irq;
2094         if (info->irq)
2095                 info->irq_setup = std_irq_setup;
2096
2097         try_smi_init(info);
2098 }
2099
2100 static void __devinit dmi_find_bmc(void)
2101 {
2102         const struct dmi_device *dev = NULL;
2103         struct dmi_ipmi_data data;
2104         int                  rv;
2105
2106         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2107                 memset(&data, 0, sizeof(data));
2108                 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2109                                 &data);
2110                 if (!rv)
2111                         try_init_dmi(&data);
2112         }
2113 }
2114 #endif /* CONFIG_DMI */
2115
2116 #ifdef CONFIG_PCI
2117
2118 #define PCI_ERMC_CLASSCODE              0x0C0700
2119 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2120 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2121 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2122 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2123 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2124
2125 #define PCI_HP_VENDOR_ID    0x103C
2126 #define PCI_MMC_DEVICE_ID   0x121A
2127 #define PCI_MMC_ADDR_CW     0x10
2128
2129 static void ipmi_pci_cleanup(struct smi_info *info)
2130 {
2131         struct pci_dev *pdev = info->addr_source_data;
2132
2133         pci_disable_device(pdev);
2134 }
2135
2136 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2137                                     const struct pci_device_id *ent)
2138 {
2139         int rv;
2140         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2141         struct smi_info *info;
2142         int first_reg_offset = 0;
2143
2144         info = kzalloc(sizeof(*info), GFP_KERNEL);
2145         if (!info)
2146                 return -ENOMEM;
2147
2148         info->addr_source = "PCI";
2149
2150         switch (class_type) {
2151         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2152                 info->si_type = SI_SMIC;
2153                 break;
2154
2155         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2156                 info->si_type = SI_KCS;
2157                 break;
2158
2159         case PCI_ERMC_CLASSCODE_TYPE_BT:
2160                 info->si_type = SI_BT;
2161                 break;
2162
2163         default:
2164                 kfree(info);
2165                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2166                        pci_name(pdev), class_type);
2167                 return -ENOMEM;
2168         }
2169
2170         rv = pci_enable_device(pdev);
2171         if (rv) {
2172                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2173                        pci_name(pdev));
2174                 kfree(info);
2175                 return rv;
2176         }
2177
2178         info->addr_source_cleanup = ipmi_pci_cleanup;
2179         info->addr_source_data = pdev;
2180
2181         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2182                 first_reg_offset = 1;
2183
2184         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2185                 info->io_setup = port_setup;
2186                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2187         } else {
2188                 info->io_setup = mem_setup;
2189                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2190         }
2191         info->io.addr_data = pci_resource_start(pdev, 0);
2192
2193         info->io.regspacing = DEFAULT_REGSPACING;
2194         info->io.regsize = DEFAULT_REGSPACING;
2195         info->io.regshift = 0;
2196
2197         info->irq = pdev->irq;
2198         if (info->irq)
2199                 info->irq_setup = std_irq_setup;
2200
2201         info->dev = &pdev->dev;
2202         pci_set_drvdata(pdev, info);
2203
2204         return try_smi_init(info);
2205 }
2206
2207 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2208 {
2209         struct smi_info *info = pci_get_drvdata(pdev);
2210         cleanup_one_si(info);
2211 }
2212
2213 #ifdef CONFIG_PM
2214 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2215 {
2216         return 0;
2217 }
2218
2219 static int ipmi_pci_resume(struct pci_dev *pdev)
2220 {
2221         return 0;
2222 }
2223 #endif
2224
2225 static struct pci_device_id ipmi_pci_devices[] = {
2226         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2227         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2228         { 0, }
2229 };
2230 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2231
2232 static struct pci_driver ipmi_pci_driver = {
2233         .name =         DEVICE_NAME,
2234         .id_table =     ipmi_pci_devices,
2235         .probe =        ipmi_pci_probe,
2236         .remove =       __devexit_p(ipmi_pci_remove),
2237 #ifdef CONFIG_PM
2238         .suspend =      ipmi_pci_suspend,
2239         .resume =       ipmi_pci_resume,
2240 #endif
2241 };
2242 #endif /* CONFIG_PCI */
2243
2244
2245 #ifdef CONFIG_PPC_OF
2246 static int __devinit ipmi_of_probe(struct of_device *dev,
2247                          const struct of_device_id *match)
2248 {
2249         struct smi_info *info;
2250         struct resource resource;
2251         const int *regsize, *regspacing, *regshift;
2252         struct device_node *np = dev->node;
2253         int ret;
2254         int proplen;
2255
2256         dev_info(&dev->dev, PFX "probing via device tree\n");
2257
2258         ret = of_address_to_resource(np, 0, &resource);
2259         if (ret) {
2260                 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2261                 return ret;
2262         }
2263
2264         regsize = of_get_property(np, "reg-size", &proplen);
2265         if (regsize && proplen != 4) {
2266                 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2267                 return -EINVAL;
2268         }
2269
2270         regspacing = of_get_property(np, "reg-spacing", &proplen);
2271         if (regspacing && proplen != 4) {
2272                 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2273                 return -EINVAL;
2274         }
2275
2276         regshift = of_get_property(np, "reg-shift", &proplen);
2277         if (regshift && proplen != 4) {
2278                 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2279                 return -EINVAL;
2280         }
2281
2282         info = kzalloc(sizeof(*info), GFP_KERNEL);
2283
2284         if (!info) {
2285                 dev_err(&dev->dev,
2286                         PFX "could not allocate memory for OF probe\n");
2287                 return -ENOMEM;
2288         }
2289
2290         info->si_type           = (enum si_type) match->data;
2291         info->addr_source       = "device-tree";
2292         info->io_setup          = mem_setup;
2293         info->irq_setup         = std_irq_setup;
2294
2295         info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2296         info->io.addr_data      = resource.start;
2297
2298         info->io.regsize        = regsize ? *regsize : DEFAULT_REGSIZE;
2299         info->io.regspacing     = regspacing ? *regspacing : DEFAULT_REGSPACING;
2300         info->io.regshift       = regshift ? *regshift : 0;
2301
2302         info->irq               = irq_of_parse_and_map(dev->node, 0);
2303         info->dev               = &dev->dev;
2304
2305         dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
2306                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2307                 info->irq);
2308
2309         dev->dev.driver_data = (void*) info;
2310
2311         return try_smi_init(info);
2312 }
2313
2314 static int __devexit ipmi_of_remove(struct of_device *dev)
2315 {
2316         cleanup_one_si(dev->dev.driver_data);
2317         return 0;
2318 }
2319
2320 static struct of_device_id ipmi_match[] =
2321 {
2322         { .type = "ipmi", .compatible = "ipmi-kcs",  .data = (void *)(unsigned long) SI_KCS },
2323         { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2324         { .type = "ipmi", .compatible = "ipmi-bt",   .data = (void *)(unsigned long) SI_BT },
2325         {},
2326 };
2327
2328 static struct of_platform_driver ipmi_of_platform_driver =
2329 {
2330         .name           = "ipmi",
2331         .match_table    = ipmi_match,
2332         .probe          = ipmi_of_probe,
2333         .remove         = __devexit_p(ipmi_of_remove),
2334 };
2335 #endif /* CONFIG_PPC_OF */
2336
2337
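/*
 * Issue an IPMI Get Device ID command directly through the low-level
 * state machine, polling it to completion.  If the state machine ends
 * up hosed there is almost certainly no BMC at this address, so return
 * -ENODEV; otherwise parse the response into smi_info->device_id for
 * later use.
 */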
2338 static int try_get_dev_id(struct smi_info *smi_info)
2339 {
2340         unsigned char         msg[2];
2341         unsigned char         *resp;
2342         unsigned long         resp_len;
2343         enum si_sm_result     smi_result;
2344         int                   rv = 0;
2345
2346         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2347         if (!resp)
2348                 return -ENOMEM;
2349
2350         /* Do a Get Device ID command, since it comes back with some
2351            useful info. */
2352         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2353         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2354         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2355
2356         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2357         for (;;)
2358         {
2359                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2360                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2361                         schedule_timeout_uninterruptible(1);
2362                         smi_result = smi_info->handlers->event(
2363                                 smi_info->si_sm, 100);
2364                 }
2365                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2366                 {
2367                         smi_result = smi_info->handlers->event(
2368                                 smi_info->si_sm, 0);
2369                 }
2370                 else
2371                         break;
2372         }
2373         if (smi_result == SI_SM_HOSED) {
2374                 /* We couldn't get the state machine to run, so whatever's at
2375                    the port is probably not an IPMI SMI interface. */
2376                 rv = -ENODEV;
2377                 goto out;
2378         }
2379
2380         /* Otherwise, we got some data. */
2381         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2382                                                   resp, IPMI_MAX_MSG_LENGTH);
2383
2384         /* Check and record info from the get device id, in case we need it. */
2385         rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
2386
2387  out:
2388         kfree(resp);
2389         return rv;
2390 }
2391
2392 static int type_file_read_proc(char *page, char **start, off_t off,
2393                                int count, int *eof, void *data)
2394 {
2395         struct smi_info *smi = data;
2396
2397         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2398 }
2399
2400 static int stat_file_read_proc(char *page, char **start, off_t off,
2401                                int count, int *eof, void *data)
2402 {
2403         char            *out = (char *) page;
2404         struct smi_info *smi = data;
2405
2406         out += sprintf(out, "interrupts_enabled:    %d\n",
2407                        smi->irq && !smi->interrupt_disabled);
2408         out += sprintf(out, "short_timeouts:        %ld\n",
2409                        smi->short_timeouts);
2410         out += sprintf(out, "long_timeouts:         %ld\n",
2411                        smi->long_timeouts);
2412         out += sprintf(out, "timeout_restarts:      %ld\n",
2413                        smi->timeout_restarts);
2414         out += sprintf(out, "idles:                 %ld\n",
2415                        smi->idles);
2416         out += sprintf(out, "interrupts:            %ld\n",
2417                        smi->interrupts);
2418         out += sprintf(out, "attentions:            %ld\n",
2419                        smi->attentions);
2420         out += sprintf(out, "flag_fetches:          %ld\n",
2421                        smi->flag_fetches);
2422         out += sprintf(out, "hosed_count:           %ld\n",
2423                        smi->hosed_count);
2424         out += sprintf(out, "complete_transactions: %ld\n",
2425                        smi->complete_transactions);
2426         out += sprintf(out, "events:                %ld\n",
2427                        smi->events);
2428         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2429                        smi->watchdog_pretimeouts);
2430         out += sprintf(out, "incoming_messages:     %ld\n",
2431                        smi->incoming_messages);
2432
2433         return out - page;
2434 }
2435
2436 static int param_read_proc(char *page, char **start, off_t off,
2437                            int count, int *eof, void *data)
2438 {
2439         struct smi_info *smi = data;
2440
2441         return sprintf(page,
2442                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2443                        si_to_str[smi->si_type],
2444                        addr_space_to_str[smi->io.addr_type],
2445                        smi->io.addr_data,
2446                        smi->io.regspacing,
2447                        smi->io.regsize,
2448                        smi->io.regshift,
2449                        smi->irq,
2450                        smi->slave_addr);
2451 }
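/*
 * A line read back from the "params" proc file might look like this
 * (values illustrative only):
 *
 *   kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=32
 *
 * which mirrors the hotmod syntax, except that the IPMB slave address
 * is printed in decimal.
 */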
2452
2453 /*
2454  * oem_data_avail_to_receive_msg_avail
2455  * @info - smi_info structure with msg_flags set
2456  *
2457  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2458  * Returns 1 indicating need to re-run handle_flags().
2459  */
2460 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2461 {
2462         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2463                                 RECEIVE_MSG_AVAIL);
2464         return 1;
2465 }
2466
2467 /*
2468  * setup_dell_poweredge_oem_data_handler
2469  * @info - smi_info.device_id must be populated
2470  *
2471  * Systems that match, but have firmware version < 1.40 may assert
2472  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2473  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2474  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2475  * as RECEIVE_MSG_AVAIL instead.
2476  *
2477  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2478  * asserts the OEM[012] bits, and if it did, the driver would have to
2479  * change to handle that properly, we don't actually check for the
2480  * firmware version.
2481  * Device ID = 0x20                BMC on PowerEdge 8G servers
2482  * Device Revision = 0x80
2483  * Firmware Revision1 = 0x01       BMC version 1.40
2484  * Firmware Revision2 = 0x40       BCD encoded
2485  * IPMI Version = 0x51             IPMI 1.5
2486  * Manufacturer ID = A2 02 00      Dell IANA
2487  *
2488  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2489  * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
2490  *
2491  */
2492 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2493 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2494 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2495 #define DELL_IANA_MFR_ID 0x0002a2
2496 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2497 {
2498         struct ipmi_device_id *id = &smi_info->device_id;
2499         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2500                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2501                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2502                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2503                         smi_info->oem_data_avail_handler =
2504                                 oem_data_avail_to_receive_msg_avail;
2505                 }
2506                 else if (ipmi_version_major(id) < 1 ||
2507                          (ipmi_version_major(id) == 1 &&
2508                           ipmi_version_minor(id) < 5)) {
2509                         smi_info->oem_data_avail_handler =
2510                                 oem_data_avail_to_receive_msg_avail;
2511                 }
2512         }
2513 }
2514
2515 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2516 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2517 {
2518         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2519
2520         /* Make it a response */
2521         msg->rsp[0] = msg->data[0] | 4;
2522         msg->rsp[1] = msg->data[1];
2523         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2524         msg->rsp_size = 3;
2525         smi_info->curr_msg = NULL;
2526         deliver_recv_msg(smi_info, msg);
2527 }
2528
2529 /*
2530  * dell_poweredge_bt_xaction_handler
2531  * @info - smi_info.device_id must be populated
2532  *
2533  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2534  * not respond to a Get SDR command if the length of the data
2535  * requested is exactly 0x3A, which leads to command timeouts and no
2536  * data returned.  This intercepts such commands, and causes userspace
2537  * callers to try again with a different-sized buffer, which succeeds.
2538  */
2539
2540 #define STORAGE_NETFN 0x0A
2541 #define STORAGE_CMD_GET_SDR 0x23
2542 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2543                                              unsigned long unused,
2544                                              void *in)
2545 {
2546         struct smi_info *smi_info = in;
2547         unsigned char *data = smi_info->curr_msg->data;
2548         unsigned int size   = smi_info->curr_msg->data_size;
2549         if (size >= 8 &&
2550             (data[0]>>2) == STORAGE_NETFN &&
2551             data[1] == STORAGE_CMD_GET_SDR &&
2552             data[7] == 0x3A) {
2553                 return_hosed_msg_badsize(smi_info);
2554                 return NOTIFY_STOP;
2555         }
2556         return NOTIFY_DONE;
2557 }
2558
2559 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2560         .notifier_call  = dell_poweredge_bt_xaction_handler,
2561 };
2562
2563 /*
2564  * setup_dell_poweredge_bt_xaction_handler
2565  * @info - smi_info.device_id must be filled in already
2566  *
2567  * Fills in smi_info.device_id.start_transaction_pre_hook
2568  * when we know what function to use there.
2569  */
2570 static void
2571 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2572 {
2573         struct ipmi_device_id *id = &smi_info->device_id;
2574         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2575             smi_info->si_type == SI_BT)
2576                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2577 }
2578
2579 /*
2580  * setup_oem_data_handler
2581  * @info - smi_info.device_id must be filled in already
2582  *
2583  * Fills in smi_info.device_id.oem_data_available_handler
2584  * when we know what function to use there.
2585  */
2586
2587 static void setup_oem_data_handler(struct smi_info *smi_info)
2588 {
2589         setup_dell_poweredge_oem_data_handler(smi_info);
2590 }
2591
2592 static void setup_xaction_handlers(struct smi_info *smi_info)
2593 {
2594         setup_dell_poweredge_bt_xaction_handler(smi_info);
2595 }
2596
2597 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2598 {
2599         if (smi_info->intf) {
2600                 /* The timer and thread are only running if the
2601                    interface has been started up and registered. */
2602                 if (smi_info->thread != NULL)
2603                         kthread_stop(smi_info->thread);
2604                 del_timer_sync(&smi_info->si_timer);
2605         }
2606 }
2607
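/*
 * Default legacy addresses to probe when nothing else has claimed an
 * interface; these are the well-known KCS/SMIC/BT I/O ports from the
 * IPMI specification.
 */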
2608 static __devinitdata struct ipmi_default_vals
2609 {
2610         int type;
2611         int port;
2612 } ipmi_defaults[] =
2613 {
2614         { .type = SI_KCS, .port = 0xca2 },
2615         { .type = SI_SMIC, .port = 0xca9 },
2616         { .type = SI_BT, .port = 0xe4 },
2617         { .port = 0 }
2618 };
2619
2620 static __devinit void default_find_bmc(void)
2621 {
2622         struct smi_info *info;
2623         int             i;
2624
2625         for (i = 0; ; i++) {
2626                 if (!ipmi_defaults[i].port)
2627                         break;
2628
2629                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2630                 if (!info)
2631                         return;
2632
2633 #ifdef CONFIG_PPC_MERGE
2634                 if (check_legacy_ioport(ipmi_defaults[i].port))
2635                         continue;
2636 #endif
2637
2638                 info->addr_source = NULL;
2639
2640                 info->si_type = ipmi_defaults[i].type;
2641                 info->io_setup = port_setup;
2642                 info->io.addr_data = ipmi_defaults[i].port;
2643                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2644
2645                 info->io.addr = NULL;
2646                 info->io.regspacing = DEFAULT_REGSPACING;
2647                 info->io.regsize = DEFAULT_REGSPACING;
2648                 info->io.regshift = 0;
2649
2650                 if (try_smi_init(info) == 0) {
2651                         /* Found one... */
2652                         printk(KERN_INFO "ipmi_si: Found default %s state"
2653                                " machine at %s address 0x%lx\n",
2654                                si_to_str[info->si_type],
2655                                addr_space_to_str[info->io.addr_type],
2656                                info->io.addr_data);
2657                         return;
2658                 }
2659         }
2660 }
2661
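/*
 * Returns 1 if no already-registered interface uses the same address
 * space and address as the candidate, 0 if it would be a duplicate.
 * Called with smi_infos_lock held.
 */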
2662 static int is_new_interface(struct smi_info *info)
2663 {
2664         struct smi_info *e;
2665
2666         list_for_each_entry(e, &smi_infos, link) {
2667                 if (e->io.addr_type != info->io.addr_type)
2668                         continue;
2669                 if (e->io.addr_data == info->io.addr_data)
2670                         return 0;
2671         }
2672
2673         return 1;
2674 }
2675
2676 static int try_smi_init(struct smi_info *new_smi)
2677 {
2678         int rv;
2679
2680         if (new_smi->addr_source) {
2681                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2682                        " machine at %s address 0x%lx, slave address 0x%x,"
2683                        " irq %d\n",
2684                        new_smi->addr_source,
2685                        si_to_str[new_smi->si_type],
2686                        addr_space_to_str[new_smi->io.addr_type],
2687                        new_smi->io.addr_data,
2688                        new_smi->slave_addr, new_smi->irq);
2689         }
2690
2691         mutex_lock(&smi_infos_lock);
2692         if (!is_new_interface(new_smi)) {
2693                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2694                 rv = -EBUSY;
2695                 goto out_err;
2696         }
2697
2698         /* So we know not to free it unless we have allocated one. */
2699         new_smi->intf = NULL;
2700         new_smi->si_sm = NULL;
2701         new_smi->handlers = NULL;
2702
2703         switch (new_smi->si_type) {
2704         case SI_KCS:
2705                 new_smi->handlers = &kcs_smi_handlers;
2706                 break;
2707
2708         case SI_SMIC:
2709                 new_smi->handlers = &smic_smi_handlers;
2710                 break;
2711
2712         case SI_BT:
2713                 new_smi->handlers = &bt_smi_handlers;
2714                 break;
2715
2716         default:
2717                 /* No support for anything else yet. */
2718                 rv = -EIO;
2719                 goto out_err;
2720         }
2721
2722         /* Allocate the state machine's data and initialize it. */
2723         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2724         if (!new_smi->si_sm) {
2725                 printk(" Could not allocate state machine memory\n");
2726                 rv = -ENOMEM;
2727                 goto out_err;
2728         }
2729         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2730                                                         &new_smi->io);
2731
2732         /* Now that we know the I/O size, we can set up the I/O. */
2733         rv = new_smi->io_setup(new_smi);
2734         if (rv) {
2735                 printk(" Could not set up I/O space\n");
2736                 goto out_err;
2737         }
2738
2739         spin_lock_init(&(new_smi->si_lock));
2740         spin_lock_init(&(new_smi->msg_lock));
2741         spin_lock_init(&(new_smi->count_lock));
2742
2743         /* Do low-level detection first. */
2744         if (new_smi->handlers->detect(new_smi->si_sm)) {
2745                 if (new_smi->addr_source)
2746                         printk(KERN_INFO "ipmi_si: Interface detection"
2747                                " failed\n");
2748                 rv = -ENODEV;
2749                 goto out_err;
2750         }
2751
2752         /* Attempt a get device id command.  If it fails, we probably
2753            don't have a BMC here. */
2754         rv = try_get_dev_id(new_smi);
2755         if (rv) {
2756                 if (new_smi->addr_source)
2757                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2758                                " at this location\n");
2759                 goto out_err;
2760         }
2761
2762         setup_oem_data_handler(new_smi);
2763         setup_xaction_handlers(new_smi);
2764
2765         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2766         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2767         new_smi->curr_msg = NULL;
2768         atomic_set(&new_smi->req_events, 0);
2769         new_smi->run_to_completion = 0;
2770
2771         new_smi->interrupt_disabled = 0;
2772         atomic_set(&new_smi->stop_operation, 0);
2773         new_smi->intf_num = smi_num;
2774         smi_num++;
2775
2776         /* Start clearing the flags before we enable interrupts or the
2777            timer to avoid racing with the timer. */
2778         start_clear_flags(new_smi);
2779         /* IRQ is defined to be set when non-zero. */
2780         if (new_smi->irq)
2781                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2782
2783         if (!new_smi->dev) {
2784                 /* If we don't already have a device from something
2785                  * else (like PCI), then register a new one. */
2786                 new_smi->pdev = platform_device_alloc("ipmi_si",
2787                                                       new_smi->intf_num);
2788                 if (!new_smi->pdev) {
2789                         printk(KERN_ERR "ipmi_si_intf:"
2790                                " Unable to allocate platform device\n");
2791                         rv = -ENOMEM;
2792                         goto out_err;
2793                 }
2794                 new_smi->dev = &new_smi->pdev->dev;
2795                 new_smi->dev->driver = &ipmi_driver;
2796
2797                 rv = platform_device_add(new_smi->pdev);
2798                 if (rv) {
2799                         printk(KERN_ERR
2800                                "ipmi_si_intf:"
2801                                " Unable to register system interface device:"
2802                                " %d\n",
2803                                rv);
2804                         goto out_err;
2805                 }
2806                 new_smi->dev_registered = 1;
2807         }
2808
2809         rv = ipmi_register_smi(&handlers,
2810                                new_smi,
2811                                &new_smi->device_id,
2812                                new_smi->dev,
2813                                "bmc",
2814                                new_smi->slave_addr);
2815         if (rv) {
2816                 printk(KERN_ERR
2817                        "ipmi_si: Unable to register device: error %d\n",
2818                        rv);
2819                 goto out_err_stop_timer;
2820         }
2821
2822         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2823                                      type_file_read_proc, NULL,
2824                                      new_smi, THIS_MODULE);
2825         if (rv) {
2826                 printk(KERN_ERR
2827                        "ipmi_si: Unable to create proc entry: %d\n",
2828                        rv);
2829                 goto out_err_stop_timer;
2830         }
2831
2832         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2833                                      stat_file_read_proc, NULL,
2834                                      new_smi, THIS_MODULE);
2835         if (rv) {
2836                 printk(KERN_ERR
2837                        "ipmi_si: Unable to create proc entry: %d\n",
2838                        rv);
2839                 goto out_err_stop_timer;
2840         }
2841
2842         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2843                                      param_read_proc, NULL,
2844                                      new_smi, THIS_MODULE);
2845         if (rv) {
2846                 printk(KERN_ERR
2847                        "ipmi_si: Unable to create proc entry: %d\n",
2848                        rv);
2849                 goto out_err_stop_timer;
2850         }
2851
2852         list_add_tail(&new_smi->link, &smi_infos);
2853
2854         mutex_unlock(&smi_infos_lock);
2855
2856         printk(KERN_INFO "IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2857
2858         return 0;
2859
2860  out_err_stop_timer:
2861         atomic_inc(&new_smi->stop_operation);
2862         wait_for_timer_and_thread(new_smi);
2863
2864  out_err:
2865         if (new_smi->intf)
2866                 ipmi_unregister_smi(new_smi->intf);
2867
2868         if (new_smi->irq_cleanup)
2869                 new_smi->irq_cleanup(new_smi);
2870
2871         /* Wait until we know that we are out of any interrupt
2872            handlers that might have been running before we freed the
2873            interrupt. */
2874         synchronize_sched();
2875
2876         if (new_smi->si_sm) {
2877                 if (new_smi->handlers)
2878                         new_smi->handlers->cleanup(new_smi->si_sm);
2879                 kfree(new_smi->si_sm);
2880         }
2881         if (new_smi->addr_source_cleanup)
2882                 new_smi->addr_source_cleanup(new_smi);
2883         if (new_smi->io_cleanup)
2884                 new_smi->io_cleanup(new_smi);
2885
2886         if (new_smi->dev_registered)
2887                 platform_device_unregister(new_smi->pdev);
2888
2889         kfree(new_smi);
2890
2891         mutex_unlock(&smi_infos_lock);
2892
2893         return rv;
2894 }
2895
2896 static __devinit int init_ipmi_si(void)
2897 {
2898         int  i;
2899         char *str;
2900         int  rv;
2901
        if (initialized)
                return 0;
        initialized = 1;

        /* Register the device drivers. */
        rv = driver_register(&ipmi_driver);
        if (rv) {
                printk(KERN_ERR
                       "init_ipmi_si: Unable to register driver: %d\n",
                       rv);
                return rv;
        }

        /* Parse the comma-separated si_type string (for example
           "kcs,smic") into its individual interface types. */
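        /*
         * Illustration (assuming the usual "type" module parameter is
         * wired to si_type_str): "modprobe ipmi_si type=kcs,smic"
         * would leave si_type[0] = "kcs" and si_type[1] = "smic".
         */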
        str = si_type_str;
        if (*str != '\0') {
                for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
                        si_type[i] = str;
                        str = strchr(str, ',');
                        if (str) {
                                *str = '\0';
                                str++;
                        } else {
                                break;
                        }
                }
        }

        printk(KERN_INFO "IPMI System Interface driver.\n");

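        /*
         * Look for BMCs from every source we know about: explicit
         * (hardcoded) module parameters first, then the DMI and ACPI
         * firmware tables, and finally the PCI and OpenFirmware
         * buses below.
         */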
        hardcode_find_bmc();

#ifdef CONFIG_DMI
        dmi_find_bmc();
#endif

#ifdef CONFIG_ACPI
        acpi_find_bmc();
#endif

#ifdef CONFIG_PCI
        rv = pci_register_driver(&ipmi_pci_driver);
        if (rv) {
                printk(KERN_ERR
                       "init_ipmi_si: Unable to register PCI driver: %d\n",
                       rv);
        }
#endif

#ifdef CONFIG_PPC_OF
        of_register_platform_driver(&ipmi_of_platform_driver);
#endif

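        /* If nothing has been found so far and probing defaults is
           allowed, fall back to default_find_bmc() to try the
           driver's built-in default locations. */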
        if (si_trydefaults) {
                mutex_lock(&smi_infos_lock);
                if (list_empty(&smi_infos)) {
                        /* No BMC was found, try defaults. */
                        mutex_unlock(&smi_infos_lock);
                        default_find_bmc();
                } else {
                        mutex_unlock(&smi_infos_lock);
                }
        }

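        /* If no interface was found at all and the module is
           configured to unload when empty, back out the driver
           registrations and fail the load. */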
        mutex_lock(&smi_infos_lock);
        if (unload_when_empty && list_empty(&smi_infos)) {
                mutex_unlock(&smi_infos_lock);
#ifdef CONFIG_PCI
                pci_unregister_driver(&ipmi_pci_driver);
#endif

#ifdef CONFIG_PPC_OF
                of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif
                driver_unregister(&ipmi_driver);
                printk(KERN_WARNING
                       "ipmi_si: Unable to find any System Interface(s)\n");
                return -ENODEV;
        } else {
                mutex_unlock(&smi_infos_lock);
                return 0;
        }
}
module_init(init_ipmi_si);

static void cleanup_one_si(struct smi_info *to_clean)
{
        int           rv;
        unsigned long flags;

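        /* Nothing to clean up if the interface was never set up. */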
        if (!to_clean)
                return;

        list_del(&to_clean->link);

        /* Tell the driver that we are shutting down. */
        atomic_inc(&to_clean->stop_operation);

        /* Make sure the timer and thread are stopped and will not run
           again. */
        wait_for_timer_and_thread(to_clean);

        /* Timeouts are stopped, now make sure the interrupts are off
           for the device and that nothing is still in progress.  This
           is a little tricky with the locks, to make sure there are
           no races. */
        spin_lock_irqsave(&to_clean->si_lock, flags);
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                spin_unlock_irqrestore(&to_clean->si_lock, flags);
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
                spin_lock_irqsave(&to_clean->si_lock, flags);
        }
        disable_si_irq(to_clean);
        spin_unlock_irqrestore(&to_clean->si_lock, flags);
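        /* With the interrupt now disabled, finish off anything that
           was already in flight by polling. */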
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
        }

        /* Clean up interrupts and make sure that everything is done. */
        if (to_clean->irq_cleanup)
                to_clean->irq_cleanup(to_clean);
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
        }

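        /* The low-level interface is now quiet; detach it from the
           IPMI message handler. */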
        rv = ipmi_unregister_smi(to_clean->intf);
        if (rv) {
                printk(KERN_ERR
                       "ipmi_si: Unable to unregister device: errno=%d\n",
                       rv);
        }

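        /* Free the state machine, release any address and I/O
           resources that were claimed during setup, and drop the
           platform device. */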
        to_clean->handlers->cleanup(to_clean->si_sm);

        kfree(to_clean->si_sm);

        if (to_clean->addr_source_cleanup)
                to_clean->addr_source_cleanup(to_clean);
        if (to_clean->io_cleanup)
                to_clean->io_cleanup(to_clean);

        if (to_clean->dev_registered)
                platform_device_unregister(to_clean->pdev);

        kfree(to_clean);
}

static __exit void cleanup_ipmi_si(void)
{
        struct smi_info *e, *tmp_e;

        if (!initialized)
                return;

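        /* Unregister the bus drivers first so no new interfaces can
           be added while the existing ones are being torn down. */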
#ifdef CONFIG_PCI
        pci_unregister_driver(&ipmi_pci_driver);
#endif

#ifdef CONFIG_PPC_OF
        of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif

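        /* Tear down every interface that is still registered, under
           the list lock. */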
        mutex_lock(&smi_infos_lock);
        list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
                cleanup_one_si(e);
        mutex_unlock(&smi_infos_lock);

        driver_unregister(&ipmi_driver);
}
module_exit(cleanup_ipmi_si);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");