/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *      XPC provides a message passing capability that crosses partition
 *      boundaries. This module is made up of two parts:
 *
 *          partition   This part detects the presence/absence of other
 *                      partitions. It provides a heartbeat and monitors
 *                      the heartbeats of other partitions.
 *
 *          channel     This part manages the channels and sends/receives
 *                      messages across them to/from other partitions.
 *
 *      There are a couple of additional functions residing in XP, which
 *      provide an interface to XPC for its users.
 *
 *
 *      Caveats:
 *
 *        . We currently have no way to determine which nasid an IPI came
 *          from. Thus, xpc_IPI_send() does a remote AMO write followed by
 *          an IPI. The AMO indicates where data is to be pulled from, so
 *          after the IPI arrives, the remote partition checks the AMO word.
 *          The IPI can actually arrive before the AMO, however, so other code
 *          must periodically check for this case. Also, remote AMO operations
 *          do not reliably time out. Thus we do a remote PIO read solely to
 *          know whether the remote partition is down and whether we should
 *          stop sending IPIs to it. This remote PIO read operation is set up
 *          in a special nofault region so SAL knows to ignore (and clean up)
 *          any errors due to the remote AMO write, PIO read, and/or PIO
 *          write operations.
 *
 *          If/when new hardware solves this IPI problem, we should abandon
 *          the current approach.
 *
 */

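/*
 * A minimal sketch of the AMO-then-IPI handshake described above. This is
 * purely illustrative; the helper names below are hypothetical and the real
 * send path (xpc_IPI_send()) lives in the sn2-specific code:
 *
 *      // sender side
 *      remote_amo_write(part, ipi_flags);  // say where data is to be pulled from
 *      send_ipi(part->remote_nasid);       // may arrive before the AMO write
 *
 *      // receiver side (IRQ handler, backed by a periodic recheck)
 *      amo = read_local_amo(part);
 *      if (amo == 0)
 *              return;  // IPI beat the AMO write; the periodic dropped-IPI
 *                       // check (see xpc_dropped_IPI_check()) catches it later
 */
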
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/completion.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
        .name = "xpc"
};

struct device xpc_part_dbg_subname = {
        .bus_id = {0},          /* set to "part" at xpc_init() time */
        .driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
        .bus_id = {0},          /* set to "chan" at xpc_init() time */
        .driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
static int xpc_disengage_request_min_timelimit; /* = 0 */
static int xpc_disengage_request_max_timelimit = 120;

static ctl_table xpc_sys_xpc_hb_dir[] = {
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "hb_interval",
         .data = &xpc_hb_interval,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec_minmax,
         .strategy = &sysctl_intvec,
         .extra1 = &xpc_hb_min_interval,
         .extra2 = &xpc_hb_max_interval},
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "hb_check_interval",
         .data = &xpc_hb_check_interval,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec_minmax,
         .strategy = &sysctl_intvec,
         .extra1 = &xpc_hb_check_min_interval,
         .extra2 = &xpc_hb_check_max_interval},
        {}
};
static ctl_table xpc_sys_xpc_dir[] = {
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "hb",
         .mode = 0555,
         .child = xpc_sys_xpc_hb_dir},
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "disengage_request_timelimit",
         .data = &xpc_disengage_request_timelimit,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec_minmax,
         .strategy = &sysctl_intvec,
         .extra1 = &xpc_disengage_request_min_timelimit,
         .extra2 = &xpc_disengage_request_max_timelimit},
        {}
};
static ctl_table xpc_sys_dir[] = {
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "xpc",
         .mode = 0555,
         .child = xpc_sys_xpc_dir},
        {}
};
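
/*
 * Note: once registered via register_sysctl_table(xpc_sys_dir) in xpc_init(),
 * the tables above surface these tunables as:
 *
 *      /proc/sys/xpc/hb/hb_interval
 *      /proc/sys/xpc/hb/hb_check_interval
 *      /proc/sys/xpc/disengage_request_timelimit
 */
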
static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition's disengage request timed out */
int xpc_disengage_request_timedout;

/* #of IRQs received */
atomic_t xpc_act_IRQ_rcvd;

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;
void *xpc_heartbeating_to_mask;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
        .notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
        .notifier_call = xpc_system_die,
};

enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp);
void (*xpc_heartbeat_init) (void);
void (*xpc_heartbeat_exit) (void);
void (*xpc_increment_heartbeat) (void);
void (*xpc_offline_heartbeat) (void);
void (*xpc_online_heartbeat) (void);
void (*xpc_check_remote_hb) (void);

enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
u64 (*xpc_get_IPI_flags) (struct xpc_partition *part);
struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);

void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *remote_rp,
                                           u64 remote_rp_pa, int nasid);

void (*xpc_process_act_IRQ_rcvd) (int n_IRQs_expected);
enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part);
void (*xpc_teardown_infrastructure) (struct xpc_partition *part);

void (*xpc_mark_partition_engaged) (struct xpc_partition *part);
void (*xpc_mark_partition_disengaged) (struct xpc_partition *part);
void (*xpc_request_partition_disengage) (struct xpc_partition *part);
void (*xpc_cancel_partition_disengage_request) (struct xpc_partition *part);
u64 (*xpc_partition_engaged) (u64 partid_mask);
u64 (*xpc_partition_disengage_requested) (u64 partid_mask);
void (*xpc_clear_partition_engaged) (u64 partid_mask);
void (*xpc_clear_partition_disengage_request) (u64 partid_mask);

void (*xpc_IPI_send_local_activate) (int from_nasid);
void (*xpc_IPI_send_activated) (struct xpc_partition *part);
void (*xpc_IPI_send_local_reactivate) (int from_nasid);
void (*xpc_IPI_send_disengage) (struct xpc_partition *part);

void (*xpc_IPI_send_closerequest) (struct xpc_channel *ch,
                                   unsigned long *irq_flags);
void (*xpc_IPI_send_closereply) (struct xpc_channel *ch,
                                 unsigned long *irq_flags);
void (*xpc_IPI_send_openrequest) (struct xpc_channel *ch,
                                  unsigned long *irq_flags);
void (*xpc_IPI_send_openreply) (struct xpc_channel *ch,
                                unsigned long *irq_flags);

enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
                                void *payload, u16 payload_size, u8 notify_type,
                                xpc_notify_func func, void *key);
void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg);

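/*
 * The function pointers above form XPC's hardware-specific interface; they
 * are expected to be filled in by xpc_init_sn2() or xpc_init_uv() (called
 * from xpc_init() below) before any of them are invoked.
 */
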
/*
 * Timer function to enforce the timelimit on the partition disengage request.
 */
static void
xpc_timeout_partition_disengage_request(unsigned long data)
{
        struct xpc_partition *part = (struct xpc_partition *)data;

        DBUG_ON(time_is_after_jiffies(part->disengage_request_timeout));

        (void)xpc_partition_disengaged(part);

        DBUG_ON(part->disengage_request_timeout != 0);
        DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
}

/*
 * Notify the heartbeat check thread that an IRQ has been received.
 */
static irqreturn_t
xpc_act_IRQ_handler(int irq, void *dev_id)
{
        atomic_inc(&xpc_act_IRQ_rcvd);
        wake_up_interruptible(&xpc_act_IRQ_wq);
        return IRQ_HANDLED;
}

/*
 * Timer to produce the heartbeat.  The timer structure's function is
 * already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
        xpc_increment_heartbeat();

        if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
                wake_up_interruptible(&xpc_act_IRQ_wq);

        xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
        add_timer(&xpc_hb_timer);
}

static void
xpc_start_hb_beater(void)
{
        xpc_heartbeat_init();
        init_timer(&xpc_hb_timer);
        xpc_hb_timer.function = xpc_hb_beater;
        xpc_hb_beater(0);
}

static void
xpc_stop_hb_beater(void)
{
        del_timer_sync(&xpc_hb_timer);
        xpc_heartbeat_exit();
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
        int last_IRQ_count = 0;
        int new_IRQ_count;
        int force_IRQ = 0;

        /* this thread was marked active by xpc_hb_init() */

        set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));

        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
        xpc_start_hb_beater();

        while (!xpc_exiting) {

                dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
                        "been received\n",
                        (int)(xpc_hb_check_timeout - jiffies),
                        atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);

                /* checking of remote heartbeats is skewed by IRQ handling */
                if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
                        dev_dbg(xpc_part, "checking remote heartbeats\n");
                        xpc_check_remote_hb();

                        /*
                         * We need to periodically recheck to ensure no
                         * IPI/AMO pairs have been missed.  That check
                         * must always reset xpc_hb_check_timeout.
                         */
                        force_IRQ = 1;
                }

                /* check for outstanding IRQs */
                new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
                if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
                        force_IRQ = 0;

                        dev_dbg(xpc_part, "found an IRQ to process; will be "
                                "resetting xpc_hb_check_timeout\n");

                        xpc_process_act_IRQ_rcvd(new_IRQ_count -
                                                 last_IRQ_count);
                        last_IRQ_count = new_IRQ_count;

                        xpc_hb_check_timeout = jiffies +
                            (xpc_hb_check_interval * HZ);
                }

                /* wait for IRQ or timeout */
                (void)wait_event_interruptible(xpc_act_IRQ_wq,
                                               (last_IRQ_count <
                                                atomic_read(&xpc_act_IRQ_rcvd)
                                                || time_is_before_eq_jiffies(
                                                xpc_hb_check_timeout) ||
                                                xpc_exiting));
        }

        xpc_stop_hb_beater();

        dev_dbg(xpc_part, "heartbeat checker is exiting\n");

        /* mark this thread as having exited */
        complete(&xpc_hb_checker_exited);
        return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short-lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
        xpc_discovery();

        dev_dbg(xpc_part, "discovery thread is exiting\n");

        /* mark this thread as having exited */
        complete(&xpc_discovery_exited);
        return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB to call xpc_activating(). XPC hangs on to that kthread
 * until the partition is brought down, at which time that kthread returns
 * to XPC HB. (The return of that kthread will signify to XPC HB that XPC
 * has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, which, besides connecting
 * and disconnecting channels, ensures that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
        while (part->act_state != XPC_P_DEACTIVATING ||
               atomic_read(&part->nchannels_active) > 0 ||
               !xpc_partition_disengaged(part)) {

                xpc_process_channel_activity(part);

                /*
                 * Wait until we've been requested to activate kthreads or
                 * all of the channel's message queues have been torn down or
                 * a signal is pending.
                 *
                 * The channel_mgr_requests count is set to 1 after the
                 * channel mgr is awakened. This is done to prevent the
                 * channel mgr from making one pass through the loop for each
                 * request, since it will service all the requests in one
                 * pass. The reason it's set to 1 instead of 0 is so that
                 * other kthreads will know that the channel mgr is running
                 * and won't bother trying to wake it up.
                 */
                atomic_dec(&part->channel_mgr_requests);
                (void)wait_event_interruptible(part->channel_mgr_wq,
                                (atomic_read(&part->channel_mgr_requests) > 0 ||
                                 part->local_IPI_amo != 0 ||
                                 (part->act_state == XPC_P_DEACTIVATING &&
                                 atomic_read(&part->nchannels_active) == 0 &&
                                 xpc_partition_disengaged(part))));
                atomic_set(&part->channel_mgr_requests, 1);
        }
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition, becoming the channel
 * manager for that partition, until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
        short partid = (u64)__partid;
        struct xpc_partition *part = &xpc_partitions[partid];
        unsigned long irq_flags;

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

        spin_lock_irqsave(&part->act_lock, irq_flags);

        if (part->act_state == XPC_P_DEACTIVATING) {
                part->act_state = XPC_P_INACTIVE;
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
                part->remote_rp_pa = 0;
                return 0;
        }

        /* indicate the thread is activating */
        DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
        part->act_state = XPC_P_ACTIVATING;

        XPC_SET_REASON(part, 0, 0);
        spin_unlock_irqrestore(&part->act_lock, irq_flags);

        dev_dbg(xpc_part, "activating partition %d\n", partid);

        xpc_allow_hb(partid);

        if (xpc_setup_infrastructure(part) == xpSuccess) {
                (void)xpc_part_ref(part);       /* this will always succeed */

                if (xpc_make_first_contact(part) == xpSuccess) {
                        xpc_mark_partition_active(part);
                        xpc_channel_mgr(part);
                        /* won't return until partition is deactivating */
                }

                xpc_part_deref(part);
                xpc_teardown_infrastructure(part);
        }

        xpc_disallow_hb(partid);
        xpc_mark_partition_inactive(part);

        if (part->reason == xpReactivating) {
                /* interrupting ourselves results in activating the partition */
                xpc_IPI_send_local_reactivate(part->reactivate_nasid);
        }

        return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
        short partid = XPC_PARTID(part);
        unsigned long irq_flags;
        struct task_struct *kthread;

        spin_lock_irqsave(&part->act_lock, irq_flags);

        DBUG_ON(part->act_state != XPC_P_INACTIVE);

        part->act_state = XPC_P_ACTIVATION_REQ;
        XPC_SET_REASON(part, xpCloneKThread, __LINE__);

        spin_unlock_irqrestore(&part->act_lock, irq_flags);

        kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
                              partid);
        if (IS_ERR(kthread)) {
                spin_lock_irqsave(&part->act_lock, irq_flags);
                part->act_state = XPC_P_INACTIVE;
                XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
        }
}

/*
 * Check to see if there is any channel activity to/from the specified
 * partition.
 */
static void
xpc_check_for_channel_activity(struct xpc_partition *part)
{
        u64 IPI_amo;
        unsigned long irq_flags;

/* this needs to be uncommented, but I'm thinking this function and the */
/* ones that call it need to be moved into xpc_sn2.c... */
        IPI_amo = 0; /* = xpc_IPI_receive(part->local_IPI_amo_va); */
        if (IPI_amo == 0)
                return;

        spin_lock_irqsave(&part->IPI_lock, irq_flags);
        part->local_IPI_amo |= IPI_amo;
        spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

        dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
                XPC_PARTID(part), IPI_amo);

        xpc_wakeup_channel_mgr(part);
}

/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an AMO_t structure per partition to indicate
 * whether a partition has sent an IPI or not.  If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *      irq - Interrupt ReQuest number. NOT USED.
 *
 *      dev_id - partid of IPI's potential sender.
 */
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
        short partid = (short)(u64)dev_id;
        struct xpc_partition *part = &xpc_partitions[partid];

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

        if (xpc_part_ref(part)) {
                xpc_check_for_channel_activity(part);

                xpc_part_deref(part);
        }
        return IRQ_HANDLED;
}

/*
 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
 * because the write to their associated IPI amo completed after the IRQ/IPI
 * was received.
 */
void
xpc_dropped_IPI_check(struct xpc_partition *part)
{
        if (xpc_part_ref(part)) {
                xpc_check_for_channel_activity(part);

                part->dropped_IPI_timer.expires = jiffies +
                    XPC_P_DROPPED_IPI_WAIT_INTERVAL;
                add_timer(&part->dropped_IPI_timer);
                xpc_part_deref(part);
        }
}

void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
        int idle = atomic_read(&ch->kthreads_idle);
        int assigned = atomic_read(&ch->kthreads_assigned);
        int wakeup;

        DBUG_ON(needed <= 0);

        if (idle > 0) {
                wakeup = (needed > idle) ? idle : needed;
                needed -= wakeup;

                dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
                        "channel=%d\n", wakeup, ch->partid, ch->number);

                /* only wakeup the requested number of kthreads */
                wake_up_nr(&ch->idle_wq, wakeup);
        }

        if (needed <= 0)
                return;

        if (needed + assigned > ch->kthreads_assigned_limit) {
                needed = ch->kthreads_assigned_limit - assigned;
                if (needed <= 0)
                        return;
        }

        dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
                needed, ch->partid, ch->number);

        xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
        do {
                /* deliver messages to their intended recipients */

                while (ch->w_local_GP.get < ch->w_remote_GP.put &&
                       !(ch->flags & XPC_C_DISCONNECTING)) {
                        xpc_deliver_msg(ch);
                }

                if (atomic_inc_return(&ch->kthreads_idle) >
                    ch->kthreads_idle_limit) {
                        /* too many idle kthreads on this channel */
                        atomic_dec(&ch->kthreads_idle);
                        break;
                }

                dev_dbg(xpc_chan, "idle kthread calling "
                        "wait_event_interruptible_exclusive()\n");

                (void)wait_event_interruptible_exclusive(ch->idle_wq,
                                (ch->w_local_GP.get < ch->w_remote_GP.put ||
                                 (ch->flags & XPC_C_DISCONNECTING)));

                atomic_dec(&ch->kthreads_idle);

        } while (!(ch->flags & XPC_C_DISCONNECTING));
}

static int
xpc_kthread_start(void *args)
{
        short partid = XPC_UNPACK_ARG1(args);
        u16 ch_number = XPC_UNPACK_ARG2(args);
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_channel *ch;
        int n_needed;
        unsigned long irq_flags;

        dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
                partid, ch_number);

        ch = &part->channels[ch_number];

        if (!(ch->flags & XPC_C_DISCONNECTING)) {

                /* let registerer know that connection has been established */

                spin_lock_irqsave(&ch->lock, irq_flags);
                if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
                        ch->flags |= XPC_C_CONNECTEDCALLOUT;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        xpc_connected_callout(ch);

                        spin_lock_irqsave(&ch->lock, irq_flags);
                        ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        /*
                         * It is possible that while the callout was being
                         * made, the remote partition sent some messages.
                         * If that is the case, we may need to activate
                         * additional kthreads to help deliver them. We only
                         * need one less than the total #of messages to
                         * deliver.
                         */
                        n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
                        if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
                                xpc_activate_kthreads(ch, n_needed);

                } else {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                }

                xpc_kthread_waitmsgs(part, ch);
        }

        /* let registerer know that connection is disconnecting */

        spin_lock_irqsave(&ch->lock, irq_flags);
        if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
            !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                xpc_disconnect_callout(ch, xpDisconnecting);

                spin_lock_irqsave(&ch->lock, irq_flags);
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);

        if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
                if (atomic_dec_return(&part->nchannels_engaged) == 0) {
                        xpc_mark_partition_disengaged(part);
                        xpc_IPI_send_disengage(part);
                }
        }

        xpc_msgqueue_deref(ch);

        dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
                partid, ch_number);

        xpc_part_deref(part);
        return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
                    int ignore_disconnecting)
{
        unsigned long irq_flags;
        u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        struct task_struct *kthread;

        while (needed-- > 0) {

                /*
                 * The following is done on behalf of the newly created
                 * kthread. That kthread is responsible for doing the
                 * counterpart to the following before it exits.
                 */
                if (ignore_disconnecting) {
                        if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
                                /* kthreads assigned had gone to zero */
                                BUG_ON(!(ch->flags &
                                         XPC_C_DISCONNECTINGCALLOUT_MADE));
                                break;
                        }

                } else if (ch->flags & XPC_C_DISCONNECTING) {
                        break;

                } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
                        if (atomic_inc_return(&part->nchannels_engaged) == 1)
                                xpc_mark_partition_engaged(part);
                }
                (void)xpc_part_ref(part);
                xpc_msgqueue_ref(ch);

                kthread = kthread_run(xpc_kthread_start, (void *)args,
                                      "xpc%02dc%d", ch->partid, ch->number);
                if (IS_ERR(kthread)) {
                        /* the fork failed */

                        /*
                         * NOTE: if (ignore_disconnecting &&
                         * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
                         * then we'll deadlock if all other kthreads assigned
                         * to this channel are blocked in the channel's
                         * registerer, because the only thing that will unblock
                         * them is the xpDisconnecting callout that this
                         * failed kthread_run() would have made.
                         */

                        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
                            atomic_dec_return(&part->nchannels_engaged) == 0) {
                                xpc_mark_partition_disengaged(part);
                                xpc_IPI_send_disengage(part);
                        }
                        xpc_msgqueue_deref(ch);
                        xpc_part_deref(part);

                        if (atomic_read(&ch->kthreads_assigned) <
                            ch->kthreads_idle_limit) {
                                /*
                                 * Flag this as an error only if we have an
                                 * insufficient #of kthreads for the channel
                                 * to function.
                                 */
                                spin_lock_irqsave(&ch->lock, irq_flags);
                                XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
                                                       &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                        }
                        break;
                }
        }
}
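
/*
 * For reference, the asynchronous callouts mentioned above are registered
 * via xpc_connect() (declared in xp.h). A minimal sketch of a consumer,
 * assuming the xpc_channel_func callback type; every MY_* name is
 * hypothetical and the sizes/limits are chosen purely for illustration:
 *
 *      static void
 *      my_channel_func(enum xp_retval reason, short partid, int ch_number,
 *                      void *data, void *key)
 *      {
 *              // invoked for connect/disconnect events and message delivery
 *      }
 *
 *      ret = xpc_connect(MY_CH_NUMBER, my_channel_func, NULL, MY_MSG_SIZE,
 *                        MY_NENTRIES, MY_ASSIGNED_LIMIT, MY_IDLE_LIMIT);
 */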

void
xpc_disconnect_wait(int ch_number)
{
        unsigned long irq_flags;
        short partid;
        struct xpc_partition *part;
        struct xpc_channel *ch;
        int wakeup_channel_mgr;

        /* now wait for all callouts to the caller's function to cease */
        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                if (!xpc_part_ref(part))
                        continue;

                ch = &part->channels[ch_number];

                if (!(ch->flags & XPC_C_WDISCONNECT)) {
                        xpc_part_deref(part);
                        continue;
                }

                wait_for_completion(&ch->wdisconnect_wait);

                spin_lock_irqsave(&ch->lock, irq_flags);
                DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
                wakeup_channel_mgr = 0;

                if (ch->delayed_IPI_flags) {
                        if (part->act_state != XPC_P_DEACTIVATING) {
                                spin_lock(&part->IPI_lock);
                                XPC_SET_IPI_FLAGS(part->local_IPI_amo,
                                                  ch->number,
                                                  ch->delayed_IPI_flags);
                                spin_unlock(&part->IPI_lock);
                                wakeup_channel_mgr = 1;
                        }
                        ch->delayed_IPI_flags = 0;
                }

                ch->flags &= ~XPC_C_WDISCONNECT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                if (wakeup_channel_mgr)
                        xpc_wakeup_channel_mgr(part);

                xpc_part_deref(part);
        }
}

static void
xpc_do_exit(enum xp_retval reason)
{
        short partid;
        int active_part_count, printed_waiting_msg = 0;
        struct xpc_partition *part;
        unsigned long printmsg_time, disengage_request_timeout = 0;

        /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
        DBUG_ON(xpc_exiting == 1);

        /*
         * Let the heartbeat checker thread and the discovery thread
         * (if one is running) know that they should exit. Also wake up
         * the heartbeat checker thread in case it's sleeping.
         */
        xpc_exiting = 1;
        wake_up_interruptible(&xpc_act_IRQ_wq);

        /* ignore all incoming interrupts */
        free_irq(SGI_XPC_ACTIVATE, NULL);

        /* wait for the discovery thread to exit */
        wait_for_completion(&xpc_discovery_exited);

        /* wait for the heartbeat checker thread to exit */
        wait_for_completion(&xpc_hb_checker_exited);

        /* sleep for a third of a second or so */
        (void)msleep_interruptible(300);

        /* wait for all partitions to become inactive */

        printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
        xpc_disengage_request_timedout = 0;

        do {
                active_part_count = 0;

                for (partid = 0; partid < xp_max_npartitions; partid++) {
                        part = &xpc_partitions[partid];

                        if (xpc_partition_disengaged(part) &&
                            part->act_state == XPC_P_INACTIVE) {
                                continue;
                        }

                        active_part_count++;

                        XPC_DEACTIVATE_PARTITION(part, reason);

                        if (part->disengage_request_timeout >
                            disengage_request_timeout) {
                                disengage_request_timeout =
                                    part->disengage_request_timeout;
                        }
                }

                if (xpc_partition_engaged(-1UL)) {
                        if (time_is_before_jiffies(printmsg_time)) {
                                dev_info(xpc_part, "waiting for remote "
                                         "partitions to disengage, timeout in "
                                         "%ld seconds\n",
                                         (disengage_request_timeout - jiffies)
                                         / HZ);
                                printmsg_time = jiffies +
                                    (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
                                printed_waiting_msg = 1;
                        }

                } else if (active_part_count > 0) {
                        if (printed_waiting_msg) {
                                dev_info(xpc_part, "waiting for local partition"
                                         " to disengage\n");
                                printed_waiting_msg = 0;
                        }

                } else {
                        if (!xpc_disengage_request_timedout) {
                                dev_info(xpc_part, "all partitions have "
                                         "disengaged\n");
                        }
                        break;
                }

                /* sleep for a third of a second or so */
                (void)msleep_interruptible(300);

        } while (1);

        DBUG_ON(xpc_partition_engaged(-1UL));
        DBUG_ON(xpc_any_hbs_allowed() != 0);

        /* indicate to others that our reserved page is uninitialized */
        xpc_rsvd_page->stamp = 0;

        if (reason == xpUnloading) {
                (void)unregister_die_notifier(&xpc_die_notifier);
                (void)unregister_reboot_notifier(&xpc_reboot_notifier);
        }

        /* close down protections for IPI operations */
        xpc_restrict_IPI_ops();

        /* clear the interface to XPC's functions */
        xpc_clear_interface();

        if (xpc_sysctl)
                unregister_sysctl_table(xpc_sysctl);

        kfree(xpc_partitions);
        kfree(xpc_remote_copy_buffer_base);
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
        enum xp_retval reason;

        switch (event) {
        case SYS_RESTART:
                reason = xpSystemReboot;
                break;
        case SYS_HALT:
                reason = xpSystemHalt;
                break;
        case SYS_POWER_OFF:
                reason = xpSystemPoweroff;
                break;
        default:
                reason = xpSystemGoingDown;
        }

        xpc_do_exit(reason);
        return NOTIFY_DONE;
}

/*
 * Notify other partitions to disengage from all references to our memory.
 */
static void
xpc_die_disengage(void)
{
        struct xpc_partition *part;
        short partid;
        unsigned long engaged;
        long time, printmsg_time, disengage_request_timeout;

        /* keep xpc_hb_checker thread from doing anything (just in case) */
        xpc_exiting = 1;

        xpc_disallow_all_hbs(); /* indicate we're deactivated */

        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
                    remote_vars_version)) {

                        /* just in case it was left set by an earlier XPC */
                        xpc_clear_partition_engaged(1UL << partid);
                        continue;
                }

                if (xpc_partition_engaged(1UL << partid) ||
                    part->act_state != XPC_P_INACTIVE) {
                        xpc_request_partition_disengage(part);
                        xpc_mark_partition_disengaged(part);
                        xpc_IPI_send_disengage(part);
                }
        }

        time = rtc_time();
        printmsg_time = time +
            (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
        disengage_request_timeout = time +
            (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);

        /* wait for all other partitions to disengage from us */

        while (1) {
                engaged = xpc_partition_engaged(-1UL);
                if (!engaged) {
                        dev_info(xpc_part, "all partitions have disengaged\n");
                        break;
                }

                time = rtc_time();
                if (time >= disengage_request_timeout) {
                        for (partid = 0; partid < xp_max_npartitions;
                             partid++) {
                                if (engaged & (1UL << partid)) {
                                        dev_info(xpc_part, "disengage from "
                                                 "remote partition %d timed "
                                                 "out\n", partid);
                                }
                        }
                        break;
                }

                if (time >= printmsg_time) {
                        dev_info(xpc_part, "waiting for remote partitions to "
                                 "disengage, timeout in %ld seconds\n",
                                 (disengage_request_timeout - time) /
                                 sn_rtc_cycles_per_second);
                        printmsg_time = time +
                            (XPC_DISENGAGE_PRINTMSG_INTERVAL *
                             sn_rtc_cycles_per_second);
                }
        }
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case, we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater is to be offlined
 * for a time. In that case we need to notify the other partitions not to
 * worry about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
        switch (event) {
        case DIE_MACHINE_RESTART:
        case DIE_MACHINE_HALT:
                xpc_die_disengage();
                break;

        case DIE_KDEBUG_ENTER:
                /* Should lack of heartbeat be ignored by other partitions? */
                if (!xpc_kdebug_ignore)
                        break;

                /* fall through */
        case DIE_MCA_MONARCH_ENTER:
        case DIE_INIT_MONARCH_ENTER:
                xpc_offline_heartbeat();
                break;

        case DIE_KDEBUG_LEAVE:
                /* Is lack of heartbeat being ignored by other partitions? */
                if (!xpc_kdebug_ignore)
                        break;

                /* fall through */
        case DIE_MCA_MONARCH_LEAVE:
        case DIE_INIT_MONARCH_LEAVE:
                xpc_online_heartbeat();
                break;
        }

        return NOTIFY_DONE;
}

int __init
xpc_init(void)
{
        int ret;
        short partid;
        struct xpc_partition *part;
        struct task_struct *kthread;
        size_t buf_size;

        if (is_shub()) {
                /*
                 * The ia64-sn2 architecture supports at most 64 partitions.
                 * And the inability to unregister remote AMOs restricts us
                 * further to supporting exactly 64 partitions on this
                 * architecture, no fewer.
                 */
                if (xp_max_npartitions != 64)
                        return -EINVAL;

                xpc_init_sn2();

        } else if (is_uv()) {
                xpc_init_uv();

        } else {
                return -ENODEV;
        }

        snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
        snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");

        buf_size = max(XPC_RP_VARS_SIZE,
                       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
        xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
                                                               GFP_KERNEL,
                                                  &xpc_remote_copy_buffer_base);
        if (xpc_remote_copy_buffer == NULL) {
                dev_err(xpc_part, "can't get memory for remote copy buffer\n");
                return -ENOMEM;
        }

        xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
                                 xp_max_npartitions, GFP_KERNEL);
        if (xpc_partitions == NULL) {
                dev_err(xpc_part, "can't get memory for partition structure\n");
                ret = -ENOMEM;
                goto out_1;
        }

        /*
         * The first few fields of each entry of xpc_partitions[] need to
         * be initialized now so that calls to xpc_connect() and
         * xpc_disconnect() can be made prior to the activation of any remote
         * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
         * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
         * PARTITION HAS BEEN ACTIVATED.
         */
        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

                part->act_IRQ_rcvd = 0;
                spin_lock_init(&part->act_lock);
                part->act_state = XPC_P_INACTIVE;
                XPC_SET_REASON(part, 0, 0);

                init_timer(&part->disengage_request_timer);
                part->disengage_request_timer.function =
                    xpc_timeout_partition_disengage_request;
                part->disengage_request_timer.data = (unsigned long)part;

                part->setup_state = XPC_P_UNSET;
                init_waitqueue_head(&part->teardown_wq);
                atomic_set(&part->references, 0);
        }

        xpc_sysctl = register_sysctl_table(xpc_sys_dir);

        /*
         * Open up protections for IPI operations (and AMO operations on
         * Shub 1.1 systems).
         */
        xpc_allow_IPI_ops();

        /*
         * Interrupts being processed will increment this atomic variable and
         * awaken the heartbeat thread which will process the interrupts.
         */
        atomic_set(&xpc_act_IRQ_rcvd, 0);

        /*
         * This is safe to do before the xpc_hb_checker thread has started
         * because the handler simply wakes up a wait queue.  If an interrupt
         * is received before the thread is waiting, it will not go to sleep,
         * but rather immediately process the interrupt.
         */
        ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
                          "xpc hb", NULL);
        if (ret != 0) {
                dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
                        "errno=%d\n", -ret);
                ret = -EBUSY;
                goto out_2;
        }

        /*
         * Fill the partition reserved page with the information needed by
         * other partitions to discover we are alive and establish initial
         * communications.
         */
        xpc_rsvd_page = xpc_setup_rsvd_page();
        if (xpc_rsvd_page == NULL) {
                dev_err(xpc_part, "can't setup our reserved page\n");
                ret = -EBUSY;
                goto out_3;
        }

        /* add ourselves to the reboot_notifier_list */
        ret = register_reboot_notifier(&xpc_reboot_notifier);
        if (ret != 0)
                dev_warn(xpc_part, "can't register reboot notifier\n");

        /* add ourselves to the die_notifier list */
        ret = register_die_notifier(&xpc_die_notifier);
        if (ret != 0)
                dev_warn(xpc_part, "can't register die notifier\n");

        /*
         * The real work-horse behind xpc.  This processes incoming
         * interrupts and monitors remote heartbeats.
         */
        kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
        if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking hb check thread\n");
                ret = -EBUSY;
                goto out_4;
        }

        /*
         * Start up a thread that will attempt to discover other partitions
         * to activate based on info provided by SAL. This new thread is
         * short-lived and will exit once discovery is complete.
         */
        kthread = kthread_run(xpc_initiate_discovery, NULL,
                              XPC_DISCOVERY_THREAD_NAME);
        if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking discovery thread\n");

                /* mark this new thread as a non-starter */
                complete(&xpc_discovery_exited);

                xpc_do_exit(xpUnloading);
                return -EBUSY;
        }

        /* set the interface to point at XPC's functions */
        xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
                          xpc_initiate_send, xpc_initiate_send_notify,
                          xpc_initiate_received, xpc_initiate_partid_to_nasids);

        return 0;

        /* initialization was not successful */
out_4:
        /* indicate to others that our reserved page is uninitialized */
        xpc_rsvd_page->stamp = 0;

        (void)unregister_die_notifier(&xpc_die_notifier);
        (void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_3:
        free_irq(SGI_XPC_ACTIVATE, NULL);
out_2:
        xpc_restrict_IPI_ops();
        if (xpc_sysctl)
                unregister_sysctl_table(xpc_sysctl);
        kfree(xpc_partitions);
out_1:
        kfree(xpc_remote_copy_buffer_base);
        return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
        xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
                 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
                 "heartbeat checks.");

module_param(xpc_disengage_request_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
                 "for disengage request to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
                 "other partitions when dropping into kdebug.");
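
/*
 * Usage note (illustrative): assuming this module is built as xpc.ko, the
 * parameters above can be set at load time, e.g.:
 *
 *      modprobe xpc xpc_hb_interval=5 xpc_hb_check_interval=20
 *
 * The heartbeat tunables can also be adjusted at runtime through the sysctl
 * entries registered above under /proc/sys/xpc/.
 */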