drivers/misc/sgi-xp/xpc_main.c (linux-2.6-omap-h63xx.git; commit: "sgi-xp: isolate xpc_vars structure to sn2 only")

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *      XPC provides a message passing capability that crosses partition
 *      boundaries. This module is made up of two parts:
 *
 *          partition   This part detects the presence/absence of other
 *                      partitions. It provides a heartbeat and monitors
 *                      the heartbeats of other partitions.
 *
 *          channel     This part manages the channels and sends/receives
 *                      messages across them to/from other partitions.
 *
 *      There are a couple of additional functions residing in XP, which
 *      provide an interface to XPC for its users.
 *
 *
 *      Caveats:
 *
 *        . We currently have no way to determine which nasid an IPI came
 *          from. Thus, xpc_IPI_send() does a remote AMO write followed by
 *          an IPI. The AMO indicates where data is to be pulled from, so
 *          after the IPI arrives, the remote partition checks the AMO word.
 *          The IPI can actually arrive before the AMO does, however, so other
 *          code must periodically check for this case. Also, remote AMO
 *          operations do not reliably time out. Thus we do a remote PIO read
 *          solely to learn whether the remote partition is down and whether
 *          we should stop sending IPIs to it. This remote PIO read operation
 *          is set up in a special nofault region so SAL knows to ignore (and
 *          clean up) any errors due to the remote AMO write, PIO read, and/or
 *          PIO write operations.
 *
 *          If/when new hardware solves this IPI problem, we should abandon
 *          the current approach.
 *
 */

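/*
 * For illustration, a platform-neutral sketch of the race described above;
 * this is not the SGI AMO/IPI API, and all names in it are hypothetical.
 * The sender publishes a flag before raising a doorbell; because the
 * doorbell can outrun the flag, the receiver must poll the flag as well as
 * react to the doorbell.
 */
#if 0   /* sketch only, not built */
#include <stdatomic.h>

static atomic_int pull_from;    /* stands in for the AMO word */

static void raise_doorbell(void) { /* platform notification (the "IPI") */ }
static void pull_data_from(int nasid) { (void)nasid; /* fetch the data */ }

static void sender(int my_nasid)
{
        /* publish where data is to be pulled from ... */
        atomic_store(&pull_from, my_nasid);
        /* ... then raise the doorbell; it may be seen before the store */
        raise_doorbell();
}

static void receiver(void)
{
        /*
         * Called from both the doorbell handler and a periodic timer,
         * since the doorbell can arrive before the flag is visible.
         */
        int nasid = atomic_exchange(&pull_from, 0);

        if (nasid != 0)
                pull_data_from(nasid);
}
#endif
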
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/completion.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
        .name = "xpc"
};

struct device xpc_part_dbg_subname = {
        .bus_id = {0},          /* set to "part" at xpc_init() time */
        .driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
        .bus_id = {0},          /* set to "chan" at xpc_init() time */
        .driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
static int xpc_disengage_request_min_timelimit; /* = 0 */
static int xpc_disengage_request_max_timelimit = 120;

static ctl_table xpc_sys_xpc_hb_dir[] = {
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "hb_interval",
         .data = &xpc_hb_interval,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec_minmax,
         .strategy = &sysctl_intvec,
         .extra1 = &xpc_hb_min_interval,
         .extra2 = &xpc_hb_max_interval},
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "hb_check_interval",
         .data = &xpc_hb_check_interval,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec_minmax,
         .strategy = &sysctl_intvec,
         .extra1 = &xpc_hb_check_min_interval,
         .extra2 = &xpc_hb_check_max_interval},
        {}
};
static ctl_table xpc_sys_xpc_dir[] = {
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "hb",
         .mode = 0555,
         .child = xpc_sys_xpc_hb_dir},
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "disengage_request_timelimit",
         .data = &xpc_disengage_request_timelimit,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec_minmax,
         .strategy = &sysctl_intvec,
         .extra1 = &xpc_disengage_request_min_timelimit,
         .extra2 = &xpc_disengage_request_max_timelimit},
        {}
};
static ctl_table xpc_sys_dir[] = {
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "xpc",
         .mode = 0555,
         .child = xpc_sys_xpc_dir},
        {}
};
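
/*
 * For reference, the tables above surface these tunables as (assuming the
 * usual /proc/sys mount):
 *
 *      /proc/sys/xpc/hb/hb_interval
 *      /proc/sys/xpc/hb/hb_check_interval
 *      /proc/sys/xpc/disengage_request_timelimit
 *
 * e.g. "echo 5 > /proc/sys/xpc/hb/hb_interval" sets the heartbeat interval
 * to 5 seconds; proc_dointvec_minmax() rejects writes outside the
 * extra1/extra2 bounds (here 1..10).
 */
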
static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition disengage request was timed out */
int xpc_disengage_request_timedout;

/* #of IRQs received */
atomic_t xpc_act_IRQ_rcvd;

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;
void *xpc_heartbeating_to_mask;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
        .notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
        .notifier_call = xpc_system_die,
};

enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp);
void (*xpc_heartbeat_init) (void);
void (*xpc_heartbeat_exit) (void);
void (*xpc_increment_heartbeat) (void);
void (*xpc_offline_heartbeat) (void);
void (*xpc_online_heartbeat) (void);
void (*xpc_check_remote_hb) (void);

enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
u64 (*xpc_get_IPI_flags) (struct xpc_partition *part);
struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);

void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *remote_rp,
                                           u64 remote_rp_pa, int nasid);

void (*xpc_process_act_IRQ_rcvd) (int n_IRQs_expected);
enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part);
void (*xpc_teardown_infrastructure) (struct xpc_partition *part);

void (*xpc_mark_partition_engaged) (struct xpc_partition *part);
void (*xpc_mark_partition_disengaged) (struct xpc_partition *part);
void (*xpc_request_partition_disengage) (struct xpc_partition *part);
void (*xpc_cancel_partition_disengage_request) (struct xpc_partition *part);
u64 (*xpc_partition_engaged) (u64 partid_mask);
u64 (*xpc_partition_disengage_requested) (u64 partid_mask);
void (*xpc_clear_partition_engaged) (u64 partid_mask);
void (*xpc_clear_partition_disengage_request) (u64 partid_mask);

void (*xpc_IPI_send_local_activate) (int from_nasid);
void (*xpc_IPI_send_activated) (struct xpc_partition *part);
void (*xpc_IPI_send_local_reactivate) (int from_nasid);
void (*xpc_IPI_send_disengage) (struct xpc_partition *part);

void (*xpc_IPI_send_closerequest) (struct xpc_channel *ch,
                                   unsigned long *irq_flags);
void (*xpc_IPI_send_closereply) (struct xpc_channel *ch,
                                 unsigned long *irq_flags);
void (*xpc_IPI_send_openrequest) (struct xpc_channel *ch,
                                  unsigned long *irq_flags);
void (*xpc_IPI_send_openreply) (struct xpc_channel *ch,
                                unsigned long *irq_flags);

enum xp_retval (*xpc_allocate_msg) (struct xpc_channel *ch, u32 flags,
                                    struct xpc_msg **address_of_msg);

enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, struct xpc_msg *msg,
                                u8 notify_type, xpc_notify_func func,
                                void *key);
void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg);

/*
 * Timer function to enforce the timelimit on the partition disengage request.
 */
static void
xpc_timeout_partition_disengage_request(unsigned long data)
{
        struct xpc_partition *part = (struct xpc_partition *)data;

        DBUG_ON(time_before(jiffies, part->disengage_request_timeout));

        (void)xpc_partition_disengaged(part);

        DBUG_ON(part->disengage_request_timeout != 0);
        DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
}

/*
 * Notify the heartbeat check thread that an IRQ has been received.
 */
static irqreturn_t
xpc_act_IRQ_handler(int irq, void *dev_id)
{
        atomic_inc(&xpc_act_IRQ_rcvd);
        wake_up_interruptible(&xpc_act_IRQ_wq);
        return IRQ_HANDLED;
}

/*
 * Timer to produce the heartbeat.  The timer structure's function is
 * already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
        xpc_increment_heartbeat();

        if (time_after_eq(jiffies, xpc_hb_check_timeout))
                wake_up_interruptible(&xpc_act_IRQ_wq);

        xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
        add_timer(&xpc_hb_timer);
}

static void
xpc_start_hb_beater(void)
{
        xpc_heartbeat_init();
        init_timer(&xpc_hb_timer);
        xpc_hb_timer.function = xpc_hb_beater;
        xpc_hb_beater(0);
}

static void
xpc_stop_hb_beater(void)
{
        del_timer_sync(&xpc_hb_timer);
        xpc_heartbeat_exit();
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
        int last_IRQ_count = 0;
        int new_IRQ_count;
        int force_IRQ = 0;

        /* this thread was marked active by xpc_hb_init() */

        set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));

        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
        xpc_start_hb_beater();

        while (!xpc_exiting) {

                dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
                        "been received\n",
                        (int)(xpc_hb_check_timeout - jiffies),
                        atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);

                /* checking of remote heartbeats is skewed by IRQ handling */
                if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
                        dev_dbg(xpc_part, "checking remote heartbeats\n");
                        xpc_check_remote_hb();

                        /*
                         * We need to periodically recheck to ensure no
                         * IPI/AMO pairs have been missed.  That check
                         * must always reset xpc_hb_check_timeout.
                         */
                        force_IRQ = 1;
                }

                /* check for outstanding IRQs */
                new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
                if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
                        force_IRQ = 0;

                        dev_dbg(xpc_part, "found an IRQ to process; will be "
                                "resetting xpc_hb_check_timeout\n");

                        xpc_process_act_IRQ_rcvd(new_IRQ_count -
                                                 last_IRQ_count);
                        last_IRQ_count = new_IRQ_count;

                        xpc_hb_check_timeout = jiffies +
                            (xpc_hb_check_interval * HZ);
                }

                /* wait for IRQ or timeout */
                (void)wait_event_interruptible(xpc_act_IRQ_wq,
                                               (last_IRQ_count <
                                                atomic_read(&xpc_act_IRQ_rcvd)
                                                || time_after_eq(jiffies,
                                                        xpc_hb_check_timeout) ||
                                                xpc_exiting));
        }

        xpc_stop_hb_beater();

        dev_dbg(xpc_part, "heartbeat checker is exiting\n");

        /* mark this thread as having exited */
        complete(&xpc_hb_checker_exited);
        return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
        xpc_discovery();

        dev_dbg(xpc_part, "discovery thread is exiting\n");

        /* mark this thread as having exited */
        complete(&xpc_discovery_exited);
        return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time the kthread
 * returns to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
        while (part->act_state != XPC_P_DEACTIVATING ||
               atomic_read(&part->nchannels_active) > 0 ||
               !xpc_partition_disengaged(part)) {

                xpc_process_channel_activity(part);

                /*
                 * Wait until we've been requested to activate kthreads or
                 * all of the channel's message queues have been torn down or
                 * a signal is pending.
                 *
                 * The channel_mgr_requests count is set to 1 after the
                 * channel mgr is awakened. This prevents the channel mgr from
                 * making one pass through the loop for each request, since it
                 * services all outstanding requests in a single pass. The
                 * reason it's set to 1 instead of 0 is so that other kthreads
                 * will know that the channel mgr is running and won't bother
                 * trying to wake it up.
                 */
                atomic_dec(&part->channel_mgr_requests);
                (void)wait_event_interruptible(part->channel_mgr_wq,
                                (atomic_read(&part->channel_mgr_requests) > 0 ||
                                 part->local_IPI_amo != 0 ||
                                 (part->act_state == XPC_P_DEACTIVATING &&
                                 atomic_read(&part->nchannels_active) == 0 &&
                                 xpc_partition_disengaged(part))));
                atomic_set(&part->channel_mgr_requests, 1);
        }
}

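/*
 * For context: the wakeup side of the channel_mgr_requests handshake above
 * lives in xpc.h. A minimal sketch of it, assuming the customary
 * implementation (not verified against this tree):
 */
#if 0   /* sketch only, not built */
static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part)
{
        /* first request since the channel mgr went idle? then wake it */
        if (atomic_inc_return(&part->channel_mgr_requests) == 1)
                wake_up(&part->channel_mgr_wq);
}
#endif
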
/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition, becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
        short partid = (u64)__partid;
        struct xpc_partition *part = &xpc_partitions[partid];
        unsigned long irq_flags;

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

        spin_lock_irqsave(&part->act_lock, irq_flags);

        if (part->act_state == XPC_P_DEACTIVATING) {
                part->act_state = XPC_P_INACTIVE;
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
                part->remote_rp_pa = 0;
                return 0;
        }

        /* indicate the thread is activating */
        DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
        part->act_state = XPC_P_ACTIVATING;

        XPC_SET_REASON(part, 0, 0);
        spin_unlock_irqrestore(&part->act_lock, irq_flags);

        dev_dbg(xpc_part, "activating partition %d\n", partid);

        xpc_allow_hb(partid);

        if (xpc_setup_infrastructure(part) == xpSuccess) {
                (void)xpc_part_ref(part);       /* this will always succeed */

                if (xpc_make_first_contact(part) == xpSuccess) {
                        xpc_mark_partition_active(part);
                        xpc_channel_mgr(part);
                        /* won't return until partition is deactivating */
                }

                xpc_part_deref(part);
                xpc_teardown_infrastructure(part);
        }

        xpc_disallow_hb(partid);
        xpc_mark_partition_inactive(part);

        if (part->reason == xpReactivating) {
                /* interrupting ourselves results in activating partition */
                xpc_IPI_send_local_reactivate(part->reactivate_nasid);
        }

        return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
        short partid = XPC_PARTID(part);
        unsigned long irq_flags;
        struct task_struct *kthread;

        spin_lock_irqsave(&part->act_lock, irq_flags);

        DBUG_ON(part->act_state != XPC_P_INACTIVE);

        part->act_state = XPC_P_ACTIVATION_REQ;
        XPC_SET_REASON(part, xpCloneKThread, __LINE__);

        spin_unlock_irqrestore(&part->act_lock, irq_flags);

        kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
                              partid);
        if (IS_ERR(kthread)) {
                spin_lock_irqsave(&part->act_lock, irq_flags);
                part->act_state = XPC_P_INACTIVE;
                XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
        }
}

/*
 * Check to see if there is any channel activity to/from the specified
 * partition.
 */
static void
xpc_check_for_channel_activity(struct xpc_partition *part)
{
        u64 IPI_amo;
        unsigned long irq_flags;

        /*
         * FIXME: this needs to be uncommented, but this function and the
         * ones that call it likely need to be moved into xpc_sn2.c first.
         */
        IPI_amo = 0;    /* = xpc_IPI_receive(part->local_IPI_amo_va); */
        if (IPI_amo == 0)
                return;

        spin_lock_irqsave(&part->IPI_lock, irq_flags);
        part->local_IPI_amo |= IPI_amo;
        spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

        dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
                XPC_PARTID(part), IPI_amo);

        xpc_wakeup_channel_mgr(part);
}

/*
 * Handle the receipt of an SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an AMO_t structure per partition to indicate
 * whether a partition has sent an IPI or not.  If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *      irq - Interrupt ReQuest number. NOT USED.
 *
 *      dev_id - partid of IPI's potential sender.
 */
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
        short partid = (short)(u64)dev_id;
        struct xpc_partition *part = &xpc_partitions[partid];

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

        if (xpc_part_ref(part)) {
                xpc_check_for_channel_activity(part);

                xpc_part_deref(part);
        }
        return IRQ_HANDLED;
}

/*
 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
 * because the write to their associated IPI amo completed after the IRQ/IPI
 * was received.
 */
void
xpc_dropped_IPI_check(struct xpc_partition *part)
{
        if (xpc_part_ref(part)) {
                xpc_check_for_channel_activity(part);

                part->dropped_IPI_timer.expires = jiffies +
                    XPC_P_DROPPED_IPI_WAIT_INTERVAL;
                add_timer(&part->dropped_IPI_timer);
                xpc_part_deref(part);
        }
}

void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
        int idle = atomic_read(&ch->kthreads_idle);
        int assigned = atomic_read(&ch->kthreads_assigned);
        int wakeup;

        DBUG_ON(needed <= 0);

        if (idle > 0) {
                wakeup = (needed > idle) ? idle : needed;
                needed -= wakeup;

                dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
                        "channel=%d\n", wakeup, ch->partid, ch->number);

                /* only wakeup the requested number of kthreads */
                wake_up_nr(&ch->idle_wq, wakeup);
        }

        if (needed <= 0)
                return;

        if (needed + assigned > ch->kthreads_assigned_limit) {
                needed = ch->kthreads_assigned_limit - assigned;
                if (needed <= 0)
                        return;
        }

        dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
                needed, ch->partid, ch->number);

        xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
        do {
                /* deliver messages to their intended recipients */

                while (ch->w_local_GP.get < ch->w_remote_GP.put &&
                       !(ch->flags & XPC_C_DISCONNECTING)) {
                        xpc_deliver_msg(ch);
                }

                if (atomic_inc_return(&ch->kthreads_idle) >
                    ch->kthreads_idle_limit) {
                        /* too many idle kthreads on this channel */
                        atomic_dec(&ch->kthreads_idle);
                        break;
                }

                dev_dbg(xpc_chan, "idle kthread calling "
                        "wait_event_interruptible_exclusive()\n");

                (void)wait_event_interruptible_exclusive(ch->idle_wq,
                                (ch->w_local_GP.get < ch->w_remote_GP.put ||
                                 (ch->flags & XPC_C_DISCONNECTING)));

                atomic_dec(&ch->kthreads_idle);

        } while (!(ch->flags & XPC_C_DISCONNECTING));
}

static int
xpc_kthread_start(void *args)
{
        short partid = XPC_UNPACK_ARG1(args);
        u16 ch_number = XPC_UNPACK_ARG2(args);
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_channel *ch;
        int n_needed;
        unsigned long irq_flags;

        dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
                partid, ch_number);

        ch = &part->channels[ch_number];

        if (!(ch->flags & XPC_C_DISCONNECTING)) {

                /* let registerer know that connection has been established */

                spin_lock_irqsave(&ch->lock, irq_flags);
                if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
                        ch->flags |= XPC_C_CONNECTEDCALLOUT;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        xpc_connected_callout(ch);

                        spin_lock_irqsave(&ch->lock, irq_flags);
                        ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        /*
                         * It is possible that while the callout was being
                         * made the remote partition sent some messages. If
                         * that is the case, we may need to activate
                         * additional kthreads to help deliver them. We only
                         * need one less than the total #of messages to
                         * deliver.
                         */
                        n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
                        if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
                                xpc_activate_kthreads(ch, n_needed);

                } else {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                }

                xpc_kthread_waitmsgs(part, ch);
        }

        /* let registerer know that connection is disconnecting */

        spin_lock_irqsave(&ch->lock, irq_flags);
        if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
            !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                xpc_disconnect_callout(ch, xpDisconnecting);

                spin_lock_irqsave(&ch->lock, irq_flags);
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);

        if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
                if (atomic_dec_return(&part->nchannels_engaged) == 0) {
                        xpc_mark_partition_disengaged(part);
                        xpc_IPI_send_disengage(part);
                }
        }

        xpc_msgqueue_deref(ch);

        dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
                partid, ch_number);

        xpc_part_deref(part);
        return 0;
}

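/*
 * XPC_PACK_ARGS()/XPC_UNPACK_ARGn(), used by xpc_kthread_start() above and
 * xpc_create_kthreads() below, squeeze the partid and channel number into
 * the one pointer-sized argument kthread_run() passes along. A sketch of
 * the presumed encoding (two 32-bit halves of a u64; see xpc.h for the
 * authoritative definitions):
 */
#if 0   /* sketch only, not built */
#define XPC_PACK_ARGS(_arg1, _arg2) \
        ((((u64)(_arg1)) & 0xffffffff) | ((((u64)(_arg2)) & 0xffffffff) << 32))
#define XPC_UNPACK_ARG1(_args)  (((u64)(_args)) & 0xffffffff)
#define XPC_UNPACK_ARG2(_args)  ((((u64)(_args)) >> 32) & 0xffffffff)
#endif
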
/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
                    int ignore_disconnecting)
{
        unsigned long irq_flags;
        u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        struct task_struct *kthread;

        while (needed-- > 0) {

                /*
                 * The following is done on behalf of the newly created
                 * kthread. That kthread is responsible for doing the
                 * counterpart to the following before it exits.
                 */
                if (ignore_disconnecting) {
                        if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
                                /* kthreads assigned had gone to zero */
                                BUG_ON(!(ch->flags &
                                         XPC_C_DISCONNECTINGCALLOUT_MADE));
                                break;
                        }

                } else if (ch->flags & XPC_C_DISCONNECTING) {
                        break;

                } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
                        if (atomic_inc_return(&part->nchannels_engaged) == 1)
                                xpc_mark_partition_engaged(part);
                }
                (void)xpc_part_ref(part);
                xpc_msgqueue_ref(ch);

                kthread = kthread_run(xpc_kthread_start, (void *)args,
                                      "xpc%02dc%d", ch->partid, ch->number);
                if (IS_ERR(kthread)) {
                        /* the fork failed */

                        /*
                         * NOTE: if (ignore_disconnecting &&
                         * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
                         * then we'll deadlock if all other kthreads assigned
                         * to this channel are blocked in the channel's
                         * registerer, because the only thing that will unblock
                         * them is the xpDisconnecting callout that this
                         * failed kthread_run() would have made.
                         */

                        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
                            atomic_dec_return(&part->nchannels_engaged) == 0) {
                                xpc_mark_partition_disengaged(part);
                                xpc_IPI_send_disengage(part);
                        }
                        xpc_msgqueue_deref(ch);
                        xpc_part_deref(part);

                        if (atomic_read(&ch->kthreads_assigned) <
                            ch->kthreads_idle_limit) {
                                /*
                                 * Flag this as an error only if we have an
                                 * insufficient #of kthreads for the channel
                                 * to function.
                                 */
                                spin_lock_irqsave(&ch->lock, irq_flags);
                                XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
                                                       &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                        }
                        break;
                }
        }
}

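/*
 * A consumer registers the callouts mentioned above via xpc_connect()
 * (declared in xp.h). A hypothetical registration follows; the signature
 * is assumed from this era of the code and every value is illustrative:
 */
#if 0   /* sketch only, not built */
static void
my_channel_func(enum xp_retval reason, short partid, int ch_number,
                void *data, void *key)
{
        /* invoked for connection events and delivered messages */
}

static enum xp_retval
my_register(void)
{
        /* channel 0, 128-byte payloads, 64 message entries, */
        /* at most 4 kthreads assigned to the channel, at most 2 idle */
        return xpc_connect(0, my_channel_func, NULL, 128, 64, 4, 2);
}
#endif
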
void
xpc_disconnect_wait(int ch_number)
{
        unsigned long irq_flags;
        short partid;
        struct xpc_partition *part;
        struct xpc_channel *ch;
        int wakeup_channel_mgr;

        /* now wait for all callouts to the caller's function to cease */
        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                if (!xpc_part_ref(part))
                        continue;

                ch = &part->channels[ch_number];

                if (!(ch->flags & XPC_C_WDISCONNECT)) {
                        xpc_part_deref(part);
                        continue;
                }

                wait_for_completion(&ch->wdisconnect_wait);

                spin_lock_irqsave(&ch->lock, irq_flags);
                DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
                wakeup_channel_mgr = 0;

                if (ch->delayed_IPI_flags) {
                        if (part->act_state != XPC_P_DEACTIVATING) {
                                spin_lock(&part->IPI_lock);
                                XPC_SET_IPI_FLAGS(part->local_IPI_amo,
                                                  ch->number,
                                                  ch->delayed_IPI_flags);
                                spin_unlock(&part->IPI_lock);
                                wakeup_channel_mgr = 1;
                        }
                        ch->delayed_IPI_flags = 0;
                }

                ch->flags &= ~XPC_C_WDISCONNECT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                if (wakeup_channel_mgr)
                        xpc_wakeup_channel_mgr(part);

                xpc_part_deref(part);
        }
}

static void
xpc_do_exit(enum xp_retval reason)
{
        short partid;
        int active_part_count, printed_waiting_msg = 0;
        struct xpc_partition *part;
        unsigned long printmsg_time, disengage_request_timeout = 0;

        /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
        DBUG_ON(xpc_exiting == 1);

        /*
         * Let the heartbeat checker thread and the discovery thread
         * (if one is running) know that they should exit. Also wake up
         * the heartbeat checker thread in case it's sleeping.
         */
        xpc_exiting = 1;
        wake_up_interruptible(&xpc_act_IRQ_wq);

        /* ignore all incoming interrupts */
        free_irq(SGI_XPC_ACTIVATE, NULL);

        /* wait for the discovery thread to exit */
        wait_for_completion(&xpc_discovery_exited);

        /* wait for the heartbeat checker thread to exit */
        wait_for_completion(&xpc_hb_checker_exited);

        /* sleep for about a third of a second */
        (void)msleep_interruptible(300);

        /* wait for all partitions to become inactive */

        printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
        xpc_disengage_request_timedout = 0;

        do {
                active_part_count = 0;

                for (partid = 0; partid < xp_max_npartitions; partid++) {
                        part = &xpc_partitions[partid];

                        if (xpc_partition_disengaged(part) &&
                            part->act_state == XPC_P_INACTIVE) {
                                continue;
                        }

                        active_part_count++;

                        XPC_DEACTIVATE_PARTITION(part, reason);

                        if (part->disengage_request_timeout >
                            disengage_request_timeout) {
                                disengage_request_timeout =
                                    part->disengage_request_timeout;
                        }
                }

                if (xpc_partition_engaged(-1UL)) {
                        if (time_after(jiffies, printmsg_time)) {
                                dev_info(xpc_part, "waiting for remote "
                                         "partitions to disengage, timeout in "
                                         "%ld seconds\n",
                                         (disengage_request_timeout - jiffies)
                                         / HZ);
                                printmsg_time = jiffies +
                                    (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
                                printed_waiting_msg = 1;
                        }

                } else if (active_part_count > 0) {
                        if (printed_waiting_msg) {
                                dev_info(xpc_part, "waiting for local partition"
                                         " to disengage\n");
                                printed_waiting_msg = 0;
                        }

                } else {
                        if (!xpc_disengage_request_timedout) {
                                dev_info(xpc_part, "all partitions have "
                                         "disengaged\n");
                        }
                        break;
                }

                /* sleep for about a third of a second */
                (void)msleep_interruptible(300);

        } while (1);

        DBUG_ON(xpc_partition_engaged(-1UL));
        DBUG_ON(xpc_any_hbs_allowed() != 0);

        /* indicate to others that our reserved page is uninitialized */
        xpc_rsvd_page->stamp = ZERO_STAMP;

        if (reason == xpUnloading) {
                (void)unregister_die_notifier(&xpc_die_notifier);
                (void)unregister_reboot_notifier(&xpc_reboot_notifier);
        }

        /* close down protections for IPI operations */
        xpc_restrict_IPI_ops();

        /* clear the interface to XPC's functions */
        xpc_clear_interface();

        if (xpc_sysctl)
                unregister_sysctl_table(xpc_sysctl);

        kfree(xpc_partitions);
        kfree(xpc_remote_copy_buffer_base);
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
        enum xp_retval reason;

        switch (event) {
        case SYS_RESTART:
                reason = xpSystemReboot;
                break;
        case SYS_HALT:
                reason = xpSystemHalt;
                break;
        case SYS_POWER_OFF:
                reason = xpSystemPoweroff;
                break;
        default:
                reason = xpSystemGoingDown;
        }

        xpc_do_exit(reason);
        return NOTIFY_DONE;
}

/*
 * Notify other partitions to disengage from all references to our memory.
 */
static void
xpc_die_disengage(void)
{
        struct xpc_partition *part;
        short partid;
        unsigned long engaged;
        long time, printmsg_time, disengage_request_timeout;

        /* keep xpc_hb_checker thread from doing anything (just in case) */
        xpc_exiting = 1;

        xpc_disallow_all_hbs();         /* indicate we're deactivated */

        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
                    remote_vars_version)) {

                        /* just in case it was left set by an earlier XPC */
                        xpc_clear_partition_engaged(1UL << partid);
                        continue;
                }

                if (xpc_partition_engaged(1UL << partid) ||
                    part->act_state != XPC_P_INACTIVE) {
                        xpc_request_partition_disengage(part);
                        xpc_mark_partition_disengaged(part);
                        xpc_IPI_send_disengage(part);
                }
        }

        time = rtc_time();
        printmsg_time = time +
            (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
        disengage_request_timeout = time +
            (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);

        /* wait for all other partitions to disengage from us */

        while (1) {
                engaged = xpc_partition_engaged(-1UL);
                if (!engaged) {
                        dev_info(xpc_part, "all partitions have disengaged\n");
                        break;
                }

                time = rtc_time();
                if (time >= disengage_request_timeout) {
                        for (partid = 0; partid < xp_max_npartitions;
                             partid++) {
                                if (engaged & (1UL << partid)) {
                                        dev_info(xpc_part, "disengage from "
                                                 "remote partition %d timed "
                                                 "out\n", partid);
                                }
                        }
                        break;
                }

                if (time >= printmsg_time) {
                        dev_info(xpc_part, "waiting for remote partitions to "
                                 "disengage, timeout in %ld seconds\n",
                                 (disengage_request_timeout - time) /
                                 sn_rtc_cycles_per_second);
                        printmsg_time = time +
                            (XPC_DISENGAGE_PRINTMSG_INTERVAL *
                             sn_rtc_cycles_per_second);
                }
        }
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case, we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater is to be offlined
 * for a time. In this case we need to notify other partitions not to worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
        switch (event) {
        case DIE_MACHINE_RESTART:
        case DIE_MACHINE_HALT:
                xpc_die_disengage();
                break;

        case DIE_KDEBUG_ENTER:
                /* Should lack of heartbeat be ignored by other partitions? */
                if (!xpc_kdebug_ignore)
                        break;

                /* fall through */
        case DIE_MCA_MONARCH_ENTER:
        case DIE_INIT_MONARCH_ENTER:
                xpc_offline_heartbeat();
                break;

        case DIE_KDEBUG_LEAVE:
                /* Is lack of heartbeat being ignored by other partitions? */
                if (!xpc_kdebug_ignore)
                        break;

                /* fall through */
        case DIE_MCA_MONARCH_LEAVE:
        case DIE_INIT_MONARCH_LEAVE:
                xpc_online_heartbeat();
                break;
        }

        return NOTIFY_DONE;
}

int __init
xpc_init(void)
{
        int ret;
        short partid;
        struct xpc_partition *part;
        struct task_struct *kthread;
        size_t buf_size;

        if (is_shub()) {
                /*
                 * The ia64-sn2 architecture supports at most 64 partitions.
                 * And the inability to unregister remote AMOs restricts us
                 * further to only support exactly 64 partitions on this
                 * architecture, no less.
                 */
                if (xp_max_npartitions != 64)
                        return -EINVAL;

                xpc_init_sn2();

        } else if (is_uv()) {
                xpc_init_uv();

        } else {
                return -ENODEV;
        }

        snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
        snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");

        buf_size = max(XPC_RP_VARS_SIZE,
                       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
        xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
                                                               GFP_KERNEL,
                                                  &xpc_remote_copy_buffer_base);
        if (xpc_remote_copy_buffer == NULL) {
                dev_err(xpc_part, "can't get memory for remote copy buffer\n");
                return -ENOMEM;
        }

        xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
                                 xp_max_npartitions, GFP_KERNEL);
        if (xpc_partitions == NULL) {
                dev_err(xpc_part, "can't get memory for partition structure\n");
                ret = -ENOMEM;
                goto out_1;
        }

        /*
         * The first few fields of each entry of xpc_partitions[] need to
         * be initialized now so that calls to xpc_connect() and
         * xpc_disconnect() can be made prior to the activation of any remote
         * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
         * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
         * PARTITION HAS BEEN ACTIVATED.
         */
        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

                part->act_IRQ_rcvd = 0;
                spin_lock_init(&part->act_lock);
                part->act_state = XPC_P_INACTIVE;
                XPC_SET_REASON(part, 0, 0);

                init_timer(&part->disengage_request_timer);
                part->disengage_request_timer.function =
                    xpc_timeout_partition_disengage_request;
                part->disengage_request_timer.data = (unsigned long)part;

                part->setup_state = XPC_P_UNSET;
                init_waitqueue_head(&part->teardown_wq);
                atomic_set(&part->references, 0);
        }

        xpc_sysctl = register_sysctl_table(xpc_sys_dir);

        /*
         * Open up protections for IPI operations (and AMO operations on
         * Shub 1.1 systems).
         */
        xpc_allow_IPI_ops();

        /*
         * Interrupts being processed will increment this atomic variable and
         * awaken the heartbeat thread which will process the interrupts.
         */
        atomic_set(&xpc_act_IRQ_rcvd, 0);

        /*
         * This is safe to do before the xpc_hb_checker thread has started
         * because the handler simply wakes a wait queue. If an interrupt is
         * received before the thread is waiting, the thread will not go to
         * sleep, but rather immediately process the interrupt.
         */
        ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
                          "xpc hb", NULL);
        if (ret != 0) {
                dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
                        "errno=%d\n", -ret);
                ret = -EBUSY;
                goto out_2;
        }

        /*
         * Fill the partition reserved page with the information needed by
         * other partitions to discover we are alive and establish initial
         * communications.
         */
        xpc_rsvd_page = xpc_setup_rsvd_page();
        if (xpc_rsvd_page == NULL) {
                dev_err(xpc_part, "can't setup our reserved page\n");
                ret = -EBUSY;
                goto out_3;
        }

        /* add ourselves to the reboot_notifier_list */
        ret = register_reboot_notifier(&xpc_reboot_notifier);
        if (ret != 0)
                dev_warn(xpc_part, "can't register reboot notifier\n");

        /* add ourselves to the die_notifier list */
        ret = register_die_notifier(&xpc_die_notifier);
        if (ret != 0)
                dev_warn(xpc_part, "can't register die notifier\n");

        /*
         * The real work-horse behind xpc.  This processes incoming
         * interrupts and monitors remote heartbeats.
         */
        kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
        if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking hb check thread\n");
                ret = -EBUSY;
                goto out_4;
        }

        /*
         * Startup a thread that will attempt to discover other partitions to
         * activate based on info provided by SAL. This new thread is short
         * lived and will exit once discovery is complete.
         */
        kthread = kthread_run(xpc_initiate_discovery, NULL,
                              XPC_DISCOVERY_THREAD_NAME);
        if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking discovery thread\n");

                /* mark this new thread as a non-starter */
                complete(&xpc_discovery_exited);

                xpc_do_exit(xpUnloading);
                return -EBUSY;
        }

        /* set the interface to point at XPC's functions */
        xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
                          xpc_initiate_allocate, xpc_initiate_send,
                          xpc_initiate_send_notify, xpc_initiate_received,
                          xpc_initiate_partid_to_nasids);

        return 0;

        /* initialization was not successful */
out_4:
        /* indicate to others that our reserved page is uninitialized */
        xpc_rsvd_page->stamp = ZERO_STAMP;

        (void)unregister_die_notifier(&xpc_die_notifier);
        (void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_3:
        free_irq(SGI_XPC_ACTIVATE, NULL);
out_2:
        xpc_restrict_IPI_ops();
        if (xpc_sysctl)
                unregister_sysctl_table(xpc_sysctl);
        kfree(xpc_partitions);
out_1:
        kfree(xpc_remote_copy_buffer_base);
        return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
        xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
                 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
                 "heartbeat checks.");

module_param(xpc_disengage_request_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
                 "for disengage request to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
                 "other partitions when dropping into kdebug.");
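
/*
 * The tunables above can also be given at module load time (illustrative
 * values; assumes the module is built as xpc.ko):
 *
 *      modprobe xpc xpc_hb_interval=2 xpc_hb_check_interval=20 \
 *              xpc_disengage_request_timelimit=90 xpc_kdebug_ignore=1
 */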