smp_call_function: get rid of the unused nonatomic/retry argument
[linux-2.6-omap-h63xx.git] / arch/ia64/kernel/process.c
index 7377d323131dfcc6565678dad9c0e4d8ab1e6f63..fabaf08d9a695bbb088951da7e96af4a9b640443 100644 (file)
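For context, the interface change behind this commit: smp_call_function() loses the nonatomic/retry argument that no architecture ever used. A minimal sketch of the before/after calling convention, reconstructed from the commit subject and the era's include/linux/smp.h (the wrapper function is illustrative, not part of this file):

    #include <linux/smp.h>

    static void do_nothing(void *unused)
    {
    }

    /* Old form: smp_call_function(do_nothing, NULL, 0, 1);
     *           the third (nonatomic/retry) argument was ignored everywhere.
     * New form, as this patch uses in cpu_idle_wait() below: */
    static void kick_all_cpus(void)     /* illustrative wrapper */
    {
            smp_call_function(do_nothing, NULL, 1);  /* wait == 1: block until
                                                        every CPU has run it */
    }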
@@ -52,7 +52,6 @@
 #include "sigframe.h"
 
 void (*ia64_mark_idle)(int);
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
@@ -157,11 +156,29 @@ show_regs (struct pt_regs *regs)
                show_stack(NULL, NULL);
 }
 
+void tsk_clear_notify_resume(struct task_struct *tsk)
+{
+#ifdef CONFIG_PERFMON
+       if (tsk->thread.pfm_needs_checking)
+               return;
+#endif
+       if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE))
+               return;
+       clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
+}
+
+/*
+ * do_notify_resume_user():
+ *     Called from notify_resume_user in entry.S, with interrupts disabled.
+ */
 void
-do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
+do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
 {
        if (fsys_mode(current, &scr->pt)) {
-               /* defer signal-handling etc. until we return to privilege-level 0.  */
+               /*
+                * defer signal-handling etc. until we return to
+                * privilege-level 0.
+                */
                if (!ia64_psr(&scr->pt)->lp)
                        ia64_psr(&scr->pt)->lp = 1;
                return;
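The tsk_clear_notify_resume() helper added above drops TIF_NOTIFY_RESUME only once no deferred work (perfmon checking, user-RBS sync) remains for the task. A hedged sketch of the assumed counterpart that raises the flag; in the tree of this era the setter is, to our recollection, a macro in asm/thread_info.h, sketched here as a function:

    #include <linux/sched.h>
    #include <linux/thread_info.h>

    /* Assumed counterpart: queueing deferred work raises the flag
     * unconditionally; tsk_clear_notify_resume() later clears it only
     * if every source of work is idle again. */
    static void tsk_set_notify_resume(struct task_struct *tsk)
    {
            set_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
    }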
@@ -169,12 +186,26 @@ do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall
 
 #ifdef CONFIG_PERFMON
        if (current->thread.pfm_needs_checking)
+               /*
+                * Note: pfm_handle_work() may be called with interrupts
+                * disabled, and may enable interrupts within the function.
+                */
                pfm_handle_work();
 #endif
 
        /* deal with pending signal delivery */
-       if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK))
+       if (test_thread_flag(TIF_SIGPENDING)) {
+               local_irq_enable();     /* force interrupt enable */
                ia64_do_signal(scr, in_syscall);
+       }
+
+       /* copy user rbs to kernel rbs */
+       if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
+               local_irq_enable();     /* force interrupt enable */
+               ia64_sync_krbs();
+       }
+
+       local_irq_disable();    /* force interrupt disable */
 }
 
 static int pal_halt        = 1;
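The reshuffled do_notify_resume_user() above now follows a strict interrupt discipline: it is entered with interrupts off, enables them only around work that may sleep or fault, and forces them off again before returning to the assembly path. A condensed sketch of that shape (work_pending() and do_work() are placeholders; the real work items are ia64_do_signal() and ia64_sync_krbs()):

    #include <linux/irqflags.h>
    #include <linux/types.h>

    static bool work_pending(void);     /* placeholder check */
    static void do_work(void);          /* placeholder work item */

    static void deferred_work_shape(void)
    {
            /* entry.S calls in with interrupts disabled */
            if (work_pending()) {
                    local_irq_enable();  /* the work may sleep or fault */
                    do_work();
            }
            local_irq_disable();         /* the low-level return path expects
                                            interrupts off again */
    }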
@@ -239,33 +270,23 @@ static inline void play_dead(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-void cpu_idle_wait(void)
+static void do_nothing(void *unused)
 {
-       unsigned int cpu, this_cpu = get_cpu();
-       cpumask_t map;
-       cpumask_t tmp = current->cpus_allowed;
-
-       set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-       put_cpu();
-
-       cpus_clear(map);
-       for_each_online_cpu(cpu) {
-               per_cpu(cpu_idle_state, cpu) = 1;
-               cpu_set(cpu, map);
-       }
-
-       __get_cpu_var(cpu_idle_state) = 0;
+}
 
-       wmb();
-       do {
-               ssleep(1);
-               for_each_online_cpu(cpu) {
-                       if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-                               cpu_clear(cpu, map);
-               }
-               cpus_and(map, map, cpu_online_map);
-       } while (!cpus_empty(map));
-       set_cpus_allowed(current, tmp);
+/*
+ * cpu_idle_wait - Ensure that all CPUs discard the old value of pm_idle
+ * and pick up the new one. Required while changing the pm_idle handler
+ * on SMP systems.
+ *
+ * The caller must have changed pm_idle to the new value before the call.
+ * The old pm_idle value will not be used by any CPU after this function
+ * returns.
+ */
+void cpu_idle_wait(void)
+{
+       smp_mb();
+       /* kick all the CPUs so that they exit pm_idle */
+       smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
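Per the new comment, the protocol is publish-then-wait. A minimal caller sketch under that assumption (set_idle_handler() is a made-up name; pm_idle is the real global read by the idle loop):

    extern void (*pm_idle)(void);       /* the global read by cpu_idle() */

    static void set_idle_handler(void (*new_idle)(void))  /* hypothetical */
    {
            pm_idle = new_idle;     /* 1: publish the new handler */
            cpu_idle_wait();        /* 2: barrier + IPI; no CPU uses the old
                                       handler once this returns */
    }

The empty do_nothing() IPI is enough because taking the interrupt forces each CPU out of its current pm_idle invocation; on the next loop pass it rereads the pointer.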
 
@@ -293,9 +314,6 @@ cpu_idle (void)
 #ifdef CONFIG_SMP
                        min_xtp();
 #endif
-                       if (__get_cpu_var(cpu_idle_state))
-                               __get_cpu_var(cpu_idle_state) = 0;
-
                        rmb();
                        if (mark_idle)
                                (*mark_idle)(1);
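With the per-CPU cpu_idle_state handshake gone, the rmb() that survives in the loop is what pairs with the smp_mb() in cpu_idle_wait(). Roughly the loop shape, a sketch condensed from this era's cpu_idle() (local names illustrative):

    extern void (*pm_idle)(void);
    extern void default_idle(void);

    static void idle_loop_shape(void)   /* condensed from cpu_idle() */
    {
            while (!need_resched()) {
                    void (*idle)(void);

                    rmb();              /* pairs with the smp_mb() in
                                           cpu_idle_wait() */
                    idle = pm_idle;     /* reread the handler every pass */
                    if (!idle)
                            idle = default_idle;
                    (*idle)();
            }
    }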
@@ -624,42 +642,12 @@ do_dump_fpu (struct unw_frame_info *info, void *arg)
        do_dump_task_fpu(current, info, arg);
 }
 
-int
-dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
-{
-       struct unw_frame_info tcore_info;
-
-       if (current == task) {
-               unw_init_running(do_copy_regs, regs);
-       } else {
-               memset(&tcore_info, 0, sizeof(tcore_info));
-               unw_init_from_blocked_task(&tcore_info, task);
-               do_copy_task_regs(task, &tcore_info, regs);
-       }
-       return 1;
-}
-
 void
 ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
 {
        unw_init_running(do_copy_regs, dst);
 }
 
-int
-dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
-{
-       struct unw_frame_info tcore_info;
-
-       if (current == task) {
-               unw_init_running(do_dump_fpu, dst);
-       } else {
-               memset(&tcore_info, 0, sizeof(tcore_info));
-               unw_init_from_blocked_task(&tcore_info, task);
-               do_dump_task_fpu(task, &tcore_info, dst);
-       }
-       return 1;
-}
-
 int
 dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
 {