[S390] Convert monitor calls to function calls.
author    Heiko Carstens <heiko.carstens@de.ibm.com>
          Thu, 17 Apr 2008 05:46:23 +0000 (07:46 +0200)
committer Heiko Carstens <heiko.carstens@de.ibm.com>
          Thu, 17 Apr 2008 05:47:05 +0000 (07:47 +0200)
Remove the program-check-generating monitor calls and use function
calls instead. There is no real advantage in using monitor calls,
but they do make debugging harder because of all the program checks
they generate.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
arch/s390/kernel/process.c
arch/s390/kernel/s390_ext.c
arch/s390/kernel/smp.c
arch/s390/kernel/traps.c
drivers/s390/cio/cio.c
include/asm-s390/cpu.h
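
For context: before this patch, the idle path enabled monitor call class 0 with
__ctl_set_bit(8, 15) and the interrupt entry paths executed the "mc 0,0"
instruction; with that class enabled, the instruction raises a program check
(code 0x40), which pgm_check_table dispatched to do_monitor_call(). After the
patch, the idle path calls s390_idle_enter()/s390_idle_leave() directly and the
interrupt paths call s390_idle_check(). The stand-alone sketch below only models
the new bookkeeping; the single-CPU state, the use of the C library clock() in
place of get_clock(), the omission of the idle_chain notifiers and the per-cpu
spinlock, and the main() driver are simplifications for illustration, not the
kernel code in the diff.

	/* Minimal user-space model of the new idle accounting. */
	#include <stdio.h>
	#include <time.h>

	struct s390_idle_data {
		int in_idle;
		unsigned long long idle_count;
		unsigned long long idle_time;
		unsigned long long idle_enter;
	};

	/* Per-cpu in the kernel; a single instance here. */
	static struct s390_idle_data s390_idle;

	/* Stand-in for the s390 TOD clock read (get_clock()). */
	static unsigned long long get_clock(void)
	{
		return (unsigned long long)clock();
	}

	/* Called when the CPU enters idle (notifier handling omitted). */
	static void s390_idle_enter(void)
	{
		s390_idle.idle_count++;
		s390_idle.in_idle = 1;
		s390_idle.idle_enter = get_clock();
	}

	/* Called when the CPU leaves idle. */
	static void s390_idle_leave(void)
	{
		s390_idle.idle_time += get_clock() - s390_idle.idle_enter;
		s390_idle.in_idle = 0;
	}

	/* Called from the interrupt paths instead of "mc 0,0". */
	static void s390_idle_check(void)
	{
		if (s390_idle.in_idle)
			s390_idle_leave();
	}

	int main(void)
	{
		s390_idle_enter();	/* CPU goes idle */
		s390_idle_check();	/* an interrupt wakes it up */
		printf("idle periods: %llu\n", s390_idle.idle_count);
		return 0;
	}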

arch/s390/kernel/process.c
index ce203154d8ce3a6c8f9bf4a07cd7ad072e3b3ec8..eb768ce88672e9dc50d0a1bd0abe32faf0d4c12f 100644 (file)
@@ -76,6 +76,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  * Need to know about CPUs going idle?
  */
 static ATOMIC_NOTIFIER_HEAD(idle_chain);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 int register_idle_notifier(struct notifier_block *nb)
 {
@@ -89,9 +90,33 @@ int unregister_idle_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_idle_notifier);
 
-void do_monitor_call(struct pt_regs *regs, long interruption_code)
+static int s390_idle_enter(void)
+{
+       struct s390_idle_data *idle;
+       int nr_calls = 0;
+       void *hcpu;
+       int rc;
+
+       hcpu = (void *)(long)smp_processor_id();
+       rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
+                                         &nr_calls);
+       if (rc == NOTIFY_BAD) {
+               nr_calls--;
+               __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
+                                            hcpu, nr_calls, NULL);
+               return rc;
+       }
+       idle = &__get_cpu_var(s390_idle);
+       spin_lock(&idle->lock);
+       idle->idle_count++;
+       idle->in_idle = 1;
+       idle->idle_enter = get_clock();
+       spin_unlock(&idle->lock);
+       return NOTIFY_OK;
+}
+
+void s390_idle_leave(void)
 {
-#ifdef CONFIG_SMP
        struct s390_idle_data *idle;
 
        idle = &__get_cpu_var(s390_idle);
@@ -99,10 +124,6 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code)
        idle->idle_time += get_clock() - idle->idle_enter;
        idle->in_idle = 0;
        spin_unlock(&idle->lock);
-#endif
-       /* disable monitor call class 0 */
-       __ctl_clear_bit(8, 15);
-
        atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
                                   (void *)(long) smp_processor_id());
 }
@@ -113,61 +134,30 @@ extern void s390_handle_mcck(void);
  */
 static void default_idle(void)
 {
-       int cpu, rc;
-       int nr_calls = 0;
-       void *hcpu;
-#ifdef CONFIG_SMP
-       struct s390_idle_data *idle;
-#endif
-
        /* CPU is going idle. */
-       cpu = smp_processor_id();
-       hcpu = (void *)(long)cpu;
        local_irq_disable();
        if (need_resched()) {
                local_irq_enable();
                return;
        }
-
-       rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
-                                         &nr_calls);
-       if (rc == NOTIFY_BAD) {
-               nr_calls--;
-               __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-                                            hcpu, nr_calls, NULL);
+       if (s390_idle_enter() == NOTIFY_BAD) {
                local_irq_enable();
                return;
        }
-
-       /* enable monitor call class 0 */
-       __ctl_set_bit(8, 15);
-
 #ifdef CONFIG_HOTPLUG_CPU
-       if (cpu_is_offline(cpu)) {
+       if (cpu_is_offline(smp_processor_id())) {
                preempt_enable_no_resched();
                cpu_die();
        }
 #endif
-
        local_mcck_disable();
        if (test_thread_flag(TIF_MCCK_PENDING)) {
                local_mcck_enable();
-               /* disable monitor call class 0 */
-               __ctl_clear_bit(8, 15);
-               atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-                                          hcpu);
+               s390_idle_leave();
                local_irq_enable();
                s390_handle_mcck();
                return;
        }
-#ifdef CONFIG_SMP
-       idle = &__get_cpu_var(s390_idle);
-       spin_lock(&idle->lock);
-       idle->idle_count++;
-       idle->in_idle = 1;
-       idle->idle_enter = get_clock();
-       spin_unlock(&idle->lock);
-#endif
        trace_hardirqs_on();
        /* Wait for external, I/O or machine check interrupt. */
        __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
arch/s390/kernel/s390_ext.c
index acf93dba7727002e7507bf2e28bc4cb065e3763b..3a8772d3baea37e687664ef1bbda166176e55ded 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-
+#include <asm/cpu.h>
 #include <asm/lowcore.h>
 #include <asm/s390_ext.h>
 #include <asm/irq_regs.h>
@@ -119,7 +119,7 @@ void do_extint(struct pt_regs *regs, unsigned short code)
 
        old_regs = set_irq_regs(regs);
        irq_enter();
-       asm volatile ("mc 0,0");
+       s390_idle_check();
        if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
                /**
                 * Make sure that the i/o interrupt did not "overtake"
arch/s390/kernel/smp.c
index d1e8e8a3fb661eb4c8f35b325701f99c09cc18b8..5a445b1b1217afe018e788bdd6912a368fcece4a 100644 (file)
@@ -73,7 +73,6 @@ static int smp_cpu_state[NR_CPUS];
 static int cpu_management;
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 static void smp_ext_bitcall(int, ec_bit_sig);
 
arch/s390/kernel/traps.c
index 60f728aeaf12986ceebbf52c2303805bed0ef378..9452a205629b0131897e5d02443c971fce3f020d 100644 (file)
@@ -59,7 +59,6 @@ int sysctl_userprocess_debug = 0;
 
 extern pgm_check_handler_t do_protection_exception;
 extern pgm_check_handler_t do_dat_exception;
-extern pgm_check_handler_t do_monitor_call;
 extern pgm_check_handler_t do_asce_exception;
 
 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -739,6 +738,5 @@ void __init trap_init(void)
         pgm_check_table[0x15] = &operand_exception;
         pgm_check_table[0x1C] = &space_switch_exception;
         pgm_check_table[0x1D] = &hfp_sqrt_exception;
-       pgm_check_table[0x40] = &do_monitor_call;
        pfault_irq_init();
 }
drivers/s390/cio/cio.c
index 60590a12d5299ce43e2626f045fdd488589d40cc..6dbe9488d3f99450f98296820da2ebe98281ea99 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/ipl.h>
 #include <asm/chpid.h>
 #include <asm/airq.h>
+#include <asm/cpu.h>
 #include "cio.h"
 #include "css.h"
 #include "chsc.h"
@@ -649,7 +650,7 @@ do_IRQ (struct pt_regs *regs)
 
        old_regs = set_irq_regs(regs);
        irq_enter();
-       asm volatile ("mc 0,0");
+       s390_idle_check();
        if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
                /**
                 * Make sure that the i/o interrupt did not "overtake"
include/asm-s390/cpu.h
index 352dde194f3cb5a2d012d7b3794e8cdf24faa262..e5a6a9ba3adfb6574153e72d75a67f50eda1b896 100644 (file)
@@ -22,4 +22,12 @@ struct s390_idle_data {
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
+void s390_idle_leave(void);
+
+static inline void s390_idle_check(void)
+{
+       if ((&__get_cpu_var(s390_idle))->in_idle)
+               s390_idle_leave();
+}
+
 #endif /* _ASM_S390_CPU_H_ */