 static int disable_sep;
 static int disable_tsc;
 static int disable_mtrr;
+static int disable_noidle;
 
 /* Cached VMI operations */
 struct {
 }
 
 /* For NO_IDLE_HZ, we stop the clock when halting the kernel */
-#ifdef CONFIG_NO_IDLE_HZ
 static fastcall void vmi_safe_halt(void)
 {
        int idle = vmi_stop_hz_timer();
                local_irq_enable();
        }
 }
-#endif
 
 #ifdef CONFIG_DEBUG_PAGE_TYPE
 
                     (char *)paravirt_ops.save_fl);
        patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
                     (char *)paravirt_ops.irq_disable);
-#ifndef CONFIG_NO_IDLE_HZ
-       para_fill(safe_halt, Halt);
-#else
-       vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
-       paravirt_ops.safe_halt = vmi_safe_halt;
-#endif
+
        para_fill(wbinvd, WBINVD);
        /* paravirt_ops.read_msr = vmi_rdmsr */
        /* paravirt_ops.write_msr = vmi_wrmsr */
 #endif
                custom_sched_clock = vmi_sched_clock;
        }
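+       /* No idle HZ: stop the periodic alarm across idle unless the
+        * user disabled it on the command line. */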
+       if (disable_noidle)
+               para_fill(safe_halt, Halt);
+       else {
+               vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
+               paravirt_ops.safe_halt = vmi_safe_halt;
+       }
 
        /*
         * Alternative instruction rewriting doesn't happen soon enough
 
        local_irq_save(flags);
        activate_vmi();
-#ifdef CONFIG_SMP
+
+#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
 #endif
+
        local_irq_restore(flags & X86_EFLAGS_IF);
 }
 
        } else if (!strcmp(arg, "disable_mtrr")) {
                clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
                disable_mtrr = 1;
-       }
+       } else if (!strcmp(arg, "disable_noidle"))
+               disable_noidle = 1;
        return 0;
 }
 
 
 
        cycles_not_accounted = cur_real_cycles - real_cycles_accounted_system;
        while (cycles_not_accounted >= cycles_per_jiffy) {
-               /* systems wide jiffies and wallclock. */
+               /* system-wide jiffies. */
                do_timer(1);
 
                cycles_not_accounted -= cycles_per_jiffy;
                real_cycles_accounted_system += cycles_per_jiffy;
        }
 
-       if (vmi_timer_ops.wallclock_updated())
-               update_xtime_from_wallclock();
-
        write_sequnlock(&xtime_lock);
 }
 
        unsigned long seq, next;
        unsigned long long real_cycles_expiry;
        int cpu = smp_processor_id();
-       int idle;
 
        BUG_ON(!irqs_disabled());
        if (sysctl_hz_timer != 0)
 
        cpu_set(cpu, nohz_cpu_mask);
        smp_mb();
+
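+       /* Keep the periodic tick if this CPU still has RCU or softirq
+        * work, or a timer due within one alarm period. */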
        if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
-           (next = next_timer_interrupt(), time_before_eq(next, jiffies))) {
+           (next = next_timer_interrupt(),
+            time_before_eq(next, jiffies + HZ/CONFIG_VMI_ALARM_HZ))) {
                cpu_clear(cpu, nohz_cpu_mask);
-               next = jiffies;
-               idle = 0;
-       } else
-               idle = 1;
+               return 0;
+       }
 
        /* Convert jiffies to the real cycle counter. */
        do {
        } while (read_seqretry(&xtime_lock, seq));
 
        /* This cpu is going idle. Disable the periodic alarm. */
-       if (idle) {
-               vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
-               per_cpu(idle_start_jiffies, cpu) = jiffies;
-       }
-
+       vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
+       per_cpu(idle_start_jiffies, cpu) = jiffies;
        /* Set the real time alarm to expire at the next event. */
        vmi_timer_ops.set_alarm(
-                     VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL,
-                     real_cycles_expiry, 0);
-
-       return idle;
+               VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL,
+               real_cycles_expiry, 0);
+       return 1;
 }
 
 static void vmi_reenable_hz_timer(int cpu)