ia64/pv_ops/pv_time_ops: add sched_clock hook.
author		Isaku Yamahata <yamahata@valinux.co.jp>
		Wed, 4 Mar 2009 12:05:40 +0000 (21:05 +0900)
committer	Tony Luck <tony.luck@intel.com>
		Thu, 26 Mar 2009 17:50:42 +0000 (10:50 -0700)
Add a sched_clock() hook to paravirtualize sched_clock().

The ia64 sched_clock() is based on ar.itc, which isn't stable in a
virtualized environment because a vcpu may move around on pcpus. So
it needs paravirtualization.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Tony Luck <tony.luck@intel.com>
arch/ia64/include/asm/paravirt.h
arch/ia64/include/asm/timex.h
arch/ia64/kernel/head.S
arch/ia64/kernel/paravirt.c
arch/ia64/kernel/time.c

diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 56f69f938cca1edf759ccead45799d419167e87e..a73e77add7e21bca7a8536fd149b59448c138ef5 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -225,6 +225,8 @@ struct pv_time_ops {
        int (*do_steal_accounting)(unsigned long *new_itm);
 
        void (*clocksource_resume)(void);
+
+       unsigned long long (*sched_clock)(void);
 };
 
 extern struct pv_time_ops pv_time_ops;
@@ -242,6 +244,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm)
        return pv_time_ops.do_steal_accounting(new_itm);
 }
 
+static inline unsigned long long paravirt_sched_clock(void)
+{
+       return pv_time_ops.sched_clock();
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #else
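
The new hook follows the pattern of the existing pv_time_ops entries: it
defaults to the native implementation (see the paravirt.c hunk below), and a
hypervisor-aware backend overwrites the function pointer during early boot,
before the scheduler first reads the clock. A minimal sketch of such a
backend, with hypothetical names (hv_sched_clock, hv_time_init, and the
shared_time_ns source are illustrative, not part of this commit):

/* Hypothetical backend: return nanoseconds from a source that stays
 * monotonic across vcpu migration, instead of raw ar.itc.  The
 * shared_time_ns variable stands in for whatever stable time field
 * the hypervisor exposes to the guest. */
static volatile unsigned long long shared_time_ns;	/* updated by hypervisor */

static unsigned long long hv_sched_clock(void)
{
	return shared_time_ns;
}

void __init hv_time_init(void)
{
	pv_time_ops.sched_clock = hv_sched_clock;
}

Since paravirt_sched_clock() is a static inline, the only cost on
CONFIG_PARAVIRT kernels is a single indirect call; non-paravirt kernels
bypass the table entirely, as the head.S hunk below shows.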
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 4e03cfe74a0c40712c7ff328c0436eca46946e95..86c7db86118034bcc8ed60162112786afd30c4f5 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -40,5 +40,6 @@ get_cycles (void)
 }
 
 extern void ia64_cpu_local_tick (void);
+extern unsigned long long ia64_native_sched_clock (void);
 
 #endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 59301c4728009ecb410516fb0e737107eda42d67..23f846de62d58ae6e60a9c445911419daa4ff17f 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1050,7 +1050,7 @@ END(ia64_delay_loop)
  * except that the multiplication and the shift are done with 128-bit
  * intermediate precision so that we can produce a full 64-bit result.
  */
-GLOBAL_ENTRY(sched_clock)
+GLOBAL_ENTRY(ia64_native_sched_clock)
        addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
        mov.m r9=ar.itc         // fetch cycle-counter                          (35 cyc)
        ;;
@@ -1066,7 +1066,13 @@ GLOBAL_ENTRY(sched_clock)
        ;;
        shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
        br.ret.sptk.many rp
-END(sched_clock)
+END(ia64_native_sched_clock)
+#ifndef CONFIG_PARAVIRT
+       //unsigned long long
+       //sched_clock(void) __attribute__((alias("ia64_native_sched_clock")));
+       .global sched_clock
+sched_clock = ia64_native_sched_clock
+#endif
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 GLOBAL_ENTRY(cycle_to_cputime)
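
When CONFIG_PARAVIRT is off there is no ops table to go through, so the
commit binds the sched_clock symbol directly to the native routine with an
assembler-level alias; callers then reach ia64_native_sched_clock with no
indirection at all. The commented-out C in the hunk shows the intent, but
GCC's alias attribute requires the target to be defined in the same
translation unit, and ia64_native_sched_clock is defined here in head.S, so
the alias has to be spelled in assembly. For illustration, the same-TU C
form would look like this (a sketch with a stand-in body, not code from the
commit):

/* alias: both names resolve to the same address, so a call to
 * sched_clock() jumps straight into the target function. */
unsigned long long ia64_native_sched_clock(void)
{
	return 0;	/* stand-in body for illustration */
}

unsigned long long sched_clock(void)
	__attribute__((alias("ia64_native_sched_clock")));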
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index 9f14c16f63693794445645753ff6d61443151b61..6bc33a6db755c2a351f32be3266a28147fe78017 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -366,4 +366,5 @@ ia64_native_do_steal_accounting(unsigned long *new_itm)
 
 struct pv_time_ops pv_time_ops = {
        .do_steal_accounting = ia64_native_do_steal_accounting,
+       .sched_clock = ia64_native_sched_clock,
 };
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index f0ebb342409d0b4ab3cd5d9ba470aa3284ef2667..c323c7b9c775e7aad55e46180b1f98fcf11cefdb 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -49,6 +49,15 @@ EXPORT_SYMBOL(last_cli_ip);
 
 #endif
 
+#ifdef CONFIG_PARAVIRT
+/* We need to define a real function for sched_clock, to override the
+   weak default version */
+unsigned long long sched_clock(void)
+{
+        return paravirt_sched_clock();
+}
+#endif
+
 #ifdef CONFIG_PARAVIRT
 static void
 paravirt_clocksource_resume(void)
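
This strong definition is what actually routes sched_clock() through the
hook on CONFIG_PARAVIRT kernels: the generic timekeeping code provides only
a weak, jiffies-based fallback, and with the head.S alias compiled out under
CONFIG_PARAVIRT, a real C function is needed to override it. The weak
default being replaced looks roughly like this (from kernel/sched_clock.c of
this era, quoted from memory for context, not part of the diff):

/* Generic fallback: coarse time derived from the jiffies counter;
 * any architecture with a better clock overrides this with a strong
 * definition such as the one above. */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}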